from setuptools import setup
setup(
name="consistency-models",
py_modules=["cm", "evaluations"],
install_requires=[
"blobfile>=1.0.5",
"torch",
"tqdm",
"numpy",
"scipy",
"pandas",
"Cython",
"piq==0.7.0",
"joblib==0.14.0",
"albu... |
from .inception_v3 import InceptionV3
import blobfile as bf
import torch
import torch.distributed as dist
import torch.nn as nn
from cm import dist_util
import numpy as np
import warnings
from scipy import linalg
from PIL import Image
from tqdm import tqdm
def clip_preproc(preproc_fn, x):
return preproc_fn(Image....
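# clip_preproc is truncated above; a minimal sketch of the evident intent:
# wrap an array as a PIL image so a CLIP-style preprocessing function can
# consume it (the uint8 HWC layout is an assumption, not confirmed above):
import numpy as np
from PIL import Image

def clip_preproc_sketch(preproc_fn, x):
    # Assumes x is a uint8 HWC array; preproc_fn handles resize/normalize.
    return preproc_fn(Image.fromarray(np.asarray(x)))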
# Ported from the model here:
# https://github.com/NVlabs/stylegan3/blob/407db86e6fe432540a22515310188288687858fa/metrics/frechet_inception_distance.py#L22
#
# I have verified that the spatial features and output features are correct
# within a mean absolute error of ~3e-5.
import collections
import torch
class Con...
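# The verification mentioned in the comment above can be reproduced with a
# simple mean-absolute-error check; a sketch (model names and the tolerance
# are illustrative, not from the original):
import torch

def check_port(ported_model, reference_model, tol=1e-4):
    # Run identical inputs through both implementations and compare outputs.
    x = torch.rand(8, 3, 299, 299)
    with torch.no_grad():
        mae = (ported_model(x) - reference_model(x)).abs().mean().item()
    assert mae < tol, f"features diverge: MAE={mae:.2e}"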
import argparse
import io
import os
import random
import warnings
import zipfile
from abc import ABC, abstractmethod
from contextlib import contextmanager
from functools import partial
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from typing import Iterable, Optional, Tuple
import ...
"""
Convert an LSUN lmdb database into a directory of images.
"""
import argparse
import io
import os
from PIL import Image
import lmdb
import numpy as np
def read_images(lmdb_path, image_size):
env = lmdb.open(lmdb_path, map_size=1099511627776, max_readers=100, readonly=True)
with env.begin(write=False) as...
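# read_images is cut off above; a minimal sketch of how an LSUN-style lmdb is
# typically walked and decoded (the resize/center-crop policy is an assumption):
import io
import lmdb
import numpy as np
from PIL import Image

def iter_lmdb_images(lmdb_path, image_size):
    env = lmdb.open(lmdb_path, readonly=True, max_readers=100)
    with env.begin(write=False) as txn:
        # Each record's value is a compressed image byte string.
        for _, raw in txn.cursor():
            img = Image.open(io.BytesIO(raw)).convert("RGB")
            scale = image_size / min(img.size)
            img = img.resize((round(img.width * scale), round(img.height * scale)))
            left = (img.width - image_size) // 2
            top = (img.height - image_size) // 2
            yield np.array(img.crop((left, top, left + image_size, top + image_size)))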
"""
Train a diffusion model on images.
"""
import argparse
from cm import dist_util, logger
from cm.image_datasets import load_data
from cm.resample import create_named_schedule_sampler
from cm.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
cm_train_defaults,
args_to_di...
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
from functools import cache
from mpi4py import MPI
from cm import...
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
from cm import dist_util, logger
from cm.script_util import (
...
"""
Train a diffusion model on images.
"""
import argparse
from cm import dist_util, logger
from cm.image_datasets import load_data
from cm.resample import create_named_schedule_sampler
from cm.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
add_dict_to_arg...
from abc import ABC, abstractmethod
import numpy as np
import torch as th
from scipy.stats import norm
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:para...
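# For reference, a sketch of the simplest sampler behind this factory, a
# uniform one; the weights()/sample() interface follows the docstring, but
# this is not necessarily the file's exact implementation:
import numpy as np
import torch as th

class UniformSamplerSketch:
    def __init__(self, diffusion):
        # Assumes the diffusion object exposes num_timesteps.
        self._weights = np.ones([diffusion.num_timesteps])

    def weights(self):
        return self._weights

    def sample(self, batch_size, device):
        w = self.weights()
        p = w / np.sum(w)
        indices = np.random.choice(len(p), size=(batch_size,), p=p)
        # Importance weights keep the loss unbiased for non-uniform samplers.
        iw = 1.0 / (len(p) * p[indices])
        return (th.from_numpy(indices).long().to(device),
                th.from_numpy(iw).float().to(device))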
import math
import random
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset
def load_data(
*,
data_dir,
batch_size,
image_size,
class_cond=False,
deterministic=False,
random_crop=False,
random_flip=Tr...
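# load_data's body is truncated; the pattern such loaders implement is an
# endless generator over DataLoader batches, roughly as below (the loader
# settings are illustrative):
from torch.utils.data import DataLoader

def infinite_batches(dataset, batch_size, deterministic=False):
    loader = DataLoader(dataset, batch_size=batch_size,
                        shuffle=not deterministic, num_workers=1, drop_last=True)
    while True:
        # Restart each epoch so training code can call next() forever.
        yield from loader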
import torch as th
import torch.distributed as dist
from . import dist_util
def get_generator(generator, num_samples=0, seed=0):
if generator == "dummy":
return DummyGenerator()
elif generator == "determ":
return DeterministicGenerator(num_samples, seed)
elif generator == "determ-indiv":
...
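# A sketch of what the "dummy" branch plausibly returns: a thin wrapper over
# torch's global RNG with no determinism guarantees (the method set here is
# assumed, not visible in the snippet):
import torch as th

class DummyGeneratorSketch:
    def randn(self, *shape, dtype=th.float32, device="cpu"):
        return th.randn(*shape, dtype=dtype, device=device)

    def randint(self, low, high, size, dtype=th.long, device="cpu"):
        return th.randint(low, high, size, dtype=dtype, device=device)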
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
class GroupNorm32(nn.GroupNor...
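# GroupNorm32 is truncated above; the usual trick behind such a class is to
# normalize in float32 and cast back, sketched here (assumed, not shown):
import torch.nn as nn

class GroupNorm32Sketch(nn.GroupNorm):
    def forward(self, x):
        # float32 normalization is numerically safer under mixed precision;
        # cast back to the input dtype (e.g. float16) afterwards.
        return super().forward(x.float()).type(x.dtype)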
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
...
import argparse
from .karras_diffusion import KarrasDenoiser
from .unet import UNetModel
import numpy as np
NUM_CLASSES = 1000
def cm_train_defaults():
return dict(
teacher_model_path="",
teacher_dropout=0.1,
training_mode="consistency_distillation",
target_ema_mode="fixed",
...
"""
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from c...
import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import RAdam
from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_em...
"""
Based on: https://github.com/crowsonkb/k-diffusion
"""
import random
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from piq import LPIPS
from torchvision.transforms import RandomCrop
from . import dist_util
from .nn import mean_flat, append_dims, append_zero
from .ran...
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar...
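# normal_kl is cut off mid-signature; the closed form such a helper evaluates
# for two diagonal Gaussians is standard, sketched here:
import torch as th

def normal_kl_sketch(mean1, logvar1, mean2, logvar2):
    # KL(N(mean1, e^logvar1) || N(mean2, e^logvar2)) =
    #   0.5 * (-1 + logvar2 - logvar1 + e^(logvar1 - logvar2)
    #          + (mean1 - mean2)^2 * e^(-logvar2))
    return 0.5 * (-1.0 + logvar2 - logvar1
                  + th.exp(logvar1 - logvar2)
                  + ((mean1 - mean2) ** 2) * th.exp(-logvar2))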
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
...
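# INITIAL_LOG_LOSS_SCALE above hints at dynamic loss scaling; a minimal sketch
# of that scheme (the growth and backoff constants are illustrative):
import torch as th

def fp16_step_sketch(loss, params, log_loss_scale):
    # Scale the loss up so float16 gradients do not underflow.
    (loss * (2 ** log_loss_scale)).backward()
    if any(p.grad is not None and not th.isfinite(p.grad).all() for p in params):
        # Overflow: skip the update and back the scale off.
        return log_loss_scale - 1, False
    # Stable step: unscale the grads (not shown) and slowly grow the scale.
    return log_loss_scale + 1e-3, True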
"""
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
d...
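# Given the rank-to-GPU comment above, device selection typically reduces to
# the following (a sketch; the original setup also configures the process
# group, backend, and master address, none of which is shown here):
import torch as th
from mpi4py import MPI

def dev_sketch(gpus_per_node=8):
    # Map each MPI rank onto one of the node's GPUs, as the comment describes.
    if th.cuda.is_available():
        return th.device(f"cuda:{MPI.COMM_WORLD.Get_rank() % gpus_per_node}")
    return th.device("cpu")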
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import os.path
import subprocess
import shutil
# helpful for kernel development
debug = 0
gen_kernels = [
[ "xgemm_blocksparse_32x32x32_xprop", "fprop", "A32",... |
#!/usr/bin/env python
import setuptools
setuptools.setup(
name='blocksparse',
version='1.13.1',
description='Tensorflow ops for blocksparse matmul, transformer, convolution and related operations.',
author='OpenAI',
maintainer='Scott Gray',
maintainer_email='scott@openai.com',
install_requ...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
import blocksparse.norms as norms
import blocksparse.lstm as lstm
from time import time
shapes = [
...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time
import numpy as np
import tensorflow as tf
import blocksparse as bs
ones = 0
out = 0
bench = 0
config = tf.ConfigProto(
intra_op_parallelism_threa...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from blocksparse.norms import layer_norm, layer_norm_test, layer_norm_grad_test
import blocksparse.ewops as ew
np.set_printoptions(threshold=8...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
from time import time
shapes = [
# [64, 16, 10, 10, 16, ],
# [64, 16, 10, 6, 32, ],
...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time
import sys
import networkx
import numpy as np
import tensorflow as tf
import blocksparse as bs
np.set_printoptions(threshold=8193, linewidth=600, formatter={...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from time import time
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
import blocksparse.transformer as trans
from tensorflow.python.ops import gradient_checker
...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
import math
#from tensorflow.python.ops import gradient_checker
ones = 0
out = 0
def gelu(x):
r...
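# gelu is truncated above; a common tanh approximation it may return is the
# standard one below (whether this file uses tanh or the exact erf form is
# not visible in the snippet):
import numpy as np
import tensorflow as tf

def gelu_sketch(x):
    return 0.5 * x * (1.0 + tf.tanh(np.sqrt(2.0 / np.pi)
                                    * (x + 0.044715 * tf.pow(x, 3.0))))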
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
shapes = [
# [ 4, 4 ],
# [ 60, 60 ],
# [ 64, 64 ],
# [ 64, 256 ],
# [ 256,...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from tensorflow.python.ops import gradient_checker
def ceil_div(x, y):
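    # Ceiling division via floored division of the negation: -(-10 // 3) == 4.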
return -(-x // y)
shapes = [
# ...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from blocksparse.optimize import adam_op
ones = 0
out = 0
beta1 = 0.8
beta2 = 0.5
le...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import blocksparse.ewops as ew
from time import time
shapes = [
[ 128, 16, 149, ],
[ 128, 16, 30, ], # int32
[ ...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import function
from blocksparse.embed import embedding_lookup
import blocksparse.ewops as ew
from time import ...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from random import shuffle
from tensorflow.python.ops import gradient_checker
from blocksparse.conv import BlocksparseConv, BlocksparseDeconv
...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from blocksparse.optimize import adafactor1d_op, adafactor2d_op
ones = 0
out = 0
beta2 = 0...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time
from blocksparse.conv import cwise_linear
from blocksparse.ewops import float_cast
ones = 0
out = 0
shapes = [
[ 1...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from operator import mul
from blocksparse.conv import ConvEdgeBias, ceil_div
import blocksparse.ewops as ew
ones = 0
out = 0
bench...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from struct import pack, unpack
from time import time
class QuantizeTest(tf.test.TestCase):
def testQuanti...
#!/usr/bin/env python
# nvprof -f -o "nccl_test_%p.nvvp" --profile-child-processes
# nvprof --profile-child-processes
import numpy as np
import platform
from collections import defaultdict
from mpi4py import MPI
import blocksparse.nccl as nccl
import blocksparse.ewops as ew
from time import time
import tensorflow as ...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from blocksparse import dw_matmul_large_n
import blocksparse.ewops as ew
from time import time
shapes = [
[ 1024*1024, 32 ],
...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from operator import mul
import blocksparse.ewops as ew
from tensorflow.python.framework import function
ones = 0
out = 0
bench = ...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from time import time
from blocksparse.matmul import BlocksparseMatMul, SparseProj, group_param_grads
import blocksparse.ewops as ew...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from tensorflow.python.ops import gradient_checker
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
i...
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from blocksparse.matmul import blocksparse_reduced_dw
config = tf.ConfigProto(
intra_op_parallelism_threads...
from blocksparse.matmul import BlocksparseMatMul
import tensorflow as tf
import numpy as np
hidden_size = 4096
block_size = 32
minibatch_size = 64
# Create a (random) sparsity pattern
sparsity = np.random.randint(2, size=(hidden_size//block_size,hidden_size//block_size))
# Initialize the sparse matrix multiplication...
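# The example is cut off above; based on BlocksparseMatMul's documented usage,
# the continuation conventionally looks like this (variable names assumed):
bsmm = BlocksparseMatMul(sparsity, block_size=block_size)

# Input activations and a weight tensor shaped by the op itself.
x = tf.placeholder(tf.float32, shape=[None, hidden_size])
w = tf.get_variable("w", bsmm.w_shape, dtype=tf.float32)

# Block-sparse product restricted to the chosen sparsity pattern.
y = bsmm(x, w)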
#!/usr/bin/env python
'''
Example of the blocksparse transformer on enwik8.
To download data:
wget http://mattmahoney.net/dc/enwik8.zip
unzip enwik8.zip -d /tmp
'''
import argparse
import numpy as np
import tensorflow as tf
import blocksparse as bs
from mpi4py import MPI
def layernorm(x, scope, epsilon=1e-5...
#!/usr/bin/env python
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from mpi4py import MPI
from tensorflow.examples.tutorials.mnist import input_data
from blocksparse.transformer import transpose_0213, masked_softmax
from blocksparse.norms import layer_norm
from blocksparse....
import os
import os.path
import string
import json
import numpy as np
import tensorflow as tf
import random
def ceil_div(x, y):
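    # Ceiling division via floored division of the negation: -(-10 // 3) == 4.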
return -(-x // y)
def text8(path):
print("opening:", path)
text = open(path).read()
tr_text = text[:int(90e6)]
va_text = text[int(90e6):int(95e6)]
te_text = text[int(...
#!/usr/bin/env python
# import memory_util as mu
# mu.vlog(1)
import os
import time
import argparse
import logging
import platform
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import layers
from layers import HParams, LSTM_Model
from utils import text8, text8_stream, wiki3, wiki3_stream, n...
import numpy as np
import networkx
from random import shuffle, randint
def make_mask(n, kind, axis=0):
if kind == 'dense':
a = np.ones((n, n), dtype=np.int32)
elif kind.startswith('old_ba_'):
_, _, m = kind.split('_')
a = old_barabasi_albert(n, int(m))
elif kind.startswith('ba_'):
...
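# A sketch of the 'ba_' branch implied above: turn a Barabasi-Albert graph's
# adjacency matrix into a square mask (the diagonal handling is an assumption):
import networkx
import numpy as np

def barabasi_albert_mask(n, m):
    g = networkx.barabasi_albert_graph(n, m)
    a = networkx.to_numpy_array(g, dtype=np.int32)
    np.fill_diagonal(a, 1)  # keep self-connections
    return a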
import os
import re
import sys
import tempfile
import tensorflow as tf
debug_messages = False
def vlog(level):
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = str(level)
# this helper is here in case we later want to capture huge stderr that doesn't fit in RAM
class TemporaryFileHelper:
"""Provides a way to fetch contents... |
import numpy as np
import tensorflow as tf
from sklearn.externals import joblib
from blocksparse.matmul import BlocksparseMatMul, SparseProj, group_param_grads, get_parents, add_control_input, largest_block
from blocksparse.norms import layer_norm
import blocksparse.ewops as ew
import masks
from utils im...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.training import slot_creator
from tensorflow.python.t...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import scipy.sparse as sparse
from tensorflow.python.framework import ops
from tensorflow.python.ops.init_ops import Initializer
from bl...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops, function
from blocksparse.utils import _op_module, scalar_constant
embedding_lookup_op ...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, get_entropy
#########...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, get_entropy, scalar_constant
ew_z_xy_op = ...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
from mpi4py import MPI
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module
from blocksparse.e...
__version__ = '1.13.1_master'
from blocksparse.utils import (
_op_module,
entropy_size,
get_entropy,
set_entropy,
reset_scalar_constants,
scalar_constant,
ceil_div,
reduce_mul,
bst_conv_layout,
bst_deconv_layout,
)
dw_matmul_large_n = _op_module.dw_matmul_large_n
from blockspar...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os.path
import numpy as np
import tensorflow as tf
from operator import mul
if sys.version_info >= (3, 0):
from functools import reduce
data_files_path = tf...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, scalar_constant
############################## B...
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import collections
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module
import blocksparse.ewops as ew
recompute_op = _op_module.reco...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from operator import lt
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, reduce_mul, ceil_div, z_o...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module
############################## fused_lstm_gates ...
"""Cuda op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from blocksparse.utils import _op_module, reduce_mul
layer_norm_...
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica...
#!/usr/bin/env python
# Experimental depthwise separable convolution kernels (just the spatial components) that run on tensorcores.
# (C,H,W,N) format is used, but if remapped to (N, heads, H, W, head_state) can be reused in self-attention style convolution.
# Though the filters can no longer be broadcast, and relati...
#!/usr/bin/env python
#
# Author: Hans Chris Jones <chris.jones@lambdastack.io>
# Copyright 2018, LambdaStack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/license...
#!/usr/bin/env python
#
# Author: Hans Chris Jones <chris.jones@lambdastack.io>
#
# Copyright 2017, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apach...
import shutil
from typing import Dict, List
import pytest
import random
from datastore.providers.chroma_datastore import ChromaDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryWithEmbedding,
Source,
)
TEST_PERSISTENCE_DIR = "chroma_test_datas...
from datastore.providers.redis_datastore import RedisDataStore
from models.models import DocumentChunk, DocumentChunkMetadata, QueryWithEmbedding, Source, DocumentMetadataFilter
import pytest
import redis.asyncio as redis
import numpy as np
NUM_TEST_DOCS = 10
@pytest.fixture
async def redis_datastore():
return aw...
import pytest
import os
import time
from typing import Union
from azure.search.documents.indexes import SearchIndexClient
from models.models import DocumentMetadataFilter, Query, Source, Document, DocumentMetadata
AZURESEARCH_TEST_INDEX = "testindex"
os.environ["AZURESEARCH_INDEX"] = AZURESEARCH_TEST_INDEX
if os.envir...
from typing import Dict, List
import pytest
from datastore.providers.supabase_datastore import SupabaseDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryWithEmbedding,
)
def create_embedding(non_zero_pos: int) -> List[float]:
# create a vector...
from typing import Dict, List
import pytest
from datastore.providers.postgres_datastore import PostgresDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFilter,
QueryWithEmbedding,
)
def create_embedding(non_zero_pos: int) -> List[float]:
# create a vector...
from typing import Dict, List
import pytest
import qdrant_client
from qdrant_client.http.models import PayloadSchemaType
from datastore.providers.qdrant_datastore import QdrantDataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
QueryWithEmbedding,
DocumentMetadataFilter,
So...
# from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
# env_path = Path(".") / "zilliz.env"
# load_dotenv(dotenv_path=env_path, verbose=True)
import pytest
from datastore.providers.zilliz_datastore import (
ZillizDataStore,
)
from datastore.providers.milvus_datastore import (
EMBEDDING_FIE...
import logging
import os
import pytest
import weaviate
from _pytest.logging import LogCaptureFixture
from fastapi.testclient import TestClient
from loguru import logger
from weaviate import Client
from datastore.providers.weaviate_datastore import (
SCHEMA,
WeaviateDataStore,
extract_schema_properties,
)
...
import pytest
from models.models import (
DocumentChunkMetadata,
DocumentMetadataFilter,
DocumentChunk,
QueryWithEmbedding,
Source,
)
from datastore.providers.elasticsearch_datastore import (
ElasticsearchDataStore,
)
import time
DIM_SIZE = 1536
@pytest.fixture
def elasticsearch_datastore():
...
# from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
# env_path = Path(".") / "milvus.env"
# load_dotenv(dotenv_path=env_path, verbose=True)
import pytest
from models.models import (
DocumentChunkMetadata,
DocumentMetadataFilter,
DocumentChunk,
QueryWithEmbedding,
Source,
)
from...
import pytest
from models.models import (
DocumentChunkMetadata,
DocumentMetadataFilter,
DocumentChunk,
QueryWithEmbedding,
Source,
)
from datastore.providers.analyticdb_datastore import (
OUTPUT_DIM,
AnalyticDBDataStore,
)
@pytest.fixture
def analyticdb_datastore():
return AnalyticDBD... |
from typing import Dict, List
import pytest
from datastore.providers.llama_datastore import LlamaDataStore
from models.models import DocumentChunk, DocumentChunkMetadata, QueryWithEmbedding
def create_embedding(non_zero_pos: int, size: int) -> List[float]:
vector = [0.0] * size
vector[non_zero_pos % size] = 1...
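# create_embedding is truncated mid-line; the visible lines make the intent
# clear, a one-hot test vector (sketch of the evident completion):
from typing import List

def create_embedding_sketch(non_zero_pos: int, size: int) -> List[float]:
    # A single 1.0 at non_zero_pos (mod size), zeros elsewhere.
    vector = [0.0] * size
    vector[non_zero_pos % size] = 1.0
    return vector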
import os
from typing import Optional
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, Depends, Body, UploadFile
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.staticfiles import StaticFiles
from loguru import logger
from models.api import (
DeleteRequest,
...
from pydantic import BaseModel
from typing import List, Optional
from enum import Enum
class Source(str, Enum):
email = "email"
file = "file"
chat = "chat"
class DocumentMetadata(BaseModel):
source: Optional[Source] = None
source_id: Optional[str] = None
url: Optional[str] = None
created...
from models.models import (
Document,
DocumentMetadataFilter,
Query,
QueryResult,
)
from pydantic import BaseModel
from typing import List, Optional
class UpsertRequest(BaseModel):
documents: List[Document]
class UpsertResponse(BaseModel):
ids: List[str]
class QueryRequest(BaseModel):
...
# This is a version of the main.py file found in ../../../server/main.py for testing the plugin locally.
# Use the command `poetry run dev` to run this.
from typing import Optional
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, Body, UploadFile
from loguru import logger
from models.api import (...
from datastore.datastore import DataStore
import os
async def get_datastore() -> DataStore:
datastore = os.environ.get("DATASTORE")
assert datastore is not None
match datastore:
case "chroma":
from datastore.providers.chroma_datastore import ChromaDataStore
return ChromaD...
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
import asyncio
from models.models import (
Document,
DocumentChunk,
DocumentMetadataFilter,
Query,
QueryResult,
QueryWithEmbedding,
)
from services.chunks import get_document_chunks
from services.openai import get_embed...
import os
from loguru import logger
from typing import Optional
from pymilvus import (
connections,
)
from uuid import uuid4
from datastore.providers.milvus_datastore import (
MilvusDataStore,
)
ZILLIZ_COLLECTION = os.environ.get("ZILLIZ_COLLECTION") or "c" + uuid4().hex
ZILLIZ_URI = os.environ.get("ZILLIZ_...
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from datetime import datetime
from loguru import logger
from services.date import to_unix_timestamp
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentMetadataFi...
"""
Chroma datastore support for the ChatGPT retrieval plugin.
Consult the Chroma docs and GitHub repo for more information:
- https://docs.trychroma.com/usage-guide?lang=py
- https://github.com/chroma-core/chroma
- https://www.trychroma.com/
"""
import os
from datetime import datetime
from typing import Dict, List, ...
import asyncio
import os
import re
import uuid
from typing import Dict, List, Optional
import weaviate
from loguru import logger
from weaviate import Client
from weaviate.util import generate_uuid5
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
...
import json
import os
from typing import Dict, List, Optional, Type
from loguru import logger
from datastore.datastore import DataStore
from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding
from llama_index.indices.base im...
import asyncio
import os
import re
import json
import redis.asyncio as redis
import numpy as np
from redis.commands.search.query import Query as RediSearchQuery
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.field import (
TagField,
TextField,
Numeri...
import os
import asyncio
from typing import Dict, List, Optional, Tuple, Any
from datetime import datetime
from loguru import logger
from psycopg2cffi import compat
compat.register()
import psycopg2
from psycopg2.extras import DictCursor
from psycopg2.pool import SimpleConnectionPool
from services.date import to_uni...