Columns:
  code         string   lengths 141 to 97.3k
  apis         list     lengths 1 to 24
  extract_api  string   lengths 113 to 214k
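Each row below pairs a source snippet (code) with the LanceDB calls it makes (apis) and, in extract_api, one record per call: the character span of the call, its fully qualified name, the call text, parsed arguments, and the import statement that binds the name. The extraction tool itself is not shown in this preview; the sketch below, using Python's ast module, is only an illustration of how such records could be produced (the function name and the exact record layout are assumptions, not the dataset's actual pipeline):

import ast

def extract_api_calls(source: str, prefix: str = "lancedb"):
    """Sketch: return (span, qualified_name, call_source) for calls rooted at prefix."""
    tree = ast.parse(source)

    # Map local names to fully qualified names using the import statements.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name] = a.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for a in node.names:
                aliases[a.asname or a.name] = f"{node.module}.{a.name}"

    # Absolute character offset of the start of every line, for span computation.
    line_starts = [0]
    for line in source.splitlines(keepends=True):
        line_starts.append(line_starts[-1] + len(line))

    def qualify(func):
        # Rebuild a dotted name such as lancedb.connect from the call's func node.
        parts = []
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):
            parts.append(aliases.get(func.id, func.id))
        return ".".join(reversed(parts))

    calls = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            name = qualify(node.func)
            if name and name.startswith(prefix):
                span = (line_starts[node.lineno - 1] + node.col_offset,
                        line_starts[node.end_lineno - 1] + node.end_col_offset)
                calls.append((span, name, ast.get_source_segment(source, node)))
    return calls

sample = 'import lancedb\ndb = lancedb.connect("/tmp/lancedb")\n'
print(extract_api_calls(sample))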
from langchain_community.vectorstores import LanceDB from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_lancedb() -> None: import lancedb embeddings = FakeEmbeddings() db = lancedb.connect("/tmp/lancedb") texts = ["text 1", "text 2", "item 3"] vectors = embed...
[ "lancedb.connect" ]
[((200, 216), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (214, 216), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((226, 257), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance...
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token import os import pyte...
[ "lancedb.connect" ]
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((7383, 7396), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7394, 7396), False, 'import pytest\n'), ((7458, 7481), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7472, 7...
import lancedb import numpy as np import pandas as pd import pyarrow as pa def client_vector_db(vector_db_config: dict) -> lancedb.LanceDBConnection: """Connect to a lancedb instance""" return lancedb.connect(**vector_db_config) def initialize_vector_db_indices( client_vector_db: lancedb.LanceDBConnecti...
[ "lancedb.connect" ]
[((203, 238), 'lancedb.connect', 'lancedb.connect', ([], {}), '(**vector_db_config)\n', (218, 238), False, 'import lancedb\n'), ((1932, 1971), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data_objects'], {}), '(data_objects)\n', (1957, 1971), True, 'import pandas as pd\n'), ((568, 579), 'pyarrow.str...
import os, sqlite3, lancedb, tiktoken, bcrypt from pinecone import Pinecone, ServerlessSpec from enum import Enum from langchain_community.vectorstores import LanceDB, Chroma from langchain_community.vectorstores import Pinecone as LangPinecone import streamlit as st def SetHeader(page_title: str): st.set_page_con...
[ "lancedb.connect" ]
[((305, 433), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'page_title', 'page_icon': '"""https://indico.bnl.gov/event/19560/logo-410523303.png"""', 'layout': '"""wide"""'}), "(page_title=page_title, page_icon=\n 'https://indico.bnl.gov/event/19560/logo-410523303.png', layout='wide')\n", (3...
from langchain import PromptTemplate, LLMChain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma from langchain.chains import RetrievalQA from langchain.embeddings import HuggingFaceBgeEmbeddings from io import BytesIO from langchain.document_loaders import PyP...
[ "lancedb.connect" ]
[((704, 712), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (710, 712), False, 'from langchain.llms import OpenAI\n'), ((841, 948), 'langchain.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': 'model_kwargs', 'encode_kwargs': 'encode_kwargs'}), '(...
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorSt...
[ "lancedb.connect" ]
[((2773, 2793), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2788, 2793), False, 'import lancedb\n'), ((1170, 1199), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1176, 1199), True, 'import numpy as np\n'), ((3205, 3284), 'llama_index.vector_stores.utils.nod...
""" Unit test for retrieve_utils.py """ try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from aut...
[ "lancedb.connect" ]
[((1021, 1083), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1039, 1083), False, 'import pytest\n'), ((619, 644), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'i...
""" Unit test for retrieve_utils.py """ try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from aut...
[ "lancedb.connect" ]
[((1021, 1083), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1039, 1083), False, 'import pytest\n'), ((619, 644), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'i...
from langchain_community.vectorstores import LanceDB from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_lancedb() -> None: import lancedb embeddings = FakeEmbeddings() db = lancedb.connect("/tmp/lancedb") texts = ["text 1", "text 2", "item 3"] vectors = embed...
[ "lancedb.connect" ]
[((200, 216), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (214, 216), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((226, 257), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance...
from langchain_community.vectorstores import LanceDB from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_lancedb() -> None: import lancedb embeddings = FakeEmbeddings() db = lancedb.connect("/tmp/lancedb") texts = ["text 1", "text 2", "item 3"] vectors = embed...
[ "lancedb.connect" ]
[((200, 216), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (214, 216), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((226, 257), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance...
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token import os import pyte...
[ "lancedb.connect" ]
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((7383, 7396), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7394, 7396), False, 'import pytest\n'), ((7458, 7481), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7472, 7...
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token import os import pyte...
[ "lancedb.connect" ]
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((7383, 7396), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7394, 7396), False, 'import pytest\n'), ((7458, 7481), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7472, 7...
import os import lancedb import shutil import uvicorn import openai from fastapi import FastAPI, HTTPException, WebSocket, UploadFile, File from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import PromptTemplate...
[ "lancedb.connect" ]
[((621, 737), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Chatbot RAG API"""', 'description': '"""This is a chatbot API template for RAG system."""', 'version': '"""1.0.0"""'}), "(title='Chatbot RAG API', description=\n 'This is a chatbot API template for RAG system.', version='1.0.0')\n", (628, 737), False, 'f...
import os import typer import pickle import pandas as pd from dotenv import load_dotenv import openai import pinecone import lancedb import pyarrow as pa from collections import deque TASK_CREATION_PROMPT = """ You are an task creation AI that uses the result of an execution agent to create new tasks with the followi...
[ "lancedb.connect" ]
[((8057, 8070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (8068, 8070), False, 'from dotenv import load_dotenv\n'), ((8801, 8816), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8810, 8816), False, 'import typer\n'), ((2034, 2060), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_f...
import openai import os import lancedb import pickle import requests from pathlib import Path from bs4 import BeautifulSoup import re from langchain.document_loaders import UnstructuredHTMLLoader from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from l...
[ "lancedb.connect" ]
[((1788, 1806), 'pathlib.Path', 'Path', (['"""cities.pkl"""'], {}), "('cities.pkl')\n", (1792, 1806), False, 'from pathlib import Path\n'), ((2462, 2526), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500,...
from flask import Flask, render_template, jsonify, request from scripts.mock_llm_api import llm_api import lancedb import pandas as pd uri = "data/lancedb" db = lancedb.connect(uri) # Set initial entries in items vector database def _reset_tables(): items = ['Fire', 'Earth', 'Water', 'Wind'] descriptions = ["...
[ "lancedb.connect" ]
[((162, 182), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (177, 182), False, 'import lancedb\n'), ((879, 894), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (884, 894), False, 'from flask import Flask, render_template, jsonify, request\n'), ((640, 717), 'pandas.DataFrame', 'pd.DataFram...
import logging import json import gradio as gr import numpy as np import lancedb import os from huggingface_hub import AsyncInferenceClient # Setting up the logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # db TABLE_NAME = "docs" TEXT_COLUMN = "text" BATCH_SIZE = int(os.getenv("B...
[ "lancedb.connect" ]
[((167, 206), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (186, 206), False, 'import logging\n'), ((216, 243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'import logging\n'), ((573, 609), 'lancedb.connect'...
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 import os from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import LanceDB from langchain.embeddings import BedrockEmbeddings from langchain.document_loaders import PyPDFDirectoryL...
[ "lancedb.connect" ]
[((384, 403), 'langchain.embeddings.BedrockEmbeddings', 'BedrockEmbeddings', ([], {}), '()\n', (401, 403), False, 'from langchain.embeddings import BedrockEmbeddings\n'), ((625, 682), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(ch...
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from PIL import Image from matplotlib import pyplot as plt from pandas import DataFrame from tqdm import tqdm from engine.data.augment import F...
[ "lancedb.connect" ]
[((1672, 1865), 'engine.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=Fal...
import pyarrow as pa from typing import Union from dryg.settings import DB_URI import lancedb def connection() -> lancedb.LanceDBConnection: """ Connect to the database Returns: lancedb.LanceDBConnection: LanceDBConnection object """ db = lancedb.connect(DB_URI) return db def open_t...
[ "lancedb.connect" ]
[((271, 294), 'lancedb.connect', 'lancedb.connect', (['DB_URI'], {}), '(DB_URI)\n', (286, 294), False, 'import lancedb\n')]
import os import typer import pickle import pandas as pd from dotenv import load_dotenv import openai import pinecone import lancedb import pyarrow as pa from collections import deque TASK_CREATION_PROMPT = """ You are an task creation AI that uses the result of an execution agent to create new tasks with the followi...
[ "lancedb.connect" ]
[((7282, 7295), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7293, 7295), False, 'from dotenv import load_dotenv\n'), ((8026, 8041), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8035, 8041), False, 'import typer\n'), ((2219, 2245), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_f...
import json from generate_data import * from create_embeddings import * import lancedb uri = "./sample-lancedb" db = lancedb.connect(uri) text_table = "table_from_df_text" img_table = "table_from_df_images" tbl_txt = db.open_table(text_table) tbl_img = db.open_table(img_table) with open('./test_data.json') as f: ...
[ "lancedb.connect" ]
[((119, 139), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (134, 139), False, 'import lancedb\n'), ((1192, 1239), 'json.loads', 'json.loads', (['response.choices[0].message.content'], {}), '(response.choices[0].message.content)\n', (1202, 1239), False, 'import json\n')]
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from matplotlib import pyplot as plt from pandas import DataFrame from PIL import Image from tqdm import tqdm from ultralytics.data.augment imp...
[ "lancedb.connect" ]
[((1681, 1874), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normaliz...
from PIL import Image import streamlit as st import openai #exercise 11 from langchain.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from langchain.chat_models import ChatOpenAI #exercise 12 from langchain.memory import ConversationBufferWindowMemory #exercise 13 ...
[ "lancedb.connect" ]
[((649, 660), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (658, 660), False, 'import os\n'), ((681, 710), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (693, 710), False, 'import os\n'), ((719, 752), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIR...
from PIL import Image import streamlit as st import openai #exercise 11 from langchain.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain #exercise 12 from langchain.memory import ConversationBufferWindowMemory #exercise 13 from langchain.document_loaders import TextLo...
[ "lancedb.connect" ]
[((1106, 1117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1115, 1117), False, 'import os\n'), ((1138, 1167), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1150, 1167), False, 'import os\n'), ((1259, 1304), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""default_d...
import os import glob import tqdm import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import pickle from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_mod...
[ "lancedb.connect" ]
[((938, 967), 'warnings.simplefilter', 'warnings.simplefilter', (['"""once"""'], {}), "('once')\n", (959, 967), False, 'import warnings\n'), ((2673, 2701), 'glob.glob', 'glob.glob', (['data_file_pattern'], {}), '(data_file_pattern)\n', (2682, 2701), False, 'import glob\n'), ((3011, 3048), 'tqdm.tqdm', 'tqdm.tqdm', (['f...
"""LanceDB vector store.""" from typing import Any, List, Optional from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode from llama_index.vector_stores.types import ( MetadataFilters, NodeWithEmbedding, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from...
[ "lancedb.connect" ]
[((2271, 2291), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2286, 2291), False, 'import lancedb\n'), ((2716, 2807), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['result.node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(result.node, r...
from time import time_ns import lancedb uri = "./.lancedb" db = lancedb.connect(uri) tns = db.table_names() print(tns) tn = 'my_table' now = time_ns() if (tn not in tns): # The field schema is fixed when the table is created. # Adding fields later via add has no effect. table = db.create_table( tn, data=[ {"vector": [3.1, 4.1], "item": "f...
[ "lancedb.connect" ]
[((65, 85), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (80, 85), False, 'import lancedb\n'), ((143, 152), 'time.time_ns', 'time_ns', ([], {}), '()\n', (150, 152), False, 'from time import time_ns\n')]
from datetime import datetime import lancedb from langchain.embeddings.base import Embeddings from langchain.vectorstores import VectorStore, LanceDB from config import Config from utils.files import get_root_path def get_vectorstore(table_name: str, embedding: Embeddings) -> VectorStore: config = Config() ...
[ "lancedb.connect" ]
[((307, 315), 'config.Config', 'Config', ([], {}), '()\n', (313, 315), False, 'from config import Config\n'), ((376, 400), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (391, 400), False, 'import lancedb\n'), ((792, 838), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddi...
# Answer questions about a PDF file using the RAG model # TODO: Maintain the context of the conversation import lancedb from langchain_community.document_loaders import TextLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_openai import OpenAIEmbeddings from langchain.text_spl...
[ "lancedb.connect" ]
[((972, 1003), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (987, 1003), False, 'import lancedb\n'), ((1015, 1033), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1031, 1033), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((1412, 143...
import lancedb import os import gradio as gr from sentence_transformers import SentenceTransformer from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch # For Text Similarity and Relevance Ranking: # valhalla/distilbart-mnli-12-3 # sentence-transformers/cross-encoder/stsb-roberta-larg...
[ "lancedb.connect" ]
[((440, 508), 'os.getenv', 'os.getenv', (['"""CROSS_ENC_MODEL"""', '"""cross-encoder/ms-marco-MiniLM-L-6-v2"""'], {}), "('CROSS_ENC_MODEL', 'cross-encoder/ms-marco-MiniLM-L-6-v2')\n", (449, 508), False, 'import os\n'), ((573, 619), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['CROSS_...
from langchain_community.document_loaders import PyPDFDirectoryLoader from langchain_text_splitters import TokenTextSplitter from langchain_community.embeddings import OllamaEmbeddings import lancedb import pyarrow as pa embedding_model = OllamaEmbeddings() db_path = "./lancedb" db = lancedb.connect(db_path)...
[ "lancedb.connect" ]
[((246, 264), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {}), '()\n', (262, 264), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((296, 320), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (311, 320), False, 'import lancedb\n'), ((844, ...
import os import pandas as pd from datetime import datetime import time import subprocess from docarray import DocumentArray, Document import json import pyarrow as pa import lancedb from google.cloud import bigquery GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "passculture-data-ehp") ENV_SHORT_NAME = os.enviro...
[ "lancedb.connect" ]
[((237, 293), 'os.environ.get', 'os.environ.get', (['"""GCP_PROJECT_ID"""', '"""passculture-data-ehp"""'], {}), "('GCP_PROJECT_ID', 'passculture-data-ehp')\n", (251, 293), False, 'import os\n'), ((311, 350), 'os.environ.get', 'os.environ.get', (['"""ENV_SHORT_NAME"""', '"""dev"""'], {}), "('ENV_SHORT_NAME', 'dev')\n", ...
from pgvector.psycopg import register_vector from pgvector.sqlalchemy import Vector import psycopg from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text from sqlalchemy.orm import sessionmaker, mapped_column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.sql impo...
[ "lancedb.connect" ]
[((702, 720), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (718, 720), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((771, 790), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (788, 790), False, 'from memgpt.config import AgentCo...
from dotenv import load_dotenv import os import lancedb import torch from PIL import Image import glob import re from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast import concurrent.futures # Set options for youtube_dl ydl_opts = { "quiet": True, # Silence youtube_dl output "extract_flat": ...
[ "lancedb.connect" ]
[((955, 992), 'lancedb.connect', 'lancedb.connect', (['"""data/video-lancedb"""'], {}), "('data/video-lancedb')\n", (970, 992), False, 'import lancedb\n'), ((553, 596), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (586, 596), False, 'from tra...
import uvicorn from fastapi import FastAPI, HTTPException, UploadFile, File from pydantic import BaseModel import openai from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import PromptTemplate from langchain.doc...
[ "lancedb.connect" ]
[((548, 664), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Chatbot RAG API"""', 'description': '"""This is a chatbot API template for RAG system."""', 'version': '"""1.0.0"""'}), "(title='Chatbot RAG API', description=\n 'This is a chatbot API template for RAG system.', version='1.0.0')\n", (555, 664), False, 'f...
import os import typer import pickle import pandas as pd from dotenv import load_dotenv import openai import pinecone import lancedb import pyarrow as pa from collections import deque TASK_CREATION_PROMPT = """ You are an task creation AI that uses the result of an execution agent to create new tasks with the followi...
[ "lancedb.connect" ]
[((7628, 7641), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7639, 7641), False, 'from dotenv import load_dotenv\n'), ((8372, 8387), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8381, 8387), False, 'import typer\n'), ((2036, 2062), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_f...
import argparse import io import PIL import duckdb import lancedb import lance import pyarrow.compute as pc from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast import gradio as gr MODEL_ID = None MODEL = None TOKENIZER = None PROCESSOR = None def create_table(dataset): db = lancedb.connect("~/da...
[ "lancedb.connect" ]
[((299, 333), 'lancedb.connect', 'lancedb.connect', (['"""~/datasets/demo"""'], {}), "('~/datasets/demo')\n", (314, 333), False, 'import lancedb\n'), ((759, 802), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (792, 802), False, 'from transform...
import os import typer import pickle import pandas as pd from dotenv import load_dotenv import openai import pinecone import lancedb import pyarrow as pa from collections import deque TASK_CREATION_PROMPT = """ You are an task creation AI that uses the result of an execution agent to create new tasks with the followi...
[ "lancedb.connect" ]
[((7093, 7106), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7104, 7106), False, 'from dotenv import load_dotenv\n'), ((7837, 7852), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (7846, 7852), False, 'import typer\n'), ((2219, 2245), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_f...
from datasets import load_dataset import lancedb import pytest import main # ==================== TESTING ==================== @pytest.fixture def mock_embed_func(monkeypatch): def mock_api_call(*args, **kwargs): return [0.5, 0.5] monkeypatch.setattr(main, "embed", mock_api_call) @pytest.fixture d...
[ "lancedb.connect" ]
[((570, 646), 'datasets.load_dataset', 'load_dataset', (['"""CVdatasets/ImageNet15_animals_unbalanced_aug1"""'], {'split': '"""train"""'}), "('CVdatasets/ImageNet15_animals_unbalanced_aug1', split='train')\n", (582, 646), False, 'from datasets import load_dataset\n'), ((698, 730), 'lancedb.connect', 'lancedb.connect', ...
from datasets import load_dataset import numpy as np import lancedb import pytest import main # ==================== TESTING ==================== @pytest.fixture def mock_embed(monkeypatch): def mock_inference(audio_data): return (None, [[0.5, 0.5]]) monkeypatch.setattr(main, "create_audio_embedding...
[ "lancedb.connect" ]
[((417, 460), 'datasets.load_dataset', 'load_dataset', (['"""ashraq/esc50"""'], {'split': '"""train"""'}), "('ashraq/esc50', split='train')\n", (429, 460), False, 'from datasets import load_dataset\n'), ((471, 508), 'lancedb.connect', 'lancedb.connect', (['"""data/audio-lancedb"""'], {}), "('data/audio-lancedb')\n", (4...
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from matplotlib import pyplot as plt from pandas import DataFrame from PIL import Image from tqdm import tqdm from ultralytics.data.augment imp...
[ "lancedb.connect" ]
[((1681, 1874), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normaliz...
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from matplotlib import pyplot as plt from pandas import DataFrame from PIL import Image from tqdm import tqdm from ultralytics.data.augment imp...
[ "lancedb.connect" ]
[((1681, 1874), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normaliz...
import lancedb import pyarrow as pa import json embedding_models=[ "", "" ] class LanceDBAssistant: def __init__(self, dirpath, filename,n=384): self.dirpath = dirpath self.filename = filename self.db = None self.create_schema(n) def create_schema(self,n=384): ...
[ "lancedb.connect" ]
[((574, 603), 'lancedb.connect', 'lancedb.connect', (['self.dirpath'], {}), '(self.dirpath)\n', (589, 603), False, 'import lancedb\n'), ((434, 445), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (443, 445), True, 'import pyarrow as pa\n'), ((475, 486), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (484, 486), Tru...
# langchain Chatbot from langchain.document_loaders import DataFrameLoader import pandas as pd from langchain.memory import ConversationSummaryMemory import lancedb from langchain.vectorstores import LanceDB from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSpl...
[ "lancedb.connect" ]
[((472, 503), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (487, 503), False, 'import lancedb\n'), ((645, 688), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (661, 688), False, 'from lang...
from langchain.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, ) from .base_tool import BaseTool from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.document_loaders import PDFPlumberLoader from langchain.embeddings import OpenAIEmbeddings from langchain.document...
[ "lancedb.connect" ]
[((2402, 2434), 'langchain.document_loaders.PDFPlumberLoader', 'PDFPlumberLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (2418, 2434), False, 'from langchain.document_loaders import PDFPlumberLoader\n'), ((2533, 2599), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter',...
from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnablePassthrough from langchain_community.vectorstores import LanceDB from langchain.embeddings.openai import OpenAIEmbedding...
[ "lancedb.connect" ]
[((455, 471), 'langchain.document_loaders.TextLoader', 'TextLoader', (['path'], {}), '(path)\n', (465, 471), False, 'from langchain.document_loaders import TextLoader\n'), ((539, 557), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (555, 557), False, 'from langchain.embeddings.ope...
import argparse import os from typing import Any from PIL import Image import lancedb from schema import Myntra, get_schema_by_name def run_vector_search( database: str, table_name: str, schema: Any, search_query: Any, limit: int = 6, output_folder: str = "output", ) -> None: """ Thi...
[ "lancedb.connect" ]
[((1422, 1451), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (1436, 1451), False, 'import os\n'), ((1650, 1675), 'lancedb.connect', 'lancedb.connect', (['database'], {}), '(database)\n', (1665, 1675), False, 'import lancedb\n'), ((2586, 2638), 'argparse.ArgumentParser', 'argparse.Ar...
import streamlit as st import sqlite3 import streamlit_antd_components as sac import pandas as pd import os from langchain.embeddings.openai import OpenAIEmbeddings from langchain.document_loaders import UnstructuredFileLoader from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import...
[ "lancedb.connect" ]
[((1396, 1407), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1405, 1407), False, 'import os\n'), ((1428, 1457), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1440, 1457), False, 'import os\n'), ((2203, 2245), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb""...
# import libraries import re import gradio as gr from typing import List, Union import lancedb from langchain.vectorstores import LanceDB from langchain.llms import CTransformers from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains import ConversationalRetrievalChain from langchain.m...
[ "lancedb.connect" ]
[((683, 721), 're.compile', 're.compile', (['"""https?://\\\\S+|www\\\\.\\\\S+"""'], {}), "('https?://\\\\S+|www\\\\.\\\\S+')\n", (693, 721), False, 'import re\n'), ((1668, 1789), 'langchain.llms.CTransformers', 'CTransformers', ([], {'model': '"""TheBloke/Mistral-7B-v0.1-GGUF"""', 'model_file': '"""mistral-7b-v0.1.Q4_...
# load_pdf.py - Loads PDF documents into the LanceDB vector store ## Imports: from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import LanceDB import dotenv import lancedb import os from pypdf import PdfReader ## Set Env Variables d...
[ "lancedb.connect" ]
[((319, 339), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (337, 339), False, 'import dotenv\n'), ((586, 604), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (602, 604), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((695, 719), 'lancedb.connect', 'lance...
import lancedb from langchain.vectorstores import LanceDB from langchain.document_loaders import DirectoryLoader from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings db = lancedb.connect(".lance-data") path = "/workspace/flancian" loader = DirectoryLoader(pa...
[ "lancedb.connect" ]
[((233, 263), 'lancedb.connect', 'lancedb.connect', (['""".lance-data"""'], {}), "('.lance-data')\n", (248, 263), False, 'import lancedb\n'), ((302, 339), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['path'], {'glob': '"""**/*.md"""'}), "(path, glob='**/*.md')\n", (317, 339), False, 'from langchai...
import lancedb import pyarrow as pa import pandas as pd # Connect to the database uri = "/tmp/sample-lancedb" db = lancedb.connect(uri) schema = pa.schema([ pa.field("unique_id", pa.string()), pa.field("embedded_user_input", pa.list_(pa.list_(pa.float32()))), pa.field("metadata", pa.struct([ pa.fi...
[ "lancedb.connect" ]
[((116, 136), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (131, 136), False, 'import lancedb\n'), ((11225, 11245), 'pandas.DataFrame', 'pd.DataFrame', (['[data]'], {}), '([data])\n', (11237, 11245), True, 'import pandas as pd\n'), ((185, 196), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (194, 1...
import lancedb import numpy as np from .base_index import BaseIndex from concurrent.futures import ThreadPoolExecutor from multiprocessing import cpu_count from functools import partial def search_single(q: np.ndarray, k: int, db_address: str, table_name: str, metric: str): index = lancedb.connect(db_address) ...
[ "lancedb.connect" ]
[((290, 317), 'lancedb.connect', 'lancedb.connect', (['db_address'], {}), '(db_address)\n', (305, 317), False, 'import lancedb\n'), ((613, 645), 'lancedb.connect', 'lancedb.connect', (['self.db_address'], {}), '(self.db_address)\n', (628, 645), False, 'import lancedb\n'), ((1394, 1502), 'functools.partial', 'partial', ...
import streamlit as st import sqlite3 import streamlit_antd_components as sac import pandas as pd import os import openai from langchain.embeddings.openai import OpenAIEmbeddings from langchain.document_loaders import UnstructuredFileLoader from langchain.text_splitter import CharacterTextSplitter from langchain.vector...
[ "lancedb.connect" ]
[((1345, 1356), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1354, 1356), False, 'import os\n'), ((1377, 1406), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1389, 1406), False, 'import os\n'), ((1686, 1702), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), ...
# See; https://www.mongodb.com/developer/products/atlas/rag-atlas-vector-search-langchain-openai/ from langchain_openai import OpenAI,ChatOpenAI from langchain.chains import RetrievalQA from langchain.prompts import PromptTemplate from langchain_community.llms import Ollama from langchain.text_splitter import Rec...
[ "lancedb.connect" ]
[((867, 945), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY', 'model_name': '"""gpt-4"""', 'max_tokens': '(1000)'}), "(openai_api_key=OPENAI_API_KEY, model_name='gpt-4', max_tokens=1000)\n", (877, 945), False, 'from langchain_openai import OpenAI, ChatOpenAI\n'), ((949, 1003), 'la...
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/8/9 15:42 @Author : unkn-wn (Leon Yee) @File : lancedb_store.py """ import os import shutil import lancedb class LanceStore: def __init__(self, name): db = lancedb.connect("./data/lancedb") self.db = db self.name = name...
[ "lancedb.connect" ]
[((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, ...
import json import gzip from sentence_transformers import SentenceTransformer from fastapi import FastAPI from pydantic import BaseModel from pathlib import Path from tqdm.auto import tqdm import pandas as pd import lancedb import sqlite3 app = FastAPI() encoder = SentenceTransformer('all-MiniLM-L6-v2') lance_location...
[ "lancedb.connect" ]
[((246, 255), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (253, 255), False, 'from fastapi import FastAPI\n'), ((266, 305), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (285, 305), False, 'from sentence_transformers import SentenceTr...
from datasets import load_dataset from enum import Enum import lancedb from tqdm import tqdm from IPython.display import display import clip import torch class Animal(Enum): italian_greyhound = 0 coyote = 1 beagle = 2 rottweiler = 3 hyena = 4 greater_swiss_mountain_dog = 5 Triceratops = 6 ...
[ "lancedb.connect" ]
[((748, 772), 'IPython.display.display', 'display', (["test[id]['img']"], {}), "(test[id]['img'])\n", (755, 772), False, 'from IPython.display import display\n'), ((1853, 1871), 'tqdm.tqdm', 'tqdm', (['batched_data'], {}), '(batched_data)\n', (1857, 1871), False, 'from tqdm import tqdm\n'), ((2196, 2272), 'datasets.loa...
import uvicorn from fastapi import FastAPI, HTTPException from openai import OpenAI from pydantic import BaseModel from typing import List import lancedb import pyarrow as pa import json from collections import Counter import requests from dotenv import load_dotenv import os from fastapi.middleware.cors import CORSMidd...
[ "lancedb.connect" ]
[((377, 397), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (392, 397), False, 'import lancedb\n'), ((569, 578), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (576, 578), False, 'from fastapi import FastAPI, HTTPException\n'), ((579, 592), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (590,...
import lancedb import tantivy def create_lancedb_index(bucket, vector_name, num_partitions=256, num_sub_vectors=96, text_key="text"): try: db = lancedb.connect(bucket) tbl = db.open_table(vector_name) tbl.create_index(num_partitions=num_partitions, num_sub_vectors=num_sub_vectors) ...
[ "lancedb.connect" ]
[((157, 180), 'lancedb.connect', 'lancedb.connect', (['bucket'], {}), '(bucket)\n', (172, 180), False, 'import lancedb\n')]
import lancedb uri = "./.lancedb" db = lancedb.connect(uri) table = db.open_table("my_table") result = table.search([100, 100]).limit(2).to_df() print(result) df = table.to_pandas() print(df)
[ "lancedb.connect" ]
[((40, 60), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (55, 60), False, 'import lancedb\n')]
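The row above is one of the few snippets shown in full. A minimal sketch of the same query pattern follows; the database path, table name, and query vector mirror that row, and it assumes a table "my_table" with 2-dimensional vectors already exists under ./.lancedb:

import lancedb

db = lancedb.connect("./.lancedb")      # open the local database directory
table = db.open_table("my_table")       # open an existing table
top2 = table.search([100, 100]).limit(2).to_pandas()  # nearest-neighbour query, two closest rows
print(top2)
print(table.to_pandas())                # dump the whole table as a DataFrame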
import torch import open_clip import pandas as pd from tqdm import tqdm from collections import defaultdict import arxiv import lancedb def get_arxiv_df(embed_func): length = 30000 results = arxiv.Search( query="cat:cs.AI OR cat:cs.CV OR cat:stat.ML", max_results=length, sort_by=arxiv....
[ "lancedb.connect" ]
[((417, 434), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (428, 434), False, 'from collections import defaultdict\n'), ((453, 480), 'tqdm.tqdm', 'tqdm', (['results'], {'total': 'length'}), '(results, total=length)\n', (457, 480), False, 'from tqdm import tqdm\n'), ((837, 853), 'pandas.DataFram...
import lancedb from langchain.document_loaders import DirectoryLoader from langchain.schema import Document from langchain.text_splitter import CharacterTextSplitter from typing import List from langchain.chat_models import ChatOpenAI from langchain.chains import RetrievalQA from langchain.vectorstores import LanceDB f...
[ "lancedb.connect" ]
[((2368, 2423), 'langchain.tools.tool', 'tool', (['"""knowledge_base"""'], {'args_schema': 'KnowledgeBaseSchema'}), "('knowledge_base', args_schema=KnowledgeBaseSchema)\n", (2372, 2423), False, 'from langchain.tools import tool\n'), ((2293, 2363), 'pydantic.Field', 'Field', ([], {'description': '"""information you want...
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, get_file_from_url, is_url, create_vector_db_from_dir, query_vector_db, num_tokens_from_text, num_tokens_from_messa...
[ "lancedb.connect" ]
[((439, 464), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (454, 464), False, 'import os\n'), ((7837, 7850), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7848, 7850), False, 'import pytest\n'), ((7912, 7935), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7926, 7...
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/8/9 15:42 @Author : unkn-wn (Leon Yee) @File : lancedb_store.py """ import os import shutil import lancedb class LanceStore: def __init__(self, name): db = lancedb.connect("./data/lancedb") self.db = db self.name = name...
[ "lancedb.connect" ]
[((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, ...
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/8/9 15:42 @Author : unkn-wn (Leon Yee) @File : lancedb_store.py """ import os import shutil import lancedb class LanceStore: def __init__(self, name): db = lancedb.connect("./data/lancedb") self.db = db self.name = name...
[ "lancedb.connect" ]
[((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, ...
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/8/9 15:42 @Author : unkn-wn (Leon Yee) @File : lancedb_store.py """ import os import shutil import lancedb class LanceStore: def __init__(self, name): db = lancedb.connect("./data/lancedb") self.db = db self.name = name...
[ "lancedb.connect" ]
[((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, ...
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/8/9 15:42 @Author : unkn-wn (Leon Yee) @File : lancedb_store.py """ import os import shutil import lancedb class LanceStore: def __init__(self, name): db = lancedb.connect("./data/lancedb") self.db = db self.name = name...
[ "lancedb.connect" ]
[((241, 274), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (256, 274), False, 'import lancedb\n'), ((2822, 2864), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2834, 2864), False, 'import os\n'), ((2876, ...
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.legacy.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.legacy.vector_stores.types import ( ...
[ "lancedb.connect" ]
[((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"],...
import pickle import re import zipfile from pathlib import Path import requests from langchain.chains import RetrievalQA from langchain.document_loaders import UnstructuredHTMLLoader from langchain.embeddings import OpenAIEmbeddings from langchain.llms import OpenAI from langchain.text_splitter import RecursiveCharact...
[ "lancedb.connect" ]
[((746, 762), 'pathlib.Path', 'Path', (['"""docs.pkl"""'], {}), "('docs.pkl')\n", (750, 762), False, 'from pathlib import Path\n'), ((773, 788), 'pathlib.Path', 'Path', (['"""lancedb"""'], {}), "('lancedb')\n", (777, 788), False, 'from pathlib import Path\n'), ((2956, 2982), 'modal.web_endpoint', 'web_endpoint', ([], {...
from typing import List, Any from dataclasses import dataclass import lancedb import pandas as pd from autochain.tools.base import Tool from autochain.models.base import BaseLanguageModel from autochain.tools.internal_search.base_search_tool import BaseSearchTool @dataclass class LanceDBDoc: doc: str vector:...
[ "lancedb.connect" ]
[((1275, 1300), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1290, 1300), False, 'import lancedb\n'), ((1984, 2054), 'pandas.DataFrame', 'pd.DataFrame', (["[{'doc': doc.doc, 'vector': doc.vector} for doc in docs]"], {}), "([{'doc': doc.doc, 'vector': doc.vector} for doc in docs])\n", (1996...
import lancedb import matplotlib.pyplot as plt import rasterio as rio import streamlit as st from rasterio.plot import show st.set_page_config(layout="wide") # Get preferrred chips def get_unique_chips(tbl): chips = [ {"tile": "17MNP", "idx": "0271", "year": 2023}, {"tile": "19HGU", "idx": "0033"...
[ "lancedb.connect" ]
[((125, 158), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (143, 158), True, 'import streamlit as st\n'), ((1119, 1138), 'streamlit.cache_resource', 'st.cache_resource', ([], {}), '()\n', (1136, 1138), True, 'import streamlit as st\n'), ((1264, 1283), 'streamli...
import lancedb import numpy as np import pandas as pd global data data = [] global table table = None def get_recommendations(title): pd_data = pd.DataFrame(data) # Table Search result = ( table.search(pd_data[pd_data["title"] == title]["vector"].values[0]) .limit(5) .to_df() ...
[ "lancedb.connect" ]
[((152, 170), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (164, 170), True, 'import pandas as pd\n'), ((357, 484), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/links.csv"""'], {'header': '(0)', 'names': "['movie id', 'imdb id', 'tmdb id']", 'converters': "{'imdb id': str}"}), "('./ml-la...
from hashlib import md5 from typing import List, Optional import json try: import lancedb import pyarrow as pa except ImportError: raise ImportError("`lancedb` not installed.") from phi.document import Document from phi.embedder import Embedder from phi.embedder.openai import OpenAIEmbedder from phi.vecto...
[ "lancedb.connect" ]
[((509, 525), 'phi.embedder.openai.OpenAIEmbedder', 'OpenAIEmbedder', ([], {}), '()\n', (523, 525), False, 'from phi.embedder.openai import OpenAIEmbedder\n'), ((1143, 1168), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1158, 1168), False, 'import lancedb\n'), ((2476, 2525), 'phi.utils.log...
import lancedb from langchain.prompts import PromptTemplate from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain_community.llms import GPT4All from langchain.chains import ConversationalRetrievalChain, LLMChain from langchain_community.vectorstores import LanceDB from langchain...
[ "lancedb.connect" ]
[((373, 401), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (388, 401), False, 'import lancedb\n'), ((713, 805), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context, question', 'chat_history']"}), "(template=template, in...
from typing import Any, List, Optional, Dict from ._base import Record, VectorStore from ._embeddings import Embeddings VECTOR_COLUMN_NAME = "_vector" class LanceDB(VectorStore): def __init__(self, db_uri, embeddings: Embeddings = None) -> None: super().__init__() try: import pyarrow...
[ "lancedb.connect" ]
[((1012, 1041), 'lancedb.connect', 'lancedb.connect', (['self._db_uri'], {}), '(self._db_uri)\n', (1027, 1041), True, 'import lancedb as lancedb\n'), ((4319, 4337), 'pyarrow.schema', 'pa.schema', (['columns'], {}), '(columns)\n', (4328, 4337), True, 'import pyarrow as pa\n'), ((4016, 4027), 'pyarrow.string', 'pa.string...
import typing as t from docarray import DocumentArray, Document import lancedb from filter import Filter import joblib import numpy as np DETAIL_COLUMNS = [ "item_id", "topic_id", "cluster_id", "is_geolocated", "booking_number", "stock_price", "offer_creation_date", "stock_beginning_da...
[ "lancedb.connect" ]
[((689, 731), 'docarray.DocumentArray.load', 'DocumentArray.load', (['"""./metadata/item.docs"""'], {}), "('./metadata/item.docs')\n", (707, 731), False, 'from docarray import DocumentArray, Document\n'), ((779, 799), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (794, 799), False, 'import lancedb\n')...
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Time : 2023/8/9 15:42 @Author : unkn-wn (Leon Yee) @File : lancedb_store.py """ import lancedb import shutil, os class LanceStore: def __init__(self, name): db = lancedb.connect('./data/lancedb') self.db = db self.name = name ...
[ "lancedb.connect" ]
[((234, 267), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (249, 267), False, 'import lancedb\n'), ((2866, 2908), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2878, 2908), False, 'import shutil, os\n'), ...
from pgvector.psycopg import register_vector from pgvector.sqlalchemy import Vector import psycopg from sqlalchemy import create_engine, Column, String, BIGINT, select, inspect, text from sqlalchemy.orm import sessionmaker, mapped_column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.sql impo...
[ "lancedb.connect" ]
[((702, 720), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (718, 720), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((771, 790), 'memgpt.config.MemGPTConfig.load', 'MemGPTConfig.load', ([], {}), '()\n', (788, 790), False, 'from memgpt.config import AgentCo...
from langchain_community.vectorstores import LanceDB from langchain_openai.embeddings import OpenAIEmbeddings import lancedb from common import EXAMPLE_TEXTS SEARCH_NUM_RESULTS = 3 def main(): embeddings = OpenAIEmbeddings() table, vectorstore = get_table_and_vectorstore(embeddings) # Add example text...
[ "lancedb.connect" ]
[((214, 232), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (230, 232), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((947, 975), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (962, 975), False, 'import lancedb\n'), (...
import openai from langchain.agents import load_tools from langchain.agents import initialize_agent from langchain.agents import AgentType from langchain.chat_models import ChatOpenAI from langchain.tools import tool from pydantic import BaseModel, Field import argparse import lancedb def embed_func(c): rs = open...
[ "lancedb.connect" ]
[((925, 983), 'langchain.tools.tool', 'tool', (['"""insert_critiques"""'], {'args_schema': 'InsertCritiquesInput'}), "('insert_critiques', args_schema=InsertCritiquesInput)\n", (929, 983), False, 'from langchain.tools import tool\n'), ((1769, 1831), 'langchain.tools.tool', 'tool', (['"""retrieve_critiques"""'], {'args_...
import streamlit as st import sqlite3 import streamlit_antd_components as sac import pandas as pd import os import openai from langchain.embeddings.openai import OpenAIEmbeddings from langchain.document_loaders import UnstructuredFileLoader from langchain.text_splitter import CharacterTextSplitter from langchain.vector...
[ "lancedb.connect" ]
[((1289, 1300), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1298, 1300), False, 'import os\n'), ((1321, 1350), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1333, 1350), False, 'import os\n'), ((1359, 1392), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(W...
import lancedb from langchain_community.embeddings import GPT4AllEmbeddings from langchain_community.vectorstores import LanceDB from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import TextLoader, PyPDFLoader db = lancedb.connect("./lancedb") table = db.creat...
[ "lancedb.connect" ]
[((275, 303), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (290, 303), False, 'import lancedb\n'), ((552, 615), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(0)'}), '(chunk_size=256, chun...
from langchain.document_loaders import TextLoader from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import LanceDB # embedding_model = HuggingFaceEmbeddings(model_name = "moka-ai/m3e-base") from langchain.embeddings import Lo...
[ "lancedb.connect" ]
[((439, 577), 'langchain.embeddings.LocalAIEmbeddings', 'LocalAIEmbeddings', ([], {'openai_api_key': '"""aaabbbcccdddeeefffedddsfasdfasdf"""', 'openai_api_base': 'openai_api_base_address', 'model': '"""vicuna-13b-v1.5"""'}), "(openai_api_key='aaabbbcccdddeeefffedddsfasdfasdf',\n openai_api_base=openai_api_base_addre...
from flask import Flask, request, jsonify import requests import json from flask_cors import CORS from FlagEmbedding import LLMEmbedder, FlagReranker from searchdb import search import lancedb import pandas as pd task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch) embed_model = LLMEmbedd...
[ "lancedb.connect" ]
[((311, 359), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (322, 359), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((416, 469), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-bas...
"""Provides a LanceDB interface for adding and querying embeddings.""" import os import sys from logging import Logger from typing import TypeVar import lancedb import pyarrow as pa from lance.vector import vec_to_table from deckard.core import get_data_dir T = TypeVar('T', dict, list, int) class LanceDB: """Pr...
[ "lancedb.connect" ]
[((265, 294), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'dict', 'list', 'int'], {}), "('T', dict, list, int)\n", (272, 294), False, 'from typing import TypeVar\n'), ((1068, 1082), 'deckard.core.get_data_dir', 'get_data_dir', ([], {}), '()\n', (1080, 1082), False, 'from deckard.core import get_data_dir\n'), ((1323, 1363...
from typing import Any, List, Optional, Tuple import gradio as gr import lancedb from transformers import CLIPModel, CLIPTokenizerFast from homematch.config import DATA_DIR, MODEL_ID, TABLE_NAME from homematch.data.types import ImageData DEVICE: str = "cpu" model: CLIPModel = CLIPModel.from_pretrained(MODEL_ID).to(D...
[ "lancedb.connect" ]
[((358, 401), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (391, 401), False, 'from transformers import CLIPModel, CLIPTokenizerFast\n'), ((448, 468), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (463, 468), False, 'import ...
import flask import lancedb import openai import langchain # import clip import torch from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import LanceDB from langchain.docstore.document import Document from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import...
[ "lancedb.connect" ]
[((760, 781), 'dotenv.dotenv_values', 'dotenv_values', (['""".env"""'], {}), "('.env')\n", (773, 781), False, 'from dotenv import dotenv_values\n'), ((852, 872), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (867, 872), False, 'import lancedb\n'), ((886, 933), 'langchain.embeddings.openai.OpenAIEmbedd...
from dotenv import load_dotenv
import os
import lancedb
import clip
import torch
from PIL import Image
import glob
import re
from concurrent.futures import ThreadPoolExecutor
import yt_dlp
from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast

# Set options for youtube_dl
ydl_opts = {
    "retries": 0,
    ...
[ "lancedb.connect" ]
[((1313, 1350), 'lancedb.connect', 'lancedb.connect', (['"""data/video-lancedb"""'], {}), "('data/video-lancedb')\n", (1328, 1350), False, 'import lancedb\n'), ((621, 664), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (654, 664), False, 'from...
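A minimal sketch, separate from the row above, of how a CLIP text embedding is typically used to query a table in "data/video-lancedb". The model ID is hypothetical (the row truncates before its value), and the sketch assumes the ingest step already created at least one table with CLIP-sized vectors.

import lancedb
import torch
from transformers import CLIPModel, CLIPTokenizerFast

MODEL_ID = "openai/clip-vit-base-patch32"  # hypothetical; not taken from the row
model = CLIPModel.from_pretrained(MODEL_ID)
tokenizer = CLIPTokenizerFast.from_pretrained(MODEL_ID)

db = lancedb.connect("data/video-lancedb")
table = db.open_table(db.table_names()[0])  # assumes a table already exists

inputs = tokenizer(["a dog catching a frisbee"], padding=True, return_tensors="pt")
with torch.no_grad():
    query_vector = model.get_text_features(**inputs)[0].tolist()

print(table.search(query_vector).limit(3).to_pandas())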
"""LanceDB vector store.""" from typing import Any, List, Optional from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode from llama_index.vector_stores.types import ( NodeWithEmbedding, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) class LanceDBVectorStor...
[ "lancedb.connect" ]
[((1731, 1751), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1746, 1751), False, 'import lancedb\n'), ((3914, 3950), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (3929, 3950), False, 'from llama_index.schema import MetadataMode...
import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langch...
[ "lancedb.connect" ]
[((1365, 1376), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1374, 1376), False, 'import os\n'), ((1397, 1426), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1409, 1426), False, 'import os\n'), ((1706, 1722), 'basecode.authenticate.return_api_key', 'return_api_key', ([], {}), ...
#!/usr/bin/env python3 -m pytest
"""
Unit test for retrieve_utils.py
"""
import pytest

try:
    import chromadb
    from autogen.retrieve_utils import (
        split_text_to_chunks,
        extract_text_from_pdf,
        split_files_to_chunks,
        get_files_from_dir,
        is_url,
        create_vector_db_from...
[ "lancedb.connect" ]
[((1045, 1107), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1063, 1107), False, 'import pytest\n'), ((643, 668), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (658, 668), False, 'i...
import pytest

from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


@pytest.mark.requires("lancedb")
def test_lancedb_with_connection() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb"...
[ "lancedb.connect" ]
[((151, 182), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (171, 182), False, 'import pytest\n'), ((810, 841), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (830, 841), False, 'import pytest\n'), ((1178, 1209), 'pytest.mark.require...
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to i...
[ "lancedb.connect" ]
[((1069, 1113), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""module"""'}), "(autouse=True, scope='module')\n", (1083, 1113), False, 'import pytest\n'), ((1159, 1192), 'os.environ.get', 'os.environ.get', (['"""REMOTE_BASE_URL"""'], {}), "('REMOTE_BASE_URL')\n", (1173, 1192), False, 'import...
""" Unit test for retrieve_utils.py """ import pytest try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ...
[ "lancedb.connect" ]
[((1011, 1073), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1029, 1073), False, 'import pytest\n'), ((609, 634), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'i...
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_lancedb() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embe...
[ "lancedb.connect" ]
[((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance...
import lancedb

from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_lancedb() -> None:
    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embed_do...
[ "lancedb.connect" ]
[((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance...
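Both FakeEmbeddings tests above are truncated right after the embed_documents call. A hedged reconstruction of how such an integration test typically continues (the function name, table name, and assertion below are assumptions, not the repository's exact code):

from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_lancedb_sketch() -> None:  # hypothetical name for this sketch
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",  # placeholder table name
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    result = store.similarity_search("text 1")
    assert "text 1" in [doc.page_content for doc in result]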
import lancedb

db = lancedb.connect("data/sample-lancedb")
table = db.open_table("python_docs")

print(table.to_pandas())
print(table.to_pandas()["text"])
print(table.to_pandas().columns)
print("vector size: " + str(len(table.to_pandas()['vector'].values[0])))
[ "lancedb.connect" ]
[((21, 59), 'lancedb.connect', 'lancedb.connect', (['"""data/sample-lancedb"""'], {}), "('data/sample-lancedb')\n", (36, 59), False, 'import lancedb\n')]
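A short sketch, not part of the row above, showing how the same opened table can serve a vector search; it also reads the DataFrame once instead of re-materialising it per print. The query vector is a placeholder reused from the table itself, and .to_pandas() on the search result assumes a reasonably recent lancedb release (older releases expose .to_df()).

import lancedb

db = lancedb.connect("data/sample-lancedb")
table = db.open_table("python_docs")

df = table.to_pandas()                 # materialise the table once
query_vector = df["vector"].values[0]  # reuse a stored vector as a stand-in query
results = table.search(query_vector).limit(5).to_pandas()
print(results[["text", "_distance"]])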
import lancedb
import numpy as np
import pandas as pd
import pytest
import subprocess

from main import get_recommendations, data
import main

# DOWNLOAD ======================================================
subprocess.Popen(
    "curl https://files.grouplens.org/datasets/movielens/ml-latest-small.zip -o ml-latest-sm...
[ "lancedb.connect" ]
[((519, 634), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/ratings.csv"""'], {'header': 'None', 'names': "['user id', 'movie id', 'rating', 'timestamp']"}), "('./ml-latest-small/ratings.csv', header=None, names=['user id',\n 'movie id', 'rating', 'timestamp'])\n", (530, 634), True, 'import pandas as pd\...