| code (string, length 141–97.3k) | apis (list, length 1–24) | extract_api (string, length 113–214k) |
|---|---|---|
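Each row below pairs a source snippet (`code`), the list of library calls it exercises (`apis`), and a Python-literal list of extraction tuples (`extract_api`). The tuple layout is not documented in this dump; judging from the complete values that are visible, each entry appears to carry the call's character span in `code`, the dotted API name, the call name as written, its positional and keyword arguments, the argument text, an argument span, a flag, and the originating import line. A minimal sketch of reading one such cell, using the short untruncated value from the `table.delete` row further down, with the field meanings treated as assumptions rather than a documented schema:

```python
import ast

# A complete `extract_api` cell copied from the short "table.delete" row below;
# most cells in this dump are truncated with "..." and would not parse.
raw = (
    "[((40, 60), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), "
    "'(uri)\\n', (55, 60), False, 'import lancedb\\n')]"
)

for entry in ast.literal_eval(raw):
    # Field meanings are inferred from inspection, not from a dataset card:
    # call span in `code`, dotted API name, call name as written, (args, kwargs),
    # argument text, argument span, a boolean flag, and the import statement.
    span, api_name, call_name, (args, kwargs), arg_text, *rest = entry
    print(api_name, span, args, kwargs)
```

Only rows whose `extract_api` value survived intact can be parsed this way; the truncated cells are shown as-is.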
"""LanceDB vector store."""
import logging
from typing import Any, List, Optional
import numpy as np
from pandas import DataFrame
from llama_index.legacy.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.legacy.vector_stores.types import (
... | [
"lancedb.connect"
] | [((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"],... |
"""LanceDB vector store."""
import logging
from typing import Any, List, Optional
import numpy as np
from pandas import DataFrame
from llama_index.legacy.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.legacy.vector_stores.types import (
... | [
"lancedb.connect"
] | [((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"],... |
from typing import List, Any
from dataclasses import dataclass
import lancedb
import pandas as pd
from autochain.tools.base import Tool
from autochain.models.base import BaseLanguageModel
from autochain.tools.internal_search.base_search_tool import BaseSearchTool
@dataclass
class LanceDBDoc:
doc: str
vector:... | [
"lancedb.connect"
] | [((1275, 1300), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1290, 1300), False, 'import lancedb\n'), ((1984, 2054), 'pandas.DataFrame', 'pd.DataFrame', (["[{'doc': doc.doc, 'vector': doc.vector} for doc in docs]"], {}), "([{'doc': doc.doc, 'vector': doc.vector} for doc in docs])\n", (1996... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : lancedb_store.py
"""
import lancedb
import shutil, os
class LanceStore:
def __init__(self, name):
db = lancedb.connect('./data/lancedb')
self.db = db
self.name = name
... | [
"lancedb.connect"
] | [((234, 267), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (249, 267), False, 'import lancedb\n'), ((2866, 2908), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2878, 2908), False, 'import shutil, os\n'), ... |
import pytest
from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.mark.requires("lancedb")
def test_lancedb_with_connection() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb"... | [
"lancedb.connect"
] | [((151, 182), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (171, 182), False, 'import pytest\n'), ((810, 841), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (830, 841), False, 'import pytest\n'), ((1178, 1209), 'pytest.mark.require... |
"""
Unit test for retrieve_utils.py
"""
import pytest
try:
import chromadb
from autogen.retrieve_utils import (
split_text_to_chunks,
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
is_url,
create_vector_db_from_dir,
query_vector_db,
... | [
"lancedb.connect"
] | [((1011, 1073), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1029, 1073), False, 'import pytest\n'), ((609, 634), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'i... |
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embe... | [
"lancedb.connect"
] | [((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embe... | [
"lancedb.connect"
] | [((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embe... | [
"lancedb.connect"
] | [((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
import lancedb
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embe... | [
"lancedb.connect"
] | [((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
import lancedb
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_do... | [
"lancedb.connect"
] | [((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
import lancedb
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_do... | [
"lancedb.connect"
] | [((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
import lancedb
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_do... | [
"lancedb.connect"
] | [((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
import lancedb
from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_lancedb() -> None:
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb")
texts = ["text 1", "text 2", "item 3"]
vectors = embeddings.embed_do... | [
"lancedb.connect"
] | [((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lance... |
import argparse
from pprint import pprint
import pandas as pd
from mlx_lm import generate, load
import lancedb.embeddings.gte
TEMPLATE = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible using the context text provided. Your answers should only answer the question once and... | [
"lancedb.connect"
] | [((689, 745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a vector DB"""'}), "(description='Query a vector DB')\n", (712, 745), False, 'import argparse\n'), ((1112, 1141), 'lancedb.connect', 'lancedb.connect', (['args.db_path'], {}), '(args.db_path)\n', (1127, 1141), False, 'imp... |
import lancedb
uri = "./.lancedb"
db = lancedb.connect(uri)
table = db.open_table("my_table")
# table.delete("createAt = '1690358416394516300'")  # This one failed for no obvious reason: Column createat does not exist in the dataset
table.delete("item = 'foo'")
df = table.to_pandas()
print(df)
| [
"lancedb.connect"
] | [((40, 60), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (55, 60), False, 'import lancedb\n')] |
import requests
import time
import numpy as np
import pyarrow as pa
import lancedb
import logging
import os
from tqdm import tqdm
from pathlib import Path
from transformers import AutoConfig
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
TEI_URL= os.getenv("EMBED_URL") + "/embed"
DIRPA... | [
"lancedb.connect"
] | [((194, 233), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (213, 233), False, 'import logging\n'), ((243, 270), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'import logging\n'), ((281, 303), 'os.getenv', 'os.... |
import lancedb
from datasets import Dataset
from homematch.config import DATA_DIR, TABLE_NAME
from homematch.data.types import ImageData
def datagen() -> list[ImageData]:
dataset = Dataset.load_from_disk(DATA_DIR / "properties_dataset")
# return Image instances
return [ImageData(**batch) for batch in da... | [
"lancedb.connect"
] | [((188, 243), 'datasets.Dataset.load_from_disk', 'Dataset.load_from_disk', (["(DATA_DIR / 'properties_dataset')"], {}), "(DATA_DIR / 'properties_dataset')\n", (210, 243), False, 'from datasets import Dataset\n'), ((397, 417), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (412, 417), False, 'import lan... |
import openai
import os
import lancedb
import pickle
import requests
from pathlib import Path
from bs4 import BeautifulSoup
import re
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSpli... | [
"lancedb.connect"
] | [((1848, 1866), 'pathlib.Path', 'Path', (['"""cities.pkl"""'], {}), "('cities.pkl')\n", (1852, 1866), False, 'from pathlib import Path\n'), ((2545, 2609), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500,... |
import queue
import threading
from dataclasses import dataclass
import lancedb
import pyarrow as pa
import numpy as np
import torch
import torch.nn.functional as F
from safetensors import safe_open
from tqdm import tqdm
from .app.schemas.task import TaskCompletion
from .ops.object_detectors import YOLOV8TRTEngine
fro... | [
"lancedb.connect"
] | [((8969, 8984), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8982, 8984), False, 'import torch\n'), ((22228, 22243), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22241, 22243), False, 'import torch\n'), ((1191, 1219), 'lancedb.connect', 'lancedb.connect', (['output_path'], {}), '(output_path)\n', (1206,... |
import os
import openai
import json
import numpy as np
from numpy.linalg import norm
import re
from time import time, sleep
from uuid import uuid4
import datetime
import lancedb
import pandas as pd
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save... | [
"lancedb.connect"
] | [((12752, 12817), 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/google/universal-sentence-encoder/4"""'], {}), "('https://tfhub.dev/google/universal-sentence-encoder/4')\n", (12760, 12817), True, 'import tensorflow_hub as hub\n'), ((720, 773), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'in... |
import logging
import chainlit as cl
import lancedb
import pandas as pd
from langchain import LLMChain
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.chat_models import ChatOpenAI
from langchain.embeddin... | [
"lancedb.connect"
] | [((498, 525), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'import logging\n'), ((537, 584), 'pandas.read_pickle', 'pd.read_pickle', (['"""data/preprocessed/recipes.pkl"""'], {}), "('data/preprocessed/recipes.pkl')\n", (551, 584), True, 'import pandas as pd\n'), ((755... |
import streamlit as st
import pandas as pd
import json
import requests
from pathlib import Path
from datetime import datetime
from jinja2 import Template
import lancedb
import sqlite3
from services.lancedb_notes import IndexDocumentsNotes
st.set_page_config(layout='wide',
page_title='Notes')
@st.ca... | [
"lancedb.connect"
] | [((240, 293), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""Notes"""'}), "(layout='wide', page_title='Notes')\n", (258, 293), True, 'import streamlit as st\n'), ((1504, 1522), 'pathlib.Path', 'Path', (['"""data/notes"""'], {}), "('data/notes')\n", (1508, 1522), False... |
from langchain.vectorstores import LanceDB
import lancedb
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# load agents and tools modules
import pandas as pd
from io import StringIO
from langchain.tools.python.tool import Py... | [
"lancedb.connect"
] | [((1254, 1285), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.text_data_path'], {}), '(self.text_data_path)\n', (1264, 1285), False, 'from langchain.document_loaders import TextLoader\n'), ((1386, 1450), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'... |
import lancedb
import numpy as np
import pandas as pd
global data
data = []
global table
table = None
def get_recommendations(title):
pd_data = pd.DataFrame(data)
# Table Search
result = (
table.search(pd_data[pd_data["title"] == title]["vector"].values[0])
.limit(5)
.to_pandas()... | [
"lancedb.connect"
] | [((152, 170), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (164, 170), True, 'import pandas as pd\n'), ((361, 488), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/links.csv"""'], {'header': '(0)', 'names': "['movie id', 'imdb id', 'tmdb id']", 'converters': "{'imdb id': str}"}), "('./ml-la... |
import lancedb
from datasets import load_dataset
import pandas as pd
import numpy as np
from hyperdemocracy.embedding.models import BGESmallEn
class Lance:
def __init__(self):
self.model = BGESmallEn()
uri = "data/sample-lancedb"
self.db = lancedb.connect(uri)
def create_table(self):
... | [
"lancedb.connect"
] | [((203, 215), 'hyperdemocracy.embedding.models.BGESmallEn', 'BGESmallEn', ([], {}), '()\n', (213, 215), False, 'from hyperdemocracy.embedding.models import BGESmallEn\n'), ((270, 290), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (285, 290), False, 'import lancedb\n'), ((333, 407), 'datasets.load_dat... |
import lancedb
uri = "test_data"
db = lancedb.connect(uri)
tbl = db.create_table("my_table",
data=[{"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
{"vector": [5.9, 26.5], "item": "bar", "price": 20.0}]) | [
"lancedb.connect"
] | [((38, 58), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (53, 58), False, 'import lancedb\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in... | [
"lancedb.fts.search_index",
"lancedb.fts.populate_index",
"lancedb.connect"
] | [((716, 750), 'pytest.importorskip', 'pytest.importorskip', (['"""lancedb.fts"""'], {}), "('lancedb.fts')\n", (735, 750), False, 'import pytest\n'), ((761, 791), 'pytest.importorskip', 'pytest.importorskip', (['"""tantivy"""'], {}), "('tantivy')\n", (780, 791), False, 'import pytest\n'), ((864, 885), 'lancedb.connect',... |
import pytest
import os
import openai
import argparse
import lancedb
import re
import pickle
import requests
import zipfile
from pathlib import Path
from main import get_document_title
from langchain.document_loaders import BSHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter imp... | [
"lancedb.connect"
] | [((766, 783), 'os.mkdir', 'os.mkdir', (['"""./tmp"""'], {}), "('./tmp')\n", (774, 783), False, 'import os\n'), ((795, 846), 'argparse.Namespace', 'argparse.Namespace', ([], {'query': '"""test"""', 'openai_key': '"""test"""'}), "(query='test', openai_key='test')\n", (813, 846), False, 'import argparse\n'), ((906, 922), ... |
"""
AI Module
This module provides an AI class that interfaces with language models to perform various tasks such as
starting a conversation, advancing the conversation, and handling message serialization. It also includes
backoff strategies for handling rate limit errors from the OpenAI API.
Classes:
AI: A class... | [
"langchain.schema.AIMessage",
"langchain.schema.messages_to_dict",
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage",
"langchain.schema.messages_from_dict",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((1266, 1293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1283, 1293), False, 'import logging\n'), ((7101, 7188), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', 'openai.RateLimitError'], {'max_tries': '(7)', 'max_time': '(45)'}), '(backoff.expo, openai.RateLimitEr... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
# -*- coding: utf-8 -*-
import openai
import json
import logging
import sys
import argparse
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain import LLMCh... | [
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.LLMChain"
] | [((717, 746), 'os.path.exists', 'os.path.exists', (['progress_file'], {}), '(progress_file)\n', (731, 746), False, 'import os\n'), ((1210, 1243), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1220, 1243), False, 'from langchain.chat_models import Cha... |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
import os
from pathlib import Path
from typing import Union
import cloudpickle
import yaml
from mlflow.exceptions import MlflowException
from mlflow.langchain.utils import (
_BASE_LOAD_KEY,
_CONFIG_LOAD_KEY,
_MODEL_DATA_FOLDER_NAME,
_MODEL_DATA_KEY,
_MODEL_DATA_PKL_FILE_NAME,
_MODEL_DATA_YAML_... | [
"langchain.llms.get_type_to_cls_dict",
"langchain.schema.runnable.passthrough.RunnableAssign",
"langchain.chains.loading.load_chain",
"langchain.schema.runnable.RunnableParallel",
"langchain.schema.runnable.RunnableSequence",
"langchain.llms.loading.load_llm",
"langchain.schema.runnable.RunnableBranch",... | [((2386, 2443), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Unsupported type {_type} for loading."""'], {}), "(f'Unsupported type {_type} for loading.')\n", (2401, 2443), False, 'from mlflow.exceptions import MlflowException\n'), ((2853, 2915), 'mlflow.exceptions.MlflowException', 'MlflowException', ... |
import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
# Remove the triple backticks if present
json_string = json_string.strip()
start_index = json_string.find("```json")
end_index = json_string.find("```", start_index + len("```json"))
... | [
"langchain.schema.OutputParserException"
] | [((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException'... |
import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agent... | [
"langchain.tools.render.render_text_description",
"langchain.agents.agent.RunnableAgent",
"langchain.memory.ConversationSummaryMemory"
] | [((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (24... |
import os
import logging
import hashlib
import PyPDF2
from tqdm import tqdm
from modules.presets import *
from modules.utils import *
from modules.config import local_embedding
def get_documents(file_src):
from langchain.schema import Document
from langchain.text_splitter import TokenTextSplitter
text_s... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings",
"langchain.document_loaders.UnstructuredWordDocumentLoader",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.UnstructuredPowerPointLoader",
"langchain.schema.Document",
"langchain.embeddings.OpenAIEmbeddings",
"langchai... | [((330, 381), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(30)'}), '(chunk_size=500, chunk_overlap=30)\n', (347, 381), False, 'from langchain.text_splitter import TokenTextSplitter\n'), ((406, 443), 'logging.debug', 'logging.debug', (['"""Loading docu... |
import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answe... | [
"langchain.schema.AgentAction",
"langchain.schema.OutputParserException"
] | [((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DO... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies... | [
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GP... |
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))... | [
"langchain.llms.openai.OpenAI",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent",
"langchain.agents.tools.Tool"
] | [((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'... |
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain_community.document_loaders.CSVLoader"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
# ruff: noqa: E402
"""Main entrypoint into package."""
import warnings
from importlib import metadata
from typing import Any, Optional
from langchain_core._api.deprecation import surface_langchain_deprecation_warnings
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Cas... | [
"langchain.utils.interactive_env.is_interactive_env",
"langchain_core._api.deprecation.surface_langchain_deprecation_warnings"
] | [((1348, 1388), 'langchain_core._api.deprecation.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (1386, 1388), False, 'from langchain_core._api.deprecation import surface_langchain_deprecation_warnings\n'), ((243, 272), 'importlib.metadata.version', 'metadata.version... |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import... | [
"langchain_core.messages.get_buffer_string",
"langchain.chains.llm.LLMChain",
"langchain_community.graphs.networkx_graph.parse_triples",
"langchain.memory.utils.get_prompt_input_key",
"langchain_community.graphs.networkx_graph.get_entities",
"langchain_core.pydantic_v1.Field"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt':... |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing fro... |
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_stri... | [
"langchain_core.messages.get_buffer_string",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_community.utilities.redis.get_client"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from lang... |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from lan... |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
f... | [
"langchain.tools.Tool",
"langchain_core.prompts.format_document",
"langchain_core.prompts.PromptTemplate.from_template",
"langchain_core.pydantic_v1.Field"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_doc... |
from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTempla... | [
"langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain.agents.format_scratchpad.format_xml",
"langchain.agents.output_parsers.XMLAgentOutputParser",
"langchain_core._api.deprecated",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template"
] | [((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts... |
"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import graphs
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchai... |
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks im... | [
"langchain.chains.llm.LLMChain",
"langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
"langchain_community.utilities.requests.TextRequestsWrapper",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain_core.pydantic_v1.root_validator",
"langchain_core.p... | [((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_va... |
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_mo... | [
"langchain.chains.llm.LLMChain",
"langchain.chains.hyde.prompts.PROMPT_MAP.keys",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager"
] | [((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get... |
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.langua... | [
"langchain.agents.mrkl.output_parser.MRKLOutputParser",
"langchain.chains.LLMChain",
"langchain.agents.utils.validate_tools_single_input",
"langchain_core.prompts.PromptTemplate",
"langchain.agents.tools.Tool",
"langchain_core._api.deprecated",
"langchain_core.prompts.PromptTemplate.from_template",
"l... | [((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core... |
import base64
import io
import os
import uuid
from io import BytesIO
from pathlib import Path
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import LocalFileStore
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import OllamaEmbedding... | [
"langchain_community.embeddings.OllamaEmbeddings",
"langchain_community.chat_models.ChatOllama",
"langchain_core.messages.HumanMessage",
"langchain.retrievers.multi_vector.MultiVectorRetriever",
"langchain_core.documents.Document"
] | [((731, 774), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""bakllava"""', 'temperature': '(0)'}), "(model='bakllava', temperature=0)\n", (741, 774), False, 'from langchain_community.chat_models import ChatOllama\n'), ((2494, 2525), 'base64.b64decode', 'base64.b64decode', (['base64_strin... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSpl... | [
"langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TokenTextSplitter"
] | [((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_mult... |
import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSpl... | [
"langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TokenTextSplitter"
] | [((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_mult... |
import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
# Remove the triple backticks if present
json_string = json_string.strip()
start_index = json_string.find("```json")
end_index = json_string.find("```", start_index + len("```json"))
... | [
"langchain.schema.OutputParserException"
] | [((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException'... |
# From project chatglm-langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
... | [
"langchain.document_loaders.UnstructuredFileLoader"
] | [((3017, 3066), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['filepath'], {'mode': '"""elements"""'}), "(filepath, mode='elements')\n", (3039, 3066), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((657, 714), 're.compile', 're.compile', (['"""([﹒﹔﹖﹗.。!?][... |
import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agent... | [
"langchain.tools.render.render_text_description",
"langchain.agents.agent.RunnableAgent",
"langchain.memory.ConversationSummaryMemory"
] | [((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (24... |
import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answe... | [
"langchain.schema.AgentAction",
"langchain.schema.OutputParserException"
] | [((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DO... |
import os
import re
import uuid
import cv2
import torch
import requests
import io, base64
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoMod... | [
"langchain.llms.openai.OpenAI",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent",
"langchain.agents.tools.Tool"
] | [((3812, 3837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3835, 3837), False, 'import torch\n'), ((3891, 3907), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3901, 3907), False, 'import cv2\n'), ((3929, 3954), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies... | [
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GP... |
import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies... | [
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GP... |
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))... | [
"langchain.llms.openai.OpenAI",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent",
"langchain.agents.tools.Tool"
] | [((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'... |
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain_community.document_loaders.CSVLoader"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain_community.document_loaders.CSVLoader"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain_community.document_loaders.CSVLoader"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain.indexes.VectorstoreIndexCreator",
"langchain_community.document_loaders.CSVLoader"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import... | [
"langchain_core.messages.get_buffer_string",
"langchain.chains.llm.LLMChain",
"langchain_community.graphs.networkx_graph.parse_triples",
"langchain.memory.utils.get_prompt_input_key",
"langchain_community.graphs.networkx_graph.get_entities",
"langchain_core.pydantic_v1.Field"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt':... |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing fro... |
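The docstring row above describes the BaseLanguageModel --> BaseLLM --> LLM hierarchy and warns that concrete wrappers now live in langchain_community. As a hedged illustration of that hierarchy, here is a toy LLM subclass; the class name and echo behaviour are made up for the example.

# Hedged sketch: a minimal custom LLM, showing the hierarchy the docstring describes.
from typing import Any, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM

class EchoLLM(LLM):
    """Toy model that returns the prompt unchanged."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        return prompt  # a real subclass would call the provider's API here

print(EchoLLM().invoke("ping"))  # prints "ping"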
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing fro... |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing fro... |
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_stri... | [
"langchain_core.messages.get_buffer_string",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_community.utilities.redis.get_client"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from lang... |
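The row above comes from the entity-memory module, whose entity_store field defaults to InMemoryEntityStore (the Field(default_factory=InMemoryEntityStore) extract). A hedged sketch of that store's key-value interface follows; it runs without an LLM or Redis, and the example entity text is illustrative.

# Hedged sketch of the default in-memory entity store; no LLM or Redis needed to run it.
from langchain.memory.entity import InMemoryEntityStore

store = InMemoryEntityStore()
store.set("Sam", "Sam is a software engineer based in Berlin.")
print(store.get("Sam"))     # the stored summary
print(store.exists("Sam"))  # True
store.delete("Sam")

The conversation-entity memory built in this module keeps such summaries up to date from the chat, and the Redis-backed store imported via get_client exposes the same get/set/delete interface.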
from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMe... | [
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator",
"langchain.memory.utils.get_prompt_input_key"
] | [((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefi... |
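The row above is the (truncated) source of ConversationBufferMemory, which renders chat history with get_buffer_string using the human_prefix and ai_prefix fields. A small hedged usage sketch follows; it needs no LLM, and the messages are illustrative.

# Hedged usage sketch for the buffer memory defined above; runs with no model or API key.
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(human_prefix="User", ai_prefix="Assistant")
memory.save_context({"input": "hi"}, {"output": "hello, how can I help?"})
print(memory.load_memory_variables({})["history"])
# User: hi
# Assistant: hello, how can I help?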
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from lan... |
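The docstring row above describes the BaseTool hierarchy that agents choose from by reading each tool's description. As a hedged illustration, the sketch below builds one concrete tool with the @tool decorator from langchain_core; the tool name and body are made up for the example.

# Hedged sketch: turning a plain function into a BaseTool instance an agent can call.
from langchain_core.tools import tool

@tool
def word_count(text: str) -> int:
    """Count the number of words in a piece of text."""
    return len(text.split())

print(word_count.name)         # "word_count"
print(word_count.description)  # derived from the docstring; this is what an agent reads
print(word_count.invoke("agents pick tools by their descriptions"))  # 6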
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from lan... |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from lan... |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from lan... |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
f... | [
"langchain.tools.Tool",
"langchain_core.prompts.format_document",
"langchain_core.prompts.PromptTemplate.from_template",
"langchain_core.pydantic_v1.Field"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_doc... |
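The row above is the (truncated) source of create_retriever_tool, which wraps any BaseRetriever as a tool whose output is the retrieved documents joined via format_document. The hedged sketch below exercises it with a toy keyword retriever so no embeddings or vector store are needed; TinyRetriever, the note text, and the tool name are all illustrative.

# Hedged sketch: wrapping a toy retriever with create_retriever_tool.
from typing import Any, List
from langchain.tools.retriever import create_retriever_tool
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever

class TinyRetriever(BaseRetriever):
    """Illustrative retriever doing naive substring matching instead of vector search."""
    docs: List[Document] = []

    def _get_relevant_documents(self, query: str, *, run_manager: Any = None) -> List[Document]:
        return [d for d in self.docs if query.lower() in d.page_content.lower()]

retriever = TinyRetriever(docs=[Document(page_content="The Titanic sank in 1912.")])
notes_tool = create_retriever_tool(
    retriever,
    name="search_notes",
    description="Search a small in-memory note collection.",
)
print(notes_tool.invoke({"query": "titanic"}))  # prints the matching note's text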