import logging
import sys
import gradio as gr
import asyncio
import nest_asyncio

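# Send log output to stdout so indexing and query activity shows up in the console.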
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

from llama_index.core import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    set_global_service_context,
)

from langchain_community.embeddings import HuggingFaceInstructEmbeddings

from g4f import Provider, models
from langchain.llms.base import LLM
from llama_index.llms.langchain import LangChainLLM
from langchain_g4f import G4FLLM

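# Allow nested asyncio event loops: g4f can issue async calls from inside an
# already-running loop (e.g. under Gradio), which stock asyncio would reject.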
nest_asyncio.apply()

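# Inactive alternative kept in a string literal: the larger Instructor-XL embedding model.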
| """ |
| documents = SimpleDirectoryReader('data').load_data() |
| model_kwargs = {'device': 'cpu'} |
| encode_kwargs = {'normalize_embeddings': True} |
| embed_model = HuggingFaceInstructEmbeddings( |
| model_name="hkunlp/instructor-xl", model_kwargs=model_kwargs, |
| encode_kwargs=encode_kwargs |
| ) |
| |
| """ |
|
|
| |
|
|
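# Embedding model: Instructor-Large, run locally on CPU with normalized embeddings.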
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embed_model = HuggingFaceInstructEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs
)
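
# LLM: GPT-3.5 Turbo reached through g4f's ChatgptAi provider, exposed as a LangChain LLM.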
llm: LLM = G4FLLM(
    model=models.gpt_35_turbo,
    provider=Provider.ChatgptAi,
)

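# Wrap the LangChain LLM in llama_index's adapter so the query engine can call it.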
llm = LangChainLLM(llm=llm)

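# Bundle the LLM, the embedding model, and a chunk size of 512 into a ServiceContext.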
service_context = ServiceContext.from_defaults(chunk_size=512, llm=llm, embed_model=embed_model)

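# Load the documents from ./data and build the vector index over them.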
documents = SimpleDirectoryReader('data').load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

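# Gradio handler: answer a single query against the index and return the response text.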
async def main(query):
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(response)
    return str(response)

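# Minimal Gradio UI: a text box in, the model's answer out.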
iface = gr.Interface(fn=main, inputs="text", outputs="text")
iface.launch()