import functools

import gradio as gr
from langchain import PromptTemplate, LLMChain
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
|
|
|
|
def func(prompt):
    """Answer a user question with a local GPT4All model via a LangChain chain.

    Parameters
    ----------
    prompt : str
        The user's question (the raw text typed into the Gradio textbox).

    Returns
    -------
    str
        The model's generated answer.
    """
    template = """Question: {question}

Answer: Let's think step by step."""

    # Named distinctly from the `prompt` argument: the original code rebound
    # `prompt` to this PromptTemplate, silently discarding the user's input.
    question_prompt = PromptTemplate(template=template, input_variables=["question"])

    # NOTE(review): GPT4All expects a *local* model file path; an HTTP URL will
    # almost certainly fail to load unless the file is downloaded first — confirm.
    local_path = (
        "https://tommy24-llm.hf.space/file=nous-hermes-13b.ggmlv3.q4_0.bin"
    )

    llm_chain = LLMChain(prompt=question_prompt, llm=_load_llm(local_path))

    # Feed the *user's* question into the chain (bug fix: the original passed
    # the PromptTemplate object itself as the question).
    return llm_chain.run(question=prompt)


@functools.lru_cache(maxsize=1)
def _load_llm(model_path):
    """Load the GPT4All model once and reuse it across requests.

    The original code constructed the model twice per call (the first instance
    was immediately discarded) and reloaded it on every request; caching keeps
    one instance alive for the process lifetime.
    """
    # Streams generated tokens to stdout as they are produced.
    callbacks = [StreamingStdOutCallbackHandler()]
    # NOTE(review): backend="gptj" was the original's final choice, but the
    # filename suggests a LLaMA-family ggml model — confirm the backend.
    return GPT4All(model=model_path, backend="gptj", callbacks=callbacks, verbose=True)
|
|
# Expose `func` through a minimal text-in / text-out Gradio web UI.
iface = gr.Interface(fn=func, inputs="text", outputs="text")

# Start the local Gradio server (blocks until shut down).
iface.launch()