|
|
import gradio as gr |
|
|
from langchain.prompts import PromptTemplate |
|
|
from langchain.llms.fireworks import Fireworks |
|
|
import os |
|
|
|
|
|
# SECURITY: a live Fireworks API key was previously hard-coded on this line
# and committed to source control -- that key must be considered leaked and
# rotated. Credentials are now read from the environment instead.
if "FIREWORKS_API_KEY" not in os.environ:
    raise RuntimeError(
        "Set the FIREWORKS_API_KEY environment variable before running this app."
    )
|
|
|
|
|
|
|
|
# Fireworks-hosted Llama 2 13B completion model.
# temperature=0 requests (near-)deterministic output; max_tokens caps the
# response length; top_p=1.0 disables nucleus-sampling truncation.
llm = Fireworks(


    model="accounts/fireworks/models/llama-v2-13b",


    model_kwargs={"temperature": 0, "max_tokens": 100, "top_p": 1.0},


)


# Single-variable template; "{topic}" is filled in when the chain is invoked.
prompt = PromptTemplate.from_template("Tell me a joke about {topic}?")


# LCEL pipe: formatting the prompt feeds directly into the LLM, so the
# resulting runnable is called as chain.invoke({"topic": ...}).
chain = prompt | llm
|
|
|
|
|
def echo(message, history):
    """Return *message* unchanged.

    Chat-style callback signature: *history* (the prior conversation) is
    accepted but deliberately ignored.
    """
    del history  # explicitly unused
    reply = message
    return reply
|
|
|
|
|
|
|
|
def get_joke(topic):
    """Return a joke about *topic* generated by the prompt | llm chain.

    Fix: the Interface below referenced ``get_joke``, but no such function
    existed anywhere in the file, so the script crashed with a NameError on
    startup. This wrapper supplies it by invoking the LCEL chain.
    """
    # LCEL runnables are executed with .invoke(); the dict key must match
    # the "{topic}" placeholder in the PromptTemplate.
    return chain.invoke({"topic": topic})


# NOTE(review): live=True re-runs get_joke on every keystroke, i.e. one LLM
# call per character typed -- consider live=False for cost and latency.
iface = gr.Interface(fn=get_joke, inputs="text", outputs="text", live=True)
iface.launch()