import os

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain.llms.fireworks import Fireworks

# SECURITY: this API key was previously hard-coded (and is therefore exposed
# in version control history) — rotate it and supply the real key through the
# FIREWORKS_API_KEY environment variable. setdefault lets an externally
# provided key take precedence over the in-file fallback.
os.environ.setdefault(
    "FIREWORKS_API_KEY", "ku9UYtzjSAATlcAstO8yrB89MzvDqJL3lGIkNgnVZ7URxPxK"
)

# Llama-2 13B hosted on Fireworks; temperature=0 for deterministic output,
# completions capped at 100 tokens.
llm = Fireworks(
    model="accounts/fireworks/models/llama-v2-13b",
    model_kwargs={"temperature": 0, "max_tokens": 100, "top_p": 1.0},
)

# Prompt -> LLM pipeline (LangChain Expression Language `|` composition).
prompt = PromptTemplate.from_template("Tell me a joke about {topic}?")
chain = prompt | llm
def echo(message, history):
    """Chat handler that returns the incoming message unchanged.

    The conversation *history* supplied by the UI is accepted but ignored.
    """
    return message
def get_joke(topic):
    """Return a joke about *topic* by invoking the prompt|llm chain.

    BUG FIX: `get_joke` was referenced by the Interface below but never
    defined, which raised NameError at import time. It now runs the
    LangChain pipeline built at module level.
    """
    return chain.invoke({"topic": topic})


# Create Gradio Interface: single text box in, generated joke out,
# re-running live as the user types.
iface = gr.Interface(fn=get_joke, inputs="text", outputs="text", live=True)

if __name__ == "__main__":
    iface.launch()