Spaces:
Runtime error
Runtime error
Update function.py
Browse files- function.py +6 -8
function.py
CHANGED
|
@@ -3,7 +3,7 @@ from langchain.chains import LLMChain, SimpleSequentialChain
|
|
| 3 |
from langchain.memory import SimpleMemory
|
| 4 |
from langchain.prompts import PromptTemplate
|
| 5 |
from langchain.llms import OpenAI
|
| 6 |
-
|
| 7 |
|
| 8 |
|
| 9 |
|
|
@@ -16,12 +16,10 @@ def ai_generated_content(topic, audience, benefit, date_time):
|
|
| 16 |
memory = SimpleMemory(memory={"topic": topic, "audience": audience, "benefit": benefit, "date_time": date_time})
|
| 17 |
|
| 18 |
# Define prompt templates using stored memory variables
|
| 19 |
-
headline_template =
|
|
|
|
|
|
|
| 20 |
)
|
| 21 |
-
|
| 22 |
-
# input_variables=["topic", "audience", "benefit"],
|
| 23 |
-
# template="Create a headline for {audience} interested in {topic} that highlights the benefit of {benefit}."
|
| 24 |
-
# )
|
| 25 |
|
| 26 |
subheadline_template = PromptTemplate(
|
| 27 |
input_variables=["audience", "benefit"],
|
|
@@ -29,8 +27,8 @@ def ai_generated_content(topic, audience, benefit, date_time):
|
|
| 29 |
)
|
| 30 |
|
| 31 |
# Create LLMChains for each section
|
| 32 |
-
headline_chain = LLMChain(llm=
|
| 33 |
-
subheadline_chain = LLMChain(llm=
|
| 34 |
|
| 35 |
# Run chains and retrieve results
|
| 36 |
headline = headline_chain.run({ "topic": topic,
|
|
|
|
| 3 |
from langchain.memory import SimpleMemory
|
| 4 |
from langchain.prompts import PromptTemplate
|
| 5 |
from langchain.llms import OpenAI
|
| 6 |
+
|
| 7 |
|
| 8 |
|
| 9 |
|
|
|
|
| 16 |
memory = SimpleMemory(memory={"topic": topic, "audience": audience, "benefit": benefit, "date_time": date_time})
|
| 17 |
|
| 18 |
# Define prompt templates using stored memory variables
|
| 19 |
+
headline_template = PromptTemplate(
|
| 20 |
+
input_variables=["topic", "audience", "benefit"],
|
| 21 |
+
template="Create a headline for {audience} interested in {topic} that highlights the benefit of {benefit}."
|
| 22 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
subheadline_template = PromptTemplate(
|
| 25 |
input_variables=["audience", "benefit"],
|
|
|
|
| 27 |
)
|
| 28 |
|
| 29 |
# Create LLMChains for each section
|
| 30 |
+
headline_chain = LLMChain(llm=llm, prompt=headline_template, memory=memory)
|
| 31 |
+
subheadline_chain = LLMChain(llm=llm, prompt=subheadline_template, memory=memory)
|
| 32 |
|
| 33 |
# Run chains and retrieve results
|
| 34 |
headline = headline_chain.run({ "topic": topic,
|