AnishaG0201 committed on
Commit
c40e1d2
·
verified ·
1 Parent(s): 38379e4

Update function.py

Browse files
Files changed (1) hide show
  1. function.py +6 -8
function.py CHANGED
@@ -3,7 +3,7 @@ from langchain.chains import LLMChain, SimpleSequentialChain
3
  from langchain.memory import SimpleMemory
4
  from langchain.prompts import PromptTemplate
5
  from langchain.llms import OpenAI
6
- from langchain.prompts import ChatPromptTemplate
7
 
8
 
9
 
@@ -16,12 +16,10 @@ def ai_generated_content(topic, audience, benefit, date_time):
16
  memory = SimpleMemory(memory={"topic": topic, "audience": audience, "benefit": benefit, "date_time": date_time})
17
 
18
  # Define prompt templates using stored memory variables
19
- headline_template = ChatPromptTemplate.from_template("Create a headline for {audience} interested in {topic} that highlights the benefit of {benefit}."
 
 
20
  )
21
-
22
- # input_variables=["topic", "audience", "benefit"],
23
- # template="Create a headline for {audience} interested in {topic} that highlights the benefit of {benefit}."
24
- # )
25
 
26
  subheadline_template = PromptTemplate(
27
  input_variables=["audience", "benefit"],
@@ -29,8 +27,8 @@ def ai_generated_content(topic, audience, benefit, date_time):
29
  )
30
 
31
  # Create LLMChains for each section
32
- headline_chain = LLMChain(llm=OpenAI(model="gpt-4"), prompt=headline_template, memory=memory)
33
- subheadline_chain = LLMChain(llm=OpenAI(model="gpt-4"), prompt=subheadline_template, memory=memory)
34
 
35
  # Run chains and retrieve results
36
  headline = headline_chain.run({ "topic": topic,
 
3
  from langchain.memory import SimpleMemory
4
  from langchain.prompts import PromptTemplate
5
  from langchain.llms import OpenAI
6
+
7
 
8
 
9
 
 
16
  memory = SimpleMemory(memory={"topic": topic, "audience": audience, "benefit": benefit, "date_time": date_time})
17
 
18
  # Define prompt templates using stored memory variables
19
+ headline_template = PromptTemplate(
20
+ input_variables=["topic", "audience", "benefit"],
21
+ template="Create a headline for {audience} interested in {topic} that highlights the benefit of {benefit}."
22
  )
 
 
 
 
23
 
24
  subheadline_template = PromptTemplate(
25
  input_variables=["audience", "benefit"],
 
27
  )
28
 
29
  # Create LLMChains for each section
30
+ headline_chain = LLMChain(llm=llm, prompt=headline_template, memory=memory)
31
+ subheadline_chain = LLMChain(llm=llm), prompt=subheadline_template, memory=memory)
32
 
33
  # Run chains and retrieve results
34
  headline = headline_chain.run({ "topic": topic,