Jobanpreet commited on
Commit
748f04d
·
verified ·
1 Parent(s): 310a1b6

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +140 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""FastAPI service that fetches a LinkedIn post, paraphrases it with an
OpenAI chat model via LangChain, and extracts keywords / take-aways /
highlights."""

import re

import openai
import requests
from dotenv import load_dotenv
from fastapi import FastAPI, Form
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import ChatPromptTemplate, PromptTemplate
from langchain_community.document_loaders import WebBaseLoader

# Pull OPENAI_API_KEY (and any other secrets) from a local .env file.
load_dotenv()

app = FastAPI()
def is_shortened_url(url):
    """Return True when *url* appears to be a shortened link.

    Detection: issue a HEAD request following redirects; if the final
    resolved URL differs from the input, the input was a redirecting
    (shortened) link.

    NOTE(review): any redirect — e.g. http -> https or trailing-slash
    canonicalisation — is also reported as "shortened"; confirm this is
    acceptable for the callers.

    Returns False on any network error (best-effort, logged to stdout).
    """
    try:
        # timeout keeps a dead short-link service from hanging the request
        # forever (the original call had no timeout at all).
        response = requests.head(url, allow_redirects=True, timeout=10)
    except requests.exceptions.RequestException as e:
        print("Error:", e)
        return False
    # Redirects changed the URL => the input was a short link.
    return response.url != url
def expand_short_url(short_url):
    """Resolve *short_url* to its final destination URL.

    Follows redirects with a HEAD request and returns the final URL on
    HTTP 200; returns None (logging to stdout) on a non-200 status or
    any network error.
    """
    try:
        # timeout added: the original request could block indefinitely.
        response = requests.head(short_url, allow_redirects=True, timeout=10)
    except requests.exceptions.RequestException as e:
        print("Error:", e)
        return None
    if response.status_code == 200:
        return response.url
    print("Error: Short URL couldn't be expanded.")
    return None
def get_original_url(url):
    """Return the fully expanded URL for *url*.

    Short links are resolved through expand_short_url() (which may
    return None on failure); ordinary URLs are returned unchanged.
    """
    return expand_short_url(url) if is_shortened_url(url) else url
# Complete pipeline: extract content from the URL with WebBaseLoader, use an
# LLM to isolate the post text, then paraphrase it.
def paraphrased_post(url):
    """Fetch a LinkedIn post, paraphrase it, and extract metadata.

    Pipeline:
      1. Load the page with WebBaseLoader.
      2. LLM pass #1 isolates the post body from the page noise.
      3. LLM pass #2 paraphrases that body.
      4. extract_data() pulls keywords / take-aways / highlights.

    Returns a 4-tuple ``(paraphrased_text, keywords, take_aways,
    highlights)`` with each list truncated to at most 3 items.
    """
    loader = WebBaseLoader([url], encoding='utf-8')
    docs = loader.load()

    # temperature=0 for deterministic output; one shared model for both
    # passes (the original constructed two identical ChatOpenAI instances).
    llm = ChatOpenAI(temperature=0)

    scrape_template = """You are a helpful LinkedIn webscrapper. You are provided with a data , extract the content of the post only.
{docs}"""
    scrape_prompt = PromptTemplate(template=scrape_template, input_variables=['docs'])
    scrape_chain = LLMChain(llm=llm, prompt=scrape_prompt)
    result = scrape_chain.invoke({'docs': docs}, return_only_outputs=True)
    data = result['text']

    paraphrase_template = """You are a helpful LinkedIn post paraphraser and plagiarism remover bot. You are provided with LinkedIn post content and your task is to paraphrase it and remove plagiarism .Return the output in the format with spaces or stickers if present.
{data}"""
    paraphrase_prompt = PromptTemplate(template=paraphrase_template, input_variables=['data'])
    paraphrase_chain = LLMChain(llm=llm, prompt=paraphrase_prompt)
    # .invoke(...) replaces the deprecated chain({...}) __call__ style so the
    # two chain invocations in this function are consistent.
    result2 = paraphrase_chain.invoke({'data': data}, return_only_outputs=True)

    data2 = extract_data(result2['text'])
    keywords = data2['Keywords'][:3]
    take_aways = data2['Take Aways'][:3]
    highlights = data2['Highlights'][:3]
    return result2['text'], keywords, take_aways, highlights
def extract_data(post_data):
    """Extract keywords, take-aways and highlights from post text.

    Builds a StructuredOutputParser over three list-valued response
    schemas, prompts the LLM with the post content plus the parser's
    format instructions, and parses the model reply.

    Returns a dict with keys "Keywords", "Take Aways" and "Highlights"
    (these exact names — including the space in "Take Aways" — are read
    by paraphrased_post(), so they must not change), each a list.
    """
    keywords = ResponseSchema(
        name="Keywords",
        description="These are the keywords extracted from LinkedIn post",
        type="list")
    take_aways = ResponseSchema(
        name="Take Aways",
        description="These are the take aways extracted from LinkedIn post",
        type="list")
    highlights = ResponseSchema(
        name="Highlights",
        description="These are the highlights extracted from LinkedIn post",
        type="list")

    output_parser = StructuredOutputParser.from_response_schemas(
        [keywords, take_aways, highlights])
    format_instructions = output_parser.get_format_instructions()

    template = """
You are a helpful keywords , take aways and highlights extractor from the post of LinkedIn Bot. Your task is to extract relevant keywords , take aways and highlights extractor.
From the following text message, extract the following information:

text message: {content}
{format_instructions}
"""

    prompt_template = ChatPromptTemplate.from_template(template)
    messages = prompt_template.format_messages(
        content=post_data, format_instructions=format_instructions)
    llm = ChatOpenAI(temperature=0)
    # .invoke(messages) replaces the deprecated direct call llm(messages),
    # matching the .invoke style used elsewhere in this file.
    response = llm.invoke(messages)
    return output_parser.parse(response.content)
# Define a route to handle POST requests to '/paraphrase'

@app.post("/paraphrase")
async def paraphrase(url: str = Form(...)):
    """POST /paraphrase — paraphrase a LinkedIn post given its URL.

    Accepts a form field ``url``, expands short links, validates that
    the result is a LinkedIn post/feed/pulse URL (not e.g. a login
    page), then returns the paraphrased text plus extracted keywords,
    take-aways and highlights. Invalid input yields the same plain
    error strings the clients already expect.
    """
    try:
        if not url:
            return "Please enter a link"
        original_url = get_original_url(url)
        # Explicitly handle a short link that could not be expanded.
        # Previously this fell through to re.match(pattern, None), which
        # raised TypeError and was rescued only by the broad except below.
        if original_url is None:
            return "Put a valid LinkedIn post url only"
        # Checking domain and page type: only an actual post/feed/pulse
        # page is accepted.
        match = re.match(
            r"https?://(?:www\.)?linkedin\.com/(posts|feed|pulse)/.*",
            original_url)
        if not match:
            return "Put a valid LinkedIn post url only"
        paraphrased_content, keywords, take_aways, highlights = paraphrased_post(original_url)
        return {"Paraphrased post": paraphrased_content,
                "Keywords": keywords,
                "Take Aways": take_aways,
                "Highlights": highlights}
    except (openai.BadRequestError, TypeError):
        return "Put a valid LinkedIn post url only"
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ langchain_community
2
+ python-dotenv
3
+ langchain
4
+ fastapi
5
+ python-multipart
6
+ openai
7
+ requests