SoDa12321 committed on
Commit
f9d0987
·
verified ·
1 Parent(s): d8dc46b

Create funtions.py

Browse files
Files changed (1) hide show
  1. funtions.py +150 -0
funtions.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

# Third-party clients: Exa for web search, Groq for chat completions.
from exa_py import Exa
from groq import Groq

# Exa web-search client; API key is read from the environment.
exa = Exa(api_key=os.getenv("EXA_API_KEY"))

# Groq chat-completions client and the model name used for every request.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
utilized_model = "llama3-70b-8192"

# Passed to Exa so each search result carries one 7-sentence highlight.
highlights_options = {
    "num_sentences": 7,       # length of each highlight
    "highlights_per_url": 1,  # keep only the best highlight per URL
}
16
+
17
def call_llm(prompt):
    """Search the web with Exa, then ask the Groq model to answer *prompt*.

    The prompt doubles as the Exa search query; the top highlights from the
    three best results are handed to the model as context.

    Parameters:
        prompt: The research prompt / search query string.

    Returns:
        The model's completion text (str).
    """
    search_response = exa.search_and_contents(
        query=prompt,
        highlights=highlights_options,
        num_results=3,
        use_autoprompt=True,
    )
    # Fix: a result can come back with an empty ``highlights`` list, in which
    # case indexing [0] unconditionally raised IndexError — skip such results.
    info = [sr.highlights[0] for sr in search_response.results if sr.highlights]

    system_prompt = (
        "You are an academic PhD proposal generator. Read the provided "
        "contexts and, if relevant, use them to generate a well-structured "
        "research proposal."
    )
    user_prompt = f"Sources: {info}\nResearch Prompt: {prompt}"

    completion = client.chat.completions.create(
        model=utilized_model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
    )
    return completion.choices[0].message.content
32
+
33
def generate_executive_summary(data):
    """Generate an executive summary for a PhD proposal via the LLM.

    ``data`` must provide: research_topic, research_question, objectives,
    methodology, contribution, literature_gap.
    """
    return call_llm(f"""
    Generate a concise executive summary for a PhD research proposal based on the following information:

    Research Topic: {data["research_topic"]}
    Research Question: {data["research_question"]}
    Objectives: {data["objectives"]}
    Methodology: {data["methodology"]}
    Contribution to the Field: {data["contribution"]}
    Literature Gap: {data["literature_gap"]}

    The summary should highlight the research problem, its significance, the approach, and expected contributions.
    """)
47
+
48
def generate_literature_review_outline(data):
    """Generate a structured literature-review outline via the LLM.

    ``data`` must provide: research_topic, key_authors, recent_developments,
    literature_gap.
    """
    outline_request = f"""
    Generate a structured outline for the literature review of a PhD thesis on the following topic:

    Research Topic: {data["research_topic"]}
    Key Authors: {data["key_authors"]}
    Recent Developments: {data["recent_developments"]}
    Gaps in Literature: {data["literature_gap"]}

    The outline should cover key themes, debates, and the relevance of existing work to the proposed research.
    """
    return call_llm(outline_request)
60
+
61
def generate_methodology_section(data):
    """Generate the research-methodology section via the LLM.

    ``data`` must provide: research_topic, data_collection, data_analysis,
    justification.
    """
    return call_llm(f"""
    Write a detailed research methodology section for a PhD proposal based on the following:

    Research Topic: {data["research_topic"]}
    Data Collection Methods: {data["data_collection"]}
    Data Analysis Methods: {data["data_analysis"]}
    Justification: {data["justification"]}

    The methodology should demonstrate how the research will be conducted reliably and validly.
    """)
73
+
74
def generate_research_objectives(data):
    """Generate SMART short- and long-term research objectives via the LLM.

    ``data`` must provide: research_topic, objectives.
    """
    objectives_request = f"""
    Generate a detailed list of short-term and long-term research objectives for the following PhD thesis topic:

    Research Topic: {data["research_topic"]}
    Objectives: {data["objectives"]}

    The objectives should follow the SMART criteria (Specific, Measurable, Achievable, Relevant, and Time-bound).
    """
    return call_llm(objectives_request)
84
+
85
def generate_hypotheses(data):
    """Generate research hypotheses via the LLM.

    ``data`` must provide: research_topic, research_question.
    """
    return call_llm(f"""
    Generate research hypotheses based on the following topic:

    Research Topic: {data["research_topic"]}
    Research Question: {data["research_question"]}

    The hypotheses should clearly predict expected outcomes based on theoretical foundations.
    """)
95
+
96
def generate_contribution_statement(data):
    """Generate a statement-of-contribution section via the LLM.

    ``data`` must provide: research_topic, contribution.
    """
    statement_request = f"""
    Generate a statement of contribution for the following PhD research proposal:

    Research Topic: {data["research_topic"]}
    Contribution to the Field: {data["contribution"]}

    The statement should highlight how the research will address existing gaps and advance knowledge in the field.
    """
    return call_llm(statement_request)
106
+
107
def generate_research_timeline(data):
    """Generate a phased research timeline via the LLM.

    ``data`` must provide: research_topic, total_timeframe.
    """
    return call_llm(f"""
    Generate a detailed research timeline for completing a PhD thesis on the following topic:

    Research Topic: {data["research_topic"]}
    Total Timeframe: {data["total_timeframe"]}

    The timeline should break down tasks into manageable phases (e.g., literature review, data collection, analysis) with deadlines.
    """)
117
+
118
def generate_proposal_introduction(data):
    """Generate the proposal introduction via the LLM.

    ``data`` must provide: research_topic, research_problem.
    """
    intro_request = f"""
    Write an engaging introduction for a PhD proposal on the following research topic:

    Research Topic: {data["research_topic"]}
    Research Problem: {data["research_problem"]}

    The introduction should provide background, introduce the problem, and explain the significance of the research.
    """
    return call_llm(intro_request)
128
+
129
def generate_limitations_section(data):
    """Generate the limitations-and-challenges section via the LLM.

    ``data`` must provide: research_topic, methodology.
    """
    return call_llm(f"""
    Generate a section describing the potential limitations and challenges of the following research:

    Research Topic: {data["research_topic"]}
    Methodology: {data["methodology"]}

    The limitations should address possible obstacles and suggest ways to mitigate them.
    """)
139
+
140
def generate_future_work_section(data):
    """Generate the future-work section via the LLM.

    ``data`` must provide: research_topic, contribution.
    """
    future_request = f"""
    Generate a section on future work based on the following research:

    Research Topic: {data["research_topic"]}
    Contribution: {data["contribution"]}

    The future work section should suggest further areas for research that could build upon the findings.
    """
    return call_llm(future_request)
150
+