sharma-kanishka committed (verified)
Commit: a138eb5 · Parent: 92ace54

Upload 17 files


working files added

.github/workflows/python-app.yml ADDED
@@ -0,0 +1,23 @@
+name: Streamlit app
+
+on:
+  push:
+    branches: [ "main" ]
+  pull_request:
+    branches: [ "main" ]
+
+permissions:
+  contents: read
+
+jobs:
+  streamlit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+      - uses: streamlit/streamlit-app-action@v0.0.3
+        with:
+          app-path: streamlit_app.py
+          ruff: true
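For reference: as configured, the streamlit/streamlit-app-action step presumably smoke-tests the app named by app-path under Python 3.11 and, with ruff: true, also runs a ruff lint pass on every push and pull request targeting main.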
app.py ADDED
@@ -0,0 +1,178 @@
+import streamlit as st
+from src.api.model_integration import stream_response
+from src.utils.prompt_templates import (
+    get_translation_prompt,
+    get_sentiment_analysis_prompt,
+    get_cultural_reference_explanation_prompt,
+    get_interactive_translation_prompt,
+)
+from config.config import Config
+
+
+def setup_page():
+    """
+    Sets up the page with custom styles and page configuration.
+    """
+    st.set_page_config(
+        page_title="Translator-AI (Llama3.1)",
+        layout="wide",
+        initial_sidebar_state="expanded",
+    )
+
+    st.markdown(
+        """
+        <style>
+        :root {
+            --llama-color: #4e8cff;
+            --llama-color-light: #e6f0ff;
+            --llama-color-dark: #1a3a6c;
+            --llama-gradient-start: #4e54c8;
+            --llama-gradient-end: #8f94fb;
+        }
+        .stApp {
+            margin: auto;
+            background-color: var(--background-color);
+            color: var(--text-color);
+        }
+        .logo-container {
+            display: flex;
+            justify-content: center;
+            margin-bottom: 1rem;
+        }
+        .logo-container img {
+            width: 150px;
+        }
+        </style>
+        """,
+        unsafe_allow_html=True,
+    )
+
+
+def main():
+    setup_page()
+
+    # Header section with title and subtitle
+    st.markdown(
+        """
+        <div style="text-align: center;">
+            <h1 class="header-title">🦙 Meta-Llama 3.1 Translator-AI</h1>
+            <p class="header-subtitle">Powered by Meta's advanced language models</p>
+        </div>
+        """,
+        unsafe_allow_html=True,
+    )
+
+    # Meta logo
+    st.markdown(
+        """
+        <div class="logo-container">
+            <img src="https://upload.wikimedia.org/wikipedia/commons/7/7b/Meta_Platforms_Inc._logo.svg" alt="Meta Logo">
+        </div>
+        """,
+        unsafe_allow_html=True,
+    )
+
+    # Remove the Llama image display
+
+    # Sidebar for settings
+    with st.sidebar:
+        st.title("🦙 Llama Translator Settings")
+        model_name = st.selectbox("Choose a model", Config.AVAILABLE_MODELS)
+
+        source_lang = st.selectbox(
+            "From", ["English", "Spanish", "French", "German", "Japanese"]
+        )
+        target_lang = st.selectbox(
+            "To", ["Spanish", "English", "French", "German", "Japanese"]
+        )
+        cultural_context = st.selectbox(
+            "Context", ["Formal", "Casual", "Business", "Youth Slang", "Poetic"]
+        )
+
+    # Main container with border
+    main_container = st.container(border=True)
+
+    with main_container:
+        st.header("Enter Text for Translation and Analysis")
+        text = st.text_area(
+            "Text to translate",
+            "It was the best of times, it was the worst of times...",
+            height=200,
+        )
+        st.caption(f"Character count: {len(text)}")
+
+    if st.button("Translate and Analyze", type="primary"):
+        if text:
+            # Tabs for different analysis types
+            tab1, tab2, tab3, tab4 = st.tabs(
+                [
+                    "Translation",
+                    "Sentiment Analysis",
+                    "Cultural References",
+                    "Interactive Translation",
+                ]
+            )
+
+            # Tab 1: Translation
+            with tab1:
+                st.subheader("Translation Result")
+                translation_container = st.empty()
+                translation_prompt = get_translation_prompt(
+                    text, source_lang, target_lang, cultural_context
+                )
+                translation = stream_response(
+                    [{"role": "user", "content": translation_prompt}],
+                    translation_container,
+                    model_name,
+                )
+
+            # Tab 2: Sentiment Analysis
+            with tab2:
+                st.subheader("Sentiment Analysis")
+                sentiment_container = st.empty()
+                sentiment_prompt = get_sentiment_analysis_prompt(text, source_lang)
+                sentiment_analysis = stream_response(
+                    [{"role": "user", "content": sentiment_prompt}],
+                    sentiment_container,
+                    model_name,
+                )
+
+            # Tab 3: Cultural References
+            with tab3:
+                st.subheader("Cultural References")
+                cultural_container = st.empty()
+                cultural_prompt = get_cultural_reference_explanation_prompt(
+                    text, source_lang, target_lang
+                )
+                cultural_references = stream_response(
+                    [{"role": "user", "content": cultural_prompt}],
+                    cultural_container,
+                    model_name,
+                )
+
+            # Tab 4: Interactive Translation
+            with tab4:
+                st.subheader("Interactive Translation")
+                interactive_container = st.empty()
+                interactive_prompt = get_interactive_translation_prompt(
+                    text, source_lang, target_lang
+                )
+                interactive_translation = stream_response(
+                    [{"role": "user", "content": interactive_prompt}],
+                    interactive_container,
+                    model_name,
+                )
+
+    # Sidebar for additional information and feedback
+    with st.sidebar:
+        st.subheader("About")
+        st.info("This app demonstrates Meta's Llama 3.1 capabilities.")
+
+        st.subheader("Feedback")
+        feedback = st.text_area("Leave your feedback here", height=100)
+        if st.button("Submit Feedback"):
+            st.success("Thank you for your feedback!")
+
+
+if __name__ == "__main__":
+    main()
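app.py hands st.empty() placeholders to stream_response and lets the API helpers write into them. To exercise those same calls from a plain script or test, outside a running Streamlit session, a duck-typed stand-in only needs the three methods the helpers actually use. The class below is a hypothetical sketch, not part of the commit:

class FakeContainer:
    """Minimal stand-in for a Streamlit container/placeholder (illustrative only)."""

    def empty(self):
        # The helpers call container.empty() and then write into the returned
        # placeholder, so returning self is enough for a stub.
        return self

    def markdown(self, text):
        print(text)

    def error(self, text):
        print(f"ERROR: {text}")

# e.g. stream_response(messages, FakeContainer(), "llama3.1")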
config/__init__.py ADDED
File without changes
config/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (182 Bytes).
 
config/__pycache__/config.cpython-311.pyc ADDED
Binary file (1.05 kB).
 
config/config.py ADDED
@@ -0,0 +1,19 @@
+import os
+from dotenv import load_dotenv
+load_dotenv()
+
+class Config:
+    """
+    A configuration class that retrieves env variables and stores config settings.
+    """
+
+    HOSTED_BASE_URL = os.getenv("HOSTED_BASE_URL")
+    HOSTED_API_KEY = os.getenv("HOSTED_API_KEY")
+    LOCAL_BASE_URL = os.getenv("LOCAL_BASE_URL")
+
+    AVAILABLE_MODELS = [
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        "llama3.1",
+    ]
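Config only reads whatever load_dotenv() (or the shell) has exported, so a missing .env simply yields None for each URL or key and the failure surfaces later as an API error. A small startup check along these lines can catch that earlier; the helper below is a hypothetical sketch, not part of the commit, and mirrors the routing in src/api/model_integration.py:

from config.config import Config

def assert_config_present(model_name: str) -> None:
    # Hosted meta-llama/ models need the base URL and API key;
    # the local "llama3.1" route only needs LOCAL_BASE_URL.
    if model_name.startswith("meta-llama/"):
        required = {
            "HOSTED_BASE_URL": Config.HOSTED_BASE_URL,
            "HOSTED_API_KEY": Config.HOSTED_API_KEY,
        }
    else:
        required = {"LOCAL_BASE_URL": Config.LOCAL_BASE_URL}
    missing = [name for name, value in required.items() if not value]
    if missing:
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")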
requirements.txt ADDED
@@ -0,0 +1,7 @@
+streamlit
+openai
+requests
+Pillow
+python-dotenv
+langchain
+langchain_community
src/__init__.py ADDED
File without changes
src/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (179 Bytes).
 
src/api/__init__.py ADDED
File without changes
src/api/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (183 Bytes).
 
src/api/__pycache__/model_integration.cpython-311.pyc ADDED
Binary file (4.55 kB).
 
src/api/model_integration.py ADDED
@@ -0,0 +1,94 @@
+import requests
+import json
+from openai import OpenAI
+from config.config import Config
+
+
+def get_api_config(model_name):
+    """
+    Get API base URL and API key based on the model name.
+    """
+    if model_name.startswith("meta-llama/"):
+        return Config.HOSTED_BASE_URL, Config.HOSTED_API_KEY
+    elif model_name == "llama3.1":
+        return Config.LOCAL_BASE_URL, None
+    else:
+        raise ValueError(f"Invalid model name: {model_name}")
+
+
+def handle_hosted_request(client, model_name, messages, container):
+    """
+    Handles the hosted Llama 3.1 model requests via OpenAI's API.
+    """
+    try:
+        stream = client.chat.completions.create(
+            model=model_name,
+            messages=messages,
+            stream=True,
+        )
+        response_placeholder = container.empty()
+        full_response = ""
+        for chunk in stream:
+            if chunk.choices[0].delta.content is not None:
+                full_response += chunk.choices[0].delta.content
+                response_placeholder.markdown(full_response + "▌")
+        response_placeholder.markdown(full_response)
+        return full_response
+    except Exception as e:
+        error_message = f"API Error: {str(e)}"
+        container.error(error_message)
+        return None
+
+
+def handle_local_request(base_url, model_name, messages, container):
+    """
+    Handles requests to the locally hosted Llama 3.1 model.
+    """
+    try:
+        payload = {
+            "model": model_name,
+            "messages": messages,
+            "stream": True,
+        }
+        headers = {"Content-Type": "application/json"}
+
+        response_placeholder = container.empty()
+        full_response = ""
+
+        with requests.post(
+            base_url, json=payload, headers=headers, stream=True
+        ) as response:
+            response.raise_for_status()
+            for line in response.iter_lines():
+                if line:
+                    try:
+                        chunk = json.loads(line)
+                        if "done" in chunk and chunk["done"]:
+                            break
+                        if "message" in chunk and "content" in chunk["message"]:
+                            content = chunk["message"]["content"]
+                            full_response += content
+                            response_placeholder.markdown(full_response + "▌")
+                    except json.JSONDecodeError:
+                        pass
+        response_placeholder.markdown(full_response)
+        return full_response
+    except requests.RequestException as e:
+        error_message = f"API Error: {str(e)}"
+        container.error(error_message)
+        return None
+
+
+def stream_response(messages, container, model_name):
+    """
+    This function handles the API request based on the model (hosted or local) and streams the response.
+    """
+    base_url, api_key = get_api_config(model_name)
+
+    if model_name.startswith("meta-llama/"):
+        client = OpenAI(api_key=api_key, base_url=base_url)
+        return handle_hosted_request(client, model_name, messages, container)
+    elif model_name == "llama3.1":
+        return handle_local_request(base_url, model_name, messages, container)
+    else:
+        raise ValueError("Unsupported model selected.")
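handle_local_request assumes the local endpoint streams line-delimited JSON chunks in the Ollama chat style: a message.content fragment per line and a final done flag. The snippet below walks through that parsing loop with canned data to show how the fragments are reassembled; the sample lines are hypothetical values, not part of the commit:

import json

raw_lines = [
    b'{"message": {"content": "Hola"}, "done": false}',
    b'{"message": {"content": ", mundo"}, "done": false}',
    b'{"done": true}',
]

full_response = ""
for line in raw_lines:
    chunk = json.loads(line)
    if chunk.get("done"):  # final chunk: stop accumulating
        break
    if "message" in chunk and "content" in chunk["message"]:
        full_response += chunk["message"]["content"]

print(full_response)  # -> "Hola, mundo"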
src/utils/__init__.py ADDED
File without changes
src/utils/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (185 Bytes).
 
src/utils/__pycache__/prompt_templates.cpython-311.pyc ADDED
Binary file (5.02 kB).
 
src/utils/prompt_templates.py ADDED
@@ -0,0 +1,111 @@
+def get_translation_prompt(text, source_lang, target_lang, cultural_context):
+    """
+    Returns a prompt for translating the given text while considering cultural context.
+    """
+    return f"""
+    As an advanced cultural translation assistant, translate the following text from {source_lang} to {target_lang}, adapting it to a {cultural_context} context:
+
+    "{text}"
+
+    Provide your response in markdown format as follows, using Streamlit's markdown capabilities for enhanced visual appeal:
+
+    ## :blue[Translation]
+    > [Your translated text here]
+
+    ## :green[Cultural Adaptations]
+    - **Adaptation 1**: [Explanation]
+    - **Adaptation 2**: [Explanation]
+    [Add more adaptations as needed]
+
+    ## :orange[Alternative Phrasings]
+    1. ":violet[Original phrase]" → ":rainbow[Alternative 1]", ":rainbow[Alternative 2]"
+       - _Context_: [Explain when to use each alternative]
+
+    ## :red[Linguistic Analysis]
+    - **Register**: [Formal/Informal/etc.]
+    - **Tone**: [Describe the tone of the translation]
+    - **Key Challenges**: [Discuss any particularly challenging aspects of the translation]
+    """
+
+
+def get_sentiment_analysis_prompt(text, source_lang):
+    """
+    Returns a prompt for conducting sentiment analysis on a given text.
+    """
+    return f"""
+    Conduct a comprehensive sentiment analysis of the following {source_lang} text:
+
+    "{text}"
+
+    Provide your analysis in markdown format as follows:
+
+    ## :blue[Overall Sentiment]
+    [Positive/Negative/Neutral/Mixed]
+
+    ## :green[Sentiment Breakdown]
+    - **Positivity**: :smile: [Score from 0 to 1]
+    - **Negativity**: :frowning: [Score from 0 to 1]
+    - **Neutrality**: :neutral_face: [Score from 0 to 1]
+
+    ## :orange[Key Emotional Indicators]
+    1. **:heart: [Emotion 1]**:
+       - _Evidence_: ":violet[Relevant quote from text]"
+       - _Explanation_: [Brief analysis]
+
+    ## :earth_americas: Cultural Context
+    [Explain how the sentiment might be perceived in the {source_lang}-speaking culture, considering any cultural-specific expressions or connotations]
+    """
+
+def get_cultural_reference_explanation_prompt(text, source_lang, target_lang):
+    """
+    Returns a prompt to explain cultural references in a source language for a target language audience.
+    """
+    return f"""
+    As a cross-cultural communication expert, explain the cultural references in this {source_lang} text for someone from a {target_lang} background:
+
+    "{text}"
+
+    ## :earth_americas: Cultural References
+
+    1. **:star: [Reference 1]**
+       - _Meaning_: :blue[Explanation]
+       - _Cultural Significance_: :green[Brief description]
+       - _{target_lang} Equivalent_: :orange[Equivalent or similar concept, if applicable]
+       - _Usage Example_: ":violet[Show how it's used in a sentence]"
+
+    2. **:star: [Reference 2]**
+       - _Meaning_: :blue[Explanation]
+       - _Cultural Significance_: :green[Brief description]
+       - _{target_lang} Equivalent_: :orange[Equivalent or similar concept, if applicable]
+       - _Usage Example_: ":violet[Show how it's used in a sentence]"
+
+    ## :globe_with_meridians: Overall Cultural Context
+    [Summarize the cultural differences relevant to this text.]
+    """
+
+def get_interactive_translation_prompt(text, source_lang, target_lang):
+    """
+    Returns a prompt for providing an interactive, detailed translation with context.
+    """
+    return f"""
+    Translate the following text from {source_lang} to {target_lang} and provide an overall analysis of its meaning, usage, and cultural relevance:
+
+    "{text}"
+
+    ## :books: General Translation
+    **Text** → ":blue[Overall translation]"
+
+    ## :arrows_counterclockwise: Contextual Usage and Adaptation
+    1. ":green[Context 1]" - _Explanation_: [How the translation adapts to cultural context]
+    2. ":orange[Context 2]" - _Explanation_: [Alternative contextual usage]
+
+    ## :dna: Etymology and Origin
+    - **Origin**: :violet[Brief description of word origins or key concepts]
+    - **Related concepts**: :rainbow[If applicable, related words or phrases]
+
+    ## :memo: Usage Notes
+    - **Register**: :blue[Formal/Informal/etc.]
+    - **Connotations**: :green[Positive/Negative connotations of the translation]
+    - **Cultural Significance**: :orange[Explain the cultural impact or relevance of the translation]
+    """
+
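Each helper simply interpolates its arguments into a fixed markdown scaffold, so rendering one locally is enough to see exactly what gets sent to the model. A minimal sketch, with arbitrary example values:

from src.utils.prompt_templates import get_translation_prompt

prompt = get_translation_prompt(
    text="It was the best of times, it was the worst of times...",
    source_lang="English",
    target_lang="Spanish",
    cultural_context="Formal",
)
print(prompt)  # the translation instruction followed by the markdown response sections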