yashwanthputikam99 committed on
Commit
3edbdc9
·
verified ·
1 Parent(s): 0232ba4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -0
app.py CHANGED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import getpass
import os

import streamlit as st

import ibm_watsonx_ai
from ibm_watsonx_ai.foundation_models import Model
5
+
6
def get_credentials():
    """Return connection credentials for the IBM watsonx.ai service.

    The API key is read from the ``WATSONX_APIKEY`` environment variable
    when set; otherwise it falls back to the key that was previously
    hardcoded here, so existing deployments keep working.

    Returns:
        dict: ``{"url": ..., "apikey": ...}`` in the shape expected by
        ``ibm_watsonx_ai.foundation_models.Model``.
    """
    # SECURITY: a live-looking API key was committed to source control.
    # It should be revoked and supplied via an environment variable or a
    # secrets store; the fallback literal below is kept only for
    # backward compatibility until that is done.
    apikey = os.environ.get(
        "WATSONX_APIKEY", "O0g17oATnv3hpejzM9svvjaNQc7LX7xV2qH5418saEXD"
    )
    return {
        "url": "https://us-south.ml.cloud.ibm.com",
        "apikey": apikey,
    }
13
+
14
+
15
# --- Model selection and decoding configuration ------------------------
# Code Llama 34B (instruct-tuned) hosted on watsonx.ai.
model_id = "codellama/codellama-34b-instruct-hf"

# Greedy decoding gives deterministic output; generation is capped at
# 1000 new tokens, stops early at the "<end of code>" sentinel, and
# applies no repetition penalty (factor of 1).
parameters = dict(
    decoding_method="greedy",
    max_new_tokens=1000,
    min_new_tokens=1,
    stop_sequences=["<end of code>"],
    repetition_penalty=1,
)
24
+
25
# --- Streamlit UI ------------------------------------------------------
st.title('Code Generation with IBM WATSONX AI')

# Free-form instructions from the user; the raw text is sent verbatim as
# the model prompt.
prompt_input = st.text_area('Enter your prompt (instructions):', height=250)

if st.button('Generate Code'):
    st.text("Generating code...")

    # Credentials come from get_credentials(); the project id can be
    # overridden via WATSONX_PROJECT_ID and defaults to the original
    # hardcoded id for backward compatibility.
    credentials = get_credentials()
    project_id = os.environ.get(
        "WATSONX_PROJECT_ID", "78275590-aa27-4c1e-a426-f39640aa0003"
    )

    try:
        # The model is (re)built on every click; Model performs the
        # remote authentication, so both construction and generation can
        # raise on bad credentials or network failure.
        model = Model(
            model_id=model_id,
            params=parameters,
            credentials=credentials,
            project_id=project_id,
        )
        # guardrails=False: raw model output, no HAP/content filtering.
        generated_response = model.generate_text(
            prompt=prompt_input, guardrails=False
        )
    except Exception as exc:
        # Surface API/auth/network failures in the UI instead of letting
        # Streamlit dump a raw traceback on the page.
        st.error(f"Code generation failed: {exc}")
    else:
        st.subheader('Generated Code:')
        st.code(generated_response)

# Instructions or info section
st.markdown('''
### Instructions:
- Enter the instructions for code generation in the text area.
- Click on **Generate Code** to generate Python code based on the instructions.
''')