# Streamlit app: code generation with IBM watsonx.ai (codellama-34b-instruct-hf).
import streamlit as st
import getpass
from ibm_watsonx_ai.foundation_models import Model
def get_credentials():
    """Return watsonx.ai credentials for the us-south region.

    The API key is read from the ``WATSONX_APIKEY`` environment variable
    rather than being embedded in source code.

    Returns:
        dict: ``{"url": <endpoint URL>, "apikey": <API key, "" if unset>}``.
    """
    import os  # local import so this function is self-contained

    # NOTE(review): a previous revision hard-coded a literal API key here —
    # treat that key as compromised and rotate it.
    apikey = os.environ.get("WATSONX_APIKEY", "")
    return {
        "url": "https://us-south.ml.cloud.ibm.com",
        "apikey": apikey,
    }
# Model selection and decoding parameters for the watsonx.ai generation call.
model_id = "codellama/codellama-34b-instruct-hf"

# Greedy decoding gives deterministic output; generation halts at the literal
# "<end of code>" marker or after 1000 new tokens, whichever comes first.
parameters = dict(
    decoding_method="greedy",
    max_new_tokens=1000,
    min_new_tokens=1,
    stop_sequences=["<end of code>"],
    repetition_penalty=1,
)
# --- Streamlit UI ------------------------------------------------------------
st.title('Code Generation with IBM WATSONX AI')

# Prompt entry: free-form natural-language instructions for the model.
prompt_input = st.text_area('Enter your prompt (instructions):', height=250)

if st.button('Generate Code'):
    # Guard: don't call the metered inference API with an empty prompt.
    if not prompt_input.strip():
        st.warning('Please enter a prompt before generating code.')
    else:
        st.text("Generating code...")
        # The model is constructed per click; credentials come from the
        # environment via get_credentials().
        credentials = get_credentials()
        model = Model(
            model_id=model_id,
            params=parameters,
            credentials=credentials,
            # NOTE(review): project id is hard-coded — consider moving it to
            # configuration alongside the API key.
            project_id="78275590-aa27-4c1e-a426-f39640aa0003",
        )
        # guardrails=False disables the service-side content filter for this
        # request; the raw model output is shown to the user.
        generated_response = model.generate_text(prompt=prompt_input, guardrails=False)
        st.subheader('Generated Code:')
        st.code(generated_response)
# Usage instructions rendered below the app. (A trailing " |" scrape artifact
# after the closing parenthesis was removed — it was a SyntaxError.)
st.markdown('''
### Instructions:
- Enter the instructions for code generation in the text area.
- Click on **Generate Code** to generate Python code based on the instructions.
''')