"""Gradio app: token-by-token summary generation with a fine-tuned TF causal LM.

The model is expected to live in ``<cwd>/summarization/model`` (a directory
produced by ``save_pretrained``); the tokenizer is the stock GPT-2 tokenizer.
"""

import os

import gradio as gr
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

# Resolve the model directory relative to the current working directory.
current_dir = os.getcwd()
model_folder = "summarization/model"
model_path = os.path.join(current_dir, model_folder)

# BUG FIX: the original code built `model_path` but then loaded "model",
# a different relative path. Load from the constructed absolute path.
custom_model = TFAutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained("gpt2")


def generate_text(input_text):
    """Generate a summary for *input_text* by sampling tokens one at a time.

    The prompt is framed as ``document: <text> summary:`` to match the
    fine-tuning format. The number of generated tokens is heuristically
    capped at one quarter of the prompt's whitespace-delimited word count.

    Args:
        input_text: The document to summarize.

    Returns:
        The decoded generated tokens concatenated into a single string
        (GPT-2 BPE tokens carry their own leading spaces, so no separator
        is needed).
    """
    prompt = f"document: {input_text} summary:"
    # Encode the prompt with the GPT-2 tokenizer as a TF tensor.
    input_ids = tokenizer.encode(prompt, return_tensors="tf")

    generated_tokens = []
    for _ in range(len(prompt.split()) // 4):
        # Full forward pass; sample the next token from the logits of the
        # last position. (No KV cache is used, so each step re-runs the
        # whole sequence — acceptable for short summaries.)
        output = custom_model(input_ids)
        next_token_logits = output.logits[:, -1, :]
        next_token_id = tf.cast(
            tf.random.categorical(next_token_logits, num_samples=1), tf.int32
        )

        # Append the sampled token so the next step conditions on it.
        input_ids = tf.concat([input_ids, next_token_id], axis=-1)
        generated_tokens.append(tokenizer.decode(next_token_id.numpy()[0][0]))

    return "".join(generated_tokens)


# gr.inputs / gr.outputs were removed in modern Gradio; gr.Textbox is the
# supported component for both input and output in Gradio 3.x and 4.x.
input_text = gr.Textbox(lines=5, label="Input Text")
output_text = gr.Textbox(label="Generated Text")

app = gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text)

if __name__ == "__main__":
    app.launch()