Hugging Face Spaces status: Sleeping
"""Train a tiny Keras sentiment classifier on a toy corpus, verify the
save/load round-trip, and optionally push the model to the Hugging Face Hub.

NOTE(review): the original file was wrapped in table-extraction pipe
artifacts (`| ... | |`) and was not valid Python; this is the cleaned script.
"""
import os

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from huggingface_hub import push_to_hub_keras

# Hugging Face access token, read from the environment so it is never hard-coded.
# May be None when the 'accesstoken' variable is unset — handled before the push below.
sac = os.getenv('accesstoken')

# Toy labelled corpus: 1 = positive sentiment, 0 = negative sentiment.
texts = ["I love deep learning!", "I hate Mondays.", "This movie is fantastic.", "The weather is terrible."]
labels = np.array([1, 0, 1, 0])

# Tokenize and pad every sample to a fixed length of 10 so batches have one shape.
tokenizer = Tokenizer(num_words=1000, oov_token='<OOV>')
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
padded_sequences = pad_sequences(sequences, maxlen=10, padding='post', truncating='post')

# Hold out 20% (1 of the 4 samples) for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    padded_sequences, labels, test_size=0.2, random_state=42
)

# Small embedding + average-pooling classifier; sigmoid output for binary sentiment.
model = Sequential([
    Embedding(input_dim=1000, output_dim=16),
    GlobalAveragePooling1D(),
    Dense(16, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model.
model.fit(X_train, y_train, epochs=5, batch_size=2)

# Evaluate on the held-out sample.
loss, accuracy = model.evaluate(X_test, y_test)
print(f'Accuracy: {accuracy * 100:.2f}%')

# Save with the .keras extension (the filepath extension Keras's native format requires).
model.save('my_custom_text_classifier.keras')
# tf.saved_model.save(model, "my_custom_text_classifier")

# Reload the saved model so the prediction below exercises the save/load round-trip.
loaded_model = tf.keras.models.load_model('my_custom_text_classifier.keras')
# loaded_model = tf.keras.layers.TFSMLayer("my_custom_text_classifier", call_endpoint="serving_default")

# Example prediction on unseen texts, using the SAME tokenizer fitted above.
new_texts = ["I'm feeling great!", "This book is boring."]
sequences = tokenizer.texts_to_sequences(new_texts)
padded_sequences = pad_sequences(sequences, maxlen=10, padding='post', truncating='post')
# FIX: predict with the reloaded model — the original predicted with `model`,
# leaving `loaded_model` unused and the round-trip unverified.
predictions = loaded_model.predict(padded_sequences)
print(predictions)

# FIX: only attempt the Hub upload when a token is actually available;
# push_to_hub_keras with token=None would fail at the network boundary.
if sac:
    push_to_hub_keras(
        model,
        repo_id="okeowo1014/kerascatanddog",
        commit_message="Initial commit",
        token=sac,
    )
else:
    print("Skipping Hub upload: 'accesstoken' environment variable is not set.")