mjamalm18 commited on
Commit
ec91155
·
verified ·
1 Parent(s): d9920a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +300 -300
app.py CHANGED
@@ -1,301 +1,301 @@
1
- import tensorflow as tf
2
- from tensorflow.keras import layers
3
-
4
- import pandas as pd
5
- import numpy as np
6
- import joblib
7
-
8
- from sklearn.metrics.pairwise import cosine_similarity
9
- from transformers import T5Tokenizer, T5ForConditionalGeneration
10
-
11
- from fastapi import FastAPI, Request
12
- # from IPython.display import display
13
-
14
- app = FastAPI()
15
-
16
- @tf.keras.utils.register_keras_serializable()
17
- class RecommenderNet(tf.keras.Model):
18
- def __init__(self, num_users, num_places, embedding_size, dropout_rate, **kwargs):
19
- super().__init__(**kwargs)
20
-
21
- self.num_users = num_users
22
- self.num_places = num_places
23
- self.embedding_size = embedding_size
24
- self.dropout_rate = dropout_rate
25
-
26
- self.user_embedding = layers.Embedding(
27
- num_users,
28
- embedding_size,
29
- embeddings_initializer='he_normal',
30
- embeddings_regularizer=tf.keras.regularizers.l2(1e-4)
31
- )
32
- self.user_bias = layers.Embedding(num_users, 1)
33
-
34
- self.place_embedding = layers.Embedding(
35
- num_places,
36
- embedding_size,
37
- embeddings_initializer='he_normal',
38
- embeddings_regularizer=tf.keras.regularizers.l2(1e-4)
39
- )
40
- self.place_bias = layers.Embedding(num_places, 1)
41
-
42
- self.dropout = layers.Dropout(dropout_rate)
43
-
44
- def call(self, inputs):
45
- user_vector = self.user_embedding(inputs[:, 0])
46
- user_vector = self.dropout(user_vector)
47
-
48
- user_bias = self.user_bias(inputs[:, 0])
49
- place_vector = self.place_embedding(inputs[:, 1])
50
- place_vector = self.dropout(place_vector)
51
-
52
- place_bias = self.place_bias(inputs[:, 1])
53
-
54
- dot_user_place = tf.reduce_sum(user_vector * place_vector, axis=1, keepdims=True)
55
- x = dot_user_place + user_bias + place_bias
56
- return tf.squeeze(x, axis=1)
57
-
58
- def get_config(self):
59
- config = super().get_config()
60
- config.update({
61
- 'num_users': self.num_users,
62
- 'num_places': self.num_places,
63
- 'embedding_size': self.embedding_size,
64
- 'dropout_rate': self.dropout_rate,
65
- })
66
- return config
67
-
68
- @classmethod
69
- def from_config(cls, config):
70
- return cls(**config)
71
-
72
- destinasi_df = pd.read_csv('data/destinasi_df.csv')
73
- rating_df = pd.read_csv('data/rating_df.csv')
74
- cb_df = pd.read_csv('data/cb_df.csv')
75
-
76
- cosine_sim_df = joblib.load('models/cosine_sim_df.pkl')
77
- model_cf = tf.keras.models.load_model(
78
- 'models/collab_model.keras',
79
- custom_objects={'RecommenderNet': RecommenderNet}
80
- )
81
- user_to_user_encoded = joblib.load('models/user_to_user_encoded.pkl')
82
- place_to_place_encoded = joblib.load('models/place_to_place_encoded.pkl')
83
- tfidf_vectorizer = joblib.load('models/tfidf_vectorizer.pkl')
84
- tfidf_matrix = tfidf_vectorizer.transform(cb_df['Combined_Features'])
85
-
86
- def content_based_recommendations(place_name, similarity_data=cosine_sim_df, items=cb_df, k=5):
87
-
88
- if place_name not in items['Place_Name'].values:
89
- return pd.DataFrame()
90
-
91
- index = items[items['Place_Name'] == place_name].index[0]
92
- sim_scores = list(enumerate(similarity_data.iloc[index]))
93
- sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
94
- sim_scores = sim_scores[1:k+1]
95
- place_indices = [i[0] for i in sim_scores]
96
- place_ids = items.iloc[place_indices]['Place_Id'].tolist()
97
-
98
- return place_ids
99
-
100
- def collaborative_filtering_recommendations(user_id, n=5):
101
-
102
- if user_id not in user_to_user_encoded:
103
- return pd.DataFrame()
104
-
105
- user_encoded = user_to_user_encoded[user_id]
106
- place_ids = rating_df['Place_Id'].unique()
107
- visited_places = rating_df[rating_df['User_Id'] == user_id]['Place_Id']
108
- place_ids_unvisited = [p for p in place_ids if p not in visited_places]
109
- place_encoded_unvisited = [
110
- place_to_place_encoded[p] for p in place_ids_unvisited
111
- if p in place_to_place_encoded
112
- ]
113
-
114
- user_place_array = np.array([[user_encoded, p_enc] for p_enc in place_encoded_unvisited])
115
- ratings = model_cf.predict(user_place_array).flatten()
116
- top_ratings_indices = ratings.argsort()[-n:][::-1]
117
- recommended_place_ids = [place_ids_unvisited[i] for i in top_ratings_indices]
118
-
119
- return recommended_place_ids
120
-
121
- def get_travel_recommendations(user_id, favorite_place=None):
122
-
123
- all_recommendations = []
124
- cf_recs = collaborative_filtering_recommendations(user_id)
125
- all_recommendations.extend(cf_recs)
126
-
127
- if favorite_place:
128
- cb_recs = content_based_recommendations(favorite_place)
129
- all_recommendations.extend(cb_recs)
130
-
131
- unique_recommendations = list(set(all_recommendations))
132
- recommendations_df = destinasi_df[
133
- destinasi_df['Place_Id'].isin(unique_recommendations)
134
- ].copy()
135
-
136
- recommendations_df['Recommendation_Source'] = 'Hybrid'
137
- recommendations_df.loc[
138
- recommendations_df['Place_Id'].isin(cf_recs), 'Recommendation_Source'
139
- ] = 'Collaborative'
140
-
141
- if favorite_place:
142
- recommendations_df.loc[
143
- recommendations_df['Place_Id'].isin(cb_recs), 'Recommendation_Source'
144
- ] = 'Content-Based'
145
-
146
- return recommendations_df
147
-
148
-
149
- #IMPLEMENTASI DENGAN MENGGGABUNGKAN 2 PENDEKATAN YANG LEBIH FLEKSIBEL
150
-
151
- # new_user_recs = get_travel_recommendations(user_id=1)
152
- # user_recs = get_travel_recommendations(
153
- # user_id= 3,
154
- # favorite_place= "Monumen Nasional"
155
- # )
156
-
157
- # print("Rekomendasi untuk user dengan favorite place 'Monumen Nasional':")
158
- # from IPython.display import display
159
- # display(user_recs)
160
-
161
- # print("Rekomendasi untuk user baru (tanpa favorite place):")
162
- # display(new_user_recs)
163
-
164
-
165
-
166
- #IMPLEMENTASI HANYA BERDASARKAN CONTENT DESTINASINYA DENGAN INPUT KATEGORI NAMA ATAU KOTA
167
-
168
- def infer_cbf_search(query, top_k=10):
169
- """
170
- Fungsi inference Content-Based Filtering menggunakan cosine similarity
171
- antara query dan TF-IDF matrix dari Combined_Features.
172
- Juga menyesuaikan skor berdasarkan City & Category.
173
- """
174
- weight_city = 0.15
175
- weight_category = 0.05
176
-
177
- query = query.lower().strip()
178
- keywords = query.split()
179
-
180
- query_vec = tfidf_vectorizer.transform([query])
181
- similarity_scores = cosine_similarity(query_vec, tfidf_matrix).flatten()
182
- top_indices = similarity_scores.argsort()[::-1][:top_k * 3]
183
-
184
- unique_cities = cb_df['City'].str.lower().unique().tolist()
185
- city_in_query = [c for c in unique_cities if c in query]
186
-
187
- recommendations = []
188
- for idx in top_indices:
189
- place = cb_df.iloc[idx]
190
- base_score = similarity_scores[idx]
191
- adjusted_score = base_score
192
-
193
- if city_in_query and place['City'].lower() in city_in_query:
194
- adjusted_score += weight_city
195
-
196
- if any(kw in place['Category'].lower() for kw in keywords):
197
- adjusted_score += weight_category
198
-
199
- rec = place[['Place_Id']].copy()
200
- rec['Similarity_Score'] = round(adjusted_score, 4)
201
- rec['Search_Match'] = query
202
- recommendations.append(rec)
203
-
204
- rec_df = pd.DataFrame(recommendations)
205
- rec_df = rec_df.sort_values('Similarity_Score', ascending=False)
206
- rec_df = rec_df.drop_duplicates(subset=['Place_Id']).head(top_k)
207
-
208
- merged_df = pd.merge(rec_df, destinasi_df, on='Place_Id', how='left')
209
- return merged_df.to_dict(orient='records')
210
-
211
- # hasil = infer_cbf_search("budaya yogyakarta ", top_k=5)
212
- # display(hasil)
213
-
214
-
215
- #GENERATIVE AI UNTUK TEKS REKOMENDASI SINGKAT
216
-
217
- model_dir = "mjamalm18/t5-finetuned-recommendation-final"
218
- tokenizer = T5Tokenizer.from_pretrained(model_dir, legacy=True)
219
- model = T5ForConditionalGeneration.from_pretrained(model_dir)
220
-
221
- # rekom_texts = []
222
- # for _, row in user_recs.iterrows():
223
- # teks = f"{row['Place_Name']} di {row['City']}, kategori {row['Category']}, rating {row['Rating']}"
224
- # rekom_texts.append(teks)
225
- # input_text = "Rekomendasi tempat wisata: " + "; ".join(rekom_texts)
226
-
227
- def generate_natural_recommendation(user_id, favorite_place=None, top_n=1):
228
-
229
- user_recs = get_travel_recommendations(user_id=user_id, favorite_place=favorite_place)
230
-
231
- if user_recs.empty:
232
- return "Tidak ada rekomendasi tersedia untuk user ini."
233
-
234
- user_recs = user_recs.head(top_n)
235
- input_template = "User menyukai kategori: {category}; lokasi: {city}; tempat: {place}; rating: {rating}"
236
-
237
- parts = []
238
- for _, row in user_recs.iterrows():
239
- part = input_template.format(
240
- category=row['Category'],
241
- city=row['City'],
242
- place=row['Place_Name'],
243
- rating=row['Rating']
244
- )
245
- parts.append(part)
246
-
247
- input_text = " ; ".join(parts)
248
- inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
249
- outputs = model.generate(**inputs, max_length=150)
250
- result_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
251
-
252
- return result_text
253
-
254
- # hasil = generate_natural_recommendation(user_id=1,favorite_place="Kampung Wisata Taman Sari")
255
- # print(hasil)
256
-
257
- @app.post("/recommendations")
258
- async def recommendations(request: Request):
259
- body = await request.json()
260
- user_id = body.get("user_id")
261
- favorite_place = body.get("favorite_place")
262
- print(user_id)
263
- print(favorite_place)
264
-
265
- try:
266
- user_id = int(user_id)
267
- except (ValueError, TypeError):
268
- return {"user_id": user_id, "recommendations": []}
269
-
270
- result = get_travel_recommendations(user_id, favorite_place)
271
- return {"user_id": user_id, "recommendations": result.to_dict(orient='records')}
272
-
273
- @app.post("/search")
274
- async def search(request: Request):
275
- body = await request.json()
276
- place = body.get("place")
277
- result = infer_cbf_search(place)
278
- return {"query": place, "results": result}
279
-
280
- @app.post("/textgen")
281
- async def textgen(request: Request):
282
- body = await request.json()
283
- user_id = body.get("user_id")
284
- favorite_place = body.get("favorite_place")
285
-
286
- try:
287
- user_id = int(user_id)
288
- except (ValueError, TypeError):
289
- return {
290
- "user_id": user_id,
291
- "gen_text": "User ID tidak valid."
292
- }
293
-
294
- # Panggil fungsi generate_natural_recommendation
295
- gen_text = generate_natural_recommendation(user_id, favorite_place)
296
-
297
- return {
298
- "user_id": user_id,
299
- "favorite_place": favorite_place,
300
- "gen_text": gen_text
301
  }
 
1
+ import tensorflow as tf
2
+ from tensorflow.keras import layers
3
+
4
+ import pandas as pd
5
+ import numpy as np
6
+ import joblib
7
+
8
+ from sklearn.metrics.pairwise import cosine_similarity
9
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
10
+
11
+ from fastapi import FastAPI, Request
12
+ # from IPython.display import display
13
+
14
# ASGI application object; endpoints below are registered on it.
app = FastAPI()
15
+
16
@tf.keras.utils.register_keras_serializable()
class RecommenderNet(tf.keras.Model):
    """Matrix-factorization recommender.

    Scores a (user, place) pair as dot(user_embedding, place_embedding)
    plus per-user and per-place bias terms. Inputs are integer-encoded
    ids packed as columns of a single tensor (see ``call``).
    """

    def __init__(self, num_users, num_places, embedding_size, dropout_rate, **kwargs):
        super().__init__(**kwargs)

        # Kept as attributes so get_config() can round-trip the model
        # through Keras save/load (enabled by the decorator above).
        self.num_users = num_users
        self.num_places = num_places
        self.embedding_size = embedding_size
        self.dropout_rate = dropout_rate

        self.user_embedding = layers.Embedding(
            num_users,
            embedding_size,
            embeddings_initializer='he_normal',
            embeddings_regularizer=tf.keras.regularizers.l2(1e-4)
        )
        self.user_bias = layers.Embedding(num_users, 1)

        self.place_embedding = layers.Embedding(
            num_places,
            embedding_size,
            embeddings_initializer='he_normal',
            embeddings_regularizer=tf.keras.regularizers.l2(1e-4)
        )
        self.place_bias = layers.Embedding(num_places, 1)

        self.dropout = layers.Dropout(dropout_rate)

    def call(self, inputs):
        # inputs[:, 0] = encoded user id, inputs[:, 1] = encoded place id.
        user_vector = self.user_embedding(inputs[:, 0])
        user_vector = self.dropout(user_vector)

        user_bias = self.user_bias(inputs[:, 0])
        place_vector = self.place_embedding(inputs[:, 1])
        place_vector = self.dropout(place_vector)

        place_bias = self.place_bias(inputs[:, 1])

        # Row-wise dot product, then both biases; squeeze to shape (batch,).
        dot_user_place = tf.reduce_sum(user_vector * place_vector, axis=1, keepdims=True)
        x = dot_user_place + user_bias + place_bias
        return tf.squeeze(x, axis=1)

    def get_config(self):
        """Return constructor kwargs so Keras can serialize the model."""
        config = super().get_config()
        config.update({
            'num_users': self.num_users,
            'num_places': self.num_places,
            'embedding_size': self.embedding_size,
            'dropout_rate': self.dropout_rate,
        })
        return config

    @classmethod
    def from_config(cls, config):
        """Rebuild the model from a get_config() dict."""
        return cls(**config)
71
+
72
# Datasets: destination metadata, user ratings, and the content-based
# feature frame (assumed to share Place_Id keys — verify against the CSVs).
destinasi_df = pd.read_csv('data/destinasi_df.csv')
rating_df = pd.read_csv('data/rating_df.csv')
cb_df = pd.read_csv('data/cb_df.csv')

# Precomputed artifacts: item-item cosine-similarity frame, the trained
# collaborative-filtering model, id<->encoding maps, and the fitted
# TF-IDF vectorizer re-applied to Combined_Features for search.
cosine_sim_df = joblib.load('models/cosine_sim_df.pkl')
model_cf = tf.keras.models.load_model(
    'models/collab_model.keras',
    custom_objects={'RecommenderNet': RecommenderNet}
)
user_to_user_encoded = joblib.load('models/user_to_user_encoded.pkl')
place_to_place_encoded = joblib.load('models/place_to_place_encoded.pkl')
tfidf_vectorizer = joblib.load('models/tfidf_vectorizer.pkl')
tfidf_matrix = tfidf_vectorizer.transform(cb_df['Combined_Features'])
85
+
86
def content_based_recommendations(place_name, similarity_data=None, items=None, k=5):
    """Return the Place_Ids of the ``k`` places most similar to *place_name*.

    Args:
        place_name: exact Place_Name to look up in *items*.
        similarity_data: square similarity frame, row-aligned with *items*
            positions; defaults to the module-level ``cosine_sim_df``.
        items: frame with Place_Name / Place_Id columns; defaults to ``cb_df``.
        k: number of neighbours to return.

    Returns:
        list of Place_Ids, or [] for an unknown place. (Previously the
        unknown-place branch returned an empty DataFrame while the normal
        branch returned a list — an inconsistent contract for callers that
        extend()/isin() the result.)
    """
    # Late-bind the module-level defaults instead of freezing them into the
    # signature at import time; also makes the function testable with
    # injected frames.
    if similarity_data is None:
        similarity_data = cosine_sim_df
    if items is None:
        items = cb_df

    if place_name not in items['Place_Name'].values:
        return []

    # NOTE(review): index label is used positionally via iloc below — this
    # assumes a default RangeIndex on *items*, as the original did.
    index = items[items['Place_Name'] == place_name].index[0]
    sim_scores = sorted(enumerate(similarity_data.iloc[index]),
                        key=lambda s: s[1], reverse=True)
    # Skip the first entry (the place itself), keep the next k.
    top = sim_scores[1:k + 1]
    place_indices = [i for i, _ in top]
    return items.iloc[place_indices]['Place_Id'].tolist()
99
+
100
def collaborative_filtering_recommendations(user_id, n=5):
    """Return the top-``n`` unvisited Place_Ids predicted by the CF model.

    Returns [] for users not seen during training (previously an empty
    DataFrame, inconsistent with the list returned otherwise).
    """
    if user_id not in user_to_user_encoded:
        return []

    user_encoded = user_to_user_encoded[user_id]
    place_ids = rating_df['Place_Id'].unique()

    # BUG FIX: `p in series` tests the Series *index*, not its values, so
    # the original filter never actually excluded visited places.
    visited = set(rating_df[rating_df['User_Id'] == user_id]['Place_Id'].values)

    # BUG FIX: keep raw ids and encoded ids paired. The original predicted
    # only for ids present in place_to_place_encoded but indexed the
    # resulting ranks back into the *unfiltered* id list, misaligning
    # predictions with places whenever any id was missing from the encoder.
    candidates = [
        (p, place_to_place_encoded[p])
        for p in place_ids
        if p not in visited and p in place_to_place_encoded
    ]
    if not candidates:
        # Nothing to score (e.g. user has visited everything encodable).
        return []

    user_place_array = np.array([[user_encoded, enc] for _, enc in candidates])
    ratings = model_cf.predict(user_place_array).flatten()
    # Indices of the n largest predicted ratings, best first.
    top_ratings_indices = ratings.argsort()[-n:][::-1]
    return [candidates[i][0] for i in top_ratings_indices]
120
+
121
def get_travel_recommendations(user_id, favorite_place=None):
    """Hybrid recommendations for *user_id*.

    Combines collaborative-filtering picks with, when *favorite_place* is
    given, content-based neighbours of that place, and labels each row
    with the approach that produced it.
    """
    cf_recs = collaborative_filtering_recommendations(user_id)
    cb_recs = []

    combined = list(cf_recs)
    if favorite_place:
        cb_recs = content_based_recommendations(favorite_place)
        combined = combined + list(cb_recs)

    # Deduplicate and pull the full destination rows.
    picks = destinasi_df[destinasi_df['Place_Id'].isin(set(combined))].copy()

    # Label provenance; content-based overwrites collaborative on overlap,
    # matching the original assignment order.
    picks['Recommendation_Source'] = 'Hybrid'
    picks.loc[picks['Place_Id'].isin(cf_recs), 'Recommendation_Source'] = 'Collaborative'
    if favorite_place:
        picks.loc[picks['Place_Id'].isin(cb_recs), 'Recommendation_Source'] = 'Content-Based'

    return picks
147
+
148
+
149
+ #IMPLEMENTASI DENGAN MENGGGABUNGKAN 2 PENDEKATAN YANG LEBIH FLEKSIBEL
150
+
151
+ # new_user_recs = get_travel_recommendations(user_id=1)
152
+ # user_recs = get_travel_recommendations(
153
+ # user_id= 3,
154
+ # favorite_place= "Monumen Nasional"
155
+ # )
156
+
157
+ # print("Rekomendasi untuk user dengan favorite place 'Monumen Nasional':")
158
+ # from IPython.display import display
159
+ # display(user_recs)
160
+
161
+ # print("Rekomendasi untuk user baru (tanpa favorite place):")
162
+ # display(new_user_recs)
163
+
164
+
165
+
166
+ #IMPLEMENTASI HANYA BERDASARKAN CONTENT DESTINASINYA DENGAN INPUT KATEGORI NAMA ATAU KOTA
167
+
168
def infer_cbf_search(query, top_k=10):
    """Content-based free-text search over destinations.

    Scores every destination by cosine similarity between the query's
    TF-IDF vector and the precomputed Combined_Features TF-IDF matrix,
    then boosts candidates whose City appears in the query or whose
    Category shares a query keyword. Returns a list of record dicts
    merged with the full destination metadata.
    """
    # Fixed additive boosts; base similarity still dominates the ranking.
    weight_city = 0.15
    weight_category = 0.05

    query = query.lower().strip()
    keywords = query.split()

    query_vec = tfidf_vectorizer.transform([query])
    similarity_scores = cosine_similarity(query_vec, tfidf_matrix).flatten()
    # Over-fetch 3x candidates so the boosts can reorder before truncation.
    top_indices = similarity_scores.argsort()[::-1][:top_k * 3]

    unique_cities = cb_df['City'].str.lower().unique().tolist()
    # Cities mentioned anywhere in the query string (substring match).
    city_in_query = [c for c in unique_cities if c in query]

    recommendations = []
    for idx in top_indices:
        place = cb_df.iloc[idx]
        base_score = similarity_scores[idx]
        adjusted_score = base_score

        if city_in_query and place['City'].lower() in city_in_query:
            adjusted_score += weight_city

        if any(kw in place['Category'].lower() for kw in keywords):
            adjusted_score += weight_category

        # Accrete a one-row Series; pd.DataFrame(list-of-Series) below
        # stacks them into columns Place_Id / Similarity_Score / Search_Match.
        rec = place[['Place_Id']].copy()
        rec['Similarity_Score'] = round(adjusted_score, 4)
        rec['Search_Match'] = query
        recommendations.append(rec)

    rec_df = pd.DataFrame(recommendations)
    rec_df = rec_df.sort_values('Similarity_Score', ascending=False)
    rec_df = rec_df.drop_duplicates(subset=['Place_Id']).head(top_k)

    # Attach full destination metadata before serializing to records.
    merged_df = pd.merge(rec_df, destinasi_df, on='Place_Id', how='left')
    return merged_df.to_dict(orient='records')
210
+
211
+ # hasil = infer_cbf_search("budaya yogyakarta ", top_k=5)
212
+ # display(hasil)
213
+
214
+
215
+ #GENERATIVE AI UNTUK TEKS REKOMENDASI SINGKAT
216
+
217
# Fine-tuned T5 used to phrase short natural-language recommendation text.
# NOTE(review): this is a local path (the previous revision pulled
# "mjamalm18/t5-finetuned-recommendation-final" from the Hub) — confirm the
# directory is shipped with the app.
model_dir = "t5-finetuned-recommendation-final"
tokenizer = T5Tokenizer.from_pretrained(model_dir, legacy=True)
model = T5ForConditionalGeneration.from_pretrained(model_dir)
220
+
221
+ # rekom_texts = []
222
+ # for _, row in user_recs.iterrows():
223
+ # teks = f"{row['Place_Name']} di {row['City']}, kategori {row['Category']}, rating {row['Rating']}"
224
+ # rekom_texts.append(teks)
225
+ # input_text = "Rekomendasi tempat wisata: " + "; ".join(rekom_texts)
226
+
227
def generate_natural_recommendation(user_id, favorite_place=None, top_n=1):
    """Generate a short natural-language blurb for the user's top picks.

    Builds a prompt from the top_n hybrid recommendations and runs it
    through the fine-tuned T5 model.
    """
    recs = get_travel_recommendations(user_id=user_id, favorite_place=favorite_place)
    if recs.empty:
        return "Tidak ada rekomendasi tersedia untuk user ini."

    # One prompt fragment per recommended row, in the format the
    # fine-tuned model was trained on.
    parts = [
        f"User menyukai kategori: {row['Category']}; lokasi: {row['City']}; "
        f"tempat: {row['Place_Name']}; rating: {row['Rating']}"
        for _, row in recs.head(top_n).iterrows()
    ]

    input_text = " ; ".join(parts)
    inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True)
    outputs = model.generate(**inputs, max_length=150)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
253
+
254
+ # hasil = generate_natural_recommendation(user_id=1,favorite_place="Kampung Wisata Taman Sari")
255
+ # print(hasil)
256
+
257
+ @app.post("/recommendations")
258
+ async def recommendations(request: Request):
259
+ body = await request.json()
260
+ user_id = body.get("user_id")
261
+ favorite_place = body.get("favorite_place")
262
+ print(user_id)
263
+ print(favorite_place)
264
+
265
+ try:
266
+ user_id = int(user_id)
267
+ except (ValueError, TypeError):
268
+ return {"user_id": user_id, "recommendations": []}
269
+
270
+ result = get_travel_recommendations(user_id, favorite_place)
271
+ return {"user_id": user_id, "recommendations": result.to_dict(orient='records')}
272
+
273
+ @app.post("/search")
274
+ async def search(request: Request):
275
+ body = await request.json()
276
+ place = body.get("place")
277
+ result = infer_cbf_search(place)
278
+ return {"query": place, "results": result}
279
+
280
+ @app.post("/textgen")
281
+ async def textgen(request: Request):
282
+ body = await request.json()
283
+ user_id = body.get("user_id")
284
+ favorite_place = body.get("favorite_place")
285
+
286
+ try:
287
+ user_id = int(user_id)
288
+ except (ValueError, TypeError):
289
+ return {
290
+ "user_id": user_id,
291
+ "gen_text": "User ID tidak valid."
292
+ }
293
+
294
+ # Panggil fungsi generate_natural_recommendation
295
+ gen_text = generate_natural_recommendation(user_id, favorite_place)
296
+
297
+ return {
298
+ "user_id": user_id,
299
+ "favorite_place": favorite_place,
300
+ "gen_text": gen_text
301
  }