File size: 1,384 Bytes
ebfca3b
 
 
 
 
 
 
 
 
 
 
 
 
5a65012
 
 
ebfca3b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73ddd3d
ebfca3b
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
from transformers import BertTokenizerFast, BertModel
import gradio as gr

# Korean BERT (kykim/bert-kor-base): used to embed both the user's message
# and (previously, offline) the book reviews stored in review_emb.pkl.
tokenizer_bert = BertTokenizerFast.from_pretrained("kykim/bert-kor-base")
model_bert = BertModel.from_pretrained("kykim/bert-kor-base")

from sklearn.metrics.pairwise import cosine_similarity

import pandas as pd
# df: book metadata (includes a '제λͺ©' / title column).
# df_emb: precomputed review embeddings in column 'μ„œν‰μž„λ² λ”©'.
# NOTE(review): recommend() indexes df with labels taken from df_emb,
# so the two frames are presumably aligned on the same index — verify.
df = pd.read_pickle('BookData_real_real_final.pkl')
df_emb = pd.read_pickle('review_emb.pkl')


def embed_text(text, tokenizer=tokenizer_bert, model=model_bert):
    """Embed *text* as the mean of BERT's last-hidden-state token vectors.

    Parameters
    ----------
    text : str
        Input text to embed.
    tokenizer, model
        HuggingFace tokenizer/model pair; default to the module-level
        Korean BERT (kykim/bert-kor-base).

    Returns
    -------
    numpy.ndarray
        1-D embedding vector (hidden_size,) for the single input text.
    """
    import torch  # local import: file has no top-level torch import

    inputs = tokenizer(text, return_tensors="pt")
    # Inference only — no_grad() skips building the autograd graph,
    # saving memory and time on every chat request.
    with torch.no_grad():
        outputs = model(**inputs)
    # Mean-pool over the sequence dimension to get one vector per text.
    embeddings = outputs.last_hidden_state.mean(dim=1)
    return embeddings.detach().numpy()[0]
    
def recommend(message, history=None):
    """Return the title of the book whose review embedding best matches *message*.

    Parameters
    ----------
    message : str
        The user's message (e.g. a description of their concern).
    history : list, optional
        Chat history passed by gr.ChatInterface, which always calls
        fn(message, history). Unused here; the default keeps the function
        callable with a single argument as before.

    Returns
    -------
    str
        The '제λͺ©' (title) value of the best-matching row in df.
    """
    emb = embed_text(message)
    # Cosine similarity between the query embedding and every stored
    # review embedding; result is a Series indexed like df_emb.
    similarity = df_emb['μ„œν‰μž„λ² λ”©'].map(
        lambda x: cosine_similarity([emb], [x]).squeeze()
    )
    # idxmax gives the index label of the most similar review; assumes df
    # and df_emb share the same index — TODO confirm against the pickles.
    answer = df.loc[similarity.idxmax()]
    return answer['제λͺ©']





# --- Gradio chat UI -------------------------------------------------------
title = "πŸ€κ³ λ―Ό ν•΄κ²° λ„μ„œ μΆ”μ²œ μ±—λ΄‡πŸ€"
description = "λ‹Ήμ‹ μ˜ κ³ λ―Ό 해결을 도와쀄 책을 μΆ”μ²œ ν•΄λ“œλ¦½λ‹ˆλ‹€"
examples = [["μš”μ¦˜ 잠이 μ•ˆ 와"]]


# ChatInterface supplies its own textbox/chatbot components and calls
# fn(message, history); it accepts no `inputs=`/`outputs=` arguments —
# passing them raises TypeError, so they are omitted here.
gr.ChatInterface(
    fn=recommend,
    title=title,
    description=description,
    examples=examples,
    theme="finlaymacklon/boxy_violet",
).launch()