Datasets:
commit files to HF hub
Browse files- .gitattributes +1 -0
- code/01-get_products_list.py +55 -0
- code/02-remove_duplicates_and_get_reviews.py +76 -0
- code/03-get_parts_in_csv.py +90 -0
- code/04-process_csv.py +188 -0
- code/06-train-dev-test-split.py +170 -0
- code/get_data.sh +11 -0
- code/python-sdk/README.md +115 -0
- code/python-sdk/examples/delete.py +11 -0
- code/python-sdk/examples/example_login.py +25 -0
- code/python-sdk/examples/get.py +12 -0
- code/python-sdk/examples/post.py +12 -0
- code/python-sdk/examples/put.py +12 -0
- code/python-sdk/lib/__init__.py +0 -0
- code/python-sdk/lib/__pycache__/meli.cpython-37.pyc +0 -0
- code/python-sdk/lib/__pycache__/meli.cpython-38.pyc +0 -0
- code/python-sdk/lib/__pycache__/ssl_helper.cpython-37.pyc +0 -0
- code/python-sdk/lib/__pycache__/ssl_helper.cpython-38.pyc +0 -0
- code/python-sdk/lib/config.ini +24 -0
- code/python-sdk/lib/meli.py +145 -0
- code/python-sdk/lib/ssl_helper.py +19 -0
- code/python-sdk/test/__init__.py +0 -0
- code/python-sdk/test/main.py +144 -0
- code/python-sdk/teste.py +3 -0
- es/test.csv +3 -0
- es/train.csv +3 -0
- es/validation.csv +3 -0
- pt/test.csv +3 -0
- pt/train.csv +3 -0
- pt/validation.csv +3 -0
.gitattributes
CHANGED
|
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 35 |
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 36 |
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 37 |
*.wav filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 35 |
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 36 |
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 37 |
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*.csv filter=lfs diff=lfs merge=lfs -text
|
code/01-get_products_list.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import sys
import os
sys.path.append('python-sdk/lib/')
from meli import Meli
import pandas as pd
from tqdm import tqdm
import json

# MercadoLibre site IDs -> human-readable country name (used for output filenames).
countries_ids = {
    'MLA': 'Argentina',
    'MCO': 'Colombia',
    'MPE': 'Perú',
    'MLU': 'Uruguay',
    'MLC': 'Chile',
    'MLM': 'Mexico',
    'MLV': 'Venezuela',
    'MLB': 'Brasil'
}

# Number of attempts for each flaky API call.
n_tries = 5


def generate_products_list():
    """Download the product listing of every category of every country and
    save one ``./products/<country>.csv`` per country with columns
    ``prod_id`` and ``cat_id``.

    Each API call is retried up to ``n_tries`` times. A country whose
    category listing never succeeds is skipped, and a category whose product
    search never succeeds is skipped. (Previously an exhausted retry loop
    either raised NameError on the first failure or silently reused the
    *previous* iteration's stale ``categories``/``products`` data.)
    """
    meli = Meli(client_id=1234, client_secret="a secret")
    products_and_categories = {'prod_id': [], 'cat_id': []}
    for country_id, country in countries_ids.items():
        try:
            print('País:', country)
            # Fetch the country's full category listing, retrying on bad JSON.
            categories = None
            for i in range(n_tries):
                try:
                    categories = meli.get('sites/{}/categories/all'.format(country_id)).json()
                    break
                except json.decoder.JSONDecodeError:
                    print('Error 1')
            if categories is None:
                # All retries failed: skip the whole country instead of
                # crashing with a NameError below.
                continue
            # For each category, fetch its products.
            for category_id in tqdm(categories.keys()):
                products = None
                for i in range(n_tries):
                    try:
                        products = meli.get('sites/{}/search?category={}'.format(country_id, category_id)).json()['results']
                        break
                    except (KeyError, json.decoder.JSONDecodeError):
                        print('Error 2')
                if products is None:
                    # All retries failed: skip this category rather than
                    # re-recording the previous category's products.
                    continue
                products_and_categories['prod_id'].extend([product['id'] for product in products])
                products_and_categories['cat_id'].extend([category_id] * len(products))

        except KeyboardInterrupt:
            # Allow Ctrl-C to abort the current country while still saving
            # whatever has been collected so far.
            pass

        pd.DataFrame(products_and_categories).to_csv('./products/{}.csv'.format(country), index=False)


if __name__ == '__main__':
    generate_products_list()
code/02-remove_duplicates_and_get_reviews.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import sys
sys.path.append('python-sdk/lib/')
from meli import Meli
import pandas as pd
import argparse

# MercadoLibre site IDs -> country name (used to locate products/<country>.csv).
countries_ids = {
    'MLA': 'Argentina',
    'MCO': 'Colombia',
    'MPE': 'Perú',
    'MLU': 'Uruguay',
    'MLC': 'Chile',
    'MLM': 'Mexico',
    'MLV': 'Venezuela',
    'MLB': 'Brasil'
}

# Number of attempts per flaky API call.
n_tries = 5

# Column layout of every reviews part file (order defines the CSV columns).
_REVIEW_COLUMNS = ('prod_id', 'cat_id', 'review_id', 'country',
                   'prod_title', 'reviewer_id', 'review_date',
                   'review_status', 'review_title', 'review_content',
                   'review_rate', 'review_likes', 'review_dislikes')


def _empty_reviews_dict():
    """Return a fresh accumulator with one empty list per review column."""
    return {col: [] for col in _REVIEW_COLUMNS}


def remove_duplicates_and_get_reviews(parts_dir, countries_ids):
    """Concatenate the per-country product lists, drop duplicated products,
    and download up to 100 reviews per product through the MercadoLibre API.

    Reviews are buffered and flushed to
    ``./<parts_dir>/reviews_partNNNNNN.csv`` every 200 products, plus a final
    flush at the end (previously any reviews gathered after the last
    multiple-of-200 checkpoint were silently lost).
    """
    products_df = pd.concat([pd.read_csv('products/{}.csv'.format(country))
                             for country in countries_ids.values()]).drop_duplicates(subset=['prod_id'])
    products_df = products_df.reset_index(drop=True)
    # offset = 141000
    prod_len = len(products_df)
    # products_df = products_df.iloc[offset:,:]
    meli = Meli(client_id=1212334, client_secret='a secret')
    reviews_dict = _empty_reviews_dict()

    # NOTE: unpacking each row into (prod_id, category_id) relies on the
    # products CSVs having exactly those two columns, in that order.
    for idx, (prod_id, category_id) in products_df.iterrows():
        print('{}/{} ({:.1f}%)'.format(idx, prod_len, idx / prod_len * 100))
        prod_title = meli.get('items/{}'.format(prod_id)).json()['title']
        country = prod_id[:3]
        # Fetch the product's reviews, retrying on transient API failures.
        reviews = None
        for i in range(n_tries):
            try:
                reviews = meli.get('/reviews/search?item_id={}&limit=100&order_criteria=valorization'.format(prod_id)).json()['results']
                break
            except KeyError:
                print('Error')
        if reviews is None:
            # All retries failed: skip this product instead of crashing on
            # the first product (NameError) or re-recording the previous
            # product's stale reviews.
            continue
        for review in reviews:
            reviews_dict['prod_id'].append(prod_id)
            reviews_dict['cat_id'].append(category_id)
            reviews_dict['country'].append(country)
            reviews_dict['prod_title'].append(prod_title)
            reviews_dict['review_id'].append(review['id'])
            reviews_dict['review_date'].append(review['date_created'])
            reviews_dict['review_status'].append(review['status'])
            reviews_dict['review_title'].append(review['title'])
            reviews_dict['review_content'].append(review['content'])
            reviews_dict['review_rate'].append(review['rate'])
            reviews_dict['review_likes'].append(review['likes'])
            reviews_dict['review_dislikes'].append(review['dislikes'])
            reviews_dict['reviewer_id'].append(review['reviewer_id'])
        # Checkpoint the buffer every 200 products.
        if idx % 200 == 199:
            pd.DataFrame(reviews_dict).to_csv('./{}/reviews_part{:06}.csv'.format(parts_dir, idx + 1), index=False)
            reviews_dict = _empty_reviews_dict()

    # Final flush of whatever is still buffered.
    if reviews_dict['prod_id']:
        pd.DataFrame(reviews_dict).to_csv('./{}/reviews_part{:06}.csv'.format(parts_dir, prod_len), index=False)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--partsdir', type=str, required=True)
    parser.add_argument('--countries', type=str, nargs="+", required=False)
    args = vars(parser.parse_args())
    parts_dir = args["partsdir"]
    # Fix: the original assigned to the *local* name ``countries_ids`` while
    # also reading it inside the comprehension, which raises
    # UnboundLocalError; and ``args["countries"]`` is None when --countries
    # is omitted, which made ``in args["countries"]`` raise TypeError.
    if args["countries"]:
        selected = {country_id: country_name
                    for country_id, country_name in countries_ids.items()
                    if country_id in args["countries"]}
    else:
        # No filter given: process every known country.
        selected = dict(countries_ids)
    remove_duplicates_and_get_reviews(parts_dir, selected)


if __name__ == '__main__':
    main()
code/03-get_parts_in_csv.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import numpy as np
import pandas as pd
import os

# Base directory where the merged review CSVs are written.
root_path = './28-melisa/melisa/'

# Site IDs that are valid values of the ``country`` column.
_VALID_COUNTRIES = ['MLB', 'MLA', 'MLM', 'MLU', 'MCO', 'MLC', 'MLV', 'MPE']


def filenames_generator(parts_directories):
    """Yield the path of every part file in *parts_directories*, shuffled
    with a fixed seed so runs are reproducible.

    Each directory string is expected to end with '/', since the full path
    is built by plain concatenation.
    """
    rs = np.random.RandomState(127361824)
    for directory in parts_directories:
        print('Extracting from {}'.format(directory))
        list_parts = np.array(os.listdir(directory))
        num_parts = len(list_parts)
        for filename in rs.choice(list_parts, num_parts, replace=False):
            yield '{}{}'.format(directory, filename)


def _check_schema(df):
    """Validate column types/values of a reviews frame.

    Raises ValueError on the first bad column. Uses explicit raises instead
    of ``assert`` so the checks survive running Python with ``-O``.
    """
    checks = [
        ('prod_id', df['prod_id'].apply(type).eq(str).all()),
        ('cat_id', df['cat_id'].apply(type).eq(str).all()),
        ('review_id', df['review_id'].apply(type).eq(int).all()),
        ('country', df['country'].isin(_VALID_COUNTRIES).all()),
        ('prod_title', df['prod_title'].apply(type).eq(str).all()),
        ('reviewer_id', df['reviewer_id'].apply(type).eq(int).all()),
        ('review_date', df['review_date'].apply(type).eq(str).all()),
        ('review_status', df['review_status'].apply(type).eq(str).all()),
        ('review_title', df['review_title'].apply(type).eq(str).all()),
        ('review_content', df['review_content'].apply(type).eq(str).all()),
        ('review_rate', df['review_rate'].isin([1, 2, 3, 4, 5]).all()),
        ('review_likes', df['review_likes'].apply(type).eq(int).all()),
        ('review_dislikes', df['review_dislikes'].apply(type).eq(int).all()),
    ]
    for column, ok in checks:
        if not ok:
            raise ValueError('Invalid values in column {!r}'.format(column))


def drop_dups(df):
    """Remove duplicated and incomplete reviews and normalize whitespace.

    Steps: drop rows sharing a ``review_id`` and rows with missing values,
    validate the schema, collapse every whitespace run in title/content to a
    single space, then drop rows whose (content, title, rate) triple repeats.
    Returns a new frame with a fresh RangeIndex; the input is not mutated.
    """
    # Remove the duplicates and the rows with missing values:
    df = df.drop_duplicates(subset=['review_id'])\
           .reset_index(drop=True).dropna()

    # Titles scraped as bare numbers come in as non-strings; coerce before
    # validating.
    df['review_title'] = df['review_title'].apply(str)
    _check_schema(df)

    print('Cantidad de reviews únicos descargados:', len(df))

    # Replace every whitespace run by a single space and dedupe again:
    df['review_content'] = df['review_content']\
        .str.replace(r'\s+', ' ', regex=True)
    df['review_title'] = df['review_title'].str.replace(r'\s+', ' ', regex=True)
    df = df.drop_duplicates(subset=['review_content',
                                    'review_title', 'review_rate']).reset_index(drop=True)
    print('Cantidad de reviews con contenido, título y rate únicos:', len(df))

    return df


def get_csv(filenames_gen, csv_filename):
    """Concatenate every part file yielded by *filenames_gen*, deduplicate,
    and save the result under ``root_path + csv_filename``."""
    df = pd.concat([pd.read_csv(filename, lineterminator='\n', sep=',')
                    for filename in filenames_gen], ignore_index=True)
    df = drop_dups(df)

    # Persist the cleaned frame:
    df.to_csv(root_path + csv_filename, index=False)
    print('Guardado OK.')


def merge_dfs(csvs_lists):
    """Read and concatenate the given CSV files and deduplicate the result."""
    df = pd.concat([pd.read_csv(csv_filename, lineterminator='\n', sep=',')
                    for csv_filename in csvs_lists], ignore_index=True)
    df = drop_dups(df)
    return df


def get_parts_in_csv():
    """Merge the downloaded part directories into per-source CSVs and then
    into a single ``reviews_all.csv``."""
    csvs = {'orig.csv': ['parts/', 'parts-2/'],
            'ven.csv': ['ven_parts/'],
            'per.csv': ['peru_parts/']}

    generators = {csvfilename: filenames_generator([root_path + 'all_parts/' + part for part in directories])
                  for csvfilename, directories in csvs.items()}

    for filename, gen in generators.items():
        print('Generando archivo {}...'.format(filename))
        get_csv(gen, filename)
        print()

    print('Mergeando todos en un único csv...')
    df = merge_dfs([root_path + csvfiles for csvfiles in csvs.keys()])
    filename = root_path + 'reviews_all.csv'
    df.to_csv(filename, index=False)


if __name__ == "__main__":
    get_parts_in_csv()
code/04-process_csv.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import sys
import pandas as pd
pd.options.mode.chained_assignment = None  # default='warn'
import numpy as np


root_path = './28-melisa/melisa/'
MeLi_path = './29-mercado-libre-api-v3/python-sdk/lib/'
sys.path.append(MeLi_path)
from meli import Meli
# NOTE(review): the sibling script is named '03-get_parts_in_csv.py', which is
# not importable under this module name; a copy/rename to
# 'get_parts_in_csv.py' must exist on the path for this import to work.
from get_parts_in_csv import drop_dups


# dict(df['category'].value_counts()) was run once to obtain the category
# occurrence counts; that table was used to design this coarser grouping.
# Built from the inverted table below so the five long bilingual labels are
# written once each instead of ~90 times.
_CATEGORY_GROUPS = {
    'Hogar / Casa': [
        'Hogar, Muebles y Jardín', 'Casa, Móveis e Decoração',
        'Herramientas y Construcción', 'Industrias y Oficinas',
        'Ferramentas e Construção', 'Bebés', 'Animales y Mascotas',
        'Hogar y Muebles', 'Bebês', 'Animais', 'Indústria e Comércio',
        'Industrias',
    ],
    'Tecnología y electrónica / Tecnologia e electronica': [
        'Computación', 'Accesorios para Vehículos', 'Acessórios para Veículos',
        'Electrónica, Audio y Video', 'Electrodomésticos y Aires Ac.',
        'Celulares y Telefonía', 'Informática', 'Eletrônicos, Áudio e Vídeo',
        'Electrodomésticos', 'Eletrodomésticos', 'Celulares y Teléfonos',
        'Cámaras y Accesorios', 'Consolas y Videojuegos',
        'Celulares e Telefones', 'Câmeras e Acessórios',
    ],
    'Salud, ropa y cuidado personal / Saúde, roupas e cuidado pessoal': [
        'Deportes y Fitness', 'Belleza y Cuidado Personal',
        'Calçados, Roupas e Bolsas', 'Esportes e Fitness', 'Ropa y Accesorios',
        'Salud y Equipamiento Médico', 'Beleza e Cuidado Pessoal',
        'Ropa, Bolsas y Calzado', 'Saúde', 'Vestuario y Calzado',
        'Ropa, Calzados y Accesorios', 'Ropa, Zapatos y Accesorios',
        'Estética y Belleza',
    ],
    'Arte y entretenimiento / Arte e Entretenimiento': [
        'Juegos y Juguetes', 'Brinquedos e Hobbies',
        'Arte, Librería y Mercería', 'Instrumentos Musicales',
        'Arte, Papelaria e Armarinho', 'Arte, Papelería y Mercería',
        'Joyas y Relojes', 'Instrumentos Musicais', 'Games',
        'Joias e Relógios', 'Souvenirs, Cotillón y Fiestas',
        'Festas e Lembrancinhas', 'Recuerdos, Cotillón y Fiestas',
        'Antigüedades y Colecciones', 'Libros, Revistas y Comics',
        'Relojes y Joyas', 'Antiguidades e Coleções',
        'Arte, Librería y Cordonería', 'Recuerdos, Piñatería y Fiestas',
        'Música, Películas y Series', 'Música, Filmes e Seriados',
        'Relojes, Joyas y Bisutería', 'Música y Películas',
        'Livros, Revistas e Comics',
    ],
    'Alimentos y Bebidas / Alimentos e Bebidas': [
        'Alimentos y Bebidas', 'Alimentos e Bebidas',
    ],
}

# Categories mapped to NaN are removed entirely by the dropna() in
# process_csv().
_DROPPED_CATEGORIES = [
    'Servicios', 'Serviços', 'Agro', 'Otras categorías', 'Mais Categorias',
    'Otras Categorías', 'Ingressos', 'Entradas para Eventos',
    'Boletas para Espectáculos', 'Autos, Motos y Otros',
]

# Specific category name -> coarse bilingual label (or NaN to drop).
generalize_categories = {specific: general
                         for general, specifics in _CATEGORY_GROUPS.items()
                         for specific in specifics}
generalize_categories.update({name: np.nan for name in _DROPPED_CATEGORIES})

# Site IDs in the column order of the cat2n count lists.
countries = ['MLB','MLA','MLM','MLU','MCO','MLC','MLV','MPE']

rates = [1, 2, 3, 4, 5]

# Coarse category -> number of reviews to keep per (rate, country); each
# list is aligned with `countries`.
cat2n = {
    'Tecnología y electrónica / Tecnologia e electronica': [14396,11189,9350,1473,3237,3188,656,316],
    'Salud, ropa y cuidado personal / Saúde, roupas e cuidado pessoal': [16032,11399,9522,1228,3260,2295,173,211],
    'Hogar / Casa': [19477,16601,8613,2175,2321,3188,182,157],
    'Arte y entretenimiento / Arte e Entretenimiento': [4572,2710,1789,253,487,380,72,30],
    'Alimentos y Bebidas / Alimentos e Bebidas': [386,462,467,29,22,86,2,2]
}


def get_orig_cat_name():
    """Load ``reviews_all.csv`` and attach the site-specific category name
    (column ``category``) via the ``catid2catname.csv`` lookup table; rows
    whose ``cat_id`` has no known name are dropped."""
    filename = root_path + 'reviews_all.csv'
    df = pd.read_csv(filename)

    all_categories = pd.read_csv(root_path + 'catid2catname.csv', index_col='cat_id').to_dict()['category']
    df['category'] = df['cat_id'].map(all_categories)
    return df.dropna().reset_index(drop=True)


def _relevance_score(df):
    """Per-row relevance: normalized log(likes - dislikes) plus half the
    normalized content length beyond 50 characters.

    Extracted helper — this expression was previously duplicated verbatim in
    sort_by_score() and sample_and_get_indices().
    """
    diff = df['review_likes'] - df['review_dislikes']
    vals = np.log(diff - diff.min() + 1)
    vals_norm = (vals - vals.min()) / (vals.max() - vals.min())
    lengths = np.maximum(0, df['review_content'].str.len() - 50)
    lengths_norm = (lengths - lengths.min()) / (lengths.max() - lengths.min())
    return vals_norm + .5 * lengths_norm


def sort_by_score(df):
    """Sort reviews by rate preference (2, 1, 3, 4, 5 stars) and, within each
    rate, by descending relevance score. Helper columns are dropped before
    returning."""
    score_map = {2: 1., 1: .75, 3: .5, 4: .25, 5: 0.}
    df['rate_score'] = df['review_rate'].map(score_map)
    df['val_score'] = _relevance_score(df)
    df = df.sort_values(by=['rate_score', 'val_score'], ascending=[False, False])
    return df.drop(['rate_score', 'val_score'], axis=1)


def mask(df, ctry, cat, rate):
    """Boolean mask selecting rows of a given country, category and rate."""
    return ((df['country'] == ctry) & (df['category'] == cat) & (df['review_rate'] == rate))


def sample_and_get_indices(df, n):
    """Return the index labels of the *n* most relevant rows of *df*.

    Unlike the original, this no longer writes a temporary 'score' column
    into the (possibly sliced) frame, so the caller's data is left untouched.
    """
    score = _relevance_score(df)
    return score.sort_values(ascending=False).iloc[:n].index.tolist()


def process_csv():
    """Pipeline: generalize categories, cap reviews per product, sample per
    (category, country, rate), dedupe, and write the Spanish and Portuguese
    full CSVs."""
    print('Generalizando categorías...')
    df = get_orig_cat_name()
    df['category'] = df['category'].map(generalize_categories)
    df = df.dropna().reset_index(drop=True)

    print('Limitando por cantidad de productos...')
    # Keep at most the 30 best-scored reviews of each product.
    df = sort_by_score(df)
    df = df.groupby(['prod_id']).head(30)
    df = df.reset_index(drop=True)

    print('Sampleando los reviews por categoría, país y rate...')
    indices = []
    for cat, n in cat2n.items():
        for rate in rates:
            for i, country in enumerate(countries):
                df_new = df[mask(df, country, cat, rate)]
                idx = sample_and_get_indices(df_new, n[i])
                indices.extend(idx)
                print('Cantidad de índices hasta ahora:', len(indices))
    df = df.loc[indices, :].reset_index(drop=True)

    #df.to_csv(root_path + 'reviews_sampled_full.csv',index=False)
    df = drop_dups(df)

    df_esp = df[df['country'] != 'MLB']
    df_esp.to_csv(root_path + 'reviews_esp_full.csv', index=False)

    df_por = df[df['country'] == 'MLB']
    df_por.to_csv(root_path + 'reviews_por_full.csv', index=False)


if __name__ == "__main__":
    process_csv()
code/06-train-dev-test-split.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
import fasttext
|
| 4 |
+
|
| 5 |
+
to_be_removed_esp = [
|
| 6 |
+
483197,483223,483442,483512,484318,484498,484586,484729,485267,485810,
|
| 7 |
+
485903,486030,486154,486295,486525,486749,486773,486786,486791,486811,486814,
|
| 8 |
+
486827,486842,486848,486855,486854,486869,486908,486917,486918,486922,486925,
|
| 9 |
+
486931,486934,486935,486940,486941,486948,486956,486958,486959,486961,486962,
|
| 10 |
+
486966,486968,486973,486974,486978,486981,486983,486987,486992,486999,487001,
|
| 11 |
+
487003,487009,487019,487018,487021,487022,487026,487028,487029,487033,487037,
|
| 12 |
+
487041,487044,487052,487060,487062,487065,487072,487074,487076,487081,487084,
|
| 13 |
+
487085,487088,487092,487094,487095,487099,487103,487105,487106,487109,487110,
|
| 14 |
+
487111,487112,487115,487122,487126,487133,487135,487138,487142,487143,487146,
|
| 15 |
+
487154,487155,487156,487162,487164,487173,487179,487185,487189,487197,487198,
|
| 16 |
+
487199,487204,487207,487210,487211,487216,487219,487225,487229,487233,487234,
|
| 17 |
+
487235,487243,487244,487245,487251,487252,487253,487254,487255,487256,487258,
|
| 18 |
+
487264,487273,487276,487282,487290,487292,487294,487298,487304,487303,487308,
|
| 19 |
+
487318,487321,487323,487326,487327,487329,487330,487331,487334,487337,487346,
|
| 20 |
+
487343,487347,487349,487350,487360,487361,487365,487366,487375,487379,487380,
|
| 21 |
+
487386,487389,487391,487393,487396,487397,487399,487400,487401,487402,487411,
|
| 22 |
+
487412,487414,487416,487417,487419,487421,487424,487425,487427,487433,487435,
|
| 23 |
+
487437,487443,487449,487452,487453,487455,487456,487459,487463,487465,487466,
|
| 24 |
+
487468,487471,487479,487481,487483,487485,487486,487487,487488,487489,487490,
|
| 25 |
+
487493,487494,487496,487498,487500,487501,487502,487505,487506,487512,487517,
|
| 26 |
+
487519,487528,487525,487529,487530,487537,487538,487541,487542,487545,487554,
|
| 27 |
+
487555,487556,487558,487567,487569,487573,487574,487578,487582,487586,487587,
|
| 28 |
+
487592,487596,487602,487603,487604,487607,487608,487609,487612,487613,487616,
|
| 29 |
+
487617,487618,487621,487623,487624
|
| 30 |
+
]
|
| 31 |
+
|
| 32 |
+
to_be_removed_por = [
|
| 33 |
+
274310,274300,274299,274294,274287,274281,274265,274259,274256,274255,274232,274225,274226,274219,
|
| 34 |
+
274213,274206,274200,274199,274194,274172,274171,274170,274167,274166,274165,274163,274153,274146,
|
| 35 |
+
274143,274142,274136,274134,274130,274125,274123,274122,274109,274108,274079,274075,274073,274071,
|
| 36 |
+
274068,274057,274054,274044,274043,274042,274030,274029,274019,274018,274017,274015,274014,274011,
|
| 37 |
+
273998,273975,273969,273967,273951,273934,273924,273922,273914,273910,273909,273901,273899,273895,
|
| 38 |
+
273889,273881,273876,273871,273875,273869,273820,273812,273799,273791,273786,273783,273781,273780,
|
| 39 |
+
273779,273772,273768,273754,273750,273741,273739,273736,273732,273731,273727,273715,273703,273674,
|
| 40 |
+
273596,273595,
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# countries = ['MLB','MLA','MLM','MLU','MCO','MLC','MLV','MPE']
|
| 45 |
+
# esp_countries = ['MLA','MLM','MLU','MCO','MLC','MLV','MPE']
|
| 46 |
+
# rates = [1, 2, 3, 4, 5]
|
| 47 |
+
|
| 48 |
+
abbreviations = {
|
| 49 |
+
'Hogar / Casa': 'HOGAR',
|
| 50 |
+
'Tecnología y electrónica / Tecnologia e electronica': 'TEC',
|
| 51 |
+
'Arte y entretenimiento / Arte e Entretenimiento': 'ARTE',
|
| 52 |
+
'Salud, ropa y cuidado personal / Saúde, roupas e cuidado pessoal': 'SALUD',
|
| 53 |
+
'Alimentos y Bebidas / Alimentos e Bebidas': 'ALIMENTOS'
|
| 54 |
+
}
|
| 55 |
+
inv_abbreviations = {v:k for k,v in abbreviations.items()}
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def detect_lang_fasttext(df_es,df_pt):
    """Rank reviews by fastText language-identification confidence.

    Concatenates content + title of each review, runs the pretrained
    lid.176 model on the text, zeroes the probability of rows whose top
    prediction is not the expected language ('es' for df_es, 'pt' for
    df_pt), and returns both frames sorted by that probability in
    descending order with the index reset. A ``lang_prob`` column is added
    to each frame as a side effect.

    NOTE(review): assumes the model file lives at '../datav2/lid.176.bin'
    relative to the working directory — confirm before running.
    """
    # Text fed to the detector: "<content> <title>" for every row.
    ds_es = (df_es['review_content'] + ' ' + df_es['review_title']).astype(str)
    ds_pt = (df_pt['review_content'] + ' ' + df_pt['review_title']).astype(str)

    model_predict = fasttext.load_model('../datav2/lid.176.bin').predict

    def apply_lang_detect(text):
        # model_predict returns ([label], [prob]); pair the top-1 results
        # into {'lang': label, 'prob': prob}.
        return dict(zip(*[('lang','prob'),next(zip(*model_predict(text, k=1)))]))

    lang_score_es = pd.DataFrame(ds_es.apply(apply_lang_detect).tolist())
    lang_score_pt = pd.DataFrame(ds_pt.apply(apply_lang_detect).tolist())

    # Rows not detected as Spanish get probability 0 so they sink to the
    # bottom of the ranking.
    lang_score_es.loc[lang_score_es['lang'] != '__label__es', 'prob'] = 0.
    df_es['lang_prob'] = lang_score_es['prob']
    df_es = df_es.sort_values(by=['lang_prob'],ascending=False).reset_index(drop=True)

    # Same treatment for Portuguese.
    lang_score_pt.loc[lang_score_pt['lang'] != '__label__pt', 'prob'] = 0.
    df_pt['lang_prob'] = lang_score_pt['prob']
    df_pt = df_pt.sort_values(by=['lang_prob'],ascending=False).reset_index(drop=True)

    return df_es, df_pt
|
| 81 |
+
def train_test_split(
    df,
    samples,
    random_seed
):
    """Split *df* into a train and a test frame.

    ``samples`` maps country -> {category abbreviation -> n}; for each such
    pair, n rows are drawn per rating value (stratified over
    ``review_rate``) into the test set. Both returned frames keep only the
    columns country, category, review_content, review_title, review_rate
    and get a fresh RangeIndex.
    """
    rng = np.random.RandomState(random_seed)
    keep_cols = ['country', 'category', 'review_content', 'review_title', 'review_rate']

    held_out = []
    for country in samples.keys():
        for abbrev, n_per_rate in samples[country].items():
            if n_per_rate == 0:
                continue
            # Rows of this country/category, sampled n_per_rate per rating.
            subset = df[
                (df['country'] == country) & (df['category'] == inv_abbreviations[abbrev])
            ]
            sampled = subset.groupby('review_rate').sample(n=n_per_rate, random_state=rng)
            held_out.extend(sampled.index.tolist())

    df_test = df.loc[held_out, keep_cols].reset_index(drop=True)

    # Everything not held out (in positional order) becomes the train set.
    remaining = sorted(set(range(len(df))) - set(held_out))
    df_train = df.loc[remaining, keep_cols].reset_index(drop=True)

    return df_train, df_test
|
| 112 |
+
def main():
    """Build the train/validation/test CSV splits for the ES and PT review sets."""
    # Load all downloaded reviews.
    df_es = pd.read_csv('./reviews_es_full.csv')
    df_pt = pd.read_csv('./reviews_pt_full.csv')

    # Sort each frame by language-detection confidence for its target language.
    df_es, df_pt = detect_lang_fasttext(df_es, df_pt)

    ## SPANISH
    # Drop the rows flagged for removal in to_be_removed_esp.
    df_es = df_es.drop(set(to_be_removed_esp)).reset_index(drop=True)

    # Extract the test set.
    es_country_samples = {
        'MLA':{'ALIMENTOS': 3,'ARTE':30,'HOGAR': 156,'SALUD':210,'TEC':315},
        'MLM':{'ALIMENTOS': 4,'ARTE':30,'HOGAR': 156,'SALUD':210,'TEC':315},
        'MLU':{'ALIMENTOS': 4,'ARTE':30,'HOGAR': 156,'SALUD':210,'TEC':315},
        'MCO':{'ALIMENTOS': 4,'ARTE':30,'HOGAR': 156,'SALUD':210,'TEC':315},
        'MLC':{'ALIMENTOS': 4,'ARTE':30,'HOGAR': 156,'SALUD':210,'TEC':315},
        'MLV':{'ALIMENTOS': 2,'ARTE':30,'HOGAR': 156,'SALUD':172,'TEC':353},
        'MPE':{'ALIMENTOS': 2,'ARTE':30,'HOGAR': 156,'SALUD':210,'TEC':315}
    }
    df_es_train, df_es_test = train_test_split(df_es, es_country_samples, random_seed=776436538)

    # Extract the dev set from the remaining training rows.
    es_country_samples = {
        'MLA':{'ALIMENTOS': 10,'ARTE':30,'HOGAR': 200,'SALUD':200,'TEC':300},
        'MLM':{'ALIMENTOS': 10,'ARTE':30,'HOGAR': 200,'SALUD':200,'TEC':300},
        'MLU':{'ALIMENTOS': 10,'ARTE':30,'HOGAR': 200,'SALUD':200,'TEC':300},
        'MCO':{'ALIMENTOS': 10,'ARTE':40,'HOGAR': 200,'SALUD':200,'TEC':300},
        'MLC':{'ALIMENTOS': 20,'ARTE':60,'HOGAR': 200,'SALUD':200,'TEC':300},
        'MLV':{'ALIMENTOS': 0,'ARTE':30,'HOGAR': 20,'SALUD':0,'TEC':250},
        'MPE':{'ALIMENTOS': 0,'ARTE':0,'HOGAR': 1,'SALUD':0,'TEC':1}
    }
    df_es_train, df_es_dev = train_test_split(df_es_train, es_country_samples, random_seed=776436538)

    df_es_train.to_csv('./es/train.csv', index=False)
    df_es_dev.to_csv('./es/validation.csv', index=False)
    df_es_test.to_csv('./es/test.csv', index=False)

    ## PORTUGUESE
    # Drop the rows flagged for removal in to_be_removed_por.
    df_pt = df_pt.drop(set(to_be_removed_por)).reset_index(drop=True)

    # Extract the test set.
    pt_country_samples = {'MLB':{'ALIMENTOS': 23,'ARTE':210,'HOGAR': 1092,'SALUD':1432,'TEC':2243}}
    df_pt_train, df_pt_test = train_test_split(df_pt, pt_country_samples, random_seed=776436538)

    # Extract the dev set.
    pt_country_samples = {'MLB':{'ALIMENTOS': 20,'ARTE':200,'HOGAR': 1032,'SALUD':1400,'TEC':1400}}
    # BUG FIX: the dev split must be drawn from df_pt_train (as the ES flow does
    # with df_es_train), not from the full df_pt — otherwise test rows leak back
    # into the saved train set and may also appear in the dev set.
    df_pt_train, df_pt_dev = train_test_split(df_pt_train, pt_country_samples, random_seed=776436538)

    df_pt_train.to_csv('./pt/train.csv', index=False)
    df_pt_dev.to_csv('./pt/validation.csv', index=False)
    df_pt_test.to_csv('./pt/test.csv', index=False)


if __name__ == "__main__":
    main()
|
code/get_data.sh
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#! /bin/bash
# Pipeline driver: scrape product lists, fetch reviews, assemble CSVs, split.
# Fail fast on the first error (and on unset variables / pipe failures) so a
# later stage never runs on partial data from a failed earlier stage.
set -euo pipefail

# -p: tolerate pre-existing directories so the script is safely re-runnable.
mkdir -p products parts parts-2 peru_parts ven_parts
python 01-get_products_list.py
python 02-remove_duplicates_and_get_reviews.py --partsdir "parts" --countries "MLA" "MCO" "MPE" "MLU" "MLC" "MLM" "MLV" "MLB"
python 02-remove_duplicates_and_get_reviews.py --partsdir "parts-2" --countries "MLA" "MCO" "MPE" "MLU" "MLC" "MLM" "MLV" "MLB"
python 02-remove_duplicates_and_get_reviews.py --partsdir "peru_parts" --countries "MPE"
python 02-remove_duplicates_and_get_reviews.py --partsdir "ven_parts" --countries "MLV"
python 03-get_parts_in_csv.py
python 04-process_csv.py
python 06-train-dev-test-split.py
|
code/python-sdk/README.md
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MercadoLibre's Python SDK
|
| 2 |
+
|
| 3 |
+
This is the official Python SDK for MercadoLibre's Platform.
|
| 4 |
+
|
| 5 |
+
## How do I install it?
|
| 6 |
+
|
| 7 |
+
clone repository
|
| 8 |
+
https://github.com/mercadolibre/python-sdk.git
|
| 9 |
+
|
| 10 |
+
## How do I use it?
|
| 11 |
+
|
| 12 |
+
The first thing to do is to instance a ```Meli``` class. You'll need to give a ```clientId``` and a ```clientSecret```. You can obtain both after creating your own application. For more information on this please read: [creating an application](http://developers.mercadolibre.com/application-manager/)
|
| 13 |
+
|
| 14 |
+
### Including the Lib
|
| 15 |
+
Include the lib meli in your project
|
| 16 |
+
|
| 17 |
+
### Attention
|
| 18 |
+
Don't forget to set the authentication URL of your country in file lib/config.ini
|
| 19 |
+
|
| 20 |
+
```python
|
| 21 |
+
import sys
|
| 22 |
+
sys.path.append('../lib')
|
| 23 |
+
from meli import Meli
|
| 24 |
+
```
|
| 25 |
+
Start the development!
|
| 26 |
+
|
| 27 |
+
### Create an instance of Meli class
|
| 28 |
+
Simple like this
|
| 29 |
+
```python
|
| 30 |
+
meli = Meli(client_id=1234, client_secret="a secret")
|
| 31 |
+
```
|
| 32 |
+
With this instance you can start working on MercadoLibre's APIs.
|
| 33 |
+
|
| 34 |
+
There are some design considerations worth mentioning.
|
| 35 |
+
|
| 36 |
+
1. This SDK is just a thin layer on top of an http client to handle all the OAuth WebServer flow for you.
|
| 37 |
+
|
| 38 |
+
2. JSON parsing is handled for you: this SDK uses [json](http://docs.python.org/2/library/json.html) internally.
|
| 39 |
+
|
| 40 |
+
3. If you already have the access_token and the refresh_token you can pass in the constructor
|
| 41 |
+
|
| 42 |
+
```python
|
| 43 |
+
meli = Meli(client_id=1234, client_secret="a secret", access_token="Access_Token", refresh_token="Refresh_Token")
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
## How do I redirect users to authorize my application?
|
| 47 |
+
|
| 48 |
+
This is a 2 step process.
|
| 49 |
+
|
| 50 |
+
First get the link to redirect the user. This is very easy! Just:
|
| 51 |
+
|
| 52 |
+
```python
|
| 53 |
+
redirectUrl = meli.auth_url(redirect_URI="http://somecallbackurl")
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
This will give you the url to redirect the user. You need to specify a callback url, which will be the one that the user will be redirected to after a successful authorization process.
|
| 57 |
+
|
| 58 |
+
Once the user is redirected to your callback url, you'll receive in the query string, a parameter named ```code```. You'll need this for the second part of the process.
|
| 59 |
+
|
| 60 |
+
```python
|
| 61 |
+
meli.authorize(code="the received code", redirect_URI="http://somecallbackurl")
|
| 62 |
+
```
|
| 63 |
+
|
| 64 |
+
This will get an ```access_token``` and a ```refresh_token``` (in case your application has the ```offline_access```) for your application and your user.
|
| 65 |
+
|
| 66 |
+
At this stage you are ready to make calls to the API on behalf of the user.
|
| 67 |
+
|
| 68 |
+
#### Making GET calls
|
| 69 |
+
|
| 70 |
+
```python
|
| 71 |
+
params = {'access_token' : meli.access_token}
|
| 72 |
+
response = meli.get(path="/users/me", params=params)
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
#### Making POST calls
|
| 76 |
+
|
| 77 |
+
```python
|
| 78 |
+
params = {'access_token' : meli.access_token}
|
| 79 |
+
|
| 80 |
+
#this body will be converted into json for you
|
| 81 |
+
body = {'foo' : 'bar', 'bar' : 'foo'}
|
| 82 |
+
|
| 83 |
+
response = meli.post(path="/items", body=body, params=params)
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
#### Making PUT calls
|
| 87 |
+
|
| 88 |
+
```python
|
| 89 |
+
params = {'access_token' : meli.access_token}
|
| 90 |
+
|
| 91 |
+
#this body will be converted into json for you
|
| 92 |
+
body = {'foo' : 'bar', 'bar' : 'foo'}
|
| 93 |
+
|
| 94 |
+
response = meli.put(path="/items/123", body=body, params=params)
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
#### Making DELETE calls
|
| 98 |
+
```python
|
| 99 |
+
params = {'access_token' : meli.access_token}
|
| 100 |
+
response = meli.delete(path="/questions/123", params=params)
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
## Examples
|
| 104 |
+
|
| 105 |
+
Don't forget to check out our examples codes in the folder [examples](https://github.com/mercadolibre/python-sdk/tree/master/examples)
|
| 106 |
+
|
| 107 |
+
## Community
|
| 108 |
+
|
| 109 |
+
You can contact us if you have questions using the standard communication channels described in the [developer's site](http://developers.mercadolibre.com/community/)
|
| 110 |
+
|
| 111 |
+
## I want to contribute!
|
| 112 |
+
|
| 113 |
+
That is great! Just fork the project in github. Create a topic branch, write some code, and add some tests for your new code.
|
| 114 |
+
|
| 115 |
+
Thanks for helping!
|
code/python-sdk/examples/delete.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append('../lib')
|
| 3 |
+
from meli import Meli
|
| 4 |
+
|
| 5 |
+
def main():
    """Delete a question through the MercadoLibre API and print the raw response body.

    CLIENT_ID / CLIENT_SECRET / ACCESS_TOKEN / REFRESH_TOKEN are placeholders the
    user must define before running — they are not set anywhere in this example.
    """
    meli = Meli(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
    response = meli.delete("/questions/QUESTION_ID", {'access_token': meli.access_token})
    # Fix: Python 2 `print` statement -> Python 3 call; the bundled lib/meli.py
    # already targets Python 3 (configparser, urllib.parse), so the old form
    # was a SyntaxError.
    print(response.content)


if __name__ == "__main__":
    main()
|
code/python-sdk/examples/example_login.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Minimal OAuth web-server-flow demo using the bottle micro-framework.
# NOTE(review): CLIENT_ID, CLIENT_SECRET and REDIRECT_URI are placeholders —
# they are not defined anywhere in this file and must be set before running.

from bottle import Bottle, run, template, route, request
import json

import sys
sys.path.append('../lib')
from meli import Meli

# One shared client for the whole app (module-level on purpose: both routes use it).
meli = Meli(client_id=CLIENT_ID,client_secret=CLIENT_SECRET)

app = Bottle()

@app.route('/authorize')
def authorize():
    # OAuth callback: exchange the ?code=... query parameter for an access token.
    # NOTE(review): when 'code' is absent this falls through and returns None
    # (an empty page) — presumably acceptable for a demo.
    if request.query.get('code'):
        meli.authorize(request.query.get('code'), REDIRECT_URI)
    return meli.access_token

@app.route('/login')
def login():
    # Entry point: render a link that sends the user to MercadoLibre's consent page.
    return "<a href='"+meli.auth_url(redirect_URI=REDIRECT_URI)+"'>Login</a>"

# reloader=True restarts the dev server on source changes.
run(app, host='localhost', port=4567, reloader=True)
|
code/python-sdk/examples/get.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append('../lib')
|
| 3 |
+
from meli import Meli
|
| 4 |
+
|
| 5 |
+
def main():
    """Fetch a single item through the MercadoLibre API and print the raw response body.

    CLIENT_ID / CLIENT_SECRET / ACCESS_TOKEN / REFRESH_TOKEN (and ITEM_ID in the
    path) are placeholders the user must fill in — they are not defined here.
    """
    meli = Meli(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)

    response = meli.get("/items/ITEM_ID")
    # Fix: Python 2 `print` statement -> Python 3 call; lib/meli.py targets
    # Python 3, so the old form was a SyntaxError.
    print(response.content)


if __name__ == "__main__":
    main()
|
code/python-sdk/examples/post.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append('../lib')
|
| 3 |
+
from meli import Meli
|
| 4 |
+
|
| 5 |
+
def main():
    """Create a test listing through the MercadoLibre API and print the raw response body.

    CLIENT_ID / CLIENT_SECRET / ACCESS_TOKEN / REFRESH_TOKEN are placeholders
    the user must define before running.  The item payload below is the
    published test-item example and is passed through unchanged (meli.post
    serializes it to JSON internally).
    """
    meli = Meli(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
    body = {"title":"Item De Teste - Por Favor, Não Ofertar! --kc:off","category_id":"MLB257111","price":10,"currency_id":"BRL","available_quantity":1,"buying_mode":"buy_it_now","listing_type_id":"bronze","condition":"new","description":"Item de Teste. Mercado Livre's PHP SDK.","video_id":"Q6dsRpVyyWs","warranty":"12 month","pictures":[{"source":"https://upload.wikimedia.org/wikipedia/commons/thumb/6/64/IPhone_7_Plus_Jet_Black.svg/440px-IPhone_7_Plus_Jet_Black.svg.png"},{"source":"https://upload.wikimedia.org/wikipedia/commons/thumb/b/bc/IPhone7.jpg/440px-IPhone7.jpg"}],"attributes":[{"id":"EAN","value_name":"190198043566"},{"id":"COLOR","value_id":"52049"},{"id":"WEIGHT","value_name":"188g"},{"id":"SCREEN_SIZE","value_name":"4.7 polegadas"},{"id":"TOUCH_SCREEN","value_id":"242085"},{"id":"DIGITAL_CAMERA","value_id":"242085"},{"id":"GPS","value_id":"242085"},{"id":"MP3","value_id":"242085"},{"id":"OPERATING_SYSTEM","value_id":"296859"},{"id":"OPERATING_SYSTEM_VERSION","value_id":"iOS 10"},{"id":"DISPLAY_RESOLUTION","value_id":"1920 x 1080"},{"id":"BATTERY_CAPACITY","value_name":"3980 mAh"},{"id":"FRONT_CAMERA_RESOLUTION","value_name":"7 mpx"}]}
    response = meli.post("/items", body, {'access_token': meli.access_token})
    # Fix: Python 2 `print` statement -> Python 3 call; lib/meli.py targets
    # Python 3, so the old form was a SyntaxError.
    print(response.content)


if __name__ == "__main__":
    main()
|
code/python-sdk/examples/put.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append('../lib')
|
| 3 |
+
from meli import Meli
|
| 4 |
+
|
| 5 |
+
def main():
    """Update a listing through the MercadoLibre API and print the raw response body.

    CLIENT_ID / CLIENT_SECRET / ACCESS_TOKEN / REFRESH_TOKEN (and ITEM_ID in the
    path) are placeholders the user must fill in — they are not defined here.
    """
    meli = Meli(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
    body = {"title":"oculos edicao especial!", "price":1000 }
    # Fix: the placeholder was "ITEM_+ID" (stray '+'); every sibling example
    # uses the plain "ITEM_ID" placeholder.
    response = meli.put("/items/ITEM_ID", body, {'access_token': meli.access_token})
    # Fix: Python 2 `print` statement -> Python 3 call; lib/meli.py targets
    # Python 3, so the old form was a SyntaxError.
    print(response.content)


if __name__ == "__main__":
    main()
|
code/python-sdk/lib/__init__.py
ADDED
|
File without changes
|
code/python-sdk/lib/__pycache__/meli.cpython-37.pyc
ADDED
|
Binary file (4.39 kB). View file
|
|
|
code/python-sdk/lib/__pycache__/meli.cpython-38.pyc
ADDED
|
Binary file (4.02 kB). View file
|
|
|
code/python-sdk/lib/__pycache__/ssl_helper.cpython-37.pyc
ADDED
|
Binary file (923 Bytes). View file
|
|
|
code/python-sdk/lib/__pycache__/ssl_helper.cpython-38.pyc
ADDED
|
Binary file (939 Bytes). View file
|
|
|
code/python-sdk/lib/config.ini
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[config]
|
| 2 |
+
sdk_version = MELI-PYTHON-SDK-2.0.0
|
| 3 |
+
api_root_url = https://api.mercadolibre.com
|
| 4 |
+
auth_url = https://auth.mercadolibre.com.ar
|
| 5 |
+
oauth_url = /oauth/token
|
| 6 |
+
ssl_version = PROTOCOL_TLSv1
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Set the auth_url according to your country
|
| 10 |
+
|
| 11 |
+
#MLA https://auth.mercadolibre.com.ar // Argentina
|
| 12 |
+
#MLB https://auth.mercadolivre.com.br // Brasil
|
| 13 |
+
#MCO https://auth.mercadolibre.com.co // Colombia
|
| 14 |
+
#MCR https://auth.mercadolibre.com.cr // Costa Rica
|
| 15 |
+
#MEC https://auth.mercadolibre.com.ec // Ecuador
|
| 16 |
+
#MLC https://auth.mercadolibre.cl // Chile
|
| 17 |
+
#MLM https://auth.mercadolibre.com.mx // Mexico
|
| 18 |
+
#MLU https://auth.mercadolibre.com.uy // Uruguay
|
| 19 |
+
#MLV https://auth.mercadolibre.com.ve // Venezuela
|
| 20 |
+
#MPA https://auth.mercadolibre.com.pa // Panama
|
| 21 |
+
#MPE https://auth.mercadolibre.com.pe // Peru
|
| 22 |
+
#MPT https://auth.mercadolibre.com.pt // Portugal
|
| 23 |
+
#MRD https://auth.mercadolibre.com.do // Dominicana
|
| 24 |
+
#CBT https://global-selling.mercadolibre.com // CBT
|
code/python-sdk/lib/meli.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
|
| 4 |
+
from configparser import ConfigParser # SelfConfigParsed has been renamed to ConfigParser in Python 3.2 (https://github.com/python/cpython/blob/cd90f6a3692e0f7ef0a13aae651e19a08d1f9b31/Lib/configparser.py#L1230)
|
| 5 |
+
from ssl_helper import SSLAdapter
|
| 6 |
+
from urllib.parse import urlencode # urlencode is now inside urlib.parse (https://stackoverflow.com/questions/44031471/importerror-cannot-import-name-urlencode-when-trying-to-install-flask-ext-sto)
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import requests
|
| 11 |
+
import ssl
|
| 12 |
+
|
| 13 |
+
class Meli(object):
    """Thin client for MercadoLibre's REST API.

    Handles the OAuth web-server flow (``auth_url`` / ``authorize`` /
    ``get_refresh_token``) and exposes small get/post/put/delete/options
    wrappers around ``requests``.  Endpoint URLs and the SDK version string
    are read from ``lib/config.ini`` next to this module.
    """

    def __init__(self, client_id, client_secret, access_token=None, refresh_token=None):
        """Store app credentials and load endpoint configuration.

        ``access_token`` / ``refresh_token`` may be supplied directly when the
        OAuth flow was already completed elsewhere.
        """
        self.client_id = client_id
        self.client_secret = client_secret
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.expires_in = None

        # ConfigParser is the Python 3 name of the old SafeConfigParser.
        parser = ConfigParser()
        parser.read(os.path.dirname(os.path.abspath(__file__)) + '/config.ini')

        self._requests = requests.Session()
        try:
            self.SSL_VERSION = parser.get('config', 'ssl_version')
            self._requests.mount('https://', SSLAdapter(ssl_version=getattr(ssl, self.SSL_VERSION)))
        except Exception:
            # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed.  Fallback when ssl_version is missing
            # from config.ini or unknown to the ssl module: use the plain
            # module-level requests API without a pinned TLS version.
            self._requests = requests

        self.API_ROOT_URL = parser.get('config', 'api_root_url')
        self.SDK_VERSION = parser.get('config', 'sdk_version')
        self.AUTH_URL = parser.get('config', 'auth_url')
        self.OAUTH_URL = parser.get('config', 'oauth_url')

    def _headers(self, extra_headers=None):
        """Default JSON headers (with the SDK user-agent), merged with extras."""
        headers = {'Accept': 'application/json', 'User-Agent': self.SDK_VERSION, 'Content-type': 'application/json'}
        if extra_headers:
            headers.update(extra_headers)
        return headers

    # AUTH METHODS
    def auth_url(self, redirect_URI):
        """Return the URL the user must visit to authorize this application."""
        params = {'client_id': self.client_id, 'response_type': 'code', 'redirect_uri': redirect_URI}
        return self.AUTH_URL + '/authorization' + '?' + urlencode(params)

    def authorize(self, code, redirect_URI):
        """Exchange the authorization ``code`` for tokens.

        On success stores ``access_token`` / ``refresh_token`` / ``expires_in``
        and returns the access token; otherwise raises ``requests.HTTPError``.
        """
        params = {'grant_type': 'authorization_code', 'client_id': self.client_id, 'client_secret': self.client_secret, 'code': code, 'redirect_uri': redirect_URI}
        uri = self.make_path(self.OAUTH_URL)

        response = self._requests.post(uri, params=urlencode(params), headers=self._headers())

        if response.ok:
            response_info = response.json()
            self.access_token = response_info['access_token']
            if 'refresh_token' in response_info:
                self.refresh_token = response_info['refresh_token']
            else:
                self.refresh_token = ''  # offline_access not set up
            self.expires_in = response_info['expires_in']
            return self.access_token
        else:
            # Non-2xx response: surface it to the caller.
            response.raise_for_status()

    def get_refresh_token(self):
        """Trade the stored refresh token for a new access token.

        Raises ``Exception`` when no refresh token is held (offline_access
        was never granted); raises ``requests.HTTPError`` on a server error.
        """
        if self.refresh_token:
            params = {'grant_type': 'refresh_token', 'client_id': self.client_id, 'client_secret': self.client_secret, 'refresh_token': self.refresh_token}
            uri = self.make_path(self.OAUTH_URL)

            response = self._requests.post(uri, params=urlencode(params), headers=self._headers(), data=params)

            if response.ok:
                response_info = response.json()
                self.access_token = response_info['access_token']
                self.refresh_token = response_info['refresh_token']
                self.expires_in = response_info['expires_in']
                return self.access_token
            else:
                response.raise_for_status()
        else:
            raise Exception("Offline-Access is not allowed.")

    # REQUEST METHODS
    def get(self, path, params=None, extra_headers=None):
        """HTTP GET ``path`` relative to the API root; returns the raw response."""
        # NOTE: params is pre-encoded to a query string, matching the SDK's
        # original behavior (requests accepts a string for `params=`).
        return self._requests.get(self.make_path(path), params=urlencode(params or {}), headers=self._headers(extra_headers))

    def post(self, path, body=None, params=None, extra_headers=None):
        """HTTP POST ``body`` (JSON-serialized when given) to ``path``."""
        if body:
            body = json.dumps(body)
        return self._requests.post(self.make_path(path), data=body, params=urlencode(params or {}), headers=self._headers(extra_headers))

    def put(self, path, body=None, params=None, extra_headers=None):
        """HTTP PUT ``body`` (JSON-serialized when given) to ``path``."""
        if body:
            body = json.dumps(body)
        return self._requests.put(self.make_path(path), data=body, params=urlencode(params or {}), headers=self._headers(extra_headers))

    def delete(self, path, params=None, extra_headers=None):
        """HTTP DELETE ``path``; returns the raw response."""
        # Unlike the other verbs, params is passed as a dict here (original
        # behavior preserved) — requests encodes it itself.
        return self._requests.delete(self.make_path(path), params=params or {}, headers=self._headers(extra_headers))

    def options(self, path, params=None, extra_headers=None):
        """HTTP OPTIONS ``path``; returns the raw response."""
        return self._requests.options(self.make_path(path), params=urlencode(params or {}), headers=self._headers(extra_headers))

    def make_path(self, path, params=None):
        """Prefix ``path`` with the API root, adding a leading '/' if missing.

        When ``params`` is given, append it as an encoded query string.
        """
        params = params or {}
        # Replaces re.search("^\/", path): needless regex with an invalid
        # escape sequence ("\/" raises a DeprecationWarning on Python 3.6+).
        if not path.startswith("/"):
            path = "/" + path
        path = self.API_ROOT_URL + path
        if params:
            path = path + "?" + urlencode(params)
        return path
|
code/python-sdk/lib/ssl_helper.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
|
| 4 |
+
from requests.adapters import HTTPAdapter
|
| 5 |
+
from requests.packages.urllib3.poolmanager import PoolManager
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class SSLAdapter(HTTPAdapter):
    """Transport adapter that pins the SSL/TLS protocol version used by requests."""

    def __init__(self, ssl_version=None, **kwargs):
        """Remember the ssl module protocol constant, then defer to HTTPAdapter."""
        self.ssl_version = ssl_version
        super(SSLAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
        """Build the urllib3 pool with the pinned ssl_version.

        Fix: accept and forward **pool_kwargs — newer requests releases pass
        extra keyword arguments (e.g. ssl_context) to this hook, and the old
        fixed signature raised TypeError there.
        """
        pool_kwargs['ssl_version'] = self.ssl_version
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       **pool_kwargs)
|
code/python-sdk/test/__init__.py
ADDED
|
File without changes
|
code/python-sdk/test/main.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
import re
|
| 3 |
+
import sys
|
| 4 |
+
sys.path.append('../lib')
|
| 5 |
+
from meli import Meli
|
| 6 |
+
import requests
|
| 7 |
+
import json
|
| 8 |
+
|
| 9 |
+
class MeliTest(unittest.TestCase):
    # Unit tests for lib/meli.py.  Outbound HTTP is stubbed by monkey-patching
    # requests.get/post/put/delete with the mock* functions defined below (the
    # patching happens once, at class-definition time), so no network is used.
    # The stubs only work when Meli's __init__ falls back to the module-level
    # `requests` object rather than a Session -- TODO confirm.

    def setUp(self):
        # Fixed fake credentials/tokens shared by every test.
        self.CLIENT_ID = "123"
        self.CLIENT_SECRET = "a secret"
        self.ACCESS_TOKEN = "a access_token"
        self.REFRESH_TOKEN = "a refresh_token"
        self.NEW_ACCESS_TOKEN = "a new access_token"
        self.NEW_REFRESH_TOKEN = "a new refresh_token"
        self.meli = Meli(client_id=self.CLIENT_ID, client_secret=self.CLIENT_SECRET, access_token=self.ACCESS_TOKEN, refresh_token=self.REFRESH_TOKEN)


    #constructor tests
    def testClientId(self):
        self.assertEqual(self.meli.client_id, self.CLIENT_ID)

    def testClientSecret(self):
        self.assertEqual(self.meli.client_secret, self.CLIENT_SECRET)

    def testAccessToken(self):
        self.assertEqual(self.meli.access_token, self.ACCESS_TOKEN)

    def testRefreshToken(self):
        self.assertEqual(self.meli.refresh_token, self.REFRESH_TOKEN)

    #auth_url tests
    def testAuthUrl(self):
        # The generated consent URL must be https, point at the auth host's
        # /authorization endpoint, and carry the OAuth query parameters.
        callback = "http://test.com/callback"
        self.assertTrue(re.search("^http", self.meli.auth_url(redirect_URI=callback)))
        self.assertTrue(re.search("^https\:\/\/auth.mercadolibre.com\/authorization", self.meli.auth_url(redirect_URI=callback)))
        self.assertTrue(re.search("redirect_uri", self.meli.auth_url(redirect_URI=callback)))
        self.assertTrue(re.search(self.CLIENT_ID,self.meli.auth_url(redirect_URI=callback)))
        self.assertTrue(re.search("response_type", self.meli.auth_url(redirect_URI=callback)))

    #Mock requests
    # These are plain functions (no self); they replace the requests module's
    # verb functions below.  `params` arrives as the urlencoded string that
    # Meli builds, so substring tests like `"access_token" in params` work.
    # NOTE(review): the mutable default arguments (params={}, headers={}) are
    # kept as-is; harmless here because the stubs never mutate them.
    def mockGet(url, path=None, params={},headers={}, data=None, body=None):

        response = requests.Response()

        # /users/me requires a token; everything else succeeds unconditionally.
        if re.search("/users/me", url):
            if "access_token" in params:
                response.status_code = 200
            else:
                response.status_code = 403
        elif re.search("/authorization", url):
            response.status_code = 200
        else:
            response.status_code = 200
        return response

    def mockPost(url, path=None, body=None, params={},headers={}, data=None):
        response = requests.Response()

        if re.search("/oauth/token", url):
            # Token endpoint: demand the full credential set, then answer
            # according to the grant type carried in the encoded params.
            if "grant_type" not in params or "client_id" not in params or "client_secret" not in params:
                response.status_code = 403
            else:
                if re.search("grant_type=authorization_code", params):
                    content = {'access_token' : 'a access_token', 'refresh_token' : 'a refresh_token'}
                elif re.search("grant_type=refresh_token", params):
                    content = {'access_token' : 'a new access_token', 'refresh_token' : 'a new refresh_token'}
                # NOTE(review): content lacks 'expires_in', which Meli.authorize
                # reads; also _content is set to a str where requests normally
                # stores bytes -- verify these stubs against the current
                # requests version.
                response._content = json.dumps(content)
                response.status_code = 200
        else:
            # Any other POST just checks for a token.
            if "access_token" in params:
                response.status_code = 200
            else:
                response.status_code = 403

        return response

    def mockPut(url, path=None, body=None, params={},headers={}, data=None):
        response = requests.Response()
        if "access_token" in params:
            response.status_code = 200
        else:
            response.status_code = 403
        return response

    def mockDelete(url, path=None, params={},headers={}, data=None, body=None):
        response = requests.Response()
        if "access_token" in params:
            response.status_code = 200
        else:
            response.status_code = 403
        return response

    # Install the stubs globally (executes while the class body is evaluated).
    requests.get = mockGet
    requests.post = mockPost
    requests.put = mockPut
    requests.delete = mockDelete

    #requests tests
    def testGet(self):
        response = self.meli.get(path="/items/test1")
        self.assertEqual(response.status_code, requests.codes.ok)

    def testPost(self):
        body = {"condition":"new", "warranty":"60 dias", "currency_id":"BRL", "accepts_mercadopago":True, "description":"Lindo Ray_Ban_Original_Wayfarer", "listing_type_id":"bronze", "title":"oculos Ray Ban Aviador Que Troca As Lentes Lancamento!", "available_quantity":64, "price":289, "subtitle":"Acompanha 3 Pares De Lentes!! Compra 100% Segura", "buying_mode":"buy_it_now", "category_id":"MLB5125", "pictures":[{"source":"http://upload.wikimedia.org/wikipedia/commons/f/fd/Ray_Ban_Original_Wayfarer.jpg"}, {"source":"http://en.wikipedia.org/wiki/File:Teashades.gif"}] }
        response = self.meli.post(path="/items",body=body,params={'access_token' : self.meli.access_token})
        self.assertEqual(response.status_code, requests.codes.ok)

    def testPut(self):
        body = {"title":"oculos edicao especial!", "price":1000 }
        response = self.meli.put(path="/items/test1",body=body,params={'access_token' : self.meli.access_token})
        self.assertEqual(response.status_code, requests.codes.ok)

    def testDelete(self):
        response = self.meli.delete(path="/questions/123",params={'access_token' : self.meli.access_token})
        self.assertEqual(response.status_code, requests.codes.ok)

    def testWithoutAccessToken(self):
        # /users/me without a token must be rejected by the mock.
        response = self.meli.get(path="/users/me")
        self.assertEqual(response.status_code, requests.codes.forbidden)

    def testWithAccessToken(self):
        response = self.meli.get(path="/users/me",params={'access_token' : self.meli.access_token})
        self.assertEqual(response.status_code, requests.codes.ok)

    #auth tests
    def testAuthorize(self):
        # Clear the tokens so we can observe authorize() repopulating them
        # from the mocked token-endpoint response.
        self.meli.access_token = None
        self.meli.refresh_token = None
        response = self.meli.authorize(code="a code from get param", redirect_URI="A redirect Uri")
        self.assertEqual(self.meli.access_token, self.ACCESS_TOKEN)
        self.assertEqual(self.meli.refresh_token, self.REFRESH_TOKEN)


    def testDoRefreshToken(self):
        # The refresh grant must replace both tokens with the "new" pair.
        response = self.meli.get_refresh_token()
        self.assertEqual(self.meli.access_token, self.NEW_ACCESS_TOKEN)
        self.assertEqual(self.meli.refresh_token, self.NEW_REFRESH_TOKEN)


if __name__ == '__main__':
    unittest.main()
|
code/python-sdk/teste.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append('../lib')
|
| 3 |
+
from meli import Meli
|
es/test.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:562aaf7ce44740cd08d1b835be5e71f47988ca786c0b4ef3e71a91c96d3c77c9
|
| 3 |
+
size 6295693
|
es/train.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f0ed9275e72b7fa8198a5df1367c4921297c8b602e9f1e46b00c4a2b139f97f
|
| 3 |
+
size 116501559
|
es/validation.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:393e77005d0f3b138934f2c8a050fe915aa88dbcddc884050762731a7f55d7ac
|
| 3 |
+
size 5143755
|
pt/test.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:604cb4edefd266a285d64714436b1b7367625c0f835599de36450671ea9e0900
|
| 3 |
+
size 6415294
|
pt/train.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fab8152362803376e07df7e2659791c4fd030ae1bbddde02c6ed307456f1cf84
|
| 3 |
+
size 62857885
|
pt/validation.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8edb7907b18fab9ff7404206b380b50a56388940eac9deff774f541361ad275c
|
| 3 |
+
size 5135145
|