code stringlengths 17 6.64M |
|---|
def map_texts_with_prompts(texts: Dict[str, Dict[str, List[str]]], prompts: Dict[str, str], translate_args: Dict[str, Any]) -> Dict[str, Dict[str, List[str]]]:
    """Prepend each config's few-shot prompt to every text of every field.

    Args:
    - texts: mapping config -> field -> list of raw texts.
    - prompts: mapping config -> few-shot prompt for that config.
    - translate_args: translation configuration forwarded to text_with_prompt.

    Returns:
    - Mapping config -> field -> list of prompted texts.
    """
    texts_with_prompts = defaultdict(dict)
    for config in texts:
        # BUG FIX: the original iterated dataset_args['dataset_fields'], but
        # `dataset_args` is not in scope here (NameError). The fields present
        # in texts[config] are exactly the configured dataset fields.
        for field in texts[config]:
            texts_with_prompts[config][field] = [
                text_with_prompt(text, prompt=prompts[config], translate_args=translate_args)
                for text in texts[config][field]
            ]
    return texts_with_prompts
|
def extract_translations(translations: List[str], texts: List[str], translate_args: Dict[str, Any]) -> List[str]:
    """Strip prompt echoes from raw model outputs, in place.

    Args:
    - translations: raw decoded model outputs (mutated in place).
    - texts: the prompted inputs that produced each output.
    - translate_args: must contain 'model_name' to select the cleanup rule.

    Returns:
    - The same list, with each entry reduced to the extracted translation.
    """
    model = translate_args['model_name']
    for idx, source in enumerate(texts):
        raw = translations[idx]
        if 'xglm' in model:
            # xglm outputs end with 'English: <translation>'.
            cleaned = raw.split('English:')[-1].strip()
        else:
            # Decoder-only models echo the prompt: drop it, then keep the
            # first line ('\\n' is the literal escape produced upstream).
            cleaned = raw[len(source):].split('\\n')[0].strip()
            if 'bloom' in model:
                pass  # echo removal is all bloom needs
            elif 'llama' in model.lower() and ': ' in cleaned:
                # LLaMA often prefixes 'Translation: ...'; keep the payload.
                cleaned = cleaned.split(': ')[1]
        translations[idx] = cleaned
    return translations
|
def translate_texts(dataset: DatasetDict, texts: Dict[str, Dict[str, List[str]]], translate_args: Dict[str, Any], dataset_args: Dict[str, Any]) -> None:
    """Translate every configured field of every config and save each config.

    Args:
    - dataset: the source DatasetDict (copied per config as the output base).
    - texts: mapping config -> field -> prompted texts to translate.
    - translate_args: translation configuration; 'source_lang' is set per config.
    - dataset_args: dataset configuration (configs, fields, language codes).

    Returns:
    - None (results are written to disk via save_file).
    """
    translations = {}
    for config in dataset_args['dataset_configs']:
        config_out = dataset[config].to_dict()
        translate_args['source_lang'] = dataset_args['lang_codes'][config]
        print(f'Translating from {config}')
        for field in dataset_args['dataset_fields']:
            raw = translate_few_shot.main(sentences_list=texts[config][field], return_output=True, **translate_args)
            config_out[field] = extract_translations(raw, texts[config][field], translate_args)
        translations[config] = config_out
        save_file(config_out, config, translate_args, dataset_args)
|
def save_file(translations: Dict[str, List[str]], config: str, translate_args: Dict[str, Any], dataset_args: Dict[str, Any]) -> None:
    """Save one config's translations as TSV or JSONL under a per-model dir.

    Args:
    - translations: mapping column name -> column values.
    - config: config name substituted into dataset_args['filename'].
    - translate_args: must contain 'model_name' (used for the directory name).
    - dataset_args: must contain 'file_path' and a 'filename' template.

    Raises:
    - ValueError: if the filename ends in neither '.tsv' nor '.jsonl'.
    """
    name = translate_args['model_name'].split('/')[-1]
    if 'LLaMA' in translate_args['model_name']:
        # Normalise LLaMA checkpoints to a lowercase-prefixed directory name.
        name = f'llama-{name}'
    dirname = f"{dataset_args['file_path']}/{name}"
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs(dirname, exist_ok=True)
    translated_df = pd.DataFrame(translations)
    filename = f"{dirname}/{dataset_args['filename'].format(config=config)}"
    if filename.endswith('.tsv'):
        translated_df.to_csv(filename, sep='\t', index=False)
    elif filename.endswith('.jsonl'):
        translated_df.to_json(filename, orient='records', lines=True)
    else:
        raise ValueError('Unknown file format')
|
def main(translate_args: Dict[str, Any], dataset_args: Dict[str, Any]) -> None:
    """Few-shot translation pipeline: load data, build prompts, translate, save.

    Args:
    - translate_args: translation configuration.
    - dataset_args: dataset configuration.

    Returns:
    - None (output is written to disk).
    """
    dataset = get_dataset(dataset_args)
    texts = get_texts(dataset, dataset_args)
    few_shot_dataset = get_few_shot_dataset(dataset_args)
    # Four in-context examples per prompt.
    prompts = get_few_shot_prompts(few_shot_dataset, dataset_args, translate_args, shots=4)
    prompted = map_texts_with_prompts(texts, prompts, translate_args=translate_args)
    translate_texts(dataset, prompted, translate_args, dataset_args)
|
def get_dataset(dataset_args):
    """Load every configured split into a DatasetDict keyed by config name."""
    dataset = DatasetDict()
    split = dataset_args['dataset_split']
    for cfg in dataset_args['dataset_configs']:
        dataset[cfg] = load_dataset(dataset_args['dataset'], cfg, split=split)
    return dataset
|
def get_texts(dataset, dataset_args):
    """Extract the configured fields of each config into plain nested dicts."""
    texts = defaultdict(dict)
    for cfg in dataset_args['dataset_configs']:
        texts[cfg] = {field: dataset[cfg][field] for field in dataset_args['dataset_fields']}
    return texts
|
def translate_texts(dataset, texts, translate_args, dataset_args):
    """Translate each configured field of every config and save per config."""
    translations = {}
    for config in dataset_args['dataset_configs']:
        out = dataset[config].to_dict()
        translate_args['source_lang'] = dataset_args['lang_codes'][config]
        print(f'Translating from {config}')
        for field in dataset_args['dataset_fields']:
            out[field] = translate.main(sentences_list=texts[config][field], return_output=True, **translate_args)
        translations[config] = out
        save_file(out, config, translate_args, dataset_args)
|
def save_file(translations, config, translate_args, dataset_args):
    """Save one config's translations as TSV or JSONL under a per-model dir.

    Args:
    - translations: mapping column name -> column values.
    - config: config name substituted into dataset_args['filename'].
    - translate_args: must contain 'model_name' (last path segment names the dir).
    - dataset_args: must contain 'file_path' and a 'filename' template.

    Raises:
    - ValueError: if the filename ends in neither '.tsv' nor '.jsonl'.
    """
    name = translate_args['model_name'].split('/')[-1]
    dirname = f"{dataset_args['file_path']}/{name}"
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs(dirname, exist_ok=True)
    translated_df = pd.DataFrame(translations)
    filename = f"{dirname}/{dataset_args['filename'].format(config=config)}"
    if filename.endswith('.tsv'):
        translated_df.to_csv(filename, sep='\t', index=False)
    elif filename.endswith('.jsonl'):
        translated_df.to_json(filename, orient='records', lines=True)
    else:
        raise ValueError('Unknown file format')
|
def main(translate_args, dataset_args):
    """Zero-shot translation pipeline: load the dataset, translate, save."""
    dataset = get_dataset(dataset_args)
    texts = get_texts(dataset, dataset_args)
    translate_texts(dataset, texts, translate_args, dataset_args)
|
def encode_string(text):
    """Escape CR, LF and TAB so each sentence stays on one physical line."""
    escapes = str.maketrans({'\r': '\\r', '\n': '\\n', '\t': '\\t'})
    return text.translate(escapes)
|
def get_dataloader(accelerator: Accelerator, translate_data, tokenizer: PreTrainedTokenizerBase, batch_size: int, max_length: int) -> DataLoader:
    """Build a DataLoader over translate_data with backend-appropriate padding."""
    dataset = DatasetReader(translate_data, tokenizer, max_length)
    # TPUs need static shapes, so pad every batch to max_length there;
    # elsewhere pad dynamically to a multiple of 8 for tensor-core efficiency.
    on_tpu = accelerator.distributed_type == DistributedType.TPU
    pad_kwargs = (
        {'padding': 'max_length', 'max_length': max_length}
        if on_tpu
        else {'padding': True, 'pad_to_multiple_of': 8}
    )
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        label_pad_token_id=tokenizer.pad_token_id,
        return_tensors='pt',
        **pad_kwargs,
    )
    return DataLoader(dataset, batch_size=batch_size, collate_fn=data_collator, num_workers=0)
|
def main(source_lang: str, target_lang: str, starting_batch_size: int, model_name: str='facebook/m2m100_1.2B', cache_dir: str=None, precision: str='32', max_length: int=128, max_new_tokens: int=128, num_beams: int=4, num_return_sequences: int=1, do_sample: bool=False, temperature: float=1.0, top_k: int=50, top_p: float=1.0, keep_special_tokens: bool=False, eos_token: str='</s>', sentences_path: str=None, output_path: str=None, sentences_list: str=None, return_output: bool=False):
    """Generate translations with a causal LM, distributed via HF Accelerate.

    Sentences come either from `sentences_path` (one per line) or from
    `sentences_list`. Outputs are newline-escaped via encode_string and
    either written to `output_path` or returned when `return_output` is True.

    NOTE(review): despite the `str` annotation, `sentences_list` appears to
    be a list of strings — confirm with callers.
    NOTE(review): `eos_token` is accepted but never used in this body.
    """
    if (not return_output):
        # Writing to disk: make sure the output directory exists up front.
        os.makedirs(os.path.abspath(os.path.dirname(output_path)), exist_ok=True)
    accelerator = Accelerator(mixed_precision=(precision if (precision != '32') else 'no'), split_batches=False, dispatch_batches=False)
    print(f'Loading tokenizer {model_name}...')
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name, cache_dir=cache_dir, trust_remote_code=('xgen' in model_name), use_fast=('polylm' not in model_name))
    # Left padding so decoder-only generation continues from the true prompt end.
    tokenizer.padding_side = 'left'
    if (tokenizer.pad_token_id is None):
        # Models without a pad token reuse EOS for padding.
        tokenizer.pad_token_id = tokenizer.eos_token_id
    print(f'Loading model {model_name}...')
    model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_name, cache_dir=cache_dir, trust_remote_code=(True if ('falcon' in model_name) else False))
    model.eval()
    print(f'''Preparing data...
''')
    # Cast weights to the requested precision.
    if (precision == '32'):
        model = model.float()
    elif (precision == 'fp16'):
        model = model.half()
    elif (precision == 'bf16'):
        model = model.bfloat16()
    else:
        raise ValueError('Precision not supported. Supported values: 32, fp16, bf16')
    gen_kwargs = {'max_new_tokens': max_new_tokens, 'num_beams': num_beams, 'num_return_sequences': num_return_sequences, 'do_sample': do_sample, 'temperature': temperature, 'top_k': top_k, 'top_p': top_p}
    total_lines: int = (count_lines(sentences_path) if (sentences_list == None) else len(sentences_list))
    if accelerator.is_main_process:
        print(f'''** Translation **
Input file: {sentences_path}
Output file: {output_path}
Source language: {source_lang}
Target language: {target_lang}
Starting batch size: {starting_batch_size}
Device: {str(accelerator.device).split(':')[0]}
Num. Devices: {accelerator.num_processes}
Distributed_type: {accelerator.distributed_type}
Max length: {max_length}
Precision: {model.dtype}
Model: {model_name}
''')
        print('** Generation parameters **')
        print('\n'.join((f'{k}: {v}' for (k, v) in gen_kwargs.items())))
        print('\n')

    # Sink for decoded outputs: accumulate in memory or stream to a file.
    def save_sentences(tgt_text: list):
        nonlocal return_output, output_path
        if return_output:
            save_sentences.sentences.extend(tgt_text)
        else:
            print('\n'.join(tgt_text), file=save_sentences.f)
    if (not return_output):
        # NOTE(review): this handle is never closed explicitly — relies on
        # interpreter shutdown to flush; confirm acceptable.
        save_sentences.f = open(output_path, 'w', encoding='utf-8')

    # find_executable_batch_size retries with a halved batch size on OOM.
    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inference(batch_size):
        nonlocal model, tokenizer, sentences_path, max_length, output_path, gen_kwargs, precision, sentences_list, return_output
        print(f'Translating with batch size {batch_size}')
        translate_data = (sentences_path if (sentences_list == None) else sentences_list)
        data_loader = get_dataloader(accelerator=accelerator, translate_data=translate_data, tokenizer=tokenizer, batch_size=batch_size, max_length=max_length)
        (model, data_loader) = accelerator.prepare(model, data_loader)
        samples_seen: int = 0
        # Reset the in-memory sink on every (possibly retried) attempt.
        save_sentences.sentences = []
        with tqdm(total=total_lines, desc='Dataset translation', leave=True, ascii=True, disable=(not accelerator.is_main_process)) as pbar:
            with torch.no_grad():
                for (step, batch) in enumerate(data_loader):
                    batch['input_ids'] = batch['input_ids']
                    batch['attention_mask'] = batch['attention_mask']
                    # Some tokenizers emit token_type_ids, which generate() rejects.
                    batch = {k: v for (k, v) in batch.items() if (k != 'token_type_ids')}
                    generated_tokens = accelerator.unwrap_model(model).generate(**batch, **gen_kwargs)
                    # Equalise sequence lengths across processes before gathering.
                    generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
                    generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
                    tgt_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=(not keep_special_tokens))
                    if accelerator.is_main_process:
                        # On the final step, drop the duplicates introduced by
                        # cross-process padding of the last incomplete batch.
                        if (step == (math.ceil((math.ceil((total_lines / batch_size)) / accelerator.num_processes)) - 1)):
                            tgt_text = tgt_text[:((total_lines * num_return_sequences) - samples_seen)]
                        else:
                            samples_seen += len(tgt_text)
                        save_sentences([encode_string(sentence) for sentence in tgt_text])
                    pbar.update((len(tgt_text) // gen_kwargs['num_return_sequences']))
    inference()
    print(f'''Translation done.
''')
    if return_output:
        return save_sentences.sentences
|
def parse_arguments():
    """Build and parse the CLI for the Elasticsearch index-management tool.

    Subcommands: create, delete, index, reindex.
    """
    parser = argparse.ArgumentParser(description='Tool for managing Elasticsearch indices')
    subparsers = parser.add_subparsers()

    create_cmd = subparsers.add_parser('create', help='Create Elasticsearch index')
    create_cmd.add_argument('-i', '--index', required=True, help='Name of the new index')
    create_cmd.add_argument('-m', '--mappings', type=argparse.FileType('r'), required=True, help='File where mappings configuration is store for the index')

    delete_cmd = subparsers.add_parser('delete', help='Delete one or more Elasticsearch indices')
    delete_cmd.add_argument('-e', '--erase', nargs='*', required=True, help='Name of index to delete')

    index_cmd = subparsers.add_parser('index', help='Index JSON files')
    index_cmd.add_argument('-d', '--dir', required=True, help='Directory where the JSON files are stored. Be sure that in this path to the Spider folder!!!')
    index_cmd.add_argument('-l', '--location', required=True, help='Name of the index where to index JSON files. If index do not exist it is created')
    index_cmd.add_argument('-t', '--item-type', required=True, help='Name of type to be stored in ES index')
    index_cmd.add_argument('-c', '--chunk-size', type=int, nargs='?', default=500, help='Number of JSON line to load into memory before indexing')

    reindex_cmd = subparsers.add_parser('reindex', help='Reindex index')
    reindex_cmd.add_argument('-s', '--source', required=True, help='Source index where documents are stored')
    reindex_cmd.add_argument('-t', '--target', required=True, help='Target index where to move documents')

    return parser.parse_args()
|
def create_index(es, index_name, body):
    """Create index_name with the given mapping body unless it already exists."""
    if es.indices.exists(index_name):
        return
    es.indices.create(index=index_name, body=body)
|
def delete_indices(es, indices_name):
    """Delete each named index; log (do not fail) the ones that do not exist."""
    for name in indices_name:
        if not es.indices.exists(name):
            logger.info('Index `{}` not found'.format(name))
            continue
        es.indices.delete(index=name)
|
def reindex(es, source_index, target_index):
    """Copy all documents from source_index to target_index via helpers.reindex."""
    helpers.reindex(es, source_index=source_index, target_index=target_index)
|
def lazy_indexing(es, path, chunck, index, item_type):
    """Stream-index every *.json file under `path` (recursively) into ES.

    Each file is treated as JSON Lines and fed to helpers.bulk in chunks of
    `chunck` actions, so whole files are never held in memory.
    """
    def serialize_json(json_line):
        # Normalise placeholder '---' values to None and coerce the
        # publication date into a real date where a known format matches.
        to_null = ['author', 'article_tag', 'list_of_tags', 'keywords', 'news_keywords']
        for tag in to_null:
            if (json_line[tag] == '---'):
                json_line[tag] = None
        if (json_line['publication_date'] == '---'):
            # Sentinel datetime for missing dates.
            json_line['publication_date'] = datetime.strptime('1900-01-01', '%Y-%m-%d')
        else:
            try:
                json_line['publication_date'] = datetime.strptime(json_line['publication_date'], '%d %B %Y').date()
            except ValueError:
                try:
                    # NOTE(review): '%H:%S' (hour:second, no minutes) looks
                    # like a typo for '%H:%M' — confirm against real data.
                    json_line['publication_date'] = datetime.strptime(json_line['publication_date'].replace('T', ' '), '%Y-%m-%d %H:%S')
                except ValueError:
                    # No format matched: leave the raw string in place.
                    pass
        return json_line
    def lazy_json_load(filename):
        # Generator of bulk index actions, keyed by the article URL.
        with open(filename) as infile:
            for line in infile:
                json_line = json.loads(line)
                formattd_json_line = serialize_json(json_line)
                index_action = {'_index': index, '_type': item_type, '_id': formattd_json_line['url'], '_source': formattd_json_line}
                (yield index_action)
    # Skip crawler state files named 'active.json'.
    files = [file for file in glob.glob((path + '/**/*.json'), recursive=True) if (not ('active.json' in file.split('/')))]
    logger.info('Fond {0} documents to index'.format(len(files)))
    for filename in files:
        logger.info('Indexing : {}'.format(filename))
        # NOTE(review): doc_type is hard-coded to 'news_article' while the
        # actions carry item_type — confirm which one is intended.
        helpers.bulk(client=es, chunk_size=chunck, actions=lazy_json_load(filename), index=index, doc_type='news_article', stats_only=True)
|
def groupByQuery(eintrag, eintrag_spalte):
    """Return the rows of the module-level `dataset` whose column
    `eintrag_spalte` equals `eintrag`."""
    grouped = dataset.groupby(eintrag_spalte)
    return grouped.get_group(eintrag)
|
def groupByQuery(eintrag, eintrag_spalte):
    """Select the group of rows in the module-level `dataset` where column
    `eintrag_spalte` has the value `eintrag`."""
    by_column = dataset.groupby(eintrag_spalte)
    return by_column.get_group(eintrag)
|
def gendata(records, index, type):
    """Yield one Elasticsearch bulk index action per (id, document) pair.

    Args:
    - records: mapping of document id -> document body.
    - index: target index name.
    - type: unused; kept for interface compatibility with callers.
    """
    # .items() instead of zip(keys(), values()): same pairs, clearer intent.
    for doc_id, source in records.items():
        yield {'_index': index, '_id': doc_id, '_source': source}
|
def extract_classifications(line):
    """Extract IPCR classification texts from the XML snippet embedded in `line`.

    Returns a list of classification strings; on XML parse failure a
    diagnostic is printed and whatever was collected so far is returned.
    """
    classifications_list = []
    start_classification = line.find('<classifications-ipcr>')
    # End offset is relative to the start; +23 covers len('</classifications-ipcr>').
    relative_end_classification = (line[start_classification:].find('</classifications-ipcr>') + 23)
    classification_string = line[start_classification:(start_classification + relative_end_classification)]
    try:
        treeRoot = ET.fromstring(classification_string)
        for classifications in treeRoot.findall('classification-ipcr'):
            for classification in classifications:
                classifications_list.append(classification.text)
    except ET.ParseError:
        # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
        print(('error classification for line: ' + classification_string))
    return classifications_list
|
def extract_citationIDs(application_identifier, line):
    """Collect '<identifier>_<nnnn>' ids for every 'sr-cit' token found in
    the search-report column (7th tab-separated field) of `line`."""
    ids = []
    for word in line.split('\t')[6].split(' '):
        pos = word.find('sr-cit')
        if pos != -1:
            # The 4 characters after 'sr-cit' are the citation number.
            ids.append(application_identifier + '_' + word[(pos + 6):(pos + 10)])
    return ids
|
def normalize_claims(claims):
    """Expand a comma-separated claim list with ranges: '1,3-5' -> [1, 3, 4, 5]."""
    expanded = []
    for token in claims.split(','):
        if '-' in token:
            low, high = token.split('-')[0], token.split('-')[1]
            expanded.extend(range(int(low), int(high) + 1))
        else:
            expanded.append(int(token))
    return expanded
|
def extract_citation_entry(citation_id, searchreport_line):
    """Parse one <citation> XML element out of a search-report line.

    The last 4 characters of `citation_id` locate the element by its
    sr-citNNNN id. Returns a flat dict with per-category claim lists,
    concatenated related passages, and the cited document's bibliographic
    fields; returns an empty dict when the XML cannot be parsed.
    """
    citation = {}
    # Locate the <citation id="sr-citNNNN"> ... </citation> span textually;
    # +11 covers len('</citation>').
    start_citation = searchreport_line.find(('<citation id="sr-cit' + citation_id[(- 4):]))
    relative_end_citation = (searchreport_line[start_citation:].find('</citation>') + 11)
    citation_string = searchreport_line[start_citation:(start_citation + relative_end_citation)]
    try:
        treeRoot = ET.fromstring(citation_string)
    except:
        print(('error citation for line: ' + citation_string))
        return citation
    treeRoot.findall('category')  # NOTE(review): result discarded — looks like a leftover no-op.
    # Children arrive in document order; a <category> applies to the
    # <rel-claims>/<rel-passage> siblings that follow it.
    last_category = ''
    for element in treeRoot:
        if (element.tag == 'category'):
            last_category = element.text
        elif (element.tag == 'rel-claims'):
            for category in last_category.split(','):
                citation.update({(('category' + '_') + category): normalize_claims(element.text)})
        elif (element.tag == 'rel-passage'):
            for category in last_category.split(','):
                for passage in element:
                    # Concatenate all passage texts for this category.
                    old_rel_passage = citation.get((('rel-passage' + '_') + category))
                    if (old_rel_passage == None):
                        old_rel_passage = ''
                    citation.update({(('rel-passage' + '_') + category): (old_rel_passage + passage.text)})
        elif (element.tag == 'patcit'):
            # Patent citation: record identifiers and bibliographic data.
            citation.update({'dnum': element.attrib['dnum']})
            citation.update({'url': element.attrib['url']})
            for subelement in element:
                if (subelement.tag == 'document-id'):
                    for child in subelement:
                        if (child.tag == 'country'):
                            citation.update({'country': child.text})
                        elif (child.tag == 'doc-number'):
                            citation.update({'doc-number': child.text})
                        elif (child.tag == 'kind'):
                            citation.update({'kind': child.text})
                        elif (child.tag == 'name'):
                            citation.update({'name': child.text})
                        elif (child.tag == 'date'):
                            citation.update({'date': child.text})
        elif (element.tag == 'nplcit'):
            # Non-patent literature citation: just flag its presence.
            citation.update({'nplcit': 'true'})
    if ((last_category != '') and ((('rel-passage' + '_') + last_category.split(',')[0]) not in citation.keys())):
        # Diagnostic (German: "category without rel-passage").
        print(((('Kategorie ohne rel-passage, Citation ID/String: ' + citation_id) + ' / ') + citation_string))
    return citation
|
def main(file):
    """Parse an EP patent dump file and bulk-upload applications and citations.

    Every English ('\ten\t') line opens or extends a record keyed by its
    application identifier; tagged lines (TITLE, ABSTR, DESCR, CLAIM, AMEND,
    ACSTM, SRPRT, PDFEP) fill in the record's fields, and SRPRT lines
    additionally produce citation entries uploaded separately.
    """
    records = {}
    citations = {}
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file, 'r', encoding='utf8', errors='ignore') as f:
        lines = f.readlines()
    for line in lines:
        if ('\ten\t' in line):
            application_identifier = line.split('EP\t')[1].split('\ten\t')[0].replace('\t', '')
            application_number = line.split('EP\t')[1].split('\t')[0]
            application_category = line.split('EP\t')[1].split('\t')[1]
            application_date = line.split('EP\t')[1].split('\t')[2]
            if (application_identifier not in records):
                records.update({application_identifier: {'application_number': application_number, 'application_category': application_category, 'application_date': application_date}})
            record = records.get(application_identifier)
            if ('\tTITLE\t' in line):
                record.update({'title': line.split('\tTITLE\t')[1]})
            elif ('\tABSTR\t' in line):
                record.update({'abstract': line.split('\tABSTR\t')[1]})
            elif ('\tDESCR\t' in line):
                record.update({'description': line.split('\tDESCR\t')[1]})
            elif ('\tCLAIM\t' in line):
                record.update({'claims': line.split('\tCLAIM\t')[1]})
            elif ('\tAMEND\t' in line):
                record.update({'amended_claims': line.split('\tAMEND\t')[1]})
            elif ('\tACSTM\t' in line):
                record.update({'amended_claims_statements': line.split('\tACSTM\t')[1]})
            elif ('\tSRPRT\t' in line):
                # Search report: extract classifications and per-citation entries.
                record.update({'citation_ipcr_classification': extract_classifications(line)})
                record.update({'citation_ids': extract_citationIDs(application_identifier, line)})
                for citation_id in record['citation_ids']:
                    print(('evaluate citation id: ' + citation_id))
                    citations.update({citation_id: extract_citation_entry(citation_id, line.split('\tSRPRT\t')[1])})
            elif ('\tPDFEP\t' in line):
                record.update({'publication_url': line.split('\tPDFEP\t')[1]})
            records.update({application_identifier: record})
    upload(records, INDEX_APPL, 'patent_eu')
    upload(citations, INDEX_CIT, 'citation_eu')
|
def createIndexPatentApplications():
    """Create the patent-applications index with explicit field mappings."""
    properties = {
        'application_number': {'type': 'keyword'},
        'application_category': {'type': 'keyword'},
        'application_date': {'type': 'date'},
        'title': {'type': 'text'},
        'abstract': {'type': 'text'},
        'description': {'type': 'text'},
        'claims': {'type': 'text'},
        'amended_claims': {'type': 'text'},
        'amended_claims_statements': {'type': 'text'},
        'citation_ipcr_classification': {'type': 'keyword'},
        'citation_ids': {'type': 'keyword'},
        'publication_url': {'type': 'text'},
    }
    settings = {
        'settings': {'number_of_shards': 1, 'number_of_replicas': 0},
        'mappings': {'properties': properties},
    }
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    # ignore=400: the index may already exist.
    response = es.indices.create(index=INDEX_APPL, ignore=400, body=settings)
    print(response)
|
def createIndexCitations():
    """Create the citations index; malformed values are ignored, not rejected."""
    category_fields = {f'category_{c}': {'type': 'integer'} for c in 'XPADYLOTE'}
    passage_fields = {f'rel-passage_{c}': {'type': 'text'} for c in 'XPADYLOTE'}
    properties = {
        'dnum': {'type': 'keyword'},
        'publication_url': {'type': 'text'},
        'country': {'type': 'keyword'},
        'kind': {'type': 'keyword'},
        'doc_number': {'type': 'keyword'},
        'name': {'type': 'text'},
        'date': {'type': 'date'},
        **category_fields,
        **passage_fields,
        'nplcit': {'type': 'boolean'},
    }
    settings = {
        'settings': {'number_of_shards': 1, 'number_of_replicas': 0, 'index.mapping.ignore_malformed': True},
        'mappings': {'properties': properties},
    }
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    # ignore=400: the index may already exist.
    response = es.indices.create(index=INDEX_CIT, ignore=400, body=settings)
    print(response)
|
def upload(records, index, type):
    """Bulk-index `records` into `index` through a fresh ES connection."""
    client = connections.create_connection(hosts=['http://172.16.64.23:9200/'])
    actions = gendata(records, index, type)
    res = helpers.bulk(client, actions, index=index, chunk_size=1000, request_timeout=200)
    print(res)
|
def gendata(records, index, type):
    """Yield an Elasticsearch bulk index action for each (id, document) pair.

    Args:
    - records: mapping of document id -> document body.
    - index: target index name.
    - type: unused; kept for interface compatibility with callers.
    """
    # .items() instead of zip(keys(), values()): same pairs, clearer intent.
    for doc_id, source in records.items():
        yield {'_index': index, '_id': doc_id, '_source': source}
|
def extract_classifications(line):
    """Extract IPCR classification texts from the XML snippet embedded in `line`.

    Returns a list of classification strings; on XML parse failure a
    diagnostic is printed and whatever was collected so far is returned.
    """
    classifications_list = []
    start_classification = line.find('<classifications-ipcr>')
    # End offset is relative to the start; +23 covers len('</classifications-ipcr>').
    relative_end_classification = (line[start_classification:].find('</classifications-ipcr>') + 23)
    classification_string = line[start_classification:(start_classification + relative_end_classification)]
    try:
        treeRoot = ET.fromstring(classification_string)
        for classifications in treeRoot.findall('classification-ipcr'):
            for classification in classifications:
                classifications_list.append(classification.text)
    except ET.ParseError:
        # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
        print(('error classification for line: ' + classification_string))
    return classifications_list
|
def extract_citationIDs(application_identifier, line):
    """Build '<identifier>_<nnnn>' ids from every 'sr-cit' token in the
    7th tab-separated field of `line`."""
    result = []
    for token in line.split('\t')[6].split(' '):
        offset = token.find('sr-cit')
        if offset >= 0:
            # 4-digit citation number follows the 'sr-cit' marker.
            result.append(application_identifier + '_' + token[(offset + 6):(offset + 10)])
    return result
|
def normalize_claims(claims):
    """Turn a comma-separated claim spec with ranges into a flat int list
    ('1,3-5' -> [1, 3, 4, 5])."""
    result = []
    for part in claims.split(','):
        if '-' not in part:
            result.append(int(part))
            continue
        bounds = part.split('-')
        result.extend(range(int(bounds[0]), int(bounds[1]) + 1))
    return result
|
def extract_citation_entry(citation_id, searchreport_line):
    """Parse one <citation> XML element out of a search-report line.

    The last 4 characters of `citation_id` locate the element by its
    sr-citNNNN id. Returns a flat dict with per-category claim lists,
    concatenated related passages, and the cited document's bibliographic
    fields; returns an empty dict when the XML cannot be parsed.
    """
    citation = {}
    # Locate the <citation id="sr-citNNNN"> ... </citation> span textually;
    # +11 covers len('</citation>').
    start_citation = searchreport_line.find(('<citation id="sr-cit' + citation_id[(- 4):]))
    relative_end_citation = (searchreport_line[start_citation:].find('</citation>') + 11)
    citation_string = searchreport_line[start_citation:(start_citation + relative_end_citation)]
    try:
        treeRoot = ET.fromstring(citation_string)
    except:
        print(('error citation for line: ' + citation_string))
        return citation
    treeRoot.findall('category')  # NOTE(review): result discarded — looks like a leftover no-op.
    # Children arrive in document order; a <category> applies to the
    # <rel-claims>/<rel-passage> siblings that follow it.
    last_category = ''
    for element in treeRoot:
        if (element.tag == 'category'):
            last_category = element.text
        elif (element.tag == 'rel-claims'):
            for category in last_category.split(','):
                citation.update({(('category' + '_') + category): normalize_claims(element.text)})
        elif (element.tag == 'rel-passage'):
            for category in last_category.split(','):
                for passage in element:
                    # Concatenate all passage texts for this category.
                    old_rel_passage = citation.get((('rel-passage' + '_') + category))
                    if (old_rel_passage == None):
                        old_rel_passage = ''
                    citation.update({(('rel-passage' + '_') + category): (old_rel_passage + passage.text)})
        elif (element.tag == 'patcit'):
            # Patent citation: record identifiers and bibliographic data.
            citation.update({'dnum': element.attrib['dnum']})
            citation.update({'url': element.attrib['url']})
            for subelement in element:
                if (subelement.tag == 'document-id'):
                    for child in subelement:
                        if (child.tag == 'country'):
                            citation.update({'country': child.text})
                        elif (child.tag == 'doc-number'):
                            citation.update({'doc-number': child.text})
                        elif (child.tag == 'kind'):
                            citation.update({'kind': child.text})
                        elif (child.tag == 'name'):
                            citation.update({'name': child.text})
                        elif (child.tag == 'date'):
                            citation.update({'date': child.text})
        elif (element.tag == 'nplcit'):
            # Non-patent literature citation: just flag its presence.
            citation.update({'nplcit': 'true'})
    if ((last_category != '') and ((('rel-passage' + '_') + last_category.split(',')[0]) not in citation.keys())):
        # Diagnostic (German: "category without rel-passage").
        print(((('Kategorie ohne rel-passage, Citation ID/String: ' + citation_id) + ' / ') + citation_string))
    return citation
|
def main(file):
    """Parse an EP patent dump file and bulk-upload applications and citations.

    Every English ('\ten\t') line opens or extends a record keyed by its
    application identifier; entries with an empty date are skipped. Tagged
    lines (TITLE, ABSTR, DESCR, CLAIM, AMEND, ACSTM, SRPRT, PDFEP) fill in
    the record's fields; SRPRT lines also produce citation entries.
    """
    records = {}
    citations = {}
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file, 'r', encoding='utf8', errors='ignore') as f:
        lines = f.readlines()
    for line in lines:
        if ('\ten\t' in line):
            application_identifier = line.split('EP\t')[1].split('\ten\t')[0].replace('\t', '')
            application_number = line.split('EP\t')[1].split('\t')[0]
            application_category = line.split('EP\t')[1].split('\t')[1]
            application_date = line.split('EP\t')[1].split('\t')[2]
            if (application_date == ''):
                # The date-typed ES mapping rejects empty dates.
                print(('Skipping entry, because of missing date: ' + application_identifier))
                continue
            if (application_identifier not in records):
                records.update({application_identifier: {'application_number': application_number, 'application_category': application_category, 'application_date': application_date}})
            record = records.get(application_identifier)
            if ('\tTITLE\t' in line):
                record.update({'title': line.split('\tTITLE\t')[1]})
            elif ('\tABSTR\t' in line):
                record.update({'abstract': line.split('\tABSTR\t')[1]})
            elif ('\tDESCR\t' in line):
                record.update({'description': line.split('\tDESCR\t')[1]})
            elif ('\tCLAIM\t' in line):
                record.update({'claims': line.split('\tCLAIM\t')[1]})
            elif ('\tAMEND\t' in line):
                record.update({'amended_claims': line.split('\tAMEND\t')[1]})
            elif ('\tACSTM\t' in line):
                record.update({'amended_claims_statements': line.split('\tACSTM\t')[1]})
            elif ('\tSRPRT\t' in line):
                # Search report: extract classifications and per-citation entries.
                record.update({'citation_ipcr_classification': extract_classifications(line)})
                record.update({'citation_ids': extract_citationIDs(application_identifier, line)})
                for citation_id in record['citation_ids']:
                    print(('evaluate citation id: ' + citation_id))
                    citations.update({citation_id: extract_citation_entry(citation_id, line.split('\tSRPRT\t')[1])})
            elif ('\tPDFEP\t' in line):
                record.update({'publication_url': line.split('\tPDFEP\t')[1]})
            records.update({application_identifier: record})
    upload(records, INDEX_APPL, 'patent_eu')
    upload(citations, INDEX_CIT, 'citation_eu')
|
def createIndexPatentApplications():
    """Create the patent-applications index with its explicit field mappings."""
    field_mappings = {
        'application_number': {'type': 'keyword'},
        'application_category': {'type': 'keyword'},
        'application_date': {'type': 'date'},
        'title': {'type': 'text'},
        'abstract': {'type': 'text'},
        'description': {'type': 'text'},
        'claims': {'type': 'text'},
        'amended_claims': {'type': 'text'},
        'amended_claims_statements': {'type': 'text'},
        'citation_ipcr_classification': {'type': 'keyword'},
        'citation_ids': {'type': 'keyword'},
        'publication_url': {'type': 'text'},
    }
    body = {
        'settings': {'number_of_shards': 1, 'number_of_replicas': 0},
        'mappings': {'properties': field_mappings},
    }
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    # ignore=400: the index may already exist.
    print(es.indices.create(index=INDEX_APPL, ignore=400, body=body))
|
def createIndexCitations():
    """Create the citations index; malformed field values are ignored."""
    categories = 'XPADYLOTE'
    field_mappings = {
        'dnum': {'type': 'keyword'},
        'publication_url': {'type': 'text'},
        'country': {'type': 'keyword'},
        'kind': {'type': 'keyword'},
        'doc_number': {'type': 'keyword'},
        'name': {'type': 'text'},
        'date': {'type': 'date'},
    }
    field_mappings.update({f'category_{c}': {'type': 'integer'} for c in categories})
    field_mappings.update({f'rel-passage_{c}': {'type': 'text'} for c in categories})
    field_mappings['nplcit'] = {'type': 'boolean'}
    body = {
        'settings': {'number_of_shards': 1, 'number_of_replicas': 0, 'index.mapping.ignore_malformed': True},
        'mappings': {'properties': field_mappings},
    }
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    # ignore=400: the index may already exist.
    print(es.indices.create(index=INDEX_CIT, ignore=400, body=body))
|
def upload(records, index, type):
    """Bulk-index `records` into `index` using the shared ES connection."""
    client = connections.create_connection(hosts=['http://172.16.64.23:9200/'])
    bulk_result = helpers.bulk(client, gendata(records, index, type), index=index, chunk_size=1000, request_timeout=200)
    print(bulk_result)
|
def query_exist_claim():
    """ES query: applications that have both 'citation_ids' and 'claims'."""
    required_fields = ['citation_ids', 'claims']
    filters = [{'exists': {'field': f}} for f in required_fields]
    return {'query': {'bool': {'filter': filters}}}
|
def query_citation_id(citation_entry):
    """ES query: the citation doc with this id, if it carries category_A relations."""
    filters = [
        {'exists': {'field': 'category_A'}},
        {'ids': {'values': [citation_entry]}},
    ]
    return {'query': {'bool': {'filter': filters}}}
|
def process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column):
    """Append one output row per (claim, category-A citation) pair in `response`.

    The six *_column lists are mutated in place; they become the columns of
    the DataFrame assembled by main().
    """
    print(response)
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        patent_application_id = element.get('_id')
        claims_text_raw = element.get('_source').get('claims')
        # Highest claim number: two digits after the last '<claim id="c-en-00' marker.
        # NOTE(review): assumes claim ids are zero-padded and <= 99 — confirm.
        max_claim = int(claims_text_raw.split('<claim id="c-en-00')[(- 1)][:2])
        for claim in range(1, (max_claim + 1)):
            for citation_id in element.get('_source').get('citation_ids'):
                print(citation_id)
                # Only citations carrying category_A match this query.
                response_citation = es.search(index='ep_patent_citations', body=query_citation_id(citation_id), size=10000)
                print(response_citation)
                try:
                    response_citation.get('hits').get('hits')[0].get('_source')
                except:
                    # No matching citation document; skip this citation id.
                    continue
                response_rel_claims = response_citation.get('hits').get('hits')[0].get('_source').get('category_A')
                response_rel_passage = response_citation.get('hits').get('hits')[0].get('_source').get('rel-passage_A')
                if (claim in response_rel_claims):
                    try:
                        # Slice this claim's text between its opening and closing tags.
                        application_claim_text_column.append(claims_text_raw.split((((('<claim id="c-en-00' + '{:02d}'.format(claim)) + '" num="00') + '{:02d}'.format(claim)) + '">'))[1].split('</claim>')[0])
                    except:
                        print(((('Discarded Claim. ID: ' + str(claim)) + ', Patent Application ID: ') + str(patent_application_id)))
                        continue
                    # Only append the remaining columns once the text append
                    # succeeded, so all columns stay the same length.
                    patent_application_id_column.append(patent_application_id)
                    patent_citation_column.append(citation_id)
                    application_claim_number_column.append(claim)
                    related_passages_against_claim_column.append(response_rel_passage)
                    category_column.append('A')
|
def main():
    """Scroll through all applications that have claims and citations and
    write category-'A' claim/passage pairs to ./frame_negativeSamples.csv."""
    patent_application_id_column = []
    patent_citation_column = []
    application_claim_number_column = []
    application_claim_text_column = []
    related_passages_against_claim_column = []
    category_column = []
    # NOTE(review): hard-coded cluster address - consider moving to config.
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    response = es.search(index='ep_patent_applications', body=query_exist_claim(), scroll='2m')
    print(response)
    sid = response.get('_scroll_id')
    scroll_size = len(response['hits']['hits'])
    process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column)
    while (scroll_size > 0):
        # Fix: this was a bare string literal (a no-op); make it a real log line.
        print('Scrolling...')
        response = es.scroll(scroll_id=sid, scroll='2m')
        process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column)
        sid = response['_scroll_id']
        scroll_size = len(response['hits']['hits'])
    column_data = {'patent_application_id': patent_application_id_column, 'patent_citation_id': patent_citation_column, 'application_claim_number': application_claim_number_column, 'application_claim_text': application_claim_text_column, 'related_passages_against_claim': related_passages_against_claim_column, 'category': category_column}
    print(column_data)
    df = pd.DataFrame(data=column_data, columns=['patent_application_id', 'patent_citation_id', 'application_claim_number', 'application_claim_text', 'related_passages_against_claim', 'category'])
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_colwidth', 30)
    df.to_csv('./frame_negativeSamples.csv')
|
def query_exist_claim():
    """ES query: documents that carry both 'citation_ids' and 'claims' fields."""
    needed = ('citation_ids', 'claims')
    return {'query': {'bool': {'filter': [{'exists': {'field': f}} for f in needed]}}}
|
def query_citation_id(citation_entry):
    """ES query: the document with this id that also has a 'category_X' field."""
    filters = [
        {'exists': {'field': 'category_X'}},
        {'ids': {'values': [citation_entry]}},
    ]
    return {'query': {'bool': {'filter': filters}}}
|
def process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column):
    """Extract category-'X' citation/claim pairs from one ES response page.

    Same row-building scheme as the category-'A' variant: for every
    application hit and every claim/citation combination, appends one row
    across the parallel output lists when the claim number appears in the
    citation's 'category_X' list.

    Args:
        es: Elasticsearch client used for per-citation lookups.
        response: one search/scroll page from 'ep_patent_applications'.
        *_column: parallel output lists, appended to in place.
    """
    print(response)
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        patent_application_id = element.get('_id')
        claims_text_raw = element.get('_source').get('claims')
        # Highest claim number from the last claim marker; assumes <= 99 claims.
        max_claim = int(claims_text_raw.split('<claim id="c-en-00')[(- 1)][:2])
        for claim in range(1, (max_claim + 1)):
            for citation_id in element.get('_source').get('citation_ids'):
                print(citation_id)
                response_citation = es.search(index='ep_patent_citations', body=query_citation_id(citation_id), size=10000)
                print(response_citation)
                try:
                    response_citation.get('hits').get('hits')[0].get('_source')
                # Fix: was a bare `except:`; only skip on a missing/malformed hit.
                except (IndexError, AttributeError, TypeError):
                    continue
                response_rel_claims = response_citation.get('hits').get('hits')[0].get('_source').get('category_X')
                response_rel_passage = response_citation.get('hits').get('hits')[0].get('_source').get('rel-passage_X')
                # NOTE(review): assumes 'category_X' stores claim numbers as ints.
                if (claim in response_rel_claims):
                    try:
                        application_claim_text_column.append(claims_text_raw.split((((('<claim id="c-en-00' + '{:02d}'.format(claim)) + '" num="00') + '{:02d}'.format(claim)) + '">'))[1].split('</claim>')[0])
                    # Fix: was a bare `except:`; only IndexError is expected here.
                    except IndexError:
                        print(((('Discarded Claim. ID: ' + str(claim)) + ', Patent Application ID: ') + str(patent_application_id)))
                        continue
                    patent_application_id_column.append(patent_application_id)
                    patent_citation_column.append(citation_id)
                    application_claim_number_column.append(claim)
                    related_passages_against_claim_column.append(response_rel_passage)
                    category_column.append('X')
|
def main():
    """Scroll through all applications that have claims and citations and
    write category-'X' claim/passage pairs to ./frame.csv."""
    patent_application_id_column = []
    patent_citation_column = []
    application_claim_number_column = []
    application_claim_text_column = []
    related_passages_against_claim_column = []
    category_column = []
    # NOTE(review): hard-coded cluster address - consider moving to config.
    es = Elasticsearch(hosts=['http://172.16.64.23:9200/'])
    response = es.search(index='ep_patent_applications', body=query_exist_claim(), scroll='2m')
    print(response)
    sid = response.get('_scroll_id')
    scroll_size = len(response['hits']['hits'])
    process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column)
    while (scroll_size > 0):
        # Fix: this was a bare string literal (a no-op); make it a real log line.
        print('Scrolling...')
        response = es.scroll(scroll_id=sid, scroll='2m')
        process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column)
        sid = response['_scroll_id']
        scroll_size = len(response['hits']['hits'])
    column_data = {'patent_application_id': patent_application_id_column, 'patent_citation_id': patent_citation_column, 'application_claim_number': application_claim_number_column, 'application_claim_text': application_claim_text_column, 'related_passages_against_claim': related_passages_against_claim_column, 'category': category_column}
    print(column_data)
    df = pd.DataFrame(data=column_data, columns=['patent_application_id', 'patent_citation_id', 'application_claim_number', 'application_claim_text', 'related_passages_against_claim', 'category'])
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_colwidth', 30)
    df.to_csv('./frame.csv')
|
def desirable(tag):
    """Keep tokens that can be part of a paragraph reference.

    Accepted tokens: the word 'paragraph', a dash, an opening bracket, or a
    purely numeric token POS-tagged as a cardinal number ('CD').
    """
    word, pos = tag
    if word in ('paragraph', '-', '['):
        return True
    return (pos == 'CD') and word.isdigit()
|
def syntax_right(tag_before_tag, tag):
    """A number is only acceptable directly after 'paragraph...' or a '[' token.

    Non-numeric tokens always pass; a 'CD' (cardinal number) token passes only
    when the preceding token contains 'paragraph' or '['.
    """
    if tag[1] != 'CD':
        return True
    prev_word = tag_before_tag[0]
    return ('paragraph' in prev_word) or ('[' in prev_word)
|
def text_is_range(tag_before_tag, tag, tag_after_tag):
    """True when the three tags form 'NUMBER - NUMBER', i.e. a paragraph range."""
    is_dash = (tag[0] == '-')
    return is_dash and (tag_before_tag[1] == 'CD') and (tag_after_tag[1] == 'CD')
|
def extract_paragraphs(text):
    """Extract the set of paragraph numbers referenced in free text.

    Handles single references ("paragraph 3", "[0005] 7") and ranges
    ("paragraphs 3 - 7", expanded to every number in between).

    Args:
        text: free text, e.g. a search-report citation remark.

    Returns:
        De-duplicated list of paragraph numbers as ints (order not preserved,
        since a set is used for de-duplication).
    """
    # Normalise the plural so only one keyword has to be matched below.
    tokens = nltk.word_tokenize(text.lower().replace('paragraphs', 'paragraph'))
    pos_tags = nltk.pos_tag(tokens)
    # Keep only candidate tokens: 'paragraph', '-', '[' or numeric CD tags.
    pos_tags = [tag for tag in pos_tags if desirable(tag)]
    # Drop numbers that do not directly follow 'paragraph...' or '[';
    # the zip pairs each tag with its predecessor (padded with ('', '')).
    pos_tags = [tag for (tag_before_tag, tag) in zip(([('', '')] + pos_tags[:(- 1)]), pos_tags) if syntax_right(tag_before_tag, tag)]
    # The marker tokens themselves are no longer needed once filtering is done.
    pos_tags = [tag for tag in pos_tags if ((not ('paragraph' in tag[0])) and (not ('[' in tag[0])))]
    # Expand every 'N - M' triple into the full inclusive range [N..M].
    pos_tags_ranges = [list(range(int(tag_before_tag[0]), (int(tag_after_tag[0]) + 1))) for (tag_before_tag, tag, tag_after_tag) in zip(([('', '')] + pos_tags[:(- 1)]), pos_tags, (pos_tags[1:] + [('', '')])) if text_is_range(tag_before_tag, tag, tag_after_tag)]
    # Standalone numbers (everything except the '-' separators).
    pos_tags_numbers_only = [int(tag[0]) for tag in pos_tags if (tag[0] != '-')]
    end_result_paragraph_numbers = pos_tags_numbers_only
    for range_list in pos_tags_ranges:
        end_result_paragraph_numbers = (end_result_paragraph_numbers + range_list)
    # De-duplicate (range endpoints also appear as standalone numbers).
    end_result_paragraph_numbers = list(set(end_result_paragraph_numbers))
    return end_result_paragraph_numbers
|
def getAccessToken():
    """Request an OAuth client-credentials access token from the EPO OPS API.

    Uses the module-level `consumer_key`, `consumer_secret_key` and
    `token_url`. Returns the bearer token string.
    """
    credentials = (consumer_key + ':') + consumer_secret_key
    encoded = base64.b64encode(bytes(credentials, 'utf-8')).decode('utf-8')
    headers = {
        'authorization': ('Basic %s' % encoded),
        'content-type': 'application/x-www-form-urlencoded',
    }
    token_response = requests.post(token_url, headers=headers, data='grant_type=client_credentials')
    return token_response.json()['access_token']
|
def getEquivalents(number):
    """Query the EPO OPS 'equivalents' endpoint for a publication number.

    Args:
        number: publication identifier (country code + doc number) sent as the
            plain-text request body to the module-level `request_url`.

    Returns:
        List of equivalent doc-numbers; empty when the response cannot be
        parsed or contains no equivalents element.
    """
    access_token = getAccessToken()
    equivalent = []
    payload = number
    header = {'authorization': ('Bearer %s' % access_token), 'content-type': 'text/plain'}
    request_equivalent = requests.post(request_url, headers=header, data=payload)
    response = request_equivalent.text
    try:
        root = ET.fromstring(response)
        for inquiry_result in list(root.iter('{http://ops.epo.org}equivalents-inquiry'))[0].iter('{http://ops.epo.org}inquiry-result'):
            for publication_reference in inquiry_result.iter('{http://www.epo.org/exchange}publication-reference'):
                for document_id in publication_reference.iter('{http://www.epo.org/exchange}document-id'):
                    for doc_number in document_id.iter('{http://www.epo.org/exchange}doc-number'):
                        equivalent.append(doc_number.text)
        print(equivalent)
    # Fix: was a bare `except:`; only malformed XML (ParseError) or a missing
    # 'equivalents-inquiry' element ([0] -> IndexError) is expected here.
    except (ET.ParseError, IndexError):
        print(('unexpected response or no equivalents :' + number))
    return equivalent
|
def query_patent_citation_country_docNumber(id):
    """ES query selecting the single document with the given id."""
    id_filter = {'ids': {'values': [id]}}
    return {'query': {'bool': {'filter': [id_filter]}}}
|
def elasticSearch_process(id):
    """Resolve a citation id to '<country><doc-number>' via the module-level `es` client.

    Returns:
        The concatenated identifier, or the sentinel string 'error es_response'
        when the lookup yields no usable hit.
    """
    response_citation = es.search(index='ep_patent_citations', body=query_patent_citation_country_docNumber(id), size=10000)
    try:
        source = response_citation.get('hits').get('hits')[0].get('_source')
        country = source.get('country')
        docNumber = source.get('doc-number')
        # TypeError here also covers a missing field (None + None).
        identifier = country + docNumber
    # Fix: was a bare `except:`; catch only the failures the lookup can produce.
    except (IndexError, AttributeError, TypeError):
        return 'error es_response'
    print(identifier)
    return identifier
|
def getPatentCitationIds(csv_path):
    """Resolve each unique patent_citation_id in the CSV to its equivalent publications.

    Skips the first 2,767,210 data rows (apparent resume point of a previous
    run - TODO confirm) and sleeps 6 s between OPS requests to respect the
    API rate limit.

    Returns:
        DataFrame with columns 'patent_citation_id' and 'equivalents'.
    """
    list_of_patent_citation_ids = []
    list_of_equivalents_lists = []
    dataframe = pd.read_csv(csv_path, header=0, skiprows=range(1, 2767211))
    patent_citation_id_iterator = dataframe['patent_citation_id']
    for id in patent_citation_id_iterator.unique():
        list_of_patent_citation_ids.append(id)
        citation_identifier = elasticSearch_process(id)
        # Fix: was `is not 'error es_response'` - identity comparison with a
        # string literal is a SyntaxWarning and unreliable; use inequality.
        if (citation_identifier != 'error es_response'):
            equivalents_list = getEquivalents(citation_identifier)
        else:
            equivalents_list = ['error es_response']
        list_of_equivalents_lists.append(equivalents_list)
        # OPS fair-use throttling.
        time.sleep(6.0)
    return pd.DataFrame({'patent_citation_id': list_of_patent_citation_ids, 'equivalents': list_of_equivalents_lists})
|
def process_csv(path):
    """Parse an equivalents log file into the module-level column_id/column_equivalents lists.

    The log alternates an id line with a bracketed equivalents-list line;
    'unexpected response...' lines mark failed lookups and are only counted.
    Updates the module-level counter_error / counter_success tallies.
    """
    global counter_error
    global counter_success
    with open(path) as f:
        lines = f.readlines()
    follow_up_next_line = False
    current_id = ''
    for line in lines:
        if follow_up_next_line:
            # Strip the "['A', 'B']" formatting back into a plain list.
            equivalents_list = line.replace('[', '').replace(']', '').replace("'", '').rstrip().split(', ')
            equivalents_list.append(current_id)
            # De-duplicate while preserving order.
            equivalents_list = list(dict.fromkeys(equivalents_list))
            column_id.append(current_id)
            column_equivalents.append(equivalents_list)
            follow_up_next_line = False
            counter_success += 1
        # Idiom fix: was `str.__contains__(line, ...)` - use the `in` operator.
        elif ('unexpected response or no equivalents :' in line):
            counter_error += 1
        else:
            current_id = line.rstrip()
            follow_up_next_line = True
|
def elasticsearch_request_getDnum(citation_id):
    """ES query selecting the citation document with the given id."""
    ids_clause = {'ids': {'values': [citation_id]}}
    return {'query': {'bool': {'filter': [ids_clause]}}}
|
def elasticsearch_request_getParagraphText(application_number, application_category):
    """ES query filtering applications by exact number and category (kind code)."""
    term_filters = [
        {'term': {'application_number': application_number}},
        {'term': {'application_category': application_category}},
    ]
    return {'query': {'bool': {'filter': term_filters}}}
|
def getPatentDetails(citation_id):
    """Return (dnum, doc-number, country, kind) for a citation id, or 'not found'.

    Looks the citation up in 'ep_patent_citations' via the module-level `es`
    client.
    """
    response = es.search(index='ep_patent_citations', body=elasticsearch_request_getDnum(citation_id))
    print(response)
    try:
        source = response['hits']['hits'][0]['_source']
        dnum = source['dnum']
        docNumber = source['doc-number']
        patentCountry = source['country']
        patentCategory = source['kind']
    # Fix: was a bare `except:`; a missing hit raises IndexError, a missing
    # field raises KeyError - nothing else should be swallowed.
    except (IndexError, KeyError):
        return 'not found'
    return (dnum, docNumber, patentCountry, patentCategory)
|
def dataframeToDict(dataframe, dictionary):
    """Fill `dictionary` with patent_id -> list of equivalent ids parsed from the string column.

    The 'equivalent_patents' column holds stringified lists like "['A', 'B']";
    brackets and quotes are stripped to recover the plain id strings.
    Returns the (mutated) dictionary.
    """
    for (_, row) in dataframe.iterrows():
        raw_ids = row['equivalent_patents'].strip('][').split(', ')
        dictionary[row['patent_id']] = [raw.replace("'", '') for raw in raw_ids]
    return dictionary
|
def getParagraphText(dnum, application_category, paragraphs):
    """Concatenate the requested description paragraphs of a patent application.

    Looks the application up in 'ep_patent_applications' via the module-level
    `es` client and cuts each '<p id="pNNNN" ...>' block out of the raw
    description markup.

    Args:
        dnum: application number used in the term query.
        application_category: kind code used in the term query.
        paragraphs: iterable of paragraph numbers to extract.

    Returns:
        Space-joined paragraph snippets, or 'not found' when the lookup
        yields no hit / no description field.
    """
    response = es.search(index='ep_patent_applications', body=elasticsearch_request_getParagraphText(dnum, application_category))
    try:
        paragraph_field = response['hits']['hits'][0]['_source']['description']
    except:
        return 'not found'
    extracted_paragraph = ''
    for paragraph in paragraphs:
        # Paragraph markers are zero-padded to four digits, e.g. '<p id="p0007" num="0007">'.
        found_paragraph_position_start = paragraph_field.find((((('<p id="p' + ('%04d' % int(paragraph))) + '" num="') + ('%04d' % int(paragraph))) + '">'))
        # NOTE(review): '+ 3' keeps only '</p' of the closing tag (drops the '>');
        # downstream text cleanup strips this remnant - confirm before changing.
        found_paragraph_position_end = (paragraph_field.find('</p>', found_paragraph_position_start) + 3)
        extracted_paragraph = ((extracted_paragraph + ' ') + paragraph_field[found_paragraph_position_start:found_paragraph_position_end])
    return extracted_paragraph
|
def getParagraphFromText(paragraphsText, paragraphNumber):
    """Cut the '<p id="pNNNN" num="NNNN">' paragraph with the given number out of raw markup.

    Note: the end offset of +3 keeps only '</p' of the closing tag (the '>'
    is intentionally dropped; later cleanup removes the '</p' remnant).
    """
    padded = '%04d' % int(paragraphNumber)
    open_marker = (('<p id="p' + padded) + '" num="') + (padded + '">')
    start = paragraphsText.find(open_marker)
    end = paragraphsText.find('</p', start) + 3
    return paragraphsText[start:end]
|
def execute():
    """Build train/test/dev TSVs for claim-passage matching from the sample CSVs.

    Cleans the text columns (strips '</p' remnants, XML tags and
    non-alphanumerics), drops rows left empty, splits 80/10/10 without
    shuffling, then subsamples each split by `sample_size`.
    """
    path = '/mnt/data/datasets/patents/patent_matching'
    positives = pd.read_csv((path + '/positives_satellite.csv'), header=0, dtype={'application_claim_text': str, 'patent_searchReport_paragraph': str})
    negatives = pd.read_csv((path + '/negatives_satellite.csv'), header=0, dtype={'application_claim_text': str, 'patent_searchReport_paragraph': str})
    sample_size = 1.0
    positives = positives[['application_claim_text', 'patent_searchReport_paragraph']]
    positives['label'] = '1'
    positives = positives.rename(columns={'application_claim_text': 'text', 'patent_searchReport_paragraph': 'text_b'})
    negatives = negatives[['application_claim_text', 'patent_searchReport_paragraph']]
    negatives['label'] = '0'
    negatives = negatives.rename(columns={'application_claim_text': 'text', 'patent_searchReport_paragraph': 'text_b'})
    # Fix: DataFrame.append was deprecated and removed in pandas 2.0 - use concat.
    allSamples = pd.concat([positives, negatives]).dropna()
    # Apply the same cleanup pipeline to both text columns.
    for col in ('text_b', 'text'):
        allSamples[col] = allSamples[col].str.replace('<\\/p', '', regex=True)
        allSamples[col] = allSamples[col].str.replace('\\<.+?\\>', '', regex=True)
        allSamples[col] = allSamples[col].str.replace('--\\>', '', regex=True)
        allSamples[col] = allSamples[col].str.replace('"', '', regex=True)
        allSamples[col] = allSamples[col].str.replace('[^A-Za-z0-9\\s.]+', '', regex=True)
        allSamples[col].replace('^\\s', '', regex=True, inplace=True)
        allSamples[col].replace('\\B\\s+|\\s+\\B', '', regex=True, inplace=True)
        # Rows that end up whitespace-only become NaN and are dropped below.
        allSamples[col].replace('^[\\s]*$', np.nan, regex=True, inplace=True)
    allSamples = allSamples.sort_values(by=['text']).dropna()
    # Deterministic 80/10/10 split (no shuffle keeps identical claims adjacent).
    (train, test_dev) = train_test_split(allSamples, test_size=0.2, shuffle=False)
    (test, dev) = train_test_split(test_dev, test_size=0.5, shuffle=False)
    train = train.sample(frac=sample_size)
    test = test.sample(frac=sample_size)
    dev = dev.sample(frac=sample_size)
    print('Check for intersection values:')
    print('Train in Test')
    print(train['text'].isin(test['text']).value_counts())
    print('Train in Dev')
    print(train['text'].isin(dev['text']).value_counts())
    print('Test in Dev')
    print(test['text'].isin(dev['text']).value_counts())
    train.to_csv((path + '/train.tsv'), sep='\t', index=False)
    test.to_csv((path + '/test.tsv'), sep='\t', index=False)
    dev.to_csv((path + '/dev.tsv'), sep='\t', index=False)
|
def query_citation_id(citation_entry):
    """ES query matching the document whose id equals `citation_entry`."""
    ids_clause = {'values': [citation_entry]}
    return {'query': {'ids': ids_clause}}
|
def process_hits(response, column_id_pa, column_cit_srprt, column_category_P, column_category_A, column_category_D, column_category_Y, column_category_L, column_category_O, column_category_T, column_category_E, column_category_X):
    """Record per-citation boolean flags for every search-report category.

    For each application hit and each of its citations, looks the citation up
    in 'ep_patent_citations' (module-level `es` client) and appends, per
    category, whether the citation document carries that 'category_*' field.
    All output lists are parallel and appended to in place.
    """
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        element_id_pa = element.get('_id')
        for citation_id in element.get('_source').get('citation_ids'):
            column_id_pa.append(element_id_pa)
            column_cit_srprt.append(citation_id)
            response_citation = es.search(index='ep_patent_citations', body=query_citation_id(citation_id), size=10000, filter_path=['hits.total.value', 'hits.hits'])
            response_citation_entry = response_citation.get('hits').get('hits')[0].get('_source')
            # Idiom fix: compare against None with `is not None`, not `!=`.
            column_category_P.append((response_citation_entry.get('category_P') is not None))
            column_category_A.append((response_citation_entry.get('category_A') is not None))
            column_category_D.append((response_citation_entry.get('category_D') is not None))
            column_category_Y.append((response_citation_entry.get('category_Y') is not None))
            column_category_L.append((response_citation_entry.get('category_L') is not None))
            column_category_O.append((response_citation_entry.get('category_O') is not None))
            column_category_T.append((response_citation_entry.get('category_T') is not None))
            column_category_E.append((response_citation_entry.get('category_E') is not None))
            column_category_X.append((response_citation_entry.get('category_X') is not None))
|
def setup(app):
app.add_css_file('custom.css')
|
def parse_keys_section(self, section):
    """Napoleon hook: render a 'Keys' section with the standard field formatter."""
    fields = self._consume_fields()
    return self._format_fields('Keys', fields)
|
def parse_attributes_section(self, section):
    """Napoleon hook: render an 'Attributes' section with the standard field formatter."""
    fields = self._consume_fields()
    return self._format_fields('Attributes', fields)
|
def parse_class_attributes_section(self, section):
    """Napoleon hook: render a 'Class Attributes' section with the standard field formatter."""
    fields = self._consume_fields()
    return self._format_fields('Class Attributes', fields)
|
def patched_parse(self):
    """Register the extra napoleon section parsers, then run the original parser."""
    extra_sections = {
        'keys': self._parse_keys_section,
        'class attributes': self._parse_class_attributes_section,
    }
    self._sections.update(extra_sections)
    self._unpatched_parse()
|
class MyDeepText(nn.Module):
    """Bidirectional two-layer GRU text encoder.

    Returns the last layer's forward and backward hidden states concatenated,
    so the output width is 2 * hidden_dim (exposed as `output_dim`).
    """

    def __init__(self, vocab_size, padding_idx=1, embed_dim=100, hidden_dim=64):
        super(MyDeepText, self).__init__()
        self.word_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        self.rnn = nn.GRU(embed_dim, hidden_dim, num_layers=2, bidirectional=True, batch_first=True)
        self.output_dim = hidden_dim * 2

    def forward(self, X):
        tokens = X.long()
        embedded = self.word_embed(tokens)
        _, hidden = self.rnn(embedded)
        # h[-2] / h[-1]: last layer's forward and backward final states.
        return torch.cat((hidden[-2], hidden[-1]), dim=1)
|
class RMSELoss(nn.Module):
    """Root-mean-squared-error loss: sqrt(MSE(input, target))."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        mse_value = self.mse(input, target)
        return torch.sqrt(mse_value)
|
class Accuracy(Metric):
    """Running top-k classification accuracy, accumulated across batches."""

    def __init__(self, top_k: int = 1):
        super(Accuracy, self).__init__()
        self.top_k = top_k
        self.correct_count = 0
        self.total_count = 0
        self._name = 'acc'

    def reset(self):
        """Clear the running counts (e.g. at the start of an epoch)."""
        self.correct_count = 0
        self.total_count = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        num_classes = y_pred.size(1)
        if num_classes == 1:
            # Binary case: probabilities are thresholded at 0.5.
            y_pred = y_pred.round()
        elif num_classes > 1:
            # Multi-class: compare against the top-k predicted labels.
            y_pred = y_pred.topk(self.top_k, 1)[1]
            y_true = y_true.view(-1, 1).expand_as(y_pred)
        self.correct_count += y_pred.eq(y_true).sum().item()
        self.total_count += len(y_pred)
        running_accuracy = float(self.correct_count) / float(self.total_count)
        return np.array(running_accuracy)
|
class SillyCallback(Callback):
    """Toy callback recording 1-based epoch numbers at epoch start and end."""

    def on_train_begin(self, logs=None):
        record = {'beginning': [], 'end': []}
        self.trainer.silly_callback = record

    def on_epoch_begin(self, epoch, logs=None):
        self.trainer.silly_callback['beginning'].append(1 + epoch)

    def on_epoch_end(self, epoch, logs=None, metric=None):
        self.trainer.silly_callback['end'].append(1 + epoch)
|
class RayTuneReporter(Callback):
    """Callback that reports history and lr_history values to Ray Tune
    during hyperparameter tuning.

    Callbacks are passed as input parameters to the ``Trainer`` class. See
    :class:`pytorch_widedeep.trainer.Trainer`.

    For examples see the examples folder at:

    .. code-block:: bash

        /examples/12_HyperParameter_tuning_w_RayTune.ipynb
    """

    def on_epoch_end(self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None):
        # Report the most recent value of every tracked series.
        latest = {name: values[-1] for name, values in self.trainer.history.items()}
        if hasattr(self.trainer, 'lr_history'):
            latest.update({name: values[-1] for name, values in self.trainer.lr_history.items()})
        tune.report(latest)
|
class WnBReportBest(Callback):
    """Callback that reports the best value of a monitored quantity to W&B
    during hyperparameter tuning. It is an adjusted
    pytorch_widedeep.callbacks.ModelCheckpoint with W&B reporting added and
    checkpoint saving removed.

    Callbacks are passed as input parameters to the ``Trainer`` class.

    Parameters
    ----------
    wb: obj
        Weights&Biases API interface used to report the single best result
        (``wandb.run.summary["best"]``), usable for comparing parameter
        combinations via e.g. parallel coordinates.
    monitor: str, default="val_loss"
        quantity to monitor. Typically ``'val_loss'`` or a metric name
        (e.g. ``'val_acc'``).
    mode: str, default="auto"
        One of ``'min'``, ``'max'`` or ``'auto'``. Decides whether an
        improvement means a smaller or a larger monitored value; ``'auto'``
        infers the direction from the monitored quantity's name.
    """

    def __init__(self, wb: object, monitor: str = 'val_loss', mode: str = 'auto'):
        super(WnBReportBest, self).__init__()
        self.monitor = monitor
        self.mode = mode
        self.wb = wb
        if (self.mode not in ['auto', 'min', 'max']):
            warnings.warn(('WnBReportBest mode %s is unknown, fallback to auto mode.' % self.mode), RuntimeWarning)
            self.mode = 'auto'
        # Fix: np.Inf was removed in NumPy 2.0 - use np.inf.
        if (self.mode == 'min'):
            self.monitor_op = np.less
            self.best = np.inf
        elif (self.mode == 'max'):
            self.monitor_op = np.greater
            self.best = -np.inf
        elif self._is_metric(self.monitor):
            # 'auto' mode: metrics are maximised ...
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # ... losses are minimised.
            self.monitor_op = np.less
            self.best = np.inf

    def on_epoch_end(self, epoch: int, logs: Optional[Dict] = None, metric: Optional[float] = None):
        logs = (logs or {})
        current = logs.get(self.monitor)
        if (current is not None):
            if self.monitor_op(current, self.best):
                # New best value: publish it to the W&B run summary.
                self.wb.run.summary['best'] = current
                self.best = current
                self.best_epoch = epoch

    @staticmethod
    def _is_metric(monitor: str):
        """Heuristic (copied from pytorch_widedeep.callbacks): True when the
        monitored quantity looks like a metric (to maximise) rather than a loss."""
        if any([(s in monitor) for s in ['acc', 'prec', 'rec', 'fscore', 'f1', 'f2']]):
            return True
        else:
            return False
|
@wandb_mixin
def training_function(config, X_train, X_val):
    """Ray Tune trainable: fit the module-level wide-and-deep `model` for one config.

    Relies on module-level objects defined elsewhere (`model`, `deep_sch`,
    `deep_opt`, `accuracy`, `precision`, `recall`, `f1`, `wandb`).

    Args:
        config: Ray Tune sample dict; only 'batch_size' is read here.
        X_train: training inputs, passed straight to Trainer.fit.
        X_val: validation inputs, passed straight to Trainer.fit.
    """
    early_stopping = EarlyStopping()
    model_checkpoint = ModelCheckpoint(save_best_only=True)
    batch_size = config['batch_size']
    # NOTE(review): RayTuneReporter is passed as a class while the other
    # callbacks are instances - confirm the Trainer instantiates bare classes.
    trainer = Trainer(model, objective='binary_focal_loss', callbacks=[RayTuneReporter, WnBReportBest(wb=wandb), early_stopping, model_checkpoint], lr_schedulers={'deeptabular': deep_sch}, initializers={'deeptabular': XavierNormal}, optimizers={'deeptabular': deep_opt}, metrics=[accuracy, precision, recall, f1], verbose=0)
    trainer.fit(X_train=X_train, X_val=X_val, n_epochs=5, batch_size=batch_size)
|
def get_coo_indexes(lil):
    """Flatten a list of (lists of) column indices into parallel COO (rows, cols) lists.

    Scalar entries are treated as single-element rows.
    """
    rows, cols = [], []
    for row_idx, entry in enumerate(lil):
        values = entry if type(entry) is list else [entry]
        for col_idx in values:
            rows.append(row_idx)
            cols.append(col_idx)
    return (rows, cols)
|
def get_sparse_features(series, shape):
    """Build a sparse COO indicator matrix (all ones) from a Series of index lists."""
    rows, cols = get_coo_indexes(series.tolist())
    data = np.ones(len(rows))
    return coo_matrix((data, (rows, cols)), shape=shape)
|
def sparse_to_idx(data, pad_idx=-1):
    """Convert a sparse matrix to a dense per-row array of nonzero column indices.

    Rows with fewer nonzeros than the widest row are right-padded with `pad_idx`.
    """
    row_idx, col_idx = data.nonzero()
    frame = pd.DataFrame({'rows': row_idx, 'cols': col_idx})
    per_row = frame.groupby('rows').apply(lambda g: g['cols'].tolist())
    width = per_row.apply(len).max()
    padded = per_row.apply(lambda idxs: pd.Series(idxs + [pad_idx] * (width - len(idxs))))
    return padded.values
|
class Wide(nn.Module):
    """Single linear layer over the wide (one-hot / cross-product) input."""

    def __init__(self, input_dim: int, pred_dim: int):
        super().__init__()
        self.input_dim = input_dim
        self.pred_dim = pred_dim
        self.wide_linear = nn.Linear(input_dim, pred_dim)

    def forward(self, X):
        # Cast to float32 so integer/one-hot inputs are accepted.
        float_input = X.type(torch.float32)
        return self.wide_linear(float_input)
|
class SimpleEmbed(nn.Module):
    """Mean-pooled token embedding over a sequence of token ids."""

    def __init__(self, vocab_size: int, embed_dim: int, pad_idx: int):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.pad_idx = pad_idx
        self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_idx)

    def forward(self, X):
        # Average token embeddings over the sequence dimension.
        return self.embed(X).mean(dim=1)

    @property
    def output_dim(self) -> int:
        return self.embed_dim
|
def download_images(df, out_path, id_col, img_col):
    """Download up to 1000 new images listed in `df` into `out_path`.

    Rows whose image file already exists are skipped and do not count against
    the budget. Ids whose download fails are pickled to
    DATA_PATH / '<id_col>_download_error.p'.

    Args:
        df: frame with one row per image.
        out_path: pathlib.Path-like output directory.
        id_col: column holding the image id (used as the filename stem).
        img_col: column holding the image URL.
    """
    download_error = []
    counter = 0
    for (idx, row) in tqdm(df.iterrows(), total=df.shape[0]):
        if (counter >= 1000):
            # Download budget exhausted; remaining rows are ignored.
            continue
        img_path = str((out_path / '.'.join([str(row[id_col]), 'jpg'])))
        if os.path.isfile(img_path):
            continue
        try:
            urlretrieve(row[img_col], img_path)
            counter += 1
        # Fix: was a bare `except:` with a dead `pass`; keep going on
        # individual failures but never swallow KeyboardInterrupt/SystemExit.
        except Exception:
            download_error.append(row[id_col])
    # Fix: the file handle was never closed; use a context manager.
    with open((DATA_PATH / (id_col + '_download_error.p')), 'wb') as f:
        pickle.dump(download_error, f)
|
def get_coo_indexes(lil):
    """Turn a list of (lists of) column indices into parallel COO row/col lists."""
    rows = []
    cols = []
    for i, el in enumerate(lil):
        cell = el if type(el) is list else [el]
        rows.extend([i] * len(cell))
        cols.extend(cell)
    return (rows, cols)
|
def get_sparse_features(series, shape):
    """Sparse one-hot COO matrix built from the per-row index lists in `series`."""
    (row_idx, col_idx) = get_coo_indexes(series.tolist())
    ones = np.ones(len(row_idx))
    sparse_df = coo_matrix((ones, (row_idx, col_idx)), shape=shape)
    return sparse_df
|
def sparse_to_idx(data, pad_idx=-1):
    """Dense (n_rows, max_nnz) array of nonzero column indices per row, right-padded with `pad_idx`."""
    nz_rows, nz_cols = data.nonzero()
    pairs = pd.DataFrame()
    pairs['rows'] = nz_rows
    pairs['cols'] = nz_cols
    grouped = pairs.groupby('rows').apply(lambda chunk: chunk['cols'].tolist())
    target_len = grouped.apply(len).max()

    def pad(col_list):
        return pd.Series(col_list + [pad_idx] * (target_len - len(col_list)))

    return grouped.apply(pad).values
|
def idx_to_sparse(idx, sparse_dim):
    """One-hot encode a single index as an int Series of length `sparse_dim`."""
    position = int(idx)
    values = [1 if i == position else 0 for i in range(sparse_dim)]
    return pd.Series(values, dtype=int)
|
def process_cats_as_kaggle_notebook(df):
    """Binarise gender, one-hot encode 'occupation' (dropping the 'other' level) and drop 'zip_code'."""
    df['gender'] = (df['gender'] == 'M').astype(int)
    occupation_dummies = pd.get_dummies(df['occupation']).astype(int)
    df = pd.concat([df.drop('occupation', axis=1), occupation_dummies], axis=1)
    # 'other' acts as the reference level; zip_code is not used as a feature.
    return df.drop(['other', 'zip_code'], axis=1)
|
class WideAndDeep(nn.Module):
    """Wide & Deep head: MLP over (continuous features + mean binary embedding),
    with the raw binary vector concatenated back in before the output layer."""

    def __init__(self, continious_feature_shape, embed_size, embed_dict_len, pad_idx):
        super(WideAndDeep, self).__init__()
        self.embed = nn.Embedding(embed_dict_len, embed_size, padding_idx=pad_idx)
        deep_layers = [
            nn.Linear(embed_size + continious_feature_shape, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
        ]
        self.linear_relu_stack = nn.Sequential(*deep_layers)
        self.head = nn.Sequential(nn.Linear(embed_dict_len + 256, embed_dict_len))

    def forward(self, continious, binary, binary_idx):
        # Mean-pool the embedded binary-feature indices.
        pooled_embed = mean(self.embed(binary_idx), dim=1)
        deep_logits = self.linear_relu_stack(cat((continious, pooled_embed), dim=1))
        # Wide path: raw binary vector joins the deep representation.
        return self.head(cat((deep_logits, binary), dim=1))
|
def get_coo_indexes(lil):
    """Parallel COO (rows, cols) lists from a column-index list-of-lists."""
    normalized = [el if type(el) is list else [el] for el in lil]
    rows = [i for i, cell in enumerate(normalized) for _ in cell]
    cols = [j for cell in normalized for j in cell]
    return (rows, cols)
|
def get_sparse_features(series, shape):
    """COO indicator matrix (ones at every listed index) for a Series of index lists."""
    indexes = get_coo_indexes(series.tolist())
    n_entries = len(indexes[0])
    return coo_matrix((np.ones(n_entries), (indexes[0], indexes[1])), shape=shape)
|
def sparse_to_idx(data, pad_idx=-1):
    """Per-row nonzero column indices as a dense padded array (pad value `pad_idx`)."""
    nonzero = data.nonzero()
    table = pd.DataFrame({'rows': nonzero[0], 'cols': nonzero[1]})
    lists = table.groupby('rows').apply(lambda part: part['cols'].tolist())
    longest = max(lists.apply(len))
    filled = lists.apply(lambda l: pd.Series(l + [pad_idx] * (longest - len(l))))
    return filled.values
|
class Wide(nn.Module):
    """Linear 'wide' component mapping the sparse input straight to predictions."""

    def __init__(self, input_dim: int, pred_dim: int):
        super().__init__()
        self.input_dim = input_dim
        self.pred_dim = pred_dim
        self.wide_linear = nn.Linear(input_dim, pred_dim)

    def forward(self, X):
        # Accept integer/one-hot inputs by casting to float32 first.
        return self.wide_linear(X.type(torch.float32))
|
class SimpleEmbed(nn.Module):
    """Embedding layer followed by mean pooling over the sequence axis."""

    def __init__(self, vocab_size: int, embed_dim: int, pad_idx: int):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.pad_idx = pad_idx
        self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_idx)

    def forward(self, X):
        token_vectors = self.embed(X)
        return torch.mean(token_vectors, dim=1)

    @property
    def output_dim(self) -> int:
        # Pooling does not change the embedding width.
        return self.embed_dim
|
class MyDeepText(nn.Module):
    """Two-layer bidirectional GRU encoder returning the final forward/backward
    hidden states concatenated (width = 2 * hidden_dim, via `output_dim`)."""

    def __init__(self, vocab_size, padding_idx=1, embed_dim=100, hidden_dim=64):
        super(MyDeepText, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        self.rnn = nn.GRU(embed_dim, hidden_dim, num_layers=2, bidirectional=True, batch_first=True)

    @property
    def output_dim(self):
        # Forward + backward hidden state widths.
        return 2 * self.hidden_dim

    def forward(self, X):
        hidden_states = self.rnn(self.word_embed(X.long()))[1]
        return torch.cat((hidden_states[-2], hidden_states[-1]), dim=1)
|
class RMSELoss(nn.Module):
    """RMSE loss module: the square root of torch's MSE."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        return self.mse(input, target).sqrt()
|
class Accuracy(Metric):
    """Top-k accuracy metric with running correct/total counts."""

    def __init__(self, top_k: int = 1):
        super(Accuracy, self).__init__()
        self.top_k = top_k
        self.correct_count = 0
        self.total_count = 0
        self._name = 'acc'

    def reset(self):
        """Zero the accumulated counts."""
        self.correct_count = 0
        self.total_count = 0

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
        n_outputs = y_pred.size(1)
        if n_outputs == 1:
            # Single-logit binary predictions: threshold at 0.5.
            y_pred = y_pred.round()
        elif n_outputs > 1:
            # Multi-class: keep the top-k label indices.
            y_pred = y_pred.topk(self.top_k, 1)[1]
            y_true = y_true.view(-1, 1).expand_as(y_pred)
        self.correct_count += y_pred.eq(y_true).sum().item()
        self.total_count += len(y_pred)
        return np.array(float(self.correct_count) / float(self.total_count))
|
class SillyCallback(Callback):
    """Demo callback that stores 1-based epoch numbers on the trainer at each
    epoch boundary."""

    def on_train_begin(self, logs=None):
        self.trainer.silly_callback = {}
        for key in ('beginning', 'end'):
            self.trainer.silly_callback[key] = []

    def on_epoch_begin(self, epoch, logs=None):
        self.trainer.silly_callback['beginning'].append(epoch + 1)

    def on_epoch_end(self, epoch, logs=None, metric=None):
        self.trainer.silly_callback['end'].append(epoch + 1)
|
class MyDeepText(nn.Module):
    """GRU-based text encoder (2 layers, bidirectional); output is the
    concatenation of the last layer's two final hidden states."""

    def __init__(self, vocab_size, padding_idx=1, embed_dim=100, hidden_dim=64):
        super(MyDeepText, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        self.rnn = nn.GRU(embed_dim, hidden_dim, num_layers=2, bidirectional=True, batch_first=True)

    @property
    def output_dim(self):
        return self.hidden_dim * 2

    def forward(self, X):
        embedded = self.word_embed(X.long())
        _, final_hidden = self.rnn(embedded)
        forward_state = final_hidden[-2]
        backward_state = final_hidden[-1]
        return torch.cat((forward_state, backward_state), dim=1)
|
class RMSELoss(nn.Module):
    """Root mean squared error: sqrt applied to the standard MSE loss."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        squared_error = self.mse(input, target)
        return torch.sqrt(squared_error)
|
# NOTE: the following lines are dataset-viewer boilerplate accidentally
# captured during extraction; kept as comments so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.