code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FbDialogTeacher
from .build_2009 import build as build_2009
from .build_2018 import build as build_2018
import copy
import os
def _path(opt, version, use_history):
    """Build the requested OpenSubtitles release and return its data file.

    :param opt: ParlAI options dict; must contain 'datapath' and 'datatype'.
    :param version: corpus release, either '2009' or '2018'.
    :param use_history: whether examples carry dialog history; the 2009
        release only supports True.
    :return: path to the ``<datatype>.txt`` file inside the built data dir.
    """
    if version == '2009':
        # the 2009 build has no history-free variant
        assert use_history
        datapath = build_2009(opt['datapath'])
    elif version == '2018':
        datapath = build_2018(opt['datapath'], use_history)
    else:
        raise Exception('Unknown version for OpenSubtitles: %s' % version)
    # datatype may carry a ':stream'-style suffix; drop it for the filename
    fold = opt['datatype'].split(':')[0]
    return os.path.join(datapath, fold + '.txt')
class HalfTeacher(FbDialogTeacher):
    """Opensubtitles teacher that yields half of all possible dialog
    examples (each utterance pair is used in one direction only).
    """

    def __init__(self, opt, shared=None, version='2018', use_history=True):
        # work on a private copy so the caller's opt is never mutated
        options = copy.deepcopy(opt)
        options['datafile'] = _path(options, version, use_history)
        # for eval splits, rank candidates against the full data file
        if not options['datatype'].startswith('train'):
            options['cands_datafile'] = options['datafile']
        super().__init__(options, shared)
class FullTeacher(HalfTeacher):
    """This version of opensubtitles creates all possible dialog examples."""
    def setup_data(self, path):
        # Given the accumulated (text, labels) entries of one episode, build
        # the role-reversed examples: each entry's first label becomes the
        # prompt, and the *next* entry's text becomes the response.
        def rebuild(entries):
            return [
                (entries[i][1][0], [entries[i + 1][0]])
                for i in range(len(entries) - 1)
            ]
        # this shows conversations in both directions
        alternate = []
        for entry, new in super().setup_data(path):
            if new:
                # episode boundary: flush the reversed examples gathered so
                # far as their own episode (first one marks episode start)
                for i, e in enumerate(rebuild(alternate)):
                    yield e, i == 0
                alternate.clear()
            else:
                # NOTE(review): the first entry of each episode (new=True) is
                # never appended, so reversed examples skip the opening pair —
                # confirm this is intended.
                alternate.append(entry)
            # always pass the original (forward) example through unchanged
            yield entry, new
        # flush the reversed examples of the final episode
        if alternate:
            for i, e in enumerate(rebuild(alternate)):
                yield e, i == 0
class Task100kTeacher(HalfTeacher):
    """This version of opensubtitles only includes 100,000 dialogs."""

    def setup_data(self, path):
        num_yielded = 0
        for example, episode_start in super().setup_data(path):
            # focus on examples with targets for small set
            if len(example) > 1 and example[1]:
                yield example, episode_start
                num_yielded += 1
                if num_yielded >= 100000:
                    break
class Task10kTeacher(HalfTeacher):
    """This version of opensubtitles only includes 10,000 dialogs."""

    def setup_data(self, path):
        num_yielded = 0
        for example, episode_start in super().setup_data(path):
            # focus on examples with targets for small set
            if len(example) > 1 and example[1]:
                yield example, episode_start
                num_yielded += 1
                if num_yielded >= 10000:
                    break
class V2009Teacher(FullTeacher):
    """All-pairs dialog examples from the 2009 OpenSubtitles release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2009', True)
class V2009HalfTeacher(HalfTeacher):
    """One-direction dialog examples from the 2009 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2009', True)
class V2009Task100kTeacher(Task100kTeacher):
    """100k-example subset of the 2009 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2009', True)
class V2009Task10kTeacher(Task10kTeacher):
    """10k-example subset of the 2009 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2009', True)
class V2018Teacher(FullTeacher):
    """All-pairs dialog examples from the 2018 OpenSubtitles release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', True)
class V2018HalfTeacher(HalfTeacher):
    """One-direction dialog examples from the 2018 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', True)
class V2018Task100kTeacher(Task100kTeacher):
    """100k-example subset of the 2018 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', True)
class V2018Task10kTeacher(Task10kTeacher):
    """10k-example subset of the 2018 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', True)
class V2018NoHistoryTeacher(FullTeacher):
    """2018 release with history-free (two-turn) examples."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', False)
class V2018NoHistoryTask100kTeacher(Task100kTeacher):
    """100k-example, history-free subset of the 2018 release.

    Note, these versions only use two-turn dialogs. This is more efficient
    due to movie-based deduplication, compared to the regular v2018 dataset.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', False)
class V2018NoHistoryTask10kTeacher(Task10kTeacher):
    """10k-example, history-free subset of the 2018 release."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2018', False)
# Defaults to full teacher (all possible examples)
class DefaultTeacher(V2018Teacher):
pass | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/opensubtitles/agents.py | 0.763528 | 0.222954 | agents.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher, DialogTeacher
from .build import build
import json
import os
class IndexTeacher(FixedDialogTeacher):
    """Hand-written SQuAD teacher, which loads the json squad data and
    implements its own `act()` method for interacting with student agent,
    rather than inheriting from the core Dialog Teacher. This code is here as
    an example of rolling your own without inheritance.
    This teacher also provides access to the "answer_start" indices that
    specify the location of the answer in the context.
    """
    def __init__(self, opt, shared=None):
        build(opt)
        super().__init__(opt, shared)
        # train uses the train file; valid/test both fall back to dev
        if self.datatype.startswith('train'):
            suffix = 'train'
        else:
            suffix = 'dev'
        datapath = os.path.join(
            opt['datapath'],
            'SQuAD2',
            suffix + '-v2.0.json'
        )
        # NOTE(review): _setup_data returns None, so self.data is always
        # None; the usable index lives in self.examples / self.squad.
        self.data = self._setup_data(datapath)
        self.id = 'squad2'
        self.reset()
    def num_examples(self):
        # one example per (article, paragraph, question) triple
        return len(self.examples)
    def num_episodes(self):
        # every episode is a single question, so episodes == examples
        return self.num_examples()
    def get(self, episode_idx, entry_idx=None):
        """Return the action dict for one question.

        'text' is context + question; 'labels' holds the answer strings
        (empty for unanswerable questions) and 'answer_starts' their
        character offsets into the context.
        """
        article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
        article = self.squad[article_idx]
        paragraph = article['paragraphs'][paragraph_idx]
        qa = paragraph['qas'][qa_idx]
        question = qa['question']
        answers = []
        answer_starts = []
        # unanswerable (SQuAD v2) questions get no labels
        if not qa['is_impossible']:
            for a in qa['answers']:
                answers.append(a['text'])
                answer_starts.append(a['answer_start'])
        context = paragraph['context']
        plausible = qa.get("plausible_answers", [])
        action = {
            'id': 'squad',
            'text': context + '\n' + question,
            'labels': answers,
            'plausible_answers': plausible,
            'episode_done': True,
            'answer_starts': answer_starts
        }
        return action
    def _setup_data(self, path):
        """Load the squad json and build an index of (article, paragraph,
        question) triples into self.examples.
        """
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        self.examples = []
        for article_idx in range(len(self.squad)):
            article = self.squad[article_idx]
            for paragraph_idx in range(len(article['paragraphs'])):
                paragraph = article['paragraphs'][paragraph_idx]
                num_questions = len(paragraph['qas'])
                for qa_idx in range(num_questions):
                    self.examples.append((article_idx, paragraph_idx, qa_idx))
class DefaultTeacher(DialogTeacher):
    """SQuAD v2 teacher built on the core DialogTeacher, which only needs a
    `setup_data` iterator to inherit basic metrics and a default `act`.

    For SQuAD, this does not efficiently store the paragraphs in memory.
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        build(opt)
        # train uses the train file; valid/test both fall back to dev
        suffix = 'train' if opt['datatype'].startswith('train') else 'dev'
        opt['datafile'] = os.path.join(
            opt['datapath'], 'SQuAD2', suffix + '-v2.0.json')
        self.id = 'squad2'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield ((context + question, answers), True) for every question."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        for article in self.squad:
            # each paragraph is a context for the attached questions
            for paragraph in article['paragraphs']:
                context = paragraph['context']
                # each question is an example
                for qa in paragraph['qas']:
                    # unanswerable questions label with the empty string
                    if qa['is_impossible']:
                        ans_src = [{'text': ''}]
                    else:
                        ans_src = qa['answers']
                    answers = (a['text'] for a in ans_src)
                    yield (context + '\n' + qa['question'], answers), True
class OpenSquadTeacher(DialogTeacher):
    """SQuAD v2 teacher built on the core DialogTeacher, presenting each
    question *without* its supporting context paragraph ("open" QA).
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        build(opt)
        # train uses the train file; valid/test both fall back to dev
        suffix = 'train' if opt['datatype'].startswith('train') else 'dev'
        opt['datafile'] = os.path.join(
            opt['datapath'], 'SQuAD2', suffix + '-v2.0.json')
        self.id = 'squad2'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield ((question, answers), True) for every question."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        for article in self.squad:
            # each paragraph is a context for the attached questions
            for paragraph in article['paragraphs']:
                # each question is an example
                for qa in paragraph['qas']:
                    # unanswerable questions label with the empty string
                    if qa['is_impossible']:
                        ans_src = [{'text': ''}]
                    else:
                        ans_src = qa['answers']
                    answers = (a['text'] for a in ans_src)
                    yield (qa['question'], answers), True
class TitleTeacher(DefaultTeacher):
    """Like DefaultTeacher, but the 'text' field additionally starts with
    the article title, newline-separated from the paragraph and the query.

    Note: the title keeps the underscores from the Wikipedia URL, i.e. the
    article lives at https://en.wikipedia.org/wiki/{TITLE}. Strip them if
    your task needs plain text.
    """

    def __init__(self, opt, shared=None):
        # NOTE(review): super().__init__ re-assigns self.id to 'squad2',
        # overriding 'squad_title' — confirm this is intended.
        self.id = 'squad_title'
        build(opt)
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield ((title + context + question, answers), True) per question."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        for article in self.squad:
            title = article['title']
            # each paragraph is a context for the attached questions
            for paragraph in article['paragraphs']:
                context = paragraph['context']
                # each question is an example
                for qa in paragraph['qas']:
                    # unanswerable questions label with the empty string
                    if qa['is_impossible']:
                        ans_src = [{'text': ''}]
                    else:
                        ans_src = qa['answers']
                    answers = (a['text'] for a in ans_src)
                    text = '\n'.join([title, context, qa['question']])
                    yield (text, answers), True
class SentenceIndexTeacher(IndexTeacher):
    """Index teacher where the labels are the sentences the contain the true
    answer.

    Uses NLTK's punkt tokenizer to split the context into sentences; answer
    punctuation is temporarily stripped so sentence splitting is not fooled
    by '.', '?' or '!' inside an answer span.
    """
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        try:
            import nltk
        except ImportError:
            raise ImportError('Please install nltk (e.g. pip install nltk).')
        # nltk-specific setup
        st_path = 'tokenizers/punkt/{0}.pickle'.format('english')
        try:
            self.sent_tok = nltk.data.load(st_path)
        except LookupError:
            # punkt model not present locally yet; fetch then retry
            nltk.download('punkt')
            self.sent_tok = nltk.data.load(st_path)
    def get(self, episode_idx, entry_idx=None):
        """Return an action whose labels are the full sentences containing
        an answer, with 'answer_starts' giving each sentence's character
        offset into the (restored) context.
        """
        article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
        article = self.squad[article_idx]
        paragraph = article['paragraphs'][paragraph_idx]
        qa = paragraph['qas'][qa_idx]
        context = paragraph['context']
        question = qa['question']
        answers = []
        if not qa['is_impossible']:
            answers = [a['text'] for a in qa['answers']]
        # temporarily remove '.', '?', '!' from answers for proper sentence
        # tokenization
        edited_answers = []
        for answer in answers:
            new_answer = answer.replace(
                '.', '').replace('?', '').replace('!', '')
            context = context.replace(answer, new_answer)
            edited_answers.append(new_answer)
        edited_sentences = self.sent_tok.tokenize(context)
        # restore original answer text inside each tokenized sentence
        sentences = []
        for sentence in edited_sentences:
            for i in range(len(edited_answers)):
                sentence = sentence.replace(edited_answers[i], answers[i])
            sentences.append(sentence)
        # restore the context itself so offsets refer to the original text
        for i in range(len(edited_answers)):
            context = context.replace(edited_answers[i], answers[i])
        labels = []
        label_starts = []
        for sentence in sentences:
            for answer in answers:
                # take each answer-bearing sentence once
                if answer in sentence and sentence not in labels:
                    labels.append(sentence)
                    label_starts.append(context.index(sentence))
                    break
        # unanswerable questions get an empty-string label
        if len(labels) == 0:
            labels.append('')
        plausible = []
        if qa['is_impossible']:
            plausible = qa['plausible_answers']
        action = {
            'id': 'squad',
            'text': context + '\n' + question,
            'labels': labels,
            'plausible_answers': plausible,
            'episode_done': True,
            'answer_starts': label_starts
        }
        return action
class SentenceIndexEditTeacher(SentenceIndexTeacher):
    """Index teacher where the labels are the sentences the contain the true
    answer.
    Some punctuation may be removed from the context and the answer for
    tokenization purposes.

    Unlike SentenceIndexTeacher, the stripped punctuation is NOT restored:
    labels and offsets refer to the edited context.
    """
    def __init__(self, opt, shared=None):
        # no extra setup beyond the parent (punkt tokenizer etc.)
        super().__init__(opt, shared)
    def get(self, episode_idx, entry_idx=None):
        """Return an action labelled with the (edited) sentences that contain
        an answer; offsets index into the edited context.
        """
        article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
        article = self.squad[article_idx]
        paragraph = article['paragraphs'][paragraph_idx]
        qa = paragraph['qas'][qa_idx]
        context = paragraph['context']
        question = qa['question']
        # NOTE(review): for unanswerable questions answers == [""], and ""
        # is a substring of every sentence, so the first sentence becomes
        # the label — confirm this is intended.
        answers = [""]
        if not qa['is_impossible']:
            answers = [a['text'] for a in qa['answers']]
        # remove '.', '?', '!' from answers for proper sentence
        # tokenization
        edited_answers = []
        for answer in answers:
            new_answer = answer.replace(
                '.', '').replace('?', '').replace('!', '')
            context = context.replace(answer, new_answer)
            edited_answers.append(new_answer)
        edited_sentences = self.sent_tok.tokenize(context)
        labels = []
        label_starts = []
        for sentence in edited_sentences:
            for answer in edited_answers:
                # take each answer-bearing sentence once
                if answer in sentence and sentence not in labels:
                    labels.append(sentence)
                    label_starts.append(context.index(sentence))
                    break
        plausible = []
        if qa['is_impossible']:
            plausible = qa['plausible_answers']
        action = {
            'id': 'squad',
            'text': context + '\n' + question,
            'labels': labels,
            'plausible_answers': plausible,
            'episode_done': True,
            'answer_starts': label_starts
        }
        return action
class SentenceLabelsTeacher(IndexTeacher):
    """Teacher which contains the question as the text, the sentences as the
    label candidates, and the label as the sentence containing the answer.
    Some punctuation may be removed for tokenization purposes.
    """
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        try:
            import nltk
        except ImportError:
            raise ImportError('Please install nltk (e.g. pip install nltk).')
        # nltk-specific setup
        st_path = 'tokenizers/punkt/{0}.pickle'.format('english')
        try:
            self.sent_tok = nltk.data.load(st_path)
        except LookupError:
            # punkt model not present locally yet; fetch then retry
            nltk.download('punkt')
            self.sent_tok = nltk.data.load(st_path)
    def get(self, episode_idx, entry_idx=None):
        """Return an action with the question as text, the edited sentences
        as label candidates, and answer-bearing sentences as labels.
        """
        article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
        article = self.squad[article_idx]
        paragraph = article['paragraphs'][paragraph_idx]
        qa = paragraph['qas'][qa_idx]
        context = paragraph['context']
        question = qa['question']
        # NOTE(review): for unanswerable questions answers == [''], and ''
        # is a substring of every sentence, so the first sentence becomes
        # the label — confirm this is intended.
        answers = ['']
        if not qa['is_impossible']:
            answers = [a['text'] for a in qa['answers']]
        # remove '.', '?', '!' from answers for proper sentence
        # tokenization
        edited_answers = []
        for answer in answers:
            new_answer = answer.replace(
                '.', '').replace('?', '').replace('!', '')
            context = context.replace(answer, new_answer)
            edited_answers.append(new_answer)
        edited_sentences = self.sent_tok.tokenize(context)
        labels = []
        for sentence in edited_sentences:
            for answer in edited_answers:
                # take each answer-bearing sentence once
                if answer in sentence and sentence not in labels:
                    labels.append(sentence)
                    break
        plausible = []
        if qa['is_impossible']:
            plausible = qa['plausible_answers']
        action = {
            'id': 'SquadSentenceLabels',
            'text': question,
            'labels': labels,
            'plausible_answers': plausible,
            'label_candidates': edited_sentences,
            'episode_done': True,
        }
return action | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/squad2/agents.py | 0.833663 | 0.23513 | agents.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def create_fb_format(outpath, dtype, inpath):
    """Convert SCAN 'IN: ... OUT: ...' lines into ParlAI FB dialog format.

    Writes ``<outpath>/<dtype>.txt`` where each example is a single-turn
    episode of the form ``1 <input>\\t<output>`` followed by a blank line.

    Splitting: for 'train' every 20th line (i % 20 == 0) is held out, for
    'valid' only those held-out lines are kept (a disjoint 95/5 split of the
    same input file); 'test' keeps every line of its input file.

    :param outpath: directory to write the output file into
    :param dtype: one of 'train', 'valid', 'test'
    :param inpath: path to the raw SCAN task file
    """
    print('building fbformat:' + dtype)
    outfile = os.path.join(outpath, dtype + '.txt')
    # stream the input instead of materializing all lines in memory
    with open(outfile, 'w') as fout, open(inpath) as fin:
        for i, line in enumerate(fin):
            if dtype == 'train' and (i % 20) == 0:
                continue
            if dtype == 'valid' and (i % 20) != 0:
                continue
            xy = line.strip('\n').split('OUT: ')
            x = xy[0].split('IN: ')[1].strip(' ')
            y = xy[1].strip(' ')
            fout.write('1 ' + x + '\t' + y + '\n\n')
def build(opt):
    """Download the SCAN corpus and convert it to FB dialog format.

    Idempotent: does nothing if the current version is already built under
    ``<datapath>/SCAN``.
    """
    version = 'v1.0'
    dpath = os.path.join(opt['datapath'], 'SCAN')
    if not build_data.built(dpath, version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        fname = 'scan.tgz'
        url = 'http://parl.ai/downloads/scan/' + fname
        build_data.download(url, dpath, fname)
        build_data.untar(dpath, fname)
        # train and valid are disjoint splits of the same source file
        # (create_fb_format keeps different i % 20 residues for each)
        create_fb_format(dpath, 'train', os.path.join(dpath, 'tasks_train_simple.txt'))
        create_fb_format(dpath, 'valid', os.path.join(dpath, 'tasks_train_simple.txt'))
        create_fb_format(dpath, 'test', os.path.join(dpath, 'tasks_test_simple.txt'))
build_data.mark_done(dpath, version) | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/scan/build.py | 0.655115 | 0.150434 | build.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
def _path(opt):
    """Build CLEVR and return (questions json path, images directory path)."""
    build(opt)
    fold = opt['datatype'].split(':')[0]
    if fold == 'valid':
        # CLEVR names its validation split 'val'
        fold = 'val'
    elif fold not in ('train', 'test'):
        raise RuntimeError('Not valid datatype.')
    base = os.path.join(opt['datapath'], 'CLEVR', 'CLEVR_v1.0')
    questions_path = os.path.join(
        base, 'questions', 'CLEVR_' + fold + '_questions.json')
    images_path = os.path.join(base, 'images', fold)
    return questions_path, images_path
# Every answer token that can occur in CLEVR, grouped by attribute type.
counts = [str(i) for i in range(11)]  # counting answers: '0' .. '10'
materials = ['metal', 'rubber']
sizes = ['small', 'large']
shapes = ['cube', 'sphere', 'cylinder']
colors = ['gray', 'blue', 'brown', 'yellow', 'red', 'green', 'purple', 'cyan']
class DefaultTeacher(DialogTeacher):
    """CLEVR visual-QA teacher; episodes group all questions for one image."""
    # all possible answers for the questions
    cands = ['yes', 'no'] + counts + materials + sizes + shapes + colors
    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        data_path, self.images_path = _path(opt)
        opt['datafile'] = data_path
        self.id = 'clevr'
        super().__init__(opt, shared)
    def label_candidates(self):
        # fixed candidate set shared by every example
        return self.cands
    def setup_data(self, path):
        """Yield (question, answer, None, None, image path) tuples; the image
        path is only given on the first question of each image's episode.
        """
        print('loading: ' + path)
        with open(path) as data_file:
            clevr = json.load(data_file)
        image_file = None
        for ques in clevr['questions']:
            # episode done if first question or image changed
            new_episode = ques['image_filename'] != image_file
            # only show image at beginning of episode
            image_file = ques['image_filename']
            img_path = None
            if new_episode:
                img_path = os.path.join(self.images_path, image_file)
            question = ques['question']
            # the test split ships without answers
            answer = [ques['answer']] if ques['split'] != 'test' else None
yield (question, answer, None, None, img_path), new_episode | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/clevr/agents.py | 0.747892 | 0.184088 | agents.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher
from parlai.core.image_featurizers import ImageLoader
from parlai.scripts.extract_image_feature import extract_feats
from .build import build
from parlai.tasks.coco_caption.build_2014 import buildImage as buildImage_2014
from parlai.tasks.coco_caption.build_2015 import buildImage as buildImage_2015
try:
import torch
except Exception as e:
raise ImportError('Need to install Pytorch: go to pytorch.org')
from torch.utils.data import Dataset
from parlai.agents.mlb_vqa.mlb_vqa import VqaDictionaryAgent
import json
import os
def _path(opt):
    """Build VQA-v1 (and the COCO image sets) and return its file locations.

    :return: (questions json path, annotations json path, image path prefix)
    """
    build(opt)
    buildImage_2014(opt)
    buildImage_2015(opt)
    dt = opt['datatype'].split(':')[0]
    # per-split file naming: (question suffix, annotation suffix,
    # image filename prefix, COCO image release year)
    splits = {
        'train': ('MultipleChoice_mscoco_train2014', 'mscoco_train2014',
                  os.path.join('train2014', 'COCO_train2014_'), '2014'),
        'valid': ('MultipleChoice_mscoco_val2014', 'mscoco_val2014',
                  os.path.join('val2014', 'COCO_val2014_'), '2014'),
        'test': ('MultipleChoice_mscoco_test2015', 'None',
                 os.path.join('test2015', 'COCO_test2015_'), '2015'),
    }
    if dt not in splits:
        raise RuntimeError('Not valid datatype.')
    ques_suffix, annotation_suffix, img_suffix, img_version = splits[dt]
    data_path = os.path.join(opt['datapath'], 'VQA-v1',
                             ques_suffix + '_questions.json')
    # note: for 'test' this points at a non-existent 'None_annotations.json';
    # callers never open it for the test split
    annotation_path = os.path.join(opt['datapath'], 'VQA-v1',
                                   annotation_suffix + '_annotations.json')
    image_path = os.path.join(opt['datapath'],
                              'COCO-IMG-{}'.format(img_version), img_suffix)
    return data_path, annotation_path, image_path
class VQADataset(Dataset):
    """A Pytorch Dataset utilizing streaming"""
    def __init__(self, opt):
        self.opt = opt
        self.use_att = opt.get('attention', False)
        self.use_hdf5 = opt.get('use_hdf5', False)
        self.opt['use_hdf5_extraction'] = self.use_hdf5
        self.datatype = self.opt.get('datatype')
        self.training = self.datatype.startswith('train')
        self.num_epochs = self.opt.get('num_epochs', 0)
        self.image_loader = ImageLoader(opt)
        data_path, annotation_path, self.image_path = _path(opt)
        self._setup_data(data_path, annotation_path, opt.get('unittest', False))
        if self.use_hdf5:
            # h5py is optional and only needed for pre-extracted features
            try:
                import h5py
                self.h5py = h5py
            except ImportError:
                raise ImportError('Need to install h5py - `pip install h5py`')
            self._setup_image_data()
        self.dict_agent = VqaDictionaryAgent(opt)
    def __getitem__(self, index):
        """Return (index, encoded example) for the question at ``index``.

        The index wraps modulo the dataset size so that __len__ can report
        multiple epochs' worth of items.
        """
        index %= self.num_episodes()
        qa = self.ques['questions'][index]
        ep = {
            'text': qa['question'],
            'image': self.get_image(qa['image_id']),
            'episode_done': True,
        }
        # feature-extraction mode: return the raw example unencoded
        if self.opt.get('extract_image', False):
            ep['image_id'] = qa['image_id']
            return ep
        if not self.datatype.startswith('test'):
            anno = self.annotation['annotations'][index]
            # NOTE(review): this list is computed twice (labels and
            # ep['labels'] are equal but separate lists) — looks redundant.
            labels = [ans['answer'] for ans in anno['answers']]
            ep['labels'] = [ans['answer'] for ans in anno['answers']]
            ep['valid'] = True
            # NOTE(review): 'mc_label' is never put into ep in this method,
            # so this branch appears dead here — confirm whether a caller
            # was expected to inject it.
            if 'mc_label' in ep:
                if not ep['mc_label'][0] in self.dict_agent.ans2ind:
                    ep['valid'] = False
            ep = self.dict_agent.encode_question([ep], self.training)
            ep = self.dict_agent.encode_answer(ep)
            # keep the raw (unencoded) answer strings alongside the encoding
            ep[0]['labels'] = labels
        else:
            ep['valid'] = True
            ep = self.dict_agent.encode_question([ep], False)
        ep[0]['use_att'] = self.use_att
        ep[0]['use_hdf5'] = self.use_hdf5
        return (index, ep)
    def __len__(self):
        # during training, report enough items for num_epochs passes
        # (default 100 when num_epochs is unset/0); eval gets one pass
        num_epochs = self.num_epochs if self.num_epochs > 0 else 100
        num_iters = num_epochs if self.training else 1
        return int(num_iters * self.num_episodes())
    def _load_lens(self):
        # load precomputed episode/example counts from the lengths file
        with open(self.length_datafile) as length:
            lengths = json.load(length)
        self.num_eps = lengths['num_eps']
        self.num_exs = lengths['num_exs']
    def _setup_data(self, data_path, annotation_path, unittest):
        """Load questions (and, for non-test splits, annotations); in
        unittest mode keep only the first 10 of each.
        """
        with open(data_path) as data_file:
            self.ques = json.load(data_file)
        if not self.datatype.startswith('test'):
            with open(annotation_path) as data_file:
                self.annotation = json.load(data_file)
        if unittest:
            self.ques['questions'] = self.ques['questions'][:10]
            if not self.datatype.startswith('test'):
                self.annotation['annotations'] = self.annotation['annotations'][:10]
        self.image_paths = set()
        for qa in self.ques['questions']:
            self.image_paths.add(self.image_path + '%012d.jpg' % (qa['image_id']))
    def _setup_image_data(self):
        '''hdf5 image dataset'''
        # extract features first if they are not on disk yet
        extract_feats(self.opt)
        im = self.opt.get('image_mode')
        if self.opt.get('attention', False):
            hdf5_path = self.image_path + 'mode_{}.hdf5'.format(im)
        else:
            hdf5_path = self.image_path + 'mode_{}_noatt.hdf5'.format(im)
        hdf5_file = self.h5py.File(hdf5_path, 'r')
        self.image_dataset = hdf5_file['images']
        # maps COCO image id (as str) -> row index in the hdf5 dataset
        image_id_to_idx_path = self.image_path + 'mode_{}_id_to_idx.txt'.format(im)
        with open(image_id_to_idx_path, 'r') as f:
            self.image_id_to_idx = json.load(f)
    def get_image(self, image_id):
        # either load the jpg from disk, or look up pre-extracted features
        if not self.use_hdf5:
            im_path = self.image_path + '%012d.jpg' % (image_id)
            return self.image_loader.load(im_path)
        else:
            img_idx = self.image_id_to_idx[str(image_id)]
            return torch.Tensor(self.image_dataset[img_idx])
    def num_episodes(self):
        return len(self.ques['questions'])
    def num_examples(self):
        # one example per question
        return self.num_episodes()
    def num_images(self):
        # cached count of distinct images referenced by the questions
        if not hasattr(self, 'num_imgs'):
            self.num_imgs = len({q['image_id'] for q in self.ques['questions']})
        return self.num_imgs
class DefaultDataset(VQADataset):
    """Alias so the default dataset resolves to the streaming VQA dataset."""
    pass
class OeTeacher(FixedDialogTeacher):
    """
    VQA Open-Ended teacher, which loads the json vqa data and implements its
    own `act` method for interacting with student agent.

    Images are fetched asynchronously: `next_example` keeps a one-example
    lookahead so that the image for the *next* example loads while the
    current one is being used.
    """
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        data_path, annotation_path, self.image_path = _path(opt)
        self.datafile = data_path
        self.image_mode = opt.get('image_mode', 'none')
        # reuse data already loaded by the parent copy when sharing
        if shared and 'ques' in shared:
            self.ques = shared['ques']
            if 'annotation' in shared:
                self.annotation = shared['annotation']
            self.image_loader = shared['image_loader']
        else:
            self._setup_data(data_path, annotation_path)
            self.image_loader = ImageLoader(opt)
        self.reset()
    def reset(self):
        super().reset()
        # clear the one-example lookahead buffer
        self.example = None
    def num_examples(self):
        """Number of examples in VQA-v1."""
        return len(self.ques['questions'])
    def num_episodes(self):
        # same as number of examples since all episodes are of length one
        return self.num_examples()
    def submit_load_request(self, image_id):
        # ask the shared data loader to fetch this image in the background;
        # the result arrives via receive_data / data_queue
        img_path = self.image_path + '%012d.jpg' % (image_id)
        self.data_loader.request_load(
            self.receive_data, self.image_loader.load, (img_path,)
        )
    def get(self, episode_idx, entry_idx=0):
        # queue up the next one
        qa = self.ques['questions'][episode_idx]
        question = qa['question']
        action = {
            'text': question,
            'image_id': qa['image_id'],
            'episode_done': True
        }
        # the test split has no annotations, hence no labels
        if not self.datatype.startswith('test'):
            anno = self.annotation['annotations'][episode_idx]
            action['labels'] = [ans['answer'] for ans in anno['answers']]
        return action
    def next_example(self):
        """Return the previously queued example (attaching its now-loaded
        image) and queue the following one for async image loading.
        """
        # save the currently queued example
        ready = None
        if self.example is not None:
            if self.image_mode != 'none':
                # blocks until the background image load completes
                image = self.data_queue.get()
                self.example['image'] = image
            ready = (self.example, self.epochDone)
        # queue up the next example
        self.example, self.epochDone = super().next_example()
        if self.image_mode != 'none' and 'image_id' in self.example:
            image_id = self.example['image_id']
            self.submit_load_request(image_id)
        # Try to return the previously cached example
        if ready is None:
            # first call: nothing cached yet, so recurse once to prime
            return self.next_example()
        else:
            return ready
    def share(self):
        # expose loaded data so forked copies avoid re-reading the json
        shared = super().share()
        shared['ques'] = self.ques
        if hasattr(self, 'annotation'):
            shared['annotation'] = self.annotation
        shared['image_loader'] = self.image_loader
        return shared
    def _setup_data(self, data_path, annotation_path):
        print('loading: ' + data_path)
        with open(data_path) as data_file:
            self.ques = json.load(data_file)
        if not self.datatype.startswith('test'):
            print('loading: ' + annotation_path)
            with open(annotation_path) as data_file:
                self.annotation = json.load(data_file)
class McTeacher(OeTeacher):
    """
    VQA Multiple-Choice teacher. Builds on OeTeacher but overrides the
    labels with the single multiple-choice answer and exposes the
    multiple-choice candidate set.
    """

    def get(self, episode_idx, entry_idx=0):
        action = super().get(episode_idx, entry_idx)
        question = self.ques['questions'][episode_idx]
        action['label_candidates'] = question['multiple_choices']
        # the test split has no annotations, hence no label
        if not self.datatype.startswith('test'):
            annotation = self.annotation['annotations'][episode_idx]
            action['labels'] = [annotation['multiple_choice_answer']]
        return action
class AllTeacher(OeTeacher):
    """
    VQA Teacher, which inherits from OeTeacher and gives access to
    the multiple choices and the multiple choice answer.
    """
    def act(self):
        # parent class increments episode_idx after getting ex, so need to
        # cache the episode_idx first
        episode_idx = self.episode_idx
        action = super().act()
        qa = self.ques['questions'][episode_idx]
        multiple_choices = qa['multiple_choices']
        action['label_candidates'] = multiple_choices
        if not self.datatype.startswith('test'):
            # remember the MC answer on the instance for the train branch
            self.mclabel = [anno['multiple_choice_answer']]
        if self.datatype.startswith('train'):
            action['mc_label'] = self.mclabel
        return action
class DefaultTeacher(McTeacher):
# default to Multiple-Choice Teacher
pass | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/vqa_v1/agents.py | 0.748904 | 0.16872 | agents.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
import json
def build(opt):
    """Download VisDial v0.9 and carve out train/valid/test json files.

    The official train json is split into train + a 1000-example valid set;
    the official val json is renamed and used as test. Idempotent.
    """
    version = 'v0.9'
    dpath = os.path.join(opt['datapath'], 'VisDial-v0.9')
    if not build_data.built(dpath, version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        fname1 = 'visdial_0.9_train.zip'
        fname2 = 'visdial_0.9_val.zip'
        url = 'https://computing.ece.vt.edu/~abhshkdz/data/visdial/'
        build_data.download(url + fname1, dpath, fname1)
        build_data.download(url + fname2, dpath, fname2)
        build_data.untar(dpath, fname1)
        build_data.untar(dpath, fname2)
        print('processing unpacked files')
        # Use 1000 examples from training set as validation.
        json1 = os.path.join(dpath, fname1.rsplit('.', 1)[0] + '.json')
        with open(json1) as t_json:
            train_data = json.load(t_json)
        # shallow-copy the wrapper, then give valid its own dialog list
        valid_data = train_data.copy()
        valid_data['data'] = train_data['data'].copy()
        valid_data['data']['dialogs'] = []
        # Use constant stride to pick examples.
        num_valid = 1000
        total = len(train_data['data']['dialogs'])
        # NOTE(review): if total < num_valid this makes step == 0 and
        # range() raises — assumes the train set has >= 1000 dialogs.
        step = total // (num_valid - 1)
        # iterate back-to-front so deletions don't shift pending indices
        for i in range(total - 1, 0, -step)[:num_valid]:
            valid_data['data']['dialogs'].append(train_data['data']['dialogs'][i])
            del train_data['data']['dialogs'][i]
        train_json = json1.rsplit('.', 1)[0] + '_train.json'
        valid_json = json1.rsplit('.', 1)[0] + '_valid.json'
        with open(train_json, 'w') as t_out, open(valid_json, 'w') as v_out:
            json.dump(train_data, t_out)
            json.dump(valid_data, v_out)
        os.remove(json1)
        # Use validation data as test.
        json2 = os.path.join(dpath, fname2.rsplit('.', 1)[0] + '.json')
        test_json = json2.rsplit('.', 1)[0] + '_test.json'
        build_data.move(json2, test_json)
build_data.mark_done(dpath, version) | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/visdial/build.py | 0.720565 | 0.169028 | build.py | pypi |
# Copyright (c) 2017-present, Moscow Institute of Physics and Technology.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from .build import build
from parlai.core.teachers import DialogTeacher
import json
import os
class DefaultTeacher(DialogTeacher):
    """ConvAI chit-chat teacher: each logged dialog yields examples from
    both participants' perspectives, seeded by the dialog's context.
    """
    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        self.data_path = DefaultTeacher._path(opt)
        opt['datafile'] = self.data_path
        self.id = 'ConvAIChitChat'
        super().__init__(opt, shared)
    @staticmethod
    def _path(opt):
        """Build the data and return the json file for this datatype.

        Raises RuntimeError for 'valid' (unsupported) or unknown datatypes.
        """
        build(opt)
        dt = opt['datatype'].split(':')[0]
        if dt == 'train':
            path = os.path.join(opt['datapath'], 'ConvAIChitChat', 'train.json')
        elif dt == 'test':
            path = os.path.join(opt['datapath'], 'ConvAIChitChat', 'test.json')
        elif dt == 'valid':
            raise RuntimeError('warning: validation is not supported')
        else:
            raise RuntimeError('Not valid datatype.')
        return path
    @staticmethod
    def _fold_utterances(raw_dialog):
        """Merge consecutive utterances by the same user into one turn,
        joining their texts with newlines.
        """
        dialog = []
        for utterance in raw_dialog:
            if len(dialog) > 0 and dialog[-1]['userId'] == utterance['userId']:
                dialog[-1]['text'] = dialog[-1]['text'] + '\n' + utterance['text']
            else:
                dialog.append(
                    {'text': utterance['text'], 'userId': utterance['userId']}
                )
        return dialog
    @staticmethod
    def _create_learning_examples(opponent_utterances, answer_utterances):
        """Pair opponent/answer turns into ((text, [label]), False) examples;
        the False marks them as continuations of an episode.
        """
        examples = [
            u
            for u in map(
                lambda pair: ((pair[0]['text'], [pair[1]['text']]), False),
                zip(opponent_utterances, answer_utterances)
            )
        ]
        return examples
    @staticmethod
    def _data_generator(dialogs_dict):
        """Yield examples for each dialog from both speakers' points of view.

        Each perspective is its own episode: it opens with the dialog
        context as the first example (episode_done flag True), followed by
        the alternating utterance pairs for that speaker.
        """
        for dialog in dialogs_dict:
            folded_dialog = DefaultTeacher._fold_utterances(dialog['thread'])
            context = dialog['context']
            # skip dialogs with fewer than two (folded) turns
            if len(folded_dialog) < 2:
                continue
            # split turns by speaker: u1 spoke first, u2 second
            u1_utterances = folded_dialog[::2]
            u2_utterances = folded_dialog[1::2]
            # second user's perspective: context opens the episode with an
            # empty label, then (u1 turn -> u2 reply) pairs
            it = (
                [((context, ['']), True)] +
                DefaultTeacher._create_learning_examples(u1_utterances, u2_utterances)
            )
            for second_user_examples in it:
                yield second_user_examples
            # first user's perspective: context is labeled with u1's opener,
            # then (u2 turn -> next u1 reply) pairs
            if len(u1_utterances) > 1:
                examples = (
                    [((context, [u1_utterances[0]['text']]), True)] +
                    DefaultTeacher._create_learning_examples(
                        u2_utterances, u1_utterances[1:]
                    )
                )
            else:
                examples = [((context, [u1_utterances[0]['text']]), True)]
            for first_user_examples in examples:
                yield first_user_examples
    @staticmethod
    def setup_data(path):
        """Load the dialog json at ``path`` and return the example iterator
        (an empty iterator when path is None).
        """
        print('loading: ' + path)
        if path is None:
            return iter(())
        with open(path) as data_file:
            dialogs = json.load(data_file)
return DefaultTeacher._data_generator(dialogs) | /roboy_parlai-0.1.post3.tar.gz/roboy_parlai-0.1.post3/parlai/tasks/convai_chitchat/agents.py | 0.713931 | 0.179674 | agents.py | pypi |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FbDialogTeacher
from .build import build
import copy
import os
'''All teachers have a version with and without label candidates. Each teacher
defaults to using a dataset with label candidates. To use a dataset without
label candidates, specify this using the task flag:
--task convai2:{TEACHER_NAME}:no_cands
where TEACHER_NAME is None, SelfOriginal (Self), or SelfRevised.
'''
def _path(opt, persona, use_cands):
    """Return the ConvAI2 data file path for the given persona setting,
    building/downloading the dataset first if necessary.

    The test split is not distributed, so 'test' falls back to 'valid'
    with a warning.
    """
    build(opt)
    fold = opt['datatype'].split(':')[0]
    if fold == 'test':
        print("WARNING: Test set not included. Setting datatype to valid.")
        fold = 'valid'
    suffix = '' if use_cands else '_no_cands'
    fname = '{}_{}{}.txt'.format(fold, persona, suffix)
    return os.path.join(opt['datapath'], 'ConvAI2', fname)
class NoneTeacher(FbDialogTeacher):
    """ConvAI2 teacher for the 'none_original' setting (no persona shown).

    Label candidates are included unless the task flag carries a third
    ':no_cands' field, e.g. '--task convai2:none:no_cands'.
    """

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # Parse the optional third task field explicitly instead of
        # indexing inside a broad try/except Exception, which could hide
        # unrelated errors.
        fields = opt.get('task', '').split(':')
        use_cands = not (len(fields) > 2 and fields[2] == 'no_cands')
        opt['datafile'] = _path(opt, 'none_original', use_cands)
        super().__init__(opt, shared)
class SelfOriginalTeacher(FbDialogTeacher):
    """ConvAI2 teacher for the 'self_original' setting (speaker sees its
    own persona).

    Label candidates are included unless the task flag carries a third
    ':no_cands' field, e.g. '--task convai2:self_original:no_cands'.
    """

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # Parse the optional third task field explicitly instead of
        # indexing inside a broad try/except Exception, which could hide
        # unrelated errors.
        fields = opt.get('task', '').split(':')
        use_cands = not (len(fields) > 2 and fields[2] == 'no_cands')
        opt['datafile'] = _path(opt, 'self_original', use_cands)
        super().__init__(opt, shared)
class SelfTeacher(SelfOriginalTeacher):
    """Alias so '--task convai2:self' selects the self_original data."""
    pass
class SelfRevisedTeacher(FbDialogTeacher):
    """ConvAI2 teacher for the 'self_revised' setting (rephrased personas).

    Label candidates are included unless the task flag carries a third
    ':no_cands' field, e.g. '--task convai2:self_revised:no_cands'.
    """

    def __init__(self, opt, shared=None):
        opt = copy.deepcopy(opt)
        # Parse the optional third task field explicitly instead of
        # indexing inside a broad try/except Exception, which could hide
        # unrelated errors.
        fields = opt.get('task', '').split(':')
        use_cands = not (len(fields) > 2 and fields[2] == 'no_cands')
        opt['datafile'] = _path(opt, 'self_revised', use_cands)
        super().__init__(opt, shared)
class DefaultTeacher(SelfOriginalTeacher):
    """Default ConvAI2 teacher: the self_original persona setting."""
    pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
try:
from emoji.unicode_codes import UNICODE_EMOJI
import unidecode
except ImportError:
raise ImportError('Please `pip install emoji unidecode` for the twitter task.')
import parlai.core.build_data as build_data
import os
def replace_emoji(x):
    """Map a single emoji character to its spelled-out alias, padded with
    spaces and with colons swapped for '@' (e.g. ' @smile@ ').

    Non-emoji characters are returned unchanged. Uses a single dict
    lookup via .get instead of an `in UNICODE_EMOJI.keys()` test followed
    by a second lookup.
    """
    alias = UNICODE_EMOJI.get(x)
    if alias is not None:
        return ' ' + alias.replace(':', '@') + ' '
    return x
def split_punctuation(x):
    """Pad punctuation characters with spaces so they tokenize separately.

    The substitutions run in a fixed order; the '. . .' -> '...' step is
    applied after single dots are padded, exactly as before.
    """
    substitutions = [
        ('.', ' . '),
        ('. . .', '...'),
        (',', ' , '),
        (';', ' ; '),
        (':', ' : '),
        ('!', ' ! '),
        ('?', ' ? '),
        ('"', ' " '),
        ('(', ' ( '),
        (')', ' ) '),
    ]
    for old, new in substitutions:
        x = x.replace(old, new)
    return x
def create_fb_format(data, dpath):
    """Write alternating (context, response) line pairs from `data` into
    FbDialog-format train/valid/test files under `dpath`.

    Pairs whose even index is a multiple of 500 go to valid, those at
    offset 2 go to test, the rest to train. Pairs where either side
    normalizes to the empty string are dropped.
    """
    # Context managers ensure the output files are closed even if a pair
    # fails to process (the originals were only closed on success).
    with open(os.path.join(dpath, 'train.txt'), 'w') as fw1, \
            open(os.path.join(dpath, 'valid.txt'), 'w') as fw2, \
            open(os.path.join(dpath, 'test.txt'), 'w') as fw3:
        for i in range(0, len(data) - 1, 2):
            fout = fw1
            if (i % 500) == 0:
                fout = fw2
            elif (i % 500) == 2:
                fout = fw3
            x = data[i].strip(' ').replace('\t', ' ')
            y = data[i + 1].strip(' ').replace('\t', ' ')
            # '|' is the FbDialog candidate separator, so escape it.
            x = x.replace('|', ' __PIPE__ ')
            y = y.replace('|', ' __PIPE__ ')
            # Replace emoji with alias tokens, strip accents, pad
            # punctuation with spaces.
            x = ''.join(map(replace_emoji, x))
            y = ''.join(map(replace_emoji, y))
            x = split_punctuation(unidecode.unidecode(x))
            y = split_punctuation(unidecode.unidecode(y))
            # Collapse runs of whitespace.
            x = ' '.join(x.split())
            y = ' '.join(y.split())
            if len(x) < 1 or len(y) < 1:
                continue
            s = 'text:' + x + '\tlabels:' + y + '\tepisode_done:True'
            fout.write('{} \n'.format(s))
def build(opt):
    """Download the two-part gzipped Twitter corpus, concatenate and
    decompress it, and convert it to FbDialog train/valid/test files.

    Idempotent: skipped when the version marker is already present.
    """
    version = 'v1.1'
    dpath = os.path.join(opt['datapath'], 'Twitter')
    if not build_data.built(dpath, version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data (the corpus is split into two archive parts).
        fname1 = "twitter_en_big.txt.gz.partaa"
        fname2 = "twitter_en_big.txt.gz.partab"
        url = 'https://github.com/Marsan-Ma/chat_corpus/raw/master/'
        build_data.download(url + fname1, dpath, fname1)
        build_data.download(url + fname2, dpath, fname2)
        file1 = os.path.join(dpath, fname1)
        file2 = os.path.join(dpath, fname2)
        file3 = "twitter_en_big.txt.gz"
        outzipfile = os.path.join(dpath, file3)
        # Re-join the two parts into a single gzip archive.
        build_data.cat(file1, file2, outzipfile)
        import gzip
        with gzip.open(outzipfile, 'r') as f:
            file_content = bytes.decode(f.read())
        # The first two lines are skipped (not dialog pairs).
        data = file_content.split('\n')[2:]
        create_fb_format(data, dpath)
        os.remove(outzipfile)
        # Mark the data as built.
        build_data.mark_done(dpath, version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
'''
Provides a dump of Wikipedia articles from 2/3/18.
One can either load full articles, using 'wikipedia:full',
or simply load the first paragraphs of the articles,
using 'wikipedia:summary'
To put the article in the labels and the title in the text, specify
':key-value' at the end (for a title/content key-value association)
'''
from parlai.core.teachers import DialogTeacher
from .build import build
import json
import os
class FullTeacher(DialogTeacher):
    """Reads full extracted Wikipedia articles one at a time.

    Each article is its own single-turn episode. With ':key-value' in the
    task string, the title is the text and the article body the label;
    otherwise the body alone is the text with no label.
    """

    def __init__(self, opt, shared=None):
        # Check for ':key-value' before overwriting opt['task'] below.
        self.key_value = ':key-value' in opt['task']
        opt['task'] = 'wikipedia:all'
        build(opt)
        self.opt = opt
        opt['datafile'] = os.path.join(
            opt['datapath'],
            'wikipedia/full/wiki_full_extracted')
        self.id = 'wikipedia'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Walk the extraction output directory; every line of every file
        is one json-encoded article with 'title' and 'text' fields."""
        print('loading: ' + path)
        for subdir in os.listdir(path):
            if subdir == 'README.md':
                continue
            subdir_path = os.path.join(path, subdir)
            for wiki_file in os.listdir(subdir_path):
                wiki_file_path = os.path.join(subdir_path, wiki_file)
                with open(wiki_file_path) as wf:
                    for article_json in wf:
                        article = json.loads(article_json)
                        title = article['title']
                        text = article['text']
                        if self.key_value:
                            yield (title, [text]), True
                        else:
                            yield (text, None), True

    def get_extraction_instructions(self):
        '''If one wants to run extraction themselves on a raw wikipedia dump'''
        dpath = os.path.join(self.opt['datapath'], 'wikipedia', 'full')
        fname = 'enwiki-latest-pages-articles.xml.bz2'
        # NOTE(review): this writes to 'wiki_extracted' while the datafile
        # above reads 'wiki_full_extracted' -- confirm the intended output
        # directory before relying on these instructions.
        instructions = (
            "To complete the data extraction, please run the following:\n"
            "mkdir -p {download} && "
            "git clone https://github.com/attardi/wikiextractor "
            "{download}/wikiextract && cd {download}/wikiextract && "
            "python WikiExtractor.py {wikifile} --filter_disambig_pages "
            "-o {output} --json"
        ).format(
            download=self.opt['download_path'],
            wikifile=dpath + '/' + fname,
            output=dpath + '/' + 'wiki_extracted'
        )
        return instructions
class SummaryTeacher(DialogTeacher):
    """Reads Wikipedia pages one at a time, using only article summaries.

    With ':key-value' in the task string, the title is the text and the
    summary the label; otherwise title and summary are joined as the text.
    """

    def __init__(self, opt, shared=None):
        # Check for ':key-value' before overwriting opt['task'] below.
        self.key_value = ':key-value' in opt['task']
        opt['task'] = 'wikipedia:summary'
        build(opt)
        opt['datafile'] = os.path.join(
            opt['datapath'],
            'wikipedia/summary/summaries.json')
        self.id = 'wikipedia'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Each line of the summaries file is one json-encoded article
        with 'title' and 'text' fields; every article is its own episode."""
        print('loading: ' + path)
        with open(path) as wf:
            for article_json in wf:
                article = json.loads(article_json)
                title = article['title']
                text = article['text']
                if self.key_value:
                    yield (title, [text]), True
                else:
                    yield (title + '\n' + text, None), True
class DefaultTeacher(SummaryTeacher):
    """Default Wikipedia teacher: article summaries only."""
    pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.agents import Teacher
from .build import build
import json
import os
import random
def _path(opt, task_size='small'):
    """Return path to json file of dataset - it can be train/valid file
    of small/large dataset. Validation data is used for test as well,
    because labels are inferred from the image and task itself.
    """
    fold = opt['datatype'].split(':')[0]
    # ensure data is built
    build(opt)
    if fold == 'train':
        file_name = 'train.json'
    elif fold in ('valid', 'test'):
        # test reuses the validation file (see docstring)
        file_name = 'valid.json'
    else:
        raise RuntimeError('Not valid datatype.')
    return os.path.join(opt['datapath'], 'taskntalk', task_size, file_name)
class TaskNTalkTeacher(Teacher):
    """TaskNTalk basic teacher, it picks a random image and associates
    a random task with it. Metric updates and observation are to be
    implemented.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.id = 'taskntalk'
        if not shared:
            self._setup_data(self.opt['datafile'])
        else:
            # Shared instances reuse the already-loaded data.
            self.data = shared['data']
            self.task_defn = shared['task_defn']
            self.task_index = shared['task_index']

    def _setup_data(self, data_path):
        """Read the json file and store images and task definitions."""
        print('loading: ' + data_path)
        with open(data_path) as data_file:
            json_data = json.load(data_file)
            self.data = json_data['data']
            self.task_defn = json_data['task_defn']
        # images are [color, shape, style] lists (example: ['red', 'square', 'dotted'])
        self.task_index = {'color': 0, 'shape': 1, 'style': 2}
        random.shuffle(self.data)

    def share(self):
        """Share images and task definitions with other teachers."""
        shared = super().share()
        shared['data'] = self.data
        shared['task_defn'] = self.task_defn
        shared['task_index'] = self.task_index
        return shared

    def __len__(self):
        # Number of images available.
        return len(self.data)

    def observe(self, observation):
        """Process observation for metrics."""
        self.observation = observation
        # TODO(kd): update metrics
        return observation

    def act(self):
        """Select random image and associate random task with it.

        The labels are the image's attribute values for the two
        attributes named by the sampled task.
        """
        image = random.choice(self.data)
        task = random.choice(self.task_defn)
        labels = [image[self.task_index[attr]] for attr in task]
        action = {
            'image': ' '.join(image),
            'text': ' '.join(task),
            'labels': [' '.join(labels)],
            'episode_done': True
        }
        # TODO(kd): fetch all data for valid/test
        return action
class SmallTeacher(TaskNTalkTeacher):
    """Teacher for small dataset, invoked by ``taskntalk:small``."""

    def __init__(self, opt, shared=None):
        opt['datafile'] = _path(opt, 'small')
        super().__init__(opt, shared)
class LargeTeacher(TaskNTalkTeacher):
    """Teacher for large dataset, invoked by ``taskntalk:large``."""

    def __init__(self, opt, shared=None):
        opt['datafile'] = _path(opt, 'large')
        super().__init__(opt, shared)
class DefaultTeacher(SmallTeacher):
    """Default teacher for small dataset, invoked by ``taskntalk``."""
    pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import itertools
import json
import os
import random
import parlai.core.build_data as build_data
def _dump_json(payload, path):
    """Write `payload` as pretty-printed, key-sorted json to `path`."""
    with open(path, 'w') as outfile:
        json.dump(
            payload, outfile, indent=4, separators=(',', ': '), sort_keys=True
        )


def _make_dataset(dpath, size_name, properties, valid_frac, base):
    """Generate train/valid json files for one dataset size.

    Enumerates every attribute combination, samples `valid_frac` of them
    for validation, and keeps the remainder (in enumeration order) for
    training.
    """
    to_save = dict(base)
    to_save['properties'] = properties
    # properties.values() not used directly to maintain attribute order
    data_verbose = list(
        itertools.product(*[properties[key] for key in base['attributes']])
    )
    valid = random.sample(data_verbose, int(valid_frac * len(data_verbose)))
    # Set membership gives O(1) lookups instead of an O(n) list scan per
    # item (the original was quadratic in the dataset size).
    valid_set = set(valid)
    train = [s for s in data_verbose if s not in valid_set]
    to_save['data'] = train
    _dump_json(to_save, os.path.join(dpath, size_name, 'train.json'))
    to_save['data'] = valid
    _dump_json(to_save, os.path.join(dpath, size_name, 'valid.json'))


def build(opt):
    """Create train and validation data for synthetic shapes described by
    attributes."""
    dpath = os.path.join(opt['datapath'], 'taskntalk')
    if build_data.built(dpath):
        return
    print('[building data: ' + dpath + ']')
    build_data.make_dir(os.path.join(dpath, 'large'))
    build_data.make_dir(os.path.join(dpath, 'small'))
    # Shared metadata: each task names the two attributes to report.
    base = {
        'attributes': ['color', 'shape', 'style'],
        'task_defn': [['color', 'shape'], ['shape', 'color'],
                      ['color', 'style'], ['style', 'color'],
                      ['shape', 'style'], ['style', 'shape']]
    }
    # small dataset properties
    small_properties = {
        'color': ['red', 'green', 'blue', 'purple'],
        'shape': ['square', 'triangle', 'circle', 'star'],
        'style': ['dotted', 'solid', 'filled', 'dashed']
    }
    _make_dataset(dpath, 'small', small_properties, 0.2, base)
    # large dataset properties
    large_properties = {
        'color': [
            'red', 'green', 'blue', 'purple', 'yellow', 'cyan', 'orange', 'teal'
        ],
        'shape': [
            'square', 'triangle', 'circle', 'star', 'heart', 'spade',
            'club', 'diamond'
        ],
        'style': [
            'dotted', 'solid', 'filled', 'dashed', 'hstripe', 'vstripe',
            'hgrad', 'vgrad'
        ]
    }
    # NOTE(review): 0.8 of the large set goes to validation (inverse of
    # the small split's 0.2) -- preserved as-is; confirm this is intended.
    _make_dataset(dpath, 'large', large_properties, 0.8, base)
    # Mark the data as built.
    build_data.mark_done(dpath)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher, DialogTeacher, ParlAIDialogTeacher
from .build import build
import copy
import json
import os
def get_sentence_tokenizer():
    """Load the nltk English Punkt sentence tokenizer, downloading the
    'punkt' resource on first use."""
    try:
        import nltk
    except ImportError:
        raise ImportError('Please install nltk (e.g. pip install nltk).')
    # nltk-specific setup
    tokenizer_path = 'tokenizers/punkt/{0}.pickle'.format('english')
    try:
        return nltk.data.load(tokenizer_path)
    except LookupError:
        # Resource not present locally yet; fetch it and retry.
        nltk.download('punkt')
        return nltk.data.load(tokenizer_path)
class IndexTeacher(FixedDialogTeacher):
    """Hand-written SQuAD teacher, which loads the json squad data and
    implements its own `act()` method for interacting with student agent,
    rather than inheriting from the core Dialog Teacher. This code is here as
    an example of rolling your own without inheritance.

    This teacher also provides access to the "answer_start" indices that
    specify the location of the answer in the context.
    """

    def __init__(self, opt, shared=None):
        build(opt)
        super().__init__(opt, shared)
        # SQuAD ships train and dev splits; 'test' also maps to dev.
        if self.datatype.startswith('train'):
            suffix = 'train'
        else:
            suffix = 'dev'
        datapath = os.path.join(
            opt['datapath'],
            'SQuAD',
            suffix + '-v1.1.json'
        )
        self.data = self._setup_data(datapath)
        self.id = 'squad'
        self.reset()

    def num_examples(self):
        # One example per question.
        return len(self.examples)

    def num_episodes(self):
        # Every example is its own single-turn episode.
        return self.num_examples()

    def get(self, episode_idx, entry_idx=None):
        """Return the action dict for one question: context + question as
        text, all reference answers as labels, plus their character
        offsets within the context in 'answer_starts'."""
        article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
        article = self.squad[article_idx]
        paragraph = article['paragraphs'][paragraph_idx]
        qa = paragraph['qas'][qa_idx]
        question = qa['question']
        answers = []
        answer_starts = []
        for a in qa['answers']:
            answers.append(a['text'])
            answer_starts.append(a['answer_start'])
        context = paragraph['context']
        action = {
            'id': 'squad',
            'text': context + '\n' + question,
            'labels': answers,
            'episode_done': True,
            'answer_starts': answer_starts
        }
        return action

    def _setup_data(self, path):
        """Load the raw squad json and build a flat (article, paragraph,
        qa) index so examples can be addressed by a single integer."""
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        self.examples = []
        for article_idx in range(len(self.squad)):
            article = self.squad[article_idx]
            for paragraph_idx in range(len(article['paragraphs'])):
                paragraph = article['paragraphs'][paragraph_idx]
                num_questions = len(paragraph['qas'])
                for qa_idx in range(num_questions):
                    self.examples.append((article_idx, paragraph_idx, qa_idx))
class DefaultTeacher(DialogTeacher):
    """This version of SQuAD inherits from the core Dialog Teacher, which just
    requires it to define an iterator over its data `setup_data` in order to
    inherit basic metrics, a default `act` function.

    For SQuAD, this does not efficiently store the paragraphs in memory.
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        build(opt)
        # SQuAD ships train and dev splits; 'test' also maps to dev.
        if opt['datatype'].startswith('train'):
            suffix = 'train'
        else:
            suffix = 'dev'
        opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD',
                                       suffix + '-v1.1.json')
        self.id = 'squad'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield one episode per question: context + question as text,
        all reference answer texts as labels."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        for article in self.squad:
            # each paragraph is a context for the attached questions
            for paragraph in article['paragraphs']:
                # each question is an example
                for qa in paragraph['qas']:
                    question = qa['question']
                    answers = (a['text'] for a in qa['answers'])
                    context = paragraph['context']
                    yield (context + '\n' + question, answers), True
class OpensquadTeacher(DialogTeacher):
    """This version of SQuAD inherits from the core Dialog Teacher, which just
    requires it to define an iterator over its data `setup_data` in order to
    inherit basic metrics, a default `act` function.

    Note: This teacher omits the context paragraph
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        build(opt)
        # SQuAD ships train and dev splits; 'test' also maps to dev.
        if opt['datatype'].startswith('train'):
            suffix = 'train'
        else:
            suffix = 'dev'
        opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD',
                                       suffix + '-v1.1.json')
        self.id = 'squad'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield one episode per question: the bare question as text
        (no context paragraph), all reference answers as labels."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        for article in self.squad:
            # each paragraph is a context for the attached questions
            for paragraph in article['paragraphs']:
                # each question is an example
                for qa in paragraph['qas']:
                    question = qa['question']
                    answers = (a['text'] for a in qa['answers'])
                    yield (question, answers), True
class TitleTeacher(DefaultTeacher):
    """This version of SquAD inherits from the Default Teacher. The only
    difference is that the 'text' field of an observation will contain
    the title of the article separated by a newline from the paragraph and the
    query.

    Note: The title will contain underscores, as it is the part of the link for
    the Wikipedia page; i.e., the article is at the site:
    https://en.wikipedia.org/wiki/{TITLE}
    Depending on your task, you may wish to remove underscores.
    """

    def __init__(self, opt, shared=None):
        self.id = 'squad_title'
        build(opt)
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield one episode per question with text formatted as
        '<title>\\n<context>\\n<question>'."""
        print('loading: ' + path)
        with open(path) as data_file:
            self.squad = json.load(data_file)['data']
        for article in self.squad:
            title = article['title']
            # each paragraph is a context for the attached questions
            for paragraph in article['paragraphs']:
                # each question is an example
                for qa in paragraph['qas']:
                    question = qa['question']
                    answers = (a['text'] for a in qa['answers'])
                    context = paragraph['context']
                    yield (
                        '\n'.join([title, context, question]),
                        answers
                    ), True
class FulldocTeacher(ParlAIDialogTeacher):
    """SQuAD teacher over the full-document variant: pre-converted
    ParlAI-dialog files where each episode covers a whole document."""

    def __init__(self, opt, shared=None):
        build(opt)
        opt = copy.deepcopy(opt)
        # Only train and valid splits exist for the fulldoc conversion.
        if opt['datatype'].startswith('train'):
            suffix = 'train'
        else:
            suffix = 'valid'
        # ':ordered' keeps episodes in document order.
        datafile = os.path.join(opt['datapath'],
                                'SQuAD-fulldoc',
                                "squad_fulldocs." + suffix + ":ordered")
        opt['parlaidialogteacher_datafile'] = datafile
        super().__init__(opt, shared)
        self.id = 'squad-fulldoc'
        self.reset()
class SentenceTeacher(IndexTeacher):
    """Teacher where the label(s) are the sentences that contain the true
    answer.

    Some punctuation may be removed from the context and the answer for
    tokenization purposes.

    If `include_context` is False, the teacher returns action dict in the
    following format:
    {
        'context': <context>,
        'text': <question>,
        'labels': <sentences containing the true answer>,
        'label_candidates': <all sentences in the context>,
        'episode_done': True,
        'answer_starts': <index of start of answer in context>
    }
    Otherwise, the 'text' field contains <context>\n<question> and there is
    no separate context field.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.sent_tok = get_sentence_tokenizer()
        self.include_context = opt.get('include_context', False)

    @staticmethod
    def add_cmdline_args(argparser):
        agent = argparser.add_argument_group(
            'SQuAD Sentence Teacher Arguments'
        )
        agent.add_argument('--include-context', type='bool', default=False,
                           help='include context within text instead of as a '
                           'separate field')

    def get(self, episode_idx, entry_idx=None):
        """Build the action for one question, with answer-bearing
        sentences as labels and every context sentence as a candidate."""
        article_idx, paragraph_idx, qa_idx = self.examples[episode_idx]
        article = self.squad[article_idx]
        paragraph = article['paragraphs'][paragraph_idx]
        qa = paragraph['qas'][qa_idx]
        context = paragraph['context']
        question = qa['question']
        answers = [a['text'] for a in qa['answers']]
        # remove '.', '?', '!' from answers for proper sentence
        # tokenization
        # NOTE(review): str.replace rewrites *every* occurrence of the
        # answer string in the context, not just the annotated span --
        # presumably acceptable here, but verify if exact offsets matter.
        edited_answers = []
        for answer in answers:
            new_answer = answer.replace(
                '.', '').replace('?', '').replace('!', '')
            context = context.replace(answer, new_answer)
            edited_answers.append(new_answer)
        edited_sentences = self.sent_tok.tokenize(context)
        labels = []
        label_starts = []
        for sentence in edited_sentences:
            for answer in edited_answers:
                if answer in sentence and sentence not in labels:
                    labels.append(sentence)
                    # 'answer_starts' holds the sentence's first
                    # occurrence in the (edited) context.
                    label_starts.append(context.index(sentence))
                    break
        action = {
            'context': context,
            'text': question,
            'labels': labels,
            'label_candidates': edited_sentences,
            'episode_done': True,
            'answer_starts': label_starts
        }
        if self.include_context:
            action['text'] = action['context'] + '\n' + action['text']
            del action['context']
        return action
class FulldocsentenceTeacher(FulldocTeacher):
    """Teacher which contains the question as the text, the sentences as the
    label candidates, and the label as the sentence containing the answer.

    Some punctuation may be removed for tokenization purposes.

    If `include_context` is False, the teacher returns action dict in the
    following format:
    {
        'context': <context>,
        'text': <question>,
        'labels': <sentences containing the true answer>,
        'label_candidates': <all sentences in the context>,
        'episode_done': True,
        'answer_starts': <index of start of answer in context>
    }
    Otherwise, the 'text' field contains <context>\n<question> and there is
    no separate context field.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.sent_tok = get_sentence_tokenizer()
        self.include_context = opt.get('include_context', False)

    @staticmethod
    def add_cmdline_args(argparser):
        agent = argparser.add_argument_group(
            'SQuAD Fulldoc Sentence Teacher Arguments'
        )
        agent.add_argument('--include-context', type='bool', default=False,
                           help='include context within text instead of as a '
                           'separate field')

    def get(self, episode_idx, entry_idx=None):
        """Build the action for one entry of a full-document episode.

        The stored 'text' has the context on all lines but the last and
        the question on the last line.
        """
        action = {}
        episode = self.episodes[episode_idx][entry_idx]
        context = ' '.join(
            episode['text'].split('\n')[:-1]
        ).replace('\xa0', ' ')  # get rid of non breaking space characters
        question = episode['text'].split('\n')[-1]
        # Train episodes carry 'labels'; eval episodes 'eval_labels'.
        label_field = 'labels' if 'labels' in episode else 'eval_labels'
        answers = []
        for answer in episode[label_field]:
            # Strip sentence-ending punctuation from answers so the
            # tokenizer does not split inside them.
            new_answer = answer.replace(
                '.', '').replace('?', '').replace('!', '')
            context = context.replace(answer, new_answer)
            answers.append(new_answer)
        sentences = self.sent_tok.tokenize(context)
        labels = []
        label_starts = []
        for sentence in sentences:
            for answer in answers:
                if answer in sentence and sentence not in labels:
                    labels.append(sentence)
                    # Sentence start offset within the (edited) context.
                    label_starts.append(context.index(sentence))
        action = {
            'context': context,
            'text': question,
            label_field: labels,
            'answer_starts': label_starts,
            'label_candidates': sentences,
            'episode_done': episode['episode_done']
        }
        if self.include_context:
            action['text'] = action['context'] + '\n' + action['text']
            del action['context']
        return action
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def buildImage(opt):
    """Download and unpack the 2014 COCO train/val/test image archives
    into <datapath>/COCO-IMG-2014; no-op if already built."""
    dpath = os.path.join(opt['datapath'], 'COCO-IMG-2014')
    version = '1'
    if not build_data.built(dpath, version_string=version):
        print('[building image data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the image data.
        fname1 = 'train2014.zip'
        fname2 = 'val2014.zip'
        fname3 = 'test2014.zip'
        url = 'http://parl.ai/downloads/COCO-IMG/'
        build_data.download(url + fname1, dpath, fname1)
        build_data.download(url + fname2, dpath, fname2)
        build_data.download(url + fname3, dpath, fname3)
        build_data.untar(dpath, fname1)
        build_data.untar(dpath, fname2)
        build_data.untar(dpath, fname3)
        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
def build(opt):
    """Download and unpack the 2014 COCO caption annotations (Karpathy
    split archive) into <datapath>/COCO_2014_Caption; no-op if built."""
    dpath = os.path.join(opt['datapath'], 'COCO_2014_Caption')
    version = '1.0'
    # check if data had been previously built
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        # make a clean directory if needed
        if build_data.built(dpath):
            # an older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # download the data.
        fname = 'dataset_coco.tgz'
        # dataset URL
        url = 'http://parl.ai/downloads/coco_caption/'
        build_data.download(url + fname, dpath, fname)
        # uncompress it
        build_data.untar(dpath, fname)
        # mark the data as built
        build_data.mark_done(dpath, version_string=version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def buildImage(opt):
    """Download and unpack the 2017 COCO train/val/test image archives
    into <datapath>/COCO-IMG-2017; no-op if already built."""
    dpath = os.path.join(opt['datapath'], 'COCO-IMG-2017')
    version = '1'
    if not build_data.built(dpath, version_string=version):
        print('[building image data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the image data.
        fname1 = 'train2017.zip'
        fname2 = 'val2017.zip'
        fname3 = 'test2017.zip'
        url = 'http://parl.ai/downloads/COCO-IMG/'
        build_data.download(url + fname1, dpath, fname1)
        build_data.download(url + fname2, dpath, fname2)
        build_data.download(url + fname3, dpath, fname3)
        build_data.untar(dpath, fname1)
        build_data.untar(dpath, fname2)
        build_data.untar(dpath, fname3)
        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
def build(opt):
    """Download and unpack the official 2017 COCO caption annotations
    into <datapath>/COCO_2017_Caption; no-op if already built."""
    dpath = os.path.join(opt['datapath'], 'COCO_2017_Caption')
    # NOTE(review): version is None here while the 2014 builder uses
    # '1.0' -- presumably unversioned on purpose, but confirm; it changes
    # how build_data.built/mark_done track rebuilds.
    version = None
    # check if data had been previously built
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        # make a clean directory if needed
        if build_data.built(dpath):
            # an older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # download the data.
        fname1 = 'annotations_trainval2017.zip'
        fname2 = 'image_info_test2017.zip'
        # dataset URL
        url = 'http://images.cocodataset.org/annotations/'
        build_data.download(url + fname1, dpath, fname1)
        build_data.download(url + fname2, dpath, fname2)
        # uncompress it
        build_data.untar(dpath, fname1)
        build_data.untar(dpath, fname2)
        # mark the data as built
        build_data.mark_done(dpath, version_string=version)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher
from parlai.core.image_featurizers import ImageLoader
from .build_2014 import build as build_2014
from .build_2014 import buildImage as buildImage_2014
from .build_2017 import build as build_2017
from .build_2017 import buildImage as buildImage_2017
try:
import torch # noqa: F401
except Exception as e:
raise ImportError('Need to install Pytorch: go to pytorch.org')
from torch.utils.data import Dataset
import os
import json
import random
"""
Agents for MSCOCO Image Captioning Task
There are two versions of the task - one comprising MSCOCO 2014 splits
(from the 2015 task competition), and one comprising MSCOCO 2017 splits
For the 2014 splits, we use the train, val, and test split of Karpathy et.
al, "Deep visual-semantic alignments for generating image descriptions"
(splits from here: https://cs.stanford.edu/people/karpathy/deepimagesent/).
This split has ~82k train images, 5k validation images, and 5k test images.
The val and test images are taken from the original validation set of ~40k.
For 2017, we use the splits from the official MSCOCO Image Captioning 2017
task.
"""
# There is no real dialog in this task, so for the purposes of display_data, we
# include a generic question that applies to all images.
QUESTION = "Describe the above picture in a sentence."
def load_candidates(datapath, datatype, version):
    """Load ranking candidates (captions) for evaluation.

    :param datapath: root ParlAI data directory
    :param datatype: e.g. 'train', 'valid', 'test' (possibly with ':...' parts)
    :param version: '2014' or '2017' COCO release
    :return: for 'valid', the val-split captions; for 'test', train + val
        captions combined; None for 'train' (no candidates needed).
    """
    if datatype.startswith('train'):
        # training does not rank against a fixed candidate set
        return None
    suffix = 'captions_{}{}.json'
    suffix_val = suffix.format('val', version)
    val_path = os.path.join(datapath,
                            'COCO_{}_Caption'.format(version),
                            'annotations',
                            suffix_val)
    # use context managers so the annotation files are closed promptly
    # (the original json.load(open(...)) leaked the file handles)
    with open(val_path) as f:
        val = json.load(f)['annotations']
    val_caps = [x['caption'] for x in val]
    if datatype.startswith('test'):
        suffix_train = suffix.format('train', version)
        train_path = os.path.join(datapath,
                                  'COCO_{}_Caption'.format(version),
                                  'annotations',
                                  suffix_train)
        with open(train_path) as f:
            train = json.load(f)['annotations']
        train_caps = [x['caption'] for x in train]
        return train_caps + val_caps
    return val_caps
def _path(opt, version):
    """Build the requested COCO release and return the relevant paths.

    :return: (test_info_path, annotation_path, image_path); test_info_path
        is None for the 2014 release.
    """
    if version == '2014':
        build_2014(opt)
        buildImage_2014(opt)
    elif version == '2017':
        build_2017(opt)
        buildImage_2017(opt)
    else:
        raise Exception('Unknown version for COCO Captions: %s' % version)
    dt = opt['datatype'].split(':')[0]
    # map the ParlAI datatype onto a COCO split name; the 2014 release has
    # no usable test annotations, so its 'test' maps onto the val files
    if dt == 'train':
        split = 'train'
    elif dt == 'valid' or (dt == 'test' and version == '2014'):
        split = 'val'
    elif dt == 'test':
        split = 'test'
    else:
        raise RuntimeError('Not valid datatype.')
    # 2017 test has no captions file, hence the placeholder suffix
    annotation_suffix = 'None' if split == 'test' else split + version
    img_suffix = os.path.join(
        split + version,
        'COCO_{}{}_'.format(split, version) if version == '2014' else ''
    )
    if version == '2017':
        annotations_dir = os.path.join(opt['datapath'],
                                       'COCO_2017_Caption',
                                       'annotations')
        test_info_path = os.path.join(annotations_dir,
                                      'image_info_test2017.json')
        annotation_path = os.path.join(annotations_dir,
                                       'captions_' + annotation_suffix + '.json')
    else:
        test_info_path = None
        annotation_path = os.path.join(opt['datapath'],
                                       'COCO_2014_Caption',
                                       'dataset_coco.json')
    image_path = os.path.join(opt['datapath'], 'COCO-IMG-{}'.format(version),
                              img_suffix)
    return test_info_path, annotation_path, image_path
class DefaultDataset(Dataset):
    """A Pytorch Dataset utilizing streaming.

    Serves COCO caption episodes (image + captions [+ ranking candidates]).
    """
    def __init__(self, opt, version='2017'):
        self.opt = opt
        self.version = version
        self.use_intro = opt.get('use_intro', False)
        self.num_cands = opt.get('num_cands', -1)
        self.datatype = self.opt.get('datatype')
        self.include_rest_val = opt.get('include_rest_val', True)
        self.image_loader = ImageLoader(opt)
        test_info_path, annotation_path, self.image_path = _path(opt, version)
        self._setup_data(test_info_path, annotation_path, opt)

    @staticmethod
    def add_cmdline_args(argparser):
        DefaultTeacher.add_cmdline_args(argparser)

    def __getitem__(self, index):
        """Return (index, episode_dict) for the given example index."""
        ep = {
            'episode_done': True
        }
        if self.use_intro:
            ep['text'] = QUESTION
        if hasattr(self, 'annotation'):
            anno = self.annotation[index]
        else:
            # 2017 test split: only image metadata is available
            anno = self.test_info['images'][index]
        if self.version == '2014':
            ep['labels'] = [s['raw'] for s in anno['sentences']]
            ep['image_id'] = anno['cocoid']
            ep['split'] = anno['split']
        elif not self.datatype.startswith('test'):
            ep['image_id'] = anno['image_id']
            ep['labels'] = [anno['caption']]
        else:
            ep['image_id'] = anno['id']
        # BUG FIX: this line previously read ``ep['image']: self.get_image(...)``
        # (an annotation statement), so the image was loaded but never stored.
        ep['image'] = self.get_image(ep['image_id'], anno.get('split', None))
        if self.opt.get('extract_image', False):
            return ep
        # Add label candidates for ranking during evaluation
        if not self.datatype.startswith('train'):
            if self.num_cands == -1:
                ep['label_candidates'] = self.cands
            else:
                # seeded by index so the candidate set is stable per example
                candidates = random.Random(index).choices(self.cands,
                                                          k=self.num_cands)
                label = random.choice(ep.get('labels', ['']))
                if not (label == '' or label in candidates):
                    # make room, then ensure the true label is present
                    candidates.pop(0)
                    candidates.append(label)
                random.shuffle(candidates)
                ep['label_candidates'] = candidates
        return (index, ep)

    def __len__(self):
        return self.num_episodes()

    def _load_lens(self):
        # load precomputed episode/example counts
        with open(self.length_datafile) as length:
            lengths = json.load(length)
            self.num_eps = lengths['num_eps']
            self.num_exs = lengths['num_exs']

    def _setup_data(self, test_info_path, annotation_path, opt):
        """Load annotations (or test image info) for the configured split."""
        if self.version == '2014':
            with open(annotation_path) as data_file:
                raw_data = json.load(data_file)['images']
            if 'train' in self.datatype:
                self.annotation = [d for d in raw_data if d['split'] == 'train']
                if self.include_rest_val:
                    # 'restval' images are the val leftovers of the Karpathy split
                    self.annotation += [d for d in raw_data if d['split'] == 'restval']
            elif 'valid' in self.datatype:
                self.annotation = [d for d in raw_data if d['split'] == 'val']
                self.cands = [
                    l for d in self.annotation
                    for l in [
                        s['raw'] for s in d['sentences']
                    ]
                ]
            else:
                self.annotation = [d for d in raw_data if d['split'] == 'test']
                self.cands = [
                    l for d in self.annotation
                    for l in [
                        s['raw'] for s in d['sentences']
                    ]
                ]
        else:
            if not self.datatype.startswith('test'):
                print('loading: ' + annotation_path)
                with open(annotation_path) as data_file:
                    self.annotation = json.load(data_file)['annotations']
            else:
                print('loading: ' + test_info_path)
                with open(test_info_path) as data_file:
                    self.test_info = json.load(data_file)
            if not self.datatype.startswith('train'):
                self.cands = load_candidates(opt['datapath'],
                                             opt['datatype'],
                                             self.version)
        if opt.get('unittest', False):
            # shrink the data for fast unit tests
            if not self.datatype.startswith('test'):
                self.annotation = self.annotation[:10]
            else:
                self.test_info['images'] = self.test_info['images'][:10]

    def get_image(self, image_id, split):
        # 'restval' images physically live in the val image directory
        if split == 'restval':
            im_path = self.image_path.replace('train', 'val')
        else:
            im_path = self.image_path
        im_path = os.path.join(im_path, '%012d.jpg' % (image_id))
        return self.image_loader.load(im_path)

    def num_examples(self):
        if self.version == '2014' or not self.datatype.startswith('test'):
            return len(self.annotation)
        else:
            # For 2017, we only have annotations for the train and val sets,
            # so for the test set we need to determine how many images we have.
            return len(self.test_info['images'])

    def num_episodes(self):
        return self.num_examples()

    def num_images(self):
        if not hasattr(self, 'num_imgs'):
            return self.num_examples()
        return self.num_imgs
class V2014Dataset(DefaultDataset):
    """Streaming dataset pinned to the 2014 (Karpathy) COCO splits."""
    def __init__(self, opt):
        super().__init__(opt, '2014')
class V2017Dataset(DefaultDataset):
    """Streaming dataset pinned to the official 2017 COCO splits."""
    def __init__(self, opt):
        super().__init__(opt, '2017')
class DefaultTeacher(FixedDialogTeacher):
    """
    COCO default teacher that expects open-ended descriptions of images
    """
    def __init__(self, opt, shared=None, version='2017'):
        # version selects between the 2014 (Karpathy) and 2017 (official) splits
        super().__init__(opt, shared)
        self.version = version
        self.image_mode = opt.get('image_mode', 'none')
        self.use_intro = opt.get('use_intro', False)
        self.num_cands = opt.get('num_cands', -1)
        self.include_rest_val = opt.get('include_rest_val', False)
        test_info_path, annotation_path, self.image_path = _path(opt, version)
        self.test_split = opt['test_split']
        if shared:
            # another instance was set up already, just reference its data
            if 'annotation' in shared:
                self.annotation = shared['annotation']
            self.image_loader = shared['image_loader']
            if 'cands' in shared:
                self.cands = shared['cands']
        else:
            # need to set up data from scratch
            self._setup_data(test_info_path, annotation_path, opt)
            self.image_loader = ImageLoader(opt)
        self.reset()
    @staticmethod
    def add_cmdline_args(argparser):
        agent = argparser.add_argument_group('COCO Caption arguments')
        agent.add_argument('--use_intro', type='bool',
                           default=False,
                           help='Include an intro question with each image \
                           for readability (e.g. for coco_caption, \
                           Describe the above picture in a sentence.)')
        agent.add_argument('--num_cands', type=int,
                           default=150,
                           help='Number of candidates to use during \
                           evaluation, setting to -1 uses all.')
        agent.add_argument('--include_rest_val', type='bool',
                           default=False,
                           help='Include unused validation images in training')
        agent.add_argument('--test-split', type=int, default=-1,
                           choices=[-1, 0, 1, 2, 3, 4],
                           help='Which 1k image split of dataset to use for candidates'
                           'if -1, use all 5k test images')
    def reset(self):
        super().reset()  # call parent reset so other fields can be set up
        self.example = None  # set up caching fields
        self.imageEpochDone = False
    def num_examples(self):
        if self.version == '2014' or not self.datatype.startswith('test'):
            return len(self.annotation)
        else:
            # For 2017, we only have annotations for the train and val sets,
            # so for the test set we need to determine how many images we have.
            return len(self.test_info['images'])
    def num_episodes(self):
        # one example per episode
        return self.num_examples()
    def submit_load_request(self, image_id, split=None):
        # 'restval' images physically live in the val image directory
        if split == 'restval':
            img_path = self.image_path.replace('train', 'val')
        else:
            img_path = self.image_path
        img_path += '%012d.jpg' % (image_id)
        # hand the load off to the background data loader thread
        self.data_loader.request_load(self.receive_data,
                                      self.image_loader.load,
                                      (img_path,))
    def get(self, episode_idx, entry_idx=0):
        """Return the episode dict (captions + candidates) for the index.

        The image itself is attached later by next_example().
        """
        action = {
            'episode_done': True
        }
        if self.use_intro:
            action['text'] = QUESTION
        if self.version == '2014':
            ep = self.annotation[episode_idx]
            action['labels'] = [s['raw'] for s in ep['sentences']]
            action['image_id'] = ep['cocoid']
            action['split'] = ep['split']
            if not self.datatype.startswith('train'):
                if self.num_cands > 0:
                    labels = action['labels']
                    # sample a stable (seeded) distractor set, then add labels
                    cands_to_sample = [c for c in self.cands if c not in labels]
                    cands = (
                        random.Random(episode_idx)
                        .sample(cands_to_sample, self.num_cands)
                    ) + labels
                    random.shuffle(cands)
                    action['label_candidates'] = cands
                else:
                    action['label_candidates'] = self.cands
        else:
            if not self.datatype.startswith('test'):
                # test set annotations are not available for this dataset
                anno = self.annotation[episode_idx]
                action['labels'] = [anno['caption']]
                action['image_id'] = anno['image_id']
                if not self.datatype.startswith('train'):
                    if self.num_cands == -1:
                        candidates = self.cands
                    else:
                        # Can only randomly select from validation set
                        candidates = random.Random(
                            episode_idx).choices(self.cands, k=self.num_cands)
                        # keep the candidate list size constant whether or
                        # not the true caption happened to be sampled
                        if anno['caption'] not in candidates:
                            candidates.pop(0)
                        else:
                            candidates.remove(anno['caption'])
                    candidate_labels = [anno['caption']]
                    candidate_labels += candidates
                    action['label_candidates'] = candidate_labels
            else:
                if self.num_cands == -1:
                    candidates = self.cands
                else:
                    # Can only randomly select from validation set
                    candidates = random.Random(
                        episode_idx).choices(self.cands, k=self.num_cands)
                action['label_candidates'] = candidates
                action['image_id'] = self.test_info['images'][episode_idx]['id']
        return action
    def next_example(self):
        """Returns the next example from this dataset after starting to queue
        up the next example.
        """
        ready = None
        # pull up the currently queued example
        if self.example is not None:
            if self.image_mode != 'none' and 'image_id' in self.example:
                # move the image we loaded in the background into the example
                image = self.data_queue.get()
                self.example['image'] = image
            ready = (self.example, self.imageEpochDone)
        # get the next base example: super().next_example() calls self.get()
        self.example, self.imageEpochDone = super().next_example()
        if self.image_mode != 'none' and 'image_id' in self.example:
            # load the next image in the background
            image_id = self.example['image_id']
            split = self.example.get('split', None)
            self.submit_load_request(image_id, split)
        # Try to return the previously cached example
        if ready is None:
            # first call of an epoch: nothing cached yet, so recurse once
            return self.next_example()
        else:
            return ready
    def share(self):
        # expose loaded data so sibling instances avoid re-reading from disk
        shared = super().share()
        if hasattr(self, 'annotation'):
            shared['annotation'] = self.annotation
        shared['image_loader'] = self.image_loader
        if hasattr(self, 'cands'):
            shared['cands'] = self.cands
        return shared
    def _setup_data(self, test_info_path, annotation_path, opt):
        """Load annotations (or, for the 2017 test set, image info only)."""
        if self.version == '2014':
            with open(annotation_path) as data_file:
                raw_data = json.load(data_file)['images']
            if 'train' in self.datatype:
                self.annotation = [d for d in raw_data if d['split'] == 'train']
                if self.include_rest_val:
                    # 'restval' = val leftovers of the Karpathy split
                    self.annotation += [d for d in raw_data if d['split'] == 'restval']
            elif 'valid' in self.datatype:
                self.annotation = [d for d in raw_data if d['split'] == 'val']
                self.cands = [
                    l for d in self.annotation
                    for l in [
                        s['raw'] for s in d['sentences']
                    ]
                ]
            else:
                self.annotation = [d for d in raw_data if d['split'] == 'test']
                if self.test_split != -1:
                    # restrict to one 1k-image slice of the 5k test set
                    start = self.test_split * 1000
                    end = (self.test_split + 1) * 1000
                    self.annotation = self.annotation[start:end]
                self.cands = [
                    l for d in self.annotation
                    for l in [
                        s['raw'] for s in d['sentences']
                    ]
                ]
        else:
            if not self.datatype.startswith('test'):
                print('loading: ' + annotation_path)
                with open(annotation_path) as data_file:
                    self.annotation = json.load(data_file)['annotations']
            else:
                print('loading: ' + test_info_path)
                with open(test_info_path) as data_file:
                    self.test_info = json.load(data_file)
            if not self.datatype.startswith('train'):
                self.cands = load_candidates(opt['datapath'],
                                             opt['datatype'],
                                             self.version)
class V2014Teacher(DefaultTeacher):
    """Teacher pinned to the 2014 (Karpathy) COCO splits."""
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2014')
class V2017Teacher(DefaultTeacher):
    """Teacher pinned to the official 2017 COCO splits."""
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared, '2017')
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher
from .build import build
import json
import os
class DefaultTeacher(FixedDialogTeacher):
    """Teacher for the QAngaroo multi-hop QA datasets (defaults to WikiHop).

    Each episode is a single exchange: all support documents plus the query
    as text, with the answer as label and the provided candidate answers.
    """

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        # Build the data if it doesn't exist.
        build(opt)
        if shared:
            # reuse the examples another instance already loaded
            self.examples = shared['examples']
        else:
            self._setup_data(self._path(opt))
        self.id = 'qangaroo'
        self.reset()

    def _path(self, opt):
        # only a train and a dev file ship with the dataset
        prefix = opt['datatype'].split(':')[0]
        fold = 'train' if prefix == 'train' else 'dev'
        return os.path.join(opt['datapath'], 'qangaroo', 'qangaroo_v1.1',
                            'wikihop', fold + '.json')

    def num_examples(self):
        return len(self.examples)

    def num_episodes(self):
        # same as num_examples since only one exchange per episode
        return self.num_examples()

    def share(self):
        shared = super().share()
        shared['examples'] = self.examples
        return shared

    def get(self, episode_idx, entry_idx=None):
        ex = self.examples[episode_idx]
        # present every support document followed by the query
        text = '\n'.join(ex['supports']) + '\n' + ex['query']
        return {
            'id': 'qangaroo',
            'text': text,
            'query': ex['query'],
            'label_candidates': ex['candidates'],
            'label': [ex['answer']],
            'supports': ex['supports'],
            'episode_done': True,
        }

    def _setup_data(self, path):
        print("loading: ", path)
        with open(path) as f:
            self.examples = json.load(f)
class WikiHopTeacher(DefaultTeacher):
    """WikiHop split (identical to the default teacher's data)."""
    def _path(self, opt):
        fold = 'train' if opt['datatype'].split(':')[0] == 'train' else 'dev'
        return os.path.join(opt['datapath'], 'qangaroo', 'qangaroo_v1.1',
                            'wikihop', fold + '.json')
class MaskedWikiHopTeacher(DefaultTeacher):
    """WikiHop split with entity-masked documents."""
    def _path(self, opt):
        fold = ('train.masked' if opt['datatype'].split(':')[0] == 'train'
                else 'dev.masked')
        return os.path.join(opt['datapath'], 'qangaroo', 'qangaroo_v1.1',
                            'wikihop', fold + '.json')
class MedHopTeacher(DefaultTeacher):
    """MedHop split (drug-interaction multi-hop QA)."""
    def _path(self, opt):
        fold = 'train' if opt['datatype'].split(':')[0] == 'train' else 'dev'
        return os.path.join(opt['datapath'], 'qangaroo', 'qangaroo_v1.1',
                            'medhop', fold + '.json')
class MaskedMedHopTeacher(DefaultTeacher):
    """MedHop split with entity-masked documents."""
    def _path(self, opt):
        fold = ('train.masked' if opt['datatype'].split(':')[0] == 'train'
                else 'dev.masked')
        return os.path.join(opt['datapath'], 'qangaroo', 'qangaroo_v1.1',
                            'medhop', fold + '.json')
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.teachers import FixedDialogTeacher
from parlai.core.image_featurizers import ImageLoader
from .build import build
try:
import torch # noqa: F401
except Exception as e:
raise ImportError('Need to install Pytorch: go to pytorch.org')
from torch.utils.data import Dataset
from parlai.core.dict import DictionaryAgent
import os
import json
import random
# There is no real dialog in this task, so for the purposes of display_data, we
# include a generic question that applies to all images.
QUESTION = "Describe the above picture in a sentence."
def _path(opt):
    """Ensure the Flickr30k data exists and return (annotations, images) paths."""
    build(opt)
    root = os.path.join(opt['datapath'], 'Flickr30k')
    data_path = os.path.join(root, 'dataset.json')
    image_path = os.path.join(root, 'flickr30k_images')
    return data_path, image_path
class FlickrDataset(Dataset):
    """A Pytorch Dataset utilizing streaming of Flickr30k caption episodes."""
    def __init__(self, opt, shared=None):
        self.opt = opt
        self.datatype = self.opt.get('datatype')
        self.training = self.datatype.startswith('train')
        self.num_epochs = self.opt.get('num_epochs', 0)
        self.image_loader = ImageLoader(opt)
        data_path, self.image_path = _path(opt)
        self._setup_data(data_path, opt.get('unittest', False))
        self.dict_agent = DictionaryAgent(opt)

    @staticmethod
    def add_cmdline_args(argparser):
        DefaultTeacher.add_cmdline_args(argparser)

    def __getitem__(self, index):
        """Return (index, episode_dict) for the given example index."""
        cap = self.data[index]
        image_id = int(cap['filename'].replace('.jpg', ''))
        ep = {
            'text': QUESTION,
            'image': self.get_image(image_id),
            'episode_done': True,
        }
        if self.opt.get('extract_image', False):
            ep['image_id'] = image_id
            return ep
        ep['labels'] = [s['raw'] for s in cap['sentences']]
        ep['valid'] = True
        if 'train' not in self.datatype:
            # evaluation ranks against all captions of the split
            ep['label_candidates'] = self.cands
        return (index, ep)

    def __len__(self):
        return self.num_episodes()

    def _setup_data(self, data_path, unittest):
        """Load the Karpathy Flickr30k annotations for the configured split."""
        with open(data_path) as data_file:
            raw_data = json.load(data_file)['images']
        if 'train' in self.datatype:
            self.data = [d for d in raw_data if d['split'] == 'train']
        elif 'valid' in self.datatype:
            self.data = [d for d in raw_data if d['split'] == 'val']
            self.cands = [
                l for d in self.data for l in [s['raw'] for s in d['sentences']]
            ]
        else:
            self.data = [d for d in raw_data if d['split'] == 'test']
            self.cands = [
                l for d in self.data for l in [s['raw'] for s in d['sentences']]
            ]
        if unittest:
            # BUG FIX: this previously truncated the undefined attribute
            # ``self.caption``, raising AttributeError; shrink the loaded
            # data instead (mirrors the COCO dataset's unittest handling).
            self.data = self.data[:10]

    def get_image(self, image_id):
        im_path = os.path.join(self.image_path, '%d.jpg' % (image_id))
        return self.image_loader.load(im_path)

    def num_episodes(self):
        return len(self.data)

    def num_examples(self):
        return self.num_episodes()

    def num_images(self):
        return self.num_episodes()
class DefaultDataset(FlickrDataset):
    # Alias so the generic 'DefaultDataset' lookup resolves to FlickrDataset.
    pass
class DefaultTeacher(FixedDialogTeacher):
    """
    Flickr default teacher that expects open-ended descriptions of images
    """
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.image_mode = opt.get('image_mode', 'none')
        self.use_intro = opt.get('use_intro', False)
        self.num_cands = opt.get('num_cands', -1)
        data_path, self.image_path = _path(opt)
        if shared:
            # another instance was set up already, just reference its data
            self.data = shared['data']
            self.image_loader = shared['image_loader']
            if 'cands' in shared:
                self.cands = shared['cands']
        else:
            # need to set up data from scratch
            self._setup_data(data_path)
            self.image_loader = ImageLoader(opt)
        self.reset()
    @staticmethod
    def add_cmdline_args(argparser):
        agent = argparser.add_argument_group('Flickr30k arguments')
        agent.add_argument('--use_intro', type='bool',
                           default=False,
                           help='Include an intro question with each image \
                           for readability (e.g. for coco_caption, \
                           Describe the above picture in a sentence.)')
        agent.add_argument('--num_cands', type=int,
                           default=-1,
                           help='Number of candidates to use during \
                           evaluation, setting to -1 uses all.')
    def reset(self):
        super().reset()  # call parent reset so other fields can be set up
        self.example = None  # set up caching fields
        self.imageEpochDone = False
    def num_examples(self):
        return len(self.data)
    def num_episodes(self):
        # one example per episode
        return self.num_examples()
    def submit_load_request(self, image_id):
        # queue a background load of the image for the upcoming example
        img_path = os.path.join(self.image_path, '%d.jpg' % (image_id))
        self.data_loader.request_load(self.receive_data,
                                      self.image_loader.load,
                                      (img_path,))
    def get(self, episode_idx, entry_idx=0):
        """Return the episode dict (captions + candidates) for the index.

        The image itself is attached later by next_example().
        """
        ep = self.data[episode_idx]
        action = {
            'image_id': int(ep['filename'].replace('.jpg', '')),
            'episode_done': True,
            'labels': [s['raw'] for s in ep['sentences']]
        }
        if self.use_intro:
            action['text'] = QUESTION
        if 'train' not in self.datatype:
            if self.num_cands > 0:
                labels = action['labels']
                # sample a stable (seeded) distractor set, then add the labels
                cands_to_sample = [c for c in self.cands if c not in labels]
                cands = (
                    random.Random(episode_idx).sample(cands_to_sample, self.num_cands) +
                    labels
                )
                random.shuffle(cands)
                action['label_candidates'] = cands
            else:
                action['label_candidates'] = self.cands
        return action
    def next_example(self):
        """Returns the next example from this dataset after starting to queue
        up the next example.
        """
        ready = None
        # pull up the currently queued example
        if self.example is not None:
            if self.image_mode != 'none' and 'image_id' in self.example:
                # move the image we loaded in the background into the example
                image = self.data_queue.get()
                self.example['image'] = image
            ready = (self.example, self.imageEpochDone)
        # get the next base example: super().next_example() calls self.get()
        self.example, self.imageEpochDone = super().next_example()
        if self.image_mode != 'none' and 'image_id' in self.example:
            # load the next image in the background
            image_id = self.example['image_id']
            self.submit_load_request(image_id)
        # Try to return the previously cached example
        if ready is None:
            # first call of an epoch: nothing cached yet, so recurse once
            return self.next_example()
        else:
            return ready
    def share(self):
        # expose loaded data so sibling instances avoid re-reading from disk
        shared = super().share()
        shared['data'] = self.data
        shared['image_loader'] = self.image_loader
        if hasattr(self, 'cands'):
            shared['cands'] = self.cands
        return shared
    def _setup_data(self, data_path):
        """Load the Karpathy Flickr30k annotations for the configured split."""
        print('loading: ' + data_path)
        with open(data_path) as data_file:
            raw_data = json.load(data_file)['images']
        if 'train' in self.datatype:
            self.data = [d for d in raw_data if d['split'] == 'train']
        elif 'valid' in self.datatype:
            self.data = [d for d in raw_data if d['split'] == 'val']
            self.cands = [
                l for d in self.data for l in [s['raw'] for s in d['sentences']]
            ]
        else:
            self.data = [d for d in raw_data if d['split'] == 'test']
            self.cands = [
                l for d in self.data for l in [s['raw'] for s in d['sentences']]
            ]
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import parlai.core.build_data as build_data
import os
from PIL import Image
from zipfile import ZipFile
from functools import wraps
from threading import Lock, Condition
# character ramp used by img_to_ascii to render pixel intensity as text
_greyscale = ' .,:;crsA23hHG#98&@'
# maximum number of images remembered by the first_n_cache decorator
_cache_size = 84000
def first_n_cache(function):
    """Decorator for ImageLoader.load: memoize the first ``_cache_size``
    distinct paths, and push loaded features to the GPU when requested.
    """
    cache = {}
    cache_monitor = CacheMonitor()

    @wraps(function)
    def wrapper(loader, path):
        if path in cache:
            img = cache[path]
        else:
            img = function(loader, path)
            # remember only the first _cache_size distinct successful loads
            if img is not None and len(cache) < _cache_size:
                cache_monitor.waitForCache()
                cache[path] = img
                cache_monitor.doneWithCache()
        if loader.use_cuda and loader.im not in [None, 'none', 'raw', 'ascii']:
            img = loader.torch.from_numpy(img).cuda()
        return img

    return wrapper
class CacheMonitor:
    """Coordinates exclusive writes to a shared cache via a busy flag
    guarded by a condition variable.
    """

    def __init__(self):
        self.cache_lock = Lock()
        self.cache_available = Condition(self.cache_lock)
        self.cache_busy = False

    def waitForCache(self):
        """Block until no other thread holds the cache, then claim it."""
        with self.cache_lock:
            while self.cache_busy:
                self.cache_available.wait()
            self.cache_busy = True

    def doneWithCache(self):
        """Release the cache and wake up all waiting threads."""
        with self.cache_lock:
            self.cache_busy = False
            self.cache_available.notify_all()
class ImageLoader():
    """Extract image feature using pretrained CNN network.

    Supported modes: 'none' (skip images), 'raw' (PIL RGB image),
    'ascii' (ascii-art rendering), or a resnet* mode that extracts and
    caches CNN features on disk.
    """
    def __init__(self, opt):
        self.opt = opt.copy()
        self.use_cuda = False
        self.netCNN = None
        self.im = opt.get('image_mode', 'none')
        if self.im not in ['none', 'raw', 'ascii']:
            self.init_cnn(self.opt)

    def init_cnn(self, opt):
        """Lazy initialization of preprocessor model in case we don't need any
        image preprocessing.
        """
        try:
            import torch
            self.use_cuda = (
                not opt.get('no_cuda', False) and torch.cuda.is_available()
            )
            self.torch = torch
        except ImportError:
            raise ImportError('Need to install Pytorch: go to pytorch.org')
        import torchvision
        import torchvision.transforms as transforms
        import torch.nn as nn
        if 'image_mode' not in opt or 'image_size' not in opt:
            raise RuntimeError(
                'Need to add image arguments to opt. See '
                'parlai.core.params.ParlaiParser.add_image_args')
        self.image_mode = opt['image_mode']
        self.image_size = opt['image_size']
        self.crop_size = opt['image_cropsize']
        if self.use_cuda:
            print('[ Using CUDA ]')
            torch.cuda.set_device(opt.get('gpu', -1))
        cnn_type, layer_num = self.image_mode_switcher()
        # initialize the pretrained CNN using pytorch.
        CNN = getattr(torchvision.models, cnn_type)
        # cut off the additional layer.
        self.netCNN = nn.Sequential(
            *list(CNN(pretrained=True).children())[:layer_num])
        # initialize the transform function using torch vision.
        # NOTE(review): transforms.Scale is deprecated in newer torchvision
        # (renamed Resize) — keep Scale here for the pinned version; update
        # if the torchvision dependency is bumped.
        self.transform = transforms.Compose([
            transforms.Scale(self.image_size),
            transforms.CenterCrop(self.crop_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        if self.use_cuda:
            self.netCNN.cuda()

    def image_mode_switcher(self):
        """Map an image_mode string to (torchvision model name, cutoff layer)."""
        switcher = {
            'resnet152': ['resnet152', -1],
            'resnet101': ['resnet101', -1],
            'resnet50': ['resnet50', -1],
            'resnet34': ['resnet34', -1],
            'resnet18': ['resnet18', -1],
            'resnet152_spatial': ['resnet152', -2],
            'resnet101_spatial': ['resnet101', -2],
            'resnet50_spatial': ['resnet50', -2],
            'resnet34_spatial': ['resnet34', -2],
            'resnet18_spatial': ['resnet18', -2],
        }
        if self.image_mode not in switcher:
            raise NotImplementedError('image preprocessing mode' +
                                      '{} not supported yet'.format(self.image_mode))
        return switcher.get(self.image_mode)

    def extract(self, image, path):
        """Run the CNN over ``image`` and persist the feature tensor at ``path``."""
        # check whether initialize CNN network.
        if not self.netCNN:
            self.init_cnn(self.opt)
        # extract the image feature
        transform = self.transform(image).unsqueeze(0)
        if self.use_cuda:
            transform = transform.cuda()
        feature = self.netCNN(transform)
        # save the feature
        self.torch.save(feature.cpu(), path)
        return feature

    def img_to_ascii(self, path):
        """Render the image at ``path`` as a small block of ascii art."""
        im = Image.open(path)
        im.thumbnail((60, 40), Image.BICUBIC)
        im = im.convert('L')
        asc = []
        for y in range(0, im.size[1]):
            for x in range(0, im.size[0]):
                lum = 255 - im.getpixel((x, y))
                asc.append(_greyscale[lum * len(_greyscale) // 256])
            asc.append('\n')
        return ''.join(asc)

    # @first_n_cache
    def load(self, path):
        """Load an image (possibly from inside a zip), in the configured mode."""
        opt = self.opt
        mode = opt.get('image_mode', 'raw')
        is_zip = False
        if mode is None or mode == 'none':
            # don't need to load images
            return None
        elif '.zip' in path:
            # assume format path/to/file.zip/image_name.jpg
            is_zip = True
            sep = path.index('.zip') + 4
            zipname = path[:sep]
            file_name = path[sep + 1:]
            path = ZipFile(zipname, 'r').open(file_name)
            if opt['task'] != 'pytorch_teacher':
                task = opt['task']
            else:
                task = opt['image_load_task']
            prepath = os.path.join(opt['datapath'], task)
            # BUG FIX: this previously used zipname.strip('.zip'), which
            # strips any leading/trailing '.', 'z', 'i', 'p' characters
            # (e.g. 'pics.zip' -> 'ics'); slice off the literal suffix instead.
            imagefn = ''.join(zipname[:-4].split('/')[-2:]) + path.name
        if mode == 'raw':
            # raw just returns RGB values
            return Image.open(path).convert('RGB')
        elif mode == 'ascii':
            # convert images to ascii ¯\_(ツ)_/¯
            return self.img_to_ascii(path)
        else:
            # otherwise, looks for preprocessed version under 'mode' directory
            if not is_zip:
                prepath, imagefn = os.path.split(path)
            dpath = os.path.join(prepath, mode)
            if not os.path.exists(dpath):
                build_data.make_dir(dpath)
            imagefn = imagefn.split('.')[0]
            new_path = os.path.join(prepath, mode, imagefn)
            if not os.path.isfile(new_path):
                return self.extract(Image.open(path).convert('RGB'), new_path)
            else:
                return self.torch.load(new_path)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Provides utilities useful for multiprocessing."""
from multiprocessing import Lock, RawArray
from collections.abc import MutableMapping
import ctypes
import sys
class SharedTable(MutableMapping):
"""Provides a simple shared-memory table of integers, floats, or strings.
Use this class as follows:
.. code-block:: python
tbl = SharedTable({'cnt': 0})
with tbl.get_lock():
tbl['startTime'] = time.time()
for i in range(10):
with tbl.get_lock():
tbl['cnt'] += 1
"""
types = {
int: ctypes.c_int,
float: ctypes.c_float,
bool: ctypes.c_bool,
}
def __init__(self, init_dict=None):
"""Create a shared memory version of each element of the initial
dictionary. Creates an empty array otherwise, which will extend
automatically when keys are added.
Each different type (all supported types listed in the ``types`` array
above) has its own array. For each key we store an index into the
appropriate array as well as the type of value stored for that key.
"""
# idx is dict of {key: (array_idx, value_type)}
self.idx = {}
# arrays is dict of {value_type: array_of_ctype}
self.arrays = {}
self.tensors = {}
if init_dict:
sizes = {typ: 0 for typ in self.types.keys()}
for k, v in init_dict.items():
if is_tensor(v):
# add tensor to tensor dict--don't try to put in rawarray
self.tensors[k] = v
continue
elif type(v) not in sizes:
raise TypeError('SharedTable does not support values of ' +
'type ' + str(type(v)))
sizes[type(v)] += 1
# pop tensors from init_dict
for k in self.tensors.keys():
init_dict.pop(k)
# create raw arrays for each type
for typ, sz in sizes.items():
self.arrays[typ] = RawArray(self.types[typ], sz)
# track indices for each key, assign them to their typed rawarray
idxs = {typ: 0 for typ in self.types.keys()}
for k, v in init_dict.items():
val_type = type(v)
self.idx[k] = (idxs[val_type], val_type)
if val_type == str:
v = sys.intern(v)
self.arrays[val_type][idxs[val_type]] = v
idxs[val_type] += 1
# initialize any needed empty arrays
for typ, ctyp in self.types.items():
if typ not in self.arrays:
self.arrays[typ] = RawArray(ctyp, 0)
self.lock = Lock()
def __len__(self):
return len(self.idx) + len(self.tensors)
def __iter__(self):
return iter([k for k in self.idx] + [k for k in self.tensors])
def __contains__(self, key):
return key in self.idx or key in self.tensors
def __getitem__(self, key):
"""Returns shared value if key is available."""
if key in self.tensors:
return self.tensors[key]
elif key in self.idx:
idx, typ = self.idx[key]
return self.arrays[typ][idx]
else:
raise KeyError('Key "{}" not found in SharedTable'.format(key))
def __setitem__(self, key, value):
    """Update the value stored for ``key``.

    Tensors are stored/updated in the tensor dict; other supported values
    are written into their existing slot in the typed RawArray. New
    non-tensor keys cannot be added after construction, since the
    underlying RawArrays are fixed-size and already shared across
    processes.
    Raises an error if you try to change the type of the value stored for
    that key--if you need to do this, you must delete the key first.
    """
    val_type = type(value)
    # tensors bypass the typed RawArrays entirely
    if 'Tensor' in str(val_type):
        self.tensors[key] = value
        return
    if val_type not in self.types:
        raise TypeError('SharedTable does not support type ' + str(type(value)))
    if val_type == str:
        # intern so equal strings share storage
        value = sys.intern(value)
    if key in self.idx:
        idx, typ = self.idx[key]
        if typ != val_type:
            # a slot belongs to exactly one typed array, so the stored
            # type cannot change in place
            raise TypeError(('Cannot change stored type for {key} from ' +
                             '{v1} to {v2}. You need to del the key first' +
                             ' if you need to change value types.'
                             ).format(key=key, v1=typ, v2=val_type))
        self.arrays[typ][idx] = value
    else:
        raise KeyError('Cannot add more keys to the shared table as '
                       'they will not be synced across processes.')
def __delitem__(self, key):
    """Remove ``key`` from the table.

    Note: deleting a scalar key only drops its index entry; its slot in
    the underlying RawArray is not reclaimed.
    """
    if key in self.tensors:
        self.tensors.pop(key)
    elif key in self.idx:
        self.idx.pop(key)
    else:
        raise KeyError('Key "{}" not found in SharedTable'.format(key))
def __str__(self):
    """Returns simple dict representation of the mapping."""
    entries = []
    for key, (arr_idx, val_type) in self.idx.items():
        entries.append('{k}: {v}'.format(k=key, v=self.arrays[val_type][arr_idx]))
    for key, value in self.tensors.items():
        entries.append('{k}: {v}'.format(k=key, v=value))
    return '{{{}}}'.format(', '.join(entries))
def __repr__(self):
    """Returns the object type and memory location with the mapping."""
    base = super().__repr__()
    # splice the dict-style contents just before the closing '>'
    return base.replace('>', ': {}>'.format(str(self)))
def get_lock(self):
    """Expose the table's lock so callers can serialize multi-step updates."""
    return self.lock
def is_tensor(v):
    """Return True iff ``v`` is a torch tensor.

    Checks the type's module name first so torch is only imported when a
    torch object is actually seen (keeps torch an optional dependency).
    """
    module_name = type(v).__module__
    if not module_name.startswith('torch'):
        return False
    import torch
    return torch.is_tensor(v)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Provides standard metric evaluations for dialog.
Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics
between processes.
"""
from parlai.core.thread_utils import SharedTable
from parlai.core.utils import round_sigfigs, no_lock
from collections import Counter
import re
import math
try:
from nltk.translate import bleu_score as nltkbleu
except ImportError:
# User doesn't have nltk installed, so we can't use it for bleu
# We'll just turn off things, but we might want to warn the user
nltkbleu = None
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    text = s.lower()
    text = re_punc.sub(' ', text)  # convert punctuation to spaces
    text = re_art.sub(' ', text)   # drop English articles
    # collapse any run of whitespace into single spaces, trimming the ends
    return ' '.join(text.split())
def _exact_match(guess, answers):
    """Check if guess is a (normalized) exact match with any answer."""
    if guess is None or answers is None:
        return False
    normalized_guess = normalize_answer(guess)
    return any(normalize_answer(answer) == normalized_guess
               for answer in answers)
def _prec_recall_f1_score(pred_items, gold_items):
"""
Computes precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
def _f1_score(guess, answers):
    """Return the max F1 score between the guess and *any* answer."""
    if guess is None or answers is None:
        return 0
    guess_tokens = normalize_answer(guess).split()
    f1_scores = []
    for answer in answers:
        _p, _r, f1 = _prec_recall_f1_score(
            guess_tokens, normalize_answer(answer).split())
        f1_scores.append(f1)
    # like the original, this raises ValueError on an empty answers list
    return max(f1_scores)
def _bleu(guess, answers):
    """Compute approximate BLEU score between guess and a set of answers.

    Returns None when nltk is not installed.
    """
    if nltkbleu is None:
        # bleu library not installed, just return a default value
        return None
    # Warning: BLEU calculation *should* include proper tokenization and
    # punctuation etc. We're using the normalize_answer for everything though,
    # so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
    # going to be slower than fairseq's (which is written in C), but fairseq's
    # requires that everything be in arrays of ints (i.e. as tensors). NLTK's
    # works with strings, which is better suited for this module.
    return nltkbleu.sentence_bleu(
        [normalize_answer(a).split(" ") for a in answers],
        normalize_answer(guess).split(" "),
        # tiny epsilon smoothing avoids zero scores on short sentences
        smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
    )
def aggregate_metrics(reporters):
    """Aggregate metrics from multiple reporters (teachers or worlds).

    Returns a dict with per-reporter metrics under 'tasks', the total
    example count 'exs', and macro-averaged 'accuracy', 'f1', 'loss',
    'ppl' (plus 'bleu' when nltk is available).
    """
    # reporters is a list of teachers or worlds
    m = {}
    m['tasks'] = {}
    sums = {'accuracy': 0, 'f1': 0, 'loss': 0, 'ppl': 0}
    if nltkbleu is not None:
        # only aggregate bleu when it can actually be computed
        sums['bleu'] = 0
    num_tasks = 0
    total = 0
    for i in range(len(reporters)):
        tid = reporters[i].getID()
        mt = reporters[i].report()
        while tid in m['tasks']:
            # prevent name clobbering if using multiple tasks with same ID
            tid += '_'
        m['tasks'][tid] = mt
        total += mt['exs']
        found_any = False
        for k in sums.keys():
            if k in mt:
                sums[k] += mt[k]
                found_any = True
        if found_any:
            # only tasks that reported at least one tracked metric count
            # towards the macro-average denominator
            num_tasks += 1
    m['exs'] = total
    m['accuracy'] = 0  # default when no task reported metrics
    if num_tasks > 0:
        for k in sums.keys():
            # macro-average: each task contributes equally regardless of size
            m[k] = round_sigfigs(sums[k] / num_tasks, 4)
    return m
def compute_time_metrics(world, max_time):
    """Compute time-based progress metrics for a running world.

    Returns a dict that may contain 'time_left' (whole seconds remaining,
    from example-based extrapolation and/or the ``max_time`` budget) and
    'num_epochs' (epochs completed so far).
    """
    # Determine time_left and num_epochs
    exs_per_epoch = world.num_examples() if world.num_examples() else 0
    num_epochs = world.opt.get('num_epochs', 0)
    max_exs = exs_per_epoch * num_epochs
    total_exs = world.get_total_exs()
    m = {}
    if (max_exs > 0 and total_exs > 0) or max_time > 0:
        m = {}
        time_left = None
        time = world.get_time()
        total_epochs = world.get_total_epochs()
        if (num_epochs > 0 and total_exs > 0 and max_exs > 0):
            # extrapolate remaining time from the observed rate so far
            exs_per_sec = time / total_exs
            time_left = (max_exs - total_exs) * exs_per_sec
        if max_time > 0:
            # the wall-clock budget may be the tighter constraint
            other_time_left = max_time - time
            if time_left is not None:
                time_left = min(time_left, other_time_left)
            else:
                time_left = other_time_left
        if time_left is not None:
            m['time_left'] = math.floor(time_left)
        if num_epochs > 0:
            if (total_exs > 0 and exs_per_epoch > 0):
                display_epochs = int(total_exs / exs_per_epoch)
            else:
                display_epochs = total_epochs
            m['num_epochs'] = display_epochs
    return m
class Metrics(object):
    """Class that maintains evaluation metrics over dialog.

    Each tracked metric ``<name>`` has a companion counter ``<name>_cnt``
    recording how many updates contributed to it, so report() can return
    averages. With ``numthreads > 1`` the state lives in a SharedTable so
    hogwild processes accumulate into the same memory.
    """

    def __init__(self, opt):
        self.metrics = {}
        self.metrics['cnt'] = 0
        self.metrics_list = ['mean_rank', 'loss', 'correct', 'f1', 'ppl']
        if nltkbleu is not None:
            # only compute bleu if we can
            self.metrics_list.append('bleu')
        for k in self.metrics_list:
            self.metrics[k] = 0.0
            self.metrics[k + '_cnt'] = 0
        # cutoffs for the hits@k ranking metrics
        self.eval_pr = [1, 5, 10, 100]
        for k in self.eval_pr:
            self.metrics['hits@' + str(k)] = 0
        self.metrics['hits@_cnt'] = 0
        self.flags = {'has_text_cands': False, 'print_prediction_metrics': False}
        if opt.get('numthreads', 1) > 1:
            # share state across hogwild processes via shared memory
            self.metrics = SharedTable(self.metrics)
            self.flags = SharedTable(self.flags)

    def __str__(self):
        return str(self.metrics)

    def __repr__(self):
        representation = super().__repr__()
        return representation.replace('>', ': {}>'.format(repr(self.metrics)))

    def _lock(self):
        """Return a context manager guarding metric updates."""
        if hasattr(self.metrics, 'get_lock'):
            # use the shared_table's lock
            return self.metrics.get_lock()
        else:
            # otherwise do nothing
            return no_lock()

    def update_ranking_metrics(self, observation, labels):
        """Update hits@k metrics from an observation's text candidates."""
        text_cands = observation.get('text_candidates', None)
        if text_cands is None:
            return
        else:
            # Now loop through text candidates, assuming they are sorted.
            # If any of them is a label then score a point.
            # maintain hits@1, 5, 10, 50, 100, etc.
            label_set = set(normalize_answer(l) for l in labels)
            cnts = {k: 0 for k in self.eval_pr}
            cnt = 0
            for c in text_cands:
                cnt += 1
                if normalize_answer(c) in label_set:
                    for k in self.eval_pr:
                        if cnt <= k:
                            cnts[k] += 1
            # hits metric is 1 if cnts[k] > 0.
            # (other metrics such as p@k and r@k take
            # the value of cnt into account.)
            with self._lock():
                self.flags['has_text_cands'] = True
                for k in self.eval_pr:
                    if cnts[k] > 0:
                        self.metrics['hits@' + str(k)] += 1
                self.metrics['hits@_cnt'] += 1

    def update(self, observation, labels):
        """Update all metrics from one model response and its gold labels.

        Returns a dict with this specific example's metrics ('correct').
        """
        with self._lock():
            self.metrics['cnt'] += 1
        # Exact match metric.
        correct = 0
        prediction = observation.get('text', None)
        if prediction is not None:
            if _exact_match(prediction, labels):
                correct = 1
            with self._lock():
                self.flags['print_prediction_metrics'] = True
                self.metrics['correct'] += correct
                self.metrics['correct_cnt'] += 1
            # F1 and BLEU metrics.
            f1 = _f1_score(prediction, labels)
            bleu = _bleu(prediction, labels)
            with self._lock():
                self.metrics['f1'] += f1
                self.metrics['f1_cnt'] += 1
                if bleu is not None:
                    self.metrics['bleu'] += bleu
                    self.metrics['bleu_cnt'] += 1
        # Ranking metrics.
        self.update_ranking_metrics(observation, labels)
        # User-reported metrics
        if 'metrics' in observation:
            for k, v in observation['metrics'].items():
                if k not in ['correct', 'f1', 'hits@k', 'bleu']:
                    if k in self.metrics_list:
                        with self._lock():
                            self.metrics[k] += v
                            self.metrics[k + '_cnt'] += 1
                    else:
                        if type(self.metrics) is SharedTable:
                            # can't share custom metrics during hogwild
                            pass
                        else:
                            # no need to lock because not SharedTable
                            if k not in self.metrics:
                                self.metrics[k] = v
                                self.metrics_list.append(k)
                                self.metrics[k + '_cnt'] = 1.0
                            else:
                                self.metrics[k] += v
        # Return a dict containing the metrics for this specific example.
        # Metrics across all data is stored internally in the class, and
        # can be accessed with the report method.
        loss = {}
        loss['correct'] = correct
        return loss

    def report(self):
        """Report the (averaged) metrics over all data seen so far."""
        m = {}
        total = self.metrics['cnt']
        m['exs'] = total
        if total > 0:
            if self.flags['print_prediction_metrics']:
                # max(1, ...) guards against division by zero
                m['accuracy'] = round_sigfigs(
                    self.metrics['correct'] / max(1, self.metrics['correct_cnt']),
                    4
                )
                m['f1'] = round_sigfigs(
                    self.metrics['f1'] / max(1, self.metrics['f1_cnt']),
                    4
                )
            if self.flags['has_text_cands']:
                for k in self.eval_pr:
                    m['hits@' + str(k)] = round_sigfigs(
                        self.metrics['hits@' + str(k)] /
                        max(1, self.metrics['hits@_cnt']),
                        3
                    )
            for k in self.metrics_list:
                # 'correct' and 'f1' were already reported above
                if self.metrics[k + '_cnt'] > 0 and k != 'correct' and k != 'f1':
                    m[k] = round_sigfigs(
                        self.metrics[k] / max(1, self.metrics[k + '_cnt']),
                        4
                    )
        return m

    def clear(self):
        """Reset all accumulated metrics (the tracked metric set is kept)."""
        with self._lock():
            self.metrics['cnt'] = 0
            for k in self.metrics_list:
                v = self.metrics[k]
                v_typ = type(v)
                if 'Tensor' in str(v_typ):
                    # zero tensors in place so shared storage stays shared
                    self.metrics[k].zero_()
                else:
                    self.metrics[k] = 0.0
                self.metrics[k + '_cnt'] = 0
            for k in self.eval_pr:
                self.metrics['hits@' + str(k)] = 0
            self.metrics['hits@_cnt'] = 0
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Basic example which iterates through the tasks specified and
checks them for offensive language.
Examples
--------
.. code-block:: shell
python -m parlai.scripts.detect_offensive_language -t "convai_chitchat" --display-examples True
""" # noqa: E501
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.core.utils import OffensiveLanguageDetector, TimeLogger
import random
def setup_args(parser=None):
    """Build (or extend) the command-line parser for this script."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Check task for offensive language')
    # Get command line arguments
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.set_defaults(datatype='train:ordered')
    parser.set_defaults(model='repeat_query')
    return parser
def detect(opt, printargs=None, print_parser=None):
    """Checks a task for offensive language.

    Iterates the task named in ``opt``, counting offensive words found in
    message texts and labels. Returns the world's final report.
    """
    if print_parser is not None:
        if print_parser is True and isinstance(opt, ParlaiParser):
            # True means "use opt itself as the parser to print"
            print_parser = opt
        elif print_parser is False:
            print_parser = None
    random.seed(42)
    # Create model and assign it to the specified task
    agent = create_agent(opt, requireModelExists=True)
    world = create_task(opt, agent)
    bad = OffensiveLanguageDetector()
    if print_parser:
        # Show arguments after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()
    log_every_n_secs = opt.get('log_every_n_secs', -1)
    if log_every_n_secs <= 0:
        # non-positive disables timed logging
        log_every_n_secs = float('inf')
    log_time = TimeLogger()
    # Show some example dialogs:
    cnt = 0
    while not world.epoch_done():
        world.parley()
        words = []
        for a in world.acts:
            offensive = bad.contains_offensive_language(a.get('text', ''))
            if offensive:
                words.append(offensive)
            labels = a.get('labels', a.get('eval_labels', ''))
            # NOTE(review): assumes labels is an iterable of label strings;
            # if a bare string label is ever returned this loop would
            # iterate its characters — confirm against teacher output
            for l in labels:
                offensive = bad.contains_offensive_language(l)
                if offensive:
                    words.append(offensive)
        if len(words) > 0 and opt['display_examples']:
            print(world.display())
            print("[Offensive words detected:]", ', '.join(words))
            print("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
        cnt += len(words)
        if log_time.time() > log_every_n_secs:
            report = world.report()
            log = {'offenses': cnt}
            text, log = log_time.log(report['exs'], world.num_examples(), log)
            print(text)
    if world.epoch_done():
        print("EPOCH DONE")
    print(str(cnt) + " offensive messages found out of " +
          str(world.num_examples()) + " messages.")
    return world.report()


if __name__ == '__main__':
    parser = setup_args()
    detect(parser.parse_args(print_args=False), print_parser=parser)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Convert a dataset into the ParlAI text format.
Examples
--------
.. code-block:: shell
python convert_data_to_parlai_format.py -t babi:task1k:1 --outfile /tmp/dump
"""
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
from parlai.core.utils import msg_to_str, TimeLogger
import random
import tempfile
def dump_data(opt):
    """Dump a task's episodes to a file in the ParlAI text format.

    Creates a repeat-label agent for the task named in ``opt`` and writes
    ``opt['num_examples']`` examples (-1 for the whole epoch) to
    ``opt['outfile']``, or to a generated tempfile when no outfile is set.
    """
    # create repeat label agent and assign it to the specified task
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)
    ignorefields = opt.get('ignore_fields', '')
    if opt['outfile'] is None:
        outfile = tempfile.mkstemp(
            prefix='{}_{}_'.format(opt['task'], opt['datatype']),
            suffix='.txt')[1]
    else:
        outfile = opt['outfile']
    if opt['num_examples'] == -1:
        num_examples = world.num_examples()
    else:
        num_examples = opt['num_examples']
    log_timer = TimeLogger()
    print('[ starting to convert.. ]')
    print('[ saving output to {} ]'.format(outfile))
    # use a context manager so the file is closed even if conversion fails
    # part-way through (the original leaked the handle on error)
    with open(outfile, 'w') as fw:
        for _ in range(num_examples):
            world.parley()
            # non-train datatypes label answers 'eval_labels'; normalize to
            # 'labels' so the dumped format is uniform
            world.acts[0]['labels'] = world.acts[0].get(
                'labels', world.acts[0].pop('eval_labels', None))
            txt = msg_to_str(world.acts[0], ignore_fields=ignorefields)
            fw.write(txt + '\n')
            if world.acts[0].get('episode_done', False):
                # a blank line separates episodes in the ParlAI text format
                fw.write('\n')
            if log_timer.time() > opt['log_every_n_secs']:
                text, _log = log_timer.log(world.total_parleys, world.num_examples())
                print(text)
            if world.epoch_done():
                print('EPOCH DONE')
                break
def main():
    """Parse command-line arguments and run the conversion."""
    random.seed(42)
    # Get command line arguments
    parser = ParlaiParser()
    parser.add_argument('-n', '--num-examples', default=-1, type=int,
                        help='Total number of exs to convert, -1 to convert \
                        all examples')
    parser.add_argument('-of', '--outfile', default=None, type=str,
                        help='Output file where to save, by default will be \
                        created in /tmp')
    parser.add_argument('-if', '--ignore-fields', default='id', type=str,
                        help='Ignore these fields from the message (returned\
                        with .act() )')
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.set_defaults(datatype='train:stream')
    opt = parser.parse_args()
    dump_data(opt)


if __name__ == '__main__':
    main()
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Basic example which iterates through the tasks specified and
evaluates the given model on them.
Examples
--------
.. code-block:: shell
python eval_model.py -t "babi:Task1k:2" -m "repeat_label"
python eval_model.py -t "#CornellMovie" -m "ir_baseline" -mp "-lp 0.5"
"""
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent
from parlai.core.logs import TensorboardLogger
from parlai.core.worlds import create_task
from parlai.core.utils import TimeLogger
import random
def setup_args(parser=None):
    """Build (or extend) the command-line parser for model evaluation."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('--metrics', type=str, default="all",
                        help="list of metrics to show/compute, e.g. "
                             "ppl,f1,accuracy,hits@1."
                             "If 'all' is specified [default] all are shown.")
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
def eval_model(opt, printargs=None, print_parser=None):
    """Evaluates a model.

    :param opt: tells the evaluation function how to run
    :param bool print_parser: if provided, prints the options that are set within the
        model after loading the model
    :return: the final result of calling report()
    """
    if printargs is not None:
        print('[ Deprecated Warning: eval_model no longer uses `printargs` ]')
        print_parser = printargs
    if print_parser is not None:
        if print_parser is True and isinstance(opt, ParlaiParser):
            # True means "use opt itself as the parser to print"
            print_parser = opt
        elif print_parser is False:
            print_parser = None
    if isinstance(opt, ParlaiParser):
        print('[ Deprecated Warning: eval_model should be passed opt not Parser ]')
        opt = opt.parse_args()
    random.seed(42)
    # Create model and assign it to the specified task
    agent = create_agent(opt, requireModelExists=True)
    world = create_task(opt, agent)
    if print_parser:
        # Show arguments after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()
    log_every_n_secs = opt.get('log_every_n_secs', -1)
    if log_every_n_secs <= 0:
        # non-positive disables timed logging
        log_every_n_secs = float('inf')
    log_time = TimeLogger()
    # Show some example dialogs:
    cnt = 0
    while not world.epoch_done():
        cnt += opt.get('batchsize', 1)
        world.parley()
        if opt['display_examples']:
            print(world.display() + "\n~~")
        if log_time.time() > log_every_n_secs:
            report = world.report()
            text, report = log_time.log(report['exs'], world.num_examples(),
                                        report)
            print(text)
        if opt['num_examples'] > 0 and cnt >= opt['num_examples']:
            break
    if world.epoch_done():
        print("EPOCH DONE")
    print('finished evaluating task {} using datatype {}'.format(
        opt['task'], opt.get('datatype', 'N/A')))
    report = world.report()
    print(report)
    return report


if __name__ == '__main__':
    parser = setup_args()
    eval_model(parser.parse_args(print_args=False), print_parser=parser)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
This helper script can be used alone with modelfile and task: the output will
contain the word statistics of the model outputs.
One can also use the function defined here in other places in order to get such
statistic for any agent given the agent object (with corr. dict) and a
sequence.
Additionally provides function get_word_stats that can be used in other parts
of runtime code since it depends only on the agent object. For example:
::
from parlai.scripts.eval_wordstat import get_word_stats
reqs, cnt = get_word_stats(predictions.tolist(), self.dict)
Examples
--------
.. code-block:: shell
eval_wordstat.py -mf data/model -t convai2:self --freq-bins 10,100,1000
"""
from parlai.core.params import ParlaiParser
from parlai.core.dict import DictionaryAgent
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.core.utils import TimeLogger
from parlai.core.metrics import normalize_answer
from parlai.core.logs import TensorboardLogger
from collections import Counter
import copy
import numpy
import random
def setup_args(parser=None):
    """Build (or extend) the command-line parser for word statistics."""
    if parser is None:
        parser = ParlaiParser(True, True, 'compute statistics from model predictions')
    DictionaryAgent.add_cmdline_args(parser)
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-ed', '--external-dict', type=str, default=None,
                        help='External dictionary for stat computation')
    parser.add_argument('-fb', '--freq-bins', type=str, default='0,100,1000,10000',
                        help='Bins boundaries for rare words stat')
    parser.add_argument('-dup', '--dump-predictions-path', type=str, default=None,
                        help='Dump predictions into file')
    parser.add_argument('-cun', '--compute-unique', type=bool, default=True,
                        help='Compute %% of unique responses from the model')
    parser.set_defaults(datatype='valid', model='repeat_label')
    TensorboardLogger.add_cmdline_args(parser)
    return parser
def get_word_stats(text, agent_dict, bins=(0, 100, 1000, 100000)):
    """
    Compute word-frequency and length statistics for a text sequence.

    :param text: text sequence to analyze
    :param agent_dict: external dict or dict from the model; must provide
        ``tokenize`` and a ``freq`` mapping from word to corpus frequency
    :param bins: iterable of inclusive upper boundaries for frequency bins
        (a tuple default replaces the original mutable list default)
    :return: tuple (freqs, num_words, word_length, char_length): ``freqs``
        maps each bin boundary to the count of tokens whose dictionary
        frequency falls at or below it (first matching bin wins),
        ``num_words`` and ``word_length`` are both the token count, and
        ``char_length`` is the character length of ``text`` (incl. spaces)
    """
    pred_list = agent_dict.tokenize(text)
    pred_freq = [agent_dict.freq[word] for word in pred_list]
    freqs = {i: 0 for i in bins}
    for f in pred_freq:
        # count the token in the first (smallest) bin that contains it
        for b in bins:
            if f <= b:
                freqs[b] += 1
                break
    wlength = len(pred_list)
    clength = len(text)  # including spaces
    return freqs, len(pred_freq), wlength, clength
def eval_wordstat(opt, print_parser=None):
    """Evaluates a model.

    :param opt: tells the evaluation function how to run
    :param print_parser: if provided, prints the options that are set within the
        model after loading the model
    """
    random.seed(42)
    # Create model and assign it to the specified task
    agent = create_agent(opt, requireModelExists=True)
    world = create_task(opt, agent)
    if opt.get('external_dict'):
        print('[ Using external dictionary from: {} ]'.format(
            opt['external_dict']))
        dict_opt = copy.deepcopy(opt)
        dict_opt['dict_file'] = opt['external_dict']
        dictionary = DictionaryAgent(dict_opt)
    else:
        print('[ Using model bundled dictionary ]')
        dictionary = agent.dict
    batch_size = opt['batchsize']
    if print_parser:
        # Show arguments after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()
    log_every_n_secs = opt.get('log_every_n_secs', -1)
    if log_every_n_secs <= 0:
        # non-positive disables timed logging
        log_every_n_secs = float('inf')
    log_time = TimeLogger()
    cnt = 0
    # running accumulators for all statistics reported at the end
    word_statistics = {
        'mean_wlength': [],      # per-prediction word counts
        'mean_clength': [],      # per-prediction character counts
        'freqs_cnt': Counter(),  # histogram over word-frequency bins
        'word_cnt': 0,           # total words across all predictions
        'pred_list': [],         # normalized predictions (for uniqueness)
        'pure_pred_list': [],    # raw predictions (for dumping)
        'context_list': []       # inputs matched to predictions
    }
    bins = [int(i) for i in opt['freq_bins'].split(',')]

    def process_prediction(prediction, word_statistics):
        # fold one prediction's stats into the running accumulators
        word_statistics['pred_list'].append(normalize_answer(prediction))
        freqs, _cnt, wlength, clength = get_word_stats(
            prediction, dictionary, bins=bins
        )
        word_statistics['word_cnt'] += _cnt
        word_statistics['mean_wlength'].append(wlength)
        word_statistics['mean_clength'].append(clength)
        word_statistics['freqs_cnt'] += Counter(freqs)
        return word_statistics

    while not world.epoch_done():
        world.parley()
        if batch_size == 1:
            cnt += 1
            prediction = world.acts[-1]['text']
            word_statistics['context_list'].append(world.acts[0]['text'])
            word_statistics['pure_pred_list'].append(prediction)
            word_statistics = process_prediction(prediction, word_statistics)
        else:
            for w in world.worlds:
                try:
                    prediction = w.acts[-1]['text']
                    word_statistics['context_list'].append(w.acts[0]['text'])
                    word_statistics['pure_pred_list'].append(prediction)
                except IndexError:
                    # this sub-world produced no act this parley; skip it
                    continue
                cnt += 1
                word_statistics = process_prediction(prediction, word_statistics)
        if log_time.time() > log_every_n_secs:
            report = world.report()
            text, report = log_time.log(report['exs'], world.num_examples(), report)
            print(text)
            # interim frequency-bin summary
            stat_str = 'total_words: {}, '.format(word_statistics['word_cnt'])
            stat_str += ', '.join([
                '<{}:{} ({:.{prec}f}%)'.format(
                    b,
                    word_statistics['freqs_cnt'].get(b, 0),
                    (word_statistics['freqs_cnt'].get(b, 0) /
                     word_statistics['word_cnt']) * 100,
                    prec=2
                )
                for b in bins
            ])
            print(
                "Word statistics: {}, avg_word_length: {:.{prec}f}, "
                "avg_char_length: {:.{prec}f}"
                .format(
                    stat_str,
                    numpy.array(word_statistics['mean_wlength']).mean(),
                    numpy.array(word_statistics['mean_clength']).mean(),
                    prec=2
                )
            )
        if opt['num_examples'] > 0 and cnt >= opt['num_examples']:
            break
    if world.epoch_done():
        print("EPOCH DONE")

    if opt['compute_unique'] is True:
        # responses that the model produced exactly once
        unique_list = []
        cntr = Counter(word_statistics['pred_list'])
        for k, v in cntr.items():
            if v == 1:
                unique_list.append(k)
        print(
            "Unique responses: {:.{prec}f}%"
            .format(
                len(unique_list) / len(word_statistics['pred_list']) * 100,
                prec=2
            )
        )

    if opt['dump_predictions_path'] is not None:
        with open(opt['dump_predictions_path'], 'w') as f:
            f.writelines([
                'CONTEXT: {}\nPREDICTION:{}\n\n'.format(c, p)
                for c, p in zip(
                    word_statistics['context_list'],
                    word_statistics['pure_pred_list']
                )
            ])
        if opt['compute_unique'] is True:
            with open(opt['dump_predictions_path'] + '_unique', 'w') as f:
                f.writelines(['{}\n'.format(i) for i in unique_list])

    # final frequency-bin summary over the whole run
    stat_str = 'total_words: {}, '.format(word_statistics['word_cnt'])
    stat_str += ', '.join([
        '<{}:{} ({:.{prec}f}%)'.format(
            b,
            word_statistics['freqs_cnt'].get(b, 0),
            (word_statistics['freqs_cnt'].get(b, 0) /
             word_statistics['word_cnt']) * 100,
            prec=2
        )
        for b in bins
    ])
    print(
        "Word statistics: {}, avg_word_length: {:.{prec}f}, "
        "avg_char_length: {:.{prec}f}"
        .format(
            stat_str,
            numpy.array(word_statistics['mean_wlength']).mean(),
            numpy.array(word_statistics['mean_clength']).mean(),
            prec=2
        )
    )
    report = world.report()
    print(report)
    return report


if __name__ == '__main__':
    parser = setup_args()
    eval_wordstat(parser.parse_args(print_args=False), print_parser=parser)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Basic example which iterates through the tasks specified and prints them out.
Used for verification of data loading and iteration.
For example, to make sure that bAbI task 1 (1k exs) loads one can run and to
see a few of them:
Examples
--------
.. code-block:: shell
python display_data.py -t babi:task1k:1
"""
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
import random
def setup_args(parser=None):
    """Build (or extend) the command-line parser for displaying data."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Display data from a task')
    # Get command line arguments
    parser.add_argument('-ne', '--num_examples', type=int, default=10)
    parser.add_argument('-mdl', '--max_display_len', type=int, default=1000)
    parser.add_argument('--display_ignore_fields', type=str, default='agent_reply')
    parser.set_defaults(datatype='train:stream')
    return parser
def display_data(opt):
    """Print ``opt['num_examples']`` examples from the task to stdout."""
    # create repeat label agent and assign it to the specified task
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)
    # Show some example dialogs.
    for _ in range(opt['num_examples']):
        world.parley()
        # NOTE: If you want to look at the data from here rather than calling
        # world.display() you could access world.acts[0] directly
        print(world.display() + '\n~~')
        if world.epoch_done():
            print('EPOCH DONE')
            break
    try:
        # print dataset size if available
        print('[ loaded {} episodes with a total of {} examples ]'.format(
            world.num_episodes(), world.num_examples()
        ))
    except Exception:
        # some streamed tasks cannot report their size; stay silent
        pass


if __name__ == '__main__':
    random.seed(42)
    # Get command line arguments
    parser = setup_args()
    opt = parser.parse_args()
    display_data(opt)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Basic example which allows local human keyboard input to talk to a trained model.
Examples
--------
.. code-block:: shell
python examples/interactive.py -m drqa -mf "models:drqa/squad/model"
When prompted, enter something like: ``Bob is Blue.\\nWhat is Bob?``
Input is often model or task specific, but in drqa, it is always
``context '\\n' question``.
"""
from parlai.core.params import ParlaiParser
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.agents.local_human.local_human import LocalHumanAgent
import random
def setup_args(parser=None):
    """Build (or extend) the command-line parser for interactive chat."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Interactive chat with a model')
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('--display-prettify', type='bool', default=False,
                        help='Set to use a prettytable when displaying '
                             'examples with text candidates')
    parser.add_argument('--display-ignore-fields', type=str,
                        default='label_candidates,text_candidates',
                        help='Do not display these fields')
    LocalHumanAgent.add_cmdline_args(parser)
    return parser
def interactive(opt, print_parser=None):
    """Chat with the model specified in ``opt`` from the local terminal."""
    if print_parser is not None:
        if print_parser is True and isinstance(opt, ParlaiParser):
            # True means "use opt itself as the parser to print"
            print_parser = opt
        elif print_parser is False:
            print_parser = None
    if isinstance(opt, ParlaiParser):
        print('[ Deprecated Warning: interactive should be passed opt not Parser ]')
        opt = opt.parse_args()
    # human keyboard input plays the role of the "task"
    opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
    # Create model and assign it to the specified task
    agent = create_agent(opt, requireModelExists=True)
    world = create_task(opt, agent)
    if print_parser:
        # Show arguments after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()
    # Show some example dialogs:
    while True:
        world.parley()
        if opt.get('display_examples'):
            print("---")
            print(world.display())
        if world.epoch_done():
            print("EPOCH DONE")
            break


if __name__ == '__main__':
    random.seed(42)
    parser = setup_args()
    interactive(parser.parse_args(print_args=False), print_parser=parser)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Verify data doesn't have basic mistakes, like empty text fields
or empty label candidates.
Examples
--------
.. code-block:: shell
python parlai/scripts/verify_data.py -t convai2 -dt train:ordered
"""
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
from parlai.core.utils import TimeLogger
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, True, 'Lint for ParlAI tasks')
# Get command line arguments
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
parser.add_argument('-d', '--display-examples', type='bool', default=False)
parser.set_defaults(datatype='train:stream')
return parser
def report(world, counts, log_time):
    """Build a progress report for the verification pass.

    :param world: world being verified; supplies ``report()`` and
        ``num_examples()``.
    :param counts: dict of defect counters accumulated so far.
    :param log_time: TimeLogger used to format the progress line.
    :returns: ``(text, log)`` pair as produced by ``log_time.log``.
    """
    world_report = world.report()
    tracked = {
        key: counts[key]
        for key in ('missing_text', 'missing_labels',
                    'missing_label_candidates', 'empty_label_candidates')
    }
    return log_time.log(world_report['exs'], world.num_examples(), tracked)
def verify(opt, printargs=None, print_parser=None):
    """Iterate an entire task and count basic data defects.

    Counts acts with a missing text field, missing labels/eval_labels,
    missing label_candidates, and empty-string label candidates.

    :param opt: parsed options selecting the task to verify.
    :param printargs: unused; kept for call-site compatibility.
    :param print_parser: when truthy, progress text is printed periodically.
    :returns: ``(text, log)`` pair from the final ``report`` call.
    """
    # Create repeat-label agent and assign it to the specified task.
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)
    log_every_n_secs = opt.get('log_every_n_secs', -1)
    if log_every_n_secs <= 0:
        # Non-positive interval disables periodic logging.
        log_every_n_secs = float('inf')
    log_time = TimeLogger()
    counts = {
        'missing_text': 0,
        'missing_labels': 0,
        'missing_label_candidates': 0,
        'empty_label_candidates': 0,
    }
    while not world.epoch_done():
        world.parley()
        act = world.acts[0]
        if 'text' not in act:
            print("warning: missing text field")
            counts['missing_text'] += 1
        if 'labels' not in act and 'eval_labels' not in act:
            print("warning: missing labels/eval_labels field")
            counts['missing_labels'] += 1
        elif 'label_candidates' not in act:
            counts['missing_label_candidates'] += 1
        else:
            for cand in act['label_candidates']:
                if cand == '':
                    print("warning: empty string label_candidate")
                    counts['empty_label_candidates'] += 1
        if log_time.time() > log_every_n_secs:
            text, log = report(world, counts, log_time)
            if print_parser:
                print(text)
    try:
        # Print dataset size if available.
        print('[ loaded {} episodes with a total of {} examples ]'.format(
            world.num_episodes(), world.num_examples()
        ))
    except Exception:
        pass
    return report(world, counts, log_time)
if __name__ == '__main__':
    parser = setup_args()
    # print_args=False: arguments are reported via print_parser instead.
    report_text, report_log = verify(
        parser.parse_args(print_args=False), print_parser=parser)
    print(report_text)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Basic example which iterates through the tasks specified and load/extract
the image features.
For more options, check ``parlai.core.image_featurizers``
Examples
--------
To extract the image feature of COCO images:
.. code-block:: shell
python examples/extract_image_feature.py -t vqa_v1 -im resnet152
"""
import importlib
import h5py
import copy
import os
import json
import datetime
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
from parlai.core.utils import ProgressLogger
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, False, 'Load/extract image features')
arg_group = parser.add_argument_group('Image Extraction')
arg_group.add_argument('--dataset', type=str, default=None,
help='Pytorch Dataset; if specified, will save \
the images in one hdf5 file according to how \
they are returned by the specified dataset')
arg_group.add_argument('-at', '--attention', action='store_true',
help='Whether to extract image features with attention \
(Note - this is specifically for the mlb_vqa model)')
arg_group.add_argument('--use-hdf5-extraction', type='bool', default=False,
help='Whether to extract images into an hdf5 dataset')
return parser
def get_dataset_class(opt):
    """Resolve the Pytorch Dataset class named on the command line.

    To use a custom Pytorch Dataset, specify it on the command line:
    ``--dataset parlai.tasks.vqa_v1.agents:VQADataset``

    Note that if the dataset is named ``DefaultDataset``, then you do
    not need to specify its name following the colon; e.g., it
    would just be: ``--dataset parlai.tasks.vqa_v1.agents``

    :param opt: options dict containing 'pytorch_teacher_dataset'.
    :returns: the dataset class object.
    """
    spec = opt.get('pytorch_teacher_dataset').strip().split(':')
    module_name = spec[0]
    class_name = spec[1] if len(spec) > 1 else 'DefaultDataset'
    module = importlib.import_module(module_name)
    return getattr(module, class_name)
def extract_feats(opt):
    """Pre-compute image features for the specified task/dataset.

    Either (a) runs one ordered pass over a regular task so the image
    loader computes/caches features, or (b) when --use-hdf5-extraction is
    set together with a pytorch dataset, extracts every image's features
    into a single hdf5 file plus an image_id -> index json map.

    :param opt: parsed ParlAI options (passing a parser is deprecated).
    """
    if isinstance(opt, ParlaiParser):
        print('[ Deprecated Warning: extract_feats should be passed opt not Parser ]')
        opt = opt.parse_args()
    # Get command line arguments
    opt = copy.deepcopy(opt)
    # Force an ordered, single-epoch pass so every example is visited once.
    dt = opt['datatype'].split(':')[0] + ':ordered'
    opt['datatype'] = dt
    bsz = opt.get('batchsize', 1)
    opt['no_cuda'] = False
    opt['gpu'] = 0
    opt['num_epochs'] = 1
    opt['use_hdf5'] = False
    opt['num_load_threads'] = 20
    logger = ProgressLogger(should_humanize=False, throttle=0.1)
    print("[ Loading Images ]")
    # create repeat label agent and assign it to the specified task
    if opt.get('pytorch_teacher_dataset') is None:
        # Plain task: one ordered pass lets the image loader cache features.
        agent = RepeatLabelAgent(opt)
        world = create_task(opt, agent)
        exs_seen = 0
        total_exs = world.num_examples()
        while not world.epoch_done():
            world.parley()
            exs_seen += bsz
            logger.log(exs_seen, total_exs)
    elif opt.get('use_hdf5_extraction', False):
        '''One can specify a Pytorch Dataset for custom image loading'''
        nw = opt.get('numworkers', 1)
        im = opt.get('image_mode', 'raw')
        # hdf5 extraction processes one image at a time.
        opt['batchsize'] = 1
        opt['extract_image'] = True
        bsz = 1
        try:
            import torch
            from torch.utils.data import DataLoader
        except ImportError:
            raise ImportError('Need to install Pytorch: go to pytorch.org')
        dataset = get_dataset_class(opt)(opt)
        pre_image_path, _ = os.path.split(dataset.image_path)
        image_path = os.path.join(pre_image_path, opt.get('image_mode'))
        images_built_file = image_path + '.built'
        if not os.path.exists(image_path) or not os.path.isfile(images_built_file):
            '''Image features have not been computed yet'''
            opt['num_load_threads'] = 20
            agent = RepeatLabelAgent(opt)
            # Resolve the pytorch_teacher placeholder to the concrete task.
            if opt['task'] == 'pytorch_teacher':
                if opt.get('pytorch_teacher_task'):
                    opt['task'] = opt['pytorch_teacher_task']
                else:
                    opt['task'] = opt['pytorch_teacher_dataset']
            world = create_task(opt, agent)
            exs_seen = 0
            total_exs = world.num_examples()
            print('[ Computing and Saving Image Features ]')
            while exs_seen < total_exs:
                world.parley()
                exs_seen += bsz
                logger.log(exs_seen, total_exs)
            print('[ Feature Computation Done ]')
            # Touch the .built marker so future runs skip recomputation.
            with open(images_built_file, 'w') as write:
                write.write(str(datetime.datetime.today()))
        dataloader = DataLoader(
            dataset,
            batch_size=bsz,
            shuffle=False,
            num_workers=nw,
            collate_fn=lambda batch: batch[0]
        )
        dataset_shape = None
        image_id_to_index = {}
        num_images = dataset.num_images()
        attention = opt.get('attention', False)
        # Attention features keep spatial dims; otherwise they are pooled.
        if attention:
            hdf5_path = '{}mode_{}.hdf5'.format(dataset.image_path, im)
        else:
            hdf5_path = '{}mode_{}_noatt.hdf5'.format(dataset.image_path, im)
        image_id_to_idx_path = '{}mode_{}_id_to_idx.txt'.format(dataset.image_path, im)
        hdf5_built_file = hdf5_path + '.built'
        if os.path.isfile(hdf5_path) and os.path.isfile(hdf5_built_file):
            print('[ Images already extracted at: {} ]'.format(hdf5_path))
            return
        # NOTE(review): this formats the datatype name (e.g. 'train'), not a
        # count, so the "{} images" wording is misleading.
        print("[ Beginning image extraction for {} images ]".format(dt.split(':')[0]))
        hdf5_file = h5py.File(hdf5_path, 'w')
        idx = 0
        for ex in iter(dataloader):
            # Skip duplicate images; remember each image's hdf5 row.
            if ex['image_id'] in image_id_to_index:
                continue
            else:
                image_id_to_index[ex['image_id']] = idx
            img = ex['image']
            if isinstance(img, torch.autograd.Variable):
                img = img.cpu().data
            if not attention:
                # Mean-pool over the spatial grid down to a 2048-d vector
                # (assumes resnet-style (1, 2048, H, W) features — TODO confirm).
                nb_regions = img.size(2) * img.size(3)
                img = img.sum(3).sum(2).div(nb_regions).view(-1, 2048)
            if dataset_shape is None:
                # Lazily create the hdf5 dataset once the feature shape is known.
                if attention:
                    dataset_shape = (num_images, img.size(1), img.size(2), img.size(3))
                else:
                    dataset_shape = (num_images, img.size(1))
                hdf5_dataset = hdf5_file.create_dataset(
                    'images',
                    dataset_shape,
                    dtype='f')
            hdf5_dataset[idx] = img
            logger.log(idx, num_images)
            idx += 1
        hdf5_file.close()
        if not os.path.exists(image_id_to_idx_path):
            with open(image_id_to_idx_path, 'w') as f:
                json.dump(image_id_to_index, f)
        # Marker file indicates the hdf5 extraction completed successfully.
        with open(hdf5_built_file, 'w') as write:
            write.write(str(datetime.datetime.today()))
        print("[ Finished extracting images ]")
if __name__ == '__main__':
    # Parse the default arguments and run the extraction end to end.
    extract_feats(setup_args().parse_args())
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Run the python or pytorch profiler and prints the results.
Examples
--------
To make sure that bAbI task 1 (1k exs) loads one can run and to
see a few of them:
.. code-block:: shell
python examples/profile.py -t babi:task1k:1 -m seq2seq -e 0.1 --dict-file /tmp/dict
"""
from parlai.core.params import ParlaiParser
from train_model import setup_args as train_args
from train_model import TrainLoop
import cProfile
import io
import pdb
import pstats
try:
import torch
except ImportError:
print('Torch not found--only cProfile allowed with this tool.')
def setup_args(parser=None):
    """Build the profiling argument parser: train_model args plus flags.

    :param parser: existing parser to extend; a new ParlaiParser is created
        when None.
    :returns: the configured parser.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'cProfile a training run')
    parser = train_args(parser)
    group = parser.add_argument_group('Profiler Arguments')
    # All profiler flags are simple booleans defaulting to off.
    for flag, helpmsg in (
        ('--torch',
         'If true, use the torch profiler. Otherwise use cProfile.'),
        ('--torch-cuda',
         'If true, use the torch cuda profiler. Otherwise use cProfile.'),
        ('--debug',
         'If true, enter debugger at end of run.'),
    ):
        group.add_argument(flag, type='bool', default=False, help=helpmsg)
    return parser
def profile(opt):
    """Run a full training loop under a profiler and print the results.

    Uses the torch autograd profiler when --torch or --torch-cuda is set,
    otherwise cProfile. With --debug, drops into pdb afterwards so the
    sorted results can be inspected interactively.

    :param opt: parsed options (passing a ParlaiParser is deprecated).
    """
    if isinstance(opt, ParlaiParser):
        print('[ Deprecated Warning: profile should be passed opt not Parser ]')
        opt = opt.parse_args()
    if opt['torch'] or opt['torch_cuda']:
        with torch.autograd.profiler.profile(use_cuda=opt['torch_cuda']) as prof:
            TrainLoop(opt).train()
        print(prof.total_average())
        # Pre-sort per-op averages so the debug helpers below are cheap.
        sort_cpu = sorted(prof.key_averages(), key=lambda k: k.cpu_time)
        sort_cuda = sorted(prof.key_averages(), key=lambda k: k.cuda_time)
        def cpu():
            # Print ops sorted by cpu time (ascending); callable from pdb.
            for e in sort_cpu:
                print(e)
        def cuda():
            # Print ops sorted by cuda time (ascending); callable from pdb.
            for e in sort_cuda:
                print(e)
        cpu()
        if opt['debug']:
            print('`cpu()` prints out cpu-sorted list, '
                  '`cuda()` prints cuda-sorted list')
            pdb.set_trace()
    else:
        # Fall back to the standard-library deterministic profiler.
        pr = cProfile.Profile()
        pr.enable()
        TrainLoop(opt).train()
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print(s.getvalue())
        if opt['debug']:
            pdb.set_trace()
if __name__ == '__main__':
    # Parse the default arguments and profile a full training run.
    profile(setup_args().parse_args())
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""This file contains a list of all the models in the model zoo, the path to
load them, agents & tasks associated (e.g. they were trained using) and a
description. Using the path you should be able to download and use the model
automatically, e.g.:
python examples/interactive.py --model-file\
"models:wikipedia_2016-12-21/tfidf_retriever/drqa_docs"
"""
# Registry of pretrained models downloadable via "models:<path>" values.
# Each entry records: id, download path, the agent and task the model was
# built with, an optional example invocation, and reference results.
model_list = [
    {
        "id": "drqa",
        "path": "models:drqa/squad/model",
        "agent": "drqa",
        "task": "squad",
        "description": "drqa reader trained on SQuAD",
        "result": (
            "{'exs': 10570, 'accuracy': 0.6886, 'f1': 0.7821, 'hits@1': 0.689, 'hits@5': 0.689, 'hits@10': 0.689, 'hits@100': 0.689, 'bleu': 0.1364, 'train_loss': 0}"  # noqa: E501
        ),
    },
    {
        "id": "wikipedia_2016-12-21",
        "path": "models:wikipedia_2016-12-21/tfidf_retriever/drqa_docs",
        "agent": "tfidf_retriever",
        "task": "wikipedia:full",
        "example": (
            "python -m parlai.scripts.interactive --model tfidf_retriever "
            "-mf models:wikipedia_2016-12-21/tfidf_retriever/drqa_docs"
        ),
        "result": (
            """
            Enter Your Message: Yann LeCun
            [candidate_scores]: [507.05804682 390.18244433 279.24033928 269.60377042 214.00140589]
            [SparseTfidfRetrieverAgent]:
            Deep learning (also known as deep structured learning, hierarchical learning or deep machine learning) is a branch of machine learning based on a set of algorithms that attempt to model high level abstractions in data. In a simple case, you could have two sets of neurons: ones that receive an input signal and ones that send an output signal. When the input layer receives an input it passes on a modified version of the input to the next layer. In a deep network, there are many layers between the input and output (and the layers are not made of neurons but it can help to think of it that way), allowing the algorithm to use multiple processing layers, composed of multiple linear and non-linear transformations.
            Deep learning is part of a broader family of machine learning methods based on ...
            to commonsense reasoning which operates on concepts in terms of production rules of the grammar, and is a basic goal of both human language acquisition and AI. (See also Grammar induction.)
            """  # noqa: E501
        ),
        "description": (
            "Retrieval over Wikipedia dump, used for DrQA on the open squad "
            "dataset. This is the dump from the original paper, used for "
            "replicating results."
        )
    },
    {
        "id": "wikipedia_full",
        "path": "models:wikipedia_full/tfidf_retriever/model",
        "agent": "tfidf_retriever",
        "task": "wikipedia:full",
        "description": (
            "Retrieval over Wikipedia dump, used for DrQA on the open squad "
            "dataset."
        ),
        "example": "python -m parlai.scripts.interactive --model tfidf_retriever -mf models:wikipedia_full/tfidf_retriever/model",  # noqa: E501
        "result": (
            """
            Enter Your Message: Yann LeCun
            [candidate_scores]: [454.74038503 353.88863708 307.31353203 280.4501096 269.89960432]
            [SparseTfidfRetrieverAgent]:
            Yann LeCun (; born 1960) is a computer scientist with contributions in machine learning, computer vision, mobile robotics and computational neuroscience. He is well known for his work on optical character recognition and computer vision using convolutional neural networks (CNN), and is a founding father of convolutional nets. He is also one of the main creators of the DjVu image compression technology (together with Léon Bottou and Patrick Haffner). He co-developed the Lush programming language with Léon Bottou.
            Yann LeCun was born near Paris, France, in 1960. He received a Diplôme d'Ingénieur from the Ecole Superieure d'Ingénieur en Electrotechnique et Electronique (ESIEE), Paris in 1983, and a PhD in Computer Science from Université Pierre et Marie Curie in 1987 during which he ...
            of Science and Technology in Saudi Arabia because he was considered a terrorist in the country in view of his atheism.
            In 2018 Yann LeCun picked a fight with a robot to support Facebook AI goals.
            """  # noqa: E501
        ),
    },
    {
        "id": "twitter",
        "path": "models:twitter/seq2seq/twitter_seq2seq_model",
        "agent": "legacy:seq2seq:0",
        "task": "twitter",
        "description": (
            "Generic conversational model trained on the twitter task"
        ),
        "result": "{'exs': 10405, 'accuracy': 0.001538, 'f1': 0.07537, 'bleu': 0.002304, 'loss': 3.93, 'ppl': 50.9}",  # noqa: E501
    }
]
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Simple IR baselines.
We plan to implement the following variants:
Given an input message, either:
(i) find the most similar message in the (training) dataset and output the
response from that exchange; or
(ii) find the most similar response to the input directly.
(iii) if label_candidates are provided, simply ranks them according to their
similarity to the input message.
Currently only (iii) is used.
Additionally, TFIDF is either used (requires building a dictionary) or not,
depending on whether you train on the train set first, or not.
"""
import math
from collections.abc import Sequence
import heapq
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
class MaxPriorityQueue(Sequence):
    """Fixed-size priority queue keeping the ``max_size`` largest items.

    Internally a min-heap of ``(priority, insertion_index, item)`` triples:
    the heap root holds the smallest retained priority, so a new item only
    needs to beat the root to enter a full queue. The insertion index breaks
    priority ties, which both keeps eviction stable and avoids comparing the
    items themselves (the previous two-tuple layout raised TypeError on a
    priority tie between unorderable items such as dicts).
    """

    def __init__(self, max_size):
        """Initialize priority queue.

        :param max_size: maximum capacity of priority queue.
        """
        self.capacity = max_size
        self.lst = []
        self._counter = 0  # monotonically increasing tie-breaker

    def add(self, item, priority=None):
        """Add element to the queue, with a separate priority if desired.

        Element will not be added if the queue is at capacity and the element
        has lower priority than the lowest currently in the queue.

        :param item: item to add to queue.
        :param priority: priority to use for item. if None (default), will use
            the item itself to calculate its own priority.
        """
        if priority is None:
            priority = item
        entry = (priority, self._counter, item)
        self._counter += 1
        if len(self.lst) < self.capacity:
            heapq.heappush(self.lst, entry)
        elif priority > self.lst[0][0]:
            # Evict the current minimum in a single O(log n) operation.
            heapq.heapreplace(self.lst, entry)

    def __getitem__(self, key):
        """Get item at the specified index, in ascending priority order.

        :param key: integer index into priority queue, 0 <= index < max_size.
        :returns: item stored at the specified index.
        """
        return sorted(self.lst)[key][-1]

    def __len__(self):
        """Return the number of items currently stored."""
        return len(self.lst)

    def __str__(self):
        """Return str representation of the priority queue in list form."""
        return str([entry[-1] for entry in sorted(self.lst)])

    def __repr__(self):
        """Return repr representation of the priority queue in list form."""
        return repr([entry[-1] for entry in sorted(self.lst)])
# Common English stopwords/punctuation, used by build_query_representation to
# skip uninformative tokens when no trained dictionary (hence no inverse
# frequency weighting) is available.
stopwords = {
    'i', 'a', 'an', 'are', 'about', 'as', 'at', 'be', 'by', 'for', 'from',
    'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that', 'the', 'this', 'to',
    'was', 'what', 'when', 'where', '--', '?', '.', "''", "''", "``", ',',
    'do', 'see', 'want', 'people', 'and', "n't", "me", 'too', 'own', 'their',
    '*', "'s", 'not', 'than', 'other', 'you', 'your', 'know', 'just', 'but',
    'does', 'really', 'have', 'into', 'more', 'also', 'has', 'any', 'why',
    'will'
}
def score_match(query_rep, text, length_penalty, dictionary=None, debug=False):
    """Calculate the match score between a query representation and a text.

    Sums the query weights of the distinct tokens of ``text`` that appear in
    the query, then normalizes by the (query norm x match count) raised to
    ``length_penalty``.

    :param query_rep: query representation from build_query_representation,
        a dict with 'words' (token => weight) and 'norm' entries.
    :param text: string to compare against query_rep for matching tokens.
    :param length_penalty: scores are divided by the norm taken to this power.
    :param dictionary: optional dictionary used to tokenize text; whitespace
        split when None.
    :param debug: flag to enable printing every match.
    :returns: float score of match.
    """
    if text == "":
        return 0
    if not dictionary:
        words = text.lower().split(' ')
    else:
        words = [w for w in dictionary.tokenize(text.lower())]
    score = 0
    rw = query_rep['words']
    # Each query token is counted at most once, however often it repeats.
    used = set()
    for w in words:
        if w in rw and w not in used:
            score += rw[w]
            if debug:
                print("match: " + w)
            used.add(w)
    norm = math.sqrt(len(used))
    norm = math.pow(norm * query_rep['norm'], length_penalty)
    if norm > 1:
        score /= norm
    return score
def rank_candidates(query_rep, cands, length_penalty, dictionary=None):
    """Rank candidates given representation of query.

    :param query_rep: base query representation to match text against.
    :param cands: strings to compare against query_rep for matching tokens.
    :param length_penalty: scores are divided by the norm taken to this power.
    :param dictionary: optional dictionary to use to tokenize text.
    :returns: list of up to 100 candidate strings, best match first.
    """
    # NOTE: a dead `if True:` branch carrying an unused O(n log n) full-sort
    # implementation was removed; only the priority-queue path was reachable.
    mpq = MaxPriorityQueue(100)
    for c in cands:
        score = score_match(query_rep, c, length_penalty, dictionary)
        mpq.add(c, score)
    # The queue iterates ascending by score; reverse for best-first order.
    return list(reversed(mpq))
class IrBaselineAgent(Agent):
    """Information Retrieval baseline.

    Ranks the observation's label candidates (or a fixed candidate file)
    against the recent dialogue history using the bag-of-words / inverse
    frequency scoring in score_match / rank_candidates.
    """

    @staticmethod
    def add_cmdline_args(parser):
        """Add command line args specific to this agent."""
        parser = parser.add_argument_group('IrBaseline Arguments')
        parser.add_argument(
            '-lp', '--length_penalty', type=float, default=0.5,
            help='length penalty for responses')
        parser.add_argument(
            '-hsz', '--history_size', type=int, default=1,
            help='number of utterances from the dialogue history to take use '
                 'as the query')
        parser.add_argument(
            '--label_candidates_file', type=str, default=None,
            help='file of candidate responses to choose from')

    def __init__(self, opt, shared=None):
        """Initialize agent.

        :param opt: parlai options.
        :param shared: unused here; accepted for Agent API compatibility.
        """
        super().__init__(opt)
        self.id = 'IRBaselineAgent'
        self.length_penalty = float(opt['length_penalty'])
        self.dictionary = DictionaryAgent(opt)
        self.opt = opt
        self.history = []
        self.episodeDone = True
        if opt.get('label_candidates_file'):
            # Context manager ensures the handle is closed (it previously
            # leaked an open file object).
            with open(opt['label_candidates_file']) as f:
                self.label_candidates = f.read().split('\n')

    def reset(self):
        """Reset agent properties."""
        self.observation = None
        self.history = []
        self.episodeDone = True

    def observe(self, obs):
        """Store and remember incoming observation message dict."""
        self.observation = obs
        self.dictionary.observe(obs)
        if self.episodeDone:
            self.history = []
        if 'text' in obs:
            self.history.append(obs.get('text', ''))
        self.episodeDone = obs.get('episode_done', False)
        return obs

    def act(self):
        """Generate a response to the previously seen observation(s)."""
        if self.opt.get('datatype', '').startswith('train'):
            # During training, let the dictionary accumulate frequencies.
            self.dictionary.act()
        obs = self.observation
        reply = {}
        reply['id'] = self.getID()
        # Rank candidates
        cands = None
        if obs.get('label_candidates', False) and len(obs['label_candidates']) > 0:
            cands = obs['label_candidates']
        if hasattr(self, 'label_candidates'):
            # override label candidates with candidate file if set
            cands = self.label_candidates
        if cands:
            # Use the last `history_size` utterances as the query.
            hist_sz = self.opt.get('history_size', 1)
            left_idx = max(0, len(self.history) - hist_sz)
            text = ' '.join(self.history[left_idx:len(self.history)])
            rep = self.build_query_representation(text)
            reply['text_candidates'] = (
                rank_candidates(rep, cands,
                                self.length_penalty, self.dictionary))
            reply['text'] = reply['text_candidates'][0]
        else:
            reply['text'] = "I don't know."
        return reply

    def save(self, fname=None):
        """Save dictionary tokenizer if available."""
        fname = self.opt.get('model_file', None) if fname is None else fname
        if fname:
            self.dictionary.save(fname + '.dict')

    def load(self, fname):
        """Load internal dictionary."""
        self.dictionary.load(fname + '.dict')

    def build_query_representation(self, query):
        """Build representation of query, e.g. words or n-grams.

        :param query: string to represent.
        :returns: dictionary containing 'words' dictionary (token => weight)
            and 'norm' float (square root of the number of tokens).
        """
        rep = {}
        rep['words'] = {}
        words = [w for w in self.dictionary.tokenize(query.lower())]
        rw = rep['words']
        used = {}
        for w in words:
            if len(self.dictionary.freqs()) > 0:
                # Inverse-frequency weighting when a trained dictionary exists.
                rw[w] = 1.0 / (1.0 + math.log(1.0 + self.dictionary.freqs()[w]))
            else:
                # No dictionary: weight 1 for non-stopwords only.
                if w not in stopwords:
                    rw[w] = 1
            used[w] = True
        rep['norm'] = math.sqrt(len(words))
        return rep
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Simple agent which repeats back the labels sent to it.
By default, replies with a single random label from the list of labels sent to
it, if any. If the ``label_candidates`` field is set, will fill the ``text_candidates``
field with up to a hundred randomly selected candidates (the first text
candidate is the selected label).
Options:
``returnOneRandomAnswer`` -- default ``True``, set to ``False`` to instead
reply with all labels joined by commas.
``cantAnswerPercent`` -- default ``0``, set value in range[0,1] to set
chance of replying with "I don't know."
"""
import random
from parlai.core.agents import Agent
class RepeatLabelAgent(Agent):
    """Agent that replies with (one of) the labels sent to it.

    By default replies with a single random label; with
    --return_one_random_answer false it joins all labels by commas. With
    probability --cant_answer_percent it replies with --cant_answer_message
    instead, as it also does when no labels are available.
    """

    @staticmethod
    def add_cmdline_args(argparser):
        """Add RepeatLabel command line options to the parser."""
        group = argparser.add_argument_group('RepeatLabel Arguments')
        group.add_argument('--return_one_random_answer', type='bool', default=True,
                           help='return one answer from the set of labels')
        group.add_argument('--cant_answer_percent', type=float, default=0,
                           help='set value in range[0,1] to set chance of '
                                'replying with special message')
        group.add_argument('--cant_answer_message', type=str, default="I don't know.",
                           help='Message sent when the model cannot answer')

    def __init__(self, opt, shared=None):
        """Initialize from options; `shared` is accepted for API parity."""
        super().__init__(opt)
        self.returnOneRandomAnswer = opt.get('return_one_random_answer', True)
        self.cantAnswerPercent = opt.get('cant_answer_percent', 0)
        self.cantAnswerMessage = opt.get('cant_answer_message', "I don't know.")
        self.id = 'RepeatLabelAgent'

    def act(self):
        """Return a reply built from the last observation's labels."""
        obs = self.observation
        if obs is None:
            return {'text': 'Nothing to repeat yet.'}
        reply = {}
        reply['id'] = self.getID()
        labels = obs.get('labels', obs.get('eval_labels', None))
        # Short-circuit keeps random.random() from being called when there
        # are no labels, matching the original control flow.
        if labels and random.random() >= self.cantAnswerPercent:
            if self.returnOneRandomAnswer:
                reply['text'] = labels[random.randrange(len(labels))]
            else:
                reply['text'] = ', '.join(labels)
        else:
            # No labels, or we randomly chose not to answer.
            reply['text'] = self.cantAnswerMessage
        return reply
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Baseline model which always emits the N most common non-punctuation
unigrams. Typically this is mostly stopwords. This model is a poor
conversationalist, but may get reasonable F1.
UnigramAgent has one option, --num-words, which controls the unigrams
outputted.
This also makes a nice reference for a simple, minimalist agent.
"""
import json
import re
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from itertools import islice
class UnigramAgent(Agent):
    """Baseline agent that always emits the N most common unigrams.

    Typically this is mostly stopwords; it is a poor conversationalist but
    may achieve reasonable F1. The number of unigrams is controlled by
    --num-words.
    """

    @classmethod
    def add_cmdline_args(cls, parser):
        """Add command line arguments for this agent."""
        parser.add_argument(
            '--num-words', type=int, default=10,
            help='Number of unigrams to output.'
        )
        cls.dictionary_class().add_cmdline_args(parser)

    @classmethod
    def dictionary_class(cls):
        """Return the DictionaryAgent used for tokenization."""
        return DictionaryAgent

    def __init__(self, opt, shared=None):
        """Construct a UnigramAgent.

        :param opt: parlai options
        :param shared: Used to duplicate the model for batching/hogwild.
        """
        self.id = 'UnigramAgent'
        self.unigram_cache = None
        self.opt = opt
        self.num_words = opt['num_words']
        if shared is not None:
            self.dict = shared['dict']
        else:
            self.dict = self.dictionary_class()(opt)

    def share(self):
        """Basic sharing function."""
        return {'dict': self.dict}

    def observe(self, obs):
        """Stub observe method."""
        self.observation = obs

    def is_valid_word(self, word):
        """Mark whether a string may be included in the unigram list.

        Used to filter punctuation and special tokens.
        """
        return (
            not word.startswith('__') and
            word != '\n' and
            # Raw string: '[^\w]' as a plain literal is an invalid escape
            # (DeprecationWarning today, a SyntaxError in future Pythons).
            not re.match(r'[^\w]', word)
        )

    def get_prediction(self):
        """Gather the most common valid unigrams into a single string."""
        # we always make the same prediction, so cache it for speed
        if self.unigram_cache is None:
            most_common = sorted(
                self.dict.freq.items(), key=lambda x: x[1], reverse=True
            )
            most_common = ((u, v) for u, v in most_common if self.is_valid_word(u))
            most_common = islice(most_common, self.num_words)
            most_common = (u for u, v in most_common)
            self.unigram_cache = ' '.join(list(most_common))
        return self.unigram_cache

    def act(self):
        """Stub act, which always makes the same prediction."""
        return {
            'id': self.getID(),
            'text': self.get_prediction(),
        }

    def save(self, path=None):
        """Dump the prediction and options to disk.

        Necessary for evaluation scripts to load the model.
        """
        if not path:
            return
        with open(path, 'w') as f:
            f.write(self.get_prediction() + '\n')
        with open(path + '.opt', 'w') as f:
            json.dump(self.opt, f)

    def load(self, path):
        """Stub load: the model is derived entirely from the dictionary,
        which is saved and loaded elsewhere."""
        # we rely on the dict, so we don't actually need to load anything
        pass
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Various retriever utilities."""
import regex
import unicodedata
import numpy as np
import scipy.sparse as sp
from sklearn.utils import murmurhash3_32
try:
import torch
except ImportError as e:
raise ImportError('Need to install Pytorch: go to pytorch.org')
# ------------------------------------------------------------------------------
# Sparse matrix saving/loading helpers.
# ------------------------------------------------------------------------------
def save_sparse_csr(filename, matrix, metadata=None):
    """Serialize a scipy CSR matrix (plus optional metadata) via np.savez.

    :param filename: destination path; numpy appends the '.npz' suffix.
    :param matrix: scipy.sparse.csr_matrix to save.
    :param metadata: optional picklable object stored alongside the matrix.
    """
    np.savez(
        filename,
        data=matrix.data,
        indices=matrix.indices,
        indptr=matrix.indptr,
        shape=matrix.shape,
        metadata=metadata,
    )
def save_sparse_tensor(filename, matrix, metadata=None):
    """Serialize a sparse torch tensor (plus optional metadata) via torch.save.

    :param filename: destination path.
    :param matrix: sparse (COO) torch tensor; must support _indices()/_values().
    :param metadata: optional object stored alongside the tensor pieces.
    """
    payload = {
        'indices': matrix._indices(),
        'values': matrix._values(),
        'size': matrix.size(),
        'metadata': metadata,
    }
    torch.save(payload, filename)
def load_sparse_csr(filename):
    """Load a CSR matrix saved by ``save_sparse_csr``.

    :param filename: path without the '.npz' suffix.
    :returns: ``(matrix, metadata)``; metadata is None when absent.
    """
    # allow_pickle=True is required to read the object-typed 'metadata'
    # entry under numpy >= 1.16.3 (the default changed to False). Only load
    # files from trusted sources.
    loader = np.load(filename + '.npz', allow_pickle=True)
    matrix = sp.csr_matrix((loader['data'], loader['indices'],
                            loader['indptr']), shape=loader['shape'])
    return matrix, loader['metadata'].item(0) if 'metadata' in loader else None
def load_sparse_tensor(filename):
    """Load a sparse torch tensor saved by ``save_sparse_tensor``.

    :param filename: path of the saved file.
    :returns: ``(tensor, metadata)``; metadata is None when absent.
    """
    saved = torch.load(filename)
    tensor = torch.sparse.FloatTensor(
        saved['indices'], saved['values'], saved['size']
    )
    meta = saved['metadata'] if 'metadata' in saved else None
    return tensor, meta
# ------------------------------------------------------------------------------
# Token hashing.
# ------------------------------------------------------------------------------
def hash(token, num_buckets):
    """Unsigned 32 bit murmurhash for feature hashing.

    :param token: value to hash (typically a token string).
    :param num_buckets: size of the feature-hashing space; the result lies
        in ``range(num_buckets)``.

    NOTE(review): this deliberately shadows the ``hash`` builtin within this
    module; avoid relying on the builtin after this definition.
    """
    return murmurhash3_32(token, positive=True) % num_buckets
# ------------------------------------------------------------------------------
# Text cleaning.
# ------------------------------------------------------------------------------
# English stopwords (NLTK-style list plus contraction fragments and quote
# tokens) used by filter_word/filter_ngram to discard uninformative tokens.
STOPWORDS = {
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
    'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
    'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
    'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
    'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
    'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
    'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
    'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
    'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
    'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
    'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
    'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
    'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
    'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've',
    'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven',
    'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren',
    'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``"
}
def normalize(text):
    """Resolve different type of unicode encodings.

    Non-string inputs are returned unchanged; strings are normalized to NFD
    (canonical decomposition) so visually identical text compares equal
    regardless of its original encoding.
    """
    # BUGFIX: use isinstance instead of an exact type comparison so that
    # str subclasses are normalized too.
    if not isinstance(text, str):
        return text
    return unicodedata.normalize('NFD', text)
def filter_word(text):
    """Take out english stopwords, punctuation, and compound endings."""
    text = normalize(text)
    # Tokens made entirely of punctuation are always filtered.
    if regex.match(r'^\p{P}+$', text):
        return True
    # Case-insensitive stopword membership.
    return text.lower() in STOPWORDS
def filter_ngram(gram, mode='any'):
    """Decide whether to keep or discard an n-gram.

    Args:
        gram: list of tokens (length N)
        mode: Option to throw out ngram if
          'any': any single token passes filter_word
          'all': all tokens pass filter_word
          'ends': book-ended by filterable tokens
    """
    flags = [filter_word(token) for token in gram]
    if mode == 'any':
        return any(flags)
    if mode == 'all':
        return all(flags)
    if mode == 'ends':
        return flags[0] or flags[-1]
    raise ValueError('Invalid mode: %s' % mode)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Rank documents with TF-IDF scores.
Adapted from Adam Fisch's work at github.com/facebookresearch/DrQA/
"""
import logging
import numpy as np
import scipy.sparse as sp
from multiprocessing.pool import ThreadPool
from functools import partial
from . import utils
from . import tokenizers
logger = logging.getLogger(__name__)
class TfidfDocRanker(object):
    """Loads a pre-weighted inverted index of token/document terms.
    Scores new queries by taking sparse dot products.
    """

    def __init__(self, tfidf_path=None, strict=True):
        """
        Args:
            tfidf_path: path to saved model file
            strict: fail on empty queries or continue (and return empty result)
        """
        # Load from disk
        logger.info('Loading %s' % tfidf_path)
        matrix, metadata = utils.load_sparse_csr(tfidf_path)
        # Sparse tfidf-weighted term-document matrix (hash_size x columns).
        self.doc_mat = matrix
        # Max ngram order and feature-hash size used when the index was built.
        self.ngrams = metadata['ngram']
        self.hash_size = metadata['hash_size']
        self.tokenizer = tokenizers.get_class(metadata['tokenizer'])()
        # Per-hashed-term document frequencies (Nt), flattened to 1-D.
        self.doc_freqs = metadata['doc_freqs'].squeeze()
        # Optional (id->index, index->id) mapping pair; absent means identity.
        self.doc_dict = metadata.get('doc_dict', None)
        # NOTE(review): assumes the matrix carries one extra column beyond the
        # real documents -- confirm against the index builder.
        self.num_docs = self.doc_mat.shape[1] - 1
        self.strict = strict

    def get_doc_index(self, doc_id):
        """Convert doc_id --> doc_index"""
        return self.doc_dict[0][doc_id] if self.doc_dict else doc_id

    def get_doc_id(self, doc_index):
        """Convert doc_index --> doc_id"""
        return self.doc_dict[1][doc_index] if self.doc_dict else doc_index

    def closest_docs(self, query, k=1, matrix=None):
        """Closest docs by dot product between query and documents
        in tfidf weighted word vector space.
        matrix arg can be provided to be used instead of internal doc matrix.
        """
        spvec = self.text2spvec(query)
        res = spvec * matrix if matrix is not None else spvec * self.doc_mat
        # Only the nonzero scores (res.data) are candidates: with at most k of
        # them a full sort is cheapest, otherwise argpartition the top-k first.
        if len(res.data) <= k:
            o_sort = np.argsort(-res.data)
        else:
            o = np.argpartition(-res.data, k)[0:k]
            o_sort = o[np.argsort(-res.data[o])]
        doc_scores = res.data[o_sort]
        # NOTE(review): returns raw column indices; callers map them through
        # get_doc_id when a doc_dict is present.
        doc_ids = res.indices[o_sort]
        return doc_ids, doc_scores

    def batch_closest_docs(self, queries, k=1, num_workers=None):
        """Process a batch of closest_docs requests multithreaded.
        Note: we can use plain threads here as scipy is outside of the GIL.
        """
        with ThreadPool(num_workers) as threads:
            closest_docs = partial(self.closest_docs, k=k)
            results = threads.map(closest_docs, queries)
        return results

    def parse(self, query):
        """Parse the query into tokens (either ngrams or tokens)."""
        tokens = self.tokenizer.tokenize(query)
        return tokens.ngrams(n=self.ngrams, uncased=True,
                             filter_fn=utils.filter_ngram)

    def text2spvec(self, query):
        """Create a sparse tfidf-weighted word vector from query.
        tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))
        """
        # Get hashed ngrams
        words = self.parse(utils.normalize(query))
        wids = [utils.hash(w, self.hash_size) for w in words]
        if len(wids) == 0:
            # Everything was filtered out: either fail loudly or return an
            # all-zero query vector, depending on `strict`.
            if self.strict:
                raise RuntimeError('No valid word in: %s' % query)
            else:
                logger.warning('No valid word in: %s' % query)
                return sp.csr_matrix((1, self.hash_size))
        # Count TF
        wids_unique, wids_counts = np.unique(wids, return_counts=True)
        tfs = np.log1p(wids_counts)
        # Count IDF
        Ns = self.doc_freqs[wids_unique]
        idfs = np.log((self.num_docs - Ns + 0.5) / (Ns + 0.5))
        # Negative idf (term appears in more than half the docs) is clamped.
        idfs[idfs < 0] = 0
        # TF-IDF
        data = np.multiply(tfs, idfs)
        # One row, sparse csr matrix
        indptr = np.array([0, len(wids_unique)])
        spvec = sp.csr_matrix(
            (data, wids_unique, indptr), shape=(1, self.hash_size)
        )
        return spvec
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Regex based tokenizer that emulates the Stanford/NLTK PTB tokenizers.
However it is purely in Python, supports robust untokenization, unicode,
and requires minimal dependencies.
"""
import regex
import logging
from .tokenizer import Tokens, Tokenizer
logger = logging.getLogger(__name__)
class RegexpTokenizer(Tokenizer):
    """Pure-Python PTB-style tokenizer driven by one alternation regex.

    Alternatives earlier in the pattern take precedence, so specific token
    types (digits, titles, abbreviations, contractions, quotes, ...) are
    matched before the generic punctuation/non-whitespace catch-alls.
    """
    DIGIT = r'\p{Nd}+([:\.\,]\p{Nd}+)*'
    TITLE = (r'(dr|esq|hon|jr|mr|mrs|ms|prof|rev|sr|st|rt|messrs|mmes|msgr)'
             r'\.(?=\p{Z})')
    ABBRV = r'([\p{L}]\.){2,}(?=\p{Z}|$)'
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]++'
    HYPHEN = r'{A}([-\u058A\u2010\u2011]{A})+'.format(A=ALPHA_NUM)
    NEGATION = r"((?!n't)[\p{L}\p{N}\p{M}])++(?=n't)|n't"
    CONTRACTION1 = r"can(?=not\b)"
    CONTRACTION2 = r"'([tsdm]|re|ll|ve)\b"
    START_DQUOTE = r'(?<=[\p{Z}\(\[{<]|^)(``|["\u0093\u201C\u00AB])(?!\p{Z})'
    START_SQUOTE = r'(?<=[\p{Z}\(\[{<]|^)[\'\u0091\u2018\u201B\u2039](?!\p{Z})'
    END_DQUOTE = r'(?<!\p{Z})(\'\'|["\u0094\u201D\u00BB])'
    END_SQUOTE = r'(?<!\p{Z})[\'\u0092\u2019\u203A]'
    DASH = r'--|[\u0096\u0097\u2013\u2014\u2015]'
    ELLIPSES = r'\.\.\.|\u2026'
    PUNCT = r'\p{P}'
    NON_WS = r'[^\p{Z}\p{C}]'

    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
            substitutions: if true, normalizes some token types (e.g. quotes).
        """
        # BUGFIX: the ellipses group was written '(?<ellipses>...)'. The
        # third-party `regex` module tolerates that spelling, but it is
        # inconsistent with every other group here and invalid under stdlib
        # `re`; use the standard '(?P<ellipses>...)' named-group syntax.
        self._regexp = regex.compile(
            '(?P<digit>%s)|(?P<title>%s)|(?P<abbr>%s)|(?P<neg>%s)|(?P<hyph>%s)|'
            '(?P<contr1>%s)|(?P<alphanum>%s)|(?P<contr2>%s)|(?P<sdquote>%s)|'
            '(?P<edquote>%s)|(?P<ssquote>%s)|(?P<esquote>%s)|(?P<dash>%s)|'
            '(?P<ellipses>%s)|(?P<punct>%s)|(?P<nonws>%s)' %
            (self.DIGIT, self.TITLE, self.ABBRV, self.NEGATION, self.HYPHEN,
             self.CONTRACTION1, self.ALPHA_NUM, self.CONTRACTION2,
             self.START_DQUOTE, self.END_DQUOTE, self.START_SQUOTE,
             self.END_SQUOTE, self.DASH, self.ELLIPSES, self.PUNCT,
             self.NON_WS),
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()
        self.substitutions = kwargs.get('substitutions', True)

    def tokenize(self, text):
        """Tokenize `text` and return a Tokens object.

        Each entry is (token, token + trailing whitespace, (start, end)) so
        the original string can be reconstructed with untokenize().
        """
        data = []
        matches = [m for m in self._regexp.finditer(text)]
        for i in range(len(matches)):
            # Get text
            token = matches[i].group()
            # Make normalizations for special token types (PTB conventions)
            if self.substitutions:
                groups = matches[i].groupdict()
                if groups['sdquote']:
                    token = "``"
                elif groups['edquote']:
                    token = "''"
                elif groups['ssquote']:
                    token = "`"
                elif groups['esquote']:
                    token = "'"
                elif groups['dash']:
                    token = '--'
                elif groups['ellipses']:
                    token = '...'
            # Get whitespace: runs up to the next token's start (or own end).
            span = matches[i].span()
            start_ws = span[0]
            if i + 1 < len(matches):
                end_ws = matches[i + 1].span()[0]
            else:
                end_ws = span[1]
            # Format data
            data.append((
                token,
                text[start_ws: end_ws],
                span,
            ))
        return Tokens(data, self.annotators)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Base tokenizer/tokens classes and utilities."""
import copy
class Tokens(object):
    """A list of tokenized text with optional per-token annotations.

    Each element of ``data`` is a tuple addressed by the class-level column
    indices below; the first three columns are always present, the annotation
    columns only when the matching annotator was enabled.
    """
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5

    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = opts if opts is not None else {}

    def __len__(self):
        """The number of tokens."""
        return len(self.data)

    def slice(self, i=None, j=None):
        """Return a view of the list of tokens from [i, j)."""
        view = copy.copy(self)
        view.data = self.data[i:j]
        return view

    def untokenize(self):
        """Returns the original text (with whitespace reinserted)."""
        return ''.join(tok[self.TEXT_WS] for tok in self.data).strip()

    def words(self, uncased=False):
        """Returns a list of the text of each token.

        Args:
            uncased: lower cases text
        """
        texts = [tok[self.TEXT] for tok in self.data]
        if uncased:
            texts = [word.lower() for word in texts]
        return texts

    def offsets(self):
        """Returns a list of [start, end) character offsets of each token."""
        return [tok[self.SPAN] for tok in self.data]

    def pos(self):
        """Returns part-of-speech tags, or None if not annotated."""
        if 'pos' in self.annotators:
            return [tok[self.POS] for tok in self.data]
        return None

    def lemmas(self):
        """Returns lemmatized text of each token, or None if not annotated."""
        if 'lemma' in self.annotators:
            return [tok[self.LEMMA] for tok in self.data]
        return None

    def entities(self):
        """Returns NER tags of each token, or None if not annotated."""
        if 'ner' in self.annotators:
            return [tok[self.NER] for tok in self.data]
        return None

    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """Returns all ngrams of length 1 to n, in position order.

        Args:
            n: upper limit of ngram length
            uncased: lower cases text
            filter_fn: predicate over a token list; a truthy result drops
                the ngram
            as_strings: return each ngram as a joined string vs (s, e) span
        """
        words = self.words(uncased)
        spans = []
        for start in range(len(words)):
            longest = min(start + n, len(words))
            for end in range(start + 1, longest + 1):
                if filter_fn is not None and filter_fn(words[start:end]):
                    continue
                spans.append((start, end))
        if as_strings:
            return [' '.join(words[s:e]) for (s, e) in spans]
        return spans

    def entity_groups(self):
        """Group consecutive entity tokens with the same NER tag."""
        tags = self.entities()
        if not tags:
            return None
        non_ent = self.opts.get('non_ent', 'O')
        groups = []
        idx = 0
        total = len(tags)
        while idx < total:
            tag = tags[idx]
            if tag == non_ent:
                idx += 1
                continue
            # Chomp the run of identical tags and re-join its surface text.
            start = idx
            while idx < total and tags[idx] == tag:
                idx += 1
            groups.append((self.slice(start, idx).untokenize(), tag))
        return groups
class Tokenizer(object):
    """Base tokenizer class.

    Subclasses implement ``tokenize``, which must return a ``Tokens``
    instance for the given text.
    """

    def tokenize(self, text):
        """Tokenize ``text``; must be overridden by subclasses."""
        raise NotImplementedError

    def shutdown(self):
        """Release held resources (no-op by default)."""
        pass

    def __del__(self):
        # Best-effort cleanup when the tokenizer is garbage collected.
        self.shutdown()
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Simple wrapper around the Stanford CoreNLP pipeline.
Serves commands to a java subprocess running the jar. Requires java 8.
"""
import copy
import json
import pexpect
from .tokenizer import Tokens, Tokenizer
from . import DEFAULTS
class CoreNLPTokenizer(Tokenizer):
    # Drives a long-lived CoreNLP java subprocess through pexpect; each call
    # to tokenize() writes a line to the process and parses its JSON reply.

    def __init__(self, **kwargs):
        """
        Args:
            annotators: set that can include pos, lemma, and ner.
            classpath: Path to the corenlp directory of jars
            mem: Java heap memory
        """
        self.classpath = (kwargs.get('classpath') or
                          DEFAULTS['corenlp_classpath'])
        self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
        self.mem = kwargs.get('mem', '2g')
        self._launch()

    def _launch(self):
        """Start the CoreNLP jar with pexpect."""
        # CoreNLP annotator dependencies: ner needs pos+lemma, lemma needs pos.
        annotators = ['tokenize', 'ssplit']
        if 'ner' in self.annotators:
            annotators.extend(['pos', 'lemma', 'ner'])
        elif 'lemma' in self.annotators:
            annotators.extend(['pos', 'lemma'])
        elif 'pos' in self.annotators:
            annotators.extend(['pos'])
        annotators = ','.join(annotators)
        # invertible=true keeps character offsets so text can be reconstructed.
        options = ','.join(['untokenizable=noneDelete',
                            'invertible=true'])
        cmd = ['java', '-mx' + self.mem, '-cp', '"%s"' % self.classpath,
               'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',
               annotators, '-tokenize.options', options,
               '-outputFormat', 'json', '-prettyPrint', 'false']
        # We use pexpect to keep the subprocess alive and feed it commands.
        # Because we don't want to get hit by the max terminal buffer size,
        # we turn off canonical input processing to have unlimited bytes.
        self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)
        self.corenlp.setecho(False)
        self.corenlp.sendline('stty -icanon')
        self.corenlp.sendline(' '.join(cmd))
        self.corenlp.delaybeforesend = 0
        self.corenlp.delayafterread = 0
        # Block until the interactive CoreNLP prompt appears.
        self.corenlp.expect_exact('NLP>', searchwindowsize=100)

    @staticmethod
    def _convert(token):
        """Map PTB bracket escapes back to their literal characters."""
        if token == '-LRB-':
            return '('
        if token == '-RRB-':
            return ')'
        if token == '-LSB-':
            return '['
        if token == '-RSB-':
            return ']'
        if token == '-LCB-':
            return '{'
        if token == '-RCB-':
            return '}'
        return token

    def tokenize(self, text):
        # Since we're feeding text to the commandline, we're waiting on seeing
        # the NLP> prompt. Hacky!
        if 'NLP>' in text:
            raise RuntimeError('Bad token (NLP>) in text!')
        # Sending q will cause the process to quit -- manually override
        if text.lower().strip() == 'q':
            token = text.strip()
            index = text.index(token)
            data = [(token, text[index:], (index, index + 1), 'NN', 'q', 'O')]
            return Tokens(data, self.annotators)
        # Minor cleanup before tokenizing.
        clean_text = text.replace('\n', ' ')
        self.corenlp.sendline(clean_text.encode('utf-8'))
        self.corenlp.expect_exact('NLP>', searchwindowsize=100)
        # Skip to start of output (may have been stderr logging messages)
        output = self.corenlp.before
        start = output.find(b'{"sentences":')
        output = json.loads(output[start:].decode('utf-8'))
        data = []
        # Flatten sentence-split output back into one token stream.
        tokens = [t for s in output['sentences'] for t in s['tokens']]
        for i in range(len(tokens)):
            # Get whitespace: runs up to the next token's start (or own end).
            start_ws = tokens[i]['characterOffsetBegin']
            if i + 1 < len(tokens):
                end_ws = tokens[i + 1]['characterOffsetBegin']
            else:
                end_ws = tokens[i]['characterOffsetEnd']
            data.append((
                self._convert(tokens[i]['word']),
                text[start_ws: end_ws],
                (tokens[i]['characterOffsetBegin'],
                 tokens[i]['characterOffsetEnd']),
                tokens[i].get('pos', None),
                tokens[i].get('lemma', None),
                tokens[i].get('ner', None)
            ))
        return Tokens(data, self.annotators)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Tokenizer that is backed by spaCy (spacy.io).
Requires spaCy package and the spaCy english model.
"""
import spacy
import copy
from .tokenizer import Tokens, Tokenizer
class SpacyTokenizer(Tokenizer):
    """Tokenizer backed by a spaCy pipeline (spaCy 1.x API)."""

    def __init__(self, **kwargs):
        """
        Args:
            annotators: set that can include pos, lemma, and ner.
            model: spaCy model to use (either path, or keyword like 'en').
        """
        self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
        model_name = kwargs.get('model', 'en')
        # Skip loading pipeline components no requested annotator needs.
        load_opts = {'parser': False}
        if not (self.annotators & {'lemma', 'pos', 'ner'}):
            load_opts['tagger'] = False
        if 'ner' not in self.annotators:
            load_opts['entity'] = False
        self.nlp = spacy.load(model_name, **load_opts)

    def tokenize(self, text):
        """Tokenize `text`, running only the components that were loaded."""
        # Newlines are treated as plain spaces, not token boundaries.
        doc = self.nlp.tokenizer(text.replace('\n', ' '))
        if self.annotators & {'lemma', 'pos', 'ner'}:
            self.nlp.tagger(doc)
        if 'ner' in self.annotators:
            self.nlp.entity(doc)
        output = []
        n_toks = len(doc)
        for i, tok in enumerate(doc):
            start_ws = tok.idx
            end = tok.idx + len(tok.text)
            # Whitespace runs up to the next token's start (or this token's end).
            end_ws = doc[i + 1].idx if i + 1 < n_toks else end
            output.append((
                tok.text,
                text[start_ws: end_ws],
                (tok.idx, end),
                tok.tag_,
                tok.lemma_,
                tok.ent_type_,
            ))
        # Set special option for non-entity tag: '' vs 'O' in spaCy
        return Tokens(output, self.annotators, opts={'non_ent': ''})
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.torch_agent import TorchAgent, Output
from .modules import VSEpp, ContrastiveLoss
from parlai.core.utils import round_sigfigs
import torch
import torchvision.transforms as transforms
import os
import numpy as np
class VseppCaptionAgent(TorchAgent):
    """
    Agent which takes an image and retrieves a caption.
    This agent supports modifying the CNN arch used for the image encoder. The
    model then uses a GRU to encode the different candidate captions. These
    encoders map the captions and images to a joint embedding space, so then
    a similarity metric is used to determine which captions are the best match
    for the images.
    For more information see the following paper:
    - VSE++: Improving Visual-Semantic Embeddings with Hard Negatives
    `(Faghri et al. 2017) <arxiv.org/abs/1707.05612>`
    """

    @staticmethod
    def add_cmdline_args(argparser):
        """Add command-line arguments specifically for this agent."""
        TorchAgent.add_cmdline_args(argparser)
        agent = argparser.add_argument_group('Image Caption Model Arguments')
        agent.add_argument('--word_dim', default=300, type=int,
                           help='Dimensionality of the word embedding.')
        agent.add_argument('--embed_size', default=1024, type=int,
                           help='Dimensionality of the joint embedding.')
        agent.add_argument('--num_layers', default=1, type=int,
                           help='Number of GRU layers.')
        agent.add_argument('--finetune', type='bool', default=False,
                           help='Finetune the image encoder')
        agent.add_argument('--cnn_type', default='resnet152',
                           help="""The CNN used for image encoder
                           (e.g. vgg19, resnet152)""")
        agent.add_argument('--no_imgnorm', type='bool', default=False,
                           help='Do not normalize the image embeddings.')
        agent.add_argument('--margin', default=0.2, type=float,
                           help='Rank loss margin.')
        agent.add_argument('--max_violation', type='bool', default=True,
                           help='Use max instead of sum in the rank loss.')
        agent.add_argument('-lr', '--learning_rate', type=float,
                           default=0.001, help='learning rate')
        VseppCaptionAgent.dictionary_class().add_cmdline_args(argparser)

    def __init__(self, opt, shared=None):
        """Set up model, optimizer and criterion (restoring saved state
        from opt['model_file'] when that file exists)."""
        super().__init__(opt, shared)
        self.id = 'VSEppImageCaption'
        self.mode = None
        if not shared:
            self.image_size = opt['image_size']
            self.crop_size = opt['image_cropsize']
            # initialize the transform function using torch vision.
            self.transform = transforms.Compose([
                transforms.Scale(self.image_size),
                transforms.RandomCrop(self.crop_size),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
            self.model = VSEpp(opt, self.dict)
            self.metrics = {'loss': 0.0, 'r@': []}
            self.optimizer = self.model.get_optim()
            load_model = None
            states = {}
            if opt.get('model_file') and os.path.isfile(opt['model_file']):
                load_model = opt['model_file']
            if load_model is not None:
                # load model parameters if available
                print('[ Loading existing model params from {} ]'.format(load_model))
                states = self.load(opt['model_file'])
            # NOTE(review): opt['margin'] / opt['max_violation'] are parsed in
            # add_cmdline_args but never forwarded here, so ContrastiveLoss
            # always runs with its defaults -- confirm this is intended.
            self.criterion = ContrastiveLoss(self.use_cuda)
            if self.use_cuda:
                self.model.cuda()
                self.criterion.cuda()
            if 'optimizer' in states:
                try:
                    self.optimizer.load_state_dict(states['optimizer'])
                except ValueError:
                    print('WARNING: not loading optim state since model '
                          'params changed.')
                if self.use_cuda:
                    # Move any restored optimizer tensors onto the GPU.
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if isinstance(v, torch.Tensor):
                                state[k] = v.cuda()
        self.reset()

    def reset(self):
        """Clear the last observation and zero the running metrics."""
        self.observation = None
        if hasattr(self, "metrics"):
            self.reset_metrics()

    def reset_metrics(self):
        """Zero the accumulated loss and retrieval-rank history."""
        self.metrics['loss'] = 0.0
        self.metrics['r@'] = []

    def candidate_helper(self, candidate_vecs, candidate_labels, is_testing):
        """
        Prepares a list of candidate lists into a format ready for the model
        as pack_padded_sequence requires each candidate must be in descending
        order of length.
        Returns a tuple of:
            (ordered_candidate_tensor, ordered_text_candidate_list,
             candidate_lengths, idx of truth caption*)
            *if exists -- else it will be None
        """
        cand_lens = [c.shape[0] for c in candidate_vecs]
        # Sort candidate indices by decreasing length.
        ind_sorted = sorted(range(len(cand_lens)), key=lambda k: -cand_lens[k])
        # The truth caption is assumed to be at position 0 before sorting.
        truth_idx = ind_sorted.index(0) if not is_testing else None
        cands = [candidate_labels[k] for k in ind_sorted]
        cand_vecs = [candidate_vecs[k] for k in ind_sorted]
        cand_lens = [cand_lens[k] for k in ind_sorted]
        cand_lens = torch.LongTensor(cand_lens)
        # Right-pad every candidate to the longest length with NULL tokens.
        padded_cands = torch.LongTensor(len(candidate_vecs),
                                        max(cand_lens)).fill_(self.NULL_IDX)
        if self.use_cuda:
            cand_lens = cand_lens.cuda()
            padded_cands = padded_cands.cuda()
        for i, cand in enumerate(cand_vecs):
            padded_cands[i, :cand.shape[0]] = cand
        return (padded_cands, cands, cand_lens, truth_idx)

    def batchify(self, *args, **kwargs):
        """Batchify with sorting always enabled."""
        kwargs['sort'] = True
        return super().batchify(*args, **kwargs)

    def train_step(self, batch):
        """Run one training update and return the top-1 caption per image."""
        images = torch.stack([self.transform(img) for img in batch.image])
        if self.use_cuda:
            images = images.cuda(non_blocking=True)
        text_lengths = torch.LongTensor(batch.label_lengths)
        if self.use_cuda:
            text_lengths = text_lengths.cuda()
        self.model.train()
        self.optimizer.zero_grad()
        img_embs, cap_embs = self.model(images, batch.label_vec, text_lengths)
        loss, ranks, top1 = self.criterion(img_embs, cap_embs)
        self.metrics['loss'] += loss.item()
        self.metrics['r@'] += ranks
        loss.backward()
        self.optimizer.step()
        predictions = []
        for score_idx in top1:
            predictions.append(batch.labels[score_idx])
        return Output(predictions, None)

    def eval_step(self, batch):
        """Rank each image's own candidate captions and return the top pick."""
        images = torch.stack([self.transform(img) for img in batch.image])
        if self.use_cuda:
            images = images.cuda(non_blocking=True)
        # Need to collate then sort the captions by length
        cands = [
            self.candidate_helper(label_cands_vec, label_cands, self.mode == 'test')
            for label_cands_vec, label_cands in
            zip(batch.candidate_vecs, batch.candidates)
        ]
        self.model.eval()
        # Obtain the image embeddings
        img_embs, _ = self.model(images, None, None)
        ranks = []
        top1 = []
        # Each image has their own caption candidates, so we need to
        # iteratively create the embeddings and rank
        for i, (cap, _, lens, truth_idx) in enumerate(cands):
            _, embs = self.model(None, cap, lens)
            # Hack to pass through the truth label's index to compute the
            # rank and top metrics
            offset = truth_idx if truth_idx is not None else 0
            _, rank, top = self.criterion(img_embs[i, :].unsqueeze(0),
                                          embs, offset)
            ranks += rank
            top1.append(top[0])
        self.metrics['r@'] += ranks
        predictions = []
        for i, score_idx in enumerate(top1):
            predictions.append(cands[i][1][score_idx])
        return Output(predictions, None)

    def report(self):
        """Return accumulated loss and recall@{1,5,10} over seen examples.

        NOTE(review): divides by len(ranks); calling this before any example
        has been processed would raise ZeroDivisionError.
        """
        m = {}
        m['loss'] = self.metrics['loss']
        ranks = np.asarray(self.metrics['r@'])
        m['r@1'] = len(np.where(ranks < 1)[0]) / len(ranks)
        m['r@5'] = len(np.where(ranks < 5)[0]) / len(ranks)
        m['r@10'] = len(np.where(ranks < 10)[0]) / len(ranks)
        for k, v in m.items():
            # clean up: rounds to sigfigs and converts tensors to floats
            m[k] = round_sigfigs(v, 4)
        return m
# This file is covered under the Apache 2.0 License listed here
# <https://github.com/fartashf/vsepp/blob/master/LICENSE> as it is a
# Derivative Work of the repo.
import torch
from torch import optim
from torch.nn.utils.rnn import pack_padded_sequence
import torch.nn as nn
import torchvision.models as models
import numpy as np
class VSEpp(nn.Module):
    """
    Joint visual-semantic embedding model.

    Model based on:
    - VSE++: Improving Visual-Semantic Embeddings with Hard Negatives
    `(Faghri et al. 2017) <arxiv.org/abs/1707.05612>`
    Original Implementation found here: <https://github.com/fartashf/vsepp>
    """

    def __init__(self, opt, dict):
        super().__init__()
        self.opt = opt
        self.dict = dict
        # Image-side encoder into the joint embedding space.
        self.img_enc = EncoderImage(embed_size=opt['embed_size'],
                                    finetune=opt['finetune'],
                                    cnn_type=opt['cnn_type'],
                                    no_imgnorm=opt['no_imgnorm'])
        # Text-side encoder into the same joint space.
        self.txt_enc = EncoderText(vocab_size=len(self.dict.tok2ind),
                                   word_dim=opt['word_dim'],
                                   embed_size=opt['embed_size'],
                                   num_layers=opt['num_layers'])

    def forward(self, images, captions, lengths):
        """Encode whichever modalities are given; None inputs pass through."""
        img_emb = None if images is None else self.img_enc(images)
        cap_emb = None if captions is None else self.txt_enc(captions, lengths)
        return img_emb, cap_emb

    def get_optim(self):
        """Build an Adam optimizer over the trainable parameters."""
        params = list(self.txt_enc.parameters())
        params += list(self.img_enc.fc.parameters())
        # The CNN backbone only trains when finetuning is requested.
        if self.opt['finetune']:
            params += list(self.img_enc.cnn.parameters())
        return optim.Adam(params, lr=float(self.opt['learning_rate']),
                          amsgrad=True)
def dot_sim(im, s):
    """
    Dot product similarity between all the image and sentence pairs
    """
    # (n_images x d) @ (d x n_sentences) -> pairwise similarity matrix.
    return torch.matmul(im, s.t())
def l2norm(X):
    """
    L2-normalize columns of X
    """
    # Per-row euclidean norm, kept as a column so division broadcasts.
    row_norms = X.pow(2).sum(dim=1, keepdim=True).sqrt()
    return X / row_norms
class ContrastiveLoss(nn.Module):
    """
    Compute contrastive loss.
    """

    def __init__(self, use_cuda, margin=0, max_violation=True):
        # NOTE(review): VseppCaptionAgent constructs this as
        # ContrastiveLoss(self.use_cuda), so the parsed --margin and
        # --max_violation options are never forwarded -- confirm intended.
        super().__init__()
        self.use_cuda = use_cuda
        self.margin = margin
        self.sim = dot_sim
        self.max_violation = max_violation

    def forward(self, im, caps, offset=0):
        """Return (loss, per-image truth-caption ranks, top-1 indices)."""
        # Compute the similarity of each image/caption pair
        scores = self.sim(im, caps)
        # Diagonal holds the matched (image_i, caption_i) scores.
        diagonal = scores.diag().view(im.shape[0], 1)
        d1 = diagonal.expand(scores.size())
        d2 = diagonal.t().expand(scores.size())
        # Caption retrieval score
        cost_cap = (self.margin + scores - d1).clamp(min=0)
        # image retrieval score
        cost_im = (self.margin + scores - d2).clamp(min=0)
        # Zero out the diagonal: positives must not count as violations.
        mask = torch.eye(im.shape[0]) > 0.5
        if self.use_cuda:
            mask = mask.cuda()
        cost_cap = cost_cap.masked_fill(mask, 0)
        cost_im = cost_im.masked_fill(mask, 0)
        # Compute the metrics (ranks, top1): sort each row descending.
        if self.use_cuda:
            sorted_ranks = np.flip(np.argsort(scores.detach().cpu().numpy()), 1)
        else:
            sorted_ranks = np.flip(np.argsort(scores.detach().numpy()), 1)
        top1 = sorted_ranks[:, 0]
        ranks = []
        for idx in range(im.shape[0]):
            # `offset` shifts the ground-truth column when a single image is
            # scored against an externally ordered candidate list.
            ranks.append(np.where(sorted_ranks[idx, :] == (idx + offset))[0][0])
        # keep the maximum violating negative for each query
        if self.max_violation:
            cost_cap = cost_cap.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_cap.sum() + cost_im.sum(), ranks, top1
class EncoderImage(nn.Module):
    """CNN image encoder: pixels -> (optionally normalized) joint embedding."""

    def __init__(self, embed_size, finetune=False, cnn_type='resnet152',
                 no_imgnorm=False):
        """Load pretrained CNN and replace top fc layer."""
        super().__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        # Load a pre-trained model
        self.cnn = self.get_cnn(cnn_type)
        # For efficient memory usage.
        for param in self.cnn.parameters():
            param.requires_grad = finetune
        # Replace the last fully connected layer of CNN with a new one
        # NOTE(review): only vgg* and resnet* are handled; any other arch
        # would leave self.fc undefined and break init_weights/forward --
        # confirm the supported --cnn_type values.
        if cnn_type.startswith('vgg'):
            self.fc = nn.Linear(self.cnn.classifier._modules['6'].in_features,
                                embed_size)
            self.cnn.classifier = nn.Sequential(
                *list(self.cnn.classifier.children())[:-1])
        elif cnn_type.startswith('resnet'):
            # get_cnn wrapped resnets in DataParallel, hence `.module`.
            self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)
            self.cnn.module.fc = nn.Sequential()
        self.init_weights()

    def get_cnn(self, arch):
        """Load a pretrained CNN and parallelize over GPUs
        """
        print("=> using pre-trained model '{}'".format(arch))
        model = models.__dict__[arch](pretrained=True)
        # alexnet/vgg only parallelize the conv features; others wrap whole.
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = nn.DataParallel(model.features)
        else:
            model = nn.DataParallel(model)
        return model

    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
                                  self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def forward(self, images):
        """Extract image feature vectors."""
        features = self.cnn(images)
        # normalization in the image embedding space
        features = l2norm(features)
        # linear projection to the joint embedding space
        features = self.fc(features)
        # normalization in the joint embedding space
        if not self.no_imgnorm:
            features = l2norm(features)
        return features
class EncoderText(nn.Module):
    """GRU caption encoder: token ids -> L2-normalized joint embedding."""

    def __init__(self, vocab_size, word_dim, embed_size, num_layers):
        super().__init__()
        self.embed_size = embed_size
        # word embedding
        self.embed = nn.Embedding(vocab_size, word_dim)
        # caption embedding
        self.rnn = nn.GRU(word_dim, embed_size, num_layers, batch_first=True)
        self.init_weights()

    def init_weights(self):
        # Small uniform init for the embedding table.
        self.embed.weight.data.uniform_(-0.1, 0.1)

    def forward(self, x, lengths):
        """Handles variable size captions"""
        # Look up embeddings, then pack so the GRU skips padding.
        embedded = self.embed(x)
        packed = pack_padded_sequence(embedded, lengths, batch_first=True)
        # The GRU's final hidden state summarizes each caption.
        _, hidden = self.rnn(packed)
        # Drop the num_layers dim and normalize into the joint space.
        return l2norm(hidden.squeeze(0))
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
from functools import lru_cache
def opt_to_kwargs(opt):
"""Get kwargs for seq2seq from opt."""
kwargs = {}
for k in ['mem_size', 'time_features', 'position_encoding', 'hops']:
if k in opt:
kwargs[k] = opt[k]
return kwargs
class MemNN(nn.Module):
    """Memory Network module.

    Embeds a query, attends over embedded memories for ``hops`` rounds,
    and scores candidate answers against the resulting state.
    """
    def __init__(
        self, num_features, embedding_size, hops=1,
        mem_size=32, time_features=False, position_encoding=False,
        dropout=0, padding_idx=0,
    ):
        """Initialize memnn model.
        See cmdline args in MemnnAgent for description of arguments.
        """
        super().__init__()
        # prepare features
        self.hops = hops
        # time features: we learn an embedding for each memory slot
        self.extra_features = 0
        if time_features:
            self.extra_features += mem_size
            # vocab indices [num_features, num_features + mem_size) are
            # reserved for the per-slot time-feature embeddings
            self.time_features = torch.LongTensor(
                range(num_features, num_features + mem_size))
        def embedding(use_extra_feats=True):
            # helper: build an Embed layer, optionally sized to also cover
            # the reserved time-feature indices
            if use_extra_feats:
                return Embed(num_features + self.extra_features,
                             embedding_size,
                             position_encoding=position_encoding,
                             padding_idx=padding_idx)
            else:
                return Embed(num_features, embedding_size,
                             position_encoding=position_encoding,
                             padding_idx=padding_idx)
        # TODO: add token dropout?
        # TODO: add dropout
        # self.dropout = nn.Dropout(dropout)
        # TODO: support more weight tying options?
        # separate (untied) lookup tables for query, memory in/out, answers
        self.query_lt = embedding()
        self.in_memory_lt = embedding()
        self.out_memory_lt = embedding()
        self.answer_embedder = embedding()
        self.memory_hop = Hop(embedding_size)
    def _score(self, output, cands):
        # 2-dim cands: a single candidate set shared across the batch;
        # 3-dim cands: per-example candidate sets (batched matmul)
        if cands.dim() == 2:
            return torch.matmul(output, cands.t())
        elif cands.dim() == 3:
            return torch.bmm(output.unsqueeze(1),
                             cands.transpose(1, 2)).squeeze(1)
        else:
            raise RuntimeError('Unexpected candidate dimensions {}'
                               ''.format(cands.dim()))
    def forward(self, xs, mems, cands=None):
        """One forward step.

        :param xs: (bsz x seqlen) LongTensor queries to the model
        :param mems: (bsz x num_mems x seqlen) LongTensor memories, or None
            to skip the memory hops entirely
        :param cands: (num_cands x seqlen) or (bsz x num_cands x seqlen)
            LongTensor with candidates to rank

        :returns: scores
            scores contains the model's predicted scores.
            if cand_params is None, the candidates are the vocabulary;
            otherwise, these scores are over the candidates provided.
            (bsz x num_cands)
        """
        state = self.query_lt(xs)
        if mems is not None:
            # when mems is None (`nomemnn` mode) this branch is skipped and
            # only the query/answer embeddings are used
            in_memory_embs = self.in_memory_lt(mems).transpose(1, 2)
            out_memory_embs = self.out_memory_lt(mems)
            for _ in range(self.hops):
                state = self.memory_hop(state, in_memory_embs, out_memory_embs)
        if cands is not None:
            # embed candidates
            cand_embs = self.answer_embedder(cands)
        else:
            # rank all possible tokens: score against the raw answer table
            cand_embs = self.answer_embedder.weight
        scores = self._score(state, cand_embs)
        return scores
class Embed(nn.Embedding):
    """Embed sequences for MemNN model.
    Applies Position Encoding if enabled and currently applies BOW sum.
    """
    def __init__(self, *args, position_encoding=False, reduction='mean',
                 **kwargs):
        """Initialize custom Embedding layer.

        :param position_encoding: apply positional encoding transformation
            on input sequences
        :param reduction: reduction strategy to sequences, default 'mean'
        """
        self.position_encoding = position_encoding
        self.reduction = reduction
        super().__init__(*args, **kwargs)
    def _reduce(self, embs, input):
        # last dimension is embedding, do operation over dim before that
        if self.reduction == 'sum':
            return embs.sum(-2)
        elif self.reduction == 'mean':
            # this is more fair than mean(-2) since mean includes null tokens
            # (we divide by the count of nonzero tokens, not padded length)
            sum = embs.sum(-2)
            lens = input.ne(0).sum(-1).unsqueeze(-1).float()
            return sum / lens
        else:
            raise RuntimeError(
                'reduction method {} not supported'.format(self.reduction))
    def forward(self, input):
        """Return BOW embedding with PE reweighting if enabled.

        :param input: (bsz x seqlen) LongTensor
        :returns: (bsz x esz) FloatTensor
        """
        embs = super().forward(input)
        if self.position_encoding:
            if embs.dim() == 3:
                # NOTE(review): the first dim is iterated here under the
                # name num_mems, but (bsz x seqlen) queries also take this
                # path -- confirm intended shapes against MemNN.forward
                num_mems, seqlen, embdim = embs.size()
                pe = self.position_matrix(seqlen, embdim, embs.is_cuda)
                for i in range(num_mems):
                    embs[i] *= pe
            else:
                # 4-dim case: (bsz x num_mems x seqlen x esz) memories
                bsz, num_mems, seqlen, embdim = embs.size()
                pe = self.position_matrix(seqlen, embdim, embs.is_cuda)
                for i in range(num_mems):
                    embs[:, i] *= pe
        return self._reduce(embs, input)
    @staticmethod
    @lru_cache(maxsize=128)
    def position_matrix(J, d, use_cuda):
        """Build matrix of position encoding coeffiencents.
        See https://papers.nips.cc/paper/5846-end-to-end-memory-networks,
        section 4.1 Model Details: Sentence Representation.

        Deterministic in (J, d, use_cuda), hence safe to lru_cache.

        :param J: number of words in the sequence
        :param d: dimension of the embedding
        :returns: Position Encoding matrix
        """
        m = torch.Tensor(J, d)
        for k in range(1, d + 1):
            for j in range(1, J + 1):
                m[j - 1, k - 1] = (1 - j / J) - (k / d) * (1 - 2 * j / J)
        if use_cuda:
            m = m.cuda()
        return m
class Hop(nn.Module):
    """One Memory Network hop.

    Scores each memory against the query, softmaxes the scores into
    attention weights, takes the weighted sum of the output memory
    embeddings, and adds the (optionally rotated) query back in.
    """
    def __init__(self, embedding_size, rotate=True):
        """Set up the optional linear rotation of the query."""
        super().__init__()
        if rotate:
            self.rotate = nn.Linear(embedding_size, embedding_size, bias=False)
        else:
            # identity: pass the query through unchanged
            self.rotate = lambda state: state
        self.softmax = nn.Softmax(dim=1)

    def forward(self, query_embs, in_mem_embs, out_mem_embs):
        """Compute one MemNN hop step.

        :param query_embs: (bsz x esz) embedding of queries
        :param in_mem_embs: (bsz x esz x num_mems) memory embeddings used
            for computing attention scores
        :param out_mem_embs: (bsz x num_mems x esz) memory embeddings used
            for building the output
        :returns: (bsz x esz) output state
        """
        scores = torch.bmm(query_embs.unsqueeze(1), in_mem_embs).squeeze(1)
        weights = self.softmax(scores)
        attended = torch.bmm(weights.unsqueeze(1), out_mem_embs).squeeze(1)
        return attended + self.rotate(query_embs)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
class Starspace(nn.Module):
    """StarSpace model: embeds inputs and candidates into a shared space.

    Uses one (or two, when not shared) sparse embedding tables and returns
    paired input/candidate encodings for ranking.
    """
    def __init__(self, opt, num_features, dict):
        super().__init__()
        # sparse lookup table for the input side; index 0 is padding and
        # embeddings are clipped to opt['embeddingnorm']
        self.lt = nn.Embedding(num_features, opt['embeddingsize'], 0,
                               sparse=True, max_norm=opt['embeddingnorm'])
        if not opt['tfidf']:
            # without tfidf weighting the encoder does not need the dict
            dict = None
        self.encoder = Encoder(self.lt, dict)
        if not opt['share_embeddings']:
            # separate table/encoder for the candidate side
            self.lt2 = nn.Embedding(num_features, opt['embeddingsize'], 0,
                                    sparse=True, max_norm=opt['embeddingnorm'])
            self.encoder2 = Encoder(self.lt2, dict)
        else:
            self.encoder2 = self.encoder
        self.opt = opt
        # optional linear transform on the input encoding (applied when
        # opt['lins'] > 0)
        self.lin = nn.Linear(opt['embeddingsize'], opt['embeddingsize'], bias=False)
        self.lins = 0
        if 'lins' in opt:
            self.lins = opt['lins']
    def forward(self, xs, ys=None, cands=None):
        """Encode input and candidates.

        :returns: pair of stacked tensors: the input encoding repeated once
            per comparison, and the candidate encodings (true label first
            when ``ys`` is given).
        """
        xs_enc = []
        ys_enc = []
        xs_emb = self.encoder(xs)
        if self.lins > 0:
            xs_emb = self.lin(xs_emb)
        if ys is not None:
            # training includes the correct example first.
            xs_enc.append(xs_emb)
            ys_enc.append(self.encoder2(ys))
        for c in cands:
            xs_enc.append(xs_emb)
            c_emb = self.encoder2(c)
            ys_enc.append(c_emb)
        return torch.cat(xs_enc), torch.cat(ys_enc)
class Encoder(nn.Module):
    """Bag-of-embeddings encoder, optionally tf-idf weighted."""
    def __init__(self, shared_lt, dict):
        super().__init__()
        self.lt = shared_lt
        if dict is not None:
            # precompute an inverse-log-frequency weight per vocab index
            l = len(dict)
            freqs = torch.Tensor(l)
            for i in range(l):
                ind = dict.ind2tok[i]
                freq = dict.freq[ind]
                freqs[i] = 1.0 / (1.0 + math.log(1.0 + freq))
            self.freqs = freqs
        else:
            self.freqs = None
    def forward(self, xs):
        xs_emb = self.lt(xs)
        if self.freqs is not None:
            # tfidf embeddings
            # NOTE(review): only row 0 of the batch is read (xs.data[0]) and
            # squeeze(0) is applied below, so this path appears to assume
            # batch size 1 -- confirm against callers
            l = xs.size(1)
            w = torch.Tensor(l)
            for i in range(l):
                w[i] = self.freqs[xs.data[0][i]]
            w = w.mul(1 / w.norm())
            xs_emb = xs_emb.squeeze(0).t().matmul(w.unsqueeze(1)).t()
        else:
            # basic embeddings (faster)
            xs_emb = xs_emb.mean(1)
        return xs_emb
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch.nn as nn
import torch.nn.functional as F
import torch
from .gru import BayesianGRU
from .loadstates import (
load_dictionary,
load_emb_params,
make_emb_state_dict,
load_rnn_params,
make_bayesian_state_dict,
make_gru_state_dict,
)
class Mlb(nn.Module):
    """Base MLB VQA model: question encoding and classifier plumbing.

    Subclasses (MlbNoAtt / MlbAtt) add the visual fusion and define
    ``self.linear_classif``.
    """
    def __init__(self, opt, vocab):
        super(Mlb, self).__init__()
        self.opt = opt
        # training flag controls dropout behavior in the forward passes
        self.training = self.opt.get('datatype').startswith('train')
        self.dict = vocab
        self.vocab_words = self.dict.tok2ind.keys()
        self.vocab_answers = self.dict.ans2ind.keys()
        self.num_classes = len(self.vocab_answers)
        # Modules
        # 620-dim embeddings to match the pretrained skip-thought table
        # loaded in set_init_states
        self.embedding = nn.Embedding(
            num_embeddings=len(self.dict.tok2ind),
            embedding_dim=620,
            padding_idx=self.dict.tok2ind[self.dict.null_token],
            sparse=False
        )
        if self.opt['use_bayesian']:
            self.rnn = BayesianGRU(620,
                                   self.opt['dim_q'],
                                   dropout=self.opt['dropout_st'])
        else:
            self.rnn = nn.GRU(input_size=620,
                              hidden_size=self.opt['dim_q'],
                              batch_first=True,
                              dropout=self.opt['dropout_st'])
    def process_lengths(self, input):
        # true (unpadded) length of each sequence: max length minus the
        # number of zero (padding) tokens
        max_length = input.size(1)
        if input.size(0) != 1:
            sub = input.eq(0).sum(1).squeeze(0)
        else:
            sub = input.eq(0).sum(1)
        lengths = list(max_length - sub)
        return lengths
    def select_last(self, x, lengths):
        # select the hidden state at each sequence's true final timestep
        batch_size = x.size(0)
        mask = x.new().resize_as_(x).fill_(0)
        for i in range(batch_size):
            mask[i][lengths[i] - 1].fill_(1)
        x = x.mul(mask)
        x = x.sum(1).view(batch_size, self.opt['dim_q'])
        return x
    def _classif(self, x):
        # activation -> dropout -> final linear classifier
        # (linear_classif is defined by subclasses)
        x = getattr(F, self.opt['activation_cls'])(x)
        x = F.dropout(x, p=self.opt['dropout_cls'], training=self.training)
        x = self.linear_classif(x)
        return x
    def forward_st(self, input, lengths=None):
        """Encode a batch of token ids into fixed-size question vectors."""
        if lengths is None:
            lengths = self.process_lengths(input)
        x = self.embedding(input)
        max_length = max(lengths)
        # NOTE(review): nn.GRU.forward does not take a max_length kwarg;
        # this call looks only valid on the BayesianGRU path -- confirm
        x, hn = self.rnn(x, max_length=max_length)  # seq2seq
        if lengths:
            x = self.select_last(x, lengths)
        return x
    def save(self, path=None):
        """Save model + optimizer state dicts and opt to path
        (defaults to opt['model_file'])."""
        path = self.opt.get('model_file', None) if path is None else path
        if path and hasattr(self, 'embedding'):
            print("[ saving model: " + path + " ]")
            model = {
                'model': self.state_dict(),
                'optim': self.optim.state_dict(),
                'opt': self.opt
            }
            with open(path, 'wb') as write:
                torch.save(model, write)
    def set_states(self, states):
        """Set the state dicts of the modules from saved states."""
        self.load_state_dict(states['model'])
    def set_init_states(self):
        """Set the initial state dicts of the modules from saved states."""
        # load pretrained skip-thought word embeddings and GRU weights
        dictionary = load_dictionary(self.opt['download_path'])
        parameters = load_emb_params(self.opt['download_path'])
        state_dict = make_emb_state_dict(dictionary,
                                         parameters,
                                         self.dict.ind2tok.values())
        self.embedding.load_state_dict(state_dict)
        parameters = load_rnn_params(self.opt['download_path'])
        if self.opt['use_bayesian']:
            state_dict = make_bayesian_state_dict(parameters)
        else:
            state_dict = make_gru_state_dict(parameters)
        self.rnn.load_state_dict(state_dict)
        return self.rnn
    def get_optim(self):
        """Build (and possibly restore) an Adam optimizer over the
        trainable parameters."""
        optim_class = torch.optim.Adam
        self.optim = optim_class(filter(lambda p: p.requires_grad,
                                        self.parameters()),
                                 lr=self.opt['lr'])
        if self.states:
            self.optim.load_state_dict(self.states['optim'])
        return self.optim
class MlbNoAtt(Mlb):
    """MLB VQA model without attention.

    Projects the question and image features to a common size, fuses them
    with a single Hadamard product, and classifies the result.
    """
    def __init__(self, opt, vocab, states):
        super(MlbNoAtt, self).__init__(opt, vocab)
        self.linear_v = nn.Linear(self.opt['dim_v'], self.opt['dim_h'])
        self.linear_q = nn.Linear(self.opt['dim_q'], self.opt['dim_h'])
        self.linear_classif = nn.Linear(self.opt['dim_h'], self.num_classes)
        self.states = states
        if self.states:
            # restore previously-saved weights
            self.set_states(self.states)
        else:
            # otherwise start from pretrained skip-thought states
            self.set_init_states()

    def forward(self, input_v, input_q):
        question = self.forward_st(input_q)
        fused = self.forward_fusion(input_v, question)
        return self._classif(fused)

    def forward_fusion(self, input_v, input_q):
        """Project both modalities to dim_h and take their Hadamard product."""
        act_v = getattr(F, self.opt['activation_v'])
        act_q = getattr(F, self.opt['activation_q'])
        # visual (cnn features)
        v = F.dropout(input_v, p=self.opt['dropout_v'], training=self.training)
        v = act_v(self.linear_v(v))
        # question (rnn features)
        q = F.dropout(input_q, p=self.opt['dropout_q'], training=self.training)
        q = act_q(self.linear_q(q))
        # hadamard product
        return v * q
class MlbAtt(Mlb):
    """MLB VQA model with multi-glimpse attention over spatial image features."""
    def __init__(self, opt, vocab, states):
        super(MlbAtt, self).__init__(opt, vocab)
        # 1x1 convolutions act as per-location linear maps on the feature map
        self.conv_v_att = nn.Conv2d(self.opt['dim_v'],
                                    self.opt['dim_att_h'],
                                    1, 1)
        self.linear_q_att = nn.Linear(self.opt['dim_q'], self.opt['dim_att_h'])
        self.conv_att = nn.Conv2d(self.opt['dim_att_h'],
                                  self.opt['num_glimpses'],
                                  1, 1)
        if self.opt['original_att']:
            # single fusion layer over the concatenated glimpse vectors
            self.linear_v_fusion = nn.Linear(self.opt['dim_v'] *
                                             self.opt['num_glimpses'],
                                             self.opt['dim_h'])
            self.linear_q_fusion = nn.Linear(self.opt['dim_q'],
                                             self.opt['dim_h'])
            self.linear_classif = nn.Linear(self.opt['dim_h'],
                                            self.num_classes)
        else:
            # one fusion projection per glimpse
            self.list_linear_v_fusion = nn.ModuleList(
                [nn.Linear(self.opt['dim_v'], self.opt['dim_h'])
                 for i in range(self.opt['num_glimpses'])]
            )
            self.linear_q_fusion = nn.Linear(self.opt['dim_q'],
                                             self.opt['dim_h'] *
                                             self.opt['num_glimpses'])
            self.linear_classif = nn.Linear(
                self.opt['dim_h'] * self.opt['num_glimpses'],
                self.num_classes
            )
        self.states = states
        if self.states:
            # set loaded states if applicable
            self.set_states(self.states)
        else:
            self.set_init_states()
    def forward(self, input_v, input_q):
        x_q = self.forward_st(input_q)
        list_v_att = self.forward_attention(input_v, x_q)
        x = self.forward_glimpses(list_v_att, x_q)
        x = self._classif(x)
        return x
    def forward_attention(self, input_v, x_q_vec):
        """Return one attended visual vector per glimpse.

        :param input_v: (bsz x dim_v x width x height) image feature map
        :param x_q_vec: (bsz x dim_q) question encoding
        :returns: list of num_glimpses tensors, each (bsz x dim_v)
        """
        batch_size = input_v.size(0)
        width = input_v.size(2)
        height = input_v.size(3)
        # Process visual before fusion
        x_v = input_v
        x_v = F.dropout(x_v,
                        p=self.opt['dropout_att_v'],
                        training=self.training)
        x_v = self.conv_v_att(x_v)
        x_v = getattr(F, self.opt['activation_att_v'])(x_v)
        x_v = x_v.view(batch_size, self.opt['dim_att_h'], width * height)
        x_v = x_v.transpose(1, 2)
        # Process question before fusion
        x_q = F.dropout(x_q_vec,
                        p=self.opt['dropout_att_q'],
                        training=self.training)
        x_q = self.linear_q_att(x_q)
        x_q = getattr(F, self.opt['activation_att_q'])(x_q)
        x_q = x_q.view(batch_size, 1, self.opt['dim_att_h'])
        # broadcast the question vector to every spatial location
        x_q = x_q.expand(batch_size, width * height, self.opt['dim_att_h'])
        # First multimodal fusion
        x_att = self.forward_fusion_att(x_v, x_q)
        x_att = getattr(F, self.opt['activation_att_mm'])(x_att)
        # Process attention vectors
        x_att = F.dropout(x_att,
                          p=self.opt['dropout_att_mm'],
                          training=self.training)
        # back to NCHW layout so conv_att can produce per-glimpse maps
        x_att = x_att.view(batch_size, width, height, self.opt['dim_att_h'])
        x_att = x_att.transpose(2, 3).transpose(1, 2)
        x_att = self.conv_att(x_att)
        x_att = x_att.view(batch_size,
                           self.opt['num_glimpses'],
                           width * height)
        list_att_split = torch.split(x_att, 1, dim=1)
        list_att = []
        for x_att in list_att_split:
            x_att = x_att.contiguous()
            x_att = x_att.view(batch_size, width * height)
            # NOTE(review): F.softmax without dim relies on the deprecated
            # dim-inference behavior -- confirm before upgrading torch
            x_att = F.softmax(x_att)
            list_att.append(x_att)
        # Apply attention vectors to input_v
        x_v = input_v.view(batch_size, self.opt['dim_v'], width * height)
        x_v = x_v.transpose(1, 2)
        list_v_att = []
        for x_att in list_att:
            x_att = x_att.view(batch_size, width * height, 1)
            x_att = x_att.expand(batch_size, width * height, self.opt['dim_v'])
            # attention-weighted sum over spatial locations
            x_v_att = torch.mul(x_att, x_v)
            x_v_att = x_v_att.sum(1)
            x_v_att = x_v_att.view(batch_size, self.opt['dim_v'])
            list_v_att.append(x_v_att)
        return list_v_att
    def forward_glimpses(self, list_v_att, x_q_vec):
        """Fuse the attended visual vectors with the question encoding."""
        # Process visual for each glimpses
        list_v = []
        if self.opt['original_att']:
            x_v = torch.cat(list_v_att, 1)
            x_v = F.dropout(x_v,
                            p=self.opt['dropout_v'],
                            training=self.training)
            x_v = self.linear_v_fusion(x_v)
            x_v = getattr(F, self.opt['activation_v'])(x_v)
        else:
            for glimpse_id, x_v_att in enumerate(list_v_att):
                x_v = F.dropout(x_v_att,
                                p=self.opt['dropout_v'],
                                training=self.training)
                x_v = self.list_linear_v_fusion[glimpse_id](x_v)
                x_v = getattr(F, self.opt['activation_v'])(x_v)
                list_v.append(x_v)
            x_v = torch.cat(list_v, 1)
        # Process question
        x_q = F.dropout(x_q_vec,
                        p=self.opt['dropout_q'],
                        training=self.training)
        x_q = self.linear_q_fusion(x_q)
        x_q = getattr(F, self.opt['activation_q'])(x_q)
        # Second multimodal fusion
        x = self.forward_fusion_cls(x_v, x_q)
        return x
    def forward_fusion_att(self, input_v, input_q):
        # elementwise (Hadamard) product fusion
        x_att = torch.mul(input_v, input_q)
        return x_att
    def forward_fusion_cls(self, input_v, input_q):
        # elementwise (Hadamard) product fusion
        x_att = torch.mul(input_v, input_q)
        return x_att
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import numpy
import torch
from collections import OrderedDict
urls = {}
urls['dictionary'] = 'http://www.cs.toronto.edu/~rkiros/models/dictionary.txt'
urls['utable'] = 'http://www.cs.toronto.edu/~rkiros/models/utable.npy'
urls['uni_skip'] = 'http://www.cs.toronto.edu/~rkiros/models/uni_skip.npz'
def load_dictionary(download_dir):
    """Load (downloading on first use) the skip-thought vocabulary file.

    :param download_dir: directory in which ``dictionary.txt`` is cached
    :returns: dict mapping each word to its line index in the file
    """
    path_dico = os.path.join(download_dir, 'dictionary.txt')
    if not os.path.exists(path_dico):
        # use os.makedirs instead of shelling out to `mkdir -p`
        os.makedirs(download_dir, exist_ok=True)
        os.system('wget {} -P {}'.format(urls['dictionary'], download_dir))
    with open(path_dico, 'r') as handle:
        dico_list = handle.readlines()
    dico = {word.strip(): idx for idx, word in enumerate(dico_list)}
    return dico
def load_emb_params(download_dir):
    """Load (downloading on first use) the skip-thought embedding table.

    :param download_dir: directory in which ``utable.npy`` is cached
    :returns: numpy array of pretrained word embeddings
    """
    table_name = 'utable'
    path_params = os.path.join(download_dir, table_name + '.npy')
    if not os.path.exists(path_params):
        # use os.makedirs instead of shelling out to `mkdir -p`
        os.makedirs(download_dir, exist_ok=True)
        os.system('wget {} -P {}'.format(urls[table_name], download_dir))
    params = numpy.load(path_params, encoding='latin1')  # to load from python2
    return params
def load_rnn_params(download_dir):
    """Load (downloading on first use) the skip-thought GRU parameters.

    :param download_dir: directory in which ``uni_skip.npz`` is cached
    :returns: numpy NpzFile with the pretrained encoder arrays
    """
    skip_name = 'uni_skip'
    path_params = os.path.join(download_dir, skip_name + '.npz')
    if not os.path.exists(path_params):
        # use os.makedirs instead of shelling out to `mkdir -p`
        os.makedirs(download_dir, exist_ok=True)
        os.system('wget {} -P {}'.format(urls[skip_name], download_dir))
    params = numpy.load(path_params, encoding='latin1')  # to load from python2
    return params
def make_emb_state_dict(dictionary, parameters, vocab):
    """Build an nn.Embedding state dict for ``vocab`` from pretrained params.

    Words missing from ``dictionary`` receive the pretrained UNK vector;
    a warning is printed with the count of such words.

    :param dictionary: word -> row index into ``parameters``
    :param parameters: numpy array of pretrained 620-dim embeddings
    :param vocab: iterable of words, in embedding-row order
    :returns: OrderedDict with a single 'weight' tensor
    """
    weight = torch.zeros(len(vocab), 620)
    unk_row = parameters[dictionary['UNK']]
    nb_unknown = 0
    for row, word in enumerate(vocab):
        if word in dictionary:
            vec = parameters[dictionary[word]]
        else:
            vec = unk_row
            nb_unknown += 1
        weight[row] = torch.from_numpy(vec)
    state_dict = OrderedDict({'weight': weight})
    if nb_unknown > 0:
        print('Warning: {}/{} words are not in dictionary, thus set UNK'
              .format(nb_unknown, len(dictionary)))
    return state_dict
def make_gru_state_dict(p):
    """Map skip-thought GRU parameters onto ``nn.GRU`` parameter names.

    The torch GRU stacks the reset/input gate weights (first 4800 rows)
    and the candidate-gate weights (last 2400 rows) into single tensors.
    Skip-thought stores all bias on the input side, so bias_hh stays zero.

    :param p: mapping with encoder_W/Wx/b/bx/U/Ux numpy arrays
    :returns: OrderedDict usable with ``nn.GRU.load_state_dict``
    """
    s = OrderedDict()
    s['bias_ih_l0'] = torch.zeros(7200)
    s['bias_hh_l0'] = torch.zeros(7200)  # must stay equal to 0
    s['weight_ih_l0'] = torch.zeros(7200, 620)
    s['weight_hh_l0'] = torch.zeros(7200, 2400)
    gate_rows, new_rows = slice(0, 4800), slice(4800, 7200)
    s['weight_ih_l0'][gate_rows] = torch.from_numpy(p['encoder_W']).t()
    s['weight_ih_l0'][new_rows] = torch.from_numpy(p['encoder_Wx']).t()
    s['bias_ih_l0'][gate_rows] = torch.from_numpy(p['encoder_b'])
    s['bias_ih_l0'][new_rows] = torch.from_numpy(p['encoder_bx'])
    s['weight_hh_l0'][gate_rows] = torch.from_numpy(p['encoder_U']).t()
    s['weight_hh_l0'][new_rows] = torch.from_numpy(p['encoder_Ux']).t()
    return s
def make_bayesian_state_dict(p):
    """Map skip-thought GRU parameters onto BayesianGRUCell parameter names.

    Skip-thought stores the reset and input gates stacked together
    (2400 rows each); they are split apart here since BayesianGRUCell
    keeps a separate Linear per gate.

    :param p: mapping with encoder_W/Wx/b/bx/U/Ux numpy arrays
    :returns: OrderedDict usable with ``BayesianGRU.load_state_dict``
    """
    gates_w = torch.from_numpy(p['encoder_W']).t()
    gates_b = torch.from_numpy(p['encoder_b'])
    gates_u = torch.from_numpy(p['encoder_U']).t()
    s = OrderedDict()
    s['gru_cell.weight_ir.weight'] = gates_w[:2400]
    s['gru_cell.weight_ii.weight'] = gates_w[2400:]
    s['gru_cell.weight_in.weight'] = torch.from_numpy(p['encoder_Wx']).t()
    s['gru_cell.weight_ir.bias'] = gates_b[:2400]
    s['gru_cell.weight_ii.bias'] = gates_b[2400:]
    s['gru_cell.weight_in.bias'] = torch.from_numpy(p['encoder_bx'])
    s['gru_cell.weight_hr.weight'] = gates_u[:2400]
    s['gru_cell.weight_hi.weight'] = gates_u[2400:]
    s['gru_cell.weight_hn.weight'] = torch.from_numpy(p['encoder_Ux']).t()
    return s
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
from .dropout import SequentialDropout
class AbstractGRUCell(nn.Module):
    """Base GRU cell holding the six gate projections.

    Naming: weight_i* act on the input, weight_h* on the hidden state;
    *r = reset gate, *i = input (update) gate, *n = new (candidate) gate.
    These attribute names are part of the saved-state interface (see
    make_bayesian_state_dict), so they must not be renamed.
    """
    def __init__(self, input_size, hidden_size,
                 bias_ih=True, bias_hh=False):
        super(AbstractGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias_ih = bias_ih
        self.bias_hh = bias_hh
        # Modules
        self.weight_ir = nn.Linear(input_size, hidden_size, bias=bias_ih)
        self.weight_ii = nn.Linear(input_size, hidden_size, bias=bias_ih)
        self.weight_in = nn.Linear(input_size, hidden_size, bias=bias_ih)
        self.weight_hr = nn.Linear(hidden_size, hidden_size, bias=bias_hh)
        self.weight_hi = nn.Linear(hidden_size, hidden_size, bias=bias_hh)
        self.weight_hn = nn.Linear(hidden_size, hidden_size, bias=bias_hh)
    def forward(self, x, hx=None):
        # subclasses implement the actual gate arithmetic
        raise NotImplementedError
class GRUCell(AbstractGRUCell):
    """Standard GRU cell built from the gate projections in AbstractGRUCell."""
    def __init__(self, input_size, hidden_size,
                 bias_ih=True, bias_hh=False):
        super(GRUCell, self).__init__(input_size, hidden_size,
                                      bias_ih, bias_hh)

    def forward(self, x, hx=None):
        """Run one GRU step.

        :param x: (bsz x input_size) input at this timestep
        :param hx: (bsz x hidden_size) previous hidden state, or None to
            start from zeros
        :returns: (bsz x hidden_size) new hidden state
        """
        if hx is None:
            # bugfix: `.fill_(0)` was previously applied to the size tuple
            # inside resize_(...) rather than to the resized tensor, which
            # raised AttributeError whenever hx was None
            # (same form as BayesianGRUCell.forward)
            hx = x.new().resize_((x.size(0), self.hidden_size)).fill_(0)
        r = F.sigmoid(self.weight_ir(x) + self.weight_hr(hx))
        i = F.sigmoid(self.weight_ii(x) + self.weight_hi(hx))
        n = F.tanh(self.weight_in(x) + r * self.weight_hn(hx))
        hx = (1 - i) * n + i * hx
        return hx
class BayesianGRUCell(AbstractGRUCell):
    """GRU cell whose inputs pass through per-gate SequentialDropout modules.

    Each gate input has its own dropout module; end_of_sequence() notifies
    them when a sequence finishes (presumably so fresh masks are drawn for
    the next sequence -- see SequentialDropout in .dropout).
    """
    def __init__(self, input_size, hidden_size,
                 bias_ih=True, bias_hh=False,
                 dropout=0.25):
        super(BayesianGRUCell, self).__init__(input_size, hidden_size,
                                              bias_ih, bias_hh)
        self.set_dropout(dropout)
    def set_dropout(self, dropout):
        # (re)build one SequentialDropout per gate input
        self.dropout = dropout
        self.drop_ir = SequentialDropout(p=dropout)
        self.drop_ii = SequentialDropout(p=dropout)
        self.drop_in = SequentialDropout(p=dropout)
        self.drop_hr = SequentialDropout(p=dropout)
        self.drop_hi = SequentialDropout(p=dropout)
        self.drop_hn = SequentialDropout(p=dropout)
    def end_of_sequence(self):
        # propagate the end-of-sequence signal to every dropout module
        self.drop_ir.end_of_sequence()
        self.drop_ii.end_of_sequence()
        self.drop_in.end_of_sequence()
        self.drop_hr.end_of_sequence()
        self.drop_hi.end_of_sequence()
        self.drop_hn.end_of_sequence()
    def forward(self, x, hx=None):
        # start from a zero hidden state when none is given
        if hx is None:
            hx = x.new().resize_((x.size(0), self.hidden_size)).fill_(0)
        # apply the per-gate dropout masks to the inputs
        x_ir = self.drop_ir(x)
        x_ii = self.drop_ii(x)
        x_in = self.drop_in(x)
        x_hr = self.drop_hr(hx)
        x_hi = self.drop_hi(hx)
        x_hn = self.drop_hn(hx)
        # standard GRU gate arithmetic
        r = F.sigmoid(self.weight_ir(x_ir) + self.weight_hr(x_hr))
        i = F.sigmoid(self.weight_ii(x_ii) + self.weight_hi(x_hi))
        n = F.tanh(self.weight_in(x_in) + r * self.weight_hn(x_hn))
        hx = (1 - i) * n + i * hx
        return hx
class AbstractGRU(nn.Module):
    """Base class for GRUs unrolled one timestep at a time.

    Subclasses provide the actual cell by implementing ``_load_gru_cell``
    (which must set ``self.gru_cell``).
    """
    def __init__(self, input_size, hidden_size,
                 bias_ih=True, bias_hh=False):
        super(AbstractGRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias_ih = bias_ih
        self.bias_hh = bias_hh
        self._load_gru_cell()

    def _load_gru_cell(self):
        """Instantiate ``self.gru_cell``; implemented by subclasses."""
        raise NotImplementedError

    def forward(self, x, hx=None, max_length=None):
        """Unroll the cell over the time dimension of ``x``.

        :param x: (bsz x seqlen x input_size) input sequence
        :param hx: optional initial hidden state
        :param max_length: number of steps to unroll (defaults to seqlen)
        :returns: ((bsz x max_length x hidden_size) outputs, final hidden)
        """
        bsz = x.size(0)
        if max_length is None:
            max_length = x.size(1)
        steps = []
        for t in range(max_length):
            hx = self.gru_cell(x[:, t, :], hx=hx)
            steps.append(hx.view(bsz, 1, self.hidden_size))
        return torch.cat(steps, 1), hx
class GRU(AbstractGRU):
    """AbstractGRU unrolled with the plain (non-Bayesian) GRUCell."""
    def __init__(self, input_size, hidden_size,
                 bias_ih=True, bias_hh=False):
        super(GRU, self).__init__(input_size, hidden_size,
                                  bias_ih, bias_hh)
    def _load_gru_cell(self):
        # called by AbstractGRU.__init__
        self.gru_cell = GRUCell(self.input_size, self.hidden_size,
                                self.bias_ih, self.bias_hh)
class BayesianGRU(AbstractGRU):
    """AbstractGRU unrolled with a BayesianGRUCell (sequence-consistent
    dropout masks)."""
    def __init__(self, input_size, hidden_size,
                 bias_ih=True, bias_hh=False,
                 dropout=0.25):
        # dropout must be set before super().__init__ triggers _load_gru_cell
        self.dropout = dropout
        super(BayesianGRU, self).__init__(input_size, hidden_size,
                                          bias_ih, bias_hh)
    def _load_gru_cell(self):
        # called by AbstractGRU.__init__
        self.gru_cell = BayesianGRUCell(self.input_size, self.hidden_size,
                                        self.bias_ih, self.bias_hh,
                                        dropout=self.dropout)
    def set_dropout(self, dropout):
        # forward the new rate to the cell's dropout modules
        self.dropout = dropout
        self.gru_cell.set_dropout(dropout)
    def forward(self, x, hx=None, max_length=None):
        """Unroll the cell over time, signalling end-of-sequence to the
        cell afterwards so its dropout masks can be refreshed.

        :param x: (bsz x seqlen x input_size) input sequence
        :param hx: optional initial hidden state
        :param max_length: number of steps to unroll (defaults to seqlen)
        :returns: ((bsz x max_length x hidden) outputs, final hidden state)
        """
        batch_size = x.size(0)
        seq_length = x.size(1)
        if max_length is None:
            max_length = seq_length
        output = []
        for i in range(max_length):
            hx = self.gru_cell(x[:, i, :], hx=hx)
            output.append(hx.view(batch_size, 1, self.hidden_size))
        # unlike AbstractGRU.forward, notify the cell the sequence ended
        self.gru_cell.end_of_sequence()
        output = torch.cat(output, 1)
        return output, hx
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import math
import torch
from torch import nn
def xavier_init(module):
    """Xavier (Glorot) initializer for module parameters.

    One-dimensional parameters (biases) are zeroed; all others are drawn
    from N(0, 2 / (fan_in + fan_out)), with fans taken from the first two
    tensor dimensions.
    """
    for param in module.parameters():
        if param.data.dim() == 1:
            # 1D vector means bias
            param.data.fill_(0)
        else:
            fan_in, fan_out = param.data.size(0), param.data.size(1)
            param.data.normal_(0, math.sqrt(2 / (fan_in + fan_out)))
class ImgNet(nn.Module):
    """Module to embed the visual information. Used by answerer agent.
    In ``forward``: Embed image attributes and concatenate them together.
    **Note:** ``parlai.core.image_featurizers.ImageLoader`` can also be
    used instead.
    """
    def __init__(self, feature_size, input_size=None):
        super().__init__()
        # input_size is needed for modules which require input_size specification
        # nn.Embedding requires input size to be specified, while nn.Conv2d doesn't
        self.net = nn.Embedding(input_size, feature_size)
        xavier_init(self)
    def forward(self, image):
        # image: LongTensor of attribute ids; each id is embedded, then the
        # per-attribute embeddings are concatenated along the feature dim
        embeds = self.net(image)
        # NOTE(review): torch.cat is given a tensor (iterated row-wise),
        # not a list -- newer torch versions require an explicit sequence;
        # confirm against the torch version this targets
        features = torch.cat(embeds.transpose(0, 1), 1)
        return features
class ListenNet(nn.Module):
    """Module for listening to the sequence spoken by the other agent.

    ``forward`` simply looks up token embeddings.
    """
    def __init__(self, in_size, embed_size):
        super().__init__()
        self.net = nn.Embedding(in_size, embed_size)
        xavier_init(self)

    def forward(self, text_tokens):
        """Return embeddings for ``text_tokens`` (LongTensor of token ids)."""
        return self.net(text_tokens)
class StateNet(nn.Module):
    """Module containing the state-update mechanism for an agent.

    ``forward`` advances the (h, c) state pair through an LSTM cell.
    """
    def __init__(self, embed_size, state_size):
        super().__init__()
        self.net = nn.LSTMCell(embed_size, state_size)
        xavier_init(self)

    def forward(self, states, embeds):
        """Return the updated (h, c) states given the new embeddings."""
        return self.net(embeds, states)
class SpeakNet(nn.Module):
    """Module for speaking a token based on current state. In ``forward``:
    Return a probability distribution of utterances of tokens.
    """
    def __init__(self, state_size, out_size):
        super().__init__()
        self.net = nn.Linear(state_size, out_size)
        # NOTE(review): nn.Softmax() without dim relies on deprecated
        # dim inference (dim=1 for 2-d input) -- confirm before upgrading
        self.softmax = nn.Softmax()
        xavier_init(self)
    def forward(self, state):
        # project the state to vocabulary logits, then normalize
        out_distr = self.softmax(self.net(state))
        return out_distr
class PredictNet(nn.Module):
    """Module to make a prediction as per goal. Used by questioner agent.

    In ``forward``: embed the task, advance the LSTM state, and return the
    argmax prediction over the output distribution.
    """
    def __init__(self, embed_size, state_size, out_size):
        super().__init__()
        self.net_lstm = nn.LSTMCell(embed_size, state_size)
        self.net_mlp = nn.Linear(state_size, out_size)
        self.softmax = nn.Softmax()
        xavier_init(self)

    def forward(self, task_embeds, states):
        """Return the predicted class indices for a batch of task embeddings.

        :param task_embeds: (bsz x embed_size) task embeddings
        :param states: (h, c) LSTM state pair
        :returns: (bsz,) LongTensor of argmax predictions
        """
        states = self.net_lstm(task_embeds, states)
        # bugfix: forward referenced the nonexistent attribute
        # `self.predict_net`; the classifier layer defined in __init__
        # is `self.net_mlp` (states[1] is the LSTM cell state)
        out_distr = self.softmax(self.net_mlp(states[1]))
        _, prediction = out_distr.max(1)
        return prediction
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.torch_generator_agent import TorchGeneratorAgent
from .modules import Seq2seq, opt_to_kwargs
import torch
import torch.nn as nn
import json
class Seq2seqAgent(TorchGeneratorAgent):
"""Agent which takes an input sequence and produces an output sequence.
This model supports encoding the input and decoding the output via one of
several flavors of RNN. It then uses a linear layer (whose weights can
be shared with the embedding layer) to convert RNN output states into
output tokens. This model supports greedy decoding, selecting the
highest probability token at each time step, as well as beam
search.
For more information, see the following papers:
- Neural Machine Translation by Jointly Learning to Align and Translate
`(Bahdanau et al. 2014) <arxiv.org/abs/1409.0473>`_
- Sequence to Sequence Learning with Neural Networks
`(Sutskever et al. 2014) <arxiv.org/abs/1409.3215>`_
- Effective Approaches to Attention-based Neural Machine Translation
`(Luong et al. 2015) <arxiv.org/abs/1508.04025>`_
"""
@classmethod
def add_cmdline_args(cls, argparser):
"""Add command-line arguments specifically for this agent."""
agent = argparser.add_argument_group('Seq2Seq Arguments')
agent.add_argument('--init-model', type=str, default=None,
help='load dict/model/opts from this path')
agent.add_argument('-hs', '--hiddensize', type=int, default=128,
help='size of the hidden layers')
agent.add_argument('-esz', '--embeddingsize', type=int, default=128,
help='size of the token embeddings')
agent.add_argument('-nl', '--numlayers', type=int, default=2,
help='number of hidden layers')
agent.add_argument('-dr', '--dropout', type=float, default=0.1,
help='dropout rate')
agent.add_argument('-bi', '--bidirectional', type='bool',
default=False,
help='whether to encode the context with a '
'bidirectional rnn')
agent.add_argument('-att', '--attention', default='none',
choices=['none', 'concat', 'general', 'dot',
'local'],
help='Choices: none, concat, general, local. '
'If set local, also set attention-length. '
'(see arxiv.org/abs/1508.04025)')
agent.add_argument('-attl', '--attention-length', default=48, type=int,
help='Length of local attention.')
agent.add_argument('--attention-time', default='post',
choices=['pre', 'post'],
help='Whether to apply attention before or after '
'decoding.')
agent.add_argument('-rnn', '--rnn-class', default='lstm',
choices=Seq2seq.RNN_OPTS.keys(),
help='Choose between different types of RNNs.')
agent.add_argument('-dec', '--decoder', default='same',
choices=['same', 'shared'],
help='Choose between different decoder modules. '
'Default "same" uses same class as encoder, '
'while "shared" also uses the same weights. '
'Note that shared disabled some encoder '
'options--in particular, bidirectionality.')
agent.add_argument('-lt', '--lookuptable', default='unique',
choices=['unique', 'enc_dec', 'dec_out', 'all'],
help='The encoder, decoder, and output modules can '
'share weights, or not. '
'Unique has independent embeddings for each. '
'Enc_dec shares the embedding for the encoder '
'and decoder. '
'Dec_out shares decoder embedding and output '
'weights. '
'All shares all three weights.')
agent.add_argument('-soft', '--numsoftmax', default=1, type=int,
help='default 1, if greater then uses mixture of '
'softmax (see arxiv.org/abs/1711.03953).')
agent.add_argument('-idr', '--input-dropout', type=float, default=0.0,
help='Probability of replacing tokens with UNK in training.')
super(cls, Seq2seqAgent).add_cmdline_args(argparser)
Seq2seqAgent.dictionary_class().add_cmdline_args(argparser)
return agent
@staticmethod
def model_version():
"""Return current version of this model, counting up from 0.
Models may not be backwards-compatible with older versions.
Version 1 split from version 0 on Aug 29, 2018.
Version 2 split from version 1 on Nov 13, 2018
To use version 0, use --model legacy:seq2seq:0
To use version 1, use --model legacy:seq2seq:1
(legacy agent code is located in parlai/agents/legacy_agents).
"""
return 2
def __init__(self, opt, shared=None):
"""Set up model."""
super().__init__(opt, shared)
self.id = 'Seq2Seq'
def build_model(self, states=None):
"""Initialize model, override to change model setup."""
opt = self.opt
if not states:
states = {}
kwargs = opt_to_kwargs(opt)
self.model = Seq2seq(
len(self.dict), opt['embeddingsize'], opt['hiddensize'],
padding_idx=self.NULL_IDX, start_idx=self.START_IDX,
end_idx=self.END_IDX, unknown_idx=self.dict[self.dict.unk_token],
longest_label=states.get('longest_label', 1),
**kwargs)
if (opt.get('dict_tokenizer') == 'bpe' and
opt['embedding_type'] != 'random'):
print('skipping preinitialization of embeddings for bpe')
elif not states and opt['embedding_type'] != 'random':
# `not states`: only set up embeddings if not loading model
self._copy_embeddings(self.model.decoder.lt.weight,
opt['embedding_type'])
if opt['lookuptable'] in ['unique', 'dec_out']:
# also set encoder lt, since it's not shared
self._copy_embeddings(self.model.encoder.lt.weight,
opt['embedding_type'], log=False)
if states:
# set loaded states if applicable
self.model.load_state_dict(states['model'])
if self.use_cuda:
self.model.cuda()
if opt['embedding_type'].endswith('fixed'):
print('Seq2seq: fixing embedding weights.')
self.model.decoder.lt.weight.requires_grad = False
self.model.encoder.lt.weight.requires_grad = False
if opt['lookuptable'] in ['dec_out', 'all']:
self.model.decoder.e2s.weight.requires_grad = False
if self.use_cuda:
self.model.cuda()
if self.multigpu:
self.model = torch.nn.DataParallel(self.model)
self.model.encoder = self.model.module.encoder
self.model.decoder = self.model.module.decoder
self.model.longest_label = self.model.module.longest_label
self.model.output = self.model.module.output
self.model.reorder_encoder_states = (
self.model.module.reorder_encoder_states
)
return self.model
def build_criterion(self):
    """Create the training loss (summed, ignoring the NULL padding index)."""
    # With multiple softmax mixtures the model already emits log-probs,
    # so plain NLL is used; otherwise raw scores go through cross-entropy.
    if self.opt.get('numsoftmax', 1) > 1:
        loss_cls = nn.NLLLoss
    else:
        loss_cls = nn.CrossEntropyLoss
    self.criterion = loss_cls(ignore_index=self.NULL_IDX, size_average=False)
    if self.use_cuda:
        self.criterion.cuda()
def vectorize(self, *args, **kwargs):
    """Vectorize observations for seq2seq.

    The module prepends the START token itself, so it is not added here;
    the END token is still appended.
    """
    kwargs.update(add_start=False, add_end=True)
    return super().vectorize(*args, **kwargs)
def batchify(self, *args, **kwargs):
    """Batchify observations, always length-sorted (needed for
    pack_padded_sequence in the encoder)."""
    kwargs.update(sort=True)
    return super().batchify(*args, **kwargs)
def save(self, path=None):
    """Save model parameters if model_file is set.

    Writes a dict with model weights, the longest seen label length, and
    optimizer state to ``path``, plus the full opt dict (with the model
    version stamped in) to ``path + '.opt'``.
    """
    path = self.opt.get('model_file', None) if path is None else path
    if path and hasattr(self, 'model'):
        model = {}
        if self.multigpu:
            # Unwrap DataParallel so the checkpoint loads on a single GPU.
            model['model'] = self.model.module.state_dict()
        else:
            model['model'] = self.model.state_dict()
        model['longest_label'] = self.model.longest_label
        model['optimizer'] = self.optimizer.state_dict()
        model['optimizer_type'] = self.opt['optimizer']
        with open(path, 'wb') as write:
            torch.save(model, write)
        # save opt file
        with open(path + '.opt', 'w') as handle:
            # save version string
            self.opt['model_version'] = self.model_version()
            json.dump(self.opt, handle)
def load(self, path):
    """Load a checkpoint written by ``save``.

    Fix: removed stray dataset-metadata text that was fused onto the
    final ``return states`` line and made it a syntax error.

    :param path: file path of the saved checkpoint.
    :returns: the raw states dict so callers can also restore optimizer
        state and other metadata themselves.
    """
    # map_location forces tensors onto CPU; they are moved to GPU later
    # by the usual cuda() calls if needed.
    states = torch.load(path, map_location=lambda cpu, _: cpu)
    # set loaded states if applicable
    if self.multigpu:
        self.model.module.load_state_dict(states['model'])
    else:
        self.model.load_state_dict(states['model'])
    if 'longest_label' in states:
        self.model.longest_label = states['longest_label']
    return states
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
from torch.nn.functional import softmax
from functools import lru_cache
class MemNN(nn.Module):
    """Memory network: embeds a query and attends over embedded memories
    for opt['hops'] rounds, returning the final query representation."""

    def __init__(self, opt, num_features, use_cuda=False):
        super().__init__()
        self.opt = opt
        # Prepare features
        self.num_time_features = opt['mem_size']
        self.extra_features_slots = 0
        if opt['time_features']:
            # Reserve extra vocabulary ids (one per memory slot) encoding
            # how recent each memory is.
            self.time_features = torch.LongTensor(range(num_features,
                num_features + self.num_time_features))
            num_features += self.num_time_features
            self.extra_features_slots += 1

        def embedding():
            # Each role (query/answer/in-memory/out-memory) gets its own table.
            return Embed(num_features, opt['embedding_size'],
                position_encoding=opt['position_encoding'], padding_idx=0)
        self.query_embedder = embedding()
        self.answer_embedder = embedding()
        self.in_memory_embedder = embedding()
        self.out_memory_embedder = embedding()
        self.memory_hop = Hop(opt['embedding_size'])
        self.score = DotScore()
        if use_cuda:
            self.score.cuda()
            self.memory_hop.cuda()
        self.use_cuda = use_cuda

    def time_feature(self, t):
        # Clamp to the last id so memories older than mem_size share one id.
        return self.time_features[min(t, self.num_time_features - 1)]

    def update_memories_with_extra_features_(self, memory_lengths, memories):
        """Prepend the time-feature token to every non-empty memory, in place.

        ``memories`` is a flat 1-D tensor of concatenated token ids; this
        rebuilds it with one extra slot per non-empty memory and bumps the
        corresponding lengths. Trailing underscore: mutates its arguments.
        """
        memory_lengths = memory_lengths.data
        memories = memories.data
        if self.extra_features_slots > 0:
            num_nonempty_memories = int(memory_lengths.ne(0).long().sum())
            updated_memories = memories.new(memories.numel() + num_nonempty_memories * self.extra_features_slots)
            src_offset = 0
            dst_offset = 0
            for i in range(memory_lengths.size(0)):
                for j in range(self.opt['mem_size']):
                    length = memory_lengths[i, j]
                    if length > 0:
                        if self.opt['time_features']:
                            # Time token goes first, then the original tokens.
                            updated_memories[dst_offset] = self.time_feature(j)
                            dst_offset += 1
                        updated_memories[dst_offset:dst_offset + length] = memories[src_offset:src_offset + length]
                        src_offset += length
                        dst_offset += length
            memory_lengths += memory_lengths.ne(0).long() * self.extra_features_slots
            memories.set_(updated_memories)

    def forward(self, memories, queries, memory_lengths, query_lengths):
        """Run the query through opt['hops'] rounds of memory attention."""
        self.update_memories_with_extra_features_(memory_lengths, memories)
        in_memory_embeddings = self.in_memory_embedder(memory_lengths, memories)
        out_memory_embeddings = self.out_memory_embedder(memory_lengths, memories)
        query_embeddings = self.query_embedder(query_lengths, queries)
        # Empty memories (length 0) are masked out of the attention softmax.
        attention_mask = memory_lengths.data.ne(0).detach()
        if self.use_cuda:
            in_memory_embeddings = in_memory_embeddings.cuda()
            out_memory_embeddings = out_memory_embeddings.cuda()
            query_embeddings = query_embeddings.cuda()
            attention_mask = attention_mask.cuda()
        for _ in range(self.opt['hops']):
            query_embeddings = self.memory_hop(query_embeddings,
                in_memory_embeddings, out_memory_embeddings, attention_mask)
        return query_embeddings
class Embed(nn.Embedding):
    """Embedding layer for flat, length-delimited token sequences.

    Inputs are a lengths matrix plus one flat 1-D tensor of concatenated
    token indices; each sequence is embedded and mean-pooled, optionally
    weighted by a position-encoding matrix.
    """

    def __init__(self, *args, position_encoding=False, **kwargs):
        self.position_encoding = position_encoding
        super().__init__(*args, **kwargs)

    def forward(self, lengths, indices):
        lengths_mat = lengths.data
        # Normalize lengths to a 2-D [rows, slots] matrix.
        if lengths.dim() == 1 or lengths.size(1) == 1:
            lengths_mat = lengths_mat.unsqueeze(0)
        if lengths_mat.dim() == 1:
            raise RuntimeError(lengths.shape)
        # Scatter the flat indices into a padded [rows, slots, max_len] block.
        input = torch.LongTensor(lengths_mat.size(0), lengths_mat.size(1), torch.max(lengths_mat))
        pad = self.padding_idx if self.padding_idx is not None else 0
        input.fill_(pad)
        emb_list = []
        offset = 0
        for i, row in enumerate(lengths_mat):
            for j, length in enumerate(row):
                length = length.item()
                if length > 0:
                    input[i, j, :length] = indices[offset:offset + length]
                    offset += length
        for i, row in enumerate(lengths_mat):
            emb = super().forward(input[i, :, :])
            if self.position_encoding:
                emb = emb * self.position_tensor(row, emb)
            # Mean-pool over the token dimension (sum now, divide below).
            emb = torch.sum(emb, dim=1).squeeze(1)
            for j, length in enumerate(row):
                length = length.item()
                if length > 0:
                    emb[j] /= length
            emb_list.append(emb)
        embs = torch.stack(emb_list)
        # Undo the unsqueeze done above for 1-D / single-column inputs.
        if lengths.dim() == 1:
            embs = embs.squeeze(0)
        elif lengths.size(1) == 1:
            embs = embs.squeeze().unsqueeze(1)
        return embs

    @staticmethod
    @lru_cache(maxsize=32)
    def position_matrix(J, d):
        # Position-encoding weights for a J-token sentence with d-dim
        # embeddings (cf. end-to-end memory networks).
        m = torch.Tensor(J, d)
        for k in range(1, d + 1):
            for j in range(1, J + 1):
                m[j - 1, k - 1] = (1 - j / J) - (k / d) * (1 - 2 * j / J)
        return m

    @staticmethod
    def position_tensor(sentence_lengths, embeddings):
        # Per-sentence weighting tensor; rows for empty sentences stay zero.
        t = torch.zeros(embeddings.size())
        embedding_dim = embeddings.size()[-1]
        for i, length in enumerate(sentence_lengths):
            if length > 0:
                t[i, :length, :] = Embed.position_matrix(length, embedding_dim)
        return t
class Hop(nn.Module):
    """A single memory-network hop: attend over memories with the query,
    then add the attended memory to a linear map of the query."""

    def __init__(self, embedding_size):
        super(Hop, self).__init__()
        self.embedding_size = embedding_size
        self.linear = nn.Linear(embedding_size, embedding_size, bias=False)

    def forward(self, query_embeddings, in_memory_embeddings, out_memory_embeddings, attention_mask=None):
        # Dot-product attention scores: [batch, num_memories].
        scores = torch.bmm(in_memory_embeddings, query_embeddings.unsqueeze(2)).squeeze(2)
        if attention_mask is not None:
            # Push masked (empty) memories to a huge negative score so the
            # softmax assigns them ~zero probability.
            mask = attention_mask.float()
            scores = mask * scores + (1 - mask) * -1e20
        weights = softmax(scores, dim=1).unsqueeze(1)
        attended_memory = torch.bmm(weights, out_memory_embeddings).squeeze(1)
        return attended_memory + self.linear(query_embeddings)
class Decoder(nn.Module):
    """GRU decoder that projects hidden states onto the vocabulary."""

    def __init__(self, input_size, hidden_size, num_layers, opt, dictionary):
        super().__init__()
        self.dict = dictionary
        self.h2o = nn.Linear(hidden_size, len(dictionary))
        self.dropout = nn.Dropout(opt['dropout'])
        self.rnn = nn.GRU(input_size, hidden_size, num_layers)

    def hidden_to_idx(self, hidden, dropout=False):
        """Converts hidden state vectors into indices into the dictionary."""
        # Only single-step decoding is supported here.
        if hidden.size(0) > 1:
            raise RuntimeError('Bad dimensions of tensor:', hidden)
        scores = self.h2o(hidden.squeeze(0))
        if dropout:
            scores = self.dropout(scores)
        idx = scores.max(1)[1].unsqueeze(1)
        return idx, scores

    def forward(self, input, state):
        output, state = self.rnn(input, state)
        return self.hidden_to_idx(output, dropout=self.training)
class DotScore(nn.Module):
    """Dot-product scoring between query and candidate embeddings."""

    def one_to_one(self, query_embeddings, answer_embeddings, reply_embeddings=None):
        """Score each query against its paired answer; returns [batch].

        Fix: dropped the trailing .squeeze(1) — a leftover from old torch
        where sum() kept the reduced dim; on current torch sum(dim=1)
        already returns a 1-D [batch] tensor and squeeze(1) raises.
        """
        return (query_embeddings * answer_embeddings).sum(dim=1)

    def one_to_many(self, query_embeddings, answer_embeddings, reply_embeddings=None):
        """Score every query against every candidate; returns [batch, cands].

        (Fix: removed stray dataset text fused onto the original line.)
        """
        return query_embeddings.mm(answer_embeddings.t())
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# ------------------------------------------------------------------------------
# Modules
# ------------------------------------------------------------------------------
class StackedBRNN(nn.Module):
    """Stacked bidirectional RNN.

    Offers two forward paths: a fast one that ignores padding and a slower
    one that packs sequences so padding never influences the hidden states;
    evaluation always uses the precise (padded) path.
    """

    def __init__(self, input_size, hidden_size, num_layers,
                 dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
                 concat_layers=False, padding=False):
        super(StackedBRNN, self).__init__()
        self.padding = padding
        self.dropout_output = dropout_output
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.concat_layers = concat_layers
        self.rnns = nn.ModuleList()
        for i in range(num_layers):
            # After the first layer, input is the bidirectional output (2*h).
            input_size = input_size if i == 0 else 2 * hidden_size
            self.rnns.append(rnn_type(input_size, hidden_size,
                                      num_layers=1,
                                      bidirectional=True))

    def forward(self, x, x_mask):
        """Can choose to either handle or ignore variable length sequences.
        Always handle padding in eval.

        x = batch * len * input_size; x_mask = batch * len (1 = padding).
        """
        # No padding necessary.
        if x_mask.data.sum() == 0:
            return self._forward_unpadded(x, x_mask)
        # Pad if we care or if its during eval.
        if self.padding or not self.training:
            return self._forward_padded(x, x_mask)
        # We don't care.
        return self._forward_unpadded(x, x_mask)

    def _forward_unpadded(self, x, x_mask):
        """Faster encoding that ignores any padding."""
        # Transpose batch and sequence dims
        x = x.transpose(0, 1).contiguous()
        # Encode all layers
        outputs = [x]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]
            # Apply dropout to hidden input
            if self.dropout_rate > 0:
                rnn_input = F.dropout(rnn_input,
                                      p=self.dropout_rate,
                                      training=self.training)
            # Forward
            rnn_output = self.rnns[i](rnn_input)[0]
            outputs.append(rnn_output)
        # Concat hidden layers
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]
        # Transpose back
        output = output.transpose(0, 1).contiguous()
        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output

    def _forward_padded(self, x, x_mask):
        """Slower (significantly), but more precise,
        encoding that handles padding."""
        # Compute sorted sequence lengths: pack_padded_sequence requires
        # batches sorted by decreasing length.
        lengths = x_mask.data.eq(0).long().sum(1).squeeze()
        _, idx_sort = torch.sort(lengths, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)
        lengths = list(lengths[idx_sort])
        idx_sort = Variable(idx_sort)
        idx_unsort = Variable(idx_unsort)
        # Sort x
        x = x.index_select(0, idx_sort)
        # Transpose batch and sequence dims
        x = x.transpose(0, 1).contiguous()
        # Pack it up
        rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)
        # Encode all layers
        outputs = [rnn_input]
        for i in range(self.num_layers):
            rnn_input = outputs[-1]
            # Apply dropout to input
            if self.dropout_rate > 0:
                # Dropout the packed data directly, then re-wrap.
                dropout_input = F.dropout(rnn_input.data,
                                          p=self.dropout_rate,
                                          training=self.training)
                rnn_input = nn.utils.rnn.PackedSequence(dropout_input,
                                                        rnn_input.batch_sizes)
            outputs.append(self.rnns[i](rnn_input)[0])
        # Unpack everything
        for i, o in enumerate(outputs[1:], 1):
            outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]
        # Concat hidden layers or take final
        if self.concat_layers:
            output = torch.cat(outputs[1:], 2)
        else:
            output = outputs[-1]
        # Transpose and unsort (restore the original batch order)
        output = output.transpose(0, 1).contiguous()
        output = output.index_select(0, idx_unsort)
        # Dropout on output layer
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output,
                               p=self.dropout_rate,
                               training=self.training)
        return output
class SeqAttnMatch(nn.Module):
    """Given sequences X and Y, match sequence Y to each element in X.

    * o_i = sum(alpha_j * y_j) for i in X
    * alpha_j = softmax(y_j * x_i)
    """

    def __init__(self, input_size, identity=False):
        super(SeqAttnMatch, self).__init__()
        if not identity:
            self.linear = nn.Linear(input_size, input_size)
        else:
            self.linear = None

    def forward(self, x, y, y_mask):
        """Input shapes:
        x = batch * len1 * h
        y = batch * len2 * h
        y_mask = batch * len2
        Output shapes:
        matched_seq = batch * len1 * h
        """
        # Project vectors through a shared ReLU layer (unless identity).
        if self.linear:
            x_proj = self.linear(x.view(-1, x.size(2))).view(x.size())
            x_proj = F.relu(x_proj)
            y_proj = self.linear(y.view(-1, y.size(2))).view(y.size())
            y_proj = F.relu(y_proj)
        else:
            x_proj = x
            y_proj = y
        # Compute scores
        scores = x_proj.bmm(y_proj.transpose(2, 1))
        # Mask padding
        y_mask = y_mask.unsqueeze(1).expand(scores.size())
        scores.data.masked_fill_(y_mask.data, -float('inf'))
        # Normalize with softmax.
        # Fix: pass dim explicitly — the implicit-dim form is deprecated;
        # for this 2-D view, dim=-1 matches the old implicit behavior.
        alpha_flat = F.softmax(scores.view(-1, y.size(1)), dim=-1)
        alpha = alpha_flat.view(-1, x.size(1), y.size(1))
        # Take weighted average
        matched_seq = alpha.bmm(y)
        return matched_seq
class BilinearSeqAttn(nn.Module):
    """A bilinear attention layer over a sequence X w.r.t y:
    * o_i = softmax(x_i'Wy) for x_i in X.
    Optionally don't normalize output weights.
    """

    def __init__(self, x_size, y_size, identity=False):
        super(BilinearSeqAttn, self).__init__()
        if not identity:
            self.linear = nn.Linear(y_size, x_size)
        else:
            self.linear = None

    def forward(self, x, y, x_mask):
        """
        x = batch * len * h1
        y = batch * h2
        x_mask = batch * len (1 = padding)
        """
        Wy = self.linear(y) if self.linear is not None else y
        xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2)
        xWy.data.masked_fill_(x_mask.data, -float('inf'))
        # Raw scores stashed on the module (NOTE(review): purpose of this
        # attribute is not visible here — presumably read by a caller).
        self.xWy = xWy
        if self.training:
            # In training we output log-softmax for NLL.
            # Fix: explicit dim — the implicit-dim form is deprecated; for
            # this 2-D tensor, dim=-1 matches the old implicit behavior.
            alpha = F.log_softmax(xWy, dim=-1)
        else:
            # ...Otherwise 0-1 probabilities
            # alpha = F.softmax(xWy, dim=-1)
            # Note: We found better eval performance with unnormalized weights
            # here
            alpha = xWy.exp()
        return alpha
class LinearSeqAttn(nn.Module):
    """Self attention over a sequence:
    * o_i = softmax(Wx_i) for x_i in X.
    """

    def __init__(self, input_size):
        super(LinearSeqAttn, self).__init__()
        self.linear = nn.Linear(input_size, 1)

    def forward(self, x, x_mask):
        """
        x = batch * len * hdim
        x_mask = batch * len (1 = padding)
        """
        x_flat = x.view(-1, x.size(-1))
        scores = self.linear(x_flat).view(x.size(0), x.size(1))
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        # Fix: explicit softmax dim (implicit-dim form is deprecated; for
        # this 2-D tensor, dim=-1 matches the old implicit behavior).
        alpha = F.softmax(scores, dim=-1)
        return alpha
# ------------------------------------------------------------------------------
# Functional
# ------------------------------------------------------------------------------
def uniform_weights(x, x_mask):
    """Return uniform attention weights over non-masked input.

    x = batch * len * hdim
    x_mask = batch * len (nonzero entries mark padding)
    """
    alpha = Variable(torch.ones(x.size(0), x.size(1)))
    if x.data.is_cuda:
        alpha = alpha.cuda()
    alpha = alpha * x_mask.eq(0).float()
    # Fix: normalize via broadcasting with keepdim; the original
    # .sum(1).expand(alpha.size()) relied on old-torch sum() keeping the
    # reduced dim and breaks on current versions.
    alpha = alpha / alpha.sum(1, keepdim=True)
    return alpha
def weighted_avg(x, weights):
    """Return the weighted average of x over its len dimension.

    x = batch * len * d
    weights = batch * len
    (Fix: removed stray dataset text fused onto the original return line.)
    """
    return weights.unsqueeze(1).bmm(x).squeeze(1)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import unicodedata
from collections import Counter
from parlai.core.build_data import modelzoo_path
# ------------------------------------------------------------------------------
# Data/model utilities.
# ------------------------------------------------------------------------------
def normalize_text(text):
    """Return *text* in Unicode NFD (decomposed) normal form."""
    return unicodedata.normalize('NFD', text)
def load_embeddings(opt, word_dict):
    """Initialize embeddings from file of pretrained vectors.

    Words absent from the file keep their random N(0, 1) initialization;
    the __NULL__ padding token is zeroed at the end.
    """
    embeddings = torch.Tensor(len(word_dict), opt['embedding_dim'])
    embeddings.normal_(0, 1)
    opt['embedding_file'] = modelzoo_path(opt.get('datapath'),
                                          opt['embedding_file'])
    # Fill in embeddings
    if not opt.get('embedding_file'):
        raise RuntimeError('Tried to load embeddings with no embedding file.')
    with open(opt['embedding_file']) as f:
        for line in f:
            # Expected format: word followed by embedding_dim float values.
            parsed = line.rstrip().split(' ')
            if len(parsed) > 2:
                assert(len(parsed) == opt['embedding_dim'] + 1)
                w = normalize_text(parsed[0])
                if w in word_dict:
                    vec = torch.Tensor([float(i) for i in parsed[1:]])
                    embeddings[word_dict[w]].copy_(vec)
    # Zero NULL token
    embeddings[word_dict['__NULL__']].fill_(0)
    return embeddings
def build_feature_dict(opt):
    """Make mapping of feature option to feature index."""
    # Collect the enabled feature names in order, then number them.
    names = []
    if opt['use_in_question']:
        names += ['in_question', 'in_question_uncased']
    if opt['use_tf']:
        names.append('tf')
    if opt['use_time'] > 0:
        names += ['time=T%d' % (i + 1) for i in range(opt['use_time'] - 1)]
        names.append('time>=T%d' % opt['use_time'])
    return {name: idx for idx, name in enumerate(names)}
# ------------------------------------------------------------------------------
# Torchified input utilities.
# ------------------------------------------------------------------------------
def vectorize(opt, ex, word_dict, feature_dict):
    """Turn tokenized text inputs into feature vectors.

    Returns (document, features, question) when ex['target'] is None,
    otherwise (document, features, question, start, end).
    """
    # Index words
    document = torch.LongTensor([word_dict[w] for w in ex['document']])
    question = torch.LongTensor([word_dict[w] for w in ex['question']])
    # Create extra features vector
    features = torch.zeros(len(ex['document']), len(feature_dict))
    # f_{exact_match}
    if opt['use_in_question']:
        # Flag document tokens that appear in the question (cased/uncased).
        q_words_cased = set([w for w in ex['question']])
        q_words_uncased = set([w.lower() for w in ex['question']])
        for i in range(len(ex['document'])):
            if ex['document'][i] in q_words_cased:
                features[i][feature_dict['in_question']] = 1.0
            if ex['document'][i].lower() in q_words_uncased:
                features[i][feature_dict['in_question_uncased']] = 1.0
    # f_{tf}
    if opt['use_tf']:
        # Normalized term frequency of each token within the document.
        counter = Counter([w.lower() for w in ex['document']])
        l = len(ex['document'])
        for i, w in enumerate(ex['document']):
            features[i][feature_dict['tf']] = counter[w.lower()] * 1.0 / l
    if opt['use_time'] > 0:
        # Counting from the end, each (full-stop terminated) sentence gets
        # its own time identitfier.
        sent_idx = 0

        def _full_stop(w):
            return w in {'.', '?', '!'}
        for i, w in reversed(list(enumerate(ex['document']))):
            sent_idx = sent_idx + 1 if _full_stop(w) else max(sent_idx, 1)
            if sent_idx < opt['use_time']:
                features[i][feature_dict['time=T%d' % sent_idx]] = 1.0
            else:
                # Sentences further back than use_time share one bucket.
                features[i][feature_dict['time>=T%d' % opt['use_time']]] = 1.0
    # Maybe return without target
    if ex['target'] is None:
        return document, features, question
    # ...or with target
    start = torch.LongTensor([ex['target'][0]])
    end = torch.LongTensor([ex['target'][1]])
    return document, features, question, start, end
def batchify(batch, null=0, cuda=False):
    """Collate inputs into batches.

    Each example is (document, features, question[, start, end], text, spans);
    documents/questions are padded to the batch max length, with a byte mask
    marking padded positions (1 = padding).
    """
    NUM_INPUTS = 3
    NUM_TARGETS = 2
    NUM_EXTRA = 2
    # Get elements
    docs = [ex[0] for ex in batch]
    features = [ex[1] for ex in batch]
    questions = [ex[2] for ex in batch]
    text = [ex[-2] for ex in batch]
    spans = [ex[-1] for ex in batch]
    # Batch documents and features
    max_length = max([d.size(0) for d in docs])
    x1 = torch.LongTensor(len(docs), max_length).fill_(null)
    x1_mask = torch.ByteTensor(len(docs), max_length).fill_(1)
    x1_f = torch.zeros(len(docs), max_length, features[0].size(1))
    for i, d in enumerate(docs):
        x1[i, :d.size(0)].copy_(d)
        x1_mask[i, :d.size(0)].fill_(0)
        x1_f[i, :d.size(0)].copy_(features[i])
    # Batch questions
    max_length = max([q.size(0) for q in questions])
    x2 = torch.LongTensor(len(questions), max_length).fill_(null)
    x2_mask = torch.ByteTensor(len(questions), max_length).fill_(1)
    for i, q in enumerate(questions):
        x2[i, :q.size(0)].copy_(q)
        x2_mask[i, :q.size(0)].fill_(0)
    # Pin memory if cuda (speeds up later host-to-GPU copies)
    if cuda:
        x1 = x1.pin_memory()
        x1_f = x1_f.pin_memory()
        x1_mask = x1_mask.pin_memory()
        x2 = x2.pin_memory()
        x2_mask = x2_mask.pin_memory()
    # Maybe return without targets
    if len(batch[0]) == NUM_INPUTS + NUM_EXTRA:
        return x1, x1_f, x1_mask, x2, x2_mask, text, spans
    # ...Otherwise add targets
    elif len(batch[0]) == NUM_INPUTS + NUM_EXTRA + NUM_TARGETS:
        y_s = torch.cat([ex[3] for ex in batch])
        y_e = torch.cat([ex[4] for ex in batch])
        return x1, x1_f, x1_mask, x2, x2_mask, y_s, y_e, text, spans
    # ...Otherwise wrong number of inputs
    raise RuntimeError('Wrong number of inputs per batch')
# ------------------------------------------------------------------------------
# General logging utilities.
# ------------------------------------------------------------------------------
class AverageMeter(object):
    """Computes and stores the average and current value.

    (Fix: removed stray dataset text fused onto the final line.)
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running (weighted) average
        self.sum = 0    # weighted sum of values
        self.count = 0  # total weight seen

    def update(self, val, n=1):
        """Record *val* with weight *n* (e.g. a batch mean with batch size n)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
from torch.autograd import Variable
from .utils import load_embeddings, AverageMeter
from .rnn_reader import RnnDocReader
logger = logging.getLogger('DrQA')
class DocReaderModel(object):
    """High level model that handles intializing the underlying network
    architecture, saving, updating examples, and predicting examples.

    (Fix: removed stray dataset text fused onto the final line.)
    """

    def __init__(self, opt, word_dict, feature_dict, state_dict=None):
        # Book-keeping.
        self.opt = opt
        self.word_dict = word_dict
        self.feature_dict = feature_dict
        self.updates = 0
        self.train_loss = AverageMeter()
        # Building network.
        self.network = RnnDocReader(opt)
        if state_dict:
            # Drop saved parameters that no longer exist in the network
            # before loading the rest.
            new_state = set(self.network.state_dict().keys())
            for k in list(state_dict['network'].keys()):
                if k not in new_state:
                    del state_dict['network'][k]
            self.network.load_state_dict(state_dict['network'])
        # Building optimizer: only tune parameters with requires_grad.
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if opt['optimizer'] == 'sgd':
            self.optimizer = optim.SGD(parameters, opt['learning_rate'],
                                       momentum=opt['momentum'],
                                       weight_decay=opt['weight_decay'])
        elif opt['optimizer'] == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=opt['weight_decay'])
        else:
            raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])

    def set_embeddings(self):
        """Copy pretrained vectors into the network's embedding table."""
        # Read word embeddings.
        if not self.opt.get('embedding_file'):
            logger.warning('[ WARNING: No embeddings provided. '
                           'Keeping random initialization. ]')
            return
        logger.info('[ Loading pre-trained embeddings ]')
        embeddings = load_embeddings(self.opt, self.word_dict)
        logger.info('[ Num embeddings = %d ]' % embeddings.size(0))
        # Sanity check dimensions
        new_size = embeddings.size()
        old_size = self.network.embedding.weight.size()
        if new_size[1] != old_size[1]:
            raise RuntimeError('Embedding dimensions do not match.')
        if new_size[0] != old_size[0]:
            logger.warning(
                '[ WARNING: Number of embeddings changed (%d->%d) ]' %
                (old_size[0], new_size[0])
            )
        # Swap weights
        self.network.embedding.weight.data = embeddings
        # If partially tuning the embeddings, keep the old values
        if self.opt['tune_partial'] > 0:
            if self.opt['tune_partial'] + 2 < embeddings.size(0):
                fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]
                self.network.fixed_embedding = fixed_embedding

    def update(self, ex):
        """Run a single training step on batch *ex* (see utils.batchify)."""
        # Train mode
        self.network.train()
        # Transfer to GPU
        if self.opt['cuda']:
            inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]
            target_s = Variable(ex[5].cuda(non_blocking=True))
            target_e = Variable(ex[6].cuda(non_blocking=True))
        else:
            inputs = [Variable(e) for e in ex[:5]]
            target_s = Variable(ex[5])
            target_e = Variable(ex[6])
        # Run forward
        score_s, score_e = self.network(*inputs)
        # Compute loss and accuracies: start-span loss + end-span loss.
        loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
        # NOTE(review): loss.data[0] is legacy-torch 0-dim indexing; on
        # torch >= 0.4 this would be loss.item().
        self.train_loss.update(loss.data[0], ex[0].size(0))
        # Clear gradients and run backward
        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients
        torch.nn.utils.clip_grad_norm(self.network.parameters(),
                                      self.opt['grad_clipping'])
        # Update parameters
        self.optimizer.step()
        self.updates += 1
        # Reset any partially fixed parameters (e.g. rare words)
        self.reset_parameters()

    def predict(self, ex):
        """Predict answer spans for batch *ex*; returns (texts, scores)."""
        # Eval mode
        self.network.eval()
        # Transfer to GPU
        if self.opt['cuda']:
            inputs = [Variable(e.cuda(non_blocking=True), volatile=True)
                      for e in ex[:5]]
        else:
            inputs = [Variable(e, volatile=True) for e in ex[:5]]
        # Run forward
        score_s, score_e = self.network(*inputs)
        # Transfer to CPU/normal tensors for numpy ops
        score_s = score_s.data.cpu()
        score_e = score_e.data.cpu()
        # Get argmax text spans
        text = ex[-2]
        spans = ex[-1]
        predictions = []
        pred_scores = []
        max_len = self.opt['max_len'] or score_s.size(1)
        for i in range(score_s.size(0)):
            # Outer product of start/end scores; keep spans with
            # start <= end and length < max_len, then take the best one.
            scores = torch.ger(score_s[i], score_e[i])
            scores.triu_().tril_(max_len - 1)
            scores = scores.numpy()
            s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
            s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
            predictions.append(text[i][s_offset:e_offset])
            pred_scores.append(np.max(scores))
        return predictions, pred_scores

    def reset_parameters(self):
        """Restore the frozen tail of the embedding matrix after an update."""
        # Reset fixed embeddings to original value
        if self.opt['tune_partial'] > 0:
            offset = self.opt['tune_partial'] + 2
            if offset < self.network.embedding.weight.data.size(0):
                self.network.embedding.weight.data[offset:] \
                    = self.network.fixed_embedding

    def save(self, filename):
        """Serialize network weights, feature dict, and config to *filename*."""
        params = {
            'state_dict': {
                'network': self.network.state_dict(),
            },
            'feature_dict': self.feature_dict,
            'config': self.opt,
        }
        try:
            torch.save(params, filename)
        except BaseException:
            # Best-effort save: log and continue rather than crash training.
            logger.warn('[ WARN: Saving failed... continuing anyway. ]')

    def cuda(self):
        """Move the network to GPU."""
        self.network.cuda()
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
from . import layers
class RnnDocReader(nn.Module):
    """Network for the Document Reader module of DrQA.

    (Fix: removed stray dataset text fused onto the final return line.)
    """
    RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}

    def __init__(self, opt, padding_idx=0):
        super(RnnDocReader, self).__init__()
        # Store config
        self.opt = opt
        # Word embeddings (+1 for padding)
        self.embedding = nn.Embedding(opt['vocab_size'],
                                      opt['embedding_dim'],
                                      padding_idx=padding_idx)
        # ...(maybe) keep them fixed
        if opt['fix_embeddings']:
            for p in self.embedding.parameters():
                p.requires_grad = False
        # Register a buffer to (maybe) fill later for keeping *some* fixed
        if opt['tune_partial'] > 0:
            buffer_size = torch.Size((
                opt['vocab_size'] - opt['tune_partial'] - 2,
                opt['embedding_dim']
            ))
            self.register_buffer('fixed_embedding', torch.Tensor(buffer_size))
        # Projection for attention weighted question
        if opt['use_qemb']:
            self.qemb_match = layers.SeqAttnMatch(opt['embedding_dim'])
        # Input size to RNN: word emb + question emb + manual features
        doc_input_size = opt['embedding_dim'] + opt['num_features']
        if opt['use_qemb']:
            doc_input_size += opt['embedding_dim']
        # RNN document encoder
        self.doc_rnn = layers.StackedBRNN(
            input_size=doc_input_size,
            hidden_size=opt['hidden_size'],
            num_layers=opt['doc_layers'],
            dropout_rate=opt['dropout_rnn'],
            dropout_output=opt['dropout_rnn_output'],
            concat_layers=opt['concat_rnn_layers'],
            rnn_type=self.RNN_TYPES[opt['rnn_type']],
            padding=opt['rnn_padding'],
        )
        # RNN question encoder
        self.question_rnn = layers.StackedBRNN(
            input_size=opt['embedding_dim'],
            hidden_size=opt['hidden_size'],
            num_layers=opt['question_layers'],
            dropout_rate=opt['dropout_rnn'],
            dropout_output=opt['dropout_rnn_output'],
            concat_layers=opt['concat_rnn_layers'],
            rnn_type=self.RNN_TYPES[opt['rnn_type']],
            padding=opt['rnn_padding'],
        )
        # Output sizes of rnn encoders (bidirectional -> 2x hidden)
        doc_hidden_size = 2 * opt['hidden_size']
        question_hidden_size = 2 * opt['hidden_size']
        if opt['concat_rnn_layers']:
            doc_hidden_size *= opt['doc_layers']
            question_hidden_size *= opt['question_layers']
        # Question merging
        if opt['question_merge'] not in ['avg', 'self_attn']:
            raise NotImplementedError('question_merge = %s' % opt['question_merge'])
        if opt['question_merge'] == 'self_attn':
            self.self_attn = layers.LinearSeqAttn(question_hidden_size)
        # Bilinear attention for span start/end
        self.start_attn = layers.BilinearSeqAttn(
            doc_hidden_size,
            question_hidden_size,
        )
        self.end_attn = layers.BilinearSeqAttn(
            doc_hidden_size,
            question_hidden_size,
        )

    def forward(self, x1, x1_f, x1_mask, x2, x2_mask):
        """Inputs:
        x1 = document word indices [batch * len_d]
        x1_f = document word features indices [batch * len_d * nfeat]
        x1_mask = document padding mask [batch * len_d]
        x2 = question word indices [batch * len_q]
        x2_mask = question padding mask [batch * len_q]
        Returns (start_scores, end_scores) over document positions.
        """
        # Embed both document and question
        x1_emb = self.embedding(x1)
        x2_emb = self.embedding(x2)
        # Dropout on embeddings
        if self.opt['dropout_emb'] > 0:
            x1_emb = nn.functional.dropout(x1_emb, p=self.opt['dropout_emb'],
                                           training=self.training)
            x2_emb = nn.functional.dropout(x2_emb, p=self.opt['dropout_emb'],
                                           training=self.training)
        # Add attention-weighted question representation
        if self.opt['use_qemb']:
            x2_weighted_emb = self.qemb_match(x1_emb, x2_emb, x2_mask)
            drnn_input = torch.cat([x1_emb, x2_weighted_emb, x1_f], 2)
        else:
            drnn_input = torch.cat([x1_emb, x1_f], 2)
        # Encode document with RNN
        doc_hiddens = self.doc_rnn(drnn_input, x1_mask)
        # Encode question with RNN + merge hiddens
        question_hiddens = self.question_rnn(x2_emb, x2_mask)
        if self.opt['question_merge'] == 'avg':
            q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
        elif self.opt['question_merge'] == 'self_attn':
            q_merge_weights = self.self_attn(question_hiddens, x2_mask)
        question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
        # Predict start and end positions
        start_scores = self.start_attn(doc_hiddens, question_hidden, x1_mask)
        end_scores = self.end_attn(doc_hiddens, question_hidden, x1_mask)
        return start_scores, end_scores
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from torch import nn
import torch
from torch.autograd import Variable
from torch.nn import functional as F
from copy import deepcopy
def mask_out(data, mask):
    """Return the rows of *data* whose corresponding *mask* entry is nonzero.

    Fix: flatten the nonzero() result with view(-1) instead of squeeze();
    squeeze() collapsed the index tensor to 0-d when exactly one entry was
    selected, which made index_select fail.
    """
    return data.index_select(0, mask.nonzero().view(-1))
def normalize(data, p=2, dim=1, eps=1e-12):
    """L-p-normalize *data* along *dim*, guarding against division by zero.

    Fix: use keepdim=True plus broadcasting; the original
    .expand_as(data) on the reduced norm relied on old-torch behavior
    where reductions kept the reduced dimension.
    """
    return data / torch.norm(data, p, dim, keepdim=True).clamp(min=eps)
class ObjectChecklistModel(nn.Module):
    """Encoder/decoder that scores a fixed inventory of candidate actions
    ("checklist") at every output step.

    A (possibly bidirectional) GRU encodes the input token sequence.  For each
    candidate action, attention weights over the encoder states are derived
    from the embeddings of the action's argument words, and a decoder GRU
    (run once per action per step) produces one logit per action.  A counter
    embedding feeds back how often each action has already been predicted,
    and the previous step's prediction is fed back through several embedding
    projections.
    """
    def __init__(self, opt, data_agent):
        super().__init__()
        self.opt = opt
        # Word embeddings shared by input tokens and action key words; index 0 is padding.
        self.input_emb = nn.Embedding(data_agent.wordcnt, opt['embedding_dim'], padding_idx=0)
        self.action_type_emb = nn.Embedding(data_agent.get_num_actions(), opt['action_type_emb_dim'])
        self.encoder = nn.GRU(opt['embedding_dim'], opt['rnn_h'], opt['rnn_layers'], batch_first=True, bidirectional=opt['bidir'])
        # Maps a per-action decoder state to a single score.
        self.decoder = nn.Sequential(
            nn.Linear(opt['rnn_h'], 1),
        )
        self.log_softmax = nn.LogSoftmax()
        # Projects encoder states into word-embedding space for the second attention term.
        self.trans = nn.Sequential(
            nn.Linear(opt['rnn_h'] * (2 if opt['bidir'] else 1), opt['embedding_dim']),
            nn.Tanh(),
        )
        counter_emb = opt['counter_emb_dim']
        if opt['counter_ablation']:
            # Ablation: drop the counter feature from the decoder input entirely.
            counter_emb = 0
        # Decoder input width = attention + counter + room + action type (static and
        # fed-back) + fed-back action/second-action embeddings + second attention.
        self.dec_gru = nn.GRU(opt['rnn_h'] * (2 if opt['bidir'] else 1) + counter_emb + (opt['embedding_dim'] if not opt['room_ablation'] else 0) + opt['action_type_emb_dim'] + opt['action_type_emb_dim'] + opt['embedding_dim'] + opt['embedding_dim'] + opt['rnn_h'] * (2 if opt['bidir'] else 1), opt['rnn_h'], opt['rnn_layers'], batch_first=True)
        # Compresses the encoder's final hidden state(s) into the decoder's initial state.
        self.merge = nn.Sequential(
            nn.Linear(opt['rnn_h'] * 2, opt['rnn_h']),
            nn.Tanh(),
        )
        self.counter_emb = nn.Embedding(opt['counter_max'] + 1, opt['counter_emb_dim'])
    def forward_loss(self, x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat, average_=True):
        """Compute the negative log-likelihood training loss.

        x: [batch, seq_in], int
        action_key: [y_dim], int
        second_action_key: [y_dim], int
        action_type: [y_dim], int
        current_room: [batch, seq_out], int
        checked: [batch, seq_out + 1, y_dim], float, binary (NOTE: unused here)
        y: [batch, seq_out, y_dim], float, binary
        y_mask: [batch, seq_out, y_dim], float, binary (NOTE: unused here)
        counter_feat: [batch, seq_out, y_dim], int

        Returns a scalar loss normalized by y.sum() when average_ is True,
        otherwise an unnormalized per-example loss vector.
        """
        opt = self.opt
        batch_size, seq_out, seq_in = x.size(0), y.size(1), x.size(1)
        h_0 = Variable(torch.zeros(opt['rnn_layers'] * (2 if opt['bidir'] else 1), batch_size, opt['rnn_h']))
        if opt['cuda']:
            h_0 = h_0.cuda()
        emb_out = self.input_emb(x) # [batch, seq_in, dim]
        enc_out, hidden = self.encoder(emb_out, h_0) # [batch, seq_in, h], [layer, batch, h]
        action_emb_ori = self.input_emb(action_key.unsqueeze(1)).squeeze(1) # [y_dim, dim]
        y_dim, emb_dim = action_emb_ori.size()
        action_emb = action_emb_ori.unsqueeze(0).expand(batch_size, y_dim, emb_dim).transpose(1, 2) # [batch, dim, y_dim]
        second_action_emb_ori = self.input_emb(second_action_key.unsqueeze(1)).squeeze(1) # [y_dim, dim]
        second_action_emb = second_action_emb_ori.unsqueeze(0).expand(batch_size, y_dim, emb_dim).transpose(1, 2) # [batch, dim, y_dim]
        # Per-action attention over encoder states: raw-embedding term + transformed-state term.
        alpha = F.softmax(torch.bmm(emb_out, action_emb).transpose(1, 2).contiguous().view(-1, seq_in) + torch.bmm(self.trans(enc_out.contiguous().view(batch_size * seq_in, -1)).contiguous().view(batch_size, seq_in, -1), action_emb).transpose(1, 2).contiguous().view(-1, seq_in)) # [batch * y_dim, seq_in]
        attention = torch.bmm(alpha.view(batch_size, y_dim, seq_in), enc_out) # [batch, y_dim, h]
        # Same attention mechanism keyed on each action's second argument word.
        second_alpha = F.softmax(torch.bmm(emb_out, second_action_emb).transpose(1, 2).contiguous().view(-1, seq_in) + torch.bmm(self.trans(enc_out.view(batch_size * seq_in, -1)).view(batch_size, seq_in, -1), second_action_emb).transpose(1, 2).contiguous().view(-1, seq_in))
        second_attention = torch.bmm(second_alpha.view(batch_size, y_dim, seq_in), enc_out) # [batch, y_dim, h]
        action_type_out_ori = self.action_type_emb(action_type) # [y_dim, dim]
        action_type_out = action_type_out_ori.unsqueeze(0).expand(batch_size, y_dim, opt['action_type_emb_dim'])
        action_type_emb_dim = action_type_out.size(2)
        room_emb = self.input_emb(current_room) # [batch, seq_out, emb_dim]
        loss = 0
        if not average_:
            loss = None
        # One decoder GRU track per (batch element, candidate action).
        hidden = self.merge(hidden.view(batch_size, -1)).unsqueeze(1).expand(batch_size, y_dim, opt['rnn_h']).contiguous().view(1, batch_size * y_dim, -1)
        for i in range(seq_out):
            counter_in = self.counter_emb(counter_feat[:, i]) # [batch, y_dim, dim]
            room_in = room_emb[:, i].unsqueeze(1).expand(batch_size, y_dim, emb_dim)
            if i == 0:
                # No previous prediction at the first step: feed zeros.
                y_in = Variable(torch.zeros(batch_size, y_dim))
                if opt['cuda']:
                    y_in = y_in.cuda()
            else:
                # Teacher forcing: feed the gold labels of the previous step.
                y_in = y[:, i - 1]
            y_second_in = torch.mm(y_in, second_action_emb_ori).unsqueeze(1).expand(batch_size, y_dim, emb_dim) # [batch, y_dim, dim]
            y_type_in = torch.mm(y_in, action_type_out_ori).unsqueeze(1).expand(batch_size, y_dim, action_type_emb_dim) # [batch, y_dim, dim]
            y_in = torch.mm(y_in, action_emb_ori).unsqueeze(1).expand(batch_size, y_dim, emb_dim) # [batch, y_dim, dim]
            # Assemble the decoder input, honoring the ablation switches.
            dec_in_list = [attention]
            if not opt['counter_ablation']:
                dec_in_list.append(counter_in)
            if not opt['room_ablation']:
                dec_in_list.append(room_in)
            dec_in_list.append(action_type_out)
            dec_in_list.append(y_type_in)
            dec_in_list.append(y_second_in)
            dec_in_list.append(y_in)
            dec_in_list.append(second_attention)
            dec_in = torch.cat(dec_in_list, 2)
            dec_out, hidden = self.dec_gru(dec_in.view(batch_size * y_dim, 1, -1), hidden) # [batch * y_dim, 1, h], [1, batch * y_dim, h]
            dec_out = dec_out.squeeze(1) # [batch * y_dim, h]
            dec_out = self.log_softmax(self.decoder(dec_out).view(batch_size, y_dim))
            if not average_:
                new_loss = - (dec_out * y[:, i]).sum(1)
                if loss is None:
                    loss = new_loss
                else:
                    loss += new_loss
            else:
                loss += - (dec_out * y[:, i]).sum()
        if not average_:
            return loss
        loss /= y.sum()
        return loss
    def forward_predict(self, x, action_key, second_action_key, action_type, check_mapping, checked, graphs, data_agent, constrain_=True):
        """Greedily decode an action sequence for every batch element.

        check_mapping: [y_dim, y_dim], float, binary (NOTE: unused here, as is `checked`)

        Returns a list (one per batch element) of action strings; decoding
        for an element stops once it emits 'STOP'.  When constrain_ is True,
        scores are masked to actions executable in the current graph state.
        Graphs are deep-copied so predicted actions can be executed on them
        without mutating the caller's state.
        """
        graphs = deepcopy(graphs)
        opt = self.opt
        batch_size, seq_out, seq_in = x.size(0), opt['max_seq_out'], x.size(1)
        h_0 = Variable(torch.zeros(opt['rnn_layers'] * (2 if opt['bidir'] else 1), batch_size, opt['rnn_h']), volatile=True)
        if opt['cuda']:
            h_0 = h_0.cuda()
        emb_out = self.input_emb(x)
        enc_out, hidden = self.encoder(emb_out, h_0)
        action_emb_ori = self.input_emb(action_key.unsqueeze(1)).squeeze(1) # [y_dim, dim]
        y_dim, emb_dim = action_emb_ori.size()
        action_emb = action_emb_ori.unsqueeze(0).expand(batch_size, y_dim, emb_dim).transpose(1, 2) # [batch, dim, y_dim]
        second_action_emb_ori = self.input_emb(second_action_key.unsqueeze(1)).squeeze(1) # [y_dim, dim]
        second_action_emb = second_action_emb_ori.unsqueeze(0).expand(batch_size, y_dim, emb_dim).transpose(1, 2) # [batch, dim, y_dim]
        # Same two-term per-action attention as in forward_loss.
        alpha = F.softmax(torch.bmm(emb_out, action_emb).transpose(1, 2).contiguous().view(-1, seq_in) + torch.bmm(self.trans(enc_out.contiguous().view(batch_size * seq_in, -1)).contiguous().view(batch_size, seq_in, -1), action_emb).transpose(1, 2).contiguous().view(-1, seq_in))
        attention = torch.bmm(alpha.view(batch_size, y_dim, seq_in), enc_out) # [batch, y_dim, h]
        second_alpha = F.softmax(torch.bmm(emb_out, second_action_emb).transpose(1, 2).contiguous().view(-1, seq_in) + torch.bmm(self.trans(enc_out.view(batch_size * seq_in, -1)).view(batch_size, seq_in, -1), second_action_emb).transpose(1, 2).contiguous().view(-1, seq_in))
        second_attention = torch.bmm(second_alpha.view(batch_size, y_dim, seq_in), enc_out) # [batch, y_dim, h]
        action_type_out_ori = self.action_type_emb(action_type.unsqueeze(1)).squeeze(1) # [y_dim, dim]
        action_type_out = action_type_out_ori.unsqueeze(0).expand(batch_size, y_dim, opt['action_type_emb_dim'])
        action_type_emb_dim = action_type_out.size(2)
        # Running per-action prediction counts, clamped to counter_max below.
        counter_feat = Variable(torch.zeros(batch_size, y_dim).long())
        if opt['cuda']:
            counter_feat = counter_feat.cuda()
        text_out = [[] for _ in range(batch_size)]
        hidden = self.merge(hidden.view(batch_size, -1)).unsqueeze(1).expand(batch_size, y_dim, opt['rnn_h']).contiguous().view(1, batch_size * y_dim, -1)
        y_onehot = None
        for i in range(seq_out):
            # The current room is re-read from each (mutated) graph every step.
            room_in = torch.zeros(batch_size).long()
            for j in range(batch_size):
                room_in[j] = data_agent.get_room(graphs[j])
            if opt['cuda']:
                room_in = room_in.cuda()
            room_in = Variable(room_in, volatile=True)
            room_in = self.input_emb(room_in.unsqueeze(1)).expand(batch_size, y_dim, emb_dim)
            if i == 0:
                y_in = Variable(torch.zeros(batch_size, y_dim))
                if opt['cuda']:
                    y_in = y_in.cuda()
            else:
                # Feed back the previous step's own (one-hot) prediction.
                y_in = y_onehot
            y_second_in = torch.mm(y_in, second_action_emb_ori).unsqueeze(1).expand(batch_size, y_dim, emb_dim) # [batch, y_dim, dim]
            y_type_in = torch.mm(y_in, action_type_out_ori).unsqueeze(1).expand(batch_size, y_dim, action_type_emb_dim) # [batch, y_dim, dim]
            y_in = torch.mm(y_in, action_emb_ori).unsqueeze(1).expand(batch_size, y_dim, emb_dim) # [batch, y_dim, dim]
            counter_in = self.counter_emb(counter_feat) # [batch, y_dim, dim]
            dec_in_list = [attention]
            if not opt['counter_ablation']:
                dec_in_list.append(counter_in)
            if not opt['room_ablation']:
                dec_in_list.append(room_in)
            dec_in_list.append(action_type_out)
            dec_in_list.append(y_type_in)
            dec_in_list.append(y_second_in)
            dec_in_list.append(y_in)
            dec_in_list.append(second_attention)
            dec_in = torch.cat(dec_in_list, 2)
            dec_out, hidden = self.dec_gru(dec_in.view(batch_size * y_dim, 1, -1), hidden) # [batch * y_dim, 1, h], [1, batch * y_dim, h]
            # Mask of actions currently executable in each graph.
            y_mask = torch.zeros(batch_size, y_dim)
            for j in range(batch_size):
                data_agent.get_mask(graphs[j], y_mask[j])
            if opt['cuda']:
                y_mask = y_mask.cuda()
            y_mask = Variable(y_mask, volatile=True)
            dec_out = dec_out.squeeze(1) # [batch * y_dim, h]
            dec_out = self.decoder(dec_out).view(batch_size, y_dim)
            if constrain_:
                # Push non-executable actions to effectively -inf before argmax.
                dec_out = dec_out * y_mask + -1e7 * (1 - y_mask)
            y_out = torch.max(dec_out, 1, keepdim=True)[1].data
            y_onehot = torch.zeros(batch_size, y_dim)
            y_onehot.scatter_(1, y_out.cpu(), 1)
            if opt['cuda']:
                y_onehot = y_onehot.cuda()
            y_onehot = Variable(y_onehot, volatile=True) # [batch, y_dim]
            y_out = y_out.squeeze()
            for j in range(batch_size):
                if len(text_out[j]) > 0 and text_out[j][-1] == 'STOP': continue
                cur_tuple = data_agent.get_action_tuple(y_out[j])
                text_out[j].append(data_agent.reverse_parse_action(cur_tuple))
                if text_out[j][-1] != 'STOP':
                    # Execute the predicted action so subsequent masks/rooms update.
                    exec_result = graphs[j].parse_exec(text_out[j][-1])
                    if constrain_:
                        assert exec_result, text_out[j][-1]
                # Bump the counter for every action sharing this action's checklist key.
                for action_name in data_agent.key_to_check[data_agent.check_to_key[cur_tuple]]:
                    action_id = data_agent.get_action_id(action_name)
                    counter_feat[j, action_id] = counter_feat[j, action_id] + 1
            counter_feat.data.clamp_(max=opt['counter_max'])
        return text_out
class Seq2SeqModel(nn.Module):
    """Attention-based sequence-to-sequence baseline: a GRU encoder over the
    input tokens and a GRU decoder that emits a log-probability distribution
    over the fixed action vocabulary at every step.  The decoder consumes the
    previous step's (one-hot) output distribution as its input.
    """
    def __init__(self, opt, data_agent):
        super().__init__()
        self.opt = opt
        self.y_dim = data_agent.y_dim
        self.input_emb = nn.Embedding(data_agent.wordcnt, opt['embedding_dim'], padding_idx=0)
        self.encoder = nn.GRU(opt['embedding_dim'], opt['rnn_h'], opt['rnn_layers'], batch_first=True)
        # Decoder input is the previous step's one-hot action distribution.
        self.decoder = nn.GRU(self.y_dim, opt['rnn_h'], opt['rnn_layers'], batch_first=True)
        # Combines attention context and decoder state into log-probs over actions.
        self.mapping = nn.Sequential(
            nn.Linear(opt['rnn_h'] * 2, self.y_dim),
            nn.LogSoftmax(),
        )
    def forward_loss(self, x, y, average_=True):
        """Compute the negative log-likelihood training loss.

        x: [batch, seq_in], int
        y: [batch, seq_out, 3 * target], float, binary

        Returns a scalar loss normalized by y.sum() when average_ is True,
        otherwise an unnormalized per-example loss vector.
        """
        opt = self.opt
        batch_size, seq_out = x.size(0), y.size(1)
        h_0 = Variable(torch.zeros(opt['rnn_layers'], batch_size, opt['rnn_h']))
        if opt['cuda']:
            h_0 = h_0.cuda()
        enc_out, hidden = self.encoder(self.input_emb(x), h_0) # [batch, seq_in, h], [layer, batch, h]
        loss = 0 if average_ else None
        for i in range(seq_out):
            if i == 0:
                # No previous prediction at the first step: feed zeros.
                y_in = Variable(torch.zeros(batch_size, 1, y.size(2)))
                if opt['cuda']:
                    y_in = y_in.cuda()
            else:
                # Teacher forcing: feed the gold labels of the previous step.
                y_in = y[:, i - 1].unsqueeze(1)
            dec_out, hidden = self.decoder(y_in, hidden) # [batch, 1, h], [layer, batch, h]
            # Dot-product attention of the decoder state over encoder states.
            alpha = F.softmax(torch.bmm(enc_out, hidden[-1].unsqueeze(2))) # [batch, seq_in, 1]
            attention = torch.bmm(enc_out.transpose(1, 2), alpha).squeeze(2) # [batch, h]
            dec_out = self.mapping(torch.cat([attention, dec_out.squeeze(1)], dim=1)) # [batch, y_dim]
            if average_:
                loss += - (dec_out * y[:, i]).sum()
            else:
                new_loss = - (dec_out * y[:, i]).sum(1)
                if loss is None:
                    loss = new_loss
                else:
                    loss += new_loss
        if not average_:
            return loss
        loss /= y.sum()
        return loss
    def forward_predict(self, x, graphs, data_agent, constrain_=True):
        """Greedily decode an action sequence for every batch element.

        Decoding for an element stops once it emits 'STOP'.  When constrain_
        is True, scores are masked to actions executable in the current graph
        state.  Graphs are deep-copied so predicted actions can be executed
        without mutating the caller's state.
        """
        graphs = deepcopy(graphs)
        opt = self.opt
        batch_size = x.size(0)
        h_0 = Variable(torch.zeros(opt['rnn_layers'], batch_size, opt['rnn_h']))
        if opt['cuda']:
            h_0 = h_0.cuda()
        enc_out, hidden = self.encoder(self.input_emb(x), h_0) # [batch, seq_in, h], [layer, batch, h]
        text_out = [[] for _ in range(batch_size)]
        y_onehot = None
        for i in range(opt['max_seq_out']):
            if i == 0:
                y_in = Variable(torch.zeros(batch_size, 1, self.y_dim))
                if opt['cuda']:
                    y_in = y_in.cuda()
            else:
                # Feed back the previous step's own one-hot prediction.
                y_in = y_onehot.unsqueeze(1)
            dec_out, hidden = self.decoder(y_in, hidden)
            alpha = F.softmax(torch.bmm(enc_out, hidden[-1].unsqueeze(2)))
            attention = torch.bmm(enc_out.transpose(1, 2), alpha).squeeze(2)
            dec_out = self.mapping(torch.cat([attention, dec_out.squeeze(1)], dim=1)) # [batch, y_dim]
            # Mask of actions currently executable in each graph.
            y_mask = torch.zeros(batch_size, self.y_dim)
            for j in range(batch_size):
                data_agent.get_mask(graphs[j], y_mask[j])
            if opt['cuda']:
                y_mask = y_mask.cuda()
            y_mask = Variable(y_mask, volatile=True)
            if constrain_:
                # Push non-executable actions to effectively -inf before argmax.
                dec_out = dec_out * y_mask + -1e7 * (1 - y_mask)
            y_out = torch.max(dec_out, 1, keepdim=True)[1].data # [batch, 1]
            y_onehot = torch.zeros(batch_size, self.y_dim) # [batch, y_dim]
            y_onehot.scatter_(1, y_out.cpu(), 1)
            y_onehot = Variable(y_onehot)
            if opt['cuda']:
                y_onehot = y_onehot.cuda()
            y_out = y_out.squeeze()
            for j in range(batch_size):
                if len(text_out[j]) > 0 and text_out[j][-1] == 'STOP': continue
                text_out[j].append(data_agent.reverse_parse_action(data_agent.get_action_tuple(y_out[j])))
                if text_out[j][-1] != 'STOP':
                    # Execute the predicted action so subsequent masks update.
                    exec_result = graphs[j].parse_exec(text_out[j][-1])
                    if constrain_:
                        assert exec_result, text_out[j][-1]
        return text_out
# Copyright (c) 2017-present, Moscow Institute of Physics and Technology.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.worlds import World
from parlai.core.utils import display_messages
from parlai.core.agents import create_agent_from_shared
import requests
import json
import time
class ConvAIWorld(World):
    """ConvAIWorld provides conversations with participants in the convai.io
    competition.

    This world takes in exactly one agent which will converse with a partner
    over a remote connection.

    For each remote conversation being maintained by this world, a copy of the
    original agent will be instantiated from the original agent's `share()`
    method.
    """

    @staticmethod
    def add_cmdline_args(argparser):
        """Register ConvAI-specific command line arguments on `argparser`."""
        convai = argparser.add_argument_group('ConvAI Arguments')
        convai.add_argument('-bi', '--bot-id', required=True,
                            help='Id of local bot used to communicate with RouterBot')
        convai.add_argument('-bc', '--bot-capacity', type=int, default=-1,
                            help='The maximum number of open dialogs. Use -1 '
                                 'for unlimited number of open dialogs')
        convai.add_argument('-rbu', '--router-bot-url', required=True,
                            help='Url of RouterBot')
        convai.add_argument('-rbpd', '--router-bot-pull-delay', type=int, default=1,
                            help='Delay before new request to RouterBot: minimum 1 sec')
        convai.add_argument('-m', '--max-pull-delay', type=int, default=600,
                            # Fixed help-text typo: 'if case of' -> 'in case of'.
                            help='Maximum delay for new requests in case of server '
                                 'unavailability')

    def __init__(self, opt, agents, shared=None):
        super().__init__(opt, shared)
        if len(agents) != 1:
            raise RuntimeError('Need agent to talk to.')
        self.agent = agents[0]
        # Shared state used to spawn one agent clone per remote conversation.
        self.shared = agents[0].share()
        # Current chat id
        self.curr_chatID = None
        # All active and finished (but not cleared yet) chats
        self.chats = {}
        # Finished chats
        self.finished_chats = set()
        # Pairs of exchanges between remote and local agents (for printing)
        self.last_exchanges = dict()
        # Pool of messages from RouterBot
        self.messages = []
        # Url of RouterBot
        self.router_bot_url = opt['router_bot_url']
        # Delay before new request to RouterBot: minimum 1 sec
        self.router_bot_pull_delay = opt['router_bot_pull_delay']
        if self.router_bot_pull_delay < 1:
            self.router_bot_pull_delay = 1
        # Minimal pull delay is equal to initial value of router_bot_pull_delay
        self.minimum_pull_delay = self.router_bot_pull_delay
        # Maximum delay couldn't be smaller than minimum_pull_delay
        self.maximum_pull_delay = opt['max_pull_delay']
        if self.maximum_pull_delay < self.minimum_pull_delay:
            self.maximum_pull_delay = self.minimum_pull_delay
        # Id of local bot used to communicate with RouterBot
        self.bot_id = opt['bot_id']
        # The maximum number of open dialogs.
        # Use -1 for unlimited number of open dialogs
        self.bot_capacity = opt['bot_capacity']
        # RouterBot url with current bot id
        self.bot_url = self.router_bot_url + self.bot_id

    def _get_updates(self):
        """Make HTTP request to Router Bot for new messages

        Expecting server response to be like {'ok': True, "result": [...]}

        :return: list of new messages received since last request
        """
        res = requests.get(self.bot_url + '/getUpdates')
        if res.status_code != 200:
            # Server unavailable or erroring: back off before the next pull.
            print(res.text)
            self._increase_delay()
            return {'ok': False, "result": []}
        elif self.router_bot_pull_delay > self.minimum_pull_delay:
            # Server recovered: reset the pull delay to its minimum.
            self.router_bot_pull_delay = self.minimum_pull_delay
        return res.json()

    def _increase_delay(self):
        """Exponentially back off the pull delay, capped at the configured maximum."""
        if self.router_bot_pull_delay < self.maximum_pull_delay:
            self.router_bot_pull_delay *= 2
            if self.router_bot_pull_delay > self.maximum_pull_delay:
                self.router_bot_pull_delay = self.maximum_pull_delay
            # Fixed: the delay was passed as a second print() argument instead
            # of being %-formatted, so the literal '%d' was printed.
            print('Warning! Increasing pull delay to %d'
                  % self.router_bot_pull_delay)

    def _send_message(self, observation, chatID):
        """Make HTTP request to Router Bot to post new message

        :param observation: message that will be sent to server
        :param chatID: id of chat
        :return: None
        """
        if self._is_end_of_conversation(observation['text']):
            # Ending a dialog requires an evaluation payload.
            data = {
                'text': '/end',
                'evaluation': {
                    'quality': 0,
                    'breadth': 0,
                    'engagement': 0
                }
            }
        else:
            data = {
                'text': observation['text'],
                'evaluation': 0
            }
        message = {
            'chat_id': chatID,
            'text': json.dumps(data)
        }
        headers = {
            'Content-Type': 'application/json'
        }
        res = requests.post(self.bot_url + '/sendMessage',
                            json=message, headers=headers)
        if res.status_code != 200:
            print(res.text)
            res.raise_for_status()

    @staticmethod
    def _is_begin_of_conversation(message):
        return message.startswith('/start')

    @staticmethod
    def _is_end_of_conversation(message):
        return message.startswith('/end')

    @staticmethod
    def _is_skip_response(message):
        return message == ''

    @staticmethod
    def _get_chat_id(message):
        return message['message']['chat']['id']

    @staticmethod
    def _get_message_text(message):
        return message['message']['text']

    @staticmethod
    def _strip_start_message(message):
        # Drop the '/start' line and prefix each persona line.
        lines = message.split('\n')[1:]
        lines = ['your persona: ' + line for line in lines]
        return '\n'.join(lines)

    def _init_chat(self, chatID):
        """Create new chat for new dialog.

        Sets up a new instantiation of the agent so that each chat has its own
        local state.

        :param chatID: chat id
        :return: new instance of your local agent
        """
        agent_info = self.shared
        # Add refs to current world instance and chat id to agent 'opt' parameter
        if 'opt' not in agent_info.keys() or agent_info['opt'] is None:
            agent_info['opt'] = {}
        agent_info['opt']['convai_chatID'] = chatID
        local_agent = create_agent_from_shared(agent_info)
        self.chats[chatID] = local_agent
        return self.chats[chatID]

    def cleanup_finished_chat(self, chatID):
        """Shutdown specified chat.

        :param chatID: chat id
        :return: None
        """
        if chatID in self.finished_chats:
            self.chats.pop(chatID).shutdown()
            self.finished_chats.remove(chatID)

    def cleanup_finished_chats(self):
        """Shutdown all finished chats.

        :return: None
        """
        for chatID in self.finished_chats.copy():
            self.cleanup_finished_chat(chatID)

    def pull_new_messages(self):
        """Requests the server for new messages and processes every message.

        If a message starts with '/start' string then a new chat will be created and
        the message will be added to stack.

        If a message has the same chat id as already existing chat then it will be
        added to message stack for this chat.

        Any other messages will be ignored.

        If after processing all messages message stack is still empty then new request
        to server will be performed.

        :return: None
        """
        print('Waiting for new messages from server...', flush=True)
        while True:
            time.sleep(self.router_bot_pull_delay)
            msgs = self._get_updates()
            if len(msgs["result"]) > 0:
                for msg in msgs["result"]:
                    print('\nProceed message: %s' % msg)
                    text = self._get_message_text(msg)
                    chatID = self._get_chat_id(msg)
                    if self.chats.get(chatID, None) is not None:
                        print('Message was recognized as part of chat #%s'
                              % chatID)
                        self.messages.append((chatID, text))
                    elif self._is_begin_of_conversation(text):
                        print('Message was recognised as start of new chat #%s'
                              % chatID)
                        # Start a new chat only while under the capacity limit
                        # (-1 means unlimited).
                        if self.bot_capacity == -1 or 0 <= self.bot_capacity > \
                                (len(self.chats) - len(self.finished_chats)):
                            self._init_chat(chatID)
                            text = self._strip_start_message(text)
                            self.messages.append((chatID, text))
                            print('New world and agents for chat #%s are created.'
                                  % chatID)
                        else:
                            # Fixed missing space between concatenated literals
                            # ('capacitylimit' -> 'capacity limit').
                            print('Cannot start new chat #%s due to bot capacity '
                                  'limit reached.' % chatID)
                    else:
                        # Fixed missing space between concatenated literals
                        # (matches the same message in parley()).
                        print('Message was not recognized as part of any chat. '
                              'Message skipped.')
                if len(self.messages) > 0:
                    break
                else:
                    print('Waiting for new messages from server...', flush=True)

    def parley(self):
        """Pops next message from stack, gets corresponding chat, agents, world
        and performs communication between agents.

        Result of communication will be send to server.

        If message stack is empty then server will be requested for new messages.

        :return: None
        """
        print('Try to cleanup finished chat before new parley.')
        self.cleanup_finished_chats()

        if len(self.messages) == 0:
            print('Message stack is empty. Try to request new messages from server.')
            self.pull_new_messages()

        print('Pop next message from stack')
        (chatID, text) = self.messages.pop(0)
        episode_done = self._is_end_of_conversation(text)
        local_agent = self.chats.get(chatID, None)

        if local_agent is not None:
            self.curr_chatID = chatID
            msg = {
                'id': 'MasterBot#%s' % chatID,
                'text': text,
                'episode_done': episode_done
            }
            local_agent.observe(msg)
            reply = local_agent.act()
            self.last_exchanges[chatID] = [msg, reply]
            if self._is_end_of_conversation(reply['text']) or reply['episode_done']:
                episode_done = True
            if self._is_skip_response(reply['text']):
                # An empty reply means the agent chose not to answer this turn.
                print('Skip response from agent for chat #%s' % chatID)
            else:
                print('Send response from agent to chat #%s: %s' % (chatID, reply))
                self._send_message(reply, chatID)
        else:
            print('Message was not recognized as part of any chat. Message skipped.')

        if episode_done:
            self.finished_chats.add(chatID)

    def display(self):
        """Render the last exchange of the current chat, if any."""
        if self.curr_chatID in self.chats.keys():
            return display_messages(self.last_exchanges[self.curr_chatID])
        else:
            return ''

    def shutdown(self):
        """Shut down all local agents and tell the server to end open chats."""
        for chatID in self.chats.keys():
            self.chats[chatID].shutdown()
            if chatID not in self.finished_chats:
                self._send_message({'text': '/end'}, chatID)

    def get_chats(self):
        return self.chats.keys()

    def get_finished_chats(self):
        return self.finished_chats
# Copyright (c) 2017-present, Moscow Institute of Physics and Technology.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from projects.convai.convai_world import ConvAIWorld
from parlai.core.params import ParlaiParser
from parlai.core.agents import Agent, create_agent
from parlai.core.utils import display_messages
import random
class ConvAISampleAgent(Agent):
    """Toy ConvAI agent that answers every observation with a random canned reply."""

    # Replies sampled uniformly at random for each non-final observation.
    _CANNED_REPLIES = [
        'I love you!',
        'Wow!',
        'Really?',
        'Nice!',
        'Hi',
        'Hello',
        'This is not very interesting. Let\'s change the subject of the '
        'conversation and talk about cats.',
        '/end',
    ]

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.id = 'ConvAISampleAgent'
        self.text = 'Nothing to say yet!'
        self.episode_done = False

    def observe(self, observation):
        # Echo the incoming message, remember it, and pick the next reply.
        print('\t' + display_messages([observation]))
        self.observation = observation
        self.episode_done = observation['episode_done']
        if self.episode_done:
            self.text = '/end'
        else:
            self.text = random.choice(self._CANNED_REPLIES)

    def act(self):
        reply = {
            'id': self.getID(),
            'text': self.text,
            'episode_done': self.episode_done
        }
        print('\t' + display_messages([reply]))
        return reply
def setup_args():
    """Build a ParlaiParser pre-loaded with ConvAIWorld's command-line options."""
    argparser = ParlaiParser(True, True)
    ConvAIWorld.add_cmdline_args(argparser)
    return argparser
def run_convai_bot(opt):
    """Create the configured agent and converse forever through ConvAIWorld.

    Exceptions from a single parley are printed and swallowed so that one
    broken conversation does not take the whole bot down.
    """
    world = ConvAIWorld(opt, [create_agent(opt)])
    while True:
        try:
            world.parley()
        except Exception as e:
            print('Exception: {}'.format(e))
def main():
    """Parse command-line options and run the sample ConvAI bot forever."""
    parser = setup_args()
    # Default to the sample agent unless the user overrides --model.
    parser.set_params(model='projects.convai.convai_bot:ConvAISampleAgent')
    opt = parser.parse_args()
    # Fixed typo in the user-facing message: 'inifinite' -> 'infinite'.
    print('Run ConvAI bot in infinite loop...')
    run_convai_bot(opt)


if __name__ == '__main__':
    main()
from inspect import getmembers
from typing import Any
from pytest import MonkeyPatch
import functools
import io
import os
import socket
from .pytest_sandbox import MockOsModule
import builtins
import math
from dataclasses import is_dataclass, fields
from typing import Tuple
class MockedModule:
    """Attribute-less stand-in for a mocked module.

    Because it defines nothing, every `hasattr` check against it fails, which
    makes `swap_function_calls` replace all of the real module's callables
    with prohibited-call stubs.
    """
    pass
class ProhibitedFunctionCall(Exception):
    """Raised when sandboxed test code calls a function that is blocked."""
    pass
def fn_doesnt_exist(module_name: str, fn_name: str):
    """Return a stand-in callable that always raises ProhibitedFunctionCall,
    naming the blocked `module_name.fn_name` in its message."""
    def blocked(*args, **kwargs):
        raise ProhibitedFunctionCall(
            f"{module_name}.{fn_name} is prohibited during testing"
        )
    return blocked
def swap_function_calls(monkey_patch_context, module, mocked_module, attrs_to_keep):
    """Monkey-patch every callable attribute of `module` except `attrs_to_keep`.

    A callable is replaced by the mocked module's attribute of the same name
    when one exists; otherwise by a stub that raises ProhibitedFunctionCall.
    """
    for name, value in getmembers(module):
        if name in attrs_to_keep or not callable(value):
            continue
        if hasattr(mocked_module, name):
            replacement = getattr(mocked_module, name)
        else:
            replacement = fn_doesnt_exist(module.__name__, name)
        monkey_patch_context.setattr(module, name, replacement)
def patch_system_modules(test_fn):
    """Decorator that runs `test_fn` inside a filesystem/network sandbox.

    File opens are routed to a MockOsModule, the `os` module is mocked except
    for `path`/`fspath`, and every `socket` callable is blocked.  All patches
    are reverted when the wrapped call returns.
    """
    @functools.wraps(test_fn)
    def sandboxed(*args, **kwargs):
        with MonkeyPatch.context() as patcher:
            fake_fs = MockOsModule()
            patcher.setattr(io, "open", fake_fs.open)
            patcher.setattr(builtins, "open", fake_fs.open)
            swap_function_calls(patcher, os, fake_fs, {"path", "fspath"})
            swap_function_calls(patcher, socket, MockedModule(), {})
            return test_fn(*args, **kwargs)
    return sandboxed
def equals(a: Any, b: Any) -> bool:
    """True when `a` and `b` compare equal under equals_with_message."""
    is_equal, _ = equals_with_message(a, b)
    return is_equal
def eq_msg(a: Any, b: Any) -> str:
    """Explanation string from equals_with_message for comparing `a` and `b`."""
    _, message = equals_with_message(a, b)
    return message
def equals_with_message(a: Any, b: Any) -> Tuple[bool, str]:
    """Deep structural equality check.

    Returns ``(equal, message)`` where *message* describes the first
    mismatch found (for the scalar fallbacks the message is filled in even
    when the values are equal).

    Handles dicts, lists, tuples, sets, floats (via math.isclose),
    dataclasses (field by field, honoring ``field.compare``), objects that
    inherit object's identity ``__eq__`` (public attribute by attribute),
    and everything else via plain ``==``.
    """
    # Values of different concrete types are never considered equal.
    if type(a) != type(b):
        return False, f"Types don't match: {type(a)} != {type(b)}"
    obj_typ = type(a)
    if obj_typ == dict:
        # Compare key sets first so the message can distinguish missing
        # keys from differing values.
        a_keys_set = set(a.keys())
        b_keys_set = set(b.keys())
        keys_equal, msg = equals_with_message(a_keys_set, b_keys_set)
        if not keys_equal:
            return False, f"Dictionary keys don't match {msg}"
        for key in a.keys():
            val_eq, msg = equals_with_message(a[key], b[key])
            if not val_eq:
                return False, f"Dictionary values don't match for key {key}:\n{msg}"
        return True, ""
    elif obj_typ == list:
        if len(a) != len(b):
            return False, f"Lists don't have the same length: {len(a)} != {len(b)}"
        for i in range(len(a)):
            val_eq, msg = equals_with_message(a[i], b[i])
            if not val_eq:
                return False, f"List values don't match for index {i}: {msg}"
        return True, ""
    elif obj_typ == tuple:
        if len(a) != len(b):
            return False, f"Tuples don't have the same length: {len(a)} != {len(b)}"
        for i in range(len(a)):
            val_eq, msg = equals_with_message(a[i], b[i])
            if not val_eq:
                return False, f"Tuple values don't match for index {i}: {msg}"
        return True, ""
    elif obj_typ == set:
        if len(a) != len(b):
            return False, f"Sets don't have the same length: {len(a)} != {len(b)}"
        num_items_not_in_b = 0
        msg = ""
        for a_item in a:
            for b_item in b:
                # hash() instead of calling __hash__ directly: hash()
                # applies the interpreter's post-processing of the raw
                # dunder result and raises a clear TypeError for
                # unhashable items.
                if hash(a_item) == hash(b_item):
                    val_eq, val_msg = equals_with_message(a_item, b_item)
                    if val_eq:
                        break
                    else:
                        # NOTE(review): a hash collision between unequal
                        # items aborts the scan early; kept as-is to
                        # preserve the original behavior.
                        return (
                            False,
                            f"Set values don't match: {a_item} from left set != {b_item} from right set\n {val_msg}",
                        )
                elif equals_with_message(a_item, b_item)[0]:
                    # Recurse directly instead of going through the
                    # equals() wrapper so this function is self-contained.
                    break
            else:
                num_items_not_in_b += 1
                msg += f"{a_item} from left set not in right set\n"
        if num_items_not_in_b > 0:
            return False, f"Left set != Right set:\n {msg}"
        else:
            return True, ""
    elif obj_typ == float:
        # Tolerant float comparison (default math.isclose tolerances).
        return math.isclose(a, b), f"{a} != {b}"
    elif is_dataclass(obj_typ):
        if a == b:
            return True, ""
        # Overall inequality established; locate the first differing field.
        for field in fields(obj_typ):
            if field.compare:
                val_eq, msg = equals_with_message(
                    getattr(a, field.name), getattr(b, field.name)
                )
                if not val_eq:
                    return (
                        False,
                        f"Values don't match for `{obj_typ}.{field.name}`\n {msg}",
                    )
        return True, ""
    elif (
        hasattr(obj_typ, "__eq__")
        and hasattr(obj_typ.__eq__, "__objclass__")
        and obj_typ.__eq__.__objclass__ == object  # type: ignore
    ):
        # The type inherits object's identity-based __eq__, so compare its
        # public, non-callable attributes instead.
        for member in getmembers(a):
            if member[0].startswith("_") or callable(member[1]):
                continue
            if not hasattr(b, member[0]):
                return (
                    False,
                    f"Right object of type `{obj_typ}` doesn't have the attribute `{member[0]}`",
                )
            val_eq, msg = equals_with_message(member[1], getattr(b, member[0]))
            if not val_eq:
                return (
                    False,
                    f"Values don't match for `{obj_typ}.{member[0]}`\n {msg}",
                )
        return True, ""
    else:
        return a == b, f"{a} != {b}"
Robust ATD CLI tools
================
[](https://github.com/shadowbq/robust-atd/releases)
[](/LICENSE)
[](https://github.com/shadowbq/robust-atd/releases)
[](/README.md)
"Robust" is a set of tools to leverage the HTTPS REST API of the [McAfee Advanced Threat Detection](http://www.mcafee.com/us/products/advanced-threat-defense.aspx) 3.8 - 4.x appliance.
## Tools Overview
* `robust` : basic cli submission of a single piece of malware to a MATD server.
* `robust-profiles` : list the available MATD profiles
* `robust-search` : search MATD via MD5 for a report.
* `robust-watchdog` : monitor a directory for files and submit `multithreaded` to MATD
* `robust-convict` : submit `multithreaded` a directory filled with samples and sort into malicious, clean, error, etc.
* `robust-reporter` : parse offline the json files returned during large batch submissions.
* `robust-version-checker` : Check the MATD Server Version
## Important
This is *not a supported or official application of McAfee*. This work is based off of publicly available published documentation for integrating with the McAfee ATD REST API 3.6.x to 4.x
Official API Documentation is available here:
* https://support.mcafee.com/ServicePortal/faces/knowledgecenter?q=api&v=&p=Advanced+Threat+Defense
## McAfee ATD - Advanced Threat defense
McAfee ATD is a commercial grade enterprise security sandbox analysis appliance. It main function is to provide advanced detection for stealthy, zero-day malware. McAfee Advanced Threat Defense is available as an on-premises appliance or a virtual form factor, with support for both private and public cloud with availability in the Azure Marketplace.
* https://www.mcafee.com/us/products/advanced-threat-defense.aspx
* https://www.mcafee.com/us/resources/data-sheets/ds-advanced-threat-defense.pdf
## Install
Req: Python 2.7.x.
Bug #5: https://github.com/shadowbq/robust-atd/issues/5 - ~~`pip install robust-atd`~~
Note: Python 3.x is not supported.
### PKG Download & Manual Install Alternative
Note: `python setup.py install` will attempt to install dependencies from the internet via `pip`.
For offline runtime installation, please download the pip packages listed in the `requirements.txt`.
### Virtualenv
It is recommended to install virtualenv & virtualenvwrapper via `Virtualenv Burrito`.
See: [README_PYTHON_UP.md](/README_PYTHON_UP.md)
```
$> mkvirtualenv robust
$> workon robust
$(robust)> wget https://github.com/shadowbq/robust-atd/archive/master.zip
$(robust)> unzip master.zip
$(robust)> cd master
$(robust)> python setup.py install
```
-or-
```
$> mkvirtualenv --python=python2.7 robust
$> workon robust
$(robust)> pip install robust-atd
```
### Robust (DOT) Configuration file
Robust will use the `~\.robust` configuration file to load defaults into the scripts.
The configuration file is broken into multiple sections. If you use a section you must define all the settings in that section.
* [auth]
* [connection]
* [convict]
It is recommended to set the file to `read-write` only for the current user, and remove all world `(-)rwx` permissions.
Authentication Section `[auth]` :
```shell
$(robust)> cat ~/.robust
[auth]
user: admin
password: password.
```
Connection Detail Section `[connection]` :
```shell
$(robust)> cat ~/.robust
[connection]
ip: atd.localhost.localdomain
skipssl: true
maxthreads: 15
```
Data Storage Section `[storage]`:
Note: Datastorage locations will be created if they do not exist.
```shell
$(robust)> cat ~/.robust
[storage]
severity: 3
cleandir: ~/robust/clean
dirtydir: ~/robust/dirty
reportdir: ~/robust/reports
errordir: ~/robust/errors
```
This file is expanded via the `os` module is compliant with windows user directories.
## Robust:
Using `robust` for submitting samples.
```
usage: robust.py [-h] [-u USER] [-p PASSWORD] [-i ATD IP] [-n] -s
FILE_TO_UPLOAD -a ANALYZER_PROFILE [-v] [--version]
Robust McAfee ATD Python CLI tool
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-v, --verbosity increase output verbosity
(default: None)
-q, --quiet (q)uiet all output
(default: False)
Authentication parameters:
-u USER (u)sername for the API of the ATD
(default: admin)
-p PASSWORD (p)assword for username
(default: password.)
-i ATD IP (i)p or hostname address of ATD
(default: atd.localhost.localdomain)
-n do (n)ot verify the SSL certificate for the communications
(default: False)
Sample parameters:
-s FILE_TO_UPLOAD (s)ample or file to be analyzed
(default: None)
-a ANALYZER_PROFILE (a)nalyzer profile id to be used during analysis
(default: None)
Examples:
robust.py -u admin -p password. -i atd.localhost.localdomain -s /usr/local/bin/file_to_scan -a 1
```
### Submitting a Sample
A sample can be submitted via cli with full flags, `.robust` configuration file, or interrupt passwords.
```shell
$(robust)> robust.py -u admin -p password. -i atd.localhost.localdomain -s /home/malware/non-malicious-container/putty_upx_7.exe
```
Using interrupt (interactive) passwords:
```shell
$(robust)> robust-profiles.py -n -l
Password: <input password>
ATD profiles: 1
Profile id: 26
Name: Win XP Down Select (Online)
OS: winXPsp3
Run all down selects?: Off
******************
```
### Managing Outputs
Using System Return codes with `-q` Quiet output flag. When the quiet flag is
used for submitting samples or searching reports the *severity* of the application
is returned as a system exit/return code. Negative return codes indicate *faults*
or failure during submission.
```
(robust)>$ robust.py -n -a 26 -s ./.samples/Sample.exe -q
(robust)>$ echo $?
2
```
Common Fault codes:
```
-1 ---> Error connecting to the ATD Server
-2 ---> Error uploading file to the ATD Server
-3 ---> Analysis failed
-4 ---> Error getting report
-5 ---> Error Obtaining vmprofilelist
```
Malware ranking:
(If the severity level of the sample is 3 or above, it is generally regarded as a threat)
```
N/A -> Sample did not run
-1 --> Sample is white listed
0 ---> No malicious activity detected (None)
1 ---> Sample is slightly suspicious (Low)
2 ---> Sample is somewhat/probably is suspicious
3 ---> Sample is malicious (Medium)
4 ---> Sample is malicious
5 ---> Sample is malicious (Very High)
```
## robust-version-checker
You can quickly test your connection settings in the CLI.
```
$(robust)> robust-version-checker.py -u robust -p password. -i atd.example.com -n
Connection successful...
Session Value: g7aenj99pfp0gbrogfbqsd9085
User ID: 57
ATD ver: 4.2.2.16
ATD Box heartbeat: 1519939175
```
## robust-profiles
A tool designed to pull the *Analyzer Profile* policy list available to a specific user.
Pulling the Policy List - In order to submit a sample using `robust` you must identify the Analyzer Profile ID. `robust-profiles` assists in identifying the available profiles your user can submit samples to.
```
$(robust)> robust-profiles.py -n -l
ATD profiles: 10
Profile id: 1
Name: Android
OS: android
Run All Selected?: Off
******************
Profile id: 26
Name: Win XP Down Select (Online)
OS: winXPsp3
Run All Selected?: Off
******************
Profile id: 25
Name: Windows XP Full Run (Offline)
OS: winXPsp3
Run All Selected?: On
******************
Profile id: 24
Name: Windows XP Full Run (Online)
OS: winXPsp3
Run All Selected?: On
******************
```
## robust-search
A tool designed to search and return reports for a specific md5 hash.
```shell
(robust)$> $ robust-search.py -m 2F7568342339CDB8321B52FF7BEBE661 -n
(Sample.exe:2F7568342339CDB8321B52FF7BEBE661) = 2: "Sample probably is suspicious"
```
### Help details
`robust-search` has the options `-w` and `-t` to collect the proper report on the submission.
```
usage: robust-search.py [-h] [-u USER] [-p PASSWORD] [-i ATD IP] [-n] -m MD5
[-t {html,txt,xml,zip,json,ioc,stix,pdf,sample}]
[-w FILENAME] [--version] [-v | -q]
Robust McAfee ATD Python CLI tool
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-v, --verbosity increase output (v)erbosity
(default: None)
-q, --quiet (q)uiet all output
(default: False)
Authentication parameters:
-u USER (u)sername for the API of the ATD
(default: admin)
-p PASSWORD (p)assword for username
(default: password.)
-i ATD IP (i)p or hostname address of ATD
(default: atd.localhost.localdomain)
-n do (n)ot verify the SSL certificate for the communications
(default: False)
Search parameters:
-m MD5 (m)d5 32bit hash of the sample to search
(default: None)
Reporting parameters:
-t {html,txt,xml,zip,json,ioc,stix,pdf,sample}
(t)ype of report requested
(default: None)
-w FILENAME (w)rite filename for saving the requested report
(default: None)
```
## robust-watchdog
A tool that watches a directory recursively for any new files to submit.
Example CLI
```
usage: robust-watchdog.py [-h] -u USER [-p PASSWORD] -i ATD IP [-n] -a
ANALYZER_PROFILE -d DIRECTORY [-e] [-j MAXTHREADS]
[--version] [-v | -q]
Robust McAfee ATD Python CLI tool
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-v, --verbosity increase output (v)erbosity
(default: None)
-q, --quiet (q)uiet all output
(default: False)
Authentication parameters:
-u USER (u)sername for the API of the ATD
(default: None)
-p PASSWORD (p)assword for username
(default: None)
-i ATD IP (i)p or hostname address of ATD
(default: None)
-n do (n)ot verify the SSL certificate for the communications
(default: False)
Watch parameters:
-f (f)ollow and watch the directory for new files to submit
(default: True)
-a ANALYZER_PROFILE (a)nalyzer profile id to be used during analysis
(default: None)
-d DIRECTORY (d)irectory to watch for events
(default: None)
-e (e)xisting files in directory will be submitted
(default: False)
-j MAXTHREADS (j) max number of threads
(default: 1)
```
Let it run in a shell and open another one or the file browser to create files in the /path/to/directory. Since the handler is printing the results, the output will reflect the flags chosen similar to `robust.py`:
The `-e` flag can be passed to cause all existing files in the directory (recursively) to be submitted upon start.
```shell
(robust)$> robust-watchdog.py -a 26 -d ./ -n -e
.
...
.
.....
```
## robust-convict
`robust-convict` is a tool designed like `robust-watchdog` but its purpose is to help sort large directories of malware samples into directories, while downloading their corresponding reports.
Example Usage
```
robust-convict.py -n -a 26 -c ./tmp/clean/ -x ./tmp/dirty/ -r ./tmp/reports/ -z ./tmp/errors/ -d ./tmp/preprocess -j 10 -t zip -q
```
Options
```
usage: robust-convict.py [-h] [-u USER] [-p PASSWORD] [-i ATD IP] [-n] -a
ANALYZER_PROFILE -d DIRECTORY [-e] [-y SEVERITY]
[-c CLEANDIR] [-x DIRTYDIR] [-r REPORTDIR]
[-z ERRORDIR]
[-t {html,txt,xml,zip,json,ioc,stix,pdf,sample}]
[-j MAXTHREADS] [--version] [-v | -q]
Robust McAfee ATD Python CLI tool
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-v, --verbosity increase output (v)erbosity
(default: None)
-q, --quiet (q)uiet all output
(default: False)
Authentication parameters:
-u USER (u)sername for the API of the ATD
(default: robust)
-p PASSWORD (p)assword for username
(default: ****<.robust>*****)
-i ATD IP (i)p or hostname address of ATD
(default: atd.localhost.localdomain)
-n do (n)ot verify the SSL certificate for the communications
(default: True)
Watch parameters:
-f (f)ollow and watch the directory for new files to submit
(default: False)
-a ANALYZER_PROFILE (a)nalyzer profile id to be used during analysis
(default: None)
-d DIRECTORY (d)irectory to watch for events
(default: None)
-e (e)xisting files in directory will be submitted
(default: False)
-j MAXTHREADS (j) max number of threads
(default: 1)
Convict parameters:
-y SEVERITY (y) treat sample as dirty with this severity [0-5] or higher
(default: 3)
-c CLEANDIR (c) move clean files to this directory
(default: ~/robust/clean/)
-x DIRTYDIR (x) move processed dirty files to this directory
(default: ~/robust/malware/)
-r REPORTDIR (r) save reports to this directory
(default: ~/robust/reports/)
-z ERRORDIR (z) move error or skip files to this directory
(default: ~/robust/errors/)
-t {html,txt,xml,zip,json,ioc,stix,pdf,sample}
(t)ype of report requested
(default: None)
```
## robust-reporter
`robust-reporter` is a tool designed to quickly summarize the downloaded `*.json` files in your 'reports' directory.
Options
```
usage: robust-reporter.py [-h] [-r REPORTDIR] [--version] [-v | -q]
Robust McAfee ATD Python CLI tool
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-v, --verbosity increase output (v)erbosity
(default: None)
-q, --quiet (q)uiet all output
(default: False)
Reporter parameters:
-r REPORTDIR (r) reports are processed or stored using this directory
(default: ~/robust/reports/)
```
Sample Run
```
$ robust-reporter.py
82344C9864B0F1D120C0D1AB7F7C54C3 (---) : Somewhat/probably is suspicious(Low-Medium) - 24sec
D012492123E4CF0CFB3A017A2E92C077 (Malware.Dynamic) : Malicious(High) - 194sec
DB273A97C54E3E23F411EA7C9B5A82DA (Malware.Dynamic) : Malicious (Medium) - 53sec
165A36C02B3FAAF4DE38F93A3DCB821B (---) : Somewhat/probably is suspicious(Low-Medium) - 36sec
D10195670651A40C46C22972CD839E89 (Artemis!D10195670651) : Malicious (Very High) - 32sec
8271093E0E78574428BBDDDA6F34A980 (Malware.Dynamic) : Malicious(High) - 192sec
86DAFA0262BF217F5344A3B057C0DB06 (Malware.Dynamic) : Malicious(High) - 193sec
8DA4CDC3E2EE16021F237EA7A043DA8E (Malware.Dynamic) : Malicious(High) - 191sec
```
## Tuning for Linux File Watchers
### iNotify Tuning Parameters
The inotify(7) subsystem has three important tunings that impact robust's directory watching.
```
/proc/sys/fs/inotify/max_user_instances impacts how many different root dirs you can watch.
/proc/sys/fs/inotify/max_user_watches impacts how many dirs you can watch across all watched roots.
/proc/sys/fs/inotify/max_queued_events impacts how likely it is that your system will experience a notification overflow.
```
You obviously need to ensure that `max_user_instances` and `max_user_watches` are set so that the system is capable of keeping track of your files.
`max_queued_events` is important to size correctly; if it is too small, the kernel will drop events and robust won't be able to report on them. Making this value bigger reduces the risk of this happening.
# Developers
## Install Development
```
$(robust)> pip install -r devel-requirements.txt
```
## Development Tasks
```shell
(robust)$> invoke -l
Available tasks:
build Build the setup.py
clean Clean up docs, bytecode, and extras
codestats Run flake8 PeP8 tests for code stats
release ``version`` should be a string like '0.4' or '1.0'.
smell Run flake8 PeP8 tests
test Run Unit tests
```
### Running the Test Suite
Nose is run via `invoke test`
```
Clearing rm -rf build
Clearing rm -rf dist
Clearing rm -rf *.egg-info
Clearing rm -rf pyclient.log
Clearing rm -rf **/**/*.pyc
Clearing rm -rf **/*.pyc
Clearing rm -rf ./*.pyc
...................
-----------------------------------------------------------------------------
19 tests run in 0.3 seconds (19 tests passed)
117 E501 line too long (97 > 79 characters)
7 F401 'ratd' imported but unused
1 F841 local variable 'rb_rtnv' is assigned to but never used
1 N802 function name should be lowercase
5 W601 .has_key() is deprecated, use 'in'
```
### Additional LICENSE information
A modified Fork of `atdcli.py` (Carlos Munoz - 2014).
https://pypi.python.org/pypi/atd
## VX Workshop Appliance Option
There is a fully operational Xubuntu 14.04 liveCD that includes:
* robust - https://github.com/shadowbq/robust-atd
* maltrieve - https://github.com/shadowbq/maltrieve
* vxcage - https://github.com/shadowbq/vxcage
It also includes
* hexeditors
* static analysis tools
* google chrome
* vmtools
* etc..
xubuntu-14.04.4-desktop-x86_64-VX-Workshop-0.4.iso (~ 1.2 GB)
Available to download with READMEs here: https://goo.gl/flcvew
| /robust-atd-0.5.2.tar.gz/robust-atd-0.5.2/README.md | 0.450359 | 0.858778 | README.md | pypi |
import argparse
import ConfigParser
import os.path
import ratd
import ratd.utils as utils
def check_md5(value):
    """argparse ``type=`` validator for an MD5 hex digest.

    Accepts exactly 32 hexadecimal characters (case-insensitive) and returns
    the value unchanged; anything else raises argparse.ArgumentTypeError so
    argparse reports a clean usage error.  Previously only the length was
    checked, so 32 arbitrary (non-hex) characters slipped through.
    """
    if len(value) != 32 or not set(value) <= set('0123456789abcdefABCDEF'):
        raise argparse.ArgumentTypeError("%s is an invalid md5 hash value" % value)
    return value
def slash_dir(value):
    """argparse ``type=`` validator for a directory path ending in '/'.

    Expands a leading '~' to the user's home directory before returning.
    Raises argparse.ArgumentTypeError for paths without a trailing slash --
    including the empty string, which previously crashed with IndexError
    (the old check indexed ``value[len(value)-1]``).
    """
    if not value.endswith("/"):
        raise argparse.ArgumentTypeError("%s should end in a slash" % value)
    return os.path.expanduser(value)
class CliArgError(Exception):
    """Raised when CliArgs is constructed with an unrecognized tool name."""

    def __init__(self, value):
        # Keep the offending tool name for callers and for __str__.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class CliArgs():
    """Argument-parser builder shared by all robust-* command line tools.

    Builds an argparse parser whose option groups depend on ``tool``
    ('authOnly', 'profile', 'sample', 'search', 'reporter', 'watch' or
    'convict'), pre-seeding defaults from the user's ``~/.robust`` config
    file.  Parsed options are stored as attributes on this instance
    (``namespace=self``).
    """
    def __init__(self, tool, explicit=None):
        """Build the parser for *tool* and parse the arguments immediately.

        tool     -- which robust-* tool's option set to build; an unknown
                    name raises CliArgError.
        explicit -- optional argv-style list parsed instead of sys.argv.
        """
        # Help strings shared by all tools; argparse expands %(default)s.
        self.arg_dict = {
            'user': '(u)sername for the API of the ATD\n\t\t(default: %(default)s)',
            'password': '(p)assword for username\n\t\t(default: %(default)s) ',
            'password_secured': '(p)assword for username\n\t\t(default: ****<.robust>*****) ',
            'ip': '(i)p or hostname address of ATD\n\t\t(default: %(default)s) ',
            'sample': '(s)ample or file to be analyzed\n\t\t(default: %(default)s)',
            'skipssl': 'do (n)ot verify the SSL certificate for the communications\n\t\t(default: %(default)s)',
            'analyzer': '(a)nalyzer profile id to be used during analysis\n\t\t(default: %(default)s)',
            'profiles': '(l)ist analyzer profiles available\n\t\t(default: %(default)s)',
            'directory': '(d)irectory to watch for events\n\t\t(default: %(default)s)',
            'existing': '(e)xisting files in directory will be submitted\n\t\t(default: %(default)s)',
            'rType': '(t)ype of report requested\n\t\t(default: %(default)s)',
            'rPrint': '(o)utput type for reporter \n\t\t(default: %(default)s)',
            'follow': '(f)ollow and watch the directory for new files to submit\n\t\t(default: %(default)s)',
            'filename': '(w)rite report to filename for saving the requested report\n\t\t(default: %(default)s)',
            'md5': '(m)d5 32bit hash of the sample to search\n\t\t(default: %(default)s)',
            'cleandir': '(c) move clean files to this directory\n\t\t(default: %(default)s)',
            'dirtydir': '(x) move processed dirty files to this directory\n\t\t(default: %(default)s)',
            'reportdir': '(r) reports are processed or stored using this directory\n\t\t(default: %(default)s)',
            'errordir': '(z) move error or skip files to this directory \n\t\t(default: %(default)s)',
            'severity': '(y) treat sample as dirty with this severity [0-5] or higher\n\t\t(default: %(default)s)',
            'maxthreads': '(j) max number of threads\n\t\t(default: %(default)s)',
            'quiet': '(q)uiet all output\n\t\t(default: %(default)s)',
            'verbosity': 'increase output (v)erbosity\n\t\t(default: %(default)s)'
        }
        self.description = 'Robust McAfee ATD Python CLI tool'
        self.epilog = ''
        # Defaults loaded from ~/.robust (or hard-coded fallbacks).
        # NOTE(review): dot_robust_helper() always returns every key, so the
        # `'x' in self.dot_robust` checks below are always true; the
        # meaningful switch is the truthiness tests in auth_args().
        self.dot_robust = self.dot_robust_helper()
        self.parser = argparse.ArgumentParser(epilog=self.epilog, description=self.description, formatter_class=argparse.RawTextHelpFormatter)
        if tool == 'authOnly':
            self.auth_args()
        elif tool == 'profile':
            self.auth_args()
            profile_group = self.parser.add_argument_group('Profile parameters')
            profile_group.add_argument('-l', required=True, action='store_true', dest='listprofiles', help=self.arg_dict['profiles'])
        elif tool == 'sample':
            self.auth_args()
            self.sample_args()
        elif tool == 'search':
            self.auth_args()
            self.search_args()
            self.output_args()
        elif tool == 'reporter':
            reporter_group = self.parser.add_argument_group('Reporter parameters')
            reporter_group.add_argument('-o', required=False, action='store', dest='rPrint', default='txt', choices=['txt', 'csv'], help=self.arg_dict['rPrint'])
            if 'reportdir' in self.dot_robust:
                reporter_group.add_argument('-r', required=False, action='store', type=slash_dir, default=self.dot_robust['reportdir'], dest='reportdir', help=self.arg_dict['reportdir'])
            else:
                reporter_group.add_argument('-r', required=True, action='store', type=slash_dir, dest='reportdir', help=self.arg_dict['reportdir'])
        elif tool == 'watch' or tool == 'convict':
            self.auth_args()
            watch_group = self.parser.add_argument_group('Watch parameters')
            # watch mode follows the directory by default; convict does not.
            if tool == 'watch':
                watch_group.add_argument('-f', required=False, action='store_true', default=True, dest='follow', help=self.arg_dict['follow'])
            else:
                watch_group.add_argument('-f', required=False, action='store_true', dest='follow', help=self.arg_dict['follow'])
            watch_group.add_argument('-a', required=True, action='store', dest='analyzer_profile', help=self.arg_dict['analyzer'])
            watch_group.add_argument('-d', required=True, action='store', dest='directory', help=self.arg_dict['directory'])
            watch_group.add_argument('-e', required=False, action='store_true', dest='existing', help=self.arg_dict['existing'])
            # SUPPRESSION flag for hidden submission
            watch_group.add_argument('--sample', dest='file_to_upload', help=argparse.SUPPRESS)
            if tool == 'convict':
                convict_group = self.parser.add_argument_group('Convict parameters')
                if 'severity' in self.dot_robust:
                    convict_group.add_argument('-y', required=False, action='store', default=self.dot_robust['severity'], dest='severity', help=self.arg_dict['severity'])
                else:
                    convict_group.add_argument('-y', required=False, action='store', dest='severity', help=self.arg_dict['severity'])
                if 'cleandir' in self.dot_robust:
                    convict_group.add_argument('-c', required=False, action='store', type=slash_dir, default=self.dot_robust['cleandir'], dest='cleandir', help=self.arg_dict['cleandir'])
                else:
                    convict_group.add_argument('-c', required=True, action='store', type=slash_dir, dest='cleandir', help=self.arg_dict['cleandir'])
                if 'dirtydir' in self.dot_robust:
                    convict_group.add_argument('-x', required=False, action='store', type=slash_dir, default=self.dot_robust['dirtydir'], dest='dirtydir', help=self.arg_dict['dirtydir'])
                else:
                    convict_group.add_argument('-x', required=True, action='store', type=slash_dir, dest='dirtydir', help=self.arg_dict['dirtydir'])
                if 'reportdir' in self.dot_robust:
                    convict_group.add_argument('-r', required=False, action='store', type=slash_dir, default=self.dot_robust['reportdir'], dest='reportdir', help=self.arg_dict['reportdir'])
                else:
                    convict_group.add_argument('-r', required=True, action='store', type=slash_dir, dest='reportdir', help=self.arg_dict['reportdir'])
                if 'errordir' in self.dot_robust:
                    convict_group.add_argument('-z', required=False, action='store', type=slash_dir, default=self.dot_robust['errordir'], dest='errordir', help=self.arg_dict['errordir'])
                else:
                    convict_group.add_argument('-z', required=True, action='store', type=slash_dir, dest='errordir', help=self.arg_dict['errordir'])
                convict_group.add_argument('-t', required=False, action='store', dest='rType', choices=['html', 'txt', 'xml', 'zip', 'json', 'ioc', 'stix', 'pdf', 'sample'], help=self.arg_dict['rType'])
            if 'maxthreads' in self.dot_robust:
                watch_group.add_argument('-j', required=False, action='store', default=self.dot_robust['maxthreads'], dest='maxthreads', help=self.arg_dict['maxthreads'])
            else:
                watch_group.add_argument('-j', required=False, action='store', dest='maxthreads', help=self.arg_dict['maxthreads'])
        else:
            raise CliArgError(tool)
        self.common_args()
        # Parse into this instance so options appear as attributes.
        if explicit is None:
            self.parser.parse_args(namespace=self)
        else:
            self.parser.parse_args(args=explicit, namespace=self)
    def config_section_map(self, config, section, defaults):
        """Return *section*'s options as a dict, back-filled from *defaults*.

        Any option that cannot be read falls back to its default (or None).
        """
        dict1 = {}
        options = config.options(section)
        for option in options:
            try:
                dict1[option] = config.get(section, option)
                # NOTE(review): config.get() returns strings, so this branch
                # looks unreachable -- and DebugPrint is undefined (would
                # raise NameError if it ever ran).  Confirm before relying
                # on it.
                if dict1[option] == -1:
                    DebugPrint("skip: %s" % option)
            except:  # NOTE(review): bare except hides real config errors.
                try:
                    dict1[option] = defaults[option]
                except:
                    print("exception on %s!" % option)
                    dict1[option] = None
        # Back-fill defaults not present in the config file at all.
        # NOTE(review): iteritems() is Python 2 only, consistent with the
        # ConfigParser import used by this module.
        for k,v in defaults.iteritems():
            if not k in dict1:
                dict1[k] = v
        return dict1
    def dot_robust_helper(self):
        """Load ~/.robust ([auth]/[connection]/[storage]) into one dict.

        Missing file or sections fall back to the hard-coded defaults, so
        the returned dict always contains every known key.
        """
        config = ConfigParser.ConfigParser({})
        fname = os.path.expanduser("~/.robust")
        # False means "not configured" -- auth_args() tests truthiness.
        auth_defaults = {'user': False, 'password': False}
        connection_defaults = {'ip': False, 'skipssl': False, 'maxthreads': 1}
        storage_defaults = {'severity': 3, 'cleandir': '~/robust/clean/', 'dirtydir': '~/robust/malware/', 'reportdir': '~/robust/reports/', 'errordir': '~/robust/errors/'}
        if os.path.isfile(fname):
            config.read(fname)
            if config.has_section("auth"):
                auth = self.config_section_map(config, "auth", auth_defaults)
                dot_robust_auth = {
                    'user': auth["user"],
                    'password': auth["password"]
                }
            else:
                dot_robust_auth = auth_defaults
            if config.has_section("connection"):
                connection = self.config_section_map(config, "connection", connection_defaults)
                dot_robust_connection = {
                    'ip': connection["ip"],
                    'skipssl': connection["skipssl"],
                    'maxthreads': connection["maxthreads"]
                }
            else:
                dot_robust_connection = connection_defaults
            if config.has_section("storage"):
                storage = self.config_section_map(config, "storage", storage_defaults)
                dot_robust_storage = {
                    'severity': storage["severity"],
                    'cleandir': storage["cleandir"],
                    'dirtydir': storage["dirtydir"],
                    'reportdir': storage["reportdir"],
                    'errordir': storage["errordir"]
                }
            else:
                dot_robust_storage = storage_defaults
            dot_robust_dict = utils.merge_dicts(dot_robust_auth, dot_robust_connection, dot_robust_storage)
        else:
            dot_robust_dict = utils.merge_dicts(auth_defaults, connection_defaults, storage_defaults)
        return dot_robust_dict
    def common_args(self):
        """Add --version and the mutually exclusive -v / -q flags."""
        self.parser.add_argument('--version', action='version', version=ratd.__version__)
        exclusive = self.parser.add_mutually_exclusive_group()
        exclusive.add_argument('-v', "--verbosity", action="count", help=self.arg_dict['verbosity'])
        exclusive.add_argument('-q', "--quiet", required=False, action='store_true', dest='quiet', help=self.arg_dict['quiet'])
    def auth_args(self):
        """Add -u/-p/-i/-n; each is optional when ~/.robust supplies it."""
        auth_group = self.parser.add_argument_group('Authentication parameters')
        if self.dot_robust['user']:
            auth_group.add_argument('-u', required=False, action='store', default=self.dot_robust['user'], dest='user', help=self.arg_dict['user'], metavar='USER')
        else:
            auth_group.add_argument('-u', required=True, action='store', dest='user', help=self.arg_dict['user'], metavar='USER')
        if self.dot_robust['password']:
            # Masked help text so the configured password never echoes.
            auth_group.add_argument('-p', required=False, action='store', default=self.dot_robust['password'], dest='password', help=self.arg_dict['password_secured'], metavar='PASSWORD')
        else:
            auth_group.add_argument('-p', required=False, action='store', dest='password', help=self.arg_dict['password'], metavar='PASSWORD')
        if self.dot_robust['ip']:
            auth_group.add_argument('-i', required=False, action='store', default=self.dot_robust['ip'], dest='ip', help=self.arg_dict['ip'], metavar='ATD IP')
        else:
            auth_group.add_argument('-i', required=True, action='store', dest='ip', help=self.arg_dict['ip'], metavar='ATD IP')
        if self.dot_robust['skipssl']:
            auth_group.add_argument('-n', required=False, action='store_true', default=self.dot_robust['skipssl'], dest='skipssl', help=self.arg_dict['skipssl'])
        else:
            auth_group.add_argument('-n', required=False, action='store_true', dest='skipssl', help=self.arg_dict['skipssl'])
    def sample_args(self):
        """Add the -s (file) and -a (analyzer profile) submission options."""
        sample_group = self.parser.add_argument_group('Sample parameters')
        sample_group.add_argument('-s', required=True, action='store', dest='file_to_upload', help=self.arg_dict['sample'])
        sample_group.add_argument('-a', required=True, action='store', dest='analyzer_profile', help=self.arg_dict['analyzer'])
    def search_args(self):
        """Add the -m (md5) search option."""
        search_group = self.parser.add_argument_group('Search parameters')
        search_group.add_argument('-m', required=True, type=check_md5, action='store', dest='md5', help=self.arg_dict['md5'])
    def output_args(self):
        """Add the -t (report type) and -w (output filename) options."""
        output_group = self.parser.add_argument_group('Reporting parameters')
        output_group.add_argument('-t', required=False, action='store', dest='rType', choices=['html', 'txt', 'xml', 'zip', 'json', 'ioc', 'stix', 'pdf', 'sample'], help=self.arg_dict['rType'])
        output_group.add_argument('-w', required=False, action='store', dest='filename', help=self.arg_dict['filename'])
import numpy as np
from scipy.special import softmax
class CELoss(object):
    """Base machinery for binned calibration-error metrics.

    Workflow (driven by subclasses):
      1. compute_bin_boundaries() -- choose confidence bins
      2. get_probabilities()      -- cache probabilities / predictions
      3. binary_matrices()        -- (optional) per-class correctness
      4. compute_bins()           -- per-bin proportion/accuracy/confidence

    After compute_bins(), ``bin_prop``, ``bin_acc``, ``bin_conf`` and
    ``bin_score`` (|confidence - accuracy| per bin) are populated.
    """

    def compute_bin_boundaries(self, probabilities=None):
        """Populate self.bin_lowers / self.bin_uppers.

        probabilities -- optional 1-D array; when omitted (or empty) the
        [0, 1] range is split into ``self.n_bins`` equal-width bins,
        otherwise equal-mass boundaries are taken from the sorted values.
        """
        # None sentinel instead of the old np.array([]) default argument;
        # an explicitly passed empty array still selects uniform spacing.
        if probabilities is None or probabilities.size == 0:
            # Uniform bin spacing over [0, 1].
            bin_boundaries = np.linspace(0, 1, self.n_bins + 1)
            self.bin_lowers = bin_boundaries[:-1]
            self.bin_uppers = bin_boundaries[1:]
        else:
            # Adaptive spacing: each bin holds ~n_data / n_bins samples.
            bin_n = int(self.n_data / self.n_bins)
            bin_boundaries = np.array([])
            probabilities_sort = np.sort(probabilities)
            for i in range(0, self.n_bins):
                bin_boundaries = np.append(bin_boundaries, probabilities_sort[i * bin_n])
            bin_boundaries = np.append(bin_boundaries, 1.0)
            self.bin_lowers = bin_boundaries[:-1]
            self.bin_uppers = bin_boundaries[1:]

    def get_probabilities(self, output, labels, logits):
        """Cache probabilities, confidences, predictions and accuracies.

        output -- (n_data, n_class) scores; softmaxed along axis 1 when
                  ``logits`` is True, otherwise already probabilities.
        labels -- (n_data,) integer class labels.
        """
        if logits:
            self.probabilities = softmax(output, axis=1)
        else:
            self.probabilities = output
        self.labels = labels
        self.confidences = np.max(self.probabilities, axis=1)
        self.predictions = np.argmax(self.probabilities, axis=1)
        self.accuracies = np.equal(self.predictions, labels)

    def binary_matrices(self):
        """Build self.acc_matrix: (n_data, n_class) one-vs-rest correctness."""
        idx = np.arange(self.n_data)
        # One-hot matrices for predictions and ground-truth labels.
        pred_matrix = np.zeros([self.n_data, self.n_class])
        label_matrix = np.zeros([self.n_data, self.n_class])
        pred_matrix[idx, self.predictions] = 1
        label_matrix[idx, self.labels] = 1
        self.acc_matrix = np.equal(pred_matrix, label_matrix)

    def compute_bins(self, index=None):
        """Fill the per-bin statistics.

        index -- when None, bin the max-probability confidences; otherwise
                 bin the probabilities of class ``index`` (requires
                 binary_matrices() to have been called first).
        """
        self.bin_prop = np.zeros(self.n_bins)
        self.bin_acc = np.zeros(self.n_bins)
        self.bin_conf = np.zeros(self.n_bins)
        self.bin_score = np.zeros(self.n_bins)

        # `is None` rather than `== None`: identity is the idiomatic (and
        # PEP 8 mandated) test for the None sentinel.
        if index is None:
            confidences = self.confidences
            accuracies = self.accuracies
        else:
            confidences = self.probabilities[:, index]
            accuracies = self.acc_matrix[:, index]

        for i, (bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):
            # Membership in the half-open bin (bin_lower, bin_upper].
            in_bin = np.greater(confidences, bin_lower.item()) * np.less_equal(confidences, bin_upper.item())
            self.bin_prop[i] = np.mean(in_bin)
            if self.bin_prop[i].item() > 0:
                # |confidence - accuracy| is the per-bin calibration gap.
                self.bin_acc[i] = np.mean(accuracies[in_bin])
                self.bin_conf[i] = np.mean(confidences[in_bin])
                self.bin_score[i] = np.abs(self.bin_conf[i] - self.bin_acc[i])
class MaxProbCELoss(CELoss):
    """Shared driver for calibration errors computed over the top-label
    (maximum) predicted probability."""

    def loss(self, output, labels, n_bins = 15, logits = True):
        """Populate per-bin statistics; subclasses reduce them to a scalar."""
        self.n_bins = n_bins
        # Probabilities and uniform bin edges are independent of each other;
        # both must be in place before the bins are filled.
        super().get_probabilities(output, labels, logits)
        super().compute_bin_boundaries()
        super().compute_bins()
class ECELoss(MaxProbCELoss):
    """Expected Calibration Error: bin-proportion-weighted |conf - acc|.

    Reference: http://people.cs.pitt.edu/~milos/research/AAAI_Calibration.pdf
    """

    def loss(self, output, labels, n_bins = 15, logits = True):
        super().loss(output, labels, n_bins, logits)
        return np.dot(self.bin_score, self.bin_prop)
class MCELoss(MaxProbCELoss):
    """Maximum Calibration Error: the worst |conf - acc| over all bins."""

    def loss(self, output, labels, n_bins = 15, logits = True):
        super().loss(output, labels, n_bins, logits)
        return self.bin_score.max()
class OELoss(MaxProbCELoss):
    """Overconfidence Error: penalizes only bins whose confidence exceeds
    their accuracy. Good in high risk applications where confident but wrong
    predictions can be especially harmful.

    Reference: https://arxiv.org/abs/1905.11001
    """

    def loss(self, output, labels, n_bins = 15, logits = True):
        super().loss(output, labels, n_bins, logits)
        # Amount by which confidence overshoots accuracy, clipped at zero.
        overshoot = np.maximum(self.bin_conf - self.bin_acc, np.zeros(self.n_bins))
        return np.dot(self.bin_prop, self.bin_conf * overshoot)
class SCELoss(CELoss):
    """Static Calibration Error: classwise ECE averaged over all classes,
    using uniform bin boundaries.

    Reference: https://arxiv.org/abs/1904.01685
    """

    def loss(self, output, labels, n_bins = 15, logits = True):
        self.n_bins = n_bins
        self.n_data = len(output)
        self.n_class = len(output[0])
        super().compute_bin_boundaries()
        super().get_probabilities(output, labels, logits)
        super().binary_matrices()
        total = 0.0
        for class_idx in range(self.n_class):
            super().compute_bins(class_idx)
            total += np.dot(self.bin_prop, self.bin_score)
        return total / self.n_class
class TACELoss(CELoss):
    """Thresholded Adaptive Calibration Error: probabilities below
    ``threshold`` are zeroed out, and adaptive (equal-mass) bin boundaries
    are recomputed per class before averaging the classwise errors."""

    def loss(self, output, labels, threshold = 0.01, n_bins = 15, logits = True):
        self.n_bins = n_bins
        self.n_data = len(output)
        self.n_class = len(output[0])
        super().get_probabilities(output, labels, logits)
        # Drop probabilities below the threshold before binning.
        self.probabilities[self.probabilities < threshold] = 0
        super().binary_matrices()
        total = 0.0
        for class_idx in range(self.n_class):
            super().compute_bin_boundaries(self.probabilities[:, class_idx])
            super().compute_bins(class_idx)
            total += np.dot(self.bin_prop, self.bin_score)
        return total / self.n_class
class ACELoss(TACELoss):
    """Adaptive Calibration Error: TACELoss with the threshold fixed at 0."""

    def loss(self, output, labels, n_bins = 15, logits = True):
        return super().loss(output, labels, threshold=0.0, n_bins=n_bins, logits=logits)
import torch.nn as nn
import torch
import math
class SoftMaxLossFirstPart(nn.Module):
    """Temperature-scaled linear classifier head.

    Computes ``(features @ W^T + b) / temperature``; the temperature is a
    fixed (non-trainable) parameter.
    """

    def __init__(self, num_features, num_classes, temperature=1.0):
        super(SoftMaxLossFirstPart, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        # Stored as a parameter so it follows the module across devices,
        # but frozen so it is never updated by the optimizer.
        self.temperature = nn.Parameter(torch.tensor([temperature]), requires_grad=False)
        self.weights = nn.Parameter(torch.Tensor(num_classes, num_features))
        self.bias = nn.Parameter(torch.Tensor(num_classes))
        bound = math.sqrt(1.0 / self.num_features)
        nn.init.uniform_(self.weights, a=-bound, b=bound)
        nn.init.zeros_(self.bias)

    def forward(self, features):
        """Return temperature-scaled logits for a batch of feature vectors."""
        scaled_logits = (features.matmul(self.weights.t()) + self.bias) / self.temperature
        return scaled_logits

    def extra_repr(self):
        return 'num_features={}, num_classes={}, bias={}'.format(
            self.num_features, self.num_classes, self.bias is not None)
class SoftMaxLossSecondPart(nn.Module):
    """Cross-entropy loss wrapper; in debug mode also returns the per-sample
    target-class (intra) and non-target-class (inter) logits for analysis.

    NOTE(review): tensors are moved with hard-coded ``.cuda(self.gpu)``
    calls, so this module assumes a CUDA device is available — confirm
    before running on CPU-only hosts.
    """
    def __init__(self, model_classifier, debug=False, gpu=None):
        super(SoftMaxLossSecondPart, self).__init__()
        # Classifier head kept for API symmetry with other loss variants.
        self.model_classifier = model_classifier
        self.loss = nn.CrossEntropyLoss()
        self.debug = debug
        self.gpu = gpu
    def preprocess(self, inputs, targets):
        # Identity hook: no input transformation for the plain softmax loss.
        return inputs, targets
    def forward(self, logits, targets):
        num_classes = logits.size(1)
        # One-hot mask selecting each sample's target-class logit position.
        targets_one_hot = torch.eye(num_classes)[targets].long().cuda(self.gpu)
        loss = self.loss(logits, targets)
        if not self.debug:
            return loss
        else:
            # Replace the unwanted group with +Inf, then filter the Inf
            # entries to separate target-class from non-target-class logits.
            intra_inter_logits = torch.where(targets_one_hot != 0, logits, torch.Tensor([float('Inf')]).cuda(self.gpu))
            inter_intra_logits = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(self.gpu), logits)
            intra_logits = intra_inter_logits[intra_inter_logits != float('Inf')].detach().cpu().numpy()
            inter_logits = inter_intra_logits[inter_intra_logits != float('Inf')].detach().cpu().numpy()
            return loss, 1.0, inter_logits, intra_logits
import numpy as np
from .scores import get_scores
import sklearn.metrics
from .general import get_outputs_labels_and_metrics
def find_index_of_nearest(value, array):
    """Return the index of the entry in ``array`` nearest to ``value``."""
    candidates = np.asarray(array)
    distances = np.abs(candidates - value)
    return distances.argmin()
def get_ood_metrics_from_scores(in_data_scores, out_data_scores, fpr=0.05):
    """Compute OOD-detection metrics from score dictionaries.

    In-distribution samples are the positive class. Returns a dict with the
    ROC and precision-recall curves plus TPR@FPR, AUROC, AUPR-in and
    AUPR-out summaries. Both score dicts must share the same ``type``.
    """
    assert in_data_scores['type'] == out_data_scores["type"]
    print("====>>>> getting ood detection metrics from scores <<<<====")
    n_in = in_data_scores['values'].size(0)
    n_out = out_data_scores['values'].size(0)
    y_true = np.concatenate((np.ones((n_in,), dtype=int), np.zeros((n_out,), dtype=int)), axis=None)
    y_pred = np.concatenate((in_data_scores['values'].cpu(), out_data_scores['values'].cpu()), axis=None)

    fpr_list, tpr_list, fpr_tpr_thresholds = sklearn.metrics.roc_curve(y_true, y_pred)
    tpr_at_given_fpr = tpr_list[find_index_of_nearest(fpr, fpr_list)]
    print("tpr_at_given_fpr:\t", '{:.2f}'.format(100 * tpr_at_given_fpr))

    auroc = sklearn.metrics.auc(fpr_list, tpr_list)
    print("auroc:\t\t\t", '{:.2f}'.format(100 * auroc))

    precision_in, recall_in, precision_in_recall_in_thresholds = sklearn.metrics.precision_recall_curve(y_true, y_pred)
    auprin = sklearn.metrics.auc(recall_in, precision_in)
    print("auprin:\t\t\t", '{:.2f}'.format(100 * auprin))

    # Flip the score sign so OOD samples become the high-score class.
    y_pred = -y_pred
    precision_out, recall_out, precision_out_recall_out_thresholds = sklearn.metrics.precision_recall_curve(y_true, y_pred, pos_label=0)
    auprout = sklearn.metrics.auc(recall_out, precision_out)
    print("auprout:\t\t", '{:.2f}'.format(100 * auprout))

    return {
        'score_type': in_data_scores['type'],
        'fpr': fpr_list,
        'tpr': tpr_list,
        'fpr_tpr_thresholds': fpr_tpr_thresholds,
        'precision_in': precision_in,
        'recall_in': recall_in,
        'precision_in_recall_in_thresholds': precision_in_recall_in_thresholds,
        'precision_out': precision_out,
        'recall_out': recall_out,
        'precision_out_recall_out_thresholds': precision_out_recall_out_thresholds,
        'tpr_at_given_fpr': tpr_at_given_fpr,
        'auroc': auroc,
        'auprin': auprin,
        'auprout': auprout,
    }
def get_ood_metrics(model, in_data_valid_loader, out_data_loader, score_type, fpr=0.05, gpu=None):
    """Run the model over the in- and out-of-distribution loaders, score the
    outputs, and return the OOD-detection metrics dictionary."""
    print("====>>>> getting ood detection metrics <<<<====")
    in_results = get_outputs_labels_and_metrics(model, in_data_valid_loader, gpu=gpu)
    in_data_scores = get_scores(in_results["outputs"], score_type=score_type)
    out_results = get_outputs_labels_and_metrics(model, out_data_loader, gpu=gpu)
    out_data_scores = get_scores(out_results["outputs"], score_type=score_type)
    return get_ood_metrics_from_scores(in_data_scores, out_data_scores, fpr=fpr)
def get_ood_detections(model, inputs, thresholds, fpr="0.05", gpu=None):
    """Threshold model outputs to flag in-distribution detections.

    Args:
        model: the trained classifier (moved to the CUDA device).
        inputs: batch tensor to score.
        thresholds: dict with 'score_type' and a per-FPR 'values' mapping.
        fpr (str): key into ``thresholds['values']`` (e.g. "0.05").
        gpu: CUDA device index (None for the current default device).

    Returns:
        dict with the score type and a boolean detection tensor.
    """
    model.cuda(gpu)
    model.eval()
    # BUG FIX: Tensor.cuda() is not in-place — it returns a copy. The
    # original discarded the result and ran the model on the CPU tensor.
    inputs = inputs.cuda(gpu, non_blocking=True)
    detections = model(inputs) > thresholds['values'][fpr]
    results = {}
    results['score_type'] = thresholds['score_type']
    results['values'] = detections
    return results
# Robust DeID: De-Identification of Medical Notes using Transformer Architectures
[](https://zenodo.org/badge/latestdoi/458346577)
This repository was used to train and evaluate various de-identification models and strategies on medical notes from the I2B2-DEID dataset and the MassGeneralBrigham (MGB) network.
The models and strategies are extensible and can be used on other datasets as well. Trained models are published on huggingface under the [OBI organization](https://huggingface.co/obi).
Main features are:
1. Transformer models - Any transformer model from the [huggingface](https://huggingface.co) library can be used for training. We make available a RoBERTa [Liu et al., 2019](https://arxiv.org/pdf/1907.11692.pdf) model and a ClinicalBERT [Alsentzer et al., 2019](https://arxiv.org/pdf/1904.03323.pdf) model fine-tuned for de-identification on huggingface: [obi_roberta_deid](https://huggingface.co/obi/deid_roberta_i2b2), [obi_bert_deid](https://huggingface.co/obi/deid_bert_i2b2). Both can be used for testing (forward pass).
2. Recall biased thresholding - Ability to use classification bias to aggressively remove PHI from notes. This is a safer and more robust option when working with sensitive data like medical notes.
3. Custom clinical tokenizer - Includes 60 regular expressions based on the structure and information generally found in medical notes. This tokenizer resolves common typographical errors and missing spaces that occur in clinical notes.
4. Context enhancement - Option to add on additional tokens to a given sequence as context on the left and right. These tokens can be used only as context, or we can also train on these tokens (which essentially mimics a sliding window approach). The reason for including context tokens was to provide additional context especially for peripheral tokens in a given sequence.
Since de-identification is a sequence labeling task, this tool can be applied to any other sequence labeling task as well.\
More details on how to use this tool, the format of data and other useful information is presented below.
Comments, feedback and improvements are welcome and encouraged!
## Dataset Annotations
* The guidelines for the dataset annotation and prodigy setup can be found here:
[Annotation guidelines](./AnnotationGuidelines.md)
## Installation
### Dependencies
* You can either install the dependencies using conda or pip. Both are specified below.
* We developed this package using the conda environment specified in [deid.yml](./deid.yml). You can create the environment using this file, and it will install the required dependencies.
* Robust De-ID requires the packages specified in the [requirements.txt](./requirements.txt) file. You can use pip install to install these packages.
* We used the conda approach and activated the **deid** conda environment for building and testing this package
```shell
conda env create -f deid.yml
conda activate deid
```
### Robust De-Id
* To install robust-deid, first install the dependencies (as mentioned above) and then do a pip install of robust de-id package.
```shell
pip install robust-deid
```
## Data Format
* The format of the data differs slightly when training a model to running the forward pass.
* The data is in the json format, where we store the notes in a jsonl file. Each line in this file is a json object that refers to one note.
* A jsonl file will have multiple lines, where each line represents a json object that corresponds to a unique note.
* More details on what is present in each line (json object) is presented below.
```json lines
{"...": "..."}
{"...": "..."}
{"...": "..."}
```
### Training
* The notes should be in json format. The "key" values that we mention below are the ones that we used, you are free to change the keys in the json file (make sure that these changes are reflected in the subsequent steps - train/test/evaluate).
* We show an example for a single note, for multiple notes we would add multiple of the json objects shown below in a single jsonl file.
* The default values in the package assume that the text is present in the "text" field.
* There should be a "meta" field that should contain a unique "note_id" field. Every note should have a unique "note_id". Other metadata fields may be added if required for your needs.
* The "spans" field should contain the annotated spans for the training dataset. They should be in sorted order (based on start position)
* The "spans" field will contain a list of spans, where each span should contain the "start", "end" and "label" of the span
```json
{ "text": "Physician Discharge Summary Admit date: 10/12/1982 Discharge date: 10/22/1982 Patient Information Jack Reacher, 54 y.o. male (DOB = 1/21/1928) ...",
"meta": {"note_id": "1", "patient_id": "1"},
"spans": [{"id":"0", "start": 40, "end": 50, "label": "DATE"}, {"id":"1", "start": 67, "end": 77, "label": "DATE"}, {"id":"3", "start": 98, "end": 110, "label": "PATIENT"}, {"id":"3", "start": 112, "end": 114, "label": "AGE"}, {"...": "..."}]}
```
### Test (Forward Pass/Inference)
* The format is almost the same as above. Since, at test time we don't have annotated spans, we assign an empty list to the "spans" field
* We show an example for a single note, for multiple notes we would add multiple of the json objects shown below in a single jsonl file.
```json
{ "text": "Physician Discharge Summary Admit date: 10/12/1982 Discharge date: 10/22/1982 Patient Information Jack Reacher, 54 y.o. male (DOB = 1/21/1928) ...",
"meta": {"note_id": "1", "patient_id": "1"},
"spans": []}
```
## Usage
* Once you have the package installed and the data ready, follow the steps described below.
* Feel free to replace the models in the demos with any of the ones you have trained or any model from [huggingface](https://huggingface.co).
### Test (Forward Pass/Inference)
* We have demos for running the forward pass in the following folder: [steps/forward_pass](./steps/forward_pass). You can add or modify any of the values mentioned in the notebook or shell scripts based on your needs (e.g. sentencizers, tokenizers, model parameters in the config file etc.).
* The forward pass can be run via JupyterNotebook (can also be used via a python script) or a shell script.
* To use a trained model to run the forward pass on the desired dataset, using a JupyterNotebook, follow the steps shown in the [ForwardPass.ipynb](./steps/forward_pass/Forward%20Pass.ipynb) notebook.
* To use a trained model to run the forward pass on the desired dataset, using a shell script, follow the steps shown in the [forward_pass.sh](./steps/forward_pass/forward_pass.sh) script.
* We also include the step of using the model predictions to de-identify the medical notes in the notebook/script (i.e. producing the de-identified version of the original dataset/text).
### Training
* We have demos for training a model in the following folder: [steps/train](./steps/train). You can add or modify any of the values mentioned in the notebook or shell scripts based on your needs (e.g. sentencizers, tokenizers, model parameters in the config file etc.).
* Training a model can be done via JupyterNotebook (can also be used via a python script) or a shell script.
* To train a model on the desired dataset using a JupyterNotebook, follow the steps shown in the [Train.ipynb](./steps/train/Train.ipynb) notebook.
* To train a model on the desired dataset using a shell script, follow the steps shown in the [train.sh](./steps/train/train.sh) script.
* We used the i2b2 2014 dataset while creating the demo (you can use any dataset of your choice). To download the i2b2 2014 dataset please visit: https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
### Evaluation
* To evaluate a trained model on a dataset, refer to the demos present in the following folder: [steps/evaluation](./steps/evaluation)
* Evaluating a model can be done via JupyterNotebook (can also be used via a python script) or a shell script.
* To use a trained model and evaluate its performance on the desired dataset, using a JupyterNotebook, follow the steps shown in the [Evaluation.ipynb](./steps/evaluation/Evaluation.ipynb) notebook.
* To use a trained model and evaluate its performance on the desired dataset, using a shell script, follow the steps shown in the [evaluation.sh](./steps/evaluation/evaluation.sh) script.
* We do both token (suffixed with "_token") and span level evaluation for each entity and overall.
* There's also an option to do binary evaluation - which can be specified via the ner_type_maps argument in the config file. These map existing PHI labels to a new set of PHI labels on which we do the evaluation
### Recall biased thresholding
* The objective is to modify the classification thresholds, i.e. instead of choosing the class with the highest probability as the prediction for a token (optimize F1), we modify the classification thresholds to optimize recall.
* While this may decrease precision, having high levels of recall is essential for sensitive datasets.
* The demos in the following folder: [steps/recall_threshold](./steps/recall_threshold) demonstrate how we can take our trained models and estimate classification thresholds to optimize recall
* To use a trained model, optimize it for a desired level of recall (based on validation data) and evaluate its performance on the test dataset, using a JupyterNotebook, follow the steps shown in the [RecallThreshold.ipynb](./steps/recall_threshold/RecallThreshold.ipynb) notebook.
## Trained Models
* Our models for de-identification of medical notes can be found in: [OBI organization](https://huggingface.co/obi).
* Models:
* [OBI-ClinicalBERT De-Identification Model](https://huggingface.co/obi/deid_bert_i2b2)
* [OBI-RoBERTa De-Identification Model](https://huggingface.co/obi/deid_roberta_i2b2)
* Demo:
* [Medical Note De-Identification](https://huggingface.co/spaces/obi/Medical-Note-Deidentification)
| /robust_deid-0.3.1.tar.gz/robust_deid-0.3.1/README.md | 0.667906 | 0.986507 | README.md | pypi |
import json
import random
from argparse import ArgumentParser
from typing import Union, NoReturn, Iterable, Dict, List
random.seed(41)
class SpanValidation(object):
    """
    This class is used to build a mapping between the note id
    and the annotated spans in that note. This will be used during the
    evaluation of the models. This is required to perform span level
    evaluation.
    """

    @staticmethod
    def get_spans(
        input_file: str,
        metadata_key: str = 'meta',
        note_id_key: str = 'note_id',
        spans_key: str = 'spans'
    ) -> Iterable[Dict[str, Union[str, List[Dict[str, str]]]]]:
        """
        Get a mapping between the note id
        and the annotated spans in that note. This will mainly be used during the
        evaluation of the models.
        Args:
            input_file (str): The input file
            metadata_key (str): The key where the note metadata is present
            note_id_key (str): The key where the note id is present
            spans_key (str): The key that contains the annotated spans for a note dictionary
        Returns:
            (Iterable[Dict[str, Union[str, List[Dict[str, str]]]]]): An iterable that iterates through each note
                                                                     and contains the note id and annotated spans
                                                                     for that note
        """
        # BUG FIX: use a context manager so the file handle is closed even if
        # the consumer abandons this generator early (the original
        # `for line in open(...)` never closed the file explicitly).
        with open(input_file, 'r') as file:
            for line in file:
                note = json.loads(line)
                note_id = note[metadata_key][note_id_key]
                # Sort spans by position so evaluation sees a stable order.
                note[spans_key].sort(key=lambda _span: (_span['start'], _span['end']))
                yield {'note_id': note_id, 'note_spans': note[spans_key]}
def main() -> NoReturn:
    """CLI entry point: read notes from a jsonl file and write, one per
    line, a json object holding each note's id and its sorted spans."""
    cli_parser = ArgumentParser(description='configuration arguments provided at run time from the CLI')
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        # Fixed help-text typo: "the the jsonl file" -> "the jsonl file".
        help='the jsonl file that contains the notes'
    )
    cli_parser.add_argument(
        '--metadata_key',
        type=str,
        default='meta',
        help='the key where the note metadata is present in the json object'
    )
    cli_parser.add_argument(
        '--note_id_key',
        type=str,
        default='note_id',
        help='the key where the note id is present in the json object'
    )
    cli_parser.add_argument(
        '--spans_key',
        type=str,
        default='spans',
        help='the key where the annotated spans for the notes are present in the json object'
    )
    cli_parser.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='the file where the note id and the corresponding spans for that note are to be saved'
    )
    args = cli_parser.parse_args()
    # Write one {'note_id', 'note_spans'} json object per line.
    with open(args.output_file, 'w') as file:
        for span_info in SpanValidation.get_spans(
                input_file=args.input_file,
                metadata_key=args.metadata_key,
                note_id_key=args.note_id_key,
                spans_key=args.spans_key):
            file.write(json.dumps(span_info) + '\n')


if __name__ == "__main__":
    main()
import re
import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from typing import Iterable, Dict, List, Sequence, Union, Mapping, Tuple, NoReturn
from .preprocessing import PreprocessingLoader
class SpanFixer(object):
"""
The tokens and spans may not align depending on the tokenizer used.
This class either expands the span to cover the tokens, so we don't have a mismatch.
A mismatch is when a span_start will not coincide with some token_start or the span_end
will not coincide with some token_end. This class changes the span_start and span_end
so that the span_start will coincide with some token_start and the span_end
will coincide with some token_end - and we don't get any position mismatch errors while
building our dataset. This entire process involves updating span positions which can lead to duplicate
or overlapping spans, which then need to be removed.
E.g we have text: The patient is 75yo man
AGE Span: 75
Token: 75yo
As you can see the span is smaller than the token, which will lead to an error when
building the NER dataset.
To ensure this does not happen, we correct the span. We change the span from
75 to 75yo -> So now AGE Span is 75yo instead of 75. This script essentially changes
the annotated spans to match the tokens. In an ideal case we wouldn't need this script
but since medical notes have many typos, this script becomes necessary to deal with
issues and changes that arise from different tokenizers.
Also sort the spans and convert the start and end keys of the spans to integers
"""
def __init__(
self,
sentencizer: str,
tokenizer: str,
ner_priorities: Mapping[str, int],
verbose: bool = True
) -> NoReturn:
"""
Initialize the sentencizer and tokenizer
Args:
sentencizer (str): The sentencizer to use for splitting text into sentences
tokenizer (str): The tokenizer to use for splitting text into tokens
ner_priorities (Mapping[str, int]): The priority when choosing which duplicates to remove.
Mapping that represents a priority for each NER type
verbose (bool): To print out warnings etc
"""
self._sentencizer = PreprocessingLoader.get_sentencizer(sentencizer)
self._tokenizer = PreprocessingLoader.get_tokenizer(tokenizer)
self._ner_priorities = ner_priorities
self._verbose = verbose
def __get_token_positions(self, text: str) -> Tuple[Dict[int, int], Dict[int, int]]:
"""
Get the start and end positions of all the tokens in the note.
Args:
text (str): The text present in the note
Returns:
token_start_positions (Mapping[int, int]): The start positions of all the tokens in the note
token_end_positions (Mapping[int, int]): The end positions of all the tokens in the note
"""
token_start_positions = dict()
token_end_positions = dict()
for sentence in self._sentencizer.get_sentences(text):
offset = sentence['start']
for token in self._tokenizer.get_tokens(sentence['text']):
start = token['start'] + offset
end = token['end'] + offset
token_start_positions[start] = 1
token_end_positions[end] = 1
return token_start_positions, token_end_positions
def get_duplicates(
self,
spans: List[Dict[str, Union[str, int]]],
) -> List[int]:
"""
Return the indexes where there are duplicate/overlapping spans. A duplicate or
span is one where the same token can have two labels.
E.g:
Token: BWH^Bruce
This is a single token where BWH is the hospital label and Bruce is the Patient label
The fix_alignment function assigns this entre token the hospital label but it also
assigns this entire token the patient label. Since we have two labels for the same
token, we need to remove one of them.
We assign this entire token one label - either hospital label or the patient label
In this case we assign patient because of higher priority. So now we need to remove
the hospital label from the dataset (since it is essentially a duplicate label). This
script handles this case.
There are cases when two different labels match the same token partially
E.g
Text: JT/781-815-9090
Spans: JT - hospital, 781-815-9090 - Phone
Tokens: (Jt/781) & (- 815 - 9090)
As you can see the token JT/781 will be assigned the label in the fix_alignment function
but 781-815-9090 is also phone and the 781 portion is overlapped, and we need to resolve this.
In this script, we resolve it by treating JT/781 as one span (hospital) and
-815-9090 as another span (phone).
Args:
spans ([List[Dict[str, Union[str, int]]]): The NER spans in the note
Returns:
remove_spans (Sequence[int]): A list of indexes of the spans to remove
"""
remove_spans = list()
prev_start = -1
prev_end = -1
prev_label = None
prev_index = None
spans.sort(key=lambda _span: (_span['start'], _span['end']))
for index, span in enumerate(spans):
current_start = span['start']
current_end = span['end']
current_label = span['label']
if type(current_start) != int or type(current_end) != int:
raise ValueError('The start and end keys of the span must be of type int')
# Check if the current span matches another span
# that is if this span covers the same tokens as the
# previous spans (but has a different label)
# Based on the priority, treat the span with the low
# priority label as a duplicate label and add it to the
# list of spans that need to be removed
if current_start == prev_start and current_end == prev_end:
if self._ner_priorities[current_label] > self._ner_priorities[prev_label]:
# Store index of the previous span if it has lower priority
remove_spans.append(prev_index)
# Reset span details
prev_start = current_start
prev_end = current_end
prev_index = index
prev_label = current_label
if self._verbose:
print('DUPLICATE: ', span)
print('REMOVED: ', spans[remove_spans[-1]])
elif self._ner_priorities[current_label] <= self._ner_priorities[prev_label]:
# Store current index of span if it has lower priority
remove_spans.append(index)
if self._verbose:
print('DUPLICATE: ', spans[prev_index])
print('REMOVED: ', spans[remove_spans[-1]])
# Check for overlapping span
elif current_start < prev_end:
# If the current span end matches the overlapping span end
# Remove the current span, since it is smaller
if current_end <= prev_end:
remove_spans.append(index)
if self._verbose:
print('DUPLICATE: ', spans[prev_index])
print('REMOVED: ', spans[remove_spans[-1]])
# If the current end is greater than the prev_end
# then we split it into tow spans. We treat the previous span
# as one span and the end of the previous span to the end of the current span
# as another span.
elif current_end > prev_end:
# Create the new span - start=previous_span_end, end=current_span_end
overlap_length = spans[prev_index]['end'] - current_start
new_text = span['text'][overlap_length:]
# Remove extra spaces that may arise during this span separation
new_text = re.sub('^(\s+)', '', new_text, flags=re.DOTALL)
span['start'] = current_end - len(new_text)
span['text'] = new_text
if self._verbose:
print('OVERLAP: ', spans[prev_index])
print('UPDATED: ', span)
# Reset span details
prev_start = current_start
prev_end = current_end
prev_label = current_label
prev_index = index
# Reset span details
else:
prev_start = current_start
prev_end = current_end
prev_label = current_label
prev_index = index
return remove_spans
def fix_alignment(
self,
text: str,
spans: Sequence[Dict[str, Union[str, int]]]
) -> Iterable[Dict[str, Union[str, int]]]:
"""
Align the span and tokens. When the tokens and spans don't align, we change the
start and end positions of the spans so that they align with the tokens. This is
needed when a different tokenizer is used and the spans which are defined against
a different tokenizer don't line up with the new tokenizer. Also remove spaces present
at the start or end of the span.
E.g:
Token: BWH^Bruce
This is a single token where BWH is the hospital label and Bruce is the Patient label
The fix_alignment function assigns this entre token the hospital label but it also
assigns this entire token the patient label. This function basically expands the span
so that it matches the start and end positions of some token. By doing this it may create
overlapping and duplicate spans. As you can see it expands the patient label to match the
start of the token and it expands the hospital label to match the end of the token.
function.
Args:
text (str): The text present in the note
spans ([Sequence[Dict[str, Union[str, int]]]): The NER spans in the note
Returns:
(Iterable[Dict[str, Union[str, int]]]): Iterable through the modified spans
"""
# Get token start and end positions so that we can check if a span
# coincides with the start and end position of some token.
token_start_positions, token_end_positions = self.__get_token_positions(text)
for span in spans:
start = span['start']
end = span['end']
if type(start) != int or type(end) != int:
raise ValueError('The start and end keys of the span must be of type int')
if re.search('^\s', text[start:end]):
if self._verbose:
print('WARNING - space present in the start of the span')
start = start + 1
if re.search('(\s+)$', text[start:end], flags=re.DOTALL):
new_text = re.sub('(\s+)$', '', text[start:end], flags=re.DOTALL)
end = start + len(new_text)
# When a span does not coincide with the start and end position of some token
# it means there will be an error when building the ner dataset, we try and avoid
# that error by updating the spans itself, that is we expand the start/end positions
# of the spans so that it is aligned with the tokens.
while token_start_positions.get(start, False) is False:
start -= 1
while token_end_positions.get(end, False) is False:
end += 1
# Print what the old span was and what the new expanded span will look like
if self._verbose and (int(span['start']) != start or int(span['end']) != end):
print('OLD SPAN: ', text[int(span['start']):int(span['end'])])
print('NEW SPAN: ', text[start:end])
# Update the span with its new start and end positions
span['start'] = start
span['end'] = end
span['text'] = text[start:end]
yield span
def fix_note(
self,
text: str,
spans: Sequence[Dict[str, Union[str, int]]],
) -> Iterable[Dict[str, Union[str, int]]]:
"""
This function changes the span_start and span_end
so that the span_start will coincide with some token_start and the span_end
will coincide with some token_end and also removes duplicate/overlapping spans
that may arise when we change the span start and end positions. The resulting
spans from this function will always coincide with some token start and token
end, and hence will not have any token and span mismatch errors when building the
NER dataset. For more details and examples check the documentation of the
fix_alignment and get_duplicates functions.
Args:
text (str): The text present in the note
spans ([Sequence[Mapping[str, Union[str, int]]]): The NER spans in the note
Returns:
(Iterable[Mapping[str, Union[str, int]]]): Iterable through the fixed spans
"""
# Fix span position alignment
spans = [span for span in self.fix_alignment(text=text, spans=spans)]
# Check for duplicate/overlapping spans
remove_spans = self.get_duplicates(spans=spans)
for index, span in enumerate(spans):
# Remove the duplicate/overlapping spans
if index not in remove_spans:
yield span
def fix(
        self,
        input_file: str,
        text_key: str = 'text',
        spans_key: str = 'spans'
) -> Iterable[Dict[str, Union[str, Dict[str, str], List[Dict[str, str]]]]]:
    """
    Fix the spans of every note in the input file.

    The span start/end positions are adjusted so that they coincide with some
    token start/end, and duplicate/overlapping spans that may arise from that
    adjustment are removed. The resulting spans will not cause token/span
    mismatch errors when building the NER dataset. For more details and
    examples check the documentation of the fix_alignment and get_duplicates
    functions.
    Args:
        input_file (str): The jsonl file that contains the notes whose spans we want to fix
        text_key (str): The key where the note & token text is present in the json object
        spans_key (str): The key where the note spans are present in the json object
    Returns:
        (Iterable[Dict[str, Union[str, Dict[str, str], List[Dict[str, str]]]]]): Iterable through
        the fixed notes
    """
    # Context manager ensures the file handle is closed even when the consumer
    # stops iterating early (the original left the handle to the GC).
    with open(input_file, 'r') as file:
        for line in file:
            note = json.loads(line)
            # Replace the note spans with their aligned, de-duplicated versions
            note[spans_key] = list(self.fix_note(text=note[text_key], spans=note[spans_key]))
            yield note
def main() -> None:
    """
    Parse CLI arguments, fix the spans in every note of the input file and
    write the fixed notes to the output file in jsonl format.
    """
    # The following code sets up the arguments to be passed via CLI or via a JSON file
    cli_parser = ArgumentParser(
        description='configuration arguments provided at run time from the CLI',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='the jsonl file that contains the notes'
    )
    cli_parser.add_argument(
        '--sentencizer',
        type=str,
        required=True,
        help='the sentencizer to use for splitting notes into sentences'
    )
    cli_parser.add_argument(
        '--tokenizer',
        type=str,
        required=True,
        help='the tokenizer to use for splitting text into tokens'
    )
    cli_parser.add_argument(
        '--ner_types',
        nargs="+",
        required=True,
        help='the NER types'
    )
    cli_parser.add_argument(
        '--ner_priorities',
        nargs="+",
        required=True,
        help='the priorities for the NER types - the priority when choosing which duplicates to remove'
    )
    cli_parser.add_argument(
        '--verbose',
        action='store_true',
        help='whether to print the information about span fixes'
    )
    cli_parser.add_argument(
        '--text_key',
        type=str,
        default='text',
        help='the key where the note & token text is present in the json object'
    )
    cli_parser.add_argument(
        '--spans_key',
        type=str,
        default='spans',
        help='the key where the note spans are present in the json object'
    )
    cli_parser.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='the output json file that will contain the new fixed spans'
    )
    args = cli_parser.parse_args()
    # Guard clause: the two lists must pair up one-to-one
    if len(args.ner_types) != len(args.ner_priorities):
        raise ValueError('Length of ner_types and ner_priorities must be the same')
    # Mapping that represents a priority for each PHI type
    # For example, the PATIENT type will have a higher priority as
    # compared to STAFF.
    ner_priorities = {ner_type: priority for ner_type, priority in zip(args.ner_types, args.ner_priorities)}
    span_fixer = SpanFixer(
        tokenizer=args.tokenizer,
        sentencizer=args.sentencizer,
        ner_priorities=ner_priorities,
        verbose=args.verbose
    )
    with open(args.output_file, 'w') as file:
        for note in span_fixer.fix(
                input_file=args.input_file,
                text_key=args.text_key,
                spans_key=args.spans_key
        ):
            file.write(json.dumps(note) + '\n')
# Script entry point
if __name__ == '__main__':
    main()
import json
import random
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from typing import Iterable, Dict, List, Union, Optional, Sequence, NoReturn
from .dataset_builder import Dataset, SentenceDataset
from .preprocessing import PreprocessingLoader
random.seed(41)
class DatasetCreator(object):
    """
    Build a NER token classification dataset.
    For training we will build the dataset using the annotated spans (e.g from prodigy).
    For predictions we will assign default labels.
    The dataset is on a sentence level, i.e each note is split into sentences and the de-id
    task is run on a sentence level. Even the predictions are run on a sentence level.
    The dataset would be something like:
    Tokens: [[tok1, tok2, ... tok-n], [tok ...], ..., [tok ...]]
    Labels: [[lab1, lab2, ... lab-n], [lab ...], ..., [lab ...]]
    Where the inner list represents the sentences - the tokens in the sentence and the respective
    labels for each token. The labels depend on the notation.
    This script can also be used for predictions; the labels will be filled with some
    default value, so that the same script can build a dataset to train a model
    and a dataset to obtain predictions using a model.
    Example:
    Note: Bruce Wayne is a 60yo man. He lives in Gotham
    Sentences: [Bruce Wayne Jr is a 60yo man., He lives in Gotham]
    Tokens: [[Bruce, Wayne, Jr, is, a, 60, yo, man, .], [He, lives, in, Gotham]]
    Labels (BIO notation): [[B-Name, I-Name, I-Name, O, O, O, O, O, O], [O, O, O, B-LOC]]
    Labels (BILOU notation): [[B-Name, I-Name, L-Name, O, O, O, O, O, O], [O, O, O, U-LOC]]
    We also can create sentences that use previous/next chunks as context - in this case the
    dataset would look something like this (assume we limit the size of the chunks to 3 tokens):
    Tokens: [[Bruce, Wayne, Jr, is, a, 60, yo, man, ., He, lives, in], [yo, man, ., He, lives, in, Gotham]]
    Labels (BIO notation): [[B-Name, I-Name, I-Name, O, O, O, O, O, O, NA, NA, NA], [NA, NA, NA, O, O, O, B-LOC]]
    NA indicates the token is used only for context.
    """

    def __init__(
            self,
            sentencizer: str,
            tokenizer: str,
            max_tokens: int = 128,
            max_prev_sentence_token: int = 32,
            max_next_sentence_token: int = 32,
            default_chunk_size: int = 32,
            ignore_label: str = 'NA'
    ) -> None:
        """
        Initialize the sentencizer and tokenizer.
        Args:
            sentencizer (str): Specify which sentencizer you want to use
            tokenizer (str): Specify which tokenizer you want to use
            max_tokens (int): The maximum number of tokens allowed in a sentence/training example,
                              truncate if it exceeds
            max_prev_sentence_token (int): The maximum number of previous chunk tokens allowed in a
                                           sentence/training example
            max_next_sentence_token (int): The maximum number of next chunk tokens allowed in a
                                           sentence/training example
            default_chunk_size (int): The training example will always include a chunk of this length
                                      as part of the previous and next chunks
            ignore_label (str): The label assigned to the previous and next chunks to distinguish
                                them from the current sentence
        """
        self._sentencizer = PreprocessingLoader.get_sentencizer(sentencizer=sentencizer)
        self._tokenizer = PreprocessingLoader.get_tokenizer(tokenizer=tokenizer)
        # Object used to extract the sentences and tokens of a note
        self._dataset = Dataset(sentencizer=self._sentencizer, tokenizer=self._tokenizer)
        # Object that turns the sentences of a note into per-sentence rows, where each
        # row carries a previous chunk and next chunk (tokens) that act as context:
        # [ps1, ps2 ... ps-i], [cs1, cs2 ... cs-j], [ns1, ns2 ... ns-k] - the current
        # sentence (the one we train/predict on) sits in the middle, the surrounding
        # tokens only provide context
        self._sentence_dataset = SentenceDataset(
            max_tokens=max_tokens,
            max_prev_sentence_token=max_prev_sentence_token,
            max_next_sentence_token=max_next_sentence_token,
            default_chunk_size=default_chunk_size,
            ignore_label=ignore_label
        )

    def create(
            self,
            input_file: str,
            mode: str = 'predict',
            notation: str = 'BIO',
            token_text_key: str = 'text',
            metadata_key: str = 'meta',
            note_id_key: str = 'note_id',
            label_key: str = 'labels',
            span_text_key: str = 'spans'
    ) -> Iterable[Dict[str, Union[List[Dict[str, Union[str, int]]], List[str]]]]:
        """
        Yield the sentences that will be part of the NER dataset.
        We extract all the sentences in each note and the tokens in each sentence,
        and add context tokens to the sentence if required. The returned iterable
        goes through each of the processed sentences.
        Args:
            input_file (str): Input jsonl file. Make sure the spans are in ascending order (based on start position)
            mode (str): Dataset being built for train or predict
            notation (str): The NER labelling notation
            token_text_key (str): The key where the note text and token text is present in the json object
            metadata_key (str): The key where the note metadata is present in the json object
            note_id_key (str): The key where the note id is present in the json object
            label_key (str): The key where the token label will be stored in the json object
            span_text_key (str): The key where the note spans is present in the json object
        Returns:
            (Iterable[Dict[str, Union[List[Dict[str, Union[str, int]]], List[str]]]]): Iterate through the processed
            sentences/training examples
        Raises:
            ValueError: If mode is neither 'train' nor 'predict'
        """
        # Validate the mode up front (the original only failed once a note was read,
        # so an invalid mode on an empty file went unnoticed)
        if mode not in ('train', 'predict'):
            raise ValueError("Invalid mode - can only be train/predict")
        # Context manager ensures the file handle is closed even if the caller
        # stops consuming the generator early
        with open(input_file, 'r') as file:
            for line in file:
                note = json.loads(line)
                note_text = note[token_text_key]
                note_id = note[metadata_key][note_id_key]
                # Skip to next note if empty string
                if not note_text:
                    continue
                # Annotated spans are only used when building a training dataset
                note_spans = note[span_text_key] if mode == 'train' else None
                # Gather all the tokens in the note, split on the sentence level.
                # Token start/end positions can later be used to reconstruct the
                # note from its sentences
                sent_tokens = list(self._dataset.get_tokens(
                    text=note_text,
                    spans=note_spans,
                    notation=notation
                ))
                # Go through each sentence in the note, pairing it with previous and
                # next chunks used for context. The chunks carry a default label
                # (e.g NA) so they can be ignored when computing loss during training
                for ner_sent_index, ner_sentence in self._sentence_dataset.get_sentences(
                        sent_tokens=sent_tokens,
                        token_text_key=token_text_key,
                        label_key=label_key
                ):
                    current_sent_info = ner_sentence['current_sent_info']
                    # Record the sentence's position within the note plus the note id so
                    # every sentence can be mapped back to the note it belongs to
                    note_sent_info_store = {'start': current_sent_info[0]['start'],
                                            'end': current_sent_info[-1]['end'], 'note_id': note_id}
                    ner_sentence['note_sent_info'] = note_sent_info_store
                    yield ner_sentence
def main() -> None:
    """
    Parse CLI arguments, build the NER dataset and store it in jsonl or conll format.
    """
    cli_parser = ArgumentParser(
        description='configuration arguments provided at run time from the CLI',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='the jsonl file that contains the notes. spans need to be sorted in ascending order (based on start '
             'position) '
    )
    cli_parser.add_argument(
        '--notation',
        type=str,
        default='BIO',
        help='the notation we will be using for the label scheme'
    )
    cli_parser.add_argument(
        '--max_tokens',
        type=int,
        default=128,
        help='The max tokens that a given sentence (training/prediction example) in the note can have'
    )
    cli_parser.add_argument(
        '--default_chunk_size',
        type=int,
        default=32,
        help='the default chunk size for the previous and next chunks for a given sentence (training/prediction '
             'example) in the note can have '
    )
    cli_parser.add_argument(
        '--max_prev_sentence_token',
        type=int,
        default=32,
        help='the max chunk size for the previous chunks for a given sentence (training/prediction example) in the '
             'note can have '
    )
    cli_parser.add_argument(
        '--max_next_sentence_token',
        type=int,
        default=32,
        help='the max chunk size for the next chunks for a given sentence (training/prediction example) in the note '
             'can have '
    )
    cli_parser.add_argument(
        '--mode',
        type=str,
        choices=['train', 'predict'],
        required=True,
        help='whether we are building the dataset for training or prediction'
    )
    cli_parser.add_argument(
        '--sentencizer',
        type=str,
        required=True,
        help='the sentencizer to use for splitting notes into sentences'
    )
    cli_parser.add_argument(
        '--tokenizer',
        type=str,
        required=True,
        help='the tokenizer to use for splitting text into tokens'
    )
    cli_parser.add_argument(
        '--ignore_label',
        type=str,
        default='NA',
        help='the label assigned to the previous/next context chunk tokens'
    )
    cli_parser.add_argument(
        '--token_text_key',
        type=str,
        default='text',
        help='the key where the note text is present in the json object'
    )
    cli_parser.add_argument(
        '--metadata_key',
        type=str,
        default='meta',
        help='the key where the note metadata is present in the json object'
    )
    cli_parser.add_argument(
        '--note_id_key',
        type=str,
        default='note_id',
        help='the key where the note id is present in the json object'
    )
    cli_parser.add_argument(
        '--label_key',
        type=str,
        default='label',
        help='the key where the note label for each token is present in the json object'
    )
    cli_parser.add_argument(
        '--span_text_key',
        type=str,
        default='spans',
        help='the key where the note annotated spans are present in the json object'
    )
    # choices ensures an unsupported format fails fast instead of silently
    # producing no output (the original if/elif had no else branch)
    cli_parser.add_argument(
        '--format',
        type=str,
        default='jsonl',
        choices=['jsonl', 'conll'],
        help='format to store the dataset in: jsonl or conll'
    )
    # required because the script always writes to this file
    cli_parser.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='The file where the NER dataset will be stored'
    )
    args = cli_parser.parse_args()
    dataset_creator = DatasetCreator(
        sentencizer=args.sentencizer,
        tokenizer=args.tokenizer,
        max_tokens=args.max_tokens,
        max_prev_sentence_token=args.max_prev_sentence_token,
        max_next_sentence_token=args.max_next_sentence_token,
        default_chunk_size=args.default_chunk_size,
        ignore_label=args.ignore_label)
    ner_notes = dataset_creator.create(
        input_file=args.input_file,
        mode=args.mode,
        notation=args.notation,
        token_text_key=args.token_text_key,
        metadata_key=args.metadata_key,
        note_id_key=args.note_id_key,
        label_key=args.label_key,
        span_text_key=args.span_text_key
    )
    # Store the NER dataset in the desired format
    if args.format == 'jsonl':
        with open(args.output_file, 'w') as file:
            for ner_sentence in ner_notes:
                file.write(json.dumps(ner_sentence) + '\n')
    elif args.format == 'conll':
        with open(args.output_file, 'w') as file:
            for ner_sentence in ner_notes:
                tokens = ner_sentence['tokens']
                labels = ner_sentence['labels']
                current_sent_info = ner_sentence['current_sent_info']
                note_id = ner_sentence['note_sent_info']['note_id']
                # Every token must line up with exactly one label and one sent_info entry
                if len(tokens) != len(labels) or len(labels) != len(current_sent_info):
                    raise ValueError('Length mismatch')
                for token, label, sent_info in zip(tokens, labels, current_sent_info):
                    sent_info['note_id'] = note_id
                    data = token + ' ' + label + ' ' + json.dumps(sent_info) + '\n'
                    file.write(data)
                # Blank line separates sentences in the CoNLL format
                file.write('\n')
# Script entry point
if __name__ == '__main__':
    main()
import json
import random
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import Counter
from typing import NoReturn, List
from .distribution import NERDistribution, DatasetSplits, PrintDistribution
random.seed(41)
class DatasetSplitter(object):
    """
    Prepare dataset splits - training, validation & testing splits.
    Compute NER distributions in our dataset, based on which we create and
    store a mapping that records which notes (in a dataset) belong to which
    split. Based on this distribution, and on whether we want to keep certain
    notes grouped (e.g by patient), we assign notes to a split such that the
    final NER type distribution in each split is similar.
    """

    def __init__(
            self,
            train_proportion: int = 70,
            validation_proportion: int = 15,
            test_proportion: int = 15
    ) -> None:
        """
        Initialize the proportions of the splits.
        Args:
            train_proportion (int): Ratio of train dataset
            validation_proportion (int): Ratio of validation dataset
            test_proportion (int): Ratio of test dataset
        """
        self._train_proportion = train_proportion
        self._validation_proportion = validation_proportion
        self._test_proportion = test_proportion
        # The split that check_note/__update_split currently operate on
        self._split = None
        # Maps split name -> {note key: 1}, used for O(1) membership lookups
        self._lookup_split = dict()

    def get_split(self, split: str) -> List[str]:
        """
        Return the keys (e.g note ids) that were assigned to the given split.
        Args:
            split (str): The split - train/validation/test
        Returns:
            (List[str]): The keys belonging to the split
        """
        return list(self._lookup_split[split].keys())

    def set_split(self, split: str) -> None:
        """
        Set the split that is currently being checked/processed. Subsequent
        calls to check_note test membership against this split.
        Args:
            split (str): The split - train/validation/test
        Raises:
            ValueError: If the split name is not one of the three known splits
        """
        if split not in ('train', 'validation', 'test'):
            raise ValueError('Invalid split')
        self._split = split

    def __update_split(self, key: str) -> None:
        """
        Record that the given key belongs to the currently set split.
        Args:
            key (str): The key that identifies the note belonging to the split
        """
        self._lookup_split[self._split][key] = 1

    def check_note(self, key: str) -> bool:
        """
        Check whether the note identified by the key belongs to the currently
        set split (train, validation, test).
        Args:
            key (str): The key that identifies the note
        Returns:
            (bool): True if the note belongs to the current split, False otherwise
        Raises:
            ValueError: If no split has been set via set_split
        """
        if self._split is None:
            raise ValueError('Split not set')
        return bool(self._lookup_split[self._split].get(key, False))

    def assign_splits(
            self,
            input_file: str,
            spans_key: str = 'spans',
            metadata_key: str = 'meta',
            group_key: str = 'note_id',
            margin: float = 0.3
    ) -> None:
        """
        Assign every note group in the input file to one of the dataset splits
        (train/validation/test) based on the NER type distribution, so that the
        distribution in each split is similar. All notes sharing the same group
        key (e.g patient id) end up in the same split.
        Args:
            input_file (str): The input jsonl file
            spans_key (str): The key where the note spans are present
            metadata_key (str): The key where the note metadata is present
            group_key (str): The key the notes are grouped by (e.g note_id or patient id);
                             all notes with the same value end up in the same split
            margin (float): Margin of error when maintaining proportions in the splits
        """
        self._lookup_split = {
            'train': dict(),
            'validation': dict(),
            'test': dict()
        }
        # First pass: compute the distribution of NER types per group
        # (e.g the distribution over all notes belonging to one patient)
        ner_distribution = NERDistribution()
        with open(input_file, 'r') as file:
            for line in file:
                note = json.loads(line)
                key = note[metadata_key][group_key]
                ner_distribution.update_distribution(spans=note[spans_key], key=key)
        # Decide the split of each group based on the computed distribution
        dataset_splits = DatasetSplits(
            ner_distribution=ner_distribution,
            train_proportion=self._train_proportion,
            validation_proportion=self._validation_proportion,
            test_proportion=self._test_proportion,
            margin=margin
        )
        # Second pass: record the split assignment of every group key
        with open(input_file, 'r') as file:
            for line in file:
                note = json.loads(line)
                key = note[metadata_key][group_key]
                split = dataset_splits.get_split(key=key)
                self.set_split(split)
                self.__update_split(key)
def main() -> None:
    """
    Prepare dataset splits - training, validation & testing splits.
    Compute NER distributions in our dataset; based on this distribution and on
    whether we want to keep certain notes grouped (e.g by patient) we assign
    notes to a split, such that the final NER type distribution in each split
    is similar, then write each split to its own jsonl file.
    """
    # The following code sets up the arguments to be passed via CLI or via a JSON file
    cli_parser = ArgumentParser(
        description='configuration arguments provided at run time from the CLI',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='the jsonl file that contains the notes'
    )
    cli_parser.add_argument(
        '--spans_key',
        type=str,
        default='spans',
        help='the key where the note spans is present in the json object'
    )
    cli_parser.add_argument(
        '--metadata_key',
        type=str,
        default='meta',
        help='the key where the note metadata is present in the json object'
    )
    cli_parser.add_argument(
        '--group_key',
        type=str,
        default='note_id',
        help='the key to group notes by in the json object'
    )
    cli_parser.add_argument(
        '--train_proportion',
        type=int,
        default=70,
        help='ratio of train dataset'
    )
    cli_parser.add_argument(
        '--train_file',
        type=str,
        default=None,
        help='The file to store the train data'
    )
    cli_parser.add_argument(
        '--validation_proportion',
        type=int,
        default=15,
        help='ratio of validation dataset'
    )
    cli_parser.add_argument(
        '--validation_file',
        type=str,
        default=None,
        help='The file to store the validation data'
    )
    cli_parser.add_argument(
        '--test_proportion',
        type=int,
        default=15,
        help='ratio of test dataset'
    )
    cli_parser.add_argument(
        '--test_file',
        type=str,
        default=None,
        help='The file to store the test data'
    )
    cli_parser.add_argument(
        '--margin',
        type=float,
        default=0.3,
        help='margin of error when maintaining proportions in the splits'
    )
    cli_parser.add_argument(
        '--print_dist',
        action='store_true',
        help='whether to print the label distribution in the splits'
    )
    args = cli_parser.parse_args()
    dataset_splitter = DatasetSplitter(
        train_proportion=args.train_proportion,
        validation_proportion=args.validation_proportion,
        test_proportion=args.test_proportion
    )
    dataset_splitter.assign_splits(
        input_file=args.input_file,
        spans_key=args.spans_key,
        metadata_key=args.metadata_key,
        group_key=args.group_key,
        margin=args.margin
    )

    def write_split(split: str, proportion: int, output_file: str) -> None:
        # Write every note whose group key was assigned to the given split;
        # skip splits that were not requested (proportion <= 0)
        if proportion <= 0:
            return
        dataset_splitter.set_split(split)
        with open(output_file, 'w') as out_file, open(args.input_file, 'r') as in_file:
            for line in in_file:
                note = json.loads(line)
                key = note[args.metadata_key][args.group_key]
                if dataset_splitter.check_note(key):
                    out_file.write(json.dumps(note) + '\n')

    # The original repeated this read/filter/write loop once per split
    write_split('train', args.train_proportion, args.train_file)
    write_split('validation', args.validation_proportion, args.validation_file)
    write_split('test', args.test_proportion, args.test_file)
    if args.print_dist:
        # Re-read the dataset and compute the NER type distribution per split
        key_counts = Counter()
        ner_distribution = NERDistribution()
        with open(args.input_file, 'r') as in_file:
            for line in in_file:
                note = json.loads(line)
                key = note[args.metadata_key][args.group_key]
                key_counts[key] += 1
                ner_distribution.update_distribution(spans=note[args.spans_key], key=key)
        print_distribution = PrintDistribution(ner_distribution=ner_distribution, key_counts=key_counts)
        train_splits = dataset_splitter.get_split('train')
        validation_splits = dataset_splitter.get_split('validation')
        test_splits = dataset_splitter.get_split('test')
        all_splits = train_splits + validation_splits + test_splits
        # Print distribution for each split
        print_distribution.split_distribution(split='total', split_info=all_splits)
        print_distribution.split_distribution(split='train', split_info=train_splits)
        print_distribution.split_distribution(split='validation', split_info=validation_splits)
        print_distribution.split_distribution(split='test', split_info=test_splits)
# Script entry point
if __name__ == "__main__":
    main()
from collections import deque
from typing import Deque, List, Sequence, Iterable, Optional, NoReturn, Dict, Mapping, Union, Tuple
class SentenceDataset(object):
"""
When we mention previous sentence and next sentence, we don't mean exactly one sentence
but rather a previous chunk and a next chunk. This can include one or more sentences and
it does not mean that the sentence has to be complete (it can be cutoff in between) - hence a chunk
This class is used to build a dataset at the sentence
level. It takes as input all the tokenized sentences in the note. So the input is
a list of lists where the outer list represents the sentences in the note and the inner list
is a list of tokens in the sentence. It then returns a dataset where each sentence is
concatenated with the previous and a next chunk. This is done so that when we build a model
we can use the previous and next chunks to add context to the sentence/model. The weights and loss etc
will be computed and updated based on the current sentence. The previous and next chunks will
only be used to add context. We could have different sizes of previous and next chunks
depending on the position of the sentence etc. Essentially we build a sentence level dataset
where we can also provide context to the sentence by including the previous and next chunks
"""
def __init__(
        self,
        max_tokens: int,
        max_prev_sentence_token: int,
        max_next_sentence_token: int,
        default_chunk_size: int,
        ignore_label: str
) -> None:
    """
    Set the maximum token length a given training example (sentence level) can have,
    that is the total length of the current sentence + previous chunk + next chunk.
    We also set the maximum length of the previous and next chunks, i.e. how many
    tokens can be in these chunks. If the total length exceeds max_tokens, tokens
    in the previous and next chunks will be dropped to ensure that the total
    length is < max_tokens.
    The default chunk size ensures that the length of the chunks will be a minimum
    number of tokens based on the value passed. For example if default_chunk_size=10,
    the length of the previous chunks and next chunks will be at least 10 tokens.
    Args:
        max_tokens (int): maximum token length a given training example (sentence level) can have
        max_prev_sentence_token (int): The max chunk size for the previous chunks for a given
                                       sentence (training/prediction example) in the note can have
        max_next_sentence_token (int): The max chunk size for the next chunks for a given
                                       sentence (training/prediction example) in the note can have
        default_chunk_size (int): the training example will always include a chunk of this length
                                  as part of the previous and next chunks
        ignore_label (str): The label assigned to the previous and next chunks to distinguish
                            from the current sentence
    """
    # NOTE(review): initialized but never assigned in this method; presumably an
    # example/sentence id populated elsewhere in the class - confirm before relying on it
    self._id_num = None
    self._max_tokens = max_tokens
    self._max_prev_sentence_token = max_prev_sentence_token
    self._max_next_sentence_token = max_next_sentence_token
    self._default_chunk_size = default_chunk_size
    self._ignore_label = ignore_label
@staticmethod
def chunker(
seq: Sequence[Mapping[str, Union[str, int]]],
size: int
) -> Iterable[Sequence[Mapping[str, Union[str, int]]]]:
"""
Return chunks of the sequence. The size of each chunk will be based
on the value passed to the size argument.
Args:
seq (Sequence): maximum token length a given training example (sentence level) can have
size (int): The max chunk size for the chunks
Return:
(Iterable[Sequence[Mapping[str, Union[str, int]]]]): Iterable that iterates through fixed size chunks of
the input sequence chunked version of the sequence
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def get_previous_sentences(self, sent_tokens: Sequence[Sequence[Mapping[str, Union[str, int]]]]) -> List[Deque]:
"""
Go through all the sentences in the medical note and create a list of
previous sentences. The output of this function will be a list of chunks
where each index of the list contains the sentences (chunks) - (tokens) present before
the sentence at that index in the medical note. For example prev_sent[0] will
be empty since there is no sentence before the first sentence in the note
prev_sent[1] will be equal to sent[0], that is the previous sentence of the
second sentence will be the first sentence. We make use of deque, where we
start to deque elements when it start to exceed max_prev_sentence_token. This
list of previous sentences will be used to define the previous chunks
Args:
sent_tokens (Sequence[str]): Sentences in the note and
each element of the list contains a
list of tokens in that sentence
Returns:
previous_sentences (List[deque]): A list of deque objects where each index contains a
list (queue) of previous tokens (chunk) with respect
to the sentence represented by that index in the note
"""
previous_sentences = list()
# Create a queue and specify the capacity of the queue
# Tokens will be popped from the queue when the capacity is exceeded
prev_sentence = deque(maxlen=self._max_prev_sentence_token)
# The first previous chunk is empty since the first sentence in the note does not have
# anything before it
previous_sentences.append(prev_sentence.copy())
# As we iterate through the list of sentences in the not, we add the tokens from the previous chunks
# to the the queue. Since we have a queue, as soon as the capacity is exceeded we pop tokens from
# the queue
for sent_token in sent_tokens[:-1]:
for token in sent_token:
prev_sentence.append(token)
# As soon as each sentence in the list is processed
# We add a copy of the current queue to a list - this list keeps track of the
# previous chunks for a sentence
previous_sentences.append(prev_sentence.copy())
return previous_sentences
def get_next_sentences(self, sent_tokens: Sequence[Sequence[Mapping[str, Union[str, int]]]]) -> List[Deque]:
"""
Go through all the sentences in the medical note and create a list of
next sentences. The output of this function will be a list of lists
where each index of the list contains the list of sentences present after
the sentence at that index in the medical note. For example next_sent[-] will
be empty since there is no sentence after the last sentence in the note
next_sent[0] will be equal to sent[1:], that is the next sentence of the
first sentence will be the subsequent sentences. We make use of deque, where we
start to deque elements when it start to exceed max_next_sentence_token. This
list of previous sentences will be used to define the previous chunks
Args:
sent_tokens (Sequence[str]): Sentences in the note and each
element of the list contains a
list of tokens in that sentence
Returns:
next_sentences (List[deque]): A list of deque objects where each index contains a list (queue)
of next tokens (chunk) with respect to the sentence represented
by that index in the note
"""
# A list of next sentences is first created and reversed
next_sentences = list()
# Create a queue and specify the capacity of the queue
# Tokens will be popped from the queue when the capacity is exceeded
next_sentence = deque(maxlen=self._max_next_sentence_token)
# The first (which becomes the last chunk when we reverse this list) next chunk is empty since
# the last sentence in the note does not have
# anything after it
next_sentences.append(next_sentence.copy())
for sent_token in reversed(sent_tokens[1:]):
for token in reversed(sent_token):
next_sentence.appendleft(token)
next_sentences.append(next_sentence.copy())
# The list is reversed - since we went through the sentences in the reverse order in
# the earlier steps
return [next_sent for next_sent in reversed(next_sentences)]
    def get_sentences(
            self,
            sent_tokens: Sequence[Sequence[Mapping[str, Union[str, int]]]],
            token_text_key: str = 'text',
            label_key: str = 'label',
            start_chunk: Optional[Sequence[Mapping[str, Union[str, int]]]] = None,
            end_chunk: Optional[Sequence[Mapping[str, Union[str, int]]]] = None,
            sub: bool = False
    ) -> Iterable[Tuple[int, Dict[str, Union[List[Dict[str, Union[str, int]]], List[str]]]]]:
        """
        When we mention previous sentence and next sentence, we don't mean exactly one sentence
        but rather a previous chunk and a next chunk. This can include one or more sentences and
        it does not mean that the sentence has to be complete (it can be cut off in between) - hence a chunk.
        We iterate through all the tokenized sentences in the note. So the input is
        a list of lists where the outer list represents the sentences in the note and the inner list
        is a list of tokens in the sentence. It then returns a dataset where each sentence is
        concatenated with the previous and the next chunk. This is done so that when we build a model
        we can use the previous and next chunk to add context to the model. The weights and loss etc.
        will be computed and updated based on the current sentence only; the previous and next chunks
        only add context. Chunk sizes may differ depending on the position of the sentence in the note.
        ignore_label is used to differentiate between the current sentence and the previous and next
        chunks. The chunks are given the label NA while the current sentence keeps its real labels
        (DATE, AGE etc.) so that the two can be distinguished.
        If we are building a dataset for predictions the current sentence will have the default
        label O, but the next and previous chunks will still have the label NA. If the total length
        exceeds max_tokens, tokens in the previous and next chunks are dropped to keep the total
        length < max_tokens. The default chunk size guarantees a minimum number of context tokens;
        for example if default_chunk_size=10, the previous and next chunks will keep at least 10
        tokens each. If the total length > max_tokens even after shrinking the context chunks, the
        long sentence itself is split into sub sentences and the process is repeated recursively.
        Args:
            sent_tokens (Sequence[Sequence[Mapping[str, Union[str, int]]]]): Sentences in the note and each sentence
                                                                             contains the tokens (dict) in that sentence
                                                                             the token dict object contains the
                                                                             token text, start, end etc
            token_text_key (str): Each sentence contains a list of tokens where each token is a dict. We use the text
                                  key to extract the text of the token from the dictionary
            label_key (str): Each sentence contains a list of tokens where each token is a dict. We use the label_key
                             key to extract the label of the token from the dictionary. (if it does not have a label
                             the default label will be assigned)
            start_chunk (Optional[Sequence[Mapping[str, Union[str, int]]]]): Prefix the first sentence with some
                                                                             pre-defined chunk
            end_chunk (Optional[Sequence[Mapping[str, Union[str, int]]]]): Suffix the last sentence with some
                                                                           pre-defined chunk
            sub (bool): Whether the function is called to process sub-sentences (used when we are splitting
                        long sentences into smaller sub sentences to keep sentence length < max_tokens)
        Returns:
            (Iterable[Tuple[int, Dict[str, Union[List[Dict[str, Union[str, int]]], List[str]]]]]): Iterate through the
                                                                                                   returned sentences,
                                                                                                   where each sentence
                                                                                                   has the previous
                                                                                                   chunks and next
                                                                                                   chunks attached
                                                                                                   to it.
        """
        # Id num keeps track of the id of the sentence - that is the position the sentence occurs in
        # the note. We keep the id of sub sentences the same as the sentence, so that the user
        # knows that these sub sentences are chunked from a longer sentence.
        # <SENT 0> <SENT 1>. Say length of sent 0 with the previous and next chunks is less than max_tokens
        # we return sent 0 with id 0. For sent 1, say the length is longer, we split it into sub
        # sentences - <SUB 1><SUB 2> - we return SUB 1, and SUB 2 with id 1 - so we know that it belongs
        # to <SENT 1> in the note.
        if not sub:
            self._id_num = -1
        # Build, for every sentence in the note, the left-hand and right-hand
        # context chunks. Each training/prediction row has the shape
        # [ps1, ps2, ... ps-i], [cs1, cs2, ... cs-j], [ns1, ns2, ... ns-k] - the current sentence
        # (the one we train/predict on) sits in the middle and the surrounding tokens
        # provide context.
        # Get the previous sentences (chunks) for each sentence in the note
        previous_sentences = self.get_previous_sentences(sent_tokens)
        # Get the next sentences (chunks) for each sentence in the note
        next_sentences = self.get_next_sentences(sent_tokens)
        # Sanity check: there must be exactly one previous chunk and one next chunk
        # per sentence, otherwise the zip below would silently drop sentences.
        if len(sent_tokens) != len(previous_sentences) or len(sent_tokens) != len(next_sentences):
            raise ValueError('Sentence length mismatch')
        # Iterate through all the sentences in the note and concatenate each sentence
        # with its previous and next chunks. Each concatenated sentence is one sample.
        # The same code path serves training and prediction; only the labels differ
        # (annotated labels for training, the default label O for prediction).
        for index, (previous_sent, current_sent, next_sent) in enumerate(
                zip(previous_sentences, sent_tokens, next_sentences)):
            sent_tokens_text = list()
            sent_labels = list()
            sent_toks = list()
            # Get the tokens and labels for the current sentence
            for token in current_sent:
                # We store this, if we need to process sub sentences when a sentence exceeds max_tokens
                sent_toks.append(token)
                sent_tokens_text.append(token[token_text_key])
                sent_labels.append(token[label_key])
            # We check if the number of tokens in the current sentence + previous chunk
            # + next chunk exceeds max tokens. If it does we start popping tokens from the previous and next chunks
            # until the number of tokens is equal to max tokens
            previous_sent_length = len(previous_sent)
            current_sent_length = len(sent_tokens_text)
            next_sent_length = len(next_sent)
            total_length = previous_sent_length + current_sent_length + next_sent_length
            # Shrink the context: pop from whichever side is currently longer
            # (ties favor the next chunk) until either total_length <= max_tokens
            # or both chunks are down to the guaranteed default_chunk_size.
            while total_length > self._max_tokens and \
                    (next_sent_length > self._default_chunk_size or previous_sent_length > self._default_chunk_size):
                if next_sent_length >= previous_sent_length:
                    next_sent.pop()
                    next_sent_length -= 1
                    total_length -= 1
                elif previous_sent_length > next_sent_length:
                    previous_sent.popleft()
                    previous_sent_length -= 1
                    total_length -= 1
            # If this is not a sub sentence, increment the ID to
            # indicate the processing of the next sentence of the note
            # If it is a sub sentence, keep the ID the same, to indicate
            # it belongs to a larger sentence
            if not sub:
                self._id_num += 1
            # If total length <= max_tokens - emit the current sentence with its
            # previous and next chunks attached
            if total_length <= self._max_tokens:
                # Check if we want to add a pre-defined chunk for the first sentence in the note
                if index == 0 and start_chunk is not None:
                    previous_sent_tokens = [chunk[token_text_key] for chunk in start_chunk] + \
                                           [prev_token[token_text_key] for prev_token in list(previous_sent)]
                else:
                    previous_sent_tokens = [prev_token[token_text_key] for prev_token in list(previous_sent)]
                # Check if we want to add a pre-defined chunk for the last sentence in the note
                if index == len(sent_tokens) - 1 and end_chunk is not None:
                    next_sent_tokens = [next_token[token_text_key] for next_token in list(next_sent)] + \
                                       [chunk[token_text_key] for chunk in end_chunk]
                else:
                    next_sent_tokens = [next_token[token_text_key] for next_token in list(next_sent)]
                previous_sent_length = len(previous_sent_tokens)
                next_sent_length = len(next_sent_tokens)
                # Assign a different label (the ignore label) to the chunks - since they are used only for context.
                # NOTE(review): if _ignore_label is neither 'NA' nor 'label', both label
                # lists stay empty while the token lists do not - labels_data would then
                # be shorter than tokens_data. Confirm _ignore_label is restricted upstream.
                previous_sent_labels = list()
                next_sent_labels = list()
                if self._ignore_label == 'NA':
                    previous_sent_labels = [self._ignore_label] * previous_sent_length
                    next_sent_labels = [self._ignore_label] * next_sent_length
                elif self._ignore_label == 'label':
                    if index == 0 and start_chunk is not None:
                        previous_sent_labels = [chunk[label_key] for chunk in start_chunk] + \
                                               [prev_token[label_key] for prev_token in list(previous_sent)]
                    else:
                        previous_sent_labels = [prev_token[label_key] for prev_token in list(previous_sent)]
                    if index == len(sent_tokens) - 1 and end_chunk is not None:
                        next_sent_labels = [next_token[label_key] for next_token in list(next_sent)] + \
                                           [chunk[label_key] for chunk in end_chunk]
                    else:
                        next_sent_labels = [next_token[label_key] for next_token in list(next_sent)]
                # Concatenate the chunks and the sentence
                tokens_data = previous_sent_tokens + sent_tokens_text + next_sent_tokens
                labels_data = previous_sent_labels + sent_labels + next_sent_labels
                # Return processed sentences; current_sent_info carries the token dicts
                # (start/end positions etc.) of the current sentence only
                yield self._id_num, {'tokens': tokens_data, 'labels': labels_data, 'current_sent_info': current_sent}
            # Process the sub sentences - we take a long sentence
            # and split it into smaller chunks - and we recursively call the function on this list
            # of smaller chunks - as mentioned before the smaller chunks (sub sentences) will have the
            # same ID as the original sentence
            else:
                # Store the smaller chunks - say <SENT1> is too long
                # <PREV CHUNK><SENT1><NEXT CHUNK>
                # We chunk sent 1 into <SUB1><SUB2><SUB3> and pass [<SUB1><SUB2><SUB3>] to the function
                # as a recursive call. This list is now processed as a smaller note that essentially belongs
                # to a sentence. But as you can see we did not pass <PREV CHUNK> & <NEXT CHUNK>, because
                # these are chunks that are not part of the current sentence, but they still need to be
                # included in the final output - and the workaround is mentioned below.
                # So that we have a previous chunk for <SUB1> and next chunk for <SUB3>
                # we include the previous_sent_tokens and next_sent_tokens as the start chunk
                # and the end chunk in the function call below
                # <PREV CHUNK><SUB1><NEXT SUB1>, id = x
                # <PREV SUB2><SUB2><NEXT SUB2>, id = x
                # <PREV SUB3><SUB3><NEXT CHUNK>, id = x
                sub_sentences = list()
                # Prefix the first sentence in these smaller chunks
                previous_sent_tokens = list(previous_sent)
                # Suffix the last sentence in these smaller chunks
                next_sent_tokens = list(next_sent)
                # Reserve 2 * default_chunk_size tokens for context on either side
                for chunk in SentenceDataset.chunker(sent_toks, self._max_tokens - (2 * self._default_chunk_size)):
                    sub_sentences.append(chunk)
                # Process list of smaller chunks
                for sub_sent in self.get_sentences(
                        sub_sentences,
                        token_text_key,
                        label_key,
                        start_chunk=previous_sent_tokens,
                        end_chunk=next_sent_tokens,
                        sub=True
                ):
                    yield sub_sent
import random
import re
from typing import Iterable, Dict, Sequence, Union, Mapping, Optional, List
from .labels import NERTokenLabels, NERPredictTokenLabels, MismatchError
random.seed(41)
class Dataset(object):
    """
    Build a NER token classification dataset where each token carries a label
    derived from annotated spans.
    For training the dataset is built from annotated spans (e.g. from prodigy);
    for predictions every token gets a default label so that the same format can
    be used to train a model and to obtain predictions from one.
    The dataset is on a sentence level: each note is split into sentences and the
    task (training or prediction) runs per sentence. The shape is:
    Tokens: [tok1, tok2, ... tok n]
    Labels: [lab1, lab2, ... lab n]
    In prediction mode the labels are: [default, default, ... default]
    """

    def __init__(
            self,
            sentencizer,
            tokenizer
    ):
        """
        Store the components used to split a note into sentences and tokens.
        Args:
            sentencizer (Union[SpacySentencizer, MimicStanzaSentencizer, NoteSentencizer]): The sentencizer to use for
                                                                                            splitting notes into
                                                                                            sentences
            tokenizer (Union[ClinicalSpacyTokenizer, SpacyTokenizer, CoreNLPTokenizer]): The tokenizer to use for
                                                                                         splitting text into tokens
        """
        self._sentencizer = sentencizer
        self._tokenizer = tokenizer

    def get_tokens(
            self,
            text: str,
            spans: Optional[List[Mapping[str, Union[str, int]]]] = None,
            notation: str = 'BIO',
            token_text_key: str = 'text',
            label_key: str = 'label'
    ) -> Iterable[Sequence[Dict[str, Union[str, int]]]]:
        """
        Yield, for each sentence of the note, the list of its tokens; each token
        dict gains a NER label under ``label_key``.
        Args:
            text (str): The text present in the note
            spans (Optional[List[Mapping[str, Union[str, int]]]]): The NER spans in the note. None when
                                                                   building the dataset for prediction
            notation (str): The notation used for the label scheme (e.g. BIO, BILOU etc.)
            token_text_key (str): The key under which each token stores its text
            label_key (str): The key under which each token's label is stored
        Returns:
            Iterable[Sequence[Dict[str, Union[str, int]]]]: Iterable that yields the list of
                                                            labeled tokens of each sentence
        Raises:
            ValueError: On token/span misalignment or OTHERISSUE spans
        """
        # In prediction mode (no spans) every token gets the default label 'O';
        # in training mode tokens are aligned against the annotated spans.
        if spans is None:
            label_spans = NERPredictTokenLabels('O')
        else:
            label_spans = NERTokenLabels(spans=spans, notation=notation)
        # Iterate through the sentences in the note
        for sentence in self._sentencizer.get_sentences(text=text):
            # Token offsets from the tokenizer are sentence-relative; shift them by
            # the sentence start so start/end are positions within the entire note
            offset = sentence['start']
            # Keeps track of the tokens in the sentence
            tokens = list()
            for token in self._tokenizer.get_tokens(text=sentence['text']):
                token['start'] += offset
                token['end'] += offset
                # Skip whitespace-only/empty tokens and zero-width tokens
                # (equivalent to the old membership test against ['\n', '\t', ' ', ''])
                if not token[token_text_key].strip() or token['start'] == token['end']:
                    continue
                # Shorten consecutive sequences of special characters: these can make
                # BERT truncate extremely long sentences. Raw strings are used so the
                # regex escapes don't trigger invalid-escape-sequence warnings.
                elif re.search(r'(\W|_){9,}', token[token_text_key]):
                    print('WARNING - Shortening a long sequence of special characters from {} to 8'.format(
                        len(token[token_text_key])))
                    # NOTE(review): for a mixed run (e.g. '!!--##...') the group holds the
                    # last matched character, so that character is repeated 8 times -
                    # confirm this is the intended normalization
                    token[token_text_key] = re.sub(r'(?P<specchar>(\W|_)){8,}', r'\g<specchar>' * 8,
                                                   token[token_text_key])
                elif len(token[token_text_key].split(' ')) != 1:
                    print('WARNING - Token contains a space character - will be replaced with hyphen')
                    token[token_text_key] = token[token_text_key].replace(' ', '-')
                # Get the label for each token based on the notation (e.g. BIO).
                # In predict mode the default label (e.g. O) is assigned.
                try:
                    label = label_spans.get_labels(token=token)
                    if label[2:] == 'OTHERISSUE':
                        raise ValueError('Fix OTHERISSUE spans')
                # Token and span do not align - surface the offending token before failing
                except MismatchError:
                    print(token)
                    raise ValueError('Token-Span mismatch')
                token[label_key] = label
                tokens.append(token)
            # Only yield sentences that still contain tokens after filtering
            if tokens:
                yield tokens
from typing import Mapping, Union, Sequence, List
from .mismatch_error import MismatchError
class NERTokenLabels(object):
    """
    Align tokens with annotated spans and assign each token a label such as
    'B-LABEL', 'I-LABEL' or 'O'. For example the text "360 Longwood Avenue"
    is 3 tokens - [360, Longwood, Avenue] - and, for a single LOC span covering
    all of it, the tokens get [B-LOC, I-LOC, I-LOC] (the exact prefixes depend
    on the chosen notation). Prodigy-style annotations provide all tokens plus
    span objects (spans: [span1:[tok1, tok2, tok3], span2:[tok7], ...]); this
    class converts that into one label per token based on which span, if any,
    the token falls inside.
    """

    # notation -> (single-token span, span begin, span interior, span end, outside)
    _PREFIX_SCHEMES = {
        'BIO': ('B-', 'B-', 'I-', 'I-', 'O'),
        'BIOES': ('S-', 'B-', 'I-', 'E-', 'O'),
        'BILOU': ('U-', 'B-', 'I-', 'L-', 'O'),
        'IO': ('I-', 'I-', 'I-', 'I-', 'O'),
    }

    def __init__(
            self,
            spans: List[Mapping[str, Union[str, int]]],
            notation: str
    ):
        """
        Store and sort the spans, validate their offsets, and resolve the label
        prefixes for the requested notation.
        Args:
            spans (List[Mapping[str, Union[str, int]]]): List of all the spans in the text
            notation (str): NER label notation - one of 'BIO', 'BIOES', 'BILOU', 'IO'
        Raises:
            ValueError: If a span has non-int offsets or the notation is unknown
        """
        # Keeps track of all the spans (list) in the text (note)
        self._spans = spans
        for span in self._spans:
            # type() (not isinstance) deliberately rejects bools as offsets
            if type(span['start']) != int or type(span['end']) != int:
                raise ValueError('The start and end keys of the span must be of type int')
        # Sort spans by position so tokens can be matched in a single forward pass
        self._spans.sort(key=lambda _span: (_span['start'], _span['end']))
        # Index of the span the next token is matched against
        self._current_span = 0
        # True while the current token stream is inside the current span
        self._inside = False
        # Total number of spans
        self._span_count = len(self._spans)
        # Previously an unknown notation silently left the prefix attributes
        # unset and failed later with AttributeError; fail fast instead.
        if notation not in self._PREFIX_SCHEMES:
            raise ValueError('Unsupported notation: %s' % notation)
        (self._prefix_single, self._prefix_begin, self._prefix_inside,
         self._prefix_end, self._prefix_outside) = self._PREFIX_SCHEMES[notation]

    def __check_begin(self, token: Mapping[str, Union[str, int]]) -> str:
        """
        Label a token that starts exactly at the current span's start, and track
        whether the token covers the whole span or only its beginning.
        Args:
            token (Mapping[str, Union[str, int]]): Contains the token text, start and end position of the token
                                                   in the text
        Returns:
            (str): The begin label (or single-token label if the token covers the span)
        Raises:
            MismatchError: If the token extends past the end of the span
        """
        # Subsequent tokens will be checked for membership 'inside' this span
        self._inside = True
        if token['end'] > int(self._spans[self._current_span]['end']):
            raise MismatchError('Span and Token mismatch - Begin Token extends longer than the span')
        # Token covers only the start of the span - more tokens of this span follow
        elif token['end'] < int(self._spans[self._current_span]['end']):
            return self._prefix_begin + self._spans[self._current_span]['label']
        # Token covers the entire span - close it out and advance to the next span
        elif token['end'] == int(self._spans[self._current_span]['end']):
            self._current_span += 1
            self._inside = False
            return self._prefix_single + self._spans[self._current_span - 1]['label']

    def __check_inside(self, token: Mapping[str, Union[str, int]]) -> str:
        """
        Label a token that lies inside the current span, and track whether it
        closes the span.
        Args:
            token (Mapping[str, Union[str, int]]): Contains the token text, start and end position of the token
                                                   in the text
        Returns:
            (str): The inside label (or end label if the token closes the span)
        Raises:
            MismatchError: If the token starts after, or extends past, the span end
        """
        if (token['start'] >= int(self._spans[self._current_span]['end'])
                or token['end'] > int(self._spans[self._current_span]['end'])):
            raise MismatchError('Span and Token mismatch - Inside Token starts after the span ends')
        # Token is strictly inside the span - more tokens of this span follow
        elif token['end'] < int(self._spans[self._current_span]['end']):
            return self._prefix_inside + self._spans[self._current_span]['label']
        # Token closes the span - advance to the next span
        elif token['end'] == int(self._spans[self._current_span]['end']):
            self._current_span += 1
            self._inside = False
            return self._prefix_end + self._spans[self._current_span - 1]['label']

    def get_labels(self, token: Mapping[str, Union[str, int]]) -> str:
        """
        Return the label (B-LABEL, I-LABEL, O, ...) for a token, based on the
        spans present in the text and the configured notation. Tokens must be
        passed in document order.
        Args:
            token (Mapping[str, Union[str, int]]): Contains the token text, start and end position of the token
                                                   in the text
        Returns:
            (str): One of the labels according to the notation - e.g. 'B-LABEL', 'I-LABEL', 'O'
        Raises:
            MismatchError: If the token cannot be aligned with the spans
        """
        # All spans consumed - every remaining token is outside
        if self._current_span >= self._span_count:
            return self._prefix_outside
        # Token starts exactly at the current span - begin (or single-token) label
        if token['start'] == int(self._spans[self._current_span]['start']):
            return self.__check_begin(token)
        # Token continues a span we are already inside
        elif token['start'] > int(self._spans[self._current_span]['start']) and self._inside is True:
            return self.__check_inside(token)
        # Token ends before the current span starts - outside
        elif self._inside is False and (token['end'] <= int(self._spans[self._current_span]['start'])):
            return self._prefix_outside
        else:
            raise MismatchError(
                'Span and Token mismatch - the span and tokens don\'t line up. There might be a tokenization issue '
                'that needs to be fixed')
from collections import Counter
from typing import Sequence, NoReturn
from .ner_distribution import NERDistribution
class PrintDistribution(object):
    """
    Print the distribution of NER types for a dataset split, summing the
    per-group counts tracked by an NERDistribution object.
    """
    def __init__(self, ner_distribution: NERDistribution, key_counts: Counter) -> None:
        """
        Store the distribution tracker and the per-key note counts.
        (Annotation fixed: these methods return None; NoReturn means "never returns".)
        Args:
            ner_distribution (NERDistribution): NERDistribution object that keeps track of the NER type distributions
            key_counts (Counter): Number of notes per key/group (e.g. note_ids, patient ids etc)
        """
        self._ner_distribution = ner_distribution
        self._key_counts = key_counts
    def split_distribution(self, split: str, split_info: Sequence[str]) -> None:
        """
        Print the aggregated NER type counts and percentages for one split.
        Args:
            split (str): The dataset split (e.g. 'train', 'test') - used only in the header
            split_info (Sequence[str]): The keys (groups) belonging to that split
        """
        # Aggregate counts over every group assigned to this split
        split_distribution = Counter()
        number_of_notes = 0
        for key in split_info:
            number_of_notes += self._key_counts[key]
            split_distribution.update(self._ner_distribution.get_group_distribution(key))
        total_ner = sum(split_distribution.values())
        # Guard against division by zero when the split contains no spans
        percentages = {ner_type: float(count) / total_ner * 100 if total_ner else 0
                       for ner_type, count in split_distribution.items()}
        print('{:^70}'.format('============ ' + split.upper() + ' NER Distribution ============='))
        print('{:<20}{:<10}'.format('Number of Notes: ', number_of_notes))
        print('{:<20}{:<10}\n'.format('Number of Groups: ', len(split_info)))
        # One line per NER type, most frequent first
        for ner_type, count in split_distribution.most_common():
            print('{:<10}{:<10}{:<5}{:<10}{:<5}{:<10}'.format(
                'NER Type: ', ner_type,
                'Count: ', count,
                'Percentage: ', '{:0.2f}'.format(percentages[ner_type]))
            )
        # Totals row
        print('{:<10}{:<10}{:<5}{:<10}{:<5}{:<10}'.format(
            'NER Type:', 'TOTALS', 'Count: ', total_ner, 'Percentage: ', '{:0.2f}'.format(100))
        )
        print('\n')
from collections import Counter, defaultdict
from typing import Sequence, Mapping, NoReturn
class NERDistribution(object):
    """
    Track the distribution of NER types per key (grouping), e.g. per patient or
    per note_id. Counts are updated as spans for a key are passed in.
    Example shape: {PATIENT 1: {AGE: 99, DATE: 55, ...}, PATIENT 2: {AGE: 5, ...}, ...}
    """

    def __init__(self) -> None:
        """
        Initialize the key -> (NER type -> count) mapping.
        (Annotation fixed: returns None; NoReturn means "never returns".)
        """
        # group key -> Counter mapping NER label -> count
        self._ner_distribution = defaultdict(Counter)

    def update_distribution(self, spans: Sequence[Mapping[str, str]], key: str) -> None:
        """
        Update the distribution of NER types for the given key.
        Args:
            spans (Sequence[Mapping[str, str]]): The list of spans in the note
            key (str): The note id or patient id of the note (some grouping)
        """
        # Indexing the defaultdict registers the key even when spans is empty,
        # matching the previous behavior of creating the Counter up front.
        self._ner_distribution[key].update(span['label'] for span in spans)

    def get_ner_distribution(self) -> defaultdict:
        """
        Return the overall NER distribution: a mapping from every key to its
        NER type counts.
        Returns:
            (defaultdict): Overall NER type distribution for all keys
        """
        return self._ner_distribution

    def get_group_distribution(self, key: str) -> Counter:
        """
        Return the NER type distribution for the given key.
        Args:
            key (str): The grouping key
        Returns:
            (Counter): NER distribution w.r.t the given grouping key
        Raises:
            ValueError: If the key has never been seen
        """
        if key in self._ner_distribution:
            return self._ner_distribution[key]
        raise ValueError('Key not found')
from typing import Union, Optional, Sequence
from .sentencizers import SpacySentencizer, NoteSentencizer
from .tokenizers import ClinicalSpacyTokenizer, SpacyTokenizer, CoreNLPTokenizer
class PreprocessingLoader(object):
    """
    Factory helpers that map a name string to the matching sentencizer or
    tokenizer instance.
    """

    @staticmethod
    def get_sentencizer(sentencizer: str) -> Union[SpacySentencizer, NoteSentencizer]:
        """
        Build the requested sentencizer: either a (sci-)spacy model
        (en_core_sci_lg / en_core_sci_md / en_core_sci_sm / en_core_web_sm),
        or 'note' to treat the entire note as a single sentence.
        Args:
            sentencizer (str): Which sentencizer to use
        Returns:
            Union[SpacySentencizer, NoteSentencizer]: An instance of the requested
                                                      sentencizer class
        Raises:
            ValueError: If the name matches no known sentencizer
        """
        spacy_models = {'en_core_sci_lg', 'en_core_sci_md', 'en_core_sci_sm', 'en_core_web_sm'}
        if sentencizer in spacy_models:
            return SpacySentencizer(spacy_model=sentencizer)
        if sentencizer == 'note':
            return NoteSentencizer()
        raise ValueError('Invalid sentencizer - does not exist')

    @staticmethod
    def get_tokenizer(tokenizer: str) -> Union[SpacyTokenizer, ClinicalSpacyTokenizer, CoreNLPTokenizer]:
        """
        Build the requested tokenizer: a plain (sci-)spacy model, the CoreNLP
        tokenizer ('corenlp'), or the regex-augmented clinical spacy tokenizer
        ('clinical').
        Args:
            tokenizer (str): Which tokenizer to use
        Returns:
            Union[SpacyTokenizer, ClinicalSpacyTokenizer, CoreNLPTokenizer]: An instance of the requested
                                                                             tokenizer class
        Raises:
            ValueError: If the name matches no known tokenizer
        """
        spacy_models = {'en_core_sci_lg', 'en_core_sci_md', 'en_core_sci_sm', 'en_core_web_sm', 'en'}
        if tokenizer in spacy_models:
            return SpacyTokenizer(spacy_model=tokenizer)
        if tokenizer == 'corenlp':
            return CoreNLPTokenizer()
        if tokenizer == 'clinical':
            # The clinical tokenizer keeps abbreviations (e.g. 18F-FDG) intact
            return ClinicalSpacyTokenizer(spacy_model='en_core_sci_sm')
        raise ValueError('Invalid tokenizer - does not exist')
import re
import spacy
from spacy.symbols import ORTH
from .spacy_tokenizer import SpacyTokenizer
from .utils import DateRegex, CleanRegex, ClinicalRegex
def read_abbreviations():
    """Load the bundled medical abbreviation list, one abbreviation per line."""
    import importlib.resources as pkg_resources
    from . import abbreviations
    with pkg_resources.open_text(abbreviations, 'medical_abbreviations.txt') as handle:
        # Strip only the trailing newline so the abbreviation text is preserved as-is
        return [line.rstrip('\n') for line in handle]
class ClinicalSpacyTokenizer(SpacyTokenizer):
"""
This class is used to read text and return the tokens
present in the text (and their start and end positions)
"""
def __init__(
self,
spacy_model,
split_multiple=True,
split_temperature=True,
split_percentage=True
):
"""
Initialize a spacy model to read text and split it into
tokens.
Args:
spacy_model (str): Name of the spacy model
"""
super().__init__(spacy_model)
self._nlp.tokenizer.prefix_search = self.__get_prefix_regex(split_multiple, split_temperature,
split_percentage).search
self._nlp.tokenizer.infix_finditer = self.__get_infix_regex().finditer
self._nlp.tokenizer.suffix_search = self.__get_suffix_regex().search
new_rules = {}
for orth, exc in self._nlp.tokenizer.rules.items():
if re.search('((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Sept|Oct|Nov|Dec)[.]$)|(^(W|w)ed$)', orth):
continue
new_rules[orth] = exc
self._nlp.tokenizer.rules = new_rules
abbreviations = read_abbreviations()
for abbreviation in abbreviations:
special_case = [{ORTH: abbreviation}]
self._nlp.tokenizer.add_special_case(abbreviation, special_case)
# this matches any lower case tokens - abstract this part out - whetehr to lowercase abbreviations ro not
exclusions_uncased = {abbreviation.lower(): [{ORTH: abbreviation.lower()}] for abbreviation in
abbreviations}
for k, excl in exclusions_uncased.items():
try:
self._nlp.tokenizer.add_special_case(k, excl)
except:
print('failed to add exception: {}'.format(k))
def __get_prefix_regex(self, split_multiple, split_temperature, split_percentage):
date_prefix = DateRegex.get_infixes()
clinical_prefix = ClinicalRegex.get_prefixes(split_multiple, split_temperature, split_percentage)
clean_prefix = CleanRegex.get_prefixes()
digit_infix = ClinicalRegex.get_infixes()
prefixes = clean_prefix + self._nlp.Defaults.prefixes + date_prefix + clinical_prefix + digit_infix
prefix_regex = spacy.util.compile_prefix_regex(prefixes)
return prefix_regex
def __get_suffix_regex(self):
clean_suffix = CleanRegex.get_suffixes()
suffixes = clean_suffix + self._nlp.Defaults.suffixes
suffix_regex = spacy.util.compile_suffix_regex(suffixes)
return suffix_regex
def __get_infix_regex(self):
    """
    Build the compiled infix regex by combining spacy's default infixes
    with the date and cleaning infix patterns.
    Note: a ClinicalRegex.get_infixes() call was previously made here but
    its result was never included in the combined list; the dead call has
    been removed (those patterns are already folded into the prefix regex).
    Returns:
        The compiled infix regex object
    """
    date_infixes = DateRegex.get_infixes()
    clean_infixes = CleanRegex.get_infixes()
    infixes = self._nlp.Defaults.infixes + date_infixes + clean_infixes
    infix_re = spacy.util.compile_infix_regex(infixes)
    return infix_re
def get_nlp(self):
    """
    Return the underlying spacy Language pipeline whose tokenizer was
    customized in __init__ (clinical prefix/infix/suffix rules and
    abbreviation special cases).
    Returns:
        The spacy Language object held by this tokenizer
    """
    return self._nlp
import json
from typing import Iterable, Mapping, Dict, Union
from pycorenlp import StanfordCoreNLP
class CoreNLPTokenizer(object):
    """
    This class is used to read text and return the tokens
    present in the text (and their start and end positions)
    using core nlp tokenization
    """

    def __init__(self, port: int = 9000):
        """
        Initialize a client for a core nlp server (assumed to be running on
        localhost) that reads text and splits it into tokens using the core
        nlp annotators
        Args:
            port (int): The port the core nlp server is listening on
        """
        self._core_nlp = StanfordCoreNLP('http://localhost:{0}'.format(port))

    def get_stanford_annotations(self, text: str, annotators: str = 'tokenize,ssplit,pos,lemma') -> Dict:
        """
        Use the core nlp server to annotate the text and return the
        results as a json object
        Args:
            text (str): The text to annotate
            annotators (str): The core nlp annotations to run on the text
        Returns:
            output (Dict): The core nlp results
        """
        output = self._core_nlp.annotate(text, properties={
            "timeout": "50000",
            "ssplit.newlineIsSentenceBreak": "two",
            'annotators': annotators,
            'outputFormat': 'json'
        })
        # The server sometimes returns the JSON payload as a raw string;
        # decode it in that case. isinstance replaces the previous
        # `type(output) is str` check (idiomatic, also accepts subclasses).
        if isinstance(output, str):
            output = json.loads(output, strict=False)
        return output

    def get_tokens(self, text: str) -> Iterable[Dict[str, Union[str, int]]]:
        """
        Return an iterable that iterates through the tokens in the text
        Args:
            text (str): The text to annotate
        Returns:
            (Iterable[Dict[str, Union[str, int]]]): Yields a dictionary that contains the text of the token,
                                                    the start position of the token in the entire text
                                                    and the end position of the token in the entire text
        """
        stanford_output = self.get_stanford_annotations(text)
        for sentence in stanford_output['sentences']:
            for token in sentence['tokens']:
                yield {'text': token['originalText'],
                       'start': token['characterOffsetBegin'],
                       'end': token['characterOffsetEnd']}
from typing import List
class CleanRegex(object):
    """
    This class is used to define the regexes that will be used by the
    spacy tokenizer rules. Mainly the regexes are used to clean up
    tokens that have unwanted characters (e.g extra hyphens).
    All patterns are raw strings so that regex character classes are not
    interpreted as (invalid, deprecated) python string escapes.
    """
    # Staff - 3
    # Hosp - 4, 5
    # Loc - 2

    @staticmethod
    def get_prefixes() -> List[str]:
        """
        This function is used to build the regex that will clean up dirty characters
        present at the prefix position (start position) of a token. For example the token ---clean
        has three hyphens that need to be split from the word clean. This regex
        will be used by spacy to clean it up. This rule considers any character that is
        not a letter or a digit as a dirty character
        Examples: ----------------9/36, :63, -ESH
        Returns:
            (list): List of regexes to clean the prefix of the token
        """
        # Handles case 5 of HOSP
        return [r'((?P<prefix>([^a-zA-Z0-9.]))(?P=prefix)*)', r'([.])(?!\d+(\W+|$))']

    @staticmethod
    def get_suffixes() -> List[str]:
        """
        This function is used to build the regex that will clean up dirty characters
        present at the suffix position (end position) of a token. For example the token clean---
        has three hyphens that need to be split from the word clean. This regex
        will be used by spacy to clean it up. This rule considers any character that is
        not a letter or a digit as a dirty character
        Examples: FRANK^, regimen---------------, no)
        Returns:
            (list): List of regexes to clean the suffix of the token
        """
        return [r'((?P<suffix>([^a-zA-Z0-9]))(?P=suffix)*)']

    @staticmethod
    def get_infixes() -> List[str]:
        """
        This function is used to build the regex that will clean up dirty characters
        present at the infix position (in-between position) of a token. For example the token
        clean---me has three hyphens that need to be split from the words clean and me. This regex
        will be used by spacy to clean it up. This rule considers any character that is
        not a letter or a digit as a dirty character
        Examples: FRANK^08/30/76^UNDERWOOD, regimen---------------1/37
        Returns:
            (list): List of regexes to clean the infix of the token
        """
        # Handles case 3 of STAFF
        # Handles case 4 of HOSP
        # Handles case 2 of LOC
        connector_clean = r'\^|;|&#|([\(\)\[\]:="])'
        bracket_comma_clean = r'(((?<=\d)[,)(](?=[a-zA-Z]+))|((?<=[a-zA-Z])[,)(](?=\w+)))'
        special_char_clean = r'(?<=[a-zA-Z])([_\W_]{3,})(?=[A-Za-z]+)'
        # Sometimes when there is no space between a period and a comma - it becomes part of the same token
        # e.g John.,M.D - we need to split this up.
        comma_period_clean = r'(?<=[a-zA-Z])(\.,)(?=[A-Za-z]+)'
        return [connector_clean, bracket_comma_clean, special_char_clean, comma_period_clean]
import json
import numpy as np
from scipy.special import softmax
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from typing import NoReturn
from sklearn.metrics import precision_recall_curve
from .dataset_builder import NERLabels
class RecallThresholder(object):
    """
    Compute the probability threshold at which a desired recall is achieved,
    given a file of per-token logits and gold labels. The 'O' (outside)
    label position is masked out so thresholding is done only over the
    entity-label probabilities.
    """

    def __init__(self, ner_types, notation):
        """
        Build the label list for the given NER types/notation and create a
        boolean mask that hides the 'O' label position.
        Args:
            ner_types: The NER types (e.g. DATE, AGE, ...)
            notation (str): The labeling notation (e.g. BIO)
        """
        ner_labels = NERLabels(notation=notation, ner_types=ner_types)
        label_list = ner_labels.get_label_list()
        self._mask = np.zeros((len(label_list)), dtype=bool)
        self._mask[label_list.index('O')] = True

    def get_precision_recall_threshold(self, logits_file, recall_cutoff, threshold_mode='max', predictions_key='predictions', labels_key='labels'):
        """
        Read the logits file, reduce each token's masked probabilities to a
        single binary score ('max' or 'sum' mode) and return the precision,
        recall and threshold at the requested recall cutoff.
        Args:
            logits_file (str): Path to a jsonl file with per-token logits and labels
            recall_cutoff (float): The recall value (0-1) we are trying to achieve
            threshold_mode (str): 'max' or 'sum' reduction over non-'O' probabilities
            predictions_key (str): JSON key holding the per-token logits
            labels_key (str): JSON key holding the per-token labels
        Returns:
            (precision, recall, threshold) at the recall cutoff
        Raises:
            ValueError: If threshold_mode is not 'max' or 'sum'
        """
        if threshold_mode == 'max':
            y_true, y_pred = self.__convert_binary_max(
                logits_file=logits_file,
                predictions_key=predictions_key,
                labels_key=labels_key
            )
        elif threshold_mode == 'sum':
            y_true, y_pred = self.__convert_binary_sum(
                logits_file=logits_file,
                predictions_key=predictions_key,
                labels_key=labels_key
            )
        else:
            # Previously an unknown mode fell through and crashed with an
            # UnboundLocalError on y_true; fail fast with a clear message.
            raise ValueError(
                'Invalid threshold_mode: {} (expected "max" or "sum")'.format(threshold_mode)
            )
        precision, recall, threshold = self.__get_precision_recall_threshold(y_true=y_true, y_pred=y_pred, recall_cutoff=recall_cutoff)
        return precision[-1], recall[-1], threshold[-1]

    def __convert_binary(self, logits_file, reduce_fn, predictions_key, labels_key):
        """
        Shared implementation for the max/sum conversions: read the jsonl
        logits file and produce binary gold labels (1 = entity, 0 = 'O')
        alongside a reduced score per token.
        """
        y_true = list()
        y_pred = list()
        # `with` ensures the file handle is closed (the previous
        # `for line in open(...)` leaked it)
        with open(logits_file, 'r') as logits_reader:
            for line in logits_reader:
                note = json.loads(line)
                for prediction, label in zip(note[predictions_key], note[labels_key]):
                    logits = softmax(prediction)
                    masked_logits = np.ma.MaskedArray(data=logits, mask=self._mask)
                    y_true.append(0 if label == 'O' else 1)
                    y_pred.append(reduce_fn(masked_logits))
        return y_true, y_pred

    def __convert_binary_max(self, logits_file, predictions_key='predictions', labels_key='labels'):
        """Reduce each token to the max softmax probability over non-'O' labels."""
        return self.__convert_binary(
            logits_file, lambda masked_logits: masked_logits.max(), predictions_key, labels_key
        )

    def __convert_binary_sum(self, logits_file, predictions_key='predictions', labels_key='labels'):
        """Reduce each token to the summed softmax probability over non-'O' labels."""
        return self.__convert_binary(
            logits_file, lambda masked_logits: masked_logits.sum(), predictions_key, labels_key
        )

    def __get_precision_recall_threshold(self, y_true, y_pred, recall_cutoff):
        """
        Run sklearn's precision_recall_curve and keep only the points whose
        recall exceeds the cutoff; the last kept point is the operating point.
        """
        precision, recall, thresholds = precision_recall_curve(y_true, y_pred, pos_label=1)
        # precision/recall have one more entry than thresholds; pad so the
        # boolean filters below line up
        thresholds = np.append(thresholds, thresholds[-1])
        precision_filter = precision[recall > recall_cutoff]
        recall_filter = recall[recall > recall_cutoff]
        thresholds_filter = thresholds[recall > recall_cutoff]
        return precision_filter, recall_filter, thresholds_filter
def main() -> None:
    """
    CLI entry point: parse arguments, compute the probability threshold
    that achieves the requested recall on the given logits file, and print
    the resulting precision, recall and threshold.
    (Return annotation corrected from NoReturn to None - the function
    returns normally after printing.)
    """
    cli_parser = ArgumentParser(
        description='configuration arguments provided at run time from the CLI',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    cli_parser.add_argument(
        '--logits_file',
        type=str,
        required=True,
        help='the jsonl file that contains the logit predictions at each token position'
    )
    cli_parser.add_argument(
        '--ner_types',
        nargs="+",
        required=True,
        help='the NER types'
    )
    cli_parser.add_argument(
        '--notation',
        type=str,
        default='BIO',
        help='the notation we will be using for the label scheme'
    )
    cli_parser.add_argument(
        '--threshold_mode',
        type=str,
        choices=['max', 'sum'],
        required=True,
        help='whether we want to use the summation approach or max approach for thresholding. will need to call the right approach with the sequence tagger as well'
    )
    cli_parser.add_argument(
        '--recall_cutoff',
        type=float,
        required=True,
        help='the recall value you are trying to achieve'
    )
    cli_parser.add_argument(
        '--predictions_key',
        type=str,
        default='predictions',
        help='the key where the note predictions (logits) for each token is present in the json object'
    )
    cli_parser.add_argument(
        '--labels_key',
        type=str,
        default='labels',
        help='the key where the note labels for each token is present in the json object'
    )
    args = cli_parser.parse_args()
    recall_thresholder = RecallThresholder(ner_types=args.ner_types, notation=args.notation)
    # recall_cutoff is given as a percentage on the CLI; convert to [0, 1]
    precision, recall, threshold = recall_thresholder.get_precision_recall_threshold(
        logits_file=args.logits_file,
        recall_cutoff=args.recall_cutoff/100,
        threshold_mode=args.threshold_mode,
        predictions_key=args.predictions_key,
        labels_key=args.labels_key
    )
    print('Threshold Mode: ' + args.threshold_mode.upper())
    print('At threshold: ', threshold)
    print('Precision is: ', precision * 100)
    print('Recall is: ', recall * 100)
# Run the CLI entry point when this module is executed as a script
if __name__ == "__main__":
    main()
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Each attribute is a dataclass field whose metadata["help"] doubles as the
    CLI help text (HfArgumentParser-style parsing).
    """
    # --- Task definition and label scheme ---
    task_name: Optional[str] = field(
        default="ner",
        metadata={"help": "The name of the task (ner, pos...)."}
    )
    notation: str = field(
        default="BIO",
        metadata={"help": "NER notation e.g BIO"},
    )
    ner_types: Optional[str] = field(
        default=None,
        metadata={"help": "Pass a list of NER types"},
    )
    # --- Input and output files ---
    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data file (a csv or JSON file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
    )
    output_predictions_file: Optional[str] = field(
        default=None,
        metadata={"help": "A location where to write the output of the test data"},
    )
    # --- Column names within the input files ---
    text_column_name: Optional[str] = field(
        default='tokens',
        metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
    )
    label_column_name: Optional[str] = field(
        default='labels',
        metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
    )
    # --- Preprocessing / tokenization behavior ---
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to model maximum sentence length. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
            "efficient on GPU but very bad for TPU."
        },
    )
    truncation: bool = field(
        default=True,
        metadata={
            "help": "Activates and controls truncation"
        },
    )
    max_length: int = field(
        default=512,
        metadata={
            "help": "Controls the maximum length to use by one of the truncation/padding parameters."
        },
    )
    do_lower_case: bool = field(
        default=False,
        metadata={
            "help": "Whether to lowercase the text"
        },
    )
    # --- Debugging limits (truncate dataset sizes) ---
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    # --- Labeling and evaluation behavior ---
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
            "one (in which case the other tokens will have a padding index)."
        },
    )
    return_entity_level_metrics: bool = field(
        default=True,
        metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."},
    )
    token_ignore_label: str = field(
        default='NA',
        metadata={"help": "The label that indicates where the tokens will be ignored in loss computation. Used for "
                          "indicating context tokens to the model"}
    )
from collections import defaultdict
from typing import Sequence, Mapping, NoReturn, List, Union
class NoteLevelAggregator(object):
    """
    Aggregate sentence level data back to the note level.
    While training, notes are split into tokenized sentences and every
    sentence becomes one (possibly shuffled) training example. For evaluation
    and prediction we need to go back from the sentence level to the note
    level, so predictions/tokens can be compared and stored per note.
    The list of note_ids passed to the constructor is the reference:
    position i of every aggregated result corresponds to note_ids[i].
    """

    def __init__(
            self,
            note_ids: Sequence[str],
            note_sent_info: Sequence[Mapping[str, Union[str, int]]]
    ) -> None:
        """
        Store the reference note_ids and build the note -> sentence index map.
        Args:
            note_ids (Sequence[str]): The sequence of note_ids to use as reference
            note_sent_info (Sequence[Mapping[str, Union[str, int]]]): Per sentence (training example):
                                                                      the note_id the sentence belongs
                                                                      to and the start and end position
                                                                      of that sentence in the note
        Raises:
            ValueError: If the sentences cannot be mapped back to the reference
                        notes exactly once each (missing or duplicated indexes)
        """
        self._note_ids = note_ids
        self._note_index_map = self.__get_note_index_map(note_sent_info)
        # Sanity check: every sentence in note_sent_info must appear exactly
        # once across the per-note index lists
        flat_indexes = [index for note_index in self._note_index_map for index in note_index]
        if len(note_sent_info) != len(flat_indexes) or len(flat_indexes) != len(set(flat_indexes)):
            raise ValueError(
                'Length mismatch: sentences could not be mapped back to the '
                'reference note_ids exactly once each'
            )

    @staticmethod
    def __get_note_aggregate(note_sent_info: Sequence[Mapping[str, Union[str, int]]]) -> defaultdict:
        """
        Return a mapping from note_id to the sentence info for that note, e.g.
        'id1': [{'index': 8, 'start': 0, 'end': 30}, {'index': 2, 'start': 35, 'end': 70}, ...]
        where 'index' is the sentence's position in the (possibly shuffled)
        dataset. Each note's list is sorted by the sentence start position in
        the note.
        (Return annotation fixed: the previous `-> defaultdict(list)` was a
        defaultdict instance, not a type.)
        Args:
            note_sent_info (Sequence[Mapping[str, Union[str, int]]]): Per sentence info (see __init__)
        Returns:
            note_aggregate (defaultdict): note_id -> sorted sentence info
        """
        note_aggregate = defaultdict(list)
        for index, note_sent in enumerate(note_sent_info):
            note_aggregate[note_sent['note_id']].append(
                {'index': index, 'start': note_sent['start'], 'end': note_sent['end']}
            )
        # Sort the sentences/training examples based on their start position in the note
        for aggregate_info in note_aggregate.values():
            aggregate_info.sort(key=lambda info: info['start'])
        return note_aggregate

    def __get_note_index_map(self, note_sent_info: Sequence[Mapping[str, Union[str, int]]]) -> List[List[int]]:
        """
        Return, for each reference note_id (in order), the list of dataset
        indexes of the sentences belonging to that note, sorted by sentence
        position within the note. E.g. for note_ids=[i1, i2] a result of
        [[8, 80], [7, 9]] says sentences 8 and 80 of the dataset make up note
        i1 (in order) and sentences 7 and 9 make up note i2. Remember the
        dataset can be shuffled.
        Args:
            note_sent_info (Sequence[Mapping[str, Union[str, int]]]): Per sentence info (see __init__)
        Returns:
            (List[List[int]]): Sentence dataset indexes per reference note
        """
        note_aggregate = NoteLevelAggregator.__get_note_aggregate(note_sent_info)
        # A note with no sentences maps to an empty list (the previous
        # .get(note_id, None) default crashed with a TypeError instead)
        return [
            [note_agg['index'] for note_agg in note_aggregate.get(note_id, [])]
            for note_id in self._note_ids
        ]

    def get_aggregate_sequences(
            self,
            sequences: Union[Sequence[Sequence[str]], Sequence[Sequence[Mapping[str, Union[str, int]]]]]
    ) -> List[List[str]]:
        """
        Concatenate the per-sentence sequences (tokens, labels or predictions)
        back into one sequence per note. Position i of the result corresponds
        to note_ids[i], and items appear in the order the sentences occur
        within each note.
        Args:
            sequences: The per-sentence sequences of tokens or labels, indexed
                       like the dataset
        Returns:
            (List[List[str]]): One concatenated sequence per reference note
        """
        return [
            [item for index in note_index for item in sequences[index]]
            for note_index in self._note_index_map
        ]
from typing import Sequence, NoReturn
import torch
from .utils import check_consistent_length
class CRFProcess(object):
    """
    Base class for post-processors that pick an NER tag sequence from the
    top-k sequences proposed by a CRF layer. Subclasses implement
    process_sequences to choose among the candidate sequences.
    """
    def __init__(
        self,
        label_list: Sequence[str],
        top_k: int
    ) -> NoReturn:
        """
        Initialize a label list where the position corresponds to a particular label. For example
        position 0 will correspond to B-DATE etc. top k will return the top k CRF sequences
        Args:
            label_list (Sequence[str]): The list of NER labels
            top_k (int): The number of top CRF sequences to return
        """
        self._label_list = label_list
        self._top_k = top_k
        # Must be set via set_crf before decode is called
        self._crf = None
    def set_crf(self, crf) -> NoReturn:
        """
        Store the CRF layer used while training the model
        Args:
            crf (): Set the CRF layer - this contains the CRF weights (NER transition weights)
        """
        self._crf = crf
    def process_sequences(
        self,
        sequences: Sequence[Sequence[str]],
        scores: Sequence[float]
    ) -> NoReturn:
        """
        The function will be implemented by the sub class and will return a sequence of NER
        predictions based on the implemented function
        Args:
            sequences (Sequence[Sequence[str]]): The list of possible sequences from the CRF layer
            scores (Sequence[float]): The scores for the sequences
        """
        raise NotImplementedError
    def decode(
        self,
        predictions: Sequence[Sequence[Sequence[float]]],
        labels: Sequence[Sequence[int]]
    ):
        """
        Decode the predictions and labels so that the evaluation function and prediction
        functions can use them accordingly. The predictions and labels are numbers (ids)
        of the labels, these will be converted back to the NER tags (B-AGE, I-DATE etc) using
        the label_list. In this function we process the CRF sequences and their scores and
        select the NER sequence based on the implementation of the process_sequences function
        Also remove the predictions and labels on the subword and context tokens
        Args:
            predictions (Sequence[Sequence[Sequence[float]]]): The logits (scores for each tag) returned by the model
            labels (Sequence[Sequence[int]]): Gold standard labels
        Returns:
            true_predictions (Sequence[Sequence[str]]): The predicted NER tags
            true_labels (Sequence[Sequence[str]]): The gold standard NER tags
        """
        # Check if the CRF layer has been initialized
        if self._crf is None:
            raise ValueError('CRF layer not initialized/set - use the set_crf function to set it')
        # Convert to a torch tensor, since the CRF layer expects a torch tensor
        logits = torch.tensor(predictions)
        labels_tensor = torch.tensor(labels)
        output_tags = list()
        # Get the CRF outputs
        # Process the top K outputs based and store the processed sequence
        # based on process_sequences function
        for seq_logits, seq_labels in zip(logits, labels_tensor):
            # Mask out subword/context positions (label == -100) before
            # running Viterbi decoding; the CRF expects a batch dimension
            seq_mask = seq_labels != -100
            seq_logits_crf = seq_logits[seq_mask].unsqueeze(0)
            tags = self._crf.viterbi_tags(seq_logits_crf, top_k=self._top_k)
            # Unpack "batch" results
            # NOTE(review): the result nesting differs when top_k is None
            # (one (tags, score) pair per batch item) vs an int (a list of
            # pairs per batch item) - presumably matching allennlp's
            # ConditionalRandomField.viterbi_tags; confirm against the CRF
            # implementation in use
            if self._top_k is None:
                sequences = [tag[0] for tag in tags]
                scores = [tag[1] for tag in tags]
            else:
                sequences = [tag[0] for tag in tags[0]]
                scores = [tag[1] for tag in tags[0]]
            output_tags.append(self.process_sequences(sequences, scores))
        # Remove ignored index (special tokens)
        true_predictions = [
            [self._label_list[p] for p in prediction]
            for prediction in output_tags
        ]
        true_labels = [
            [self._label_list[l] for l in label if l != -100]
            for label in labels
        ]
        # Defensive check that predictions and labels line up per note/sequence
        check_consistent_length(true_predictions, true_labels)
        return true_predictions, true_labels
from typing import Sequence, NoReturn, Tuple
import numpy as np
from scipy.special import softmax
from .utils import check_consistent_length
class ThresholdProcessMax(object):
    """
    Post-process model logits with a probability threshold (max mode).
    A token is assigned the highest scoring non-'O' label only when that
    label's softmax probability meets the threshold; otherwise the token
    falls back to the 'O' (outside) label.
    """
    def __init__(self, label_list: Sequence[str], threshold: float) -> NoReturn:
        """
        Initialize a label list where the position corresponds to a particular label. For example
        position 0 will correspond to B-DATE etc.
        Args:
            label_list (Sequence[str]): The list of NER labels
            threshold (float): Softmax probability a non-'O' label must reach
        """
        self._label_list = label_list
        self._threshold = threshold
        self._outside_label_index = self._label_list.index('O')
        # Boolean mask that hides the 'O' position in the probability vector
        self._mask = np.zeros((len(self._label_list)), dtype=bool)
        self._mask[self._outside_label_index] = True

    def get_masked_array(self, data):
        """Wrap data in a masked array that hides the 'O' label position."""
        return np.ma.MaskedArray(data=data, mask=self._mask)

    def process_prediction(self, prediction):
        """
        Return the index of the highest scoring non-'O' label if its softmax
        probability is at least the threshold, else the 'O' label index.
        """
        softmax_prob = softmax(prediction)
        masked_softmax_prob = self.get_masked_array(data=softmax_prob)
        above_threshold = masked_softmax_prob[masked_softmax_prob >= self._threshold]
        # When no unmasked probability clears the threshold the sum collapses
        # to the masked constant; np.ma.is_masked replaces the previous
        # `type(...) == np.ma.core.MaskedConstant` comparison (idiomatic check)
        if np.ma.is_masked(above_threshold.sum()):
            return self._outside_label_index
        return masked_softmax_prob.argmax()

    def decode(
            self,
            predictions: Sequence[Sequence[Sequence[float]]],
            labels: Sequence[Sequence[int]]
    ) -> Tuple[Sequence[Sequence[str]], Sequence[Sequence[str]]]:
        """
        Convert per-token logits into NER tags via process_prediction,
        dropping ignored (-100) subword/context positions.
        Args:
            predictions (Sequence[Sequence[Sequence[float]]]): The logits (scores for each tag) returned by the model
            labels (Sequence[Sequence[int]]): Gold standard labels
        Returns:
            true_predictions (Sequence[Sequence[str]]): The predicted NER tags
            true_labels (Sequence[Sequence[str]]): The gold standard NER tags
        """
        # Remove ignored index (special tokens)
        true_predictions = [
            [self._label_list[self.process_prediction(p)] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [self._label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        check_consistent_length(true_predictions, true_labels)
        return true_predictions, true_labels
from typing import Sequence, NoReturn, Tuple
class LogitsProcess(object):
    """
    Post processor that passes the raw model logits through unchanged.
    Subword/context positions (label == -100) are stripped out, gold label
    ids are mapped back to their NER tag strings, and the kept logits are
    returned as plain python floats.
    """
    def __init__(self, label_list: Sequence[str]) -> NoReturn:
        """
        Store the label list; position i in the list is the tag string for
        label id i (e.g. position 0 might be B-DATE).
        Args:
            label_list (Sequence[str]): The list of NER labels
        """
        self._label_list = label_list
    def decode(
            self,
            predictions: Sequence[Sequence[Sequence[float]]],
            labels: Sequence[Sequence[int]]
    ) -> Tuple[Sequence[Sequence[Sequence[float]]], Sequence[Sequence[str]]]:
        """
        Drop ignored (-100) positions and return, per sequence, the remaining
        raw logits (as float lists) alongside the gold standard NER tags.
        Args:
            predictions (Sequence[Sequence[Sequence[float]]]): The logits (scores for each tag) returned by the model
            labels (Sequence[Sequence[int]]): Gold standard labels
        Returns:
            true_predictions (Sequence[Sequence[Sequence[float]]]): The kept logits as python floats
            true_labels (Sequence[Sequence[str]]): The gold standard NER tags
        """
        true_predictions = []
        true_labels = []
        for sequence_logits, sequence_labels in zip(predictions, labels):
            kept_logits = []
            kept_tags = []
            for token_logits, token_label in zip(sequence_logits, sequence_labels):
                if token_label == -100:
                    continue
                kept_logits.append([float(score) for score in token_logits])
                kept_tags.append(self._label_list[token_label])
            true_predictions.append(kept_logits)
            true_labels.append(kept_tags)
        return true_predictions, true_labels
from typing import Sequence
from .argmax_process import ArgmaxProcess
from .crf_argmax_process import CRFArgmaxProcess
from .logits_process import LogitsProcess
from .threshold_process_max import ThresholdProcessMax
from .threshold_process_sum import ThresholdProcessSum
class PostProcessPicker(object):
    """
    Factory for the post-processing strategies that turn model logits into
    final predictions. Each getter returns a processor object: argmax picks
    the highest scoring tag per token, CRF picks the highest scoring tag
    sequence, logits passes the raw scores through, and the threshold
    variants fall back to 'O' below a probability cutoff.
    """
    def __init__(self, label_list):
        """
        Store the NER label list shared by every processor this factory builds.
        Args:
            label_list (Sequence[str]): The NER labels, e.g. B-DATE, I-DATE, B-AGE ...
        """
        self._label_list = label_list
    def get_argmax(self) -> ArgmaxProcess:
        """Return the argmax post processor (highest scoring tag per token)."""
        return ArgmaxProcess(self._label_list)
    def get_crf(self) -> CRFArgmaxProcess:
        """Return the CRF post processor (highest scoring sequence of tags)."""
        return CRFArgmaxProcess(self._label_list)
    def get_logits(self) -> LogitsProcess:
        """Return the post processor that passes the raw model logits through."""
        return LogitsProcess(self._label_list)
    def get_threshold_max(self, threshold) -> ThresholdProcessMax:
        """Return the threshold (max) post processor for the given cutoff."""
        return ThresholdProcessMax(self._label_list, threshold=threshold)
    def get_threshold_sum(self, threshold) -> ThresholdProcessSum:
        """Return the threshold (sum) post processor for the given cutoff."""
        return ThresholdProcessSum(self._label_list, threshold=threshold)
from typing import Sequence, NoReturn, Tuple
import numpy as np
from scipy.special import softmax
from .utils import check_consistent_length
class ThresholdProcessSum(object):
    """
    Post-process model logits with a probability threshold (sum mode).
    A token keeps its highest scoring non-'O' label only when the summed
    softmax probability of all non-'O' labels reaches the threshold;
    otherwise the token is labelled 'O' (outside).
    """
    def __init__(self, label_list: Sequence[str], threshold: float) -> NoReturn:
        """
        Store the label list (position i is the tag string for label id i,
        e.g. position 0 might be B-DATE) and precompute a mask hiding 'O'.
        Args:
            label_list (Sequence[str]): The list of NER labels
            threshold (float): Probability mass required to emit a non-'O' tag
        """
        self._label_list = label_list
        self._threshold = threshold
        self._outside_label_index = self._label_list.index('O')
        # Boolean mask that hides the 'O' position in the probability vector
        self._mask = np.zeros((len(self._label_list)), dtype=bool)
        self._mask[self._outside_label_index] = True
    def get_masked_array(self, data):
        """Wrap data in a masked array that hides the 'O' label position."""
        return np.ma.MaskedArray(data=data, mask=self._mask)
    def process_prediction(self, prediction):
        """
        Return the argmax over non-'O' labels when their combined softmax
        probability reaches the threshold, else the 'O' label index.
        """
        probabilities = self.get_masked_array(data=softmax(prediction))
        if probabilities.sum() >= self._threshold:
            return probabilities.argmax()
        return self._outside_label_index
    def decode(
            self,
            predictions: Sequence[Sequence[Sequence[float]]],
            labels: Sequence[Sequence[int]]
    ) -> Tuple[Sequence[Sequence[str]], Sequence[Sequence[str]]]:
        """
        Convert per-token logits into NER tags via process_prediction,
        dropping ignored (-100) subword/context positions.
        Args:
            predictions (Sequence[Sequence[Sequence[float]]]): The logits (scores for each tag) returned by the model
            labels (Sequence[Sequence[int]]): Gold standard labels
        Returns:
            true_predictions (Sequence[Sequence[str]]): The predicted NER tags
            true_labels (Sequence[Sequence[str]]): The gold standard NER tags
        """
        true_predictions = []
        true_labels = []
        for sequence_logits, sequence_labels in zip(predictions, labels):
            predicted_tags = []
            gold_tags = []
            for token_logits, token_label in zip(sequence_logits, sequence_labels):
                if token_label == -100:
                    continue
                predicted_tags.append(self._label_list[self.process_prediction(token_logits)])
                gold_tags.append(self._label_list[token_label])
            true_predictions.append(predicted_tags)
            true_labels.append(gold_tags)
        check_consistent_length(true_predictions, true_labels)
        return true_predictions, true_labels
import numpy as np
from typing import Sequence, NoReturn, Tuple
from .utils import check_consistent_length
class ArgmaxProcess(object):
    """
    Turn the raw model output (logits per token per tag) into final NER tag
    predictions. The strategy here is the simplest one possible: each token
    is assigned the tag with the highest logit (the argmax). Positions whose
    gold label is -100 (subword pieces, context tokens, special tokens) are
    dropped from both the predictions and the references.
    """
    def __init__(self, label_list: Sequence[str]) -> NoReturn:
        """
        Store the label list whose positions map label ids back to NER tag
        strings (e.g. position 0 -> B-DATE).
        Args:
            label_list (Sequence[str]): The list of NER labels
        """
        self._label_list = label_list
    def decode(
        self,
        predictions: Sequence[Sequence[Sequence[float]]],
        labels: Sequence[Sequence[int]]
    ) -> Tuple[Sequence[Sequence[str]], Sequence[Sequence[str]]]:
        """
        Map logits and gold label ids back to NER tag strings (B-AGE, I-DATE, ...)
        so that the evaluation and prediction functions can use them. The
        prediction for each position is the argmax over the tag scores, and
        only positions whose gold label id is not -100 are kept (this removes
        subword pieces and context tokens from the output).
        Args:
            predictions (Sequence[Sequence[Sequence[float]]]): The logits (scores for each tag) returned by the model
            labels (Sequence[Sequence[int]]): Gold standard label ids
        Returns:
            true_predictions (Sequence[Sequence[str]]): The predicted NER tags
            true_labels (Sequence[Sequence[str]]): The gold standard NER tags
        """
        tag_ids = np.argmax(predictions, axis=2)
        true_predictions = []
        true_labels = []
        for pred_row, label_row in zip(tag_ids, labels):
            # Keep only positions that are not subword/context/special tokens
            kept = [(p, l) for (p, l) in zip(pred_row, label_row) if l != -100]
            true_predictions.append([self._label_list[p] for p, _ in kept])
            true_labels.append([self._label_list[l] for _, l in kept])
        check_consistent_length(true_predictions, true_labels)
        return true_predictions, true_labels
from typing import Mapping, Sequence, List, Union, Optional, NoReturn
from datasets import Dataset
from transformers import PreTrainedTokenizerFast, PreTrainedTokenizer
class DatasetTokenizer(object):
    """
    Subword-tokenize a pre-split NER dataset and align the token-level labels
    with the resulting subword pieces.
    Pre-trained models (BERT etc.) use subword (WordPiece-style) tokenization,
    so a single input token may be split into several pieces - e.g. the date
    token 2080 becomes ['208', '##0'] - while we only have one label (B-DATE)
    for it. The huggingface convention is to give the label to the first
    piece and mark every other piece with -100, which the loss function
    ignores: for 2080 (B-DATE, id 3) the pieces ['208', '##0'] get [3, -100].
    With label_all_tokens the continuation pieces instead receive the I-
    version of the label ([3, 3]-style), and the sub-token predictions have to
    be merged downstream.
    Tokens that are only present to provide context (labelled with the
    token_ignore_label, e.g. NA) are likewise assigned -100 so they never
    contribute to training or evaluation. The same code path is reused at
    prediction time - the label column is then just filled with placeholder
    values.
    """
    def __init__(
        self,
        tokenizer: Union[PreTrainedTokenizerFast, PreTrainedTokenizer],
        token_column: str,
        label_column: str,
        label_to_id: Mapping[str, int],
        b_to_i_label: Sequence[int],
        padding: Union[bool, str],
        truncation: Union[bool, str],
        is_split_into_words: bool,
        max_length: Optional[int],
        label_all_tokens: bool,
        token_ignore_label: Optional[str]
    ) -> NoReturn:
        """
        Store the tokenizer plus the column names, label mappings and
        tokenization options used by tokenize_and_align_labels.
        Args:
            tokenizer (Union[PreTrainedTokenizerFast, PreTrainedTokenizer]): Tokenizer from huggingface
            token_column (str): The column that contains the tokens in the dataset
            label_column (str): The column that contains the labels in the dataset
            label_to_id (Mapping[str, int]): Mapping from label string to label id
            b_to_i_label (Sequence[int]): Maps each B- label id to its I- label id
            padding (Union[bool, str]): Padding strategy
            truncation (Union[bool, str]): Truncation strategy
            is_split_into_words (bool): Whether the input is already split into tokens
            max_length (Optional[int]): Max subword tokenized length for the model
            label_all_tokens (bool): Whether to label continuation subword pieces
            token_ignore_label (str): Label value marking context-only tokens -
                                      these are excluded from the loss computation
        """
        self._tokenizer = tokenizer
        self._token_column = token_column
        self._label_column = label_column
        self._label_to_id = label_to_id
        self._b_to_i_label = b_to_i_label
        self._padding = padding
        self._truncation = truncation
        # Tells the tokenizer the input is ready-split tokens, not raw text
        self._is_split_into_words = is_split_into_words
        self._max_length = max_length
        self._label_all_tokens = label_all_tokens
        self._token_ignore_label = token_ignore_label
        # Huggingface's sentinel label id that the loss function skips
        self._ignore_label = -100
    def tokenize_and_align_labels(self, dataset: Dataset) -> Dataset:
        """
        Run subword tokenization on the pre-split tokens and align the labels
        with the resulting pieces (see the class docstring for the alignment
        rules). For example tokens [60, year, old, in, 2080] yield the
        subwords [60, year, old, in, 208, ##0] (plus tokenizer extras such as
        the attention mask) and the labels [0, 20, 20, 20, 3, -100]. The
        returned object serves as input for training or for gathering
        predictions from a trained model.
        Args:
            dataset (Dataset): The pre-split (tokenized) dataset with labels
        Returns:
            encoded (Dataset): Subword tokenized and label aligned dataset
        """
        # Run the tokenizer - subword tokenization
        encoded = self._tokenizer(
            dataset[self._token_column],
            padding=self._padding,
            truncation=self._truncation,
            max_length=self._max_length,
            is_split_into_words=self._is_split_into_words,
        )
        # Align each example's labels with its subword pieces
        aligned = []
        for index, labels in enumerate(dataset[self._label_column]):
            word_ids = encoded.word_ids(batch_index=index)
            aligned.append(self._align_labels(labels, word_ids))
        encoded[self._label_column] = aligned
        return encoded
    def _align_labels(
        self,
        labels: Sequence[str],
        word_ids: Sequence[int]
    ) -> List[int]:
        """
        Produce the label id for every subword piece of one example.
        word_ids has one entry per subword piece: None for special tokens
        ([CLS], [SEP], padding), otherwise the index of the original token the
        piece came from - consecutive pieces of the same token share an index.
        Special tokens and context-only tokens get -100; the first piece of a
        real token gets the token's label id; continuation pieces get the I-
        version of that id when label_all_tokens is set, else -100.
        Args:
            labels (Sequence[str]): The labels of the original (pre-split) tokens
            word_ids (Sequence[int]): The word ids after subword tokenization
        Returns:
            label_ids (List[int]): One label id per subword piece, with -100
                                   inserted wherever the piece must be ignored
        """
        label_ids = []
        previous = None
        for word_idx in word_ids:
            if word_idx is None:
                # Special token ([CLS], [SEP], padding) - always ignored
                label_ids.append(self._ignore_label)
            elif labels[word_idx] == self._token_ignore_label:
                # Context-only token: never train or evaluate on it
                label_ids.append(self._ignore_label)
            elif word_idx != previous:
                # First subword piece of a token carries the token's label
                label_ids.append(self._label_to_id[labels[word_idx]])
            elif self._label_all_tokens:
                # Continuation piece: use the I- version of the label
                label_ids.append(self._b_to_i_label[self._label_to_id[labels[word_idx]]])
            else:
                # Continuation piece without label_all_tokens - ignored
                label_ids.append(self._ignore_label)
            previous = word_idx
        return label_ids
from typing import List, Sequence, Mapping, Optional, NoReturn, Dict, Union
from .ner_labels import NERLabels
class LabelMapper(object):
    """
    Map one set of NER labels/spans to another set - e.g. collapse all PHI
    types to binary HIPAA labels:
    [B-AGE, O, O, U-LOC, B-DATE, L-DATE, O, B-STAFF, I-STAFF, L-STAFF] ->
    [B-HIPAA, O, O, U-HIPAA, B-HIPAA, I-HIPAA, O, O, O, O]
    or to binary I2B2 labels:
    [B-I2B2, O, O, U-I2B2, B-I2B2, I-I2B2, O, B-I2B2, I-I2B2, L-I2B2]
    The mapping is applied both at the token level (map_sequence) and at the
    span level (map_spans): a span from say start=9, end=15 labelled LOC can
    be re-labelled as HIPAA or I2B2. This class maps an existing set of
    labels to another set of labels.
    """
    def __init__(
        self,
        notation: str,
        ner_types: Sequence[str],
        ner_types_maps: Sequence[str],
        description: str
    ) -> None:
        """
        Initialize the maps used to translate NER labels and spans.
        ner_types and ner_types_maps correspond position-wise: ner_types[i]
        is mapped to ner_types_maps[i]. Mapping a type to 'O' removes it.
        Args:
            notation (str): The NER notation (e.g. BIO, BILOU) used to build the label-level map
            ner_types (Sequence[str]): The original NER types (e.g. [AGE, DATE, STAFF])
            ner_types_maps (Sequence[str]): The target type for each original type (e.g. [HIPAA, HIPAA, O])
            description (str): Human readable description of this mapping
        """
        self._description = description
        # Unique target types, excluding the outside label (when 'O' is not
        # present in the maps the subtraction is a no-op)
        self._types = sorted(set(ner_types_maps) - {'O'})
        # Span-level map: original type -> target type
        self._spans_map = {ner_type: ner_type_map for ner_type, ner_type_map in zip(ner_types, ner_types_maps)}
        # Label-level map: build every prefixed label (B-AGE, I-AGE, ...) and
        # map it to the prefixed target label (B-HIPAA, ...), or to 'O' when
        # the type is dropped by the mapping
        ner_labels = NERLabels(notation=notation, ner_types=ner_types)
        self._ner_map = dict()
        for label in ner_labels.get_label_list():
            if label == 'O' or self._spans_map[label[2:]] == 'O':
                self._ner_map[label] = 'O'
            else:
                self._ner_map[label] = label[0:2] + self._spans_map[label[2:]]
    def map_sequence(self, tag_sequence: Sequence[str]) -> List[str]:
        """
        Map a sequence of NER labels to the target label set.
        E.g. with a binary HIPAA mapping the sequence
        [B-AGE, O, O, U-LOC, B-DATE, L-DATE, O, B-STAFF, I-STAFF, L-STAFF]
        will be mapped to [B-HIPAA, O, O, U-HIPAA, B-HIPAA, I-HIPAA, O, O, O, O]
        Args:
            tag_sequence (Sequence[str]): A sequence of NER labels
        Returns:
            (List[str]): The mapped sequence of NER labels
        """
        return [self._ner_map[tag] for tag in tag_sequence]
    def map_spans(self, spans: Sequence[Mapping[str, Union[str, int]]]) -> Sequence[Dict[str, Union[str, int]]]:
        """
        Map a sequence of NER spans to the target label set, keeping the
        span positions unchanged. E.g. with a binary HIPAA mapping
        [{start:0, end:5, label: DATE}, {start:17, end:25, label: STAFF}, {start:43, end:54, label: PATIENT}]
        will be mapped to
        [{start:0, end:5, label: HIPAA}, {start:17, end:25, label: O}, {start:43, end:54, label: HIPAA}]
        Args:
            spans (Sequence[Mapping[str, Union[str, int]]]): A sequence of NER spans
        Returns:
            (Sequence[Dict[str, Union[str, int]]]): The mapped sequence of NER spans
        """
        return [{'start': span['start'], 'end': span['end'], 'label': self._spans_map[span['label']]} \
                for span in spans]
    def get_ner_description(self) -> str:
        """
        Get the description of the NER label and span maps used
        Returns:
            (str): A description of the label/span maps used
        """
        return self._description
    def get_ner_types(self) -> List[str]:
        """
        Get the unique target PHI types produced by this mapping (without 'O')
        E.g. [B-HIPAA, I-HIPAA, ...] mapping targets ---> [HIPAA]
        Returns:
            (List[str]): The sorted list of unique target NER types
        """
        return self._types
from typing import Sequence, List, NoReturn, Dict
class NERLabels(object):
    """
    Build the label set the model works with from the configured NER types.
    Given the types (e.g. NAME, AGE, HOSP) and a notation (e.g. BIO), every
    type is expanded with each non-'O' prefix of the notation and the bare
    'O' (outside) label is appended, yielding a sorted list such as
    [B-AGE, B-HOSP, B-NAME, I-AGE, I-HOSP, I-NAME, O]. These labels are what
    gets assigned to tokens, and the class also exposes the label<->id
    mappings (e.g. {B-AGE: 0, B-HOSP: 1, ...}) used during training,
    evaluation and prediction.
    """
    def __init__(self, notation: str, ner_types: Sequence[str]) -> NoReturn:
        """
        Store the notation and the NER types used to build the label set.
        Args:
            notation (str): The notation that will be used for the NER labels
            ner_types (Sequence[str]): The list of NER categories
        """
        self._notation = notation
        self._ner_types = ner_types
    def get_label_list(self) -> List[str]:
        """
        Expand the NER types into the full, sorted label list.
        E.g. types [AGE, DATE] with BIO notation -> [B-AGE, B-DATE, I-AGE, I-DATE, O]
        Returns:
            labels (List[str]): The sorted NER labels for the configured notation
        """
        # Start with the bare outside label unless 'O' is already a type
        labels = [] if 'O' in self._ner_types else ['O']
        # Each character of the notation string (minus 'O') is a prefix
        for prefix in self._notation:
            if prefix == 'O':
                continue
            labels.extend(prefix + '-' + ner_type for ner_type in self._ner_types)
        labels.sort()
        return labels
    def get_label_to_id(self) -> Dict[str, int]:
        """
        Return a label to id mapping
        Returns:
            (Dict[str, int]): label -> position in the sorted label list
        """
        return {label: index_id for index_id, label in enumerate(self.get_label_list())}
    def get_id_to_label(self) -> Dict[int, str]:
        """
        Return an id to label mapping
        Returns:
            (Dict[int, str]): position in the sorted label list -> label
        """
        return {index_id: label for index_id, label in enumerate(self.get_label_list())}
from typing import Sequence, Optional, NoReturn
from datasets import load_dataset, Dataset
class NERDataset(object):
    """
    Thin wrapper around the huggingface datasets library. It loads the
    train, validation and test splits from the files passed in, optionally
    shuffles the resulting dataset, and exposes one getter per split.
    """
    def __init__(
        self,
        train_file: Optional[Sequence[str]] = None,
        validation_file: Optional[Sequence[str]] = None,
        test_file: Optional[Sequence[str]] = None,
        extension: str = 'json',
        shuffle: bool = True,
        seed: int = 41
    ) -> NoReturn:
        """
        Read the given files and convert them into a huggingface dataset
        keyed by split name.
        Args:
            train_file (Optional[Sequence[str]]): The list of files that contain train data
            validation_file (Optional[Sequence[str]]): The list of files that contain validation data
            test_file (Optional[Sequence[str]]): The list of files that contain test data
            extension (str): The format of the data files (e.g. json)
            shuffle (bool): Whether to shuffle the dataset
            seed (int): Shuffle seed
        """
        self._datasets = NERDataset.__load_splits(
            train_file,
            validation_file,
            test_file,
            extension,
            shuffle,
            seed
        )
    @staticmethod
    def __load_splits(
        train_file: Optional[Sequence[str]],
        validation_file: Optional[Sequence[str]],
        test_file: Optional[Sequence[str]],
        extension: str,
        shuffle: bool,
        seed: int
    ) -> Dataset:
        """
        Build the data_files mapping from the splits that were actually
        passed and load them into a huggingface dataset.
        Args:
            train_file (Optional[Sequence[str]]): The list of files that contain train data
            validation_file (Optional[Sequence[str]]): The list of files that contain validation data
            test_file (Optional[Sequence[str]]): The list of files that contain test data
            extension (str): The format of the data files (e.g. json)
            shuffle (bool): Whether to shuffle the dataset
            seed (int): Shuffle seed
        Returns:
            (Dataset): The huggingface dataset with the splits that were included
        """
        split_files = (
            ('train', train_file),
            ('validation', validation_file),
            ('test', test_file),
        )
        # Only include the splits for which files were actually passed
        data_files = {split: files for split, files in split_files if files is not None}
        datasets = load_dataset(extension, data_files=data_files)
        # Shuffle the dataset if requested
        return datasets.shuffle(seed=seed) if shuffle else datasets
    def get_train_dataset(self) -> Dataset:
        """
        Return the train dataset
        Returns:
            (Dataset): The huggingface dataset - train split
        """
        return self._datasets['train']
    def get_validation_dataset(self) -> Dataset:
        """
        Return the validation dataset
        Returns:
            (Dataset): The huggingface dataset - validation split
        """
        return self._datasets['validation']
    def get_test_dataset(self) -> Dataset:
        """
        Return the test dataset
        Returns:
            (Dataset): The huggingface dataset - test split
        """
        return self._datasets['test']
from typing import Dict, NoReturn
from transformers import AutoConfig, AutoModelForTokenClassification
class ModelPicker(object):
    """
    Pick and construct the model used for training. Each getter on this
    class returns a model object built from the stored pretrained
    checkpoint/config - e.g. get_argmax_bert_model returns the token
    classification model whose logits are post-processed with argmax.
    """
    def __init__(
        self,
        model_name_or_path: str,
        config: AutoConfig,
        cache_dir: str,
        model_revision: str,
        use_auth_token: bool
    ) -> NoReturn:
        """
        Store everything needed to load a pretrained huggingface model.
        Args:
            model_name_or_path (str): Path to pretrained model or model identifier from huggingface.co/models
            config (AutoConfig): Pretrained config object
            cache_dir (str): Where do you want to store the pretrained models downloaded from huggingface.co
            model_revision (str): The specific model version to use (can be a branch name, tag name or commit id).
            use_auth_token (bool): Will use the token generated when running `transformers-cli login`
                                   (necessary to use this script with private models).
        """
        self._model_name_or_path = model_name_or_path
        self._config = config
        self._cache_dir = cache_dir
        self._model_revision = model_revision
        self._use_auth_token = use_auth_token
    def get_argmax_bert_model(self) -> AutoModelForTokenClassification:
        """
        Build a token classification model whose logits are processed with
        argmax to obtain the predictions and compute the loss.
        Returns:
            (AutoModelForTokenClassification): The argmax token classification model
        """
        # A ".ckpt" path indicates a TensorFlow checkpoint to convert
        load_from_tf = ".ckpt" in self._model_name_or_path
        return AutoModelForTokenClassification.from_pretrained(
            self._model_name_or_path,
            from_tf=load_from_tf,
            config=self._config,
            cache_dir=self._cache_dir,
            revision=self._model_revision,
            use_auth_token=self._use_auth_token,
        )
from typing import Sequence, Tuple, Dict, NoReturn, Mapping, Union, Type
from seqeval.scheme import IOB1, IOB2, IOBES, BILOU
class MetricsCompute(object):
    """
    Evaluation callback handed to the huggingface trainer (its
    compute_metrics hook). The trainer calls it on the validation set; it
    converts the model logits into NER label sequences via the post
    processor (which also drops context and subword tokens), aggregates the
    sentence level sequences back to note level, and runs the span and token
    level metrics. The results are returned so the trainer can log/save them.
    The metrics are computed once per mapper in label_mapper_list, so the
    same predictions can be scored on the original NER types, e.g.
    [AGE, STAFF, DATE] with labels [B-AGE, O, O, U-LOC, B-DATE, L-DATE, O,
    B-STAFF, I-STAFF, L-STAFF] and spans [{start:0, end:5, label: AGE}, ...],
    as well as on mapped versions such as binary HIPAA, where labels/spans
    are collapsed to [B-HIPAA, O, O, U-HIPAA, B-HIPAA, I-HIPAA, O, O, O, O]
    and [{start:0, end:5, label: HIPAA}, ...] before scoring.
    """
    def __init__(
        self,
        metric,
        note_tokens: Sequence[Sequence[Mapping[str, Union[str, int]]]],
        note_spans: Sequence[Sequence[Mapping[str, Union[str, int]]]],
        label_mapper_list: Sequence,
        post_processor,
        note_level_aggregator,
        notation: str,
        mode: str,
        confusion_matrix: bool = False,
        format_results: bool = True
    ) -> NoReturn:
        """
        Store everything the evaluation needs. note_tokens supplies the
        character positions used by the span level evaluation; note_spans
        supplies the gold span positions and labels.
        Args:
            metric (): The huggingface metric object containing the span and token level evaluation code
            note_tokens (Sequence[Sequence[Mapping[str, Union[str, int]]]]): The tokens of every note in the dataset
            note_spans (Sequence[Sequence[Mapping[str, Union[str, int]]]]): The gold spans of every note in the dataset
            label_mapper_list (Sequence): Label mapper objects used to map NER labels/spans before evaluation
            post_processor (): Converts the prediction logits into NER label sequences (argmax, crf, ...)
            note_level_aggregator (): Aggregates sentence level predictions back to note level
            notation (str): The NER notation
            mode (str): The span level eval mode - strict or default
            confusion_matrix (bool): Whether to also compute a confusion matrix
            format_results (bool): Flatten the results into a single dict (True) or keep nested dicts (False)
        """
        self._metric = metric
        self._note_tokens = note_tokens
        self._note_spans = note_spans
        self._label_mapper_list = label_mapper_list
        self._note_level_aggregator = note_level_aggregator
        self._notation = notation
        self._scheme = MetricsCompute.get_scheme(self._notation)
        self._mode = mode
        self._post_processor = post_processor
        self._confusion_matrix = confusion_matrix
        self._format_results = format_results
    @staticmethod
    def get_scheme(notation: str) -> Union[Type[IOB2], Type[IOBES], Type[BILOU], Type[IOB1]]:
        """
        Map the notation string to the corresponding seqeval scheme class.
        Args:
            notation (str): The NER notation
        Returns:
            (Union[Type[IOB2], Type[IOBES], Type[BILOU], Type[IOB1]]): The seqeval scheme
        """
        schemes = {'BIO': IOB2, 'BIOES': IOBES, 'BILOU': BILOU, 'IO': IOB1}
        if notation not in schemes:
            raise ValueError('Invalid Notation')
        return schemes[notation]
    def run_metrics(
        self,
        note_labels: Sequence[Sequence[str]],
        note_predictions: Sequence[Sequence[str]]
    ) -> Union[Dict[str, Union[int, float]], Dict[str, Dict[str, Union[int, float]]]]:
        """
        Run the span and token level metrics once per label mapper. For each
        mapper the note level labels, predictions and gold spans are first
        translated to that mapper's label set (e.g. original types vs binary
        HIPAA) and then scored with the huggingface metric object.
        Args:
            note_labels (Sequence[Sequence[str]]): The list of NER labels for each note
            note_predictions (Sequence[Sequence[str]]): The list of NER predictions for each note
        Returns:
            (Union[Dict[str, Union[int, float]], Dict[str, Dict[str, Union[int, float]]]]): Span and
            token level metric results, flattened when format_results is set
        """
        final_results = {}
        for label_mapper in self._label_mapper_list:
            # Translate labels, predictions and gold spans to this mapper's label set
            mapped_predictions = [label_mapper.map_sequence(sequence) for sequence in note_predictions]
            mapped_labels = [label_mapper.map_sequence(sequence) for sequence in note_labels]
            mapped_spans = [label_mapper.map_spans(span_list) for span_list in self._note_spans]
            # Run the span level and token level evaluation metrics
            results = self._metric.compute(
                predictions=mapped_predictions,
                references=mapped_labels,
                note_tokens=self._note_tokens,
                note_spans=mapped_spans,
                ner_types=label_mapper.get_ner_types(),
                ner_description=label_mapper.get_ner_description(),
                notation=self._notation,
                scheme=self._scheme,
                mode=self._mode,
                confusion_matrix=self._confusion_matrix
            )
            # Either flatten nested metric dicts into "outer_inner" keys or
            # copy the results through unchanged
            for key, value in results.items():
                if self._format_results and isinstance(value, dict):
                    for name, metric_value in value.items():
                        final_results[f"{key}_{name}"] = metric_value
                else:
                    final_results[key] = value
        return final_results
    def compute_metrics(
        self,
        p: Tuple[Sequence[Sequence[str]], Sequence[Sequence[str]]]
    ) -> Union[Dict[str, Union[int, float]], Dict[str, Dict[str, Union[int, float]]]]:
        """
        Entry point called by the huggingface trainer. Decodes the model
        logits into NER label sequences (excluding context and subword
        tokens), aggregates the sentence level sequences back to note level
        and delegates to run_metrics for the span and token level scoring.
        Args:
            p (Tuple[Sequence[Sequence[str]], Sequence[Sequence[str]]]): Tuple of model logits and labels
        Returns:
            (Union[Dict[str, Union[int, float]], Dict[str, Dict[str, Union[int, float]]]]): Span and
            token level metric results
        """
        logits, label_ids = p
        # Convert the logits (scores) to NER label sequences
        true_predictions, true_labels = self._post_processor.decode(logits, label_ids)
        # Aggregate sentence level labels and predictions to note level
        note_predictions = self._note_level_aggregator.get_aggregate_sequences(true_predictions)
        note_labels = self._note_level_aggregator.get_aggregate_sequences(true_labels)
        return self.run_metrics(note_labels, note_predictions)
from typing import Sequence, List, Optional, Type, Union, Mapping, Dict
# This script uses the two other scripts note_sequence_evaluation.py
# and note_token_evalaution.py to gather the span level and token
# level metrics during the evaluation phase in the huggingface
# training process. More information on how this script works
# can be found in - https://github.com/huggingface/datasets/tree/master/metrics/seqeval
# The code is borrowed from there and minor changes are made - to include token
# level metrics and evaluating spans at the character level as opposed to the
# token level
import datasets
from .note_sequence_evaluation import NoteSequenceEvaluation
from .note_token_evaluation import NoteTokenEvaluation
from .violations import Violations
_CITATION = """\
@inproceedings{ramshaw-marcus-1995-text,
title = "Text Chunking using Transformation-Based Learning",
author = "Ramshaw, Lance and
Marcus, Mitch",
booktitle = "Third Workshop on Very Large Corpora",
year = "1995",
url = "https://www.aclweb.org/anthology/W95-0107",
}
@misc{seqeval,
title={{seqeval}: A Python framework for sequence labeling evaluation},
url={https://github.com/chakki-works/seqeval},
note={Software available from https://github.com/chakki-works/seqeval},
author={Hiroki Nakayama},
year={2018},
}
"""
_DESCRIPTION = """seqeval is a Python framework for sequence labeling evaluation. seqeval can evaluate the
performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so
on. This is well-tested by using the Perl script conlleval, which can be used for measuring the performance of a
system that has processed the CoNLL-2000 shared task data. seqeval supports following formats: IOB1 IOB2 IOE1 IOE2
IOBES See the [README.md] file at https://github.com/chakki-works/seqeval for more information. """
_KWARGS_DESCRIPTION = """
Produces labelling scores along with its sufficient statistics
from a source against one or more references.
Args:
predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
references: List of List of reference labels (Ground truth (correct) target values)
suffix: True if the IOB prefix is after type, False otherwise. default: False
Returns:
'scores': dict. Summary of the scores for overall and per type
Overall:
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure,
Per type:
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure
Examples:
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> seqeval = datasets.load_metric("seqeval")
>>> results = seqeval.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']
>>> print(results["overall_f1"])
0.5
>>> print(results["PER"]["f1"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class NoteEvaluation(datasets.Metric):
    """
    Huggingface metric that computes span level (NoteSequenceEvaluation) and
    token level (NoteTokenEvaluation) precision, recall and F1 scores, plus the
    number of NER notation violations found in the predictions.
    """

    def _info(self):
        # Metric metadata: both inputs are lists of label sequences (one per note)
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
                    "predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
                }
            ),
            inputs_description=_KWARGS_DESCRIPTION
        )

    def _compute(
            self,
            references: Sequence[Sequence[str]],
            predictions: Sequence[Sequence[str]],
            note_tokens: Sequence[Sequence[Mapping[str, Union[str, int]]]],
            note_spans: Sequence[Sequence[Mapping[str, Union[str, int]]]],
            ner_types: Sequence[str],
            ner_description: str,
            notation: str,
            scheme: str,
            mode: str,
            confusion_matrix: bool = False,
            suffix: bool = False,
            sample_weight: Optional[List[int]] = None,
            zero_division: Union[str, int] = "warn",
    ) -> Dict[str, Dict[str, Union[int, float]]]:
        """
        Use the NoteSequenceEvaluation and NoteTokenEvaluation classes to extract the
        token and span level precision, recall and f1 scores. Also return the micro
        and macro averaged precision, recall and f1 scores, and the count of NER
        notation violations present in the predictions.
        Args:
            references (Sequence[Sequence[str]]): The list of annotated labels in the evaluation dataset
            predictions (Sequence[Sequence[str]]): The list of predictions in the evaluation dataset
            note_tokens (Sequence[Sequence[Mapping[str, Union[str, int]]]]): The list of tokens for the notes
                                                                             in the evaluation dataset
            note_spans (Sequence[Sequence[Mapping[str, Union[str, int]]]]): The list of annotated spans for the notes
                                                                            in the evaluation dataset
            ner_types (Sequence[str]): The list of NER types e.g AGE, DATE etc
            ner_description (str): A suffix appended to the evaluation result keys
            notation (str): The NER notation (e.g. BIO, BILOU) used to count violations
            scheme (str): The NER labelling scheme
            mode (str): Whether to use default or strict evaluation
            confusion_matrix (bool): Whether to also return the token level confusion matrix
            suffix (bool): Whether the B, I etc is in the prefix or the suffix
            sample_weight : array-like of shape (n_samples,), default=None
                Sample weights.
            zero_division : "warn", 0 or 1, default="warn"
                Sets the value to return when there is a zero division:
                   - recall: when there are no positive labels
                   - precision: when there are no positive predictions
                   - f-score: both
                If set to "warn", this acts as 0, but warnings are also raised.
        Returns:
            (Dict[str, Dict[str, Union[int, float]]]): The token and span level metric scores
        """
        # Span level metrics scores
        report = NoteSequenceEvaluation.classification_report(
            note_predictions=predictions,
            note_tokens=note_tokens,
            note_spans=note_spans,
            ner_types=ner_types,
            scheme=scheme,
            mode=mode,
            suffix=suffix,
            output_dict=True,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # Token level metric scores
        token_report = NoteTokenEvaluation.classification_report(
            labels=references,
            predictions=predictions,
            ner_types=ner_types
        )
        # Count predictions that break the NER notation (e.g. I- without a B-)
        violations = sum(Violations.get_violations(tag_sequence=prediction, notation=notation)
                         for prediction in predictions)
        # Remove the macro and weighted average results; keep macro for reporting
        macro_score = report.pop("macro avg")
        report.pop("weighted avg")
        macro_token_score = token_report.pop("macro avg")
        token_report.pop("weighted avg")
        overall_score = report.pop("micro avg")
        token_overall_score = token_report.pop("micro avg")
        # Extract span level scores for each NER type
        scores = {
            type_name: {
                "precision": score["precision"],
                "recall": score["recall"],
                "f1": score["f1-score"],
                "number": score["support"],
            }
            for type_name, score in report.items()
        }
        # Extract token level scores for each NER type
        token_scores = {
            type_name + '-TOKEN': {
                "precision": score["precision"],
                "recall": score["recall"],
                "f1": score["f1-score"],
                "number": score["support"],
            }
            for type_name, score in token_report.items()
        }
        # Extract micro averaged span level score
        overall = {'overall' + ner_description:
                       {"precision": overall_score["precision"],
                        "recall": overall_score["recall"],
                        "f1": overall_score["f1-score"],
                        }
                   }
        # Extract micro averaged token level score
        token_overall = {'token-overall' + ner_description:
                             {"precision": token_overall_score["precision"],
                              "recall": token_overall_score["recall"],
                              "f1": token_overall_score["f1-score"],
                              }
                         }
        # Extract macro averaged span level score
        macro_overall = {'macro-overall' + ner_description:
                             {"precision": macro_score["precision"],
                              "recall": macro_score["recall"],
                              "f1": macro_score["f1-score"],
                              }
                         }
        # Extract macro averaged token level score
        macro_token_overall = {'macro-token-overall' + ner_description:
                                   {"precision": macro_token_score["precision"],
                                    "recall": macro_token_score["recall"],
                                    "f1": macro_token_score["f1-score"],
                                    }
                               }
        # Store number of NER violations
        violation_count = {'violations' + ner_description: {'count': violations}}
        # Return the results; use a local name distinct from the confusion_matrix
        # parameter (previously the parameter was shadowed by the result dict)
        if confusion_matrix:
            confusion = {'confusion' + ner_description:
                             {'matrix': NoteTokenEvaluation.get_confusion_matrix(
                                 labels=references,
                                 predictions=predictions,
                                 ner_types=ner_types
                             )}}
            return {**scores, **overall, **token_scores, **token_overall, **macro_overall, **macro_token_overall,
                    **violation_count, **confusion}
        else:
            return {**scores, **overall, **token_scores, **token_overall, **macro_overall, **macro_token_overall,
                    **violation_count}
from collections import Counter
from typing import Sequence, List, Tuple, Union, Type, Optional
from seqeval.reporters import DictReporter
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix
class NoteTokenEvaluation(object):
    """
    This class is used to evaluate token level precision, recall and F1 scores.
    Script to evaluate at a token level. Calculate precision, recall, and f1 metrics
    at the token level rather than the span level.
    """

    @staticmethod
    def unpack_nested_list(nested_list: Sequence[Sequence[str]]) -> List[str]:
        """
        Use this function to unpack a nested list. For token level evaluation we don't
        need to consider the B, I prefixes (depending on the NER notation), so the two
        character prefix (e.g. 'B-') is stripped from every non 'O' label.
        Args:
            nested_list (Sequence[Sequence[str]]): A nested list of predictions/labels
        Returns:
            (List[str]): Unpacked (flattened) nested list of prefix-free predictions/labels
        """
        return [inner if inner == 'O' else inner[2:] for nested in nested_list for inner in nested]

    @staticmethod
    def get_counts(sequence: Sequence[str], ner_types: Sequence[str]) -> List[int]:
        """
        Use this function to get the counts for each NER type
        Args:
            sequence (Sequence[str]): A list of the NER labels/predictions
            ner_types (Sequence[str]): The list of NER types to count
        Returns:
            (List[int]): Position i contains the count of the NER type at position i of ner_types
        """
        counts = Counter()
        counts.update(sequence)
        return [counts[ner_type] for ner_type in ner_types]

    @staticmethod
    def precision_recall_fscore(
            labels: Sequence[str],
            predictions: Sequence[str],
            ner_types: Sequence[str],
            average: Optional[str] = None
    ) -> Tuple[Union[float, List[float]], Union[float, List[float]], Union[float, List[float]], Union[int, List[int]]]:
        """
        Use this function to get the token level precision, recall and fscore. Internally we use the
        sklearn precision_score, recall_score and f1 score functions. Also return the count of each
        NER type.
        Args:
            labels (Sequence[str]): A list of the gold standard NER labels
            predictions (Sequence[str]): A list of the predicted NER labels
            ner_types (Sequence[str]): The list of NER types being scored
            average (Optional[str]): None for per NER type scores, or pass an appropriate average value
        Returns:
            eval_precision (Union[float, List[float]]): precision score (averaged or per ner type)
            eval_recall (Union[float, List[float]]): recall score (averaged or per ner type)
            eval_f1 (Union[float, List[float]]): F1 score (averaged or per ner type)
            counts (Union[int, List[int]]): Counts (total or per ner type)
        """
        eval_precision = precision_score(y_true=labels, y_pred=predictions, labels=ner_types, average=average)
        eval_recall = recall_score(y_true=labels, y_pred=predictions, labels=ner_types, average=average)
        eval_f1 = f1_score(y_true=labels, y_pred=predictions, labels=ner_types, average=average)
        counts = NoteTokenEvaluation.get_counts(sequence=labels, ner_types=ner_types)
        if average is None:
            # Per-type scores: sklearn returns arrays, convert to plain lists
            eval_precision = list(eval_precision)
            eval_recall = list(eval_recall)
            eval_f1 = list(eval_f1)
        else:
            # Averaged scores: report the total support across all NER types
            counts = sum(counts)
        return eval_precision, eval_recall, eval_f1, counts

    @staticmethod
    def get_confusion_matrix(labels: Sequence[str], predictions: Sequence[str], ner_types: Sequence[str]):
        """
        Use this function to compute the token level confusion matrix between the
        gold standard labels and the predictions. The matrix rows/columns follow
        the order of ner_types with the 'O' label appended last.
        Args:
            labels (Sequence[str]): A nested list of the gold standard NER labels (per note)
            predictions (Sequence[str]): A nested list of the predicted NER labels (per note)
            ner_types (Sequence[str]): The list of NER types
        Returns:
            (numpy.ndarray): The token level confusion matrix
        """
        labels = NoteTokenEvaluation.unpack_nested_list(labels)
        predictions = NoteTokenEvaluation.unpack_nested_list(predictions)
        return confusion_matrix(y_true=labels, y_pred=predictions, labels=ner_types + ['O', ])

    @staticmethod
    def classification_report(
            labels: Sequence[Sequence[str]],
            predictions: Sequence[Sequence[str]],
            ner_types: Sequence[str]
    ) -> Type[DictReporter]:
        """
        Use this function to get the token level precision, recall and fscore per NER type
        and also the micro, macro and weighted averaged precision, recall and f scores.
        Essentially we return a classification report which contains all this information
        Args:
            labels (Sequence[Sequence[str]]): A list of the gold standard NER labels for each note
            predictions (Sequence[Sequence[str]]): A list of the predicted NER labels for each note
            ner_types (Sequence[str]): The list of NER types
        Returns:
            (Type[DictReporter]): Classification report that contains the token level metric scores
        """
        # Unpack the nested lists (labels and predictions) before running the evaluation metrics
        labels = NoteTokenEvaluation.unpack_nested_list(nested_list=labels)
        predictions = NoteTokenEvaluation.unpack_nested_list(nested_list=predictions)
        # Store results in this and return this object
        reporter = DictReporter()
        # Calculate precision, recall and f1 for each NER type
        eval_precision, eval_recall, eval_f1, counts = NoteTokenEvaluation.precision_recall_fscore(
            labels=labels,
            predictions=predictions,
            ner_types=ner_types,
            average=None
        )
        # Store the results
        for row in zip(ner_types, eval_precision, eval_recall, eval_f1, counts):
            reporter.write(*row)
        reporter.write_blank()
        # Calculate the overall precision, recall and f1 - based on the defined averages
        average_options = ('micro', 'macro', 'weighted')
        for average in average_options:
            eval_precision, eval_recall, eval_f1, counts = NoteTokenEvaluation.precision_recall_fscore(
                labels=labels,
                predictions=predictions,
                ner_types=ner_types,
                average=average
            )
            # Store the results
            reporter.write('{} avg'.format(average), eval_precision, eval_recall, eval_f1, counts)
        reporter.write_blank()
        # Return the token level results
        return reporter.report()
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay
class ResultsFormatter(object):
    """Format evaluation results for presentation (dataframe / plot)."""

    @staticmethod
    def get_results_df(results):
        """
        Convert a flat mapping of metric results, with keys like
        'AGE_precision', 'AGE_recall', ..., into a dataframe indexed by metric
        group ('AGE', ...) with the columns ['precision', 'recall', 'f1', 'number'].
        Keys that do not end in one of the four metric suffixes are dropped.
        Args:
            results (Mapping[str, Union[int, float]]): Flattened metric results
        Returns:
            (pandas.DataFrame): One row per metric group, one column per metric
        """
        def change_column_names(group):
            # Strip the '<group>_' prefix so every group shares the same column names
            group.rename(columns=lambda name: re.sub('(.*_)(?=[a-zA-Z0-9]+$)', '', name), inplace=True)
            return group
        results_df = pd.DataFrame([results])
        # Capture everything before the metric suffix as the group name
        group_pattern = '(.*(?=_recall|_precision|_f1|_number))'
        # NOTE(review): groupby(axis=1) is deprecated in pandas 2.x - revisit on upgrade
        grouped = results_df.groupby(results_df.columns.str.extract(group_pattern, expand=False), axis=1)
        grouped_df_dict = {name: change_column_names(group) for name, group in grouped}
        grouped_df = pd.concat(grouped_df_dict.values(), axis=1, keys=grouped_df_dict.keys())
        return grouped_df.T.unstack().droplevel(level=0, axis=1)[['precision', 'recall', 'f1', 'number']]

    @staticmethod
    def get_confusion_matrix(confusion_matrix, ner_types):
        """
        Render a confusion matrix as a matplotlib figure. Cells are annotated
        with the raw counts while the color scale uses row-normalized values.
        Args:
            confusion_matrix (numpy.ndarray): Raw count confusion matrix
            ner_types (List[str]): NER type names; 'O' is appended as the last class
        Returns:
            (matplotlib.figure.Figure): The rendered confusion matrix figure
        """
        S = 15
        title = 'Confusion Matrix'
        cmap = plt.cm.Blues
        classes = ner_types + ['O', ]
        # Keep the raw counts for the annotations; normalize a copy for the colors.
        # (The previous version also created an unused extra figure via plt.figure,
        # which was never closed - removed.)
        cmbk = confusion_matrix
        cm = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
        fig, ax = plt.subplots(figsize=(S, S * 0.8))
        im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
        ax.figure.colorbar(im, ax=ax)
        # We want to show all ticks...
        ax.set(xticks=np.arange(0, cm.shape[1]),
               yticks=np.arange(0, cm.shape[0]),
               # ... and label them with the respective list entries
               xticklabels=classes, yticklabels=classes,
               title=title,
               ylabel='Ground Truth',
               xlabel='Predicted')
        ax.xaxis.get_label().set_fontsize(16)
        ax.yaxis.get_label().set_fontsize(16)
        ax.title.set_size(16)
        ax.tick_params(axis='both', which='major', labelsize=14)
        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")
        # Annotate each cell with the raw count (integer format)
        fmt = 'd'
        thresh = cm.max() / 2.
        for i in range(cm.shape[0]):
            for j in range(cm.shape[1]):
                ax.text(j, i, format(cmbk[i, j], fmt),
                        ha="center", va="center",
                        color="white" if cm[i, j] > thresh else "black", fontsize=12)
        fig.tight_layout()
        return fig
import json
import re
from argparse import ArgumentParser
from typing import Sequence, List, Tuple, Mapping, Union, Any, Type
import regex
from seqeval.scheme import IOB1, IOB2, IOBES, BILOU, Entities
from .utils import remove, replace_tag_type, replace_informative
class TextDeid(object):
    """
    De-identify text using the predictions produced by a sequence tagging model.
    The predicted NER labels are converted to character spans and every span in
    the note text is replaced according to the chosen de-id strategy
    (remove / replace_tag_type / replace_informative).
    """

    def __init__(self, notation, span_constraint):
        """
        Args:
            notation (str): The NER notation used by the predictions (BIO, BIOES, BILOU, IO)
            span_constraint (str): One of 'exact', 'strict' or 'super_strict' - controls
                                   how aggressively predictions are relaxed before
                                   extracting spans
        """
        self._span_constraint = span_constraint
        # 'strict' and 'super_strict' rewrite all predictions to plain I- tags,
        # so spans are read back with the IO scheme; 'exact' keeps the scheme
        # implied by the original notation.
        if self._span_constraint == 'strict':
            self._scheme = TextDeid.__get_scheme('IO')
        elif self._span_constraint == 'super_strict':
            self._scheme = TextDeid.__get_scheme('IO')
        else:
            self._scheme = TextDeid.__get_scheme(notation)

    def decode(self, tokens, predictions):
        """
        Post-process the predictions according to the configured span constraint.
        Args:
            tokens (Sequence[Mapping[str, Union[str, int]]]): The tokens of the note
            predictions (List[str]): The predicted NER labels for the note
        Returns:
            (List[str]): The (possibly relaxed) predictions
        Raises:
            ValueError: If the span constraint is not recognized
        """
        if self._span_constraint == 'exact':
            return predictions
        elif self._span_constraint == 'strict':
            return TextDeid.__get_relaxed_predictions(predictions)
        elif self._span_constraint == 'super_strict':
            return TextDeid.__get_super_relaxed_predictions(tokens, predictions)
        # Previously an unknown constraint silently returned None; fail loudly instead
        raise ValueError('Invalid span constraint: %s' % self._span_constraint)

    def get_predicted_entities_positions(
            self,
            tokens: Sequence[Mapping[str, Union[str, int]]],
            predictions: List[str],
            suffix: bool
    ) -> List[List[Union[Tuple[Union[str, int], Union[str, int]], Any]]]:
        """
        Use the seqeval Entities class, which goes through the predictions and returns
        where each span starts and ends - [O, O, B-AGE, I-AGE, O, O] yields a span that
        starts at token 2 and ends at token 3 - with type AGE. We then extract the
        position of the tokens in the note (character positions) - so we return that
        this span starts e.g. at 32 and ends at 37. The function returns a nested list
        that contains a tuple of tag position (character offsets) and tag type.
        Example: [[(3, 9), LOC], [(34, 41), PATIENT], ...]]
        Args:
            tokens (Sequence[Mapping[str, Union[str, int]]]): The list of tokens in the note
            predictions (Sequence[str]): The list of predictions for the note
            suffix (bool): Whether the B, I etc is in the prefix or the suffix
        Returns:
            positions_info (List[Tuple[Tuple[int, int], str]]): List containing tuples of
                                                                tag positions and tag type
        """
        positions_info = list()
        entities = Entities(sequences=[predictions], scheme=self._scheme, suffix=suffix)
        for entity_list in entities.entities:
            for entity in entity_list:
                # Map token indexes back to character offsets in the note text
                position = (tokens[entity.start]['start'], tokens[entity.end - 1]['end'])
                positions_info.append([position, entity.tag])
        return positions_info

    def run_deid(
            self,
            input_file,
            predictions_file,
            deid_strategy,
            keep_age: bool = False,
            metadata_key: str = 'meta',
            note_id_key: str = 'note_id',
            tokens_key: str = 'tokens',
            predictions_key: str = 'predictions',
            text_key: str = 'text'
    ):
        """
        De-identify every note in input_file using the model output stored in
        predictions_file. Notes are matched to predictions via their note id.
        Yields one de-identified note (json object, with the result stored under
        the 'deid_text' key) at a time.
        """
        # Store note_id to note mapping
        note_map = dict()
        # Use context managers so the files are closed (they were leaked before)
        with open(input_file, 'r') as notes_file:
            for line in notes_file:
                note = json.loads(line)
                note_id = note[metadata_key][note_id_key]
                note_map[note_id] = note
        # Go through note predictions and de identify the note accordingly
        with open(predictions_file, 'r') as preds_file:
            for line in preds_file:
                note = json.loads(line)
                # Get the note from the note_map dict using the note_id for this prediction
                note_id = note[note_id_key]
                deid_note = note_map[note_id]
                # Post-process predictions according to the span constraint
                predictions = self.decode(tokens=note[tokens_key], predictions=note[predictions_key])
                # Get entities and their character positions
                entity_positions = self.get_predicted_entities_positions(
                    tokens=note[tokens_key],
                    predictions=predictions,
                    suffix=False
                )
                yield TextDeid.__get_deid_text(
                    deid_note=deid_note,
                    entity_positions=entity_positions,
                    deid_strategy=deid_strategy,
                    keep_age=keep_age,
                    text_key=text_key
                )

    @staticmethod
    def __get_deid_text(
            deid_note,
            entity_positions,
            deid_strategy,
            keep_age: bool = False,
            text_key: str = 'text'
    ):
        """
        Replace every predicted PHI span in the note text according to the
        de-id strategy and store the result under the 'deid_text' key.
        """
        tag_mapping = TextDeid.__get_tag_mapping(deid_strategy=deid_strategy)
        # Pattern that matches standalone ages 1-88 (ages >= 89 are always masked)
        age_pattern = '((?<!\d+)([1-7]\d?)(?!\d+))|((?<!\d+)(8[0-8]?)(?!\d+))'
        # Sort positions - store the last occurring tag first - i.e in descending order
        # of start positions.
        entity_positions.sort(key=lambda info: info[0][0], reverse=True)
        # Get text and de identify it
        note_text = deid_note[text_key]
        deid_text = deid_note[text_key]
        # Go through the entities and their positions and de identify the text.
        # Since we have the positions in sorted order (descending by start positions)
        # we de identify the text from the end to the start - i.e back to front -
        # so earlier character offsets stay valid.
        for positions, tag in entity_positions:
            start_pos, end_pos = positions
            deid_tag = tag_mapping[tag]
            age_unchanged = False
            if tag == 'AGE' and keep_age:
                span_text = note_text[start_pos:end_pos]
                if regex.search(age_pattern, span_text, flags=regex.IGNORECASE):
                    # Age below 89 - keep the original text unchanged
                    deid_tag = span_text
                    age_unchanged = True
            if deid_strategy == 'replace_informative' and not age_unchanged:
                # Informative tags are format strings that wrap the original span text
                deid_text = deid_text[:start_pos] + deid_tag.format(note_text[start_pos:end_pos]) + deid_text[end_pos:]
            else:
                deid_text = deid_text[:start_pos] + deid_tag + deid_text[end_pos:]
        # Collapse runs of whitespace/newlines introduced by the replacements
        deid_note['deid_text'] = regex.sub('[\n]+', '\n', regex.sub('[ \t\r\f\v]+', ' ', deid_text)).strip()
        return deid_note

    @staticmethod
    def __get_tag_mapping(deid_strategy):
        """Return the tag -> replacement mapping for the given de-id strategy."""
        if deid_strategy == 'remove':
            return remove()
        elif deid_strategy == 'replace_tag_type':
            return replace_tag_type()
        elif deid_strategy == 'replace_informative':
            return replace_informative()
        # Previously an unknown strategy silently returned None; fail loudly instead
        raise ValueError('Invalid deid strategy: %s' % deid_strategy)

    @staticmethod
    def __get_relaxed_predictions(predictions):
        """Rewrite every tagged prediction to a plain I- tag (IO notation)."""
        return ['I-' + prediction[2:] if '-' in prediction else prediction for prediction in predictions]

    @staticmethod
    def __get_super_relaxed_predictions(tokens, predictions):
        """
        Super relaxed predictions: in addition to relaxing to I- tags, runs of
        special-character tokens that sit between two spans of the same type are
        absorbed into the span. Example:
        360 Longwood Ave, OBI, Boston
        Tokens: ['360', 'Longwood', 'Ave', ',', 'OBI', ',', 'Boston']
        Predictions: [B-LOC, I-LOC, L-LOC, O, U-LOC, O, U-LOC]
        Relaxed: [I-LOC, I-LOC, I-LOC, O, I-LOC, O, I-LOC]
        Super relaxed: [I-LOC, I-LOC, I-LOC, I-LOC, I-LOC, I-LOC, I-LOC]
        """
        relaxed_predictions = TextDeid.__get_relaxed_predictions(predictions)
        prev_type = None
        replace_indexes = list()
        super_relaxed_predictions = list()
        for index, (token, prediction) in enumerate(zip(tokens, relaxed_predictions)):
            super_relaxed_predictions.append(prediction)
            # Check special characters that appear after a prediction -
            # we can assign the prediction label to this sequence of special characters
            if prediction == 'O' and prev_type is not None:
                if re.search('^(\W|_)+$', token['text'], flags=re.IGNORECASE | re.DOTALL):
                    replace_indexes.append(index)
                else:
                    # A regular word breaks the run - reset
                    prev_type = None
                    replace_indexes = list()
            # Replace all the tokens identified above with the NER prediction type.
            # This is done only if the current prediction type matches the previous type
            elif prediction != 'O':
                if prediction[2:] == prev_type and replace_indexes != []:
                    for replace_index in replace_indexes:
                        super_relaxed_predictions[replace_index] = 'I-' + prev_type
                    # Reset the pending indexes
                    replace_indexes = list()
                prev_type = prediction[2:]
            else:
                prev_type = None
        return super_relaxed_predictions

    @staticmethod
    def __get_scheme(notation: str) -> Union[Type[IOB2], Type[IOBES], Type[BILOU], Type[IOB1]]:
        """
        Get the seqeval scheme based on the notation
        Args:
            notation (str): The NER notation
        Returns:
            (Union[IOB2, IOBES, BILOU, IOB1]): The seqeval scheme
        """
        if notation == 'BIO':
            return IOB2
        elif notation == 'BIOES':
            return IOBES
        elif notation == 'BILOU':
            return BILOU
        elif notation == 'IO':
            return IOB1
        else:
            raise ValueError('Invalid Notation')
def main():
    """
    CLI entry point: read the notes and the model predictions, de-identify the
    note text and write the de-identified notes to the output file as JSONL.
    """
    # The following code sets up the arguments to be passed via CLI or via a JSON file
    cli_parser = ArgumentParser(description='configuration arguments provided at run time from the CLI')
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='the jsonl file that contains the notes'
    )
    cli_parser.add_argument(
        '--predictions_file',
        type=str,
        required=True,
        help='the location where the predictions are'
    )
    cli_parser.add_argument(
        '--span_constraint',
        type=str,
        required=True,
        choices=['exact', 'strict', 'super_strict'],
        help='whether we want to modify the predictions, make the process of removing phi more strict etc'
    )
    cli_parser.add_argument(
        '--notation',
        type=str,
        required=True,
        help='the NER notation in the predictions'
    )
    cli_parser.add_argument(
        '--deid_strategy',
        type=str,
        required=True,
        choices=['remove', 'replace_tag_type', 'replace_informative'],
        help='the strategy used to replace the predicted PHI spans in the text'
    )
    cli_parser.add_argument(
        '--keep_age',
        action='store_true',
        help='whether to keep ages below 89'
    )
    cli_parser.add_argument(
        '--text_key',
        type=str,
        default='text',
        help='the key where the note text is present in the json object'
    )
    cli_parser.add_argument(
        '--metadata_key',
        type=str,
        default='meta',
        help='the key where the note metadata is present in the json object'
    )
    cli_parser.add_argument(
        '--note_id_key',
        type=str,
        default='note_id',
        help='the key where the note id is present in the json object'
    )
    cli_parser.add_argument(
        '--tokens_key',
        type=str,
        default='tokens',
        help='the key where the tokens for the notes are present in the json object'
    )
    cli_parser.add_argument(
        '--predictions_key',
        type=str,
        default='predictions',
        help='the key where the note predictions is present in the json object'
    )
    cli_parser.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='the location we would write the deid notes'
    )
    # Parse args
    args = cli_parser.parse_args()
    text_deid = TextDeid(notation=args.notation, span_constraint=args.span_constraint)
    deid_notes = text_deid.run_deid(
        input_file=args.input_file,
        predictions_file=args.predictions_file,
        deid_strategy=args.deid_strategy,
        keep_age=args.keep_age,
        metadata_key=args.metadata_key,
        note_id_key=args.note_id_key,
        tokens_key=args.tokens_key,
        predictions_key=args.predictions_key,
        text_key=args.text_key
    )
    # Write the dataset to the output file
    with open(args.output_file, 'w') as file:
        for deid_note in deid_notes:
            file.write(json.dumps(deid_note) + '\n')
# Script entry point: de-identify the notes and write them to the output file
if __name__ == "__main__":
    # Get deid notes
    main()
import json
import pandas as pd
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from typing import Sequence, Dict, Iterable, Union, NoReturn, Optional
class DataLoader(object):
    """
    Convert parquet file to jsonl file. While some of the columns in the parquet file will
    be directly used as keys in the json object, some of the columns will be stored as metadata.
    The parquet_columns columns specify the columns from the parquet file that will be stored in the
    json object. The json_columns specify which columns will be stored directly as keys in
    the json object and metadata_columns columns specify which columns will be stored as metadata.
    The ordering in these lists need to match, because we do the above operations based on index
    positions.
    E.g - parquet_columns = ['NoteText', 'NoteID', 'PatientID', 'institution']
        - json_columns = ['text']
        - metadata_columns = ['note_id', 'patient_id', 'institute']
    NoteText corresponds to text, NoteID -> note_id, PatientID -> patient_id, institution -> institute
    As you can see we match based on positions. Once we process json_columns, we process the
    metadata columns (i.e index is used for mapping parquet columns to jsonl keys).
    Hence it is important that the columns are specified in the right order.
    JSON Object: {'text': medical text, 'meta':{'note_id':12345, 'patient_id':54321, 'institute':PP}}
    """

    def __init__(
            self,
            parquet_columns: Optional[Sequence[str]] = None,
            json_columns: Optional[Sequence[str]] = None,
            metadata_columns: Optional[Sequence[str]] = None
    ) -> None:
        """
        Initialize the parquet column names and json object key names
        Args:
            parquet_columns (Optional[Sequence[str]]): Columns to extract from parquet file.
                                                       If not given - will assign ['NoteText', 'NoteID']
            json_columns (Optional[Sequence[str]]): Fields that will be stored directly in json object.
                                                    If not given - will assign ['text']
            metadata_columns (Optional[Sequence[str]]): Fields that will be stored as metadata in json object.
                                                        If not given - will assign ['note_id']
        """
        if metadata_columns is None:
            metadata_columns = ['note_id']
        if json_columns is None:
            json_columns = ['text']
        if parquet_columns is None:
            parquet_columns = ['NoteText', 'NoteID']
        self._parquet_columns = parquet_columns
        self._json_columns = json_columns
        self._metadata_columns = metadata_columns

    def load(self, input_file: str) -> Iterable[Dict[str, Union[str, Dict[str, str]]]]:
        """
        Read a parquet file, extract the relevant columns and create a json object for each row
        of the parquet file. This function will return an iterable that can be used to iterate
        through each of these json objects. The data and structure in the json objects will depend
        on how this class has been initialized (parquet_columns, json_columns, metadata_columns)
        JSON Object: {'text': medical text, 'meta':{'note_id':12345, 'patient_id':54321, 'institute':PP}}
        Args:
            input_file (str): Input parquet file
        Returns:
            (Iterable[Dict[str, Union[str, Dict[str, str]]]]): An iterable that iterates through the json objects
        """
        data = pd.read_parquet(input_file)
        data = data[self._parquet_columns]
        # itertuples yields (Index, col0, col1, ...): position 0 is the row index,
        # so parquet column i is at tuple position i + 1. The metadata columns
        # follow immediately after the json columns.
        # (The previous offset arithmetic skipped a column when json_columns was empty.)
        meta_offset = len(self._json_columns) + 1
        for row in data.itertuples():
            record = {column: row[i + 1] for i, column in enumerate(self._json_columns)}
            record['meta'] = {metadata_column: row[meta_offset + meta_index]
                              for meta_index, metadata_column in enumerate(self._metadata_columns)}
            yield record
def main() -> None:
    """
    CLI entry point: convert a parquet file to a jsonl file.
    The parquet_columns argument lists the columns to read from the parquet file;
    json_columns lists the columns stored directly as keys in each json object and
    metadata_columns lists the columns stored under the 'meta' key. The mapping
    between parquet columns and json keys is positional: json_columns map to the
    first parquet columns and metadata_columns to the remaining ones, so the
    ordering of the three lists must be consistent.
    Example JSON object:
    {'text': medical text, 'meta': {'note_id': 12345, 'patient_id': 54321, 'institute': PP}}
    """
    cli_parser = ArgumentParser(
        description='configuration arguments provided at run time from the CLI',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    cli_parser.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='The input parquet file'
    )
    # The column arguments are lists (nargs) rather than files
    cli_parser.add_argument(
        '--parquet_columns',
        nargs="+",
        default=['NoteText', 'NoteID'],
        help='Columns to extract from parquet file. If not given - will assign [NoteText, NoteID]'
    )
    cli_parser.add_argument(
        '--json_columns',
        nargs="+",
        default=['text'],
        help='fields that will be stored directly in json object'
    )
    cli_parser.add_argument(
        '--metadata_columns',
        nargs="+",
        default=['note_id'],
        help='fields that will be stored as the metadata field in json object'
    )
    cli_parser.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='where to write the jsonl output'
    )
    args = cli_parser.parse_args()
    data_loader = DataLoader(
        parquet_columns=args.parquet_columns,
        json_columns=args.json_columns,
        metadata_columns=args.metadata_columns
    )
    notes = data_loader.load(input_file=args.input_file)
    # Write the jsonl output to the specified location
    with open(args.output_file, 'w') as file:
        for note in notes:
            # Guarantee a 'spans' key so downstream consumers can rely on it
            if 'spans' not in note:
                note['spans'] = []
            file.write(json.dumps(note) + '\n')
# Script entry point: convert a parquet file of notes to JSONL
if __name__ == "__main__":
    main()
from urlparse import urljoin
from collections import OrderedDict
from .http import HttpClient
def average(values):
    '''Return the arithmetic mean of *values* as a float.'''
    total = sum(values)
    return float(total) / len(values)
class GraphiteClient(HttpClient):
    '''
    A simple client for querying Graphite.

    :param endpoint: the Graphite URL;
    :param min_queries_range:
        The minimum range of data to query. Graphite occasionally returns empty
        data when querying small time ranges (probably on busy servers). The
        workaround is to query a larger time range and filter out unneeded
        values, e.g. if we want the data points from 1 minute ago, we query 10
        minutes and filter out the oldest 9 minutes.

        Care must be taken when choosing this value, if it's too large Graphite
        may return aggregated values, so it must be adapted to your storage
        schemas.

        As a guideline, the default value of 10 minutes gave good results on
        our server for querying 1 minute data ranges with a
        ``10s:1d,1min:7d,10min:1y`` retention schema;

    Additional arguments are passed to :class:`robgracli.http.HttpClient`.
    '''

    def __init__(self, endpoint, min_queries_range=60 * 10, *args, **kwargs):
        super(GraphiteClient, self).__init__(*args, **kwargs)
        self.endpoint = endpoint
        self.min_queries_range = min_queries_range

    def query(self, query, from_=60):
        '''
        Return datapoints for *query* over the last *from_* seconds.

        The return value is an :class:`~collections.OrderedDict` with target
        names as keys and datapoints ``(value, timestamp)`` pairs as values.
        '''
        # Query at least min_queries_range seconds (see class docstring), then
        # trim the result back down to the window the caller asked for.
        query_from = max(self.min_queries_range, from_)
        url = urljoin(self.endpoint, '/render')
        response = self.get(url, params={
            'target': query,
            'format': 'json',
            'from': '-%ss' % query_from,
        })
        data = response.json()
        ret = OrderedDict()
        for entry in data:
            ret[entry['target']] = trim_datapoints(entry['datapoints'], from_)
        return ret

    def aggregate(self, query, from_=60, aggregator=average):
        '''
        Get the current value of a metric, by aggregating Graphite datapoints
        over an interval.

        Values returned by *query* over the last *from_* seconds are aggregated
        using the *aggregator* function, after filtering out None values.

        The return value is an :class:`~collections.OrderedDict` with target
        names as keys and aggregated values as values, or None for targets that
        returned no datapoints or only None values.
        '''
        data = self.query(query, from_)
        ret = OrderedDict()
        for key, values in data.items():
            # Graphite pads series with None for missing points; drop them
            # before aggregating.
            values = [v[0] for v in values if v[0] is not None]
            if values:
                ret[key] = aggregator(values)
            else:
                ret[key] = None
        return ret

    def find_metrics(self, query):
        '''
        Find metrics on the server.

        Querying '*' will return the root of all metrics, and you can then find
        other metrics from there.

        Return a list of dicts of the form::

            [
                {
                    'text': 'carbon',
                    'expandable': 1,
                    'leaf': 0,
                    'id': 'carbon',
                    'allowChildren': 1
                },
                {
                    'text': 'statsd',
                    'expandable': 1,
                    'leaf': 0,
                    'id': 'statsd',
                    'allowChildren': 1
                }
            ]
        '''
        url = urljoin(self.endpoint, '/metrics/find')
        # Pass the query via the `params` keyword for consistency with
        # query(); the previous positional call relied on `params` being
        # HttpClient.get()'s second positional argument — TODO confirm
        # against robgracli.http.HttpClient.
        response = self.get(url, params={'query': query})
        return response.json()
def trim_datapoints(datapoints, max_age):
    '''
    Return the datapoints whose timestamp is within *max_age* seconds of the
    newest datapoint.

    *datapoints* is a sequence of ``(value, timestamp)`` pairs, assumed to be
    ordered by timestamp (the last entry is the newest — TODO confirm with
    Graphite's /render output ordering).  Always returns a list; an empty
    input yields an empty list.
    '''
    if not datapoints:
        return []
    last_ts = datapoints[-1][1]
    # List comprehension instead of filter() with a tuple-unpacking lambda:
    # the old form was Python 2 only (tuple parameters are a SyntaxError in
    # Python 3, where filter() is also lazy).  Behavior on Python 2 is
    # unchanged, and the original items are preserved as-is.
    return [dp for dp in datapoints if last_ts - dp[1] <= max_age]
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _typeshed import NoneType
from typing import Any, Union

from robust_json.errors import IncorrectFunctionParameterTypeError
def get_item_index(
    item: Any, array: list, always_array: bool = False
) -> Union[int, list, None]:
    """
    Get items' index in an array.

    Parameters: `item : any` parameter specifies item which index needs to be found.
    `array : list` parameter specifies array where to search. `always_array : bool`
    parameter sets return type strictly to a list. If set to `True`, this function will
    always return an array even if there is only one element. This is useful when in the future
    this output will be iterated. If set to `False`, return type will be determined automatically.

    This function returns an index/an array of indexes. If item is not present in the array,
    this function will return `None` (if `always_array` is set to `True` and item is not found in
    the array, this function will return an empty list)

    This function raises a `IncorrectFunctionParameterTypeError` if one or more of its parameters have incorrect types.
    This function raises a `ValueError` exception if `array` parameter is equal to an empty list.
    This function raises any additional exceptions if occurred.

    Examples:
    >>> arr = ['1', '2', '3', '4', '5']
    >>> get_item_index('3', arr)
    2

    >>> arr = ['1', '2', '3', '4', '1', '5', '1']
    >>> get_item_index('1', arr)
    [0, 4, 6]

    >>> arr = ['1', '2', '3', '4']
    >>> get_item_index('1', arr, True)
    [0]

    >>> arr = ['1', '2', '3', '4', '5']
    >>> get_item_index('6', arr)  # returns None

    >>> arr = ['1', '2', '3', '4', '5']
    >>> get_item_index('6', arr, True)
    []

    For more information please visit:
    https://github.com/NickolaiBeloguzov/robust-json/blob/master/README.md#methods
    """
    # Exact type check (not isinstance) to preserve the original contract:
    # list subclasses were also accepted only if their type is exactly `list`.
    if type(array) != list:
        raise IncorrectFunctionParameterTypeError("array", "list", type(array).__name__)

    if not array:
        raise ValueError("Parameter `array` is an empty list.")

    # Every position whose element compares equal to `item`.
    item_indexes = [index for index, element in enumerate(array) if element == item]

    if always_array:
        return item_indexes
    if len(item_indexes) == 1:
        return item_indexes[0]
    if not item_indexes:
        return None
    return item_indexes
import numpy as np
from sklearn.utils import resample
def RWP(X, orig_cov, with_diag=False):
    """
    Robust Wasserstein Profile function evaluated on one bootstrap sample.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which one bootstrap resample is drawn and its sample
        covariance computed.
    orig_cov : ndarray
        The vectorized lower-triangular entries of the covariance matrix of
        the original data (as produced with ``np.tril_indices``), matching
        the ``with_diag`` setting.  NOTE(review): despite the previous
        docstring, this is a 1-D vector, not an (n_features, n_features)
        matrix — see how ``RobustSelection`` builds and passes ``A_n``.
    with_diag : bool, default=False
        Whether to include the diagonal entries in the comparison.

    Returns
    -------
    rwp : float
        Sup-norm distance between the bootstrap covariance entries and
        ``orig_cov``.
    """
    n_samples, n_features = X.shape
    bootstrap = resample(X, replace=True, n_samples=n_samples)
    cov_boot = np.cov(bootstrap, rowvar=False)
    # Keep only the lower triangle (optionally including the diagonal) so we
    # compare the free entries of the symmetric covariance matrix once each.
    offset = 0 if with_diag else -1
    cov_vec = cov_boot[np.tril_indices(n_features, offset)]
    # ord=np.inf on a 1-D vector is max(abs(...)), i.e. the sup norm.
    return np.linalg.norm(cov_vec - orig_cov, ord=np.inf)
def RobustSelection(X, alpha, B=200, with_diag=False):
    """
    Robust Selection algorithm for estimation of the regularization parameter.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate
    alpha : float or array_like
        The confidence level: the higher alpha, the lower the order statistics,
        the smaller regularization parameter.
        Range is (0, 1).
    B : int, default=200
        Number of bootstrap samples such that (B)(1-alpha) is
        also an integer.
    with_diag : bool, default=False
        Whether or not to include diagonal when compute RWP function.

    Returns
    -------
    lambda : array_type or float
        The estimated regularization parameter.
    """
    p = X.shape[1]
    A_n = np.cov(X, rowvar=False)
    # Vectorize the lower triangle of the sample covariance (optionally with
    # the diagonal) — this is the reference vector RWP compares against.
    if with_diag:
        A_n = A_n[np.tril_indices(p)]
    else:
        A_n = A_n[np.tril_indices(p, -1)]
    # Sorted RWP values over B bootstrap resamples.
    R_vec = np.sort([RWP(X, A_n, with_diag) for _ in range(B)])
    # 0-based index of the B*(1-alpha)-th order statistic.  Round with rint()
    # before truncating: the product is computed in floating point, so e.g.
    # 200 * (1 - 0.05) evaluates to 189.999...97 and a plain astype(int)
    # would silently select the wrong (one-lower) order statistic.
    index = np.rint(B * (1 - np.asarray(alpha))).astype(int) - 1
    return R_vec[index]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.