body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
7e0110f6112ca6e5bff78ae7ce618b02ae24f7ecf0ba7e5c8066a79e3146b42d
def to_chunks_iobes(sequence, verbose=False, delim='@'): 'Turn a sequence of IOBES tags into a list of chunks.\n\n :param sequence: `List[str]` The tag sequence.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n ' chunks = [] current = None for (i, label) in enumerate(sequence): if label.startswith('B-'): if (current is not None): chunks.append(delim.join(current)) current = [label.replace('B-', ''), ('%d' % i)] if verbose: if ((i < (len(sequence) - 1)) and (label[2:] != sequence[(i + 1)][2:])): logger.warning('Warning: Single B token chunk @ %d', i) elif label.startswith('S-'): if (current is not None): chunks.append(delim.join(current)) current = None base = label.replace('S-', '') chunks.append(delim.join([base, ('%d' % i)])) elif label.startswith('I-'): if (current is not None): base = label.replace('I-', '') if (base == current[0]): current.append(('%d' % i)) else: chunks.append(delim.join(current)) if verbose: logger.warning(('Warning: I without matching previous B/I @ %d' % i)) current = [base, ('%d' % i)] else: if verbose: logger.warning(('Warning: I without a previous chunk @ %d' % i)) current = [label.replace('I-', ''), ('%d' % i)] elif label.startswith('E-'): if (current is not None): base = label.replace('E-', '') if (base == current[0]): current.append(('%d' % i)) chunks.append(delim.join(current)) current = None else: chunks.append(delim.join(current)) if verbose: logger.warning("Warning: E doesn't agree with previous B/I type!") current = [base, ('%d' % i)] chunks.append(delim.join(current)) current = None else: current = [label.replace('E-', ''), ('%d' % i)] if verbose: logger.warning(('Warning: E 
without previous chunk! @ %d' % i)) chunks.append(delim.join(current)) current = None else: if (current is not None): chunks.append(delim.join(current)) current = None if (current is not None): chunks.append(delim.join(current)) return chunks
Turn a sequence of IOBES tags into a list of chunks. :param sequence: `List[str]` The tag sequence. :param verbose: `bool` Should we output warning on illegal transitions. :param delim: `str` The symbol the separates output chunks from their indices. :returns: `List[str]` The list of entities in the order they appear. The entities are in the form {chunk_type}{delim}{index}{delim}{index}... for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5 in the original sequence.
python/baseline/utils.py
to_chunks_iobes
amyhemmeter/baseline
0
python
def to_chunks_iobes(sequence, verbose=False, delim='@'): 'Turn a sequence of IOBES tags into a list of chunks.\n\n :param sequence: `List[str]` The tag sequence.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n ' chunks = [] current = None for (i, label) in enumerate(sequence): if label.startswith('B-'): if (current is not None): chunks.append(delim.join(current)) current = [label.replace('B-', ), ('%d' % i)] if verbose: if ((i < (len(sequence) - 1)) and (label[2:] != sequence[(i + 1)][2:])): logger.warning('Warning: Single B token chunk @ %d', i) elif label.startswith('S-'): if (current is not None): chunks.append(delim.join(current)) current = None base = label.replace('S-', ) chunks.append(delim.join([base, ('%d' % i)])) elif label.startswith('I-'): if (current is not None): base = label.replace('I-', ) if (base == current[0]): current.append(('%d' % i)) else: chunks.append(delim.join(current)) if verbose: logger.warning(('Warning: I without matching previous B/I @ %d' % i)) current = [base, ('%d' % i)] else: if verbose: logger.warning(('Warning: I without a previous chunk @ %d' % i)) current = [label.replace('I-', ), ('%d' % i)] elif label.startswith('E-'): if (current is not None): base = label.replace('E-', ) if (base == current[0]): current.append(('%d' % i)) chunks.append(delim.join(current)) current = None else: chunks.append(delim.join(current)) if verbose: logger.warning("Warning: E doesn't agree with previous B/I type!") current = [base, ('%d' % i)] chunks.append(delim.join(current)) current = None else: current = [label.replace('E-', ), ('%d' % i)] if verbose: logger.warning(('Warning: E without 
previous chunk! @ %d' % i)) chunks.append(delim.join(current)) current = None else: if (current is not None): chunks.append(delim.join(current)) current = None if (current is not None): chunks.append(delim.join(current)) return chunks
def to_chunks_iobes(sequence, verbose=False, delim='@'): 'Turn a sequence of IOBES tags into a list of chunks.\n\n :param sequence: `List[str]` The tag sequence.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n ' chunks = [] current = None for (i, label) in enumerate(sequence): if label.startswith('B-'): if (current is not None): chunks.append(delim.join(current)) current = [label.replace('B-', ), ('%d' % i)] if verbose: if ((i < (len(sequence) - 1)) and (label[2:] != sequence[(i + 1)][2:])): logger.warning('Warning: Single B token chunk @ %d', i) elif label.startswith('S-'): if (current is not None): chunks.append(delim.join(current)) current = None base = label.replace('S-', ) chunks.append(delim.join([base, ('%d' % i)])) elif label.startswith('I-'): if (current is not None): base = label.replace('I-', ) if (base == current[0]): current.append(('%d' % i)) else: chunks.append(delim.join(current)) if verbose: logger.warning(('Warning: I without matching previous B/I @ %d' % i)) current = [base, ('%d' % i)] else: if verbose: logger.warning(('Warning: I without a previous chunk @ %d' % i)) current = [label.replace('I-', ), ('%d' % i)] elif label.startswith('E-'): if (current is not None): base = label.replace('E-', ) if (base == current[0]): current.append(('%d' % i)) chunks.append(delim.join(current)) current = None else: chunks.append(delim.join(current)) if verbose: logger.warning("Warning: E doesn't agree with previous B/I type!") current = [base, ('%d' % i)] chunks.append(delim.join(current)) current = None else: current = [label.replace('E-', ), ('%d' % i)] if verbose: logger.warning(('Warning: E without 
previous chunk! @ %d' % i)) chunks.append(delim.join(current)) current = None else: if (current is not None): chunks.append(delim.join(current)) current = None if (current is not None): chunks.append(delim.join(current)) return chunks<|docstring|>Turn a sequence of IOBES tags into a list of chunks. :param sequence: `List[str]` The tag sequence. :param verbose: `bool` Should we output warning on illegal transitions. :param delim: `str` The symbol the separates output chunks from their indices. :returns: `List[str]` The list of entities in the order they appear. The entities are in the form {chunk_type}{delim}{index}{delim}{index}... for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5 in the original sequence.<|endoftext|>
4ab9f3848b039d7885230cb37ea4b8c3c68c7afb0a538017b4bcaca01d8c6d3d
@exporter def span_f1(golds, preds): 'Calculate Span level F1 score.\n\n :param golds: `List[set[str]]` The list of the set of gold chunks.\n :param preds: `List[set[str]]` The list of the set of predicted chunks.\n\n :returns: `float` The f1 score.\n ' overlap = sum((len((g & p)) for (g, p) in zip(golds, preds))) gold_total = sum((len(g) for g in golds)) pred_total = sum((len(p) for p in preds)) return f_score(overlap, gold_total, pred_total)
Calculate Span level F1 score. :param golds: `List[set[str]]` The list of the set of gold chunks. :param preds: `List[set[str]]` The list of the set of predicted chunks. :returns: `float` The f1 score.
python/baseline/utils.py
span_f1
amyhemmeter/baseline
0
python
@exporter def span_f1(golds, preds): 'Calculate Span level F1 score.\n\n :param golds: `List[set[str]]` The list of the set of gold chunks.\n :param preds: `List[set[str]]` The list of the set of predicted chunks.\n\n :returns: `float` The f1 score.\n ' overlap = sum((len((g & p)) for (g, p) in zip(golds, preds))) gold_total = sum((len(g) for g in golds)) pred_total = sum((len(p) for p in preds)) return f_score(overlap, gold_total, pred_total)
@exporter def span_f1(golds, preds): 'Calculate Span level F1 score.\n\n :param golds: `List[set[str]]` The list of the set of gold chunks.\n :param preds: `List[set[str]]` The list of the set of predicted chunks.\n\n :returns: `float` The f1 score.\n ' overlap = sum((len((g & p)) for (g, p) in zip(golds, preds))) gold_total = sum((len(g) for g in golds)) pred_total = sum((len(p) for p in preds)) return f_score(overlap, gold_total, pred_total)<|docstring|>Calculate Span level F1 score. :param golds: `List[set[str]]` The list of the set of gold chunks. :param preds: `List[set[str]]` The list of the set of predicted chunks. :returns: `float` The f1 score.<|endoftext|>
f20e9dac4a90910129a56accd32d7963614cedbba2f51f7b163b96556c2bb7d1
@exporter def per_entity_f1(golds, preds, delim='@'): 'Calculate Span level F1 with break downs per entity type.\n\n :param golds: `List[set[str]]` The list of the set of gold chunks.\n :param preds: `List[set[str]]` The list of the set of predicted chunks.\n :param delim: `str` The symbol that separates an entity from its indices.\n\n :returns: `dict` The metrics at a global level and fine grained entity\n level performance.\n\n Note:\n This function returns most of the metrics needed for the\n `conlleval_output`. `acc` and `tokens` (the token level accuracy\n and the number of tokens respectively) need to be added.\n ' metrics = {} overlap = Counter() gold_total = Counter() pred_total = Counter() types = set() for (g, p) in zip(golds, preds): overlaps = (g & p) overlap['total'] += len(overlaps) gold_total['total'] += len(g) pred_total['total'] += len(p) for o in overlaps: ent = o.split(delim)[0] overlap[ent] += 1 types.add(ent) for o in g: ent = o.split(delim)[0] gold_total[ent] += 1 types.add(ent) for o in p: ent = o.split(delim)[0] pred_total[ent] += 1 types.add(ent) metrics['overlap'] = overlap['total'] metrics['gold_total'] = gold_total['total'] metrics['pred_total'] = pred_total['total'] metrics['precision'] = (precision(overlap['total'], pred_total['total']) * 100) metrics['recall'] = (recall(overlap['total'], gold_total['total']) * 100) metrics['f1'] = (f_score(overlap['total'], gold_total['total'], pred_total['total']) * 100) metrics['types'] = [] for t in sorted(types): metrics['types'].append({'ent': t, 'precision': (precision(overlap[t], pred_total[t]) * 100), 'recall': (recall(overlap[t], gold_total[t]) * 100), 'f1': (f_score(overlap[t], gold_total[t], pred_total[t]) * 100), 'count': pred_total[t]}) return metrics
Calculate Span level F1 with break downs per entity type. :param golds: `List[set[str]]` The list of the set of gold chunks. :param preds: `List[set[str]]` The list of the set of predicted chunks. :param delim: `str` The symbol that separates an entity from its indices. :returns: `dict` The metrics at a global level and fine grained entity level performance. Note: This function returns most of the metrics needed for the `conlleval_output`. `acc` and `tokens` (the token level accuracy and the number of tokens respectively) need to be added.
python/baseline/utils.py
per_entity_f1
amyhemmeter/baseline
0
python
@exporter def per_entity_f1(golds, preds, delim='@'): 'Calculate Span level F1 with break downs per entity type.\n\n :param golds: `List[set[str]]` The list of the set of gold chunks.\n :param preds: `List[set[str]]` The list of the set of predicted chunks.\n :param delim: `str` The symbol that separates an entity from its indices.\n\n :returns: `dict` The metrics at a global level and fine grained entity\n level performance.\n\n Note:\n This function returns most of the metrics needed for the\n `conlleval_output`. `acc` and `tokens` (the token level accuracy\n and the number of tokens respectively) need to be added.\n ' metrics = {} overlap = Counter() gold_total = Counter() pred_total = Counter() types = set() for (g, p) in zip(golds, preds): overlaps = (g & p) overlap['total'] += len(overlaps) gold_total['total'] += len(g) pred_total['total'] += len(p) for o in overlaps: ent = o.split(delim)[0] overlap[ent] += 1 types.add(ent) for o in g: ent = o.split(delim)[0] gold_total[ent] += 1 types.add(ent) for o in p: ent = o.split(delim)[0] pred_total[ent] += 1 types.add(ent) metrics['overlap'] = overlap['total'] metrics['gold_total'] = gold_total['total'] metrics['pred_total'] = pred_total['total'] metrics['precision'] = (precision(overlap['total'], pred_total['total']) * 100) metrics['recall'] = (recall(overlap['total'], gold_total['total']) * 100) metrics['f1'] = (f_score(overlap['total'], gold_total['total'], pred_total['total']) * 100) metrics['types'] = [] for t in sorted(types): metrics['types'].append({'ent': t, 'precision': (precision(overlap[t], pred_total[t]) * 100), 'recall': (recall(overlap[t], gold_total[t]) * 100), 'f1': (f_score(overlap[t], gold_total[t], pred_total[t]) * 100), 'count': pred_total[t]}) return metrics
@exporter def per_entity_f1(golds, preds, delim='@'): 'Calculate Span level F1 with break downs per entity type.\n\n :param golds: `List[set[str]]` The list of the set of gold chunks.\n :param preds: `List[set[str]]` The list of the set of predicted chunks.\n :param delim: `str` The symbol that separates an entity from its indices.\n\n :returns: `dict` The metrics at a global level and fine grained entity\n level performance.\n\n Note:\n This function returns most of the metrics needed for the\n `conlleval_output`. `acc` and `tokens` (the token level accuracy\n and the number of tokens respectively) need to be added.\n ' metrics = {} overlap = Counter() gold_total = Counter() pred_total = Counter() types = set() for (g, p) in zip(golds, preds): overlaps = (g & p) overlap['total'] += len(overlaps) gold_total['total'] += len(g) pred_total['total'] += len(p) for o in overlaps: ent = o.split(delim)[0] overlap[ent] += 1 types.add(ent) for o in g: ent = o.split(delim)[0] gold_total[ent] += 1 types.add(ent) for o in p: ent = o.split(delim)[0] pred_total[ent] += 1 types.add(ent) metrics['overlap'] = overlap['total'] metrics['gold_total'] = gold_total['total'] metrics['pred_total'] = pred_total['total'] metrics['precision'] = (precision(overlap['total'], pred_total['total']) * 100) metrics['recall'] = (recall(overlap['total'], gold_total['total']) * 100) metrics['f1'] = (f_score(overlap['total'], gold_total['total'], pred_total['total']) * 100) metrics['types'] = [] for t in sorted(types): metrics['types'].append({'ent': t, 'precision': (precision(overlap[t], pred_total[t]) * 100), 'recall': (recall(overlap[t], gold_total[t]) * 100), 'f1': (f_score(overlap[t], gold_total[t], pred_total[t]) * 100), 'count': pred_total[t]}) return metrics<|docstring|>Calculate Span level F1 with break downs per entity type. :param golds: `List[set[str]]` The list of the set of gold chunks. :param preds: `List[set[str]]` The list of the set of predicted chunks. 
:param delim: `str` The symbol that separates an entity from its indices. :returns: `dict` The metrics at a global level and fine grained entity level performance. Note: This function returns most of the metrics needed for the `conlleval_output`. `acc` and `tokens` (the token level accuracy and the number of tokens respectively) need to be added.<|endoftext|>
e828579bac650a52fddcc86a5be00289209c71bde3566b958c46467519946bf4
@exporter def conlleval_output(results): "Create conlleval formated output.\n\n :param results: `dict` The metrics. results should have the following keys.\n tokens: `int` The total number of tokens processed.\n acc: `float` The token level accuracy.\n gold_total: `int` The total number of gold entities.\n pred_total: `int` The total number of predicted entities.\n overlap: `int` The number of exact match entites.\n precision: `float` The precision of all entities.\n recall: `float` The recall of all entities.\n f1: `float` The f1 score of all entities.\n types: `List[dict]` A list of metrics for each entity type. Keys should include:\n ent: `str` The name of the entity.\n precision: `float` The precision of this entity type.\n recall: `float` The recall of this this entity type.\n f1: `float` The f1 score of this entity type.\n count: `int` The number of predicted entities of this type.\n\n :returns: `str` The formatted string ready for printing.\n\n Note:\n Both the metrics in the results dict and acc are expected to already be\n multiplied by 100. The result won't look correct and a lot of the\n metric will be cut off if they are not.\n\n Metrics per type are output in the order they appear in the list.\n conlleval.pl outputs the types in sorted order. To match this the list\n in `results['types'] should be sorted.\n " s = 'processed {tokens} tokens with {gold_total} phrases; found: {pred_total} phrases; correct: {overlap}.\naccuracy: {acc:>{length}.2f}%; precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f}\n' t = [] longest_ent = max(len(max(results['types'], key=(lambda x: len(x['ent'])))['ent']), 17) for type_metric in results['types']: t.append('{ent:>{longest_ent}}: precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f} {count}'.format(longest_ent=longest_ent, **type_metric)) s = (s + '\n'.join(t)) s = s.format(length=(longest_ent - 11), **results) return s
Create conlleval formated output. :param results: `dict` The metrics. results should have the following keys. tokens: `int` The total number of tokens processed. acc: `float` The token level accuracy. gold_total: `int` The total number of gold entities. pred_total: `int` The total number of predicted entities. overlap: `int` The number of exact match entites. precision: `float` The precision of all entities. recall: `float` The recall of all entities. f1: `float` The f1 score of all entities. types: `List[dict]` A list of metrics for each entity type. Keys should include: ent: `str` The name of the entity. precision: `float` The precision of this entity type. recall: `float` The recall of this this entity type. f1: `float` The f1 score of this entity type. count: `int` The number of predicted entities of this type. :returns: `str` The formatted string ready for printing. Note: Both the metrics in the results dict and acc are expected to already be multiplied by 100. The result won't look correct and a lot of the metric will be cut off if they are not. Metrics per type are output in the order they appear in the list. conlleval.pl outputs the types in sorted order. To match this the list in `results['types'] should be sorted.
python/baseline/utils.py
conlleval_output
amyhemmeter/baseline
0
python
@exporter def conlleval_output(results): "Create conlleval formated output.\n\n :param results: `dict` The metrics. results should have the following keys.\n tokens: `int` The total number of tokens processed.\n acc: `float` The token level accuracy.\n gold_total: `int` The total number of gold entities.\n pred_total: `int` The total number of predicted entities.\n overlap: `int` The number of exact match entites.\n precision: `float` The precision of all entities.\n recall: `float` The recall of all entities.\n f1: `float` The f1 score of all entities.\n types: `List[dict]` A list of metrics for each entity type. Keys should include:\n ent: `str` The name of the entity.\n precision: `float` The precision of this entity type.\n recall: `float` The recall of this this entity type.\n f1: `float` The f1 score of this entity type.\n count: `int` The number of predicted entities of this type.\n\n :returns: `str` The formatted string ready for printing.\n\n Note:\n Both the metrics in the results dict and acc are expected to already be\n multiplied by 100. The result won't look correct and a lot of the\n metric will be cut off if they are not.\n\n Metrics per type are output in the order they appear in the list.\n conlleval.pl outputs the types in sorted order. To match this the list\n in `results['types'] should be sorted.\n " s = 'processed {tokens} tokens with {gold_total} phrases; found: {pred_total} phrases; correct: {overlap}.\naccuracy: {acc:>{length}.2f}%; precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f}\n' t = [] longest_ent = max(len(max(results['types'], key=(lambda x: len(x['ent'])))['ent']), 17) for type_metric in results['types']: t.append('{ent:>{longest_ent}}: precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f} {count}'.format(longest_ent=longest_ent, **type_metric)) s = (s + '\n'.join(t)) s = s.format(length=(longest_ent - 11), **results) return s
@exporter def conlleval_output(results): "Create conlleval formated output.\n\n :param results: `dict` The metrics. results should have the following keys.\n tokens: `int` The total number of tokens processed.\n acc: `float` The token level accuracy.\n gold_total: `int` The total number of gold entities.\n pred_total: `int` The total number of predicted entities.\n overlap: `int` The number of exact match entites.\n precision: `float` The precision of all entities.\n recall: `float` The recall of all entities.\n f1: `float` The f1 score of all entities.\n types: `List[dict]` A list of metrics for each entity type. Keys should include:\n ent: `str` The name of the entity.\n precision: `float` The precision of this entity type.\n recall: `float` The recall of this this entity type.\n f1: `float` The f1 score of this entity type.\n count: `int` The number of predicted entities of this type.\n\n :returns: `str` The formatted string ready for printing.\n\n Note:\n Both the metrics in the results dict and acc are expected to already be\n multiplied by 100. The result won't look correct and a lot of the\n metric will be cut off if they are not.\n\n Metrics per type are output in the order they appear in the list.\n conlleval.pl outputs the types in sorted order. To match this the list\n in `results['types'] should be sorted.\n " s = 'processed {tokens} tokens with {gold_total} phrases; found: {pred_total} phrases; correct: {overlap}.\naccuracy: {acc:>{length}.2f}%; precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f}\n' t = [] longest_ent = max(len(max(results['types'], key=(lambda x: len(x['ent'])))['ent']), 17) for type_metric in results['types']: t.append('{ent:>{longest_ent}}: precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f} {count}'.format(longest_ent=longest_ent, **type_metric)) s = (s + '\n'.join(t)) s = s.format(length=(longest_ent - 11), **results) return s<|docstring|>Create conlleval formated output. 
:param results: `dict` The metrics. results should have the following keys. tokens: `int` The total number of tokens processed. acc: `float` The token level accuracy. gold_total: `int` The total number of gold entities. pred_total: `int` The total number of predicted entities. overlap: `int` The number of exact match entites. precision: `float` The precision of all entities. recall: `float` The recall of all entities. f1: `float` The f1 score of all entities. types: `List[dict]` A list of metrics for each entity type. Keys should include: ent: `str` The name of the entity. precision: `float` The precision of this entity type. recall: `float` The recall of this this entity type. f1: `float` The f1 score of this entity type. count: `int` The number of predicted entities of this type. :returns: `str` The formatted string ready for printing. Note: Both the metrics in the results dict and acc are expected to already be multiplied by 100. The result won't look correct and a lot of the metric will be cut off if they are not. Metrics per type are output in the order they appear in the list. conlleval.pl outputs the types in sorted order. To match this the list in `results['types'] should be sorted.<|endoftext|>
1c58b37bf465720f5a419b20241ebec73f8b373f3ca959327663e5fba25ae04c
@exporter def precision(overlap_count, guess_count): 'Compute the precision in a zero safe way.\n\n :param overlap_count: `int` The number of true positives.\n :param guess_count: `int` The number of predicted positives (tp + fp)\n\n :returns: `float` The precision.\n ' if (guess_count == 0): return 0.0 return (overlap_count / float(guess_count))
Compute the precision in a zero safe way. :param overlap_count: `int` The number of true positives. :param guess_count: `int` The number of predicted positives (tp + fp) :returns: `float` The precision.
python/baseline/utils.py
precision
amyhemmeter/baseline
0
python
@exporter def precision(overlap_count, guess_count): 'Compute the precision in a zero safe way.\n\n :param overlap_count: `int` The number of true positives.\n :param guess_count: `int` The number of predicted positives (tp + fp)\n\n :returns: `float` The precision.\n ' if (guess_count == 0): return 0.0 return (overlap_count / float(guess_count))
@exporter def precision(overlap_count, guess_count): 'Compute the precision in a zero safe way.\n\n :param overlap_count: `int` The number of true positives.\n :param guess_count: `int` The number of predicted positives (tp + fp)\n\n :returns: `float` The precision.\n ' if (guess_count == 0): return 0.0 return (overlap_count / float(guess_count))<|docstring|>Compute the precision in a zero safe way. :param overlap_count: `int` The number of true positives. :param guess_count: `int` The number of predicted positives (tp + fp) :returns: `float` The precision.<|endoftext|>
46745100806cb3e94fcf92859eb9fc4310eb711eae8ff24c2775ae1ce8207fcd
@exporter def recall(overlap_count, gold_count): 'Compute the recall in a zero safe way.\n\n :param overlap_count: `int` The number of true positives.\n :param gold_count: `int` The number of gold positives (tp + fn)\n\n :returns: `float` The recall.\n ' if (gold_count == 0): return 0.0 return (overlap_count / float(gold_count))
Compute the recall in a zero safe way. :param overlap_count: `int` The number of true positives. :param gold_count: `int` The number of gold positives (tp + fn) :returns: `float` The recall.
python/baseline/utils.py
recall
amyhemmeter/baseline
0
python
@exporter def recall(overlap_count, gold_count): 'Compute the recall in a zero safe way.\n\n :param overlap_count: `int` The number of true positives.\n :param gold_count: `int` The number of gold positives (tp + fn)\n\n :returns: `float` The recall.\n ' if (gold_count == 0): return 0.0 return (overlap_count / float(gold_count))
@exporter def recall(overlap_count, gold_count): 'Compute the recall in a zero safe way.\n\n :param overlap_count: `int` The number of true positives.\n :param gold_count: `int` The number of gold positives (tp + fn)\n\n :returns: `float` The recall.\n ' if (gold_count == 0): return 0.0 return (overlap_count / float(gold_count))<|docstring|>Compute the recall in a zero safe way. :param overlap_count: `int` The number of true positives. :param gold_count: `int` The number of gold positives (tp + fn) :returns: `float` The recall.<|endoftext|>
27ab2d40719368a5b12ab3c354dc2cab13100d13ae1f3762bbdf67164f8c44f3
@exporter def f_score(overlap_count, gold_count, guess_count, f=1): 'Compute the f1 score.\n\n :param overlap_count: `int` The number of true positives.\n :param gold_count: `int` The number of gold positives (tp + fn)\n :param guess_count: `int` The number of predicted positives (tp + fp)\n :param f: `int` The beta term to weight precision vs recall.\n\n :returns: `float` The f score\n ' beta_sq = (f * f) if (guess_count == 0): return 0.0 p = precision(overlap_count, guess_count) r = recall(overlap_count, gold_count) if ((p == 0.0) or (r == 0.0)): return 0.0 f = (((1.0 + beta_sq) * (p * r)) / ((beta_sq * p) + r)) return f
Compute the f1 score. :param overlap_count: `int` The number of true positives. :param gold_count: `int` The number of gold positives (tp + fn) :param guess_count: `int` The number of predicted positives (tp + fp) :param f: `int` The beta term to weight precision vs recall. :returns: `float` The f score
python/baseline/utils.py
f_score
amyhemmeter/baseline
0
python
@exporter def f_score(overlap_count, gold_count, guess_count, f=1): 'Compute the f1 score.\n\n :param overlap_count: `int` The number of true positives.\n :param gold_count: `int` The number of gold positives (tp + fn)\n :param guess_count: `int` The number of predicted positives (tp + fp)\n :param f: `int` The beta term to weight precision vs recall.\n\n :returns: `float` The f score\n ' beta_sq = (f * f) if (guess_count == 0): return 0.0 p = precision(overlap_count, guess_count) r = recall(overlap_count, gold_count) if ((p == 0.0) or (r == 0.0)): return 0.0 f = (((1.0 + beta_sq) * (p * r)) / ((beta_sq * p) + r)) return f
@exporter def f_score(overlap_count, gold_count, guess_count, f=1): 'Compute the f1 score.\n\n :param overlap_count: `int` The number of true positives.\n :param gold_count: `int` The number of gold positives (tp + fn)\n :param guess_count: `int` The number of predicted positives (tp + fp)\n :param f: `int` The beta term to weight precision vs recall.\n\n :returns: `float` The f score\n ' beta_sq = (f * f) if (guess_count == 0): return 0.0 p = precision(overlap_count, guess_count) r = recall(overlap_count, gold_count) if ((p == 0.0) or (r == 0.0)): return 0.0 f = (((1.0 + beta_sq) * (p * r)) / ((beta_sq * p) + r)) return f<|docstring|>Compute the f1 score. :param overlap_count: `int` The number of true positives. :param gold_count: `int` The number of gold positives (tp + fn) :param guess_count: `int` The number of predicted positives (tp + fp) :param f: `int` The beta term to weight precision vs recall. :returns: `float` The f score<|endoftext|>
55df35c53616b77404a214d0032e85764c7d5016fece0763b583ae52092418dc
@exporter def unzip_model(path): 'If the path for a model file is a zip file, unzip it in /tmp and return the unzipped path' if os.path.isdir(path): return path from baseline.mime_type import mime_type if (mime_type(path) == 'application/zip'): with open(path, 'rb') as f: sha1 = hashlib.sha1(f.read()).hexdigest() temp_dir = os.path.join('/tmp/', sha1) if (not os.path.exists(temp_dir)): logger.info('unzipping model') with zipfile.ZipFile(path, 'r') as zip_ref: zip_ref.extractall(temp_dir) if (len(os.listdir(temp_dir)) == 1): temp_dir = os.path.join(temp_dir, os.listdir(temp_dir)[0]) path = os.path.join(temp_dir, [x[:(- 6)] for x in os.listdir(temp_dir) if ('index' in x)][0]) return path
If the path for a model file is a zip file, unzip it in /tmp and return the unzipped path
python/baseline/utils.py
unzip_model
amyhemmeter/baseline
0
python
@exporter def unzip_model(path): if os.path.isdir(path): return path from baseline.mime_type import mime_type if (mime_type(path) == 'application/zip'): with open(path, 'rb') as f: sha1 = hashlib.sha1(f.read()).hexdigest() temp_dir = os.path.join('/tmp/', sha1) if (not os.path.exists(temp_dir)): logger.info('unzipping model') with zipfile.ZipFile(path, 'r') as zip_ref: zip_ref.extractall(temp_dir) if (len(os.listdir(temp_dir)) == 1): temp_dir = os.path.join(temp_dir, os.listdir(temp_dir)[0]) path = os.path.join(temp_dir, [x[:(- 6)] for x in os.listdir(temp_dir) if ('index' in x)][0]) return path
@exporter def unzip_model(path): if os.path.isdir(path): return path from baseline.mime_type import mime_type if (mime_type(path) == 'application/zip'): with open(path, 'rb') as f: sha1 = hashlib.sha1(f.read()).hexdigest() temp_dir = os.path.join('/tmp/', sha1) if (not os.path.exists(temp_dir)): logger.info('unzipping model') with zipfile.ZipFile(path, 'r') as zip_ref: zip_ref.extractall(temp_dir) if (len(os.listdir(temp_dir)) == 1): temp_dir = os.path.join(temp_dir, os.listdir(temp_dir)[0]) path = os.path.join(temp_dir, [x[:(- 6)] for x in os.listdir(temp_dir) if ('index' in x)][0]) return path<|docstring|>If the path for a model file is a zip file, unzip it in /tmp and return the unzipped path<|endoftext|>
3517a97af35088915b1ffbe8f1e38f271dbe435ec85c1f5a61f71cfd1a2c2a69
@exporter def zip_model(path): 'zips the model files' logger.info('zipping model files') model_files = [x for x in os.listdir('.') if (path[2:] in x)] z = zipfile.ZipFile('{}.zip'.format(path), 'w') for f in model_files: z.write(f) os.remove(f) z.close()
zips the model files
python/baseline/utils.py
zip_model
amyhemmeter/baseline
0
python
@exporter def zip_model(path): logger.info('zipping model files') model_files = [x for x in os.listdir('.') if (path[2:] in x)] z = zipfile.ZipFile('{}.zip'.format(path), 'w') for f in model_files: z.write(f) os.remove(f) z.close()
@exporter def zip_model(path): logger.info('zipping model files') model_files = [x for x in os.listdir('.') if (path[2:] in x)] z = zipfile.ZipFile('{}.zip'.format(path), 'w') for f in model_files: z.write(f) os.remove(f) z.close()<|docstring|>zips the model files<|endoftext|>
d2e6be8f4f51da97c928db9ef87391743552b26c553a4f030f529c388c4a4d26
@exporter def show_examples(model, es, rlut1, rlut2, vocab, mxlen, sample, prob_clip, max_examples, reverse): 'Expects model.predict to return [B, K, T].' si = np.random.randint(0, len(es)) batch_dict = es[si] lengths_key = model.src_lengths_key src_field = lengths_key.split('_')[0] src_array = batch_dict[src_field] if (max_examples > 0): max_examples = min(max_examples, src_array.shape[0]) for i in range(max_examples): example = {} for (k, v) in batch_dict.items(): example[k] = v[(i, np.newaxis)] logger.info('========================================================================') sent = lookup_sentence(rlut1, example[src_field].squeeze(), reverse=reverse) logger.info(('[OP] %s' % sent)) sent = lookup_sentence(rlut2, example['tgt'].squeeze()) logger.info(('[Actual] %s' % sent)) dst_i = model.predict(example)[0][0] sent = lookup_sentence(rlut2, dst_i) logger.info(('Guess: %s' % sent)) logger.info('------------------------------------------------------------------------')
Expects model.predict to return [B, K, T].
python/baseline/utils.py
show_examples
amyhemmeter/baseline
0
python
@exporter def show_examples(model, es, rlut1, rlut2, vocab, mxlen, sample, prob_clip, max_examples, reverse): si = np.random.randint(0, len(es)) batch_dict = es[si] lengths_key = model.src_lengths_key src_field = lengths_key.split('_')[0] src_array = batch_dict[src_field] if (max_examples > 0): max_examples = min(max_examples, src_array.shape[0]) for i in range(max_examples): example = {} for (k, v) in batch_dict.items(): example[k] = v[(i, np.newaxis)] logger.info('========================================================================') sent = lookup_sentence(rlut1, example[src_field].squeeze(), reverse=reverse) logger.info(('[OP] %s' % sent)) sent = lookup_sentence(rlut2, example['tgt'].squeeze()) logger.info(('[Actual] %s' % sent)) dst_i = model.predict(example)[0][0] sent = lookup_sentence(rlut2, dst_i) logger.info(('Guess: %s' % sent)) logger.info('------------------------------------------------------------------------')
@exporter def show_examples(model, es, rlut1, rlut2, vocab, mxlen, sample, prob_clip, max_examples, reverse): si = np.random.randint(0, len(es)) batch_dict = es[si] lengths_key = model.src_lengths_key src_field = lengths_key.split('_')[0] src_array = batch_dict[src_field] if (max_examples > 0): max_examples = min(max_examples, src_array.shape[0]) for i in range(max_examples): example = {} for (k, v) in batch_dict.items(): example[k] = v[(i, np.newaxis)] logger.info('========================================================================') sent = lookup_sentence(rlut1, example[src_field].squeeze(), reverse=reverse) logger.info(('[OP] %s' % sent)) sent = lookup_sentence(rlut2, example['tgt'].squeeze()) logger.info(('[Actual] %s' % sent)) dst_i = model.predict(example)[0][0] sent = lookup_sentence(rlut2, dst_i) logger.info(('Guess: %s' % sent)) logger.info('------------------------------------------------------------------------')<|docstring|>Expects model.predict to return [B, K, T].<|endoftext|>
427e2eb3118c08621144d7d38626df18cdc5341ed1eead80cfb7bf77e15e5e40
@exporter def convert_seq2seq_golds(indices, lengths, rlut, subword_fix=(lambda x: x)): 'Convert indices to words and format like a bleu reference corpus.\n\n :param indices: The indices of the gold sentence. Should be in the shape\n `[B, T]`. Iterating though axis=1 should yield ints.\n :param lengths: The length of the gold sentences.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[List[str]]] Shape is [B, 1, T] where T is the number of\n words in that gold sentence\n ' golds = [] for (idx, l) in zip(indices, lengths): gold = idx[:l] gold_str = lookup_sentence(rlut, gold) gold = subword_fix(gold_str).split() golds.append([gold]) return golds
Convert indices to words and format like a bleu reference corpus. :param indices: The indices of the gold sentence. Should be in the shape `[B, T]`. Iterating though axis=1 should yield ints. :param lengths: The length of the gold sentences. :param rlut: `dict[int] -> str` A lookup table from indices to words. :returns: List[List[List[str]]] Shape is [B, 1, T] where T is the number of words in that gold sentence
python/baseline/utils.py
convert_seq2seq_golds
amyhemmeter/baseline
0
python
@exporter def convert_seq2seq_golds(indices, lengths, rlut, subword_fix=(lambda x: x)): 'Convert indices to words and format like a bleu reference corpus.\n\n :param indices: The indices of the gold sentence. Should be in the shape\n `[B, T]`. Iterating though axis=1 should yield ints.\n :param lengths: The length of the gold sentences.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[List[str]]] Shape is [B, 1, T] where T is the number of\n words in that gold sentence\n ' golds = [] for (idx, l) in zip(indices, lengths): gold = idx[:l] gold_str = lookup_sentence(rlut, gold) gold = subword_fix(gold_str).split() golds.append([gold]) return golds
@exporter def convert_seq2seq_golds(indices, lengths, rlut, subword_fix=(lambda x: x)): 'Convert indices to words and format like a bleu reference corpus.\n\n :param indices: The indices of the gold sentence. Should be in the shape\n `[B, T]`. Iterating though axis=1 should yield ints.\n :param lengths: The length of the gold sentences.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[List[str]]] Shape is [B, 1, T] where T is the number of\n words in that gold sentence\n ' golds = [] for (idx, l) in zip(indices, lengths): gold = idx[:l] gold_str = lookup_sentence(rlut, gold) gold = subword_fix(gold_str).split() golds.append([gold]) return golds<|docstring|>Convert indices to words and format like a bleu reference corpus. :param indices: The indices of the gold sentence. Should be in the shape `[B, T]`. Iterating though axis=1 should yield ints. :param lengths: The length of the gold sentences. :param rlut: `dict[int] -> str` A lookup table from indices to words. :returns: List[List[List[str]]] Shape is [B, 1, T] where T is the number of words in that gold sentence<|endoftext|>
668157cf5239b38b38a4420af8b5fb4302c0f89c5f37cf698e2c9832feea1250
@exporter def convert_seq2seq_preds(indices, rlut, subword_fix=(lambda x: x)): 'Convert indices to words and format like a bleu hypothesis corpus.\n\n :param indices: The indices of the predicted sentence. Should be in the\n shape `[B, T]`. Iterating though axis=1 should yield ints.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[str]] Shape is [B, T] where T is the number of\n words in that predicted sentence\n ' preds = [] for idx in indices: pred_str = lookup_sentence(rlut, idx) pred = subword_fix(pred_str).split() preds.append(pred) return preds
Convert indices to words and format like a bleu hypothesis corpus. :param indices: The indices of the predicted sentence. Should be in the shape `[B, T]`. Iterating though axis=1 should yield ints. :param rlut: `dict[int] -> str` A lookup table from indices to words. :returns: List[List[str]] Shape is [B, T] where T is the number of words in that predicted sentence
python/baseline/utils.py
convert_seq2seq_preds
amyhemmeter/baseline
0
python
@exporter def convert_seq2seq_preds(indices, rlut, subword_fix=(lambda x: x)): 'Convert indices to words and format like a bleu hypothesis corpus.\n\n :param indices: The indices of the predicted sentence. Should be in the\n shape `[B, T]`. Iterating though axis=1 should yield ints.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[str]] Shape is [B, T] where T is the number of\n words in that predicted sentence\n ' preds = [] for idx in indices: pred_str = lookup_sentence(rlut, idx) pred = subword_fix(pred_str).split() preds.append(pred) return preds
@exporter def convert_seq2seq_preds(indices, rlut, subword_fix=(lambda x: x)): 'Convert indices to words and format like a bleu hypothesis corpus.\n\n :param indices: The indices of the predicted sentence. Should be in the\n shape `[B, T]`. Iterating though axis=1 should yield ints.\n :param rlut: `dict[int] -> str` A lookup table from indices to words.\n\n :returns: List[List[str]] Shape is [B, T] where T is the number of\n words in that predicted sentence\n ' preds = [] for idx in indices: pred_str = lookup_sentence(rlut, idx) pred = subword_fix(pred_str).split() preds.append(pred) return preds<|docstring|>Convert indices to words and format like a bleu hypothesis corpus. :param indices: The indices of the predicted sentence. Should be in the shape `[B, T]`. Iterating though axis=1 should yield ints. :param rlut: `dict[int] -> str` A lookup table from indices to words. :returns: List[List[str]] Shape is [B, T] where T is the number of words in that predicted sentence<|endoftext|>
261c68dfc523a1fc83e9cfeddad343f4fadd2fb6f56459dcbd9240a0223b8bb2
@exporter def undo_bpe(seq): 'Undo the BPE splits to make Bleu comparable.\n\n :param seq: `str`: The string with encoded tokens in it.\n\n :returns: `str`: The string with BPE splits collapsed.\n ' return re.sub('@@( | ?$)', '', seq)
Undo the BPE splits to make Bleu comparable. :param seq: `str`: The string with encoded tokens in it. :returns: `str`: The string with BPE splits collapsed.
python/baseline/utils.py
undo_bpe
amyhemmeter/baseline
0
python
@exporter def undo_bpe(seq): 'Undo the BPE splits to make Bleu comparable.\n\n :param seq: `str`: The string with encoded tokens in it.\n\n :returns: `str`: The string with BPE splits collapsed.\n ' return re.sub('@@( | ?$)', , seq)
@exporter def undo_bpe(seq): 'Undo the BPE splits to make Bleu comparable.\n\n :param seq: `str`: The string with encoded tokens in it.\n\n :returns: `str`: The string with BPE splits collapsed.\n ' return re.sub('@@( | ?$)', , seq)<|docstring|>Undo the BPE splits to make Bleu comparable. :param seq: `str`: The string with encoded tokens in it. :returns: `str`: The string with BPE splits collapsed.<|endoftext|>
271affe0273e3a27e64c750a52e94f31c58e4a7433e1db107b8cf9015fbfd0ee
@exporter def undo_sentence_piece(seq): 'Undo the sentence Piece splits to make Bleu comparable.' return seq.replace('▁', '')
Undo the sentence Piece splits to make Bleu comparable.
python/baseline/utils.py
undo_sentence_piece
amyhemmeter/baseline
0
python
@exporter def undo_sentence_piece(seq): return seq.replace('▁', )
@exporter def undo_sentence_piece(seq): return seq.replace('▁', )<|docstring|>Undo the sentence Piece splits to make Bleu comparable.<|endoftext|>
70ca5befa05fd7096dd244c3c2e7d8a23590cca8125bc59a801f8e6eb90eb363
@exporter def ngrams(sentence, filtsz=3, joiner='@@'): 'Generate ngrams over a sentence\n\n :param sentence: (`List[str]`) Some tokens\n :param filtsz: The ngram width\n :param joiner: A string to join ngrams\n :return: (`List[str]`) A list of ngrams\n ' chunks = [] nt = len(sentence) for i in range(((nt - filtsz) + 1)): chunk = sentence[i:(i + filtsz)] chunks += [joiner.join(chunk)] return chunks
Generate ngrams over a sentence :param sentence: (`List[str]`) Some tokens :param filtsz: The ngram width :param joiner: A string to join ngrams :return: (`List[str]`) A list of ngrams
python/baseline/utils.py
ngrams
amyhemmeter/baseline
0
python
@exporter def ngrams(sentence, filtsz=3, joiner='@@'): 'Generate ngrams over a sentence\n\n :param sentence: (`List[str]`) Some tokens\n :param filtsz: The ngram width\n :param joiner: A string to join ngrams\n :return: (`List[str]`) A list of ngrams\n ' chunks = [] nt = len(sentence) for i in range(((nt - filtsz) + 1)): chunk = sentence[i:(i + filtsz)] chunks += [joiner.join(chunk)] return chunks
@exporter def ngrams(sentence, filtsz=3, joiner='@@'): 'Generate ngrams over a sentence\n\n :param sentence: (`List[str]`) Some tokens\n :param filtsz: The ngram width\n :param joiner: A string to join ngrams\n :return: (`List[str]`) A list of ngrams\n ' chunks = [] nt = len(sentence) for i in range(((nt - filtsz) + 1)): chunk = sentence[i:(i + filtsz)] chunks += [joiner.join(chunk)] return chunks<|docstring|>Generate ngrams over a sentence :param sentence: (`List[str]`) Some tokens :param filtsz: The ngram width :param joiner: A string to join ngrams :return: (`List[str]`) A list of ngrams<|endoftext|>
c5599dd755db88d6d3aaf14e0bbdac77402efd86ca8dec6f7f0565c9c37aef97
@exporter @str_file def read_label_first_data(f): 'Read data from a file where the first token in the label and each line is a single example.\n\n :param f: `Union[str, IO]` The file to read from.\n :return: `Tuple[List[str], List[List[str]]]` The labels and text\n ' (labels, texts) = ([], []) for line in f: line = line.rstrip() if line: (label, text) = line.split(maxsplit=1) labels.append(label) texts.append(text.split()) return (labels, texts)
Read data from a file where the first token in the label and each line is a single example. :param f: `Union[str, IO]` The file to read from. :return: `Tuple[List[str], List[List[str]]]` The labels and text
python/baseline/utils.py
read_label_first_data
amyhemmeter/baseline
0
python
@exporter @str_file def read_label_first_data(f): 'Read data from a file where the first token in the label and each line is a single example.\n\n :param f: `Union[str, IO]` The file to read from.\n :return: `Tuple[List[str], List[List[str]]]` The labels and text\n ' (labels, texts) = ([], []) for line in f: line = line.rstrip() if line: (label, text) = line.split(maxsplit=1) labels.append(label) texts.append(text.split()) return (labels, texts)
@exporter @str_file def read_label_first_data(f): 'Read data from a file where the first token in the label and each line is a single example.\n\n :param f: `Union[str, IO]` The file to read from.\n :return: `Tuple[List[str], List[List[str]]]` The labels and text\n ' (labels, texts) = ([], []) for line in f: line = line.rstrip() if line: (label, text) = line.split(maxsplit=1) labels.append(label) texts.append(text.split()) return (labels, texts)<|docstring|>Read data from a file where the first token in the label and each line is a single example. :param f: `Union[str, IO]` The file to read from. :return: `Tuple[List[str], List[List[str]]]` The labels and text<|endoftext|>
e3c20c2d3547fcad8eb8528e9e3d2626361357a102641cf61de0f67d6b0edeb4
@exporter @str_file(w='w') def write_label_first_data(w, labels, texts): 'Read data to a file where the first token in the label and each line is a single example.\n\n :param w: `Union[str, IO]` The file to write the results in\n :param labels: `List[str]` The labels for the examples\n :param texts: `List[List[str]]` The text examples\n ' w.write('\n'.join((' '.join(chain((l,), t)) for (l, t) in zip(labels, texts))))
Read data to a file where the first token in the label and each line is a single example. :param w: `Union[str, IO]` The file to write the results in :param labels: `List[str]` The labels for the examples :param texts: `List[List[str]]` The text examples
python/baseline/utils.py
write_label_first_data
amyhemmeter/baseline
0
python
@exporter @str_file(w='w') def write_label_first_data(w, labels, texts): 'Read data to a file where the first token in the label and each line is a single example.\n\n :param w: `Union[str, IO]` The file to write the results in\n :param labels: `List[str]` The labels for the examples\n :param texts: `List[List[str]]` The text examples\n ' w.write('\n'.join((' '.join(chain((l,), t)) for (l, t) in zip(labels, texts))))
@exporter @str_file(w='w') def write_label_first_data(w, labels, texts): 'Read data to a file where the first token in the label and each line is a single example.\n\n :param w: `Union[str, IO]` The file to write the results in\n :param labels: `List[str]` The labels for the examples\n :param texts: `List[List[str]]` The text examples\n ' w.write('\n'.join((' '.join(chain((l,), t)) for (l, t) in zip(labels, texts))))<|docstring|>Read data to a file where the first token in the label and each line is a single example. :param w: `Union[str, IO]` The file to write the results in :param labels: `List[str]` The labels for the examples :param texts: `List[List[str]]` The text examples<|endoftext|>
111e5a933ed4260066058397e831d23571584ac4da0f56a87bf8758aa771647a
def is_valid_url(url: str) -> bool: 'Check is the provided URL is valid.' return (re.match(url_validator, url) is not None)
Check is the provided URL is valid.
libraries/ml-lab-py/lab_client/commons/request_utils.py
is_valid_url
Felipe-Renck/machine-learning-lab
55
python
def is_valid_url(url: str) -> bool: return (re.match(url_validator, url) is not None)
def is_valid_url(url: str) -> bool: return (re.match(url_validator, url) is not None)<|docstring|>Check is the provided URL is valid.<|endoftext|>
ff1ef9b4747d08f40e992834bd50cf40b6fdca044da6e4339d115ac77a7dbc86
def is_downloadable(url: str) -> bool: '\n Does the url is valid and contain a downloadable resource\n ' try: import requests h = requests.head(url, allow_redirects=True) header = h.headers content_type = header.get('content-type') if (content_type and ('html' in content_type.lower())): return False return True except: return False
Does the url is valid and contain a downloadable resource
libraries/ml-lab-py/lab_client/commons/request_utils.py
is_downloadable
Felipe-Renck/machine-learning-lab
55
python
def is_downloadable(url: str) -> bool: '\n \n ' try: import requests h = requests.head(url, allow_redirects=True) header = h.headers content_type = header.get('content-type') if (content_type and ('html' in content_type.lower())): return False return True except: return False
def is_downloadable(url: str) -> bool: '\n \n ' try: import requests h = requests.head(url, allow_redirects=True) header = h.headers content_type = header.get('content-type') if (content_type and ('html' in content_type.lower())): return False return True except: return False<|docstring|>Does the url is valid and contain a downloadable resource<|endoftext|>
a92e286a2a6e3340bb888a80936a91efa804cdb0956f52feb1ec0a74752f2210
def url2filename(url: str) -> str: "Return basename corresponding to url.\n >>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1'))\n fileÀ\n >>> print(url2filename('http://example.com/slash%2fname')) # '/' in name\n Traceback (most recent call last):\n ...\n ValueError\n " urlpath = urlsplit(url).path basename = posixpath.basename(unquote(urlpath)) if ((os.path.basename(basename) != basename) or (unquote(posixpath.basename(urlpath)) != basename)): raise ValueError return basename
Return basename corresponding to url. >>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1')) fileÀ >>> print(url2filename('http://example.com/slash%2fname')) # '/' in name Traceback (most recent call last): ... ValueError
libraries/ml-lab-py/lab_client/commons/request_utils.py
url2filename
Felipe-Renck/machine-learning-lab
55
python
def url2filename(url: str) -> str: "Return basename corresponding to url.\n >>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1'))\n fileÀ\n >>> print(url2filename('http://example.com/slash%2fname')) # '/' in name\n Traceback (most recent call last):\n ...\n ValueError\n " urlpath = urlsplit(url).path basename = posixpath.basename(unquote(urlpath)) if ((os.path.basename(basename) != basename) or (unquote(posixpath.basename(urlpath)) != basename)): raise ValueError return basename
def url2filename(url: str) -> str: "Return basename corresponding to url.\n >>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1'))\n fileÀ\n >>> print(url2filename('http://example.com/slash%2fname')) # '/' in name\n Traceback (most recent call last):\n ...\n ValueError\n " urlpath = urlsplit(url).path basename = posixpath.basename(unquote(urlpath)) if ((os.path.basename(basename) != basename) or (unquote(posixpath.basename(urlpath)) != basename)): raise ValueError return basename<|docstring|>Return basename corresponding to url. >>> print(url2filename('http://example.com/path/to/file%C3%80?opt=1')) fileÀ >>> print(url2filename('http://example.com/slash%2fname')) # '/' in name Traceback (most recent call last): ... ValueError<|endoftext|>
f27f6d17569e04b4295e7363520b915fa4f23f71d15b4f01c386b0ca3206e67a
def get_storages_images(self): '\n Function return all images on all storages\n ' images = [] storages = self.get_storages(only_actives=True) for storage in storages: stg = self.get_storage(storage) try: stg.refresh(0) except: pass for img in stg.listVolumes(): if img.endswith('.iso'): pass else: images.append(img) return images
Function return all images on all storages
vrtManager/create.py
get_storages_images
torjean/webvirtcloud
0
python
def get_storages_images(self): '\n \n ' images = [] storages = self.get_storages(only_actives=True) for storage in storages: stg = self.get_storage(storage) try: stg.refresh(0) except: pass for img in stg.listVolumes(): if img.endswith('.iso'): pass else: images.append(img) return images
def get_storages_images(self): '\n \n ' images = [] storages = self.get_storages(only_actives=True) for storage in storages: stg = self.get_storage(storage) try: stg.refresh(0) except: pass for img in stg.listVolumes(): if img.endswith('.iso'): pass else: images.append(img) return images<|docstring|>Function return all images on all storages<|endoftext|>
ac676b1df9e684e7bfc982b6fc4761e0c3c12df3265aa05b077acb45f1e29d13
def get_os_type(self): 'Get guest capabilities' return util.get_xml_path(self.get_cap_xml(), '/capabilities/guest/os_type')
Get guest capabilities
vrtManager/create.py
get_os_type
torjean/webvirtcloud
0
python
def get_os_type(self): return util.get_xml_path(self.get_cap_xml(), '/capabilities/guest/os_type')
def get_os_type(self): return util.get_xml_path(self.get_cap_xml(), '/capabilities/guest/os_type')<|docstring|>Get guest capabilities<|endoftext|>
fc8fb683d9fb8c511293772cc3e885b32b791ae9335a031cbeee62a31bad44de
def get_host_arch(self): 'Get guest capabilities' return util.get_xml_path(self.get_cap_xml(), '/capabilities/host/cpu/arch')
Get guest capabilities
vrtManager/create.py
get_host_arch
torjean/webvirtcloud
0
python
def get_host_arch(self): return util.get_xml_path(self.get_cap_xml(), '/capabilities/host/cpu/arch')
def get_host_arch(self): return util.get_xml_path(self.get_cap_xml(), '/capabilities/host/cpu/arch')<|docstring|>Get guest capabilities<|endoftext|>
b190450538f04b1907b31ce3ff30fa9ff492935350c871197754a77fe653c437
def create_instance(self, name, memory, vcpu, host_model, uuid, images, cache_mode, networks, virtio, listen_addr, nwfilter=None, video='cirrus', console_pass='random', mac=None, qemu_ga=False): '\n Create VM function\n ' memory = (int(memory) * 1024) if self.is_kvm_supported(): hypervisor_type = 'kvm' else: hypervisor_type = 'qemu' xml = ("\n <domain type='%s'>\n <name>%s</name>\n <description>None</description>\n <uuid>%s</uuid>\n <memory unit='KiB'>%s</memory>\n <vcpu>%s</vcpu>" % (hypervisor_type, name, uuid, memory, vcpu)) if host_model: xml += "<cpu mode='host-model'/>" xml += ("<os>\n <type arch='%s'>%s</type>\n <boot dev='hd'/>\n <boot dev='cdrom'/>\n <bootmenu enable='yes'/>\n </os>" % (self.get_host_arch(), self.get_os_type())) xml += '<features>\n <acpi/><apic/><pae/>\n </features>\n <clock offset="utc"/>\n <on_poweroff>destroy</on_poweroff>\n <on_reboot>restart</on_reboot>\n <on_crash>restart</on_crash>\n <devices>' vd_disk_letters = list(string.lowercase) fd_disk_letters = list(string.lowercase) hd_disk_letters = list(string.lowercase) sd_disk_letters = list(string.lowercase) add_cd = True for volume in images: stg = self.get_storage_by_vol_path(volume['path']) stg_type = util.get_xml_path(stg.XMLDesc(0), '/pool/@type') if (volume['device'] == 'cdrom'): add_cd = False if (stg_type == 'rbd'): (ceph_user, secret_uuid, ceph_hosts) = get_rbd_storage_data(stg) xml += ("<disk type='network' device='disk'>\n <driver name='qemu' type='%s' cache='%s'/>\n <auth username='%s'>\n <secret type='ceph' uuid='%s'/>\n </auth>\n <source protocol='rbd' name='%s'>" % (volume['type'], cache_mode, ceph_user, secret_uuid, volume['path'])) if isinstance(ceph_hosts, list): for host in ceph_hosts: if host.get('port'): xml += ("\n <host name='%s' port='%s'/>" % (host.get('name'), host.get('port'))) else: xml += ("\n <host name='%s'/>" % host.get('name')) xml += '\n </source>' else: xml += ("<disk type='file' device='%s'>\n <driver name='qemu' type='%s' cache='%s'/>\n <source 
file='%s'/>" % (volume['device'], volume['type'], cache_mode, volume['path'])) if (volume['bus'] == 'virtio'): xml += ("<target dev='vd%s' bus='%s'/>" % (vd_disk_letters.pop(0), volume['bus'])) elif (volume['bus'] == 'ide'): xml += ("<target dev='hd%s' bus='%s'/>" % (hd_disk_letters.pop(0), volume['bus'])) elif (volume['bus'] == 'fdc'): xml += ("<target dev='fd%s' bus='%s'/>" % (fd_disk_letters.pop(0), volume['bus'])) else: xml += ("<target dev='sd%s' bus='%s'/>" % (sd_disk_letters.pop(0), volume['bus'])) xml += '</disk>' if add_cd: xml += (" <disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>\n <source file=''/>\n <target dev='hd%s' bus='ide'/>\n <readonly/>\n </disk>" % (hd_disk_letters.pop(0),)) for net in networks.split(','): xml += "<interface type='network'>" if mac: xml += ("<mac address='%s'/>" % mac) xml += ("<source network='%s'/>" % net) if nwfilter: xml += ("<filterref filter='%s'/>" % nwfilter) if virtio: xml += "<model type='virtio'/>" xml += '</interface>' if (console_pass == 'random'): console_pass = (("passwd='" + util.randomPasswd()) + "'") elif (not (console_pass == '')): console_pass = (("passwd='" + console_pass) + "'") xml += (" <input type='mouse' bus='ps2'/>\n <input type='tablet' bus='usb'/>\n <graphics type='%s' port='-1' autoport='yes' %s listen='%s'/>\n <console type='pty'/> " % (QEMU_CONSOLE_DEFAULT_TYPE, console_pass, listen_addr)) if qemu_ga: xml += " <channel type='unix'>\n <target type='virtio' name='org.qemu.guest_agent.0'/>\n </channel>" xml += (" <video>\n <model type='%s'/>\n </video>\n <memballoon model='virtio'/>\n </devices>\n </domain>" % video) self._defineXML(xml)
Create VM function
vrtManager/create.py
create_instance
torjean/webvirtcloud
0
python
def create_instance(self, name, memory, vcpu, host_model, uuid, images, cache_mode, networks, virtio, listen_addr, nwfilter=None, video='cirrus', console_pass='random', mac=None, qemu_ga=False): '\n \n ' memory = (int(memory) * 1024) if self.is_kvm_supported(): hypervisor_type = 'kvm' else: hypervisor_type = 'qemu' xml = ("\n <domain type='%s'>\n <name>%s</name>\n <description>None</description>\n <uuid>%s</uuid>\n <memory unit='KiB'>%s</memory>\n <vcpu>%s</vcpu>" % (hypervisor_type, name, uuid, memory, vcpu)) if host_model: xml += "<cpu mode='host-model'/>" xml += ("<os>\n <type arch='%s'>%s</type>\n <boot dev='hd'/>\n <boot dev='cdrom'/>\n <bootmenu enable='yes'/>\n </os>" % (self.get_host_arch(), self.get_os_type())) xml += '<features>\n <acpi/><apic/><pae/>\n </features>\n <clock offset="utc"/>\n <on_poweroff>destroy</on_poweroff>\n <on_reboot>restart</on_reboot>\n <on_crash>restart</on_crash>\n <devices>' vd_disk_letters = list(string.lowercase) fd_disk_letters = list(string.lowercase) hd_disk_letters = list(string.lowercase) sd_disk_letters = list(string.lowercase) add_cd = True for volume in images: stg = self.get_storage_by_vol_path(volume['path']) stg_type = util.get_xml_path(stg.XMLDesc(0), '/pool/@type') if (volume['device'] == 'cdrom'): add_cd = False if (stg_type == 'rbd'): (ceph_user, secret_uuid, ceph_hosts) = get_rbd_storage_data(stg) xml += ("<disk type='network' device='disk'>\n <driver name='qemu' type='%s' cache='%s'/>\n <auth username='%s'>\n <secret type='ceph' uuid='%s'/>\n </auth>\n <source protocol='rbd' name='%s'>" % (volume['type'], cache_mode, ceph_user, secret_uuid, volume['path'])) if isinstance(ceph_hosts, list): for host in ceph_hosts: if host.get('port'): xml += ("\n <host name='%s' port='%s'/>" % (host.get('name'), host.get('port'))) else: xml += ("\n <host name='%s'/>" % host.get('name')) xml += '\n </source>' else: xml += ("<disk type='file' device='%s'>\n <driver name='qemu' type='%s' cache='%s'/>\n <source file='%s'/>" % 
(volume['device'], volume['type'], cache_mode, volume['path'])) if (volume['bus'] == 'virtio'): xml += ("<target dev='vd%s' bus='%s'/>" % (vd_disk_letters.pop(0), volume['bus'])) elif (volume['bus'] == 'ide'): xml += ("<target dev='hd%s' bus='%s'/>" % (hd_disk_letters.pop(0), volume['bus'])) elif (volume['bus'] == 'fdc'): xml += ("<target dev='fd%s' bus='%s'/>" % (fd_disk_letters.pop(0), volume['bus'])) else: xml += ("<target dev='sd%s' bus='%s'/>" % (sd_disk_letters.pop(0), volume['bus'])) xml += '</disk>' if add_cd: xml += (" <disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>\n <source file=/>\n <target dev='hd%s' bus='ide'/>\n <readonly/>\n </disk>" % (hd_disk_letters.pop(0),)) for net in networks.split(','): xml += "<interface type='network'>" if mac: xml += ("<mac address='%s'/>" % mac) xml += ("<source network='%s'/>" % net) if nwfilter: xml += ("<filterref filter='%s'/>" % nwfilter) if virtio: xml += "<model type='virtio'/>" xml += '</interface>' if (console_pass == 'random'): console_pass = (("passwd='" + util.randomPasswd()) + "'") elif (not (console_pass == )): console_pass = (("passwd='" + console_pass) + "'") xml += (" <input type='mouse' bus='ps2'/>\n <input type='tablet' bus='usb'/>\n <graphics type='%s' port='-1' autoport='yes' %s listen='%s'/>\n <console type='pty'/> " % (QEMU_CONSOLE_DEFAULT_TYPE, console_pass, listen_addr)) if qemu_ga: xml += " <channel type='unix'>\n <target type='virtio' name='org.qemu.guest_agent.0'/>\n </channel>" xml += (" <video>\n <model type='%s'/>\n </video>\n <memballoon model='virtio'/>\n </devices>\n </domain>" % video) self._defineXML(xml)
def create_instance(self, name, memory, vcpu, host_model, uuid, images, cache_mode, networks, virtio, listen_addr, nwfilter=None, video='cirrus', console_pass='random', mac=None, qemu_ga=False): '\n \n ' memory = (int(memory) * 1024) if self.is_kvm_supported(): hypervisor_type = 'kvm' else: hypervisor_type = 'qemu' xml = ("\n <domain type='%s'>\n <name>%s</name>\n <description>None</description>\n <uuid>%s</uuid>\n <memory unit='KiB'>%s</memory>\n <vcpu>%s</vcpu>" % (hypervisor_type, name, uuid, memory, vcpu)) if host_model: xml += "<cpu mode='host-model'/>" xml += ("<os>\n <type arch='%s'>%s</type>\n <boot dev='hd'/>\n <boot dev='cdrom'/>\n <bootmenu enable='yes'/>\n </os>" % (self.get_host_arch(), self.get_os_type())) xml += '<features>\n <acpi/><apic/><pae/>\n </features>\n <clock offset="utc"/>\n <on_poweroff>destroy</on_poweroff>\n <on_reboot>restart</on_reboot>\n <on_crash>restart</on_crash>\n <devices>' vd_disk_letters = list(string.lowercase) fd_disk_letters = list(string.lowercase) hd_disk_letters = list(string.lowercase) sd_disk_letters = list(string.lowercase) add_cd = True for volume in images: stg = self.get_storage_by_vol_path(volume['path']) stg_type = util.get_xml_path(stg.XMLDesc(0), '/pool/@type') if (volume['device'] == 'cdrom'): add_cd = False if (stg_type == 'rbd'): (ceph_user, secret_uuid, ceph_hosts) = get_rbd_storage_data(stg) xml += ("<disk type='network' device='disk'>\n <driver name='qemu' type='%s' cache='%s'/>\n <auth username='%s'>\n <secret type='ceph' uuid='%s'/>\n </auth>\n <source protocol='rbd' name='%s'>" % (volume['type'], cache_mode, ceph_user, secret_uuid, volume['path'])) if isinstance(ceph_hosts, list): for host in ceph_hosts: if host.get('port'): xml += ("\n <host name='%s' port='%s'/>" % (host.get('name'), host.get('port'))) else: xml += ("\n <host name='%s'/>" % host.get('name')) xml += '\n </source>' else: xml += ("<disk type='file' device='%s'>\n <driver name='qemu' type='%s' cache='%s'/>\n <source file='%s'/>" % 
(volume['device'], volume['type'], cache_mode, volume['path'])) if (volume['bus'] == 'virtio'): xml += ("<target dev='vd%s' bus='%s'/>" % (vd_disk_letters.pop(0), volume['bus'])) elif (volume['bus'] == 'ide'): xml += ("<target dev='hd%s' bus='%s'/>" % (hd_disk_letters.pop(0), volume['bus'])) elif (volume['bus'] == 'fdc'): xml += ("<target dev='fd%s' bus='%s'/>" % (fd_disk_letters.pop(0), volume['bus'])) else: xml += ("<target dev='sd%s' bus='%s'/>" % (sd_disk_letters.pop(0), volume['bus'])) xml += '</disk>' if add_cd: xml += (" <disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>\n <source file=/>\n <target dev='hd%s' bus='ide'/>\n <readonly/>\n </disk>" % (hd_disk_letters.pop(0),)) for net in networks.split(','): xml += "<interface type='network'>" if mac: xml += ("<mac address='%s'/>" % mac) xml += ("<source network='%s'/>" % net) if nwfilter: xml += ("<filterref filter='%s'/>" % nwfilter) if virtio: xml += "<model type='virtio'/>" xml += '</interface>' if (console_pass == 'random'): console_pass = (("passwd='" + util.randomPasswd()) + "'") elif (not (console_pass == )): console_pass = (("passwd='" + console_pass) + "'") xml += (" <input type='mouse' bus='ps2'/>\n <input type='tablet' bus='usb'/>\n <graphics type='%s' port='-1' autoport='yes' %s listen='%s'/>\n <console type='pty'/> " % (QEMU_CONSOLE_DEFAULT_TYPE, console_pass, listen_addr)) if qemu_ga: xml += " <channel type='unix'>\n <target type='virtio' name='org.qemu.guest_agent.0'/>\n </channel>" xml += (" <video>\n <model type='%s'/>\n </video>\n <memballoon model='virtio'/>\n </devices>\n </domain>" % video) self._defineXML(xml)<|docstring|>Create VM function<|endoftext|>
6a7c4ee0795abd0b4e74b5a76e251d463bfff482f4ddac7249891535f8a45ff6
def build_attention_model(params, src_vocab, trg_vocab, source_placeholders, target_placeholders, beam_size=1, mode=MODE.TRAIN, burn_in_step=100000, increment_step=10000, teacher_rate=1.0, max_step=100): "\n Build a model.\n\n :param params: dict.\n {encoder: {rnn_cell: {},\n ...},\n decoder: {rnn_cell: {},\n ...}}\n for example:\n {'encoder': {'rnn_cell': {'state_size': 512,\n 'cell_name': 'BasicLSTMCell',\n 'num_layers': 2,\n 'input_keep_prob': 1.0,\n 'output_keep_prob': 1.0},\n 'attention_key_size': attention_size},\n 'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell',\n 'state_size': 512,\n 'num_layers': 1,\n 'input_keep_prob': 1.0,\n 'output_keep_prob': 1.0},\n 'trg_vocab_size': trg_vocab_size}}\n :param src_vocab: Vocab of source symbols.\n :param trg_vocab: Vocab of target symbols.\n :param source_ids: placeholder\n :param source_seq_length: placeholder\n :param target_ids: placeholder\n :param target_seq_length: placeholder\n :param beam_size: used in beam inference\n :param mode:\n :return:\n " if (mode != MODE.TRAIN): params = sq.disable_dropout(params) tf.logging.info(json.dumps(params, indent=4)) decoder_params = params['decoder'] source_ids = source_placeholders['src'] source_seq_length = source_placeholders['src_len'] source_sample_matrix = source_placeholders['src_sample_matrix'] source_word_seq_length = source_placeholders['src_word_len'] target_ids = target_placeholders['trg'] target_seq_length = target_placeholders['trg_len'] source_char_embedding_table = sq.LookUpOp(src_vocab.vocab_size, src_vocab.embedding_dim, name='source') source_char_embedded = source_char_embedding_table(source_ids) char_encoder = sq.StackRNNEncoder(params['char_encoder'], params['attention_key_size']['char'], name='char_rnn', mode=mode) char_encoded_representation = char_encoder.encode(source_char_embedded, source_seq_length) char_encoder_outputs = char_encoded_representation.outputs char_encoder_outputs = tf.transpose(char_encoder_outputs, perm=(1, 0, 2)) 
sampled_word_embedded = tf.matmul(source_sample_matrix, char_encoder_outputs) source_embedded = tf.transpose(sampled_word_embedded, perm=(1, 0, 2)) encoder = sq.StackBidirectionalRNNEncoder(params['encoder'], params['attention_key_size']['word'], name='stack_rnn', mode=mode) encoded_representation = encoder.encode(source_embedded, source_word_seq_length) attention_keys = encoded_representation.attention_keys attention_values = encoded_representation.attention_values attention_length = encoded_representation.attention_length encoder_final_states_bw = encoded_representation.final_state[(- 1)][(- 1)].h if (mode == MODE.RL): tf.logging.info('BUILDING RL TRAIN FEEDBACK......') dynamical_batch_size = tf.shape(attention_keys)[1] feedback = sq.RLTrainingFeedBack(target_ids, target_seq_length, trg_vocab, dynamical_batch_size, burn_in_step=burn_in_step, increment_step=increment_step, max_step=max_step) elif (mode == MODE.TRAIN): tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE......'.format(teacher_rate)) feedback = sq.TrainingFeedBack(target_ids, target_seq_length, trg_vocab, teacher_rate, max_step=max_step) elif (mode == MODE.EVAL): tf.logging.info('BUILDING EVAL FEEDBACK ......') feedback = sq.TrainingFeedBack(target_ids, target_seq_length, trg_vocab, 0.0, max_step=max_step) else: tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}......'.format(beam_size)) infer_key_size = attention_keys.get_shape().as_list()[(- 1)] infer_value_size = attention_values.get_shape().as_list()[(- 1)] infer_states_bw_shape = encoder_final_states_bw.get_shape().as_list()[(- 1)] encoder_final_states_bw = tf.reshape(tf.tile(encoder_final_states_bw, [1, beam_size]), [(- 1), infer_states_bw_shape]) if TIME_MAJOR: dynamical_batch_size = tf.shape(attention_keys)[1] final_key_shape = [(- 1), (dynamical_batch_size * beam_size), infer_key_size] final_value_shape = [(- 1), (dynamical_batch_size * beam_size), infer_value_size] attention_keys = tf.reshape(tf.tile(attention_keys, [1, 1, 
beam_size]), final_key_shape) attention_values = tf.reshape(tf.tile(attention_values, [1, 1, beam_size]), final_value_shape) else: dynamical_batch_size = tf.shape(attention_keys)[0] final_key_shape = [(dynamical_batch_size * beam_size), (- 1), infer_key_size] final_value_shape = [(dynamical_batch_size * beam_size), (- 1), infer_value_size] attention_keys = tf.reshape(tf.tile(attention_keys, [1, beam_size, 1]), final_key_shape) attention_values = tf.reshape(tf.tile(attention_values, [1, beam_size, 1]), final_value_shape) attention_length = tf.reshape(tf.transpose(tf.tile([attention_length], [beam_size, 1])), [(- 1)]) feedback = sq.BeamFeedBack(trg_vocab, beam_size, dynamical_batch_size, max_step=max_step) encoder_decoder_bridge = EncoderDecoderBridge(encoder_final_states_bw.get_shape().as_list()[(- 1)], decoder_params['rnn_cell']) decoder_state_size = decoder_params['rnn_cell']['state_size'] attention = sq.Attention(decoder_state_size, attention_keys, attention_values, attention_length) context_size = attention.context_size with tf.variable_scope('logits_func'): attention_mix = LinearOp(((context_size + feedback.embedding_dim) + decoder_state_size), decoder_state_size, name='attention_mix') attention_mix_middle = LinearOp(decoder_state_size, (decoder_state_size // 2), name='attention_mix_middle') logits_trans = LinearOp((decoder_state_size // 2), feedback.vocab_size, name='logits_trans') logits_func = (lambda _softmax: logits_trans(tf.nn.relu(attention_mix_middle(tf.nn.relu(attention_mix(_softmax)))))) decoder = sq.AttentionRNNDecoder(decoder_params, attention, feedback, logits_func=logits_func, init_state=encoder_decoder_bridge(encoder_final_states_bw), mode=mode) (decoder_output, decoder_final_state) = sq.dynamic_decode(decoder, swap_memory=True, scope='decoder') if ((mode == MODE.EVAL) or (mode == MODE.INFER)): return (decoder_output, decoder_final_state) if (not TIME_MAJOR): ground_truth_ids = tf.transpose(target_ids, [1, 0]) else: ground_truth_ids = target_ids 
if (mode == MODE.RL): global_step_tensor = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='global_step')[0] rl_time_steps = tf.floordiv(tf.maximum((global_step_tensor - burn_in_step), 0), increment_step) start_rl_step = (target_seq_length - rl_time_steps) baseline_states = tf.stop_gradient(decoder_output.baseline_states) predict_ids = tf.stop_gradient(decoder_output.predicted_ids) ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps, 0), (lambda : predict_ids), (lambda : ground_truth_ids)) (reward, sequence_length) = tf.py_func(func=_py_func, inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id], Tout=[tf.float32, tf.int32], name='reward') sequence_length.set_shape((None,)) (total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted) = rl_sequence_loss(logits=decoder_output.logits, predict_ids=predict_ids, sequence_length=sequence_length, baseline_states=baseline_states, start_rl_step=start_rl_step, reward=reward) return (decoder_output, total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted) else: total_loss_avg = cross_entropy_sequence_loss(logits=decoder_output.logits, targets=ground_truth_ids, sequence_length=target_seq_length) return (decoder_output, total_loss_avg, total_loss_avg, tf.to_float(0.0), tf.to_float(0.0))
Build a model. :param params: dict. {encoder: {rnn_cell: {}, ...}, decoder: {rnn_cell: {}, ...}} for example: {'encoder': {'rnn_cell': {'state_size': 512, 'cell_name': 'BasicLSTMCell', 'num_layers': 2, 'input_keep_prob': 1.0, 'output_keep_prob': 1.0}, 'attention_key_size': attention_size}, 'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell', 'state_size': 512, 'num_layers': 1, 'input_keep_prob': 1.0, 'output_keep_prob': 1.0}, 'trg_vocab_size': trg_vocab_size}} :param src_vocab: Vocab of source symbols. :param trg_vocab: Vocab of target symbols. :param source_ids: placeholder :param source_seq_length: placeholder :param target_ids: placeholder :param target_seq_length: placeholder :param beam_size: used in beam inference :param mode: :return:
char_nmt/build_model.py
build_attention_model
SwordYork/sequencing
45
python
def build_attention_model(params, src_vocab, trg_vocab, source_placeholders, target_placeholders, beam_size=1, mode=MODE.TRAIN, burn_in_step=100000, increment_step=10000, teacher_rate=1.0, max_step=100): "\n Build a model.\n\n :param params: dict.\n {encoder: {rnn_cell: {},\n ...},\n decoder: {rnn_cell: {},\n ...}}\n for example:\n {'encoder': {'rnn_cell': {'state_size': 512,\n 'cell_name': 'BasicLSTMCell',\n 'num_layers': 2,\n 'input_keep_prob': 1.0,\n 'output_keep_prob': 1.0},\n 'attention_key_size': attention_size},\n 'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell',\n 'state_size': 512,\n 'num_layers': 1,\n 'input_keep_prob': 1.0,\n 'output_keep_prob': 1.0},\n 'trg_vocab_size': trg_vocab_size}}\n :param src_vocab: Vocab of source symbols.\n :param trg_vocab: Vocab of target symbols.\n :param source_ids: placeholder\n :param source_seq_length: placeholder\n :param target_ids: placeholder\n :param target_seq_length: placeholder\n :param beam_size: used in beam inference\n :param mode:\n :return:\n " if (mode != MODE.TRAIN): params = sq.disable_dropout(params) tf.logging.info(json.dumps(params, indent=4)) decoder_params = params['decoder'] source_ids = source_placeholders['src'] source_seq_length = source_placeholders['src_len'] source_sample_matrix = source_placeholders['src_sample_matrix'] source_word_seq_length = source_placeholders['src_word_len'] target_ids = target_placeholders['trg'] target_seq_length = target_placeholders['trg_len'] source_char_embedding_table = sq.LookUpOp(src_vocab.vocab_size, src_vocab.embedding_dim, name='source') source_char_embedded = source_char_embedding_table(source_ids) char_encoder = sq.StackRNNEncoder(params['char_encoder'], params['attention_key_size']['char'], name='char_rnn', mode=mode) char_encoded_representation = char_encoder.encode(source_char_embedded, source_seq_length) char_encoder_outputs = char_encoded_representation.outputs char_encoder_outputs = tf.transpose(char_encoder_outputs, perm=(1, 0, 2)) 
sampled_word_embedded = tf.matmul(source_sample_matrix, char_encoder_outputs) source_embedded = tf.transpose(sampled_word_embedded, perm=(1, 0, 2)) encoder = sq.StackBidirectionalRNNEncoder(params['encoder'], params['attention_key_size']['word'], name='stack_rnn', mode=mode) encoded_representation = encoder.encode(source_embedded, source_word_seq_length) attention_keys = encoded_representation.attention_keys attention_values = encoded_representation.attention_values attention_length = encoded_representation.attention_length encoder_final_states_bw = encoded_representation.final_state[(- 1)][(- 1)].h if (mode == MODE.RL): tf.logging.info('BUILDING RL TRAIN FEEDBACK......') dynamical_batch_size = tf.shape(attention_keys)[1] feedback = sq.RLTrainingFeedBack(target_ids, target_seq_length, trg_vocab, dynamical_batch_size, burn_in_step=burn_in_step, increment_step=increment_step, max_step=max_step) elif (mode == MODE.TRAIN): tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE......'.format(teacher_rate)) feedback = sq.TrainingFeedBack(target_ids, target_seq_length, trg_vocab, teacher_rate, max_step=max_step) elif (mode == MODE.EVAL): tf.logging.info('BUILDING EVAL FEEDBACK ......') feedback = sq.TrainingFeedBack(target_ids, target_seq_length, trg_vocab, 0.0, max_step=max_step) else: tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}......'.format(beam_size)) infer_key_size = attention_keys.get_shape().as_list()[(- 1)] infer_value_size = attention_values.get_shape().as_list()[(- 1)] infer_states_bw_shape = encoder_final_states_bw.get_shape().as_list()[(- 1)] encoder_final_states_bw = tf.reshape(tf.tile(encoder_final_states_bw, [1, beam_size]), [(- 1), infer_states_bw_shape]) if TIME_MAJOR: dynamical_batch_size = tf.shape(attention_keys)[1] final_key_shape = [(- 1), (dynamical_batch_size * beam_size), infer_key_size] final_value_shape = [(- 1), (dynamical_batch_size * beam_size), infer_value_size] attention_keys = tf.reshape(tf.tile(attention_keys, [1, 1, 
beam_size]), final_key_shape) attention_values = tf.reshape(tf.tile(attention_values, [1, 1, beam_size]), final_value_shape) else: dynamical_batch_size = tf.shape(attention_keys)[0] final_key_shape = [(dynamical_batch_size * beam_size), (- 1), infer_key_size] final_value_shape = [(dynamical_batch_size * beam_size), (- 1), infer_value_size] attention_keys = tf.reshape(tf.tile(attention_keys, [1, beam_size, 1]), final_key_shape) attention_values = tf.reshape(tf.tile(attention_values, [1, beam_size, 1]), final_value_shape) attention_length = tf.reshape(tf.transpose(tf.tile([attention_length], [beam_size, 1])), [(- 1)]) feedback = sq.BeamFeedBack(trg_vocab, beam_size, dynamical_batch_size, max_step=max_step) encoder_decoder_bridge = EncoderDecoderBridge(encoder_final_states_bw.get_shape().as_list()[(- 1)], decoder_params['rnn_cell']) decoder_state_size = decoder_params['rnn_cell']['state_size'] attention = sq.Attention(decoder_state_size, attention_keys, attention_values, attention_length) context_size = attention.context_size with tf.variable_scope('logits_func'): attention_mix = LinearOp(((context_size + feedback.embedding_dim) + decoder_state_size), decoder_state_size, name='attention_mix') attention_mix_middle = LinearOp(decoder_state_size, (decoder_state_size // 2), name='attention_mix_middle') logits_trans = LinearOp((decoder_state_size // 2), feedback.vocab_size, name='logits_trans') logits_func = (lambda _softmax: logits_trans(tf.nn.relu(attention_mix_middle(tf.nn.relu(attention_mix(_softmax)))))) decoder = sq.AttentionRNNDecoder(decoder_params, attention, feedback, logits_func=logits_func, init_state=encoder_decoder_bridge(encoder_final_states_bw), mode=mode) (decoder_output, decoder_final_state) = sq.dynamic_decode(decoder, swap_memory=True, scope='decoder') if ((mode == MODE.EVAL) or (mode == MODE.INFER)): return (decoder_output, decoder_final_state) if (not TIME_MAJOR): ground_truth_ids = tf.transpose(target_ids, [1, 0]) else: ground_truth_ids = target_ids 
if (mode == MODE.RL): global_step_tensor = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='global_step')[0] rl_time_steps = tf.floordiv(tf.maximum((global_step_tensor - burn_in_step), 0), increment_step) start_rl_step = (target_seq_length - rl_time_steps) baseline_states = tf.stop_gradient(decoder_output.baseline_states) predict_ids = tf.stop_gradient(decoder_output.predicted_ids) ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps, 0), (lambda : predict_ids), (lambda : ground_truth_ids)) (reward, sequence_length) = tf.py_func(func=_py_func, inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id], Tout=[tf.float32, tf.int32], name='reward') sequence_length.set_shape((None,)) (total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted) = rl_sequence_loss(logits=decoder_output.logits, predict_ids=predict_ids, sequence_length=sequence_length, baseline_states=baseline_states, start_rl_step=start_rl_step, reward=reward) return (decoder_output, total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted) else: total_loss_avg = cross_entropy_sequence_loss(logits=decoder_output.logits, targets=ground_truth_ids, sequence_length=target_seq_length) return (decoder_output, total_loss_avg, total_loss_avg, tf.to_float(0.0), tf.to_float(0.0))
def build_attention_model(params, src_vocab, trg_vocab, source_placeholders, target_placeholders, beam_size=1, mode=MODE.TRAIN, burn_in_step=100000, increment_step=10000, teacher_rate=1.0, max_step=100): "\n Build a model.\n\n :param params: dict.\n {encoder: {rnn_cell: {},\n ...},\n decoder: {rnn_cell: {},\n ...}}\n for example:\n {'encoder': {'rnn_cell': {'state_size': 512,\n 'cell_name': 'BasicLSTMCell',\n 'num_layers': 2,\n 'input_keep_prob': 1.0,\n 'output_keep_prob': 1.0},\n 'attention_key_size': attention_size},\n 'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell',\n 'state_size': 512,\n 'num_layers': 1,\n 'input_keep_prob': 1.0,\n 'output_keep_prob': 1.0},\n 'trg_vocab_size': trg_vocab_size}}\n :param src_vocab: Vocab of source symbols.\n :param trg_vocab: Vocab of target symbols.\n :param source_ids: placeholder\n :param source_seq_length: placeholder\n :param target_ids: placeholder\n :param target_seq_length: placeholder\n :param beam_size: used in beam inference\n :param mode:\n :return:\n " if (mode != MODE.TRAIN): params = sq.disable_dropout(params) tf.logging.info(json.dumps(params, indent=4)) decoder_params = params['decoder'] source_ids = source_placeholders['src'] source_seq_length = source_placeholders['src_len'] source_sample_matrix = source_placeholders['src_sample_matrix'] source_word_seq_length = source_placeholders['src_word_len'] target_ids = target_placeholders['trg'] target_seq_length = target_placeholders['trg_len'] source_char_embedding_table = sq.LookUpOp(src_vocab.vocab_size, src_vocab.embedding_dim, name='source') source_char_embedded = source_char_embedding_table(source_ids) char_encoder = sq.StackRNNEncoder(params['char_encoder'], params['attention_key_size']['char'], name='char_rnn', mode=mode) char_encoded_representation = char_encoder.encode(source_char_embedded, source_seq_length) char_encoder_outputs = char_encoded_representation.outputs char_encoder_outputs = tf.transpose(char_encoder_outputs, perm=(1, 0, 2)) 
sampled_word_embedded = tf.matmul(source_sample_matrix, char_encoder_outputs) source_embedded = tf.transpose(sampled_word_embedded, perm=(1, 0, 2)) encoder = sq.StackBidirectionalRNNEncoder(params['encoder'], params['attention_key_size']['word'], name='stack_rnn', mode=mode) encoded_representation = encoder.encode(source_embedded, source_word_seq_length) attention_keys = encoded_representation.attention_keys attention_values = encoded_representation.attention_values attention_length = encoded_representation.attention_length encoder_final_states_bw = encoded_representation.final_state[(- 1)][(- 1)].h if (mode == MODE.RL): tf.logging.info('BUILDING RL TRAIN FEEDBACK......') dynamical_batch_size = tf.shape(attention_keys)[1] feedback = sq.RLTrainingFeedBack(target_ids, target_seq_length, trg_vocab, dynamical_batch_size, burn_in_step=burn_in_step, increment_step=increment_step, max_step=max_step) elif (mode == MODE.TRAIN): tf.logging.info('BUILDING TRAIN FEEDBACK WITH {} TEACHER_RATE......'.format(teacher_rate)) feedback = sq.TrainingFeedBack(target_ids, target_seq_length, trg_vocab, teacher_rate, max_step=max_step) elif (mode == MODE.EVAL): tf.logging.info('BUILDING EVAL FEEDBACK ......') feedback = sq.TrainingFeedBack(target_ids, target_seq_length, trg_vocab, 0.0, max_step=max_step) else: tf.logging.info('BUILDING INFER FEEDBACK WITH BEAM_SIZE {}......'.format(beam_size)) infer_key_size = attention_keys.get_shape().as_list()[(- 1)] infer_value_size = attention_values.get_shape().as_list()[(- 1)] infer_states_bw_shape = encoder_final_states_bw.get_shape().as_list()[(- 1)] encoder_final_states_bw = tf.reshape(tf.tile(encoder_final_states_bw, [1, beam_size]), [(- 1), infer_states_bw_shape]) if TIME_MAJOR: dynamical_batch_size = tf.shape(attention_keys)[1] final_key_shape = [(- 1), (dynamical_batch_size * beam_size), infer_key_size] final_value_shape = [(- 1), (dynamical_batch_size * beam_size), infer_value_size] attention_keys = tf.reshape(tf.tile(attention_keys, [1, 1, 
beam_size]), final_key_shape) attention_values = tf.reshape(tf.tile(attention_values, [1, 1, beam_size]), final_value_shape) else: dynamical_batch_size = tf.shape(attention_keys)[0] final_key_shape = [(dynamical_batch_size * beam_size), (- 1), infer_key_size] final_value_shape = [(dynamical_batch_size * beam_size), (- 1), infer_value_size] attention_keys = tf.reshape(tf.tile(attention_keys, [1, beam_size, 1]), final_key_shape) attention_values = tf.reshape(tf.tile(attention_values, [1, beam_size, 1]), final_value_shape) attention_length = tf.reshape(tf.transpose(tf.tile([attention_length], [beam_size, 1])), [(- 1)]) feedback = sq.BeamFeedBack(trg_vocab, beam_size, dynamical_batch_size, max_step=max_step) encoder_decoder_bridge = EncoderDecoderBridge(encoder_final_states_bw.get_shape().as_list()[(- 1)], decoder_params['rnn_cell']) decoder_state_size = decoder_params['rnn_cell']['state_size'] attention = sq.Attention(decoder_state_size, attention_keys, attention_values, attention_length) context_size = attention.context_size with tf.variable_scope('logits_func'): attention_mix = LinearOp(((context_size + feedback.embedding_dim) + decoder_state_size), decoder_state_size, name='attention_mix') attention_mix_middle = LinearOp(decoder_state_size, (decoder_state_size // 2), name='attention_mix_middle') logits_trans = LinearOp((decoder_state_size // 2), feedback.vocab_size, name='logits_trans') logits_func = (lambda _softmax: logits_trans(tf.nn.relu(attention_mix_middle(tf.nn.relu(attention_mix(_softmax)))))) decoder = sq.AttentionRNNDecoder(decoder_params, attention, feedback, logits_func=logits_func, init_state=encoder_decoder_bridge(encoder_final_states_bw), mode=mode) (decoder_output, decoder_final_state) = sq.dynamic_decode(decoder, swap_memory=True, scope='decoder') if ((mode == MODE.EVAL) or (mode == MODE.INFER)): return (decoder_output, decoder_final_state) if (not TIME_MAJOR): ground_truth_ids = tf.transpose(target_ids, [1, 0]) else: ground_truth_ids = target_ids 
if (mode == MODE.RL): global_step_tensor = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='global_step')[0] rl_time_steps = tf.floordiv(tf.maximum((global_step_tensor - burn_in_step), 0), increment_step) start_rl_step = (target_seq_length - rl_time_steps) baseline_states = tf.stop_gradient(decoder_output.baseline_states) predict_ids = tf.stop_gradient(decoder_output.predicted_ids) ground_or_predict_ids = tf.cond(tf.greater(rl_time_steps, 0), (lambda : predict_ids), (lambda : ground_truth_ids)) (reward, sequence_length) = tf.py_func(func=_py_func, inp=[ground_or_predict_ids, ground_truth_ids, trg_vocab.eos_id], Tout=[tf.float32, tf.int32], name='reward') sequence_length.set_shape((None,)) (total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted) = rl_sequence_loss(logits=decoder_output.logits, predict_ids=predict_ids, sequence_length=sequence_length, baseline_states=baseline_states, start_rl_step=start_rl_step, reward=reward) return (decoder_output, total_loss_avg, entropy_loss_avg, reward_loss_rmse, reward_predicted) else: total_loss_avg = cross_entropy_sequence_loss(logits=decoder_output.logits, targets=ground_truth_ids, sequence_length=target_seq_length) return (decoder_output, total_loss_avg, total_loss_avg, tf.to_float(0.0), tf.to_float(0.0))<|docstring|>Build a model. :param params: dict. {encoder: {rnn_cell: {}, ...}, decoder: {rnn_cell: {}, ...}} for example: {'encoder': {'rnn_cell': {'state_size': 512, 'cell_name': 'BasicLSTMCell', 'num_layers': 2, 'input_keep_prob': 1.0, 'output_keep_prob': 1.0}, 'attention_key_size': attention_size}, 'decoder': {'rnn_cell': {'cell_name': 'BasicLSTMCell', 'state_size': 512, 'num_layers': 1, 'input_keep_prob': 1.0, 'output_keep_prob': 1.0}, 'trg_vocab_size': trg_vocab_size}} :param src_vocab: Vocab of source symbols. :param trg_vocab: Vocab of target symbols. 
:param source_ids: placeholder :param source_seq_length: placeholder :param target_ids: placeholder :param target_seq_length: placeholder :param beam_size: used in beam inference :param mode: :return:<|endoftext|>
ef0403a399d0d074e8b238c1c1d07b0f8b25eacaf110e1679ad58eedf2de42d4
def plot_base(strip_list, ax=None, *, cell_size=4.5): 'Make a nice plot of the strip layout.' if (ax is None): import matplotlib.pyplot as plt (fig, ax) = plt.subplots() cmap = mcm.get_cmap('gray') norm = mcolors.Normalize(0, 100) cells = {} labels = {} for strip in strip_list: cells[strip] = [] pair = single_strip_transform_factory(*astuple(strip)) for (j, ti_frac) in enumerate(strip.ti_fractions): color = cmap(norm(ti_frac)) (x, y) = pair.forward(ti_frac, strip.temperature, strip.annealing_time, strip.thickness) d = (strip.start_distance - (j * cell_size)) rect = mpatches.Rectangle(((x - (cell_size / 2)), (y - (cell_size / 2))), cell_size, cell_size, color=color, zorder=(- 1)) ax.add_patch(rect) cells[strip].append(ax.text(x, y, f'{ti_frac}', ha='center', va='center', color='w')) cells[strip].append(rect) d = ((cell_size * (len(strip.ti_fractions) - 0.5)) - strip.start_distance) labels[strip] = ax.annotate(f'''{strip.temperature}°C {strip.annealing_time}s''', xy=(((strip.reference_x - d) - (cell_size / 2)), (strip.reference_y - (np.sin(strip.angle) * d))), xytext=(10, 0), textcoords='offset points', va='center', ha='left', clip_on=False) ax.relim() ax.autoscale() ax.figure.tight_layout() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.invert_yaxis() ax.invert_xaxis() ax.set_aspect('equal')
Make a nice plot of the strip layout.
ae_gpcam/bluesky_config/scripts/live_status.py
plot_base
NSLS-II-XPD/ae-gpcam
0
python
def plot_base(strip_list, ax=None, *, cell_size=4.5): if (ax is None): import matplotlib.pyplot as plt (fig, ax) = plt.subplots() cmap = mcm.get_cmap('gray') norm = mcolors.Normalize(0, 100) cells = {} labels = {} for strip in strip_list: cells[strip] = [] pair = single_strip_transform_factory(*astuple(strip)) for (j, ti_frac) in enumerate(strip.ti_fractions): color = cmap(norm(ti_frac)) (x, y) = pair.forward(ti_frac, strip.temperature, strip.annealing_time, strip.thickness) d = (strip.start_distance - (j * cell_size)) rect = mpatches.Rectangle(((x - (cell_size / 2)), (y - (cell_size / 2))), cell_size, cell_size, color=color, zorder=(- 1)) ax.add_patch(rect) cells[strip].append(ax.text(x, y, f'{ti_frac}', ha='center', va='center', color='w')) cells[strip].append(rect) d = ((cell_size * (len(strip.ti_fractions) - 0.5)) - strip.start_distance) labels[strip] = ax.annotate(f'{strip.temperature}°C {strip.annealing_time}s', xy=(((strip.reference_x - d) - (cell_size / 2)), (strip.reference_y - (np.sin(strip.angle) * d))), xytext=(10, 0), textcoords='offset points', va='center', ha='left', clip_on=False) ax.relim() ax.autoscale() ax.figure.tight_layout() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.invert_yaxis() ax.invert_xaxis() ax.set_aspect('equal')
def plot_base(strip_list, ax=None, *, cell_size=4.5): if (ax is None): import matplotlib.pyplot as plt (fig, ax) = plt.subplots() cmap = mcm.get_cmap('gray') norm = mcolors.Normalize(0, 100) cells = {} labels = {} for strip in strip_list: cells[strip] = [] pair = single_strip_transform_factory(*astuple(strip)) for (j, ti_frac) in enumerate(strip.ti_fractions): color = cmap(norm(ti_frac)) (x, y) = pair.forward(ti_frac, strip.temperature, strip.annealing_time, strip.thickness) d = (strip.start_distance - (j * cell_size)) rect = mpatches.Rectangle(((x - (cell_size / 2)), (y - (cell_size / 2))), cell_size, cell_size, color=color, zorder=(- 1)) ax.add_patch(rect) cells[strip].append(ax.text(x, y, f'{ti_frac}', ha='center', va='center', color='w')) cells[strip].append(rect) d = ((cell_size * (len(strip.ti_fractions) - 0.5)) - strip.start_distance) labels[strip] = ax.annotate(f'{strip.temperature}°C {strip.annealing_time}s', xy=(((strip.reference_x - d) - (cell_size / 2)), (strip.reference_y - (np.sin(strip.angle) * d))), xytext=(10, 0), textcoords='offset points', va='center', ha='left', clip_on=False) ax.relim() ax.autoscale() ax.figure.tight_layout() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.invert_yaxis() ax.invert_xaxis() ax.set_aspect('equal')<|docstring|>Make a nice plot of the strip layout.<|endoftext|>
2120628e5ab8761b72c1edd79d111d8ec117b353690950a0a37fa39f2cb55646
def __init__(self, args: Namespace, atom_targets, bond_targets=None, atom_constraints=None, bond_constraints=None, attention=False): '\n\n :param args:\n :param args:\n :param constraints:\n ' features_size = args.hidden_size hidden_size = args.ffn_hidden_size num_layers = args.ffn_num_layers output_size = args.output_size dropout = nn.Dropout(args.dropout) activation = get_activation_function(args.activation) super(MultiReadout, self).__init__() for (i, a_target) in enumerate(atom_targets): constraint = (atom_constraints[i] if ((atom_constraints is not None) and (i < len(atom_constraints))) else None) if attention: self.add_module(f'readout_{i}', FFNAtten(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='atom')) else: self.add_module(f'readout_{i}', FFN(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='atom')) i += 1 for (j, b_target) in enumerate(bond_targets): i += j constraint = (bond_constraints[i] if (bond_constraints and (j < len(bond_constraints))) else None) self.add_module(f'readout_{i}', FFN(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='bond')) self.ffn_list = AttrProxy(self, 'readout_')
:param args: :param args: :param constraints:
chemprop/models/ffn.py
__init__
yanfeiguan/chemprop
11
python
def __init__(self, args: Namespace, atom_targets, bond_targets=None, atom_constraints=None, bond_constraints=None, attention=False): '\n\n :param args:\n :param args:\n :param constraints:\n ' features_size = args.hidden_size hidden_size = args.ffn_hidden_size num_layers = args.ffn_num_layers output_size = args.output_size dropout = nn.Dropout(args.dropout) activation = get_activation_function(args.activation) super(MultiReadout, self).__init__() for (i, a_target) in enumerate(atom_targets): constraint = (atom_constraints[i] if ((atom_constraints is not None) and (i < len(atom_constraints))) else None) if attention: self.add_module(f'readout_{i}', FFNAtten(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='atom')) else: self.add_module(f'readout_{i}', FFN(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='atom')) i += 1 for (j, b_target) in enumerate(bond_targets): i += j constraint = (bond_constraints[i] if (bond_constraints and (j < len(bond_constraints))) else None) self.add_module(f'readout_{i}', FFN(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='bond')) self.ffn_list = AttrProxy(self, 'readout_')
def __init__(self, args: Namespace, atom_targets, bond_targets=None, atom_constraints=None, bond_constraints=None, attention=False): '\n\n :param args:\n :param args:\n :param constraints:\n ' features_size = args.hidden_size hidden_size = args.ffn_hidden_size num_layers = args.ffn_num_layers output_size = args.output_size dropout = nn.Dropout(args.dropout) activation = get_activation_function(args.activation) super(MultiReadout, self).__init__() for (i, a_target) in enumerate(atom_targets): constraint = (atom_constraints[i] if ((atom_constraints is not None) and (i < len(atom_constraints))) else None) if attention: self.add_module(f'readout_{i}', FFNAtten(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='atom')) else: self.add_module(f'readout_{i}', FFN(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='atom')) i += 1 for (j, b_target) in enumerate(bond_targets): i += j constraint = (bond_constraints[i] if (bond_constraints and (j < len(bond_constraints))) else None) self.add_module(f'readout_{i}', FFN(features_size, hidden_size, num_layers, output_size, dropout, activation, constraint, ffn_type='bond')) self.ffn_list = AttrProxy(self, 'readout_')<|docstring|>:param args: :param args: :param constraints:<|endoftext|>
25abc21004f6c12ad0ad2efe6909eaf590431989ccb76d505f368d6a1fa3f74b
def __init__(self, features_size, hidden_size, num_layers, output_size, dropout, activation, constraint=None, ffn_type='atom'): 'Initializes the FFN.\n\n args: Arguments.\n constraints: constraints applied to output\n ' super(FFN, self).__init__() self.ffn = DenseLayers(features_size, hidden_size, num_layers, hidden_size, dropout, activation) self.ffn_readout = DenseLayers(hidden_size, hidden_size, 1, output_size, dropout, activation) self.weights_readout = DenseLayers(first_linear_dim=hidden_size, output_size=output_size, num_layers=2, dropout=dropout, activation=activation) self.constraint = constraint
Initializes the FFN. args: Arguments. constraints: constraints applied to output
chemprop/models/ffn.py
__init__
yanfeiguan/chemprop
11
python
def __init__(self, features_size, hidden_size, num_layers, output_size, dropout, activation, constraint=None, ffn_type='atom'): 'Initializes the FFN.\n\n args: Arguments.\n constraints: constraints applied to output\n ' super(FFN, self).__init__() self.ffn = DenseLayers(features_size, hidden_size, num_layers, hidden_size, dropout, activation) self.ffn_readout = DenseLayers(hidden_size, hidden_size, 1, output_size, dropout, activation) self.weights_readout = DenseLayers(first_linear_dim=hidden_size, output_size=output_size, num_layers=2, dropout=dropout, activation=activation) self.constraint = constraint
def __init__(self, features_size, hidden_size, num_layers, output_size, dropout, activation, constraint=None, ffn_type='atom'): 'Initializes the FFN.\n\n args: Arguments.\n constraints: constraints applied to output\n ' super(FFN, self).__init__() self.ffn = DenseLayers(features_size, hidden_size, num_layers, hidden_size, dropout, activation) self.ffn_readout = DenseLayers(hidden_size, hidden_size, 1, output_size, dropout, activation) self.weights_readout = DenseLayers(first_linear_dim=hidden_size, output_size=output_size, num_layers=2, dropout=dropout, activation=activation) self.constraint = constraint<|docstring|>Initializes the FFN. args: Arguments. constraints: constraints applied to output<|endoftext|>
fac7e4aef25078215828924810cc92431f4fd7186098346590ac0097b48d3aa0
def forward(self, input): '\n Runs the FFN on input\n\n :param input:\n :return:\n ' (a_hidden, a_scope, b_hidden, b_scope, b2br, bond_types) = input hidden = a_hidden scope = a_scope output_hidden = self.ffn(hidden) output = self.ffn_readout(output_hidden) weights = self.weights_readout(output_hidden) constrained_output = [] for (i, (a_start, a_size)) in enumerate(scope): if (a_size == 0): continue else: cur_weights = weights.narrow(0, a_start, a_size) cur_output = output.narrow(0, a_start, a_size) cur_weights = torch.nn.Softmax()(cur_weights) cur_output_sum = cur_output.sum() cur_output = (cur_output + (cur_weights * (self.constraint - cur_output_sum))) constrained_output.append(cur_output) output = torch.cat(constrained_output, dim=0) return output
Runs the FFN on input :param input: :return:
chemprop/models/ffn.py
forward
yanfeiguan/chemprop
11
python
def forward(self, input): '\n Runs the FFN on input\n\n :param input:\n :return:\n ' (a_hidden, a_scope, b_hidden, b_scope, b2br, bond_types) = input hidden = a_hidden scope = a_scope output_hidden = self.ffn(hidden) output = self.ffn_readout(output_hidden) weights = self.weights_readout(output_hidden) constrained_output = [] for (i, (a_start, a_size)) in enumerate(scope): if (a_size == 0): continue else: cur_weights = weights.narrow(0, a_start, a_size) cur_output = output.narrow(0, a_start, a_size) cur_weights = torch.nn.Softmax()(cur_weights) cur_output_sum = cur_output.sum() cur_output = (cur_output + (cur_weights * (self.constraint - cur_output_sum))) constrained_output.append(cur_output) output = torch.cat(constrained_output, dim=0) return output
def forward(self, input): '\n Runs the FFN on input\n\n :param input:\n :return:\n ' (a_hidden, a_scope, b_hidden, b_scope, b2br, bond_types) = input hidden = a_hidden scope = a_scope output_hidden = self.ffn(hidden) output = self.ffn_readout(output_hidden) weights = self.weights_readout(output_hidden) constrained_output = [] for (i, (a_start, a_size)) in enumerate(scope): if (a_size == 0): continue else: cur_weights = weights.narrow(0, a_start, a_size) cur_output = output.narrow(0, a_start, a_size) cur_weights = torch.nn.Softmax()(cur_weights) cur_output_sum = cur_output.sum() cur_output = (cur_output + (cur_weights * (self.constraint - cur_output_sum))) constrained_output.append(cur_output) output = torch.cat(constrained_output, dim=0) return output<|docstring|>Runs the FFN on input :param input: :return:<|endoftext|>
d0aaa957a428487d85a364d0b01f92a7f0d481430f33c821d8c384fbc4a27319
def __init__(self, features_size, hidden_size, num_layers, output_size, dropout, activation, constraint=None, ffn_type='atom', attention=False): 'Initializes the FFN.\n\n args: Arguments.\n constraints: constraints applied to output\n ' super(FFN, self).__init__() if (ffn_type == 'atom'): self.ffn = DenseLayers(features_size, hidden_size, num_layers, output_size, dropout, activation) elif (ffn_type == 'bond'): self.ffn = DenseLayers((2 * features_size), hidden_size, num_layers, output_size, dropout, activation) self.ffn_type = ffn_type self.attention = attention if (constraint is not None): self.weights_readout = DenseLayers(features_size, hidden_size, num_layers, output_size, dropout, activation) if attention: self.weights_readout = DenseLayers(first_linear_dim=hidden_size, output_size=1, num_layers=1, dropout=dropout, activation=activation) self.constraint = constraint else: self.constraint = None
Initializes the FFN. args: Arguments. constraints: constraints applied to output
chemprop/models/ffn.py
__init__
yanfeiguan/chemprop
11
python
def __init__(self, features_size, hidden_size, num_layers, output_size, dropout, activation, constraint=None, ffn_type='atom', attention=False): 'Initializes the FFN.\n\n args: Arguments.\n constraints: constraints applied to output\n ' super(FFN, self).__init__() if (ffn_type == 'atom'): self.ffn = DenseLayers(features_size, hidden_size, num_layers, output_size, dropout, activation) elif (ffn_type == 'bond'): self.ffn = DenseLayers((2 * features_size), hidden_size, num_layers, output_size, dropout, activation) self.ffn_type = ffn_type self.attention = attention if (constraint is not None): self.weights_readout = DenseLayers(features_size, hidden_size, num_layers, output_size, dropout, activation) if attention: self.weights_readout = DenseLayers(first_linear_dim=hidden_size, output_size=1, num_layers=1, dropout=dropout, activation=activation) self.constraint = constraint else: self.constraint = None
def __init__(self, features_size, hidden_size, num_layers, output_size, dropout, activation, constraint=None, ffn_type='atom', attention=False): 'Initializes the FFN.\n\n args: Arguments.\n constraints: constraints applied to output\n ' super(FFN, self).__init__() if (ffn_type == 'atom'): self.ffn = DenseLayers(features_size, hidden_size, num_layers, output_size, dropout, activation) elif (ffn_type == 'bond'): self.ffn = DenseLayers((2 * features_size), hidden_size, num_layers, output_size, dropout, activation) self.ffn_type = ffn_type self.attention = attention if (constraint is not None): self.weights_readout = DenseLayers(features_size, hidden_size, num_layers, output_size, dropout, activation) if attention: self.weights_readout = DenseLayers(first_linear_dim=hidden_size, output_size=1, num_layers=1, dropout=dropout, activation=activation) self.constraint = constraint else: self.constraint = None<|docstring|>Initializes the FFN. args: Arguments. constraints: constraints applied to output<|endoftext|>
a89b6d886200703da51de0fa9c7e639074f5d8408aca8ffb147f121f02c3f278
def forward(self, input): '\n Runs the FFN on input\n\n :param input:\n :return:\n ' (a_hidden, a_scope, b_hidden, b_scope, b2br, bond_types) = input if (self.ffn_type == 'atom'): hidden = a_hidden scope = a_scope output = self.ffn(hidden) if self.attention: weights = self.weights_readout(output) if (self.constraint is not None): weights = self.weights_readout(hidden) constrained_output = [] for (i, (a_start, a_size)) in enumerate(scope): if (a_size == 0): continue else: cur_weights = weights.narrow(0, a_start, a_size) cur_output = output.narrow(0, a_start, a_size) cur_weights_sum = cur_weights.sum() cur_output_sum = cur_output.sum() cur_output = (cur_output + ((cur_weights * (self.constraint - cur_output_sum)) / cur_weights_sum)) constrained_output.append(cur_output) output = torch.cat(constrained_output, dim=0) else: output = output[1:] elif (self.ffn_type == 'bond'): forward_bond = b_hidden[b2br[(:, 0)]] backward_bond = b_hidden[b2br[(:, 1)]] b_hidden = torch.cat([forward_bond, backward_bond], dim=1) output = (self.ffn(b_hidden) + bond_types.reshape((- 1), 1)) return output
Runs the FFN on input :param input: :return:
chemprop/models/ffn.py
forward
yanfeiguan/chemprop
11
python
def forward(self, input): '\n Runs the FFN on input\n\n :param input:\n :return:\n ' (a_hidden, a_scope, b_hidden, b_scope, b2br, bond_types) = input if (self.ffn_type == 'atom'): hidden = a_hidden scope = a_scope output = self.ffn(hidden) if self.attention: weights = self.weights_readout(output) if (self.constraint is not None): weights = self.weights_readout(hidden) constrained_output = [] for (i, (a_start, a_size)) in enumerate(scope): if (a_size == 0): continue else: cur_weights = weights.narrow(0, a_start, a_size) cur_output = output.narrow(0, a_start, a_size) cur_weights_sum = cur_weights.sum() cur_output_sum = cur_output.sum() cur_output = (cur_output + ((cur_weights * (self.constraint - cur_output_sum)) / cur_weights_sum)) constrained_output.append(cur_output) output = torch.cat(constrained_output, dim=0) else: output = output[1:] elif (self.ffn_type == 'bond'): forward_bond = b_hidden[b2br[(:, 0)]] backward_bond = b_hidden[b2br[(:, 1)]] b_hidden = torch.cat([forward_bond, backward_bond], dim=1) output = (self.ffn(b_hidden) + bond_types.reshape((- 1), 1)) return output
def forward(self, input): '\n Runs the FFN on input\n\n :param input:\n :return:\n ' (a_hidden, a_scope, b_hidden, b_scope, b2br, bond_types) = input if (self.ffn_type == 'atom'): hidden = a_hidden scope = a_scope output = self.ffn(hidden) if self.attention: weights = self.weights_readout(output) if (self.constraint is not None): weights = self.weights_readout(hidden) constrained_output = [] for (i, (a_start, a_size)) in enumerate(scope): if (a_size == 0): continue else: cur_weights = weights.narrow(0, a_start, a_size) cur_output = output.narrow(0, a_start, a_size) cur_weights_sum = cur_weights.sum() cur_output_sum = cur_output.sum() cur_output = (cur_output + ((cur_weights * (self.constraint - cur_output_sum)) / cur_weights_sum)) constrained_output.append(cur_output) output = torch.cat(constrained_output, dim=0) else: output = output[1:] elif (self.ffn_type == 'bond'): forward_bond = b_hidden[b2br[(:, 0)]] backward_bond = b_hidden[b2br[(:, 1)]] b_hidden = torch.cat([forward_bond, backward_bond], dim=1) output = (self.ffn(b_hidden) + bond_types.reshape((- 1), 1)) return output<|docstring|>Runs the FFN on input :param input: :return:<|endoftext|>
309a808b5a1f7ff55d645ae946bbf75330b4fd3564b37c40bce8331a3187a741
def __init__(self, first_linear_dim: int, hidden_size: int, num_layers: int, output_size: int, dropout: nn.Module, activation) -> nn.Sequential: '\n :param first_linear_dim:\n :param hidden_size:\n :param num_layers:\n :param output_size:\n :param dropout:\n :param activation:\n ' super(DenseLayers, self).__init__() if (num_layers == 1): layers = [dropout, nn.Linear(first_linear_dim, output_size)] else: layers = [dropout, nn.Linear(first_linear_dim, hidden_size)] for _ in range((num_layers - 2)): layers.extend([activation, dropout, nn.Linear(hidden_size, hidden_size)]) layers.extend([activation, dropout, nn.Linear(hidden_size, output_size)]) self.dense_layers = nn.Sequential(*layers)
:param first_linear_dim: :param hidden_size: :param num_layers: :param output_size: :param dropout: :param activation:
chemprop/models/ffn.py
__init__
yanfeiguan/chemprop
11
python
def __init__(self, first_linear_dim: int, hidden_size: int, num_layers: int, output_size: int, dropout: nn.Module, activation) -> nn.Sequential: '\n :param first_linear_dim:\n :param hidden_size:\n :param num_layers:\n :param output_size:\n :param dropout:\n :param activation:\n ' super(DenseLayers, self).__init__() if (num_layers == 1): layers = [dropout, nn.Linear(first_linear_dim, output_size)] else: layers = [dropout, nn.Linear(first_linear_dim, hidden_size)] for _ in range((num_layers - 2)): layers.extend([activation, dropout, nn.Linear(hidden_size, hidden_size)]) layers.extend([activation, dropout, nn.Linear(hidden_size, output_size)]) self.dense_layers = nn.Sequential(*layers)
def __init__(self, first_linear_dim: int, hidden_size: int, num_layers: int, output_size: int, dropout: nn.Module, activation) -> nn.Sequential: '\n :param first_linear_dim:\n :param hidden_size:\n :param num_layers:\n :param output_size:\n :param dropout:\n :param activation:\n ' super(DenseLayers, self).__init__() if (num_layers == 1): layers = [dropout, nn.Linear(first_linear_dim, output_size)] else: layers = [dropout, nn.Linear(first_linear_dim, hidden_size)] for _ in range((num_layers - 2)): layers.extend([activation, dropout, nn.Linear(hidden_size, hidden_size)]) layers.extend([activation, dropout, nn.Linear(hidden_size, output_size)]) self.dense_layers = nn.Sequential(*layers)<|docstring|>:param first_linear_dim: :param hidden_size: :param num_layers: :param output_size: :param dropout: :param activation:<|endoftext|>
a42d71c0df95d79e8567f043168b466456d3544e9d9cd2ce766b174ac9b04213
@timeit('Part 1') def part_one(x): 'Solves part one' a = Arcade() a.run(x) blocks = np.where((a.screen == 2)) return len(blocks[0])
Solves part one
day_13/main.py
part_one
orrinjelo/AdventOfCode2019
1
python
@timeit('Part 1') def part_one(x): a = Arcade() a.run(x) blocks = np.where((a.screen == 2)) return len(blocks[0])
@timeit('Part 1') def part_one(x): a = Arcade() a.run(x) blocks = np.where((a.screen == 2)) return len(blocks[0])<|docstring|>Solves part one<|endoftext|>
2a9c36c5a2124b86fb2b1ebcc1e21a39d963dbe50a605a3a070e32d28bffbca9
@timeit('Part 2') def part_two(x): 'Solves part two' def dumb(x): (ballx, bally) = a.ball (paddx, paddy) = a.paddle if (ballx > paddx): return 1 elif (ballx < paddx): return (- 1) else: return 0 a = Arcade(input_cb=dumb) x[0] = 2 a.run(x) return a.score
Solves part two
day_13/main.py
part_two
orrinjelo/AdventOfCode2019
1
python
@timeit('Part 2') def part_two(x): def dumb(x): (ballx, bally) = a.ball (paddx, paddy) = a.paddle if (ballx > paddx): return 1 elif (ballx < paddx): return (- 1) else: return 0 a = Arcade(input_cb=dumb) x[0] = 2 a.run(x) return a.score
@timeit('Part 2') def part_two(x): def dumb(x): (ballx, bally) = a.ball (paddx, paddy) = a.paddle if (ballx > paddx): return 1 elif (ballx < paddx): return (- 1) else: return 0 a = Arcade(input_cb=dumb) x[0] = 2 a.run(x) return a.score<|docstring|>Solves part two<|endoftext|>
fcafbe2b8d8780fa5829d6f2721b46de367323a5cd81d0b2d79bd18efa3a7172
def part_two_visualized(x): 'Visualization' frames = [] def dumb(x): (ballx, bally) = a.ball (paddx, paddy) = a.paddle frames.append([plt.imshow(np.transpose(a.screen))]) if (ballx > paddx): return 1 elif (ballx < paddx): return (- 1) else: return 0 a = Arcade(input_cb=dumb) (fig, ax) = plt.subplots() im = plt.imshow(np.transpose(a.screen)) x[0] = 2 a.run(x) import logging animation._log.setLevel(logging.DEBUG) ani = animation.ArtistAnimation(fig, frames[:30], interval=50, blit=True) Writer = animation.writers['ffmpeg'] writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800) ani.save('im.mp4', writer=writer)
Visualization
day_13/main.py
part_two_visualized
orrinjelo/AdventOfCode2019
1
python
def part_two_visualized(x): frames = [] def dumb(x): (ballx, bally) = a.ball (paddx, paddy) = a.paddle frames.append([plt.imshow(np.transpose(a.screen))]) if (ballx > paddx): return 1 elif (ballx < paddx): return (- 1) else: return 0 a = Arcade(input_cb=dumb) (fig, ax) = plt.subplots() im = plt.imshow(np.transpose(a.screen)) x[0] = 2 a.run(x) import logging animation._log.setLevel(logging.DEBUG) ani = animation.ArtistAnimation(fig, frames[:30], interval=50, blit=True) Writer = animation.writers['ffmpeg'] writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800) ani.save('im.mp4', writer=writer)
def part_two_visualized(x): frames = [] def dumb(x): (ballx, bally) = a.ball (paddx, paddy) = a.paddle frames.append([plt.imshow(np.transpose(a.screen))]) if (ballx > paddx): return 1 elif (ballx < paddx): return (- 1) else: return 0 a = Arcade(input_cb=dumb) (fig, ax) = plt.subplots() im = plt.imshow(np.transpose(a.screen)) x[0] = 2 a.run(x) import logging animation._log.setLevel(logging.DEBUG) ani = animation.ArtistAnimation(fig, frames[:30], interval=50, blit=True) Writer = animation.writers['ffmpeg'] writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800) ani.save('im.mp4', writer=writer)<|docstring|>Visualization<|endoftext|>
015dfffc79a644aaac969dc869006ce74b71144438f203642c926f052ae61362
def test(): 'Test functions' assert True
Test functions
day_13/main.py
test
orrinjelo/AdventOfCode2019
1
python
def test(): assert True
def test(): assert True<|docstring|>Test functions<|endoftext|>
a75c64389589e080355fba0ba47d75ca21097de205a04921bc69992d60a7eae2
def pre(self, command, output_dir, vars): 'Called before template is applied.' package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger for (key, value) in vars.items(): if (value == 'None'): vars[key] = None elif (value == 'True'): vars[key] = True elif (value == 'False'): vars[key] = False
Called before template is applied.
moksha/pastetemplate.py
pre
lmacken/moksha
1
python
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger for (key, value) in vars.items(): if (value == 'None'): vars[key] = None elif (value == 'True'): vars[key] = True elif (value == 'False'): vars[key] = False
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger for (key, value) in vars.items(): if (value == 'None'): vars[key] = None elif (value == 'True'): vars[key] = True elif (value == 'False'): vars[key] = False<|docstring|>Called before template is applied.<|endoftext|>
6ac3d1f96be5c062de245ced443ebe1f35d6cd45f39242fa1a25e8d0d52a036b
def pre(self, command, output_dir, vars): 'Called before template is applied.' if ('widget_name' not in vars): vars['widget_name'] = (vars['package'].title() + 'Widget')
Called before template is applied.
moksha/pastetemplate.py
pre
lmacken/moksha
1
python
def pre(self, command, output_dir, vars): if ('widget_name' not in vars): vars['widget_name'] = (vars['package'].title() + 'Widget')
def pre(self, command, output_dir, vars): if ('widget_name' not in vars): vars['widget_name'] = (vars['package'].title() + 'Widget')<|docstring|>Called before template is applied.<|endoftext|>
8394d8c508a2dcb0a8b1953b6453cf81f01f6126448bf5bc29df4313b2913089
def pre(self, command, output_dir, vars): 'Called before template is applied.' package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['stream_name'] = (vars['package'].title() + 'Stream')
Called before template is applied.
moksha/pastetemplate.py
pre
lmacken/moksha
1
python
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['stream_name'] = (vars['package'].title() + 'Stream')
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['stream_name'] = (vars['package'].title() + 'Stream')<|docstring|>Called before template is applied.<|endoftext|>
8ef8a2d30b79e28d750cf88993fd23b258db711df49f3e41772e709648cd5601
def pre(self, command, output_dir, vars): 'Called before template is applied.' package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['consumer_name'] = (vars['package'].title() + 'Consumer')
Called before template is applied.
moksha/pastetemplate.py
pre
lmacken/moksha
1
python
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['consumer_name'] = (vars['package'].title() + 'Consumer')
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['consumer_name'] = (vars['package'].title() + 'Consumer')<|docstring|>Called before template is applied.<|endoftext|>
00313b53709d519c7ec157efd2b1b2ae2ce8b497439d7cc911e86edf3e04dc2e
def pre(self, command, output_dir, vars): 'Called before template is applied.' package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['connector_name'] = (vars['package'].title() + 'Connector')
Called before template is applied.
moksha/pastetemplate.py
pre
lmacken/moksha
1
python
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['connector_name'] = (vars['package'].title() + 'Connector')
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['connector_name'] = (vars['package'].title() + 'Connector')<|docstring|>Called before template is applied.<|endoftext|>
10a8e7787f7a102620d4af25e532db42b47d2eafef3e6ac0863531a67870a5fb
def pre(self, command, output_dir, vars): 'Called before template is applied.' package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['controller_name'] = (vars['package'].title() + 'Controller')
Called before template is applied.
moksha/pastetemplate.py
pre
lmacken/moksha
1
python
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['controller_name'] = (vars['package'].title() + 'Controller')
def pre(self, command, output_dir, vars): package_logger = vars['package'] if (package_logger == 'root'): package_logger = 'app' vars['package_logger'] = package_logger vars['controller_name'] = (vars['package'].title() + 'Controller')<|docstring|>Called before template is applied.<|endoftext|>
6641cac04bdfab97cb1eab0419a8b31e615eb52227c440e2dee21d831007c20d
def __init__(self, dimensions): 'Initialize an array specifier with a list\nof ints for dimensions' self.dimensions = [] if (not dimensions): raise InvalidTypeDimensionsError(str(type(dimensions))) for k in dimensions: if (not isinstance(k, int)): raise InvalidValueDimensionError(str(type(k))) self.dimensions.append(k)
Initialize an array specifier with a list of ints for dimensions
c_ast/hir/ArraySpecifier.py
__init__
qram9/c_ast
0
python
def __init__(self, dimensions): 'Initialize an array specifier with a list\nof ints for dimensions' self.dimensions = [] if (not dimensions): raise InvalidTypeDimensionsError(str(type(dimensions))) for k in dimensions: if (not isinstance(k, int)): raise InvalidValueDimensionError(str(type(k))) self.dimensions.append(k)
def __init__(self, dimensions): 'Initialize an array specifier with a list\nof ints for dimensions' self.dimensions = [] if (not dimensions): raise InvalidTypeDimensionsError(str(type(dimensions))) for k in dimensions: if (not isinstance(k, int)): raise InvalidValueDimensionError(str(type(k))) self.dimensions.append(k)<|docstring|>Initialize an array specifier with a list of ints for dimensions<|endoftext|>
22272f8ea6e592fcb758524adbf0b1e14c7f0b9fbbe593ef9b3a9e05185d1111
def getNumDimensions(self): 'Returns the number of dimensions\nin the dimensions list specifier' return self.dimensions.size()
Returns the number of dimensions in the dimensions list specifier
c_ast/hir/ArraySpecifier.py
getNumDimensions
qram9/c_ast
0
python
def getNumDimensions(self): 'Returns the number of dimensions\nin the dimensions list specifier' return self.dimensions.size()
def getNumDimensions(self): 'Returns the number of dimensions\nin the dimensions list specifier' return self.dimensions.size()<|docstring|>Returns the number of dimensions in the dimensions list specifier<|endoftext|>
5a2fd65f5888f5951917be446906f2699a9715865a7e8cca9a4e47adc8c6ffb9
def getDimension(self, n): 'Returns a specified dimension' try: return self.dimensions[n] except IndexError: raise InvalidIndexDimensionsError(('%d %d' % (len(self.dimensions), n)))
Returns a specified dimension
c_ast/hir/ArraySpecifier.py
getDimension
qram9/c_ast
0
python
def getDimension(self, n): try: return self.dimensions[n] except IndexError: raise InvalidIndexDimensionsError(('%d %d' % (len(self.dimensions), n)))
def getDimension(self, n): try: return self.dimensions[n] except IndexError: raise InvalidIndexDimensionsError(('%d %d' % (len(self.dimensions), n)))<|docstring|>Returns a specified dimension<|endoftext|>
bf5c95cacab2ee65c0dbff3f7e611883dbde98bba3347cc8333d575c25782a40
def setDimension(self, n, val): 'Sets a specified dimension to given value' self.dimensions[n] = val
Sets a specified dimension to given value
c_ast/hir/ArraySpecifier.py
setDimension
qram9/c_ast
0
python
def setDimension(self, n, val): self.dimensions[n] = val
def setDimension(self, n, val): self.dimensions[n] = val<|docstring|>Sets a specified dimension to given value<|endoftext|>
bbd3c2a9c17d77fdf1ab51e78247c9efe0fb21e45bc7f78ae6552e288ff63559
def __repr__(self): 'Returns a string representation of the contents \nof the array specifier object. Currently the returned \nstring is in AnsiC. For example [1][2][3].\nChange this function to return different \na representation.' retval = '' for k in self.dimensions: retval += '[' retval += str(k) retval += ']' return retval
Returns a string representation of the contents of the array specifier object. Currently the returned string is in AnsiC. For example [1][2][3]. Change this function to return different a representation.
c_ast/hir/ArraySpecifier.py
__repr__
qram9/c_ast
0
python
def __repr__(self): 'Returns a string representation of the contents \nof the array specifier object. Currently the returned \nstring is in AnsiC. For example [1][2][3].\nChange this function to return different \na representation.' retval = for k in self.dimensions: retval += '[' retval += str(k) retval += ']' return retval
def __repr__(self): 'Returns a string representation of the contents \nof the array specifier object. Currently the returned \nstring is in AnsiC. For example [1][2][3].\nChange this function to return different \na representation.' retval = for k in self.dimensions: retval += '[' retval += str(k) retval += ']' return retval<|docstring|>Returns a string representation of the contents of the array specifier object. Currently the returned string is in AnsiC. For example [1][2][3]. Change this function to return different a representation.<|endoftext|>
bb950b4c3dfe6384b4cb6db5e2caa4c72b96f262ab278829c108463d386ee9db
def items(self): "Returns the 'dimensions' list of ints" items = {} items['dimensions'] = self.dimensions for k in ArraySpecifier.__bases__: if hasattr(k, 'items'): supitems = k.items(self) for (k, v) in list(supitems.items()): items[k] = v return dict(items)
Returns the 'dimensions' list of ints
c_ast/hir/ArraySpecifier.py
items
qram9/c_ast
0
python
def items(self): items = {} items['dimensions'] = self.dimensions for k in ArraySpecifier.__bases__: if hasattr(k, 'items'): supitems = k.items(self) for (k, v) in list(supitems.items()): items[k] = v return dict(items)
def items(self): items = {} items['dimensions'] = self.dimensions for k in ArraySpecifier.__bases__: if hasattr(k, 'items'): supitems = k.items(self) for (k, v) in list(supitems.items()): items[k] = v return dict(items)<|docstring|>Returns the 'dimensions' list of ints<|endoftext|>
76a865d7170cec3318d7841402aaf6ec3347d110e158d6b0488ac43ff4175301
def __getstate__(self): "Returns the 'dimensions' list of ints. Calls items directly" return dict(self.items())
Returns the 'dimensions' list of ints. Calls items directly
c_ast/hir/ArraySpecifier.py
__getstate__
qram9/c_ast
0
python
def __getstate__(self): return dict(self.items())
def __getstate__(self): return dict(self.items())<|docstring|>Returns the 'dimensions' list of ints. Calls items directly<|endoftext|>
0db375efca85d14fce3b1d1740c56acf6cf30c3b7dbdae46bec90ddaec916e0c
def __setstate__(self, statedict): 'Blindly sets the state of this object, using a statedict' for (k, v) in list(statedict.items()): setattr(self, k, v)
Blindly sets the state of this object, using a statedict
c_ast/hir/ArraySpecifier.py
__setstate__
qram9/c_ast
0
python
def __setstate__(self, statedict): for (k, v) in list(statedict.items()): setattr(self, k, v)
def __setstate__(self, statedict): for (k, v) in list(statedict.items()): setattr(self, k, v)<|docstring|>Blindly sets the state of this object, using a statedict<|endoftext|>
bc1fe74641166fa843eb3f56de6884ba565a7ec824ee86dd7fa270952d8252ad
def __init__(self, *lru_args, **lru_kwargs): '\n \n :param lru_args: \n :param lru_kwargs: \n ' self.lru_args = lru_args self.lru_kwargs = lru_kwargs
:param lru_args: :param lru_kwargs:
shot_detector/utils/functool_lru_cache_method.py
__init__
w495/shot_detector
18
python
def __init__(self, *lru_args, **lru_kwargs): '\n \n :param lru_args: \n :param lru_kwargs: \n ' self.lru_args = lru_args self.lru_kwargs = lru_kwargs
def __init__(self, *lru_args, **lru_kwargs): '\n \n :param lru_args: \n :param lru_kwargs: \n ' self.lru_args = lru_args self.lru_kwargs = lru_kwargs<|docstring|>:param lru_args: :param lru_kwargs:<|endoftext|>
f95035609fe7d258a8618bc3d888df92fe8a9c90681080114b051ad5cf29f0f9
def __call__(self, func): '\n \n :param func: \n :return: \n ' @functools.wraps(func) def wrapped_func(wrapped_self, *wrapped_args, **wrapped_kwargs): '\n \n :param wrapped_self: \n :param wrapped_args: \n :param wrapped_kwargs: \n :return: \n ' weak_wrapped_self = weakref.ref(wrapped_self) @functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result setattr(wrapped_self, func.__name__, cached_method) cached_result = cached_method(wrapped_args, wrapped_kwargs) return cached_result return wrapped_func
:param func: :return:
shot_detector/utils/functool_lru_cache_method.py
__call__
w495/shot_detector
18
python
def __call__(self, func): '\n \n :param func: \n :return: \n ' @functools.wraps(func) def wrapped_func(wrapped_self, *wrapped_args, **wrapped_kwargs): '\n \n :param wrapped_self: \n :param wrapped_args: \n :param wrapped_kwargs: \n :return: \n ' weak_wrapped_self = weakref.ref(wrapped_self) @functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result setattr(wrapped_self, func.__name__, cached_method) cached_result = cached_method(wrapped_args, wrapped_kwargs) return cached_result return wrapped_func
def __call__(self, func): '\n \n :param func: \n :return: \n ' @functools.wraps(func) def wrapped_func(wrapped_self, *wrapped_args, **wrapped_kwargs): '\n \n :param wrapped_self: \n :param wrapped_args: \n :param wrapped_kwargs: \n :return: \n ' weak_wrapped_self = weakref.ref(wrapped_self) @functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result setattr(wrapped_self, func.__name__, cached_method) cached_result = cached_method(wrapped_args, wrapped_kwargs) return cached_result return wrapped_func<|docstring|>:param func: :return:<|endoftext|>
91c70bd61a5c3853b209c31ff6e389b4aaf0d05dddce5d0b3f84fbcc18d76660
@functools.wraps(func) def wrapped_func(wrapped_self, *wrapped_args, **wrapped_kwargs): '\n \n :param wrapped_self: \n :param wrapped_args: \n :param wrapped_kwargs: \n :return: \n ' weak_wrapped_self = weakref.ref(wrapped_self) @functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result setattr(wrapped_self, func.__name__, cached_method) cached_result = cached_method(wrapped_args, wrapped_kwargs) return cached_result
:param wrapped_self: :param wrapped_args: :param wrapped_kwargs: :return:
shot_detector/utils/functool_lru_cache_method.py
wrapped_func
w495/shot_detector
18
python
@functools.wraps(func) def wrapped_func(wrapped_self, *wrapped_args, **wrapped_kwargs): '\n \n :param wrapped_self: \n :param wrapped_args: \n :param wrapped_kwargs: \n :return: \n ' weak_wrapped_self = weakref.ref(wrapped_self) @functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result setattr(wrapped_self, func.__name__, cached_method) cached_result = cached_method(wrapped_args, wrapped_kwargs) return cached_result
@functools.wraps(func) def wrapped_func(wrapped_self, *wrapped_args, **wrapped_kwargs): '\n \n :param wrapped_self: \n :param wrapped_args: \n :param wrapped_kwargs: \n :return: \n ' weak_wrapped_self = weakref.ref(wrapped_self) @functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result setattr(wrapped_self, func.__name__, cached_method) cached_result = cached_method(wrapped_args, wrapped_kwargs) return cached_result<|docstring|>:param wrapped_self: :param wrapped_args: :param wrapped_kwargs: :return:<|endoftext|>
7f4f16a6ee57ddb71b2c8e77d15ae3c29b9c9824f3d0ca7ce3203be1d76f9c98
@functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result
:param cached_args: :param cached_kwargs: :return:
shot_detector/utils/functool_lru_cache_method.py
cached_method
w495/shot_detector
18
python
@functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result
@functools.wraps(func) @functools.lru_cache(*self.lru_args, **self.lru_kwargs) def cached_method(cached_args, cached_kwargs): '\n \n :param cached_args: \n :param cached_kwargs: \n :return: \n ' result = func(weak_wrapped_self(), *cached_args, **cached_kwargs) return result<|docstring|>:param cached_args: :param cached_kwargs: :return:<|endoftext|>
528a480bc84bb495662b8143046c1a1345b6f451871ceb0656e3e1d9cd1cbb69
def __init__(self, viscosity, isotropic=False): '\n Arguments:\n viscosity dynamic viscosity µ of surrounding fluid\n isotropic (bool) True if the drag is isotropic\n ' self.viscosity = viscosity self.isotropic = isotropic
Arguments: viscosity dynamic viscosity µ of surrounding fluid isotropic (bool) True if the drag is isotropic
stoked/drag.py
__init__
johnaparker/stoked
1
python
def __init__(self, viscosity, isotropic=False): '\n Arguments:\n viscosity dynamic viscosity µ of surrounding fluid\n isotropic (bool) True if the drag is isotropic\n ' self.viscosity = viscosity self.isotropic = isotropic
def __init__(self, viscosity, isotropic=False): '\n Arguments:\n viscosity dynamic viscosity µ of surrounding fluid\n isotropic (bool) True if the drag is isotropic\n ' self.viscosity = viscosity self.isotropic = isotropic<|docstring|>Arguments: viscosity dynamic viscosity µ of surrounding fluid isotropic (bool) True if the drag is isotropic<|endoftext|>
2d7a08bab7c7148e8b4cc12fb644937ad559ce17bd74a63553074e893df04a7c
@property def drag_T(self): 'translational drag coeffecient' return self._drag_T()
translational drag coeffecient
stoked/drag.py
drag_T
johnaparker/stoked
1
python
@property def drag_T(self): return self._drag_T()
@property def drag_T(self): return self._drag_T()<|docstring|>translational drag coeffecient<|endoftext|>
90d67ae1b4cc885a2e1fd5fcef5100c63a0a92470da5333d0940c93787b08b70
@property def drag_R(self): 'rotational drag coeffecient' return self._drag_R()
rotational drag coeffecient
stoked/drag.py
drag_R
johnaparker/stoked
1
python
@property def drag_R(self): return self._drag_R()
@property def drag_R(self): return self._drag_R()<|docstring|>rotational drag coeffecient<|endoftext|>
ee164101513a8c37b46b39a87a65126d18cbc3d57f3f6d3f291ce3fc059315d5
def __init__(self, radius, viscosity): '\n Arguments:\n radius[N] sphere radii\n viscosity dynamic viscosity µ of surrounding fluid\n ' super().__init__(viscosity, isotropic=True) self.radius = np.asarray(radius, dtype=float)
Arguments: radius[N] sphere radii viscosity dynamic viscosity µ of surrounding fluid
stoked/drag.py
__init__
johnaparker/stoked
1
python
def __init__(self, radius, viscosity): '\n Arguments:\n radius[N] sphere radii\n viscosity dynamic viscosity µ of surrounding fluid\n ' super().__init__(viscosity, isotropic=True) self.radius = np.asarray(radius, dtype=float)
def __init__(self, radius, viscosity): '\n Arguments:\n radius[N] sphere radii\n viscosity dynamic viscosity µ of surrounding fluid\n ' super().__init__(viscosity, isotropic=True) self.radius = np.asarray(radius, dtype=float)<|docstring|>Arguments: radius[N] sphere radii viscosity dynamic viscosity µ of surrounding fluid<|endoftext|>
372adfeb008edc47b5933b234ffb3aa57f02f881cd6d635460151c0a8b0665cf
def __init__(self, radii, viscosity): '\n Arguments:\n radii[N,3] ellipsoid radii\n viscosity dynamic viscosity µ of surrounding fluid\n ' super().__init__(viscosity) self.radii = np.atleast_2d(np.asarray(radii, dtype=float)) self.chi_0 = np.zeros(len(self.radii), dtype=float) self.alpha_0 = np.zeros_like(self.radii) for i in range(len(self.radii)): (a, b, c) = self.radii[i] integrand = (lambda t: (1 / np.sqrt((((1 + t) * (((b / a) ** 2) + t)) * (((c / a) ** 2) + t))))) self.chi_0[i] = ((b * c) * quad(integrand, 0, np.inf)[0]) for (j, comp) in enumerate([a, b, c]): integrand = (lambda t: (1 / ((1 + t) * np.sqrt((((((a / comp) ** 2) + t) * (((b / comp) ** 2) + t)) * (((c / comp) ** 2) + t)))))) self.alpha_0[i][j] = ((((a * b) * c) / comp) * quad(integrand, 0, np.inf)[0])
Arguments: radii[N,3] ellipsoid radii viscosity dynamic viscosity µ of surrounding fluid
stoked/drag.py
__init__
johnaparker/stoked
1
python
def __init__(self, radii, viscosity): '\n Arguments:\n radii[N,3] ellipsoid radii\n viscosity dynamic viscosity µ of surrounding fluid\n ' super().__init__(viscosity) self.radii = np.atleast_2d(np.asarray(radii, dtype=float)) self.chi_0 = np.zeros(len(self.radii), dtype=float) self.alpha_0 = np.zeros_like(self.radii) for i in range(len(self.radii)): (a, b, c) = self.radii[i] integrand = (lambda t: (1 / np.sqrt((((1 + t) * (((b / a) ** 2) + t)) * (((c / a) ** 2) + t))))) self.chi_0[i] = ((b * c) * quad(integrand, 0, np.inf)[0]) for (j, comp) in enumerate([a, b, c]): integrand = (lambda t: (1 / ((1 + t) * np.sqrt((((((a / comp) ** 2) + t) * (((b / comp) ** 2) + t)) * (((c / comp) ** 2) + t)))))) self.alpha_0[i][j] = ((((a * b) * c) / comp) * quad(integrand, 0, np.inf)[0])
def __init__(self, radii, viscosity): '\n Arguments:\n radii[N,3] ellipsoid radii\n viscosity dynamic viscosity µ of surrounding fluid\n ' super().__init__(viscosity) self.radii = np.atleast_2d(np.asarray(radii, dtype=float)) self.chi_0 = np.zeros(len(self.radii), dtype=float) self.alpha_0 = np.zeros_like(self.radii) for i in range(len(self.radii)): (a, b, c) = self.radii[i] integrand = (lambda t: (1 / np.sqrt((((1 + t) * (((b / a) ** 2) + t)) * (((c / a) ** 2) + t))))) self.chi_0[i] = ((b * c) * quad(integrand, 0, np.inf)[0]) for (j, comp) in enumerate([a, b, c]): integrand = (lambda t: (1 / ((1 + t) * np.sqrt((((((a / comp) ** 2) + t) * (((b / comp) ** 2) + t)) * (((c / comp) ** 2) + t)))))) self.alpha_0[i][j] = ((((a * b) * c) / comp) * quad(integrand, 0, np.inf)[0])<|docstring|>Arguments: radii[N,3] ellipsoid radii viscosity dynamic viscosity µ of surrounding fluid<|endoftext|>
2cb1a6974c424862ef2b820877b70eae16dd67f156a2bb3df8ac4fbd57e0418e
def primitive_nurbs_surface_circle_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Circle \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
Construct a Nurbs surface Circle :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool
thirdparty/blender_autocomplete-master/2.79/bpy/ops/surface.py
primitive_nurbs_surface_circle_add
Ray1184/HPMSBatch
0
python
def primitive_nurbs_surface_circle_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Circle \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
def primitive_nurbs_surface_circle_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Circle \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass<|docstring|>Construct a Nurbs surface Circle :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool<|endoftext|>
fefcd97640ad8a96cadc0a2797d2d9b2827710a3a14edb530ea0a0f4739fce78
def primitive_nurbs_surface_curve_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Curve \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
Construct a Nurbs surface Curve :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool
thirdparty/blender_autocomplete-master/2.79/bpy/ops/surface.py
primitive_nurbs_surface_curve_add
Ray1184/HPMSBatch
0
python
def primitive_nurbs_surface_curve_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Curve \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
def primitive_nurbs_surface_curve_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Curve \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass<|docstring|>Construct a Nurbs surface Curve :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool<|endoftext|>
2bafe7cb7dd25abb3e208c884b76d5367199c8fb7049b262cf8caa4efb7519cc
def primitive_nurbs_surface_cylinder_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Cylinder \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
Construct a Nurbs surface Cylinder :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool
thirdparty/blender_autocomplete-master/2.79/bpy/ops/surface.py
primitive_nurbs_surface_cylinder_add
Ray1184/HPMSBatch
0
python
def primitive_nurbs_surface_cylinder_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Cylinder \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
def primitive_nurbs_surface_cylinder_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Cylinder \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass<|docstring|>Construct a Nurbs surface Cylinder :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool<|endoftext|>
b0ab27f7e06a0d57f6d32bb67c11b5fb0d28a0c1b4b476dfe54c22267693e9f1
def primitive_nurbs_surface_sphere_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Sphere \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
Construct a Nurbs surface Sphere :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool
thirdparty/blender_autocomplete-master/2.79/bpy/ops/surface.py
primitive_nurbs_surface_sphere_add
Ray1184/HPMSBatch
0
python
def primitive_nurbs_surface_sphere_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Sphere \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
def primitive_nurbs_surface_sphere_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Sphere \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass<|docstring|>Construct a Nurbs surface Sphere :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool<|endoftext|>
d657ef0de3316d40ee28a7ca91b660b621dc5d2e12f6c1a4be60eb9bfc35d0c7
def primitive_nurbs_surface_surface_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Patch \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
Construct a Nurbs surface Patch :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool
thirdparty/blender_autocomplete-master/2.79/bpy/ops/surface.py
primitive_nurbs_surface_surface_add
Ray1184/HPMSBatch
0
python
def primitive_nurbs_surface_surface_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Patch \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
def primitive_nurbs_surface_surface_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Patch \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass<|docstring|>Construct a Nurbs surface Patch :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool<|endoftext|>
8cdd832ec4a0d1906d9fb0c741f80529e59b826373ea540e22ccc7c5c3f877a3
def primitive_nurbs_surface_torus_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Torus \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
Construct a Nurbs surface Torus :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool
thirdparty/blender_autocomplete-master/2.79/bpy/ops/surface.py
primitive_nurbs_surface_torus_add
Ray1184/HPMSBatch
0
python
def primitive_nurbs_surface_torus_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Torus \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass
def primitive_nurbs_surface_torus_add(radius: float=1.0, view_align: bool=False, enter_editmode: bool=False, location: float=(0.0, 0.0, 0.0), rotation: float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)): 'Construct a Nurbs surface Torus \n\n :param radius: Radius \n :type radius: float\n :param view_align: Align to View, Align the new object to the view \n :type view_align: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param layers: Layer \n :type layers: bool\n ' pass<|docstring|>Construct a Nurbs surface Torus :param radius: Radius :type radius: float :param view_align: Align to View, Align the new object to the view :type view_align: bool :param enter_editmode: Enter Editmode, Enter editmode when adding this object :type enter_editmode: bool :param location: Location, Location for the newly added object :type location: float :param rotation: Rotation, Rotation for the newly added object :type rotation: float :param layers: Layer :type layers: bool<|endoftext|>
ac6212d6ee5e31d9be6ec7450b4b14d186d6942e52c9ea7ef8875420cb8c7ee9
def get_movie_data(films, people): 'Request handler for movie api' success = False movies = {} if ((films.status_code == 200) and (people.status_code == 200)): movies = map_cast(films.content, people.content) if (len(movies) > 0): success = True message = 'Request to ghibli was a success' else: message = f''' The api call to ghibli failed because the response code from ghibli for film api was {films.status_code} and for people api, it was {people.status_code}. Please call ghibli support''' logger.error('Could not get data from ghibli because of %s', message) return {'is_success': success, 'message': message, 'movies': movies}
Request handler for movie api
studio_ghibli/movies/api_data/get_movie_data.py
get_movie_data
hbansal0122/studio_ghibli_project
0
python
def get_movie_data(films, people): success = False movies = {} if ((films.status_code == 200) and (people.status_code == 200)): movies = map_cast(films.content, people.content) if (len(movies) > 0): success = True message = 'Request to ghibli was a success' else: message = f' The api call to ghibli failed because the response code from ghibli for film api was {films.status_code} and for people api, it was {people.status_code}. Please call ghibli support' logger.error('Could not get data from ghibli because of %s', message) return {'is_success': success, 'message': message, 'movies': movies}
def get_movie_data(films, people): success = False movies = {} if ((films.status_code == 200) and (people.status_code == 200)): movies = map_cast(films.content, people.content) if (len(movies) > 0): success = True message = 'Request to ghibli was a success' else: message = f' The api call to ghibli failed because the response code from ghibli for film api was {films.status_code} and for people api, it was {people.status_code}. Please call ghibli support' logger.error('Could not get data from ghibli because of %s', message) return {'is_success': success, 'message': message, 'movies': movies}<|docstring|>Request handler for movie api<|endoftext|>
94aae500d58e84c10e9aa09acf78487bb181c306dfa48ecc7cb84956ad548d19
def map_cast(films, people): 'Mapping of person with relevent movie' mapped_people_films = [] if (len(films) > 0): updated_people_obj = [] for person in json.loads(people): for film_by_person in person['films']: film_id = film_by_person.split('/')[(- 1)] person[film_id] = film_by_person person.pop('films', None) updated_people_obj.append(person) for single_film in json.loads(films): cast_each_film = [] for person in updated_people_obj: if person.get(single_film['id']): cast_each_film.append(person) single_film['people'] = cast_each_film mapped_people_films.append(single_film) return mapped_people_films
Mapping of person with relevent movie
studio_ghibli/movies/api_data/get_movie_data.py
map_cast
hbansal0122/studio_ghibli_project
0
python
def map_cast(films, people): mapped_people_films = [] if (len(films) > 0): updated_people_obj = [] for person in json.loads(people): for film_by_person in person['films']: film_id = film_by_person.split('/')[(- 1)] person[film_id] = film_by_person person.pop('films', None) updated_people_obj.append(person) for single_film in json.loads(films): cast_each_film = [] for person in updated_people_obj: if person.get(single_film['id']): cast_each_film.append(person) single_film['people'] = cast_each_film mapped_people_films.append(single_film) return mapped_people_films
def map_cast(films, people): mapped_people_films = [] if (len(films) > 0): updated_people_obj = [] for person in json.loads(people): for film_by_person in person['films']: film_id = film_by_person.split('/')[(- 1)] person[film_id] = film_by_person person.pop('films', None) updated_people_obj.append(person) for single_film in json.loads(films): cast_each_film = [] for person in updated_people_obj: if person.get(single_film['id']): cast_each_film.append(person) single_film['people'] = cast_each_film mapped_people_films.append(single_film) return mapped_people_films<|docstring|>Mapping of person with relevent movie<|endoftext|>
cf4a43970e6b6428b7a9cca730971d92d34824bff326ce1d1ca4ef27fd8c8831
def connectHBase(): '\n 连接远程HBase\n :return: 连接HBase的客户端实例\n ' socket = TSocket.TSocket('10.0.86.245', 9090) socket.setTimeout(5000) transport = TTransport.TBufferedTransport(socket) protocol = TBinaryProtocol.TBinaryProtocol(transport) client = Hbase.Client(protocol) socket.open() return client
连接远程HBase :return: 连接HBase的客户端实例
PythonConnectHBase/scannerGet.py
connectHBase
SparksFly8/Learning_Python
47
python
def connectHBase(): '\n 连接远程HBase\n :return: 连接HBase的客户端实例\n ' socket = TSocket.TSocket('10.0.86.245', 9090) socket.setTimeout(5000) transport = TTransport.TBufferedTransport(socket) protocol = TBinaryProtocol.TBinaryProtocol(transport) client = Hbase.Client(protocol) socket.open() return client
def connectHBase(): '\n 连接远程HBase\n :return: 连接HBase的客户端实例\n ' socket = TSocket.TSocket('10.0.86.245', 9090) socket.setTimeout(5000) transport = TTransport.TBufferedTransport(socket) protocol = TBinaryProtocol.TBinaryProtocol(transport) client = Hbase.Client(protocol) socket.open() return client<|docstring|>连接远程HBase :return: 连接HBase的客户端实例<|endoftext|>
559ab6d89971d6425b5171472b9cbce3b4fe376ed8abb568c8ac0db901e89ce5
def ListTables(client): '\n 列出所有表\n ' print(client.getTableNames())
列出所有表
PythonConnectHBase/scannerGet.py
ListTables
SparksFly8/Learning_Python
47
python
def ListTables(client): '\n \n ' print(client.getTableNames())
def ListTables(client): '\n \n ' print(client.getTableNames())<|docstring|>列出所有表<|endoftext|>
744a99704da5f8a6721021686b0b0db1c39164c81f819b78c401048697191fa9
def scannerGetSelect(client, tableName, columns, startRow, stopRow=None, rowsCnt=2000): '\n 依次扫描HBase指定表的每行数据(根据起始行,扫描到表的最后一行或指定行的前一行)\n :param client: 连接HBase的客户端实例\n :param tableName: 表名\n :param columns: 一个包含(一个或多个列簇下对应列名的)列表\n :param startRow: 起始扫描行\n :param stopRow: 停止扫描行(默认为空)\n :param rowsCnt: 需要扫描的行数\n :return MutilRowsDict: 返回一个包含多行数据的字典,以每行行键定位是哪一行\n ' if (stopRow is None): scannerId = client.scannerOpen(tableName, startRow, columns) else: scannerId = client.scannerOpenWithStop(tableName, startRow, stopRow, columns) results = client.scannerGetList(scannerId, rowsCnt) if results: MutilRowsDict = {} for result in results: RowDict = {} colFamilyDict = {} preColFamily = None cnt = 0 for (key, TCell_value) in result.columns.items(): cnt += 1 rowKey = result.row colFamily_colName = key.split(':') colFamily = colFamily_colName[0] colName = colFamily_colName[1] if ((preColFamily is None) or (preColFamily == colFamily)): colFamilyDict[colName] = TCell_value.value preColFamily = colFamily else: RowDict[preColFamily] = colFamilyDict colFamilyDict = {} colFamilyDict[colName] = TCell_value.value preColFamily = None if (cnt == len(result.columns.items())): RowDict[colFamily] = colFamilyDict MutilRowsDict[rowKey] = RowDict return MutilRowsDict else: return []
依次扫描HBase指定表的每行数据(根据起始行,扫描到表的最后一行或指定行的前一行) :param client: 连接HBase的客户端实例 :param tableName: 表名 :param columns: 一个包含(一个或多个列簇下对应列名的)列表 :param startRow: 起始扫描行 :param stopRow: 停止扫描行(默认为空) :param rowsCnt: 需要扫描的行数 :return MutilRowsDict: 返回一个包含多行数据的字典,以每行行键定位是哪一行
PythonConnectHBase/scannerGet.py
scannerGetSelect
SparksFly8/Learning_Python
47
python
def scannerGetSelect(client, tableName, columns, startRow, stopRow=None, rowsCnt=2000): '\n 依次扫描HBase指定表的每行数据(根据起始行,扫描到表的最后一行或指定行的前一行)\n :param client: 连接HBase的客户端实例\n :param tableName: 表名\n :param columns: 一个包含(一个或多个列簇下对应列名的)列表\n :param startRow: 起始扫描行\n :param stopRow: 停止扫描行(默认为空)\n :param rowsCnt: 需要扫描的行数\n :return MutilRowsDict: 返回一个包含多行数据的字典,以每行行键定位是哪一行\n ' if (stopRow is None): scannerId = client.scannerOpen(tableName, startRow, columns) else: scannerId = client.scannerOpenWithStop(tableName, startRow, stopRow, columns) results = client.scannerGetList(scannerId, rowsCnt) if results: MutilRowsDict = {} for result in results: RowDict = {} colFamilyDict = {} preColFamily = None cnt = 0 for (key, TCell_value) in result.columns.items(): cnt += 1 rowKey = result.row colFamily_colName = key.split(':') colFamily = colFamily_colName[0] colName = colFamily_colName[1] if ((preColFamily is None) or (preColFamily == colFamily)): colFamilyDict[colName] = TCell_value.value preColFamily = colFamily else: RowDict[preColFamily] = colFamilyDict colFamilyDict = {} colFamilyDict[colName] = TCell_value.value preColFamily = None if (cnt == len(result.columns.items())): RowDict[colFamily] = colFamilyDict MutilRowsDict[rowKey] = RowDict return MutilRowsDict else: return []
def scannerGetSelect(client, tableName, columns, startRow, stopRow=None, rowsCnt=2000): '\n 依次扫描HBase指定表的每行数据(根据起始行,扫描到表的最后一行或指定行的前一行)\n :param client: 连接HBase的客户端实例\n :param tableName: 表名\n :param columns: 一个包含(一个或多个列簇下对应列名的)列表\n :param startRow: 起始扫描行\n :param stopRow: 停止扫描行(默认为空)\n :param rowsCnt: 需要扫描的行数\n :return MutilRowsDict: 返回一个包含多行数据的字典,以每行行键定位是哪一行\n ' if (stopRow is None): scannerId = client.scannerOpen(tableName, startRow, columns) else: scannerId = client.scannerOpenWithStop(tableName, startRow, stopRow, columns) results = client.scannerGetList(scannerId, rowsCnt) if results: MutilRowsDict = {} for result in results: RowDict = {} colFamilyDict = {} preColFamily = None cnt = 0 for (key, TCell_value) in result.columns.items(): cnt += 1 rowKey = result.row colFamily_colName = key.split(':') colFamily = colFamily_colName[0] colName = colFamily_colName[1] if ((preColFamily is None) or (preColFamily == colFamily)): colFamilyDict[colName] = TCell_value.value preColFamily = colFamily else: RowDict[preColFamily] = colFamilyDict colFamilyDict = {} colFamilyDict[colName] = TCell_value.value preColFamily = None if (cnt == len(result.columns.items())): RowDict[colFamily] = colFamilyDict MutilRowsDict[rowKey] = RowDict return MutilRowsDict else: return []<|docstring|>依次扫描HBase指定表的每行数据(根据起始行,扫描到表的最后一行或指定行的前一行) :param client: 连接HBase的客户端实例 :param tableName: 表名 :param columns: 一个包含(一个或多个列簇下对应列名的)列表 :param startRow: 起始扫描行 :param stopRow: 停止扫描行(默认为空) :param rowsCnt: 需要扫描的行数 :return MutilRowsDict: 返回一个包含多行数据的字典,以每行行键定位是哪一行<|endoftext|>
5dea36b255554e8644cc645f9a975b254cbb809adc0361456f003bf878b934c5
def load_guidata(filename, report): 'Check if we have a GUI document.' report({'INFO'}, 'load guidata..') guidata = None zdoc = zipfile.ZipFile(filename) if zdoc: if ('GuiDocument.xml' in zdoc.namelist()): gf = zdoc.open('GuiDocument.xml') guidata = gf.read() gf.close() Handler = FreeCAD_xml_handler() xml.sax.parseString(guidata, Handler) guidata = Handler.guidata for (key, properties) in guidata.items(): if ('DiffuseColor' in properties): df = zdoc.open(guidata[key]['DiffuseColor']) buf = df.read() df.close() cols = [] for i in range(1, int((len(buf) / 4))): cols.append((buf[((i * 4) + 3)], buf[((i * 4) + 2)], buf[((i * 4) + 1)], buf[(i * 4)])) guidata[key]['DiffuseColor'] = cols zdoc.close() report({'INFO'}, 'load guidata done.') return guidata
Check if we have a GUI document.
import_fcstd/guidata.py
load_guidata
s-light/io_import_fcstd
6
python
def load_guidata(filename, report): report({'INFO'}, 'load guidata..') guidata = None zdoc = zipfile.ZipFile(filename) if zdoc: if ('GuiDocument.xml' in zdoc.namelist()): gf = zdoc.open('GuiDocument.xml') guidata = gf.read() gf.close() Handler = FreeCAD_xml_handler() xml.sax.parseString(guidata, Handler) guidata = Handler.guidata for (key, properties) in guidata.items(): if ('DiffuseColor' in properties): df = zdoc.open(guidata[key]['DiffuseColor']) buf = df.read() df.close() cols = [] for i in range(1, int((len(buf) / 4))): cols.append((buf[((i * 4) + 3)], buf[((i * 4) + 2)], buf[((i * 4) + 1)], buf[(i * 4)])) guidata[key]['DiffuseColor'] = cols zdoc.close() report({'INFO'}, 'load guidata done.') return guidata
def load_guidata(filename, report): report({'INFO'}, 'load guidata..') guidata = None zdoc = zipfile.ZipFile(filename) if zdoc: if ('GuiDocument.xml' in zdoc.namelist()): gf = zdoc.open('GuiDocument.xml') guidata = gf.read() gf.close() Handler = FreeCAD_xml_handler() xml.sax.parseString(guidata, Handler) guidata = Handler.guidata for (key, properties) in guidata.items(): if ('DiffuseColor' in properties): df = zdoc.open(guidata[key]['DiffuseColor']) buf = df.read() df.close() cols = [] for i in range(1, int((len(buf) / 4))): cols.append((buf[((i * 4) + 3)], buf[((i * 4) + 2)], buf[((i * 4) + 1)], buf[(i * 4)])) guidata[key]['DiffuseColor'] = cols zdoc.close() report({'INFO'}, 'load guidata done.') return guidata<|docstring|>Check if we have a GUI document.<|endoftext|>
e78e98541e519cbfebd02830e3582064a346c39facd7fdd947d472dadcfc0a8d
def __init__(self): 'Init.' self.guidata = {} self.current = None self.properties = {} self.currentprop = None self.currentval = None
Init.
import_fcstd/guidata.py
__init__
s-light/io_import_fcstd
6
python
def __init__(self): self.guidata = {} self.current = None self.properties = {} self.currentprop = None self.currentval = None
def __init__(self): self.guidata = {} self.current = None self.properties = {} self.currentprop = None self.currentval = None<|docstring|>Init.<|endoftext|>
a178456c911c01f9dc235ea0753befe2f0ce38ab981481d22b4e084132448fd6
def startElement(self, tag, attributes): 'Call when an element starts.' if (tag == 'ViewProvider'): self.current = attributes['name'] elif (tag == 'Property'): name = attributes['name'] element_names = ['Visibility', 'ShapeColor', 'Transparency', 'DiffuseColor'] if (name in element_names): self.currentprop = name elif (tag == 'Bool'): if (attributes['value'] == 'true'): self.currentval = True else: self.currentval = False elif (tag == 'PropertyColor'): c = int(attributes['value']) r = (float(((c >> 24) & 255)) / 255.0) g = (float(((c >> 16) & 255)) / 255.0) b = (float(((c >> 8) & 255)) / 255.0) self.currentval = (r, g, b) elif (tag == 'Integer'): self.currentval = int(attributes['value']) elif (tag == 'Float'): self.currentval = float(attributes['value']) elif (tag == 'ColorList'): self.currentval = attributes['file']
Call when an element starts.
import_fcstd/guidata.py
startElement
s-light/io_import_fcstd
6
python
def startElement(self, tag, attributes): if (tag == 'ViewProvider'): self.current = attributes['name'] elif (tag == 'Property'): name = attributes['name'] element_names = ['Visibility', 'ShapeColor', 'Transparency', 'DiffuseColor'] if (name in element_names): self.currentprop = name elif (tag == 'Bool'): if (attributes['value'] == 'true'): self.currentval = True else: self.currentval = False elif (tag == 'PropertyColor'): c = int(attributes['value']) r = (float(((c >> 24) & 255)) / 255.0) g = (float(((c >> 16) & 255)) / 255.0) b = (float(((c >> 8) & 255)) / 255.0) self.currentval = (r, g, b) elif (tag == 'Integer'): self.currentval = int(attributes['value']) elif (tag == 'Float'): self.currentval = float(attributes['value']) elif (tag == 'ColorList'): self.currentval = attributes['file']
def startElement(self, tag, attributes): if (tag == 'ViewProvider'): self.current = attributes['name'] elif (tag == 'Property'): name = attributes['name'] element_names = ['Visibility', 'ShapeColor', 'Transparency', 'DiffuseColor'] if (name in element_names): self.currentprop = name elif (tag == 'Bool'): if (attributes['value'] == 'true'): self.currentval = True else: self.currentval = False elif (tag == 'PropertyColor'): c = int(attributes['value']) r = (float(((c >> 24) & 255)) / 255.0) g = (float(((c >> 16) & 255)) / 255.0) b = (float(((c >> 8) & 255)) / 255.0) self.currentval = (r, g, b) elif (tag == 'Integer'): self.currentval = int(attributes['value']) elif (tag == 'Float'): self.currentval = float(attributes['value']) elif (tag == 'ColorList'): self.currentval = attributes['file']<|docstring|>Call when an element starts.<|endoftext|>
1fe92faae405ca5af05d62fd0f1bc45c578e32448a622392fb0c1176802faacc
def endElement(self, tag): 'Call when an elements ends.' if (tag == 'ViewProvider'): if (self.current and self.properties): self.guidata[self.current] = self.properties self.current = None self.properties = {} elif (tag == 'Property'): if (self.currentprop and (self.currentval is not None)): self.properties[self.currentprop] = self.currentval self.currentprop = None self.currentval = None
Call when an elements ends.
import_fcstd/guidata.py
endElement
s-light/io_import_fcstd
6
python
def endElement(self, tag): if (tag == 'ViewProvider'): if (self.current and self.properties): self.guidata[self.current] = self.properties self.current = None self.properties = {} elif (tag == 'Property'): if (self.currentprop and (self.currentval is not None)): self.properties[self.currentprop] = self.currentval self.currentprop = None self.currentval = None
def endElement(self, tag): if (tag == 'ViewProvider'): if (self.current and self.properties): self.guidata[self.current] = self.properties self.current = None self.properties = {} elif (tag == 'Property'): if (self.currentprop and (self.currentval is not None)): self.properties[self.currentprop] = self.currentval self.currentprop = None self.currentval = None<|docstring|>Call when an elements ends.<|endoftext|>
1034322426abc569b05b74aa80e6fd0956bab6c312a7b5a0e51e0bbd9c0a4db1
@cached_property def openapi_types(): '\n This must be a method because a pod_model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'count': (int, none_type), 'skip': (int, none_type), 'limit': (int, none_type), 'query': (RoomSearchCriteria, none_type), 'rooms': ([V2RoomDetail], none_type), 'faceted_match_count': ([FacetedMatchCount], none_type)}
This must be a method because a pod_model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
symphony/bdk/gen/pod_model/room_search_results.py
openapi_types
symphony-mariacristina/symphony-bdk-python
17
python
@cached_property def openapi_types(): '\n This must be a method because a pod_model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'count': (int, none_type), 'skip': (int, none_type), 'limit': (int, none_type), 'query': (RoomSearchCriteria, none_type), 'rooms': ([V2RoomDetail], none_type), 'faceted_match_count': ([FacetedMatchCount], none_type)}
@cached_property def openapi_types(): '\n This must be a method because a pod_model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'count': (int, none_type), 'skip': (int, none_type), 'limit': (int, none_type), 'query': (RoomSearchCriteria, none_type), 'rooms': ([V2RoomDetail], none_type), 'faceted_match_count': ([FacetedMatchCount], none_type)}<|docstring|>This must be a method because a pod_model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.<|endoftext|>
ac27c4b90fede555ceef9f6091da7d33ed84d654322301923c07affb2dd076ec
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'RoomSearchResults - a pod_model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the pod_model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n count (int): The total number of rooms matched by the search.. [optional] # noqa: E501\n skip (int): The number of skipped results.. [optional] # noqa: E501\n limit (int): The number of returned results.. [optional] # noqa: E501\n query (RoomSearchCriteria): [optional] # noqa: E501\n rooms ([V2RoomDetail]): A list of rooms matched by the query.. 
[optional] # noqa: E501\n faceted_match_count ([FacetedMatchCount]): Detailed counts of matched rooms per search criterion.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.count: int = None self.skip: int = None self.limit: int = None self.query: RoomSearchCriteria = None self.rooms: List[V2RoomDetail] = None self.faceted_match_count: List[FacetedMatchCount] = None for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value)
RoomSearchResults - a pod_model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the pod_model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) count (int): The total number of rooms matched by the search.. [optional] # noqa: E501 skip (int): The number of skipped results.. [optional] # noqa: E501 limit (int): The number of returned results.. [optional] # noqa: E501 query (RoomSearchCriteria): [optional] # noqa: E501 rooms ([V2RoomDetail]): A list of rooms matched by the query.. [optional] # noqa: E501 faceted_match_count ([FacetedMatchCount]): Detailed counts of matched rooms per search criterion.. [optional] # noqa: E501
symphony/bdk/gen/pod_model/room_search_results.py
__init__
symphony-mariacristina/symphony-bdk-python
17
python
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'RoomSearchResults - a pod_model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the pod_model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n count (int): The total number of rooms matched by the search.. [optional] # noqa: E501\n skip (int): The number of skipped results.. [optional] # noqa: E501\n limit (int): The number of returned results.. [optional] # noqa: E501\n query (RoomSearchCriteria): [optional] # noqa: E501\n rooms ([V2RoomDetail]): A list of rooms matched by the query.. 
[optional] # noqa: E501\n faceted_match_count ([FacetedMatchCount]): Detailed counts of matched rooms per search criterion.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.count: int = None self.skip: int = None self.limit: int = None self.query: RoomSearchCriteria = None self.rooms: List[V2RoomDetail] = None self.faceted_match_count: List[FacetedMatchCount] = None for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value)
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'RoomSearchResults - a pod_model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the pod_model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n count (int): The total number of rooms matched by the search.. [optional] # noqa: E501\n skip (int): The number of skipped results.. [optional] # noqa: E501\n limit (int): The number of returned results.. [optional] # noqa: E501\n query (RoomSearchCriteria): [optional] # noqa: E501\n rooms ([V2RoomDetail]): A list of rooms matched by the query.. 
[optional] # noqa: E501\n faceted_match_count ([FacetedMatchCount]): Detailed counts of matched rooms per search criterion.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.count: int = None self.skip: int = None self.limit: int = None self.query: RoomSearchCriteria = None self.rooms: List[V2RoomDetail] = None self.faceted_match_count: List[FacetedMatchCount] = None for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value)<|docstring|>RoomSearchResults - a pod_model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the pod_model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. 
snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) count (int): The total number of rooms matched by the search.. [optional] # noqa: E501 skip (int): The number of skipped results.. [optional] # noqa: E501 limit (int): The number of returned results.. [optional] # noqa: E501 query (RoomSearchCriteria): [optional] # noqa: E501 rooms ([V2RoomDetail]): A list of rooms matched by the query.. [optional] # noqa: E501 faceted_match_count ([FacetedMatchCount]): Detailed counts of matched rooms per search criterion.. [optional] # noqa: E501<|endoftext|>
c1aae4f5fba6c4ab6a6d72cfa2257be3f044677be0c87319f1c4a8fdda4ba6ee
def append_abc(key, keys=None):
    """Return *key* with the next free letter suffix appended.

    Used to disambiguate citation keys: the first conflict gets a 'b'
    suffix, the next a 'c', and so on.  If *key* already ends with a
    lower-case letter, that letter is treated as the current suffix and
    the search continues from the following letter.

    Parameters
    ----------
    key : str
        The conflicting citation key (e.g. 'Author2000' or 'Author2000b').
    keys : sequence of str, optional
        Keys already in use; the returned key is guaranteed not to be
        among them.

    Raises
    ------
    ValueError
        If the alphabet is exhausted without finding a free key
        (previously this path hit an unbound name / failing assert).

    >>> append_abc('Author2000')
    'Author2000b'
    >>> append_abc('Author2000b')
    'Author2000c'
    >>> append_abc('Author2000', ['Author2000', 'Author2000b'])
    'Author2000c'
    """
    # Avoid the original's mutable default argument (keys=[]).
    if keys is None:
        keys = ()
    letters = 'abcdefghijklmnopqrstuvwxyz'
    if key and key[-1] in letters:
        # Key already carries a letter suffix: resume from the next letter.
        candidates = letters[letters.index(key[-1]) + 1:]
        base = key[:-1]
    else:
        # No suffix yet: 'a' is implicitly taken by the bare key, start at 'b'.
        candidates = letters[1:]
        base = key
    for letter in candidates:
        candidate = base + letter
        if candidate not in keys:
            return candidate
    raise ValueError(
        'not enough letters in the alphabet to solve key conflict for %r' % key)
>>> append_abc('Author2000') 'Author2000b' >>> append_abc('Author2000b') 'Author2000c' >>> append_abc('Author2000', ['Author2000', 'Author2000b']) 'Author2000c'
papers/bib.py
append_abc
aiotter/papers
105
python
def append_abc(key, keys=[]): "\n >>> append_abc('Author2000')\n 'Author2000b'\n >>> append_abc('Author2000b')\n 'Author2000c'\n >>> append_abc('Author2000', ['Author2000', 'Author2000b'])\n 'Author2000c'\n " letters = list('abcdefghijklmnopqrstuvwxyz') if (key[(- 1)] in letters): i = letters.index(key[(- 1)]) letters = letters[(i + 1):] key = key[:(- 1)] else: letters = letters[1:] for l in letters: Key = (key + l) if (Key not in keys): key = Key break assert (Key not in keys), 'not enough letters in the alphabets to solve key conflict? or maybe something went wrong...' return Key
def append_abc(key, keys=[]): "\n >>> append_abc('Author2000')\n 'Author2000b'\n >>> append_abc('Author2000b')\n 'Author2000c'\n >>> append_abc('Author2000', ['Author2000', 'Author2000b'])\n 'Author2000c'\n " letters = list('abcdefghijklmnopqrstuvwxyz') if (key[(- 1)] in letters): i = letters.index(key[(- 1)]) letters = letters[(i + 1):] key = key[:(- 1)] else: letters = letters[1:] for l in letters: Key = (key + l) if (Key not in keys): key = Key break assert (Key not in keys), 'not enough letters in the alphabets to solve key conflict? or maybe something went wrong...' return Key<|docstring|>>>> append_abc('Author2000') 'Author2000b' >>> append_abc('Author2000b') 'Author2000c' >>> append_abc('Author2000', ['Author2000', 'Author2000b']) 'Author2000c'<|endoftext|>
ca5a497689e0ea37e988db334c8cc57e120d0194ce57c14c6488cec9521f116d
def _simplify_string(s):
    """Normalize a string for comparison: replace unicode, lower-case, strip."""
    cleaned = _remove_unicode(s)
    cleaned = cleaned.lower()
    return cleaned.strip()
replace unicode, strip, lower case
papers/bib.py
_simplify_string
aiotter/papers
105
python
def _simplify_string(s): ' ' s = _remove_unicode(s) return s.lower().strip()
def _simplify_string(s): ' ' s = _remove_unicode(s) return s.lower().strip()<|docstring|>replace unicode, strip, lower case<|endoftext|>
e15cee9dff8d86bab1e92309e350a7a77a1240e5a51acad7f078f0b1e7f3b20d
def entry_id(e):
    """Entry identifier for duplicate detection (not the bibtex key).

    Returns a ``(doi, author+title)`` tuple, with the doi lower-cased.
    """
    doi = e.get('doi', '').lower()
    authortitle = author_id(e) + title_id(e)
    return doi, authortitle
entry identifier which is not the bibtex key
papers/bib.py
entry_id
aiotter/papers
105
python
def entry_id(e): '\n ' authortitle = .join([author_id(e), title_id(e)]) return (e.get('doi', ).lower(), authortitle)
def entry_id(e): '\n ' authortitle = .join([author_id(e), title_id(e)]) return (e.get('doi', ).lower(), authortitle)<|docstring|>entry identifier which is not the bibtex key<|endoftext|>
46ecabb6510f635dd3619de62d762832f4a725f6ba9397a1e806b38a4add4ee1
def compare_entries(e1, e2, fuzzy=False):
    """Assess two entries' similarity.

    Returns one of the duplicate-level constants (EXACT/GOOD/FAIR/PARTIAL),
    0 for no match, or — when ``fuzzy`` is True — a token-set ratio of the
    author+title strings.
    """
    if e1 == e2:
        return EXACT_DUPLICATES
    id1 = entry_id(e1)
    id2 = entry_id(e2)
    logger.debug('{} ?= {}'.format(id1, id2))
    # Compare only the fields that are non-empty on both sides.
    comparable = [a == b for a, b in zip(id1, id2) if a and b]
    if id1 == id2:
        return GOOD_DUPLICATES
    if all(comparable):
        return FAIR_DUPLICATES
    if any(comparable):
        return PARTIAL_DUPLICATES
    if not fuzzy:
        return 0
    from rapidfuzz.fuzz import token_set_ratio
    return token_set_ratio(id1[1], id2[1])
assess two entries' similarity
papers/bib.py
compare_entries
aiotter/papers
105
python
def compare_entries(e1, e2, fuzzy=False): "\n " if (e1 == e2): return EXACT_DUPLICATES id1 = entry_id(e1) id2 = entry_id(e2) logger.debug('{} ?= {}'.format(id1, id2)) if (id1 == id2): score = GOOD_DUPLICATES elif all([(f1 == f2) for (f1, f2) in zip(id1, id2) if (f1 and f2)]): score = FAIR_DUPLICATES elif any([(f1 == f2) for (f1, f2) in zip(id1, id2) if (f1 and f2)]): score = PARTIAL_DUPLICATES elif (not fuzzy): score = 0 else: from rapidfuzz.fuzz import token_set_ratio (doi1, tag1) = id1 (doi2, tag2) = id2 score = token_set_ratio(tag1, tag2) return score
def compare_entries(e1, e2, fuzzy=False): "\n " if (e1 == e2): return EXACT_DUPLICATES id1 = entry_id(e1) id2 = entry_id(e2) logger.debug('{} ?= {}'.format(id1, id2)) if (id1 == id2): score = GOOD_DUPLICATES elif all([(f1 == f2) for (f1, f2) in zip(id1, id2) if (f1 and f2)]): score = FAIR_DUPLICATES elif any([(f1 == f2) for (f1, f2) in zip(id1, id2) if (f1 and f2)]): score = PARTIAL_DUPLICATES elif (not fuzzy): score = 0 else: from rapidfuzz.fuzz import token_set_ratio (doi1, tag1) = id1 (doi2, tag2) = id2 score = token_set_ratio(tag1, tag2) return score<|docstring|>assess two entries' similarity<|endoftext|>
12e7720dd484a94239c36d79ed0a2ca76619ee26c1865589f71bce051d02a494
def hidden_bibtex(direc):
    """Path of the hidden bib file that stores metadata for a bundle of files."""
    hidden_name = '.' + os.path.basename(direc) + '.bib'
    return os.path.join(direc, hidden_name)
save metadata for a bundle of files
papers/bib.py
hidden_bibtex
aiotter/papers
105
python
def hidden_bibtex(direc): ' ' dirname = os.path.basename(direc) return os.path.join(direc, (('.' + dirname) + '.bib'))
def hidden_bibtex(direc): ' ' dirname = os.path.basename(direc) return os.path.join(direc, (('.' + dirname) + '.bib'))<|docstring|>save metadata for a bundle of files<|endoftext|>
226b10389b0a69cf9e5c8e4adf95d3b9c7159f352b5141a864f66a731dac8bb1
def read_entry_dir(self, direc, update_files=True):
    """Read the bibtex entry of a directory that bundles the files of a single entry.

    Parameters
    ----------
    direc : str
        The bundle directory; must contain a hidden ``.<dirname>.bib``
        file with exactly one entry.
    update_files : bool
        If True, refresh the entry's 'file' field from the non-hidden
        files currently present at the top level of the directory.

    Returns
    -------
    dict
        The bibtex entry.

    Raises
    ------
    TypeError
        If the hidden bib file is missing (``direc`` is not an entry dir).
    ValueError
        If the hidden bib file does not contain exactly one entry.
    """
    hidden_bib = hidden_bibtex(direc)
    if not os.path.exists(hidden_bib):
        raise TypeError('hidden bib missing: not an entry dir')
    # Close the file handle deterministically (the original leaked it).
    with open(hidden_bib) as f:
        db = bibtexparser.loads(f.read())
    # Was an assert; raise so the check survives `python -O`.
    if len(db.entries) != 1:
        raise ValueError('hidden bib must have one entry, got: ' + str(len(db.entries)))
    entry = db.entries[0]
    if update_files:
        # Only the top level of the bundle directory is scanned; direc is
        # known to exist because the hidden bib was found under it.
        _, _, filenames = next(os.walk(direc))
        paths = [os.path.join(direc, name) for name in filenames
                 if not name.startswith('.')]
        entry['file'] = format_file(paths)
    return entry
add a directory that contains files from a single entry
papers/bib.py
read_entry_dir
aiotter/papers
105
python
def read_entry_dir(self, direc, update_files=True): '\n ' dirname = os.path.basename(direc) hidden_bib = hidden_bibtex(direc) if (not os.path.exists(hidden_bib)): raise TypeError('hidden bib missing: not an entry dir') db = bibtexparser.loads(open(hidden_bib).read()) assert (len(db.entries) == 1), ('hidden bib must have one entry, got: ' + str(len(db.entries))) entry = db.entries[0] if update_files: for (root, direcs, files) in os.walk(direc): break files = [os.path.join(direc, file) for file in files if (not file.startswith('.'))] entry['file'] = format_file(files) return entry
def read_entry_dir(self, direc, update_files=True): '\n ' dirname = os.path.basename(direc) hidden_bib = hidden_bibtex(direc) if (not os.path.exists(hidden_bib)): raise TypeError('hidden bib missing: not an entry dir') db = bibtexparser.loads(open(hidden_bib).read()) assert (len(db.entries) == 1), ('hidden bib must have one entry, got: ' + str(len(db.entries))) entry = db.entries[0] if update_files: for (root, direcs, files) in os.walk(direc): break files = [os.path.join(direc, file) for file in files if (not file.startswith('.'))] entry['file'] = format_file(files) return entry<|docstring|>add a directory that contain files from a single entry<|endoftext|>
e6fb5d0144139e92bd76637b666792bdebebc1a7beeb3c3c974db029254e8f21
def entry_filecheck_metadata(e, file, image=False):
    """Compare an entry against metadata parsed from its PDF (doi only, for now).

    Raises ValueError when the entry has no doi, the PDF cannot be parsed,
    the parsed doi is invalid, or the two dois disagree.
    """
    if 'doi' not in e:
        raise ValueError(e['ID'] + ': no doi, skip PDF parsing')
    try:
        parsed = extract_pdf_doi(file, image=image)
    except Exception:
        raise ValueError(e['ID'] + ': failed to parse doi: "{}"'.format(file))
    if not isvaliddoi(parsed):
        raise ValueError(e['ID'] + ': invalid parsed doi: ' + parsed)
    # Case-insensitive comparison: dois are case-insensitive identifiers.
    if parsed.lower() != e['doi'].lower():
        raise ValueError(e['ID'] + ': doi: entry <=> pdf : {} <=> {}'.format(e['doi'].lower(), parsed.lower()))
parse pdf metadata and compare with entry: only doi for now
papers/bib.py
entry_filecheck_metadata
aiotter/papers
105
python
def entry_filecheck_metadata(e, file, image=False): ' \n ' if ('doi' not in e): raise ValueError((e['ID'] + ': no doi, skip PDF parsing')) try: doi = extract_pdf_doi(file, image=image) except Exception as error: raise ValueError((e['ID'] + ': failed to parse doi: "{}"'.format(file))) if (not isvaliddoi(doi)): raise ValueError(((e['ID'] + ': invalid parsed doi: ') + doi)) if (doi.lower() != e['doi'].lower()): raise ValueError((e['ID'] + ': doi: entry <=> pdf : {} <=> {}'.format(e['doi'].lower(), doi.lower())))
def entry_filecheck_metadata(e, file, image=False): ' \n ' if ('doi' not in e): raise ValueError((e['ID'] + ': no doi, skip PDF parsing')) try: doi = extract_pdf_doi(file, image=image) except Exception as error: raise ValueError((e['ID'] + ': failed to parse doi: "{}"'.format(file))) if (not isvaliddoi(doi)): raise ValueError(((e['ID'] + ': invalid parsed doi: ') + doi)) if (doi.lower() != e['doi'].lower()): raise ValueError((e['ID'] + ': doi: entry <=> pdf : {} <=> {}'.format(e['doi'].lower(), doi.lower())))<|docstring|>parse pdf metadata and compare with entry: only doi for now<|endoftext|>
d735c925d0b24cdcef17bbf8acac1fdb93964b7d48cf3b7b6625f35355425449
def generate_key(self, entry):
    """Generate a key for *entry* that collides with no existing record key."""
    existing = {self.key(e) for e in self.db.entries}
    return generate_key(entry, keys=existing, nauthor=self.nauthor, ntitle=self.ntitle)
generate a unique key not yet present in the record
papers/bib.py
generate_key
aiotter/papers
105
python
def generate_key(self, entry): ' ' keys = set((self.key(e) for e in self.db.entries)) return generate_key(entry, keys=keys, nauthor=self.nauthor, ntitle=self.ntitle)
def generate_key(self, entry): ' ' keys = set((self.key(e) for e in self.db.entries)) return generate_key(entry, keys=keys, nauthor=self.nauthor, ntitle=self.ntitle)<|docstring|>generate a unique key not yet present in the record<|endoftext|>
dd491a9cc09ab5470c8557a4b0943e357725c78079e775b14d7728a5531a4f55
def check_duplicates(self, key=None, eq=None, mode='i'):
    """Remove duplicates, in some sense (see papers.conflict.check_duplicates), then re-sort."""
    comparator = eq or self.eq
    self.entries = check_duplicates(
        self.entries, key=key, eq=comparator,
        issorted=(key is self.key), mode=mode)
    self.sort()
remove duplicates, in some sense (see papers.conflict.check_duplicates)
papers/bib.py
check_duplicates
aiotter/papers
105
python
def check_duplicates(self, key=None, eq=None, mode='i'): '\n ' self.entries = check_duplicates(self.entries, key=key, eq=(eq or self.eq), issorted=(key is self.key), mode=mode) self.sort()
def check_duplicates(self, key=None, eq=None, mode='i'): '\n ' self.entries = check_duplicates(self.entries, key=key, eq=(eq or self.eq), issorted=(key is self.key), mode=mode) self.sort()<|docstring|>remove duplicates, in some sensse (see papers.conflict.check_duplicates)<|endoftext|>
abc19cb95aad479943ca263b5d34b3916ce0d08bc30cb31af39a8b7d9bb5a39d
def wrap_function(lib, funcname, restype, argtypes):
    """Look up *funcname* in *lib* and declare its ctypes signature.

    Parameters
    ----------
    lib : ctypes.CDLL (or any object exposing the function as an attribute)
        The loaded shared library.
    funcname : str
        Name of the exported function.
    restype : ctypes type
        The function's return type.
    argtypes : list of ctypes types
        The function's argument types, in order.

    Returns
    -------
    The function pointer with ``restype``/``argtypes`` set.
    """
    # getattr() is the idiomatic spelling of lib.__getattr__(funcname) and
    # also covers attributes found through the normal lookup chain.
    func = getattr(lib, funcname)
    func.restype = restype
    func.argtypes = argtypes
    return func
Simplify wrapping ctypes functions
gym_battlesnake/gymbattlesnake.py
wrap_function
SahibSodhi/Tackling-BattleSnake-Game
37
python
def wrap_function(lib, funcname, restype, argtypes): func = lib.__getattr__(funcname) func.restype = restype func.argtypes = argtypes return func
def wrap_function(lib, funcname, restype, argtypes): func = lib.__getattr__(funcname) func.restype = restype func.argtypes = argtypes return func<|docstring|>Simplify wrapping ctypes functions<|endoftext|>
760bacbbbc27b1e8e9de6f19d9ebc8735b2baee510b71429da59972f44b2e5fc
def __init__(self, app=None):
    """Extension initialization; optionally bind to *app* immediately."""
    if app:
        self.app = app
        self.init_app(app)
Extension initialization.
reana_server/ext.py
__init__
diegodelemos/reana-server
0
python
def __init__(self, app=None): if app: self.app = app self.init_app(app)
def __init__(self, app=None): if app: self.app = app self.init_app(app)<|docstring|>Extension initialization.<|endoftext|>
52f6d8eb61e9fce41d069343d3372ba603dc49ba14426e9ff76abf414115ae56
def init_app(self, app):
    """Flask application initialization."""
    # Load REANA_* defaults into the app config and register the Flask-Menu
    # extension on this app.
    self.init_config(app)
    Menu(app=app)

    @app.teardown_appcontext
    def shutdown_reana_db_session(response_or_exc):
        'Close session on app teardown.'
        # Remove both the REANA and the Invenio scoped database sessions so
        # their connections are released after each app context.
        from reana_db.database import Session as reana_db_session
        from invenio_db import db as invenio_db
        reana_db_session.remove()
        invenio_db.session.remove()
        return response_or_exc

    @app.before_first_request
    def connect_signals():
        'Connect OAuthClient signals.'
        # Imports are done inside the handler — presumably to defer heavy /
        # circular imports until the app is fully set up; confirm.
        from invenio_oauthclient.signals import account_info_received
        from flask_security.signals import user_registered
        from .utils import _create_and_associate_local_user, _create_and_associate_oauth_user
        # Create/associate REANA users when OAuth account info arrives or a
        # local user registers.
        account_info_received.connect(_create_and_associate_oauth_user)
        user_registered.connect(_create_and_associate_local_user)
Flask application initialization.
reana_server/ext.py
init_app
diegodelemos/reana-server
0
python
def init_app(self, app): self.init_config(app) Menu(app=app) @app.teardown_appcontext def shutdown_reana_db_session(response_or_exc): 'Close session on app teardown.' from reana_db.database import Session as reana_db_session from invenio_db import db as invenio_db reana_db_session.remove() invenio_db.session.remove() return response_or_exc @app.before_first_request def connect_signals(): 'Connect OAuthClient signals.' from invenio_oauthclient.signals import account_info_received from flask_security.signals import user_registered from .utils import _create_and_associate_local_user, _create_and_associate_oauth_user account_info_received.connect(_create_and_associate_oauth_user) user_registered.connect(_create_and_associate_local_user)
def init_app(self, app): self.init_config(app) Menu(app=app) @app.teardown_appcontext def shutdown_reana_db_session(response_or_exc): 'Close session on app teardown.' from reana_db.database import Session as reana_db_session from invenio_db import db as invenio_db reana_db_session.remove() invenio_db.session.remove() return response_or_exc @app.before_first_request def connect_signals(): 'Connect OAuthClient signals.' from invenio_oauthclient.signals import account_info_received from flask_security.signals import user_registered from .utils import _create_and_associate_local_user, _create_and_associate_oauth_user account_info_received.connect(_create_and_associate_oauth_user) user_registered.connect(_create_and_associate_local_user)<|docstring|>Flask application initialization.<|endoftext|>
0c028c4e55a4aea61abec9ded1d150d846a104ff39582483bbdab428ac4839d3
def init_config(self, app):
    """Copy every REANA_* setting from the config module into the app,
    keeping any value the app has already set."""
    for name in dir(config):
        if name.startswith('REANA_'):
            app.config.setdefault(name, getattr(config, name))
Initialize configuration.
reana_server/ext.py
init_config
diegodelemos/reana-server
0
python
def init_config(self, app): for k in dir(config): if k.startswith('REANA_'): app.config.setdefault(k, getattr(config, k))
def init_config(self, app): for k in dir(config): if k.startswith('REANA_'): app.config.setdefault(k, getattr(config, k))<|docstring|>Initialize configuration.<|endoftext|>
5f919c23fd0fbdc6635e20a5d3467be9270a734902d393bbba4082c4d4348d70
@app.teardown_appcontext
def shutdown_reana_db_session(response_or_exc):
    """Close the scoped database sessions on app teardown."""
    from reana_db.database import Session as reana_session
    from invenio_db import db as invenio_database

    reana_session.remove()
    invenio_database.session.remove()
    return response_or_exc
Close session on app teardown.
reana_server/ext.py
shutdown_reana_db_session
diegodelemos/reana-server
0
python
@app.teardown_appcontext def shutdown_reana_db_session(response_or_exc): from reana_db.database import Session as reana_db_session from invenio_db import db as invenio_db reana_db_session.remove() invenio_db.session.remove() return response_or_exc
@app.teardown_appcontext def shutdown_reana_db_session(response_or_exc): from reana_db.database import Session as reana_db_session from invenio_db import db as invenio_db reana_db_session.remove() invenio_db.session.remove() return response_or_exc<|docstring|>Close session on app teardown.<|endoftext|>
6284c110c45af211858f97a0a6332939f666f27a3cf98680f1c93c194a05a553
@app.before_first_request
def connect_signals():
    """Wire OAuthClient / flask-security signals to the user-creation handlers."""
    from invenio_oauthclient.signals import account_info_received
    from flask_security.signals import user_registered
    from .utils import (_create_and_associate_local_user,
                        _create_and_associate_oauth_user)

    account_info_received.connect(_create_and_associate_oauth_user)
    user_registered.connect(_create_and_associate_local_user)
Connect OAuthClient signals.
reana_server/ext.py
connect_signals
diegodelemos/reana-server
0
python
@app.before_first_request def connect_signals(): from invenio_oauthclient.signals import account_info_received from flask_security.signals import user_registered from .utils import _create_and_associate_local_user, _create_and_associate_oauth_user account_info_received.connect(_create_and_associate_oauth_user) user_registered.connect(_create_and_associate_local_user)
@app.before_first_request def connect_signals(): from invenio_oauthclient.signals import account_info_received from flask_security.signals import user_registered from .utils import _create_and_associate_local_user, _create_and_associate_oauth_user account_info_received.connect(_create_and_associate_oauth_user) user_registered.connect(_create_and_associate_local_user)<|docstring|>Connect OAuthClient signals.<|endoftext|>
b97f3f6e6bf4172ca134509268b82b749d02a27900fa8fedeef82962ade6825f
@classmethod
def from_center(cls, center=None, width=None):
    """
    SpectralRegion class method that enables the definition of a
    `SpectralRegion` from the center and width rather than lower and
    upper bounds.

    Parameters
    ----------
    center : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit
        The center of the spectral region.
    width : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit
        The full width of the spectral region (upper bound - lower bound).

    Raises
    ------
    ValueError
        If ``width`` is not strictly positive.
    """
    if (width.value <= 0):
        raise ValueError('SpectralRegion width must be positive.')
    # For units that are not length-like (nor 'unknown', e.g. pixels), the
    # bound order is swapped — presumably because quantities such as
    # frequency/energy decrease as wavelength increases, keeping the bounds
    # consistent with the class's ordering convention; confirm against the
    # SpectralRegion constructor's expectations.
    if (center.unit.physical_type not in ('length', 'unknown')):
        return cls((center + (width / 2)), (center - (width / 2)))
    return cls((center - (width / 2)), (center + (width / 2)))
SpectralRegion class method that enables the definition of a `SpectralRegion` from the center and width rather than lower and upper bounds. Parameters ---------- center : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit The center of the spectral region. width : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit The full width of the spectral region (upper bound - lower bound).
specutils/spectra/spectral_region.py
from_center
havok2063/specutils
118
python
@classmethod def from_center(cls, center=None, width=None): '\n SpectralRegion class method that enables the definition of a\n `SpectralRegion` from the center and width rather than lower and\n upper bounds.\n\n Parameters\n ----------\n center : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit\n The center of the spectral region.\n width : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit\n The full width of the spectral region (upper bound - lower bound).\n ' if (width.value <= 0): raise ValueError('SpectralRegion width must be positive.') if (center.unit.physical_type not in ('length', 'unknown')): return cls((center + (width / 2)), (center - (width / 2))) return cls((center - (width / 2)), (center + (width / 2)))
@classmethod def from_center(cls, center=None, width=None): '\n SpectralRegion class method that enables the definition of a\n `SpectralRegion` from the center and width rather than lower and\n upper bounds.\n\n Parameters\n ----------\n center : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit\n The center of the spectral region.\n width : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit\n The full width of the spectral region (upper bound - lower bound).\n ' if (width.value <= 0): raise ValueError('SpectralRegion width must be positive.') if (center.unit.physical_type not in ('length', 'unknown')): return cls((center + (width / 2)), (center - (width / 2))) return cls((center - (width / 2)), (center + (width / 2)))<|docstring|>SpectralRegion class method that enables the definition of a `SpectralRegion` from the center and width rather than lower and upper bounds. Parameters ---------- center : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit The center of the spectral region. width : Scalar `~astropy.units.Quantity` with pixel or any valid ``spectral_axis`` unit The full width of the spectral region (upper bound - lower bound).<|endoftext|>
946f971ad0ea63d44ea052478cd9a91391ce600dbf647eeb10e55d79f1b2aebe
@classmethod
def from_line_list(cls, table, width=1):
    """Build a ``SpectralRegion`` from a line-list table.

    Parameters
    ----------
    table : `~astropy.table.QTable`
        List of found lines, e.g. from
        `~specutils.fitting.find_lines_derivative` or
        `~specutils.fitting.find_lines_threshold`.
    width : float
        Full width of each spectral line region; if given without units,
        the line-list table's units are assumed.

    Returns
    -------
    `~specutils.SpectralRegion`
        One sub-region per line, centered on ``line_center``.
    """
    half = u.Quantity(width, table['line_center'].unit) * 0.5
    bounds = [(center - half, center + half) for center in table['line_center']]
    return cls(bounds)
Generate a ``SpectralRegion`` instance from the `~astropy.table.QTable` object returned from `~specutils.fitting.find_lines_derivative` or `~specutils.fitting.find_lines_threshold`. Parameters ---------- table : `~astropy.table.QTable` List of found lines. width : float The width of the spectral line region. If not unit information is provided, it's assumed to be the same units as used in the line list table. Returns ------- `~specutils.SpectralRegion` The spectral region based on the line list.
specutils/spectra/spectral_region.py
from_line_list
havok2063/specutils
118
python
@classmethod def from_line_list(cls, table, width=1): "\n Generate a ``SpectralRegion`` instance from the `~astropy.table.QTable`\n object returned from `~specutils.fitting.find_lines_derivative` or\n `~specutils.fitting.find_lines_threshold`.\n\n Parameters\n ----------\n table : `~astropy.table.QTable`\n List of found lines.\n width : float\n The width of the spectral line region. If not unit information is\n provided, it's assumed to be the same units as used in the line\n list table.\n\n Returns\n -------\n `~specutils.SpectralRegion`\n The spectral region based on the line list.\n " width = u.Quantity(width, table['line_center'].unit) return cls([((x - (width * 0.5)), (x + (width * 0.5))) for x in table['line_center']])
@classmethod def from_line_list(cls, table, width=1): "\n Generate a ``SpectralRegion`` instance from the `~astropy.table.QTable`\n object returned from `~specutils.fitting.find_lines_derivative` or\n `~specutils.fitting.find_lines_threshold`.\n\n Parameters\n ----------\n table : `~astropy.table.QTable`\n List of found lines.\n width : float\n The width of the spectral line region. If not unit information is\n provided, it's assumed to be the same units as used in the line\n list table.\n\n Returns\n -------\n `~specutils.SpectralRegion`\n The spectral region based on the line list.\n " width = u.Quantity(width, table['line_center'].unit) return cls([((x - (width * 0.5)), (x + (width * 0.5))) for x in table['line_center']])<|docstring|>Generate a ``SpectralRegion`` instance from the `~astropy.table.QTable` object returned from `~specutils.fitting.find_lines_derivative` or `~specutils.fitting.find_lines_threshold`. Parameters ---------- table : `~astropy.table.QTable` List of found lines. width : float The width of the spectral line region. If not unit information is provided, it's assumed to be the same units as used in the line list table. Returns ------- `~specutils.SpectralRegion` The spectral region based on the line list.<|endoftext|>
8cbc878464b951da73dadfec8c3f099f683c3087e1b3dc747f73ed17101d7ff3
def _info(self):
    """Build a human-readable, column-formatted listing of the sub-regions."""
    out = 'Spectral Region, {} sub-regions:\n'.format(len(self._subregions))
    labels = [' ({}, {})'.format(sub[0], sub[1]) for sub in self._subregions]
    # Column width fits the longest label; pack as many columns as fit in 70 chars.
    width = max(len(lab) for lab in labels) + 1
    per_row = 70 // width
    template = '{' + ':<{}'.format(width) + '}'
    for idx, lab in enumerate(labels):
        out += template.format(lab)
        if idx % per_row == per_row - 1:
            out += '\n'
    return out
Pretty print the sub-regions.
specutils/spectra/spectral_region.py
_info
havok2063/specutils
118
python
def _info(self): '\n \n ' toreturn = 'Spectral Region, {} sub-regions:\n'.format(len(self._subregions)) subregion_text = [] for (ii, subregion) in enumerate(self._subregions): subregion_text.append(' ({}, {})'.format(subregion[0], subregion[1])) max_len = (max((len(srt) for srt in subregion_text)) + 1) ncols = (70 // max_len) fmt = (('{' + ':<{}'.format(max_len)) + '}') for (ii, srt) in enumerate(subregion_text): toreturn += fmt.format(srt) if ((ii % ncols) == (ncols - 1)): toreturn += '\n' return toreturn
def _info(self): '\n \n ' toreturn = 'Spectral Region, {} sub-regions:\n'.format(len(self._subregions)) subregion_text = [] for (ii, subregion) in enumerate(self._subregions): subregion_text.append(' ({}, {})'.format(subregion[0], subregion[1])) max_len = (max((len(srt) for srt in subregion_text)) + 1) ncols = (70 // max_len) fmt = (('{' + ':<{}'.format(max_len)) + '}') for (ii, srt) in enumerate(subregion_text): toreturn += fmt.format(srt) if ((ii % ncols) == (ncols - 1)): toreturn += '\n' return toreturn<|docstring|>Pretty print the sub-regions.<|endoftext|>
ff589355e9cace6694557cd086e3ad629e46add08e8c83c1443e49b9d18d56c8
def __add__(self, other): '\n Ability to add two SpectralRegion classes together.\n ' return SpectralRegion((self._subregions + other._subregions))
Ability to add two SpectralRegion classes together.
specutils/spectra/spectral_region.py
__add__
havok2063/specutils
118
python
def __add__(self, other): '\n \n ' return SpectralRegion((self._subregions + other._subregions))
def __add__(self, other): '\n \n ' return SpectralRegion((self._subregions + other._subregions))<|docstring|>Ability to add two SpectralRegion classes together.<|endoftext|>
8e2557ccab9a3e1b5be05f1f25ed50ec9eedba5e9a190b973fa54709eb1a8489
def __iadd__(self, other): '\n Ability to add one SpectralRegion to another using +=.\n ' self._subregions += other._subregions self._reorder() return self
Ability to add one SpectralRegion to another using +=.
specutils/spectra/spectral_region.py
__iadd__
havok2063/specutils
118
python
def __iadd__(self, other): '\n \n ' self._subregions += other._subregions self._reorder() return self
def __iadd__(self, other): '\n \n ' self._subregions += other._subregions self._reorder() return self<|docstring|>Ability to add one SpectralRegion to another using +=.<|endoftext|>
1a0b3305db6927fc33683f396171dac0ead72dd3c8c3763acf2e85dac83cc976
def __len__(self): '\n Number of spectral regions.\n ' return len(self._subregions)
Number of spectral regions.
specutils/spectra/spectral_region.py
__len__
havok2063/specutils
118
python
def __len__(self): '\n \n ' return len(self._subregions)
def __len__(self): '\n \n ' return len(self._subregions)<|docstring|>Number of spectral regions.<|endoftext|>
4abd63b36d789413899383c002f824d223bc275e7d35a375a8a6c1e5c442e250
def __getslice__(self, item): '\n Enable slicing of the SpectralRegion list.\n ' return SpectralRegion(self._subregions[item])
Enable slicing of the SpectralRegion list.
specutils/spectra/spectral_region.py
__getslice__
havok2063/specutils
118
python
def __getslice__(self, item): '\n \n ' return SpectralRegion(self._subregions[item])
def __getslice__(self, item): '\n \n ' return SpectralRegion(self._subregions[item])<|docstring|>Enable slicing of the SpectralRegion list.<|endoftext|>
46d844c4f6475a34847389a26bdf58980ac898aea43fa882098da9c36480f8f4
def __getitem__(self, item): '\n Enable slicing or extracting the SpectralRegion.\n ' if isinstance(item, slice): return self.__getslice__(item) else: return SpectralRegion([self._subregions[item]])
Enable slicing or extracting the SpectralRegion.
specutils/spectra/spectral_region.py
__getitem__
havok2063/specutils
118
python
def __getitem__(self, item): '\n \n ' if isinstance(item, slice): return self.__getslice__(item) else: return SpectralRegion([self._subregions[item]])
def __getitem__(self, item): '\n \n ' if isinstance(item, slice): return self.__getslice__(item) else: return SpectralRegion([self._subregions[item]])<|docstring|>Enable slicing or extracting the SpectralRegion.<|endoftext|>
de086df5a2670c68a38ba78ce8d4ad1b2b5614d97800e695a32f2fb714afd0e8
def __delitem__(self, item): '\n Delete a specific item from the list.\n ' del self._subregions[item]
Delete a specific item from the list.
specutils/spectra/spectral_region.py
__delitem__
havok2063/specutils
118
python
def __delitem__(self, item): '\n \n ' del self._subregions[item]
def __delitem__(self, item): '\n \n ' del self._subregions[item]<|docstring|>Delete a specific item from the list.<|endoftext|>