code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import os
import copy
from parser import Parser
import json
import argparse
from tqdm import tqdm
def get_data_paths(ace2005_path, data_list_path='./data_list.csv'):
    """Split the ACE2005 corpus documents into test/dev/train path lists.

    Args:
        ace2005_path: Root directory of the ACE2005 Chinese data.
        data_list_path: CSV file with a header row followed by
            ``data_type,name`` rows (default './data_list.csv'; previously
            hard-coded, now a parameter for reuse/testing).

    Returns:
        Tuple ``(test_files, dev_files, train_files)`` of path lists.
    """
    test_files, dev_files, train_files = [], [], []
    with open(data_list_path, mode='r') as csv_file:
        rows = csv_file.readlines()
        # rows[0] is the CSV header -- skip it.
        for row in rows[1:]:
            items = row.replace('\n', '').split(',')
            data_type = items[0]
            name = items[1]
            path = os.path.join(ace2005_path, name)
            if data_type == 'test':
                test_files.append(path)
            elif data_type == 'dev':
                dev_files.append(path)
            elif data_type == 'train':
                train_files.append(path)
    return test_files, dev_files, train_files
def find_all(sub, s):
    """Return every index at which *sub* occurs in *s*.

    Mirrors the ``str.find`` convention: if there is no occurrence at all,
    the result is ``[-1]`` rather than an empty list.
    """
    positions = []
    next_hit = s.find(sub)
    while next_hit != -1:
        positions.append(next_hit)
        next_hit = s.find(sub, next_hit + 1)
    return positions if positions else [-1]
def find_token_index(tokens, start_pos, end_pos, phrase):
    """Locate *phrase* in *tokens* near the expected ``[start_pos, end_pos)`` span.

    If the expected slice already equals *phrase*, the given span is kept
    unchanged.  Otherwise all occurrences of *phrase* are considered and
    the one whose start is closest to *start_pos* wins; ``(-10, -10)`` is
    the sentinel for "phrase not found at all".
    """
    start_idx, end_idx = start_pos, end_pos
    if tokens[start_idx: end_idx] != phrase:
        # print(tokens)
        candidates = find_all(phrase, tokens)
        if candidates[0] == -1:
            # Phrase does not occur anywhere -- flag with sentinel indices.
            start_idx, end_idx = -10, -10
            print(tokens)
        elif len(candidates) == 1:
            start_idx = candidates[0]
            end_idx = start_idx + len(phrase)
        else:
            # Several occurrences: pick the one nearest the expected start.
            distances = [abs(c - start_idx) for c in candidates]
            start_idx = candidates[distances.index(min(distances))]
            end_idx = start_idx + len(phrase)
    return start_idx, end_idx
def preprocessing(data_type, files):
    """Convert parsed ACE2005 documents into JSON with sentence-relative spans.

    For every sentence, the document-level character offsets of entity
    mentions, event triggers and event arguments are rewritten into indices
    relative to the sentence itself (via ``find_token_index``).

    Args:
        data_type: Name of the split ('test', 'dev' or 'train'); also used
            as the output file stem under output/Chinese/.
        files: Document paths belonging to this split.

    Side effects:
        Writes output/Chinese/{data_type}.json and prints corpus statistics.
    """
    result = []
    event_count, entity_count, sent_count = 0, 0, 0
    print('-' * 20)
    print('[preprocessing] type: ', data_type)
    for file in tqdm(files):
        parser = Parser(path=file)
        entity_count += len(parser.entity_mentions)
        event_count += len(parser.event_mentions)
        sent_count += len(parser.sents_with_pos)
        for item in parser.get_data():
            data = dict()
            data['sentence'] = item['sentence']
            data['golden-entity-mentions'] = []
            data['golden-event-mentions'] = []
            tokens = item['sentence']
            sent_start_pos = item['position'][0]
            # Rewrite entity-mention offsets relative to the sentence start.
            for entity_mention in item['golden-entity-mentions']:
                position = entity_mention['position']
                start_idx, end_idx = find_token_index(
                    tokens=tokens,
                    start_pos=position[0] - sent_start_pos,
                    end_pos=position[1] - sent_start_pos + 1,
                    phrase=entity_mention['text'],
                )
                entity_mention['start'] = start_idx
                entity_mention['end'] = end_idx
                del entity_mention['position']
                data['golden-entity-mentions'].append(entity_mention)
            # Rewrite event-mention (trigger) offsets relative to the sentence start.
            for event_mention in item['golden-event-mentions']:
                # The same event mention can be shared between sentences,
                # so mutate a copy rather than the parser's object.
                event_mention = copy.deepcopy(event_mention)
                position = event_mention['trigger']['position']
                start_idx, end_idx = find_token_index(
                    tokens=tokens,
                    start_pos=position[0] - sent_start_pos,
                    end_pos=position[1] - sent_start_pos + 1,
                    phrase=event_mention['trigger']['text'],
                )
                event_mention['trigger']['start'] = start_idx
                event_mention['trigger']['end'] = end_idx
                del event_mention['trigger']['position']
                del event_mention['position']
                # Rewrite each argument's offsets relative to the sentence start.
                arguments = []
                for argument in event_mention['arguments']:
                    position = argument['position']
                    start_idx, end_idx = find_token_index(
                        tokens=tokens,
                        start_pos=position[0] - sent_start_pos,
                        end_pos=position[1] - sent_start_pos + 1,
                        phrase=argument['text'],
                    )
                    argument['start'] = start_idx
                    argument['end'] = end_idx
                    del argument['position']
                    arguments.append(argument)
                event_mention['arguments'] = arguments
                data['golden-event-mentions'].append(event_mention)
            result.append(data)
    print('sent_count :', sent_count)
    print('event_count :', event_count)
    print('entity_count :', entity_count)
    with open('output/Chinese/{}.json'.format(data_type), 'w') as f:
        json.dump(result, f, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    # CLI entry point: preprocess each split of the ACE2005 Chinese corpus.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data', help="Path of ACE2005 Chinese data", default='./data/Chinese')
    cli_args = arg_parser.parse_args()
    test_files, dev_files, train_files = get_data_paths(cli_args.data)
    for split_name, split_files in (('test', test_files), ('dev', dev_files), ('train', train_files)):
        preprocessing(split_name, split_files)
| [
"argparse.ArgumentParser",
"tqdm.tqdm",
"os.path.join",
"parser.Parser",
"copy.deepcopy",
"json.dump"
] | [((1837, 1848), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (1841, 1848), False, 'from tqdm import tqdm\n'), ((4968, 4993), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4991, 4993), False, 'import argparse\n'), ((1867, 1884), 'parser.Parser', 'Parser', ([], {'path': 'file'}), '(path=file)\n', (1873, 1884), False, 'from parser import Parser\n'), ((4875, 4925), 'json.dump', 'json.dump', (['result', 'f'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(result, f, indent=2, ensure_ascii=False)\n', (4884, 4925), False, 'import json\n'), ((442, 474), 'os.path.join', 'os.path.join', (['ace2005_path', 'name'], {}), '(ace2005_path, name)\n', (454, 474), False, 'import os\n'), ((3222, 3250), 'copy.deepcopy', 'copy.deepcopy', (['event_mention'], {}), '(event_mention)\n', (3235, 3250), False, 'import copy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 6 21:00:25 2018
@author: Vishwesh
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# NOTE(review): uses the deprecated TF1.x graph/session API
# (tf.placeholder, tf.Session); runs only under TensorFlow 1.x.
# Where the MNIST archives are downloaded/cached.
DATA_DIR = "/tmp/data"
# Number of gradient-descent steps and examples per minibatch.
NUM_STEPS=1000
MINIBATCH_SIZE=32
# Load MNIST with labels encoded as one-hot vectors of length 10.
data = input_data.read_data_sets(DATA_DIR,one_hot=True)
# Placeholders: flattened 28x28 images and their one-hot labels.
x = tf.placeholder(tf.float32,[None,784])
# Weight matrix of the linear model, initialised to zeros.
W = tf.Variable(tf.zeros([784,10]))
y_true = tf.placeholder(tf.float32,[None,10])
# Linear (softmax-regression) model: class logits are x @ W.
y_pred = tf.matmul(x,W)
# Mean cross-entropy loss; the op applies softmax internally,
# so y_pred holds raw logits.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=y_pred,
        labels=y_true))
# One SGD update (learning rate 0.5) on the loss.
gd_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Accuracy: fraction of examples whose argmax prediction matches the label.
correct_mask = tf.equal(tf.argmax(y_pred,1),
                        tf.argmax(y_true,1))
accuracy = tf.reduce_mean(tf.cast(correct_mask,tf.float32))
with tf.Session() as sess:
    # Train: run NUM_STEPS minibatch SGD updates.
    sess.run(tf.global_variables_initializer())
    for _ in range(NUM_STEPS):
        batch_xs,batch_ys = data.train.next_batch(MINIBATCH_SIZE)
        sess.run(gd_step,feed_dict={x: batch_xs,y_true: batch_ys})
    # Evaluate once on the full test set.
    ans = sess.run(accuracy,feed_dict={x:data.test.images,
                                      y_true:data.test.labels})
    print("Accuracy = {:.4}%".format(ans*100))
| [
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.argmax",
"tensorflow.global_variables_initializer",
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tenso... | [((250, 299), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['DATA_DIR'], {'one_hot': '(True)'}), '(DATA_DIR, one_hot=True)\n', (275, 299), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((306, 345), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 784]'], {}), '(tf.float32, [None, 784])\n', (320, 345), True, 'import tensorflow as tf\n'), ((393, 431), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {}), '(tf.float32, [None, 10])\n', (407, 431), True, 'import tensorflow as tf\n'), ((440, 455), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (449, 455), True, 'import tensorflow as tf\n'), ((361, 380), 'tensorflow.zeros', 'tf.zeros', (['[784, 10]'], {}), '([784, 10])\n', (369, 380), True, 'import tensorflow as tf\n'), ((489, 558), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'y_pred', 'labels': 'y_true'}), '(logits=y_pred, labels=y_true)\n', (528, 558), True, 'import tensorflow as tf\n'), ((682, 702), 'tensorflow.argmax', 'tf.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (691, 702), True, 'import tensorflow as tf\n'), ((729, 749), 'tensorflow.argmax', 'tf.argmax', (['y_true', '(1)'], {}), '(y_true, 1)\n', (738, 749), True, 'import tensorflow as tf\n'), ((779, 812), 'tensorflow.cast', 'tf.cast', (['correct_mask', 'tf.float32'], {}), '(correct_mask, tf.float32)\n', (786, 812), True, 'import tensorflow as tf\n'), ((821, 833), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (831, 833), True, 'import tensorflow as tf\n'), ((592, 630), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.5)'], {}), '(0.5)\n', (625, 630), True, 'import tensorflow as tf\n'), ((857, 890), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (888, 890), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-28 09:09
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: creates FeatureDetail (a one-to-one
    extension of planner.Feature holding requirement text and coverage
    percentages), renames OperationLogs to ChangeList, and drops FeatureItem.
    """
    dependencies = [
        ('planner', '0019_auto_20171028_1706'),
    ]
    operations = [
        # New per-feature detail record; every *_cov field is a percentage,
        # capped at 100 by a MaxValueValidator.
        migrations.CreateModel(
            name='FeatureDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('priority', models.CharField(choices=[('p1', 'P1'), ('p2', 'P2'), ('p3', 'P3')], default='p1', max_length=10)),
                ('sim_req', models.CharField(max_length=128, verbose_name='Simulation Requirements')),
                ('seq_req', models.CharField(max_length=128, verbose_name='Sequence Requirements')),
                ('check_desp', models.CharField(max_length=128, verbose_name='Checking Description')),
                ('func_cov_req', models.CharField(max_length=128, verbose_name='Func Cov Requirements')),
                ('measure_src', models.TextField(verbose_name='Measure Source')),
                ('test_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Testcase Coverage')),
                ('line_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Line Coverage')),
                ('con_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Conditional Coverage')),
                ('toggle_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Toggle Coverage')),
                ('fsm_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='FSM Coverage')),
                ('branch_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Branch Coverage')),
                ('assert_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Assertion Coverage')),
                ('func_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Functional Coverage')),
                # Deleting a Feature cascades to its FeatureDetail.
                ('feature', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='planner.Feature')),
            ],
        ),
        migrations.RenameModel(
            old_name='OperationLogs',
            new_name='ChangeList',
        ),
        # FeatureItem is superseded by FeatureDetail; drop its FK first.
        migrations.RemoveField(
            model_name='featureitem',
            name='feature',
        ),
        migrations.DeleteModel(
            name='FeatureItem',
        ),
    ]
| [
"django.db.models.OneToOneField",
"django.db.migrations.DeleteModel",
"django.db.models.TextField",
"django.db.migrations.RenameModel",
"django.db.models.AutoField",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((2682, 2753), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""OperationLogs"""', 'new_name': '"""ChangeList"""'}), "(old_name='OperationLogs', new_name='ChangeList')\n", (2704, 2753), False, 'from django.db import migrations, models\n'), ((2798, 2862), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""featureitem"""', 'name': '"""feature"""'}), "(model_name='featureitem', name='feature')\n", (2820, 2862), False, 'from django.db import migrations, models\n'), ((2907, 2949), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""FeatureItem"""'}), "(name='FeatureItem')\n", (2929, 2949), False, 'from django.db import migrations, models\n'), ((465, 558), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (481, 558), False, 'from django.db import migrations, models\n'), ((586, 687), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('p1', 'P1'), ('p2', 'P2'), ('p3', 'P3')]", 'default': '"""p1"""', 'max_length': '(10)'}), "(choices=[('p1', 'P1'), ('p2', 'P2'), ('p3', 'P3')],\n default='p1', max_length=10)\n", (602, 687), False, 'from django.db import migrations, models\n'), ((714, 786), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""Simulation Requirements"""'}), "(max_length=128, verbose_name='Simulation Requirements')\n", (730, 786), False, 'from django.db import migrations, models\n'), ((817, 887), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""Sequence Requirements"""'}), "(max_length=128, verbose_name='Sequence Requirements')\n", (833, 887), False, 'from django.db import migrations, models\n'), ((921, 990), 'django.db.models.CharField', 'models.CharField', 
([], {'max_length': '(128)', 'verbose_name': '"""Checking Description"""'}), "(max_length=128, verbose_name='Checking Description')\n", (937, 990), False, 'from django.db import migrations, models\n'), ((1026, 1096), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""Func Cov Requirements"""'}), "(max_length=128, verbose_name='Func Cov Requirements')\n", (1042, 1096), False, 'from django.db import migrations, models\n'), ((1131, 1178), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Measure Source"""'}), "(verbose_name='Measure Source')\n", (1147, 1178), False, 'from django.db import migrations, models\n'), ((2558, 2650), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""planner.Feature"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'planner.Feature')\n", (2578, 2650), False, 'from django.db import migrations, models\n')] |
from __future__ import annotations
from conflowgen.posthoc_analyses.inbound_and_outbound_vehicle_capacity_analysis import \
InboundAndOutboundVehicleCapacityAnalysis
from conflowgen.reporting import AbstractReportWithMatplotlib
class InboundAndOutboundVehicleCapacityAnalysisReport(AbstractReportWithMatplotlib):
    """
    Turns the data produced by :class:`.InboundAndOutboundVehicleCapacityAnalysis`
    into a user-facing representation -- either a plain-text table or a bar chart.
    """

    report_description = """
    Analyze the vehicle capacity by vehicle type for the inbound and outbound journeys and check for the maximum
    capacity of each vehicle type.
    If e.g. for the vehicle type 'feeder' the maximum outbound capacity is used up, most likely there are more vehicles
    that deliver containers destined for feeder vessels than there are feeder vessels planned during the period of data
    generation (between `start_date` and `end_date`).
    """

    def __init__(self):
        super().__init__()
        # The analysis object does the actual number crunching.
        self.analysis = InboundAndOutboundVehicleCapacityAnalysis(
            transportation_buffer=self.transportation_buffer
        )

    def get_report_as_text(self) -> str:
        """Render the capacities as a fixed-width text table."""
        inbound, outbound_actual, outbound_max = self._get_capacities()
        header = (
            "\n"
            "vehicle type "
            "inbound capacity "
            "outbound actual capacity "
            "outbound max capacity"
            "\n"
        )
        rows = []
        for vehicle_type in self.order_of_vehicle_types_in_report:
            readable_name = str(vehicle_type).replace("_", " ")
            rows.append(
                f"{readable_name:<15} "
                f"{inbound[vehicle_type]:>16.1f} "
                f"{outbound_actual[vehicle_type]:>24.1f} "
                f"{outbound_max[vehicle_type]:>21.1f}\n"
            )
        return header + "".join(rows) + "(rounding errors might exist)\n"

    def get_report_as_graph(self) -> object:
        """
        The report as a graph is represented as a bar chart using pandas.

        Returns:
            The matplotlib axis of the bar chart.
        """
        import pandas as pd  # pylint: disable=import-outside-toplevel
        import seaborn as sns  # pylint: disable=import-outside-toplevel
        sns.set_palette(sns.color_palette())
        inbound, outbound_actual, outbound_max = self._get_capacities()
        frame = pd.DataFrame({
            "inbound capacities": inbound,
            "outbound actual capacities": outbound_actual,
            "outbound maximum capacities": outbound_max
        })
        # Human-readable row labels (e.g. "deep_sea_vessel" -> "deep sea vessel").
        frame.index = [str(entry).replace("_", " ") for entry in frame.index]
        axis = frame.plot.barh()
        axis.set_xlabel("Capacity (in TEU)")
        axis.set_title("Inbound and outbound vehicle capacity analysis")
        return axis

    def _get_capacities(self):
        """Refresh the analysis with the current buffer and fetch all capacities."""
        assert self.transportation_buffer is not None
        self.analysis.update(
            transportation_buffer=self.transportation_buffer
        )
        inbound_capacities = self.analysis.get_inbound_capacity_of_vehicles()
        outbound_actual, outbound_maximum = self.analysis.get_outbound_capacity_of_vehicles()
        return inbound_capacities, outbound_actual, outbound_maximum
| [
"pandas.DataFrame",
"conflowgen.posthoc_analyses.inbound_and_outbound_vehicle_capacity_analysis.InboundAndOutboundVehicleCapacityAnalysis",
"seaborn.color_palette"
] | [((1102, 1198), 'conflowgen.posthoc_analyses.inbound_and_outbound_vehicle_capacity_analysis.InboundAndOutboundVehicleCapacityAnalysis', 'InboundAndOutboundVehicleCapacityAnalysis', ([], {'transportation_buffer': 'self.transportation_buffer'}), '(transportation_buffer=self.\n transportation_buffer)\n', (1143, 1198), False, 'from conflowgen.posthoc_analyses.inbound_and_outbound_vehicle_capacity_analysis import InboundAndOutboundVehicleCapacityAnalysis\n'), ((2653, 2835), 'pandas.DataFrame', 'pd.DataFrame', (["{'inbound capacities': inbound_capacities, 'outbound actual capacities':\n outbound_actual_capacities, 'outbound maximum capacities':\n outbound_maximum_capacities}"], {}), "({'inbound capacities': inbound_capacities,\n 'outbound actual capacities': outbound_actual_capacities,\n 'outbound maximum capacities': outbound_maximum_capacities})\n", (2665, 2835), True, 'import pandas as pd\n'), ((2509, 2528), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (2526, 2528), True, 'import seaborn as sns\n')] |
"""Transformer from 'Attention is all you need' (Vaswani et al., 2017)"""
# Reference: https://www.tensorflow.org/text/tutorials/transformer
# Reference: https://keras.io/examples/nlp/text_classification_with_transformer/
import numpy as np
import tensorflow as tf
class Transformer(tf.keras.Model):
    """Full encoder-decoder Transformer (Vaswani et al., 2017)."""

    def __init__(
        self,
        num_layers,
        d_model,
        num_heads,
        dff,
        input_vocab_size,
        target_vocab_size,
        pe_input,
        pe_target,
        rate=0.1,
    ):
        super().__init__()
        self.encoder = Encoder(
            num_layers, d_model, num_heads, dff, input_vocab_size, pe_input, rate
        )
        self.decoder = Decoder(
            num_layers, d_model, num_heads, dff, target_vocab_size, pe_target, rate
        )
        # Projects decoder states onto target-vocabulary logits.
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inputs, training):
        # Keras models prefer all inputs bundled into the first argument.
        source_seq, target_seq = inputs
        enc_padding_mask, look_ahead_mask, dec_padding_mask = self.create_masks(
            source_seq, target_seq
        )
        # (batch_size, inp_seq_len, d_model)
        enc_output = self.encoder(source_seq, training, enc_padding_mask)
        # (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights = self.decoder(
            target_seq, enc_output, training, look_ahead_mask, dec_padding_mask
        )
        # (batch_size, tar_seq_len, target_vocab_size)
        logits = self.final_layer(dec_output)
        return logits, attention_weights

    def create_masks(self, inp, tar):
        """Build the three attention masks used by encoder and decoder."""
        # Encoder self-attention and the decoder's 2nd (cross-attention)
        # block both mask the padded positions of the input sequence.
        enc_padding_mask = _create_padding_mask(inp)
        dec_padding_mask = _create_padding_mask(inp)
        # The decoder's 1st (self-attention) block may attend neither to
        # future tokens nor to padding of the target sequence.
        combined_mask = tf.maximum(
            _create_padding_mask(tar),
            _create_look_ahead_mask(tf.shape(tar)[1]),
        )
        return enc_padding_mask, combined_mask, dec_padding_mask
class Encoder(tf.keras.layers.Layer):
    """Transformer encoder stack (Vaswani et al., 2017).

    Combines:
    1. token embedding,
    2. fixed positional encoding,
    3. N stacked encoder layers.
    """

    def __init__(
        self,
        num_layers,
        d_model,
        num_heads,
        dff,
        input_vocab_size,
        maximum_position_encoding,
        rate=0.1,
    ):
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
        self.pos_encoding = _positional_encoding(
            maximum_position_encoding, self.d_model
        )
        self.enc_layers = [
            EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)
        ]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        seq_len = tf.shape(x)[1]
        # Embed, rescale by sqrt(d_model), then add the positional signal.
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        # Pass the sequence through each encoder layer in order.
        for encoder_layer in self.enc_layers:
            x = encoder_layer(x, training, mask)
        return x  # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder stack: embedding + positional encoding + N layers.

    Besides the decoded sequence, ``call`` also returns the attention
    weights of both attention blocks of every layer, keyed by layer number.
    """

    def __init__(
        self,
        num_layers,
        d_model,
        num_heads,
        dff,
        target_vocab_size,
        maximum_position_encoding,
        rate=0.1,
    ):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = _positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [
            DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)
        ]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        # Embed, rescale by sqrt(d_model), then add the positional signal.
        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for layer_number, decoder_layer in enumerate(self.dec_layers, start=1):
            x, block1, block2 = decoder_layer(
                x, enc_output, training, look_ahead_mask, padding_mask
            )
            attention_weights[f"decoder_layer{layer_number}_block1"] = block1
            attention_weights[f"decoder_layer{layer_number}_block2"] = block2
        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights
class EncoderLayer(tf.keras.layers.Layer):
    """One Transformer encoder block (Vaswani et al., 2017).

    Multi-head self-attention followed by a position-wise feed-forward
    network, each wrapped in dropout plus a residual connection with layer
    normalization.  Self-attention (rather than recurrence) is what keeps
    the path length between long-range dependencies short (Section 4 of
    the paper).
    """

    def __init__(self, d_model=512, num_heads=8, dff=2048, rate=0.1):
        """Initialize a Transformer encoder layer.

        Attributes
        ----------
        d_model : int
            Model dimension used on all sub-layers and embedding.
        num_heads : int
            Number of attention heads ($h$ in Vaswani et al., 2017).
        dff : int
            Inner dimension of the feed-forward network.
        rate : float
            Dropout rate applied after self-attention and feed-forward.
        """
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
        self.ffn = _point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        # Self-attention sub-layer with residual connection.
        self_attention, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        self_attention = self.dropout1(self_attention, training=training)
        normed = self.layernorm1(x + self_attention)  # (batch_size, input_seq_len, d_model)
        # Feed-forward sub-layer with residual connection.
        ffn_out = self.ffn(normed)  # (batch_size, input_seq_len, d_model)
        ffn_out = self.dropout2(ffn_out, training=training)
        return self.layernorm2(normed + ffn_out)  # (batch_size, input_seq_len, d_model)
class DecoderLayer(tf.keras.layers.Layer):
    """One Transformer decoder block (Vaswani et al., 2017).

    Like the encoder block, but with a third sub-layer performing
    multi-head attention over the encoder output, and with its
    self-attention masked so a position cannot attend to later positions
    (predictions for position i depend only on known outputs before i).
    """

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
        self.mha2 = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
        self.ffn = _point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)
        # Masked self-attention over the (shifted) target sequence.
        self_attn, attn_weights_block1 = self.mha1(
            x, x, x, look_ahead_mask
        )  # (batch_size, target_seq_len, d_model)
        self_attn = self.dropout1(self_attn, training=training)
        out1 = self.layernorm1(self_attn + x)
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        cross_attn, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask
        )  # (batch_size, target_seq_len, d_model)
        cross_attn = self.dropout2(cross_attn, training=training)
        out2 = self.layernorm2(cross_attn + out1)  # (batch_size, target_seq_len, d_model)
        # Position-wise feed-forward sub-layer.
        ffn_out = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_out = self.dropout3(ffn_out, training=training)
        out3 = self.layernorm3(ffn_out + out2)  # (batch_size, target_seq_len, d_model)
        return out3, attn_weights_block1, attn_weights_block2
def _point_wise_feed_forward_network(d_model, dff):
    """Position-wise feed-forward network.

    A fully connected network applied to each position separately and
    identically:

    ```
    FFN(x) = max(0, xW_1 + b_1)W_2 + b_2
    ```

    i.e. two linear transformations with a ReLU activation in between.
    """
    hidden_layer = tf.keras.layers.Dense(dff, activation="relu")  # (batch_size, seq_len, dff)
    output_layer = tf.keras.layers.Dense(d_model)  # (batch_size, seq_len, d_model)
    return tf.keras.Sequential([hidden_layer, output_layer])
def _create_padding_mask(seq):
    """Mark every pad token (id 0) in the batch with 1.0, real tokens with 0.0."""
    pad_positions = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # Insert broadcast dimensions so the mask can be added directly to
    # attention logits of shape (batch_size, num_heads, seq_len_q, seq_len_k).
    return pad_positions[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
def _create_look_ahead_mask(size):
    """Mask hiding future tokens: 1.0 strictly above the diagonal, else 0.0."""
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle  # (seq_len, seq_len)
def _positional_encoding(position, d_model):
    """Fixed sinusoidal positional encoding (PE).

    Since the model has neither recurrence nor convolution, information
    about the absolute position of each token is injected additively.
    The fixed variant (as effective as a learned one, per the paper) uses
    sine and cosine functions of different frequencies:

    $PE(pos, 2i)     = sin(pos / 10000^{2i/d_model})$
    $PE(pos, 2i + 1) = cos(pos / 10000^{2i/d_model})$

    where pos is the absolute token position and i the dimension index.
    """
    positions = np.arange(position)[:, np.newaxis]
    dims = np.arange(d_model)[np.newaxis, :]
    # Frequency term 1 / 10000^(2*(i//2)/d_model), broadcast over positions.
    angle_rates = 1 / np.power(10000, (2 * (dims // 2)) / np.float32(d_model))
    angle_rads = positions * angle_rates
    # sin on even dimensions (2i), cos on odd dimensions (2i + 1).
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    # Add a leading batch axis so the encoding broadcasts over batches.
    return tf.cast(angle_rads[np.newaxis, ...], dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask):
    """Compute attention(q, k, v) = softmax(q k^T / sqrt(d_k) + mask) v.

    q, k, v must have matching leading dimensions, and k, v matching
    penultimate dimensions (seq_len_k == seq_len_v).  The mask (padding or
    look-ahead) has different shapes by type but must be broadcastable for
    addition.

    Args:
        q: query, shape == (..., seq_len_q, depth)
        k: key, shape == (..., seq_len_k, depth)
        v: value, shape == (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k),
            or None.

    Returns:
        output, attention_weights
    """
    scores = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)
    # Scale by sqrt(depth) to keep dot products in a stable range.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_scores = scores / tf.math.sqrt(depth)
    if mask is not None:
        # Push masked positions towards -inf so softmax zeroes them out.
        scaled_scores += mask * -1e9
    # Normalize over the last axis (seq_len_k) so weights sum to 1.
    attention_weights = tf.nn.softmax(scaled_scores, axis=-1)  # (..., seq_len_q, seq_len_k)
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v, attend per head, recombine."""

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        # d_model must split evenly across the heads.
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)
        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth) and transpose
        the result to shape (batch_size, num_heads, seq_len, depth).
        """
        per_head = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(per_head, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        batch_size = tf.shape(q)[0]
        # Linear projections; each result is (batch_size, seq_len, d_model).
        q = self.wq(q)
        k = self.wk(k)
        v = self.wv(v)
        # Per-head views: (batch_size, num_heads, seq_len_*, depth).
        q = self.split_heads(q, batch_size)
        k = self.split_heads(k, batch_size)
        v = self.split_heads(v, batch_size)
        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask
        )
        # Undo the head split: back to (batch_size, seq_len_q, d_model).
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
        concat_attention = tf.reshape(
            scaled_attention, (batch_size, -1, self.d_model)
        )
        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)
        return output, attention_weights
def optimizer(d_model):
    """Adam optimizer with the warmup learning-rate schedule of Section 5.3."""

    class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
        """lrate = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)."""

        def __init__(self, d_model, warmup_steps=4000):
            super(CustomSchedule, self).__init__()
            self.d_model = tf.cast(d_model, tf.float32)
            self.warmup_steps = warmup_steps

        def __call__(self, step):
            decay_term = tf.math.rsqrt(step)
            warmup_term = step * (self.warmup_steps ** -1.5)
            return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay_term, warmup_term)

    schedule = CustomSchedule(d_model)
    return tf.keras.optimizers.Adam(
        schedule, beta_1=0.9, beta_2=0.98, epsilon=1e-9
    )
| [
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.keras.layers.Dense",
"tensorflow.nn.softmax",
"numpy.sin",
"tensorflow.cast",
"numpy.arange",
"tensorflow.math.minimum",
"tensorflow.math.sqrt",
"tensorflow.matmul",
"tensorflow.math.equal",
"tensorflow.maximum",
"tensorflow.keras.layer... | [((11454, 11481), 'numpy.sin', 'np.sin', (['angle_rads[:, 0::2]'], {}), '(angle_rads[:, 0::2])\n', (11460, 11481), True, 'import numpy as np\n'), ((11558, 11585), 'numpy.cos', 'np.cos', (['angle_rads[:, 1::2]'], {}), '(angle_rads[:, 1::2])\n', (11564, 11585), True, 'import numpy as np\n'), ((11645, 11684), 'tensorflow.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (11652, 11684), True, 'import tensorflow as tf\n'), ((12404, 12437), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (12413, 12437), True, 'import tensorflow as tf\n'), ((12800, 12847), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scaled_attention_logits'], {'axis': '(-1)'}), '(scaled_attention_logits, axis=-1)\n', (12813, 12847), True, 'import tensorflow as tf\n'), ((12907, 12938), 'tensorflow.matmul', 'tf.matmul', (['attention_weights', 'v'], {}), '(attention_weights, v)\n', (12916, 12938), True, 'import tensorflow as tf\n'), ((15706, 15785), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {'beta_1': '(0.9)', 'beta_2': '(0.98)', 'epsilon': '(1e-09)'}), '(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-09)\n', (15730, 15785), True, 'import tensorflow as tf\n'), ((825, 865), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['target_vocab_size'], {}), '(target_vocab_size)\n', (846, 865), True, 'import tensorflow as tf\n'), ((2236, 2288), 'tensorflow.maximum', 'tf.maximum', (['dec_target_padding_mask', 'look_ahead_mask'], {}), '(dec_target_padding_mask, look_ahead_mask)\n', (2246, 2288), True, 'import tensorflow as tf\n'), ((2911, 2963), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {}), '(input_vocab_size, d_model)\n', (2936, 2963), True, 'import tensorflow as tf\n'), ((3222, 3251), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), 
'(rate)\n', (3245, 3251), True, 'import tensorflow as tf\n'), ((4118, 4171), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['target_vocab_size', 'd_model'], {}), '(target_vocab_size, d_model)\n', (4143, 4171), True, 'import tensorflow as tf\n'), ((4402, 4431), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (4425, 4431), True, 'import tensorflow as tf\n'), ((6346, 6395), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (6380, 6395), True, 'import tensorflow as tf\n'), ((6421, 6470), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (6455, 6470), True, 'import tensorflow as tf\n'), ((6495, 6524), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (6518, 6524), True, 'import tensorflow as tf\n'), ((6549, 6578), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (6572, 6578), True, 'import tensorflow as tf\n'), ((7992, 8041), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (8026, 8041), True, 'import tensorflow as tf\n'), ((8067, 8116), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (8101, 8116), True, 'import tensorflow as tf\n'), ((8142, 8191), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (8176, 8191), True, 'import tensorflow as tf\n'), ((8216, 8245), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8239, 8245), True, 'import tensorflow as tf\n'), ((8270, 8299), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8293, 
8299), True, 'import tensorflow as tf\n'), ((8324, 8353), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8347, 8353), True, 'import tensorflow as tf\n'), ((10058, 10079), 'tensorflow.math.equal', 'tf.math.equal', (['seq', '(0)'], {}), '(seq, 0)\n', (10071, 10079), True, 'import tensorflow as tf\n'), ((12549, 12565), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), '(dk)\n', (12561, 12565), True, 'import tensorflow as tf\n'), ((13331, 13361), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13352, 13361), True, 'import tensorflow as tf\n'), ((13380, 13410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13401, 13410), True, 'import tensorflow as tf\n'), ((13429, 13459), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13450, 13459), True, 'import tensorflow as tf\n'), ((13482, 13512), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13503, 13512), True, 'import tensorflow as tf\n'), ((13733, 13792), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.num_heads, self.depth)'], {}), '(x, (batch_size, -1, self.num_heads, self.depth))\n', (13743, 13792), True, 'import tensorflow as tf\n'), ((13808, 13842), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), '(x, perm=[0, 2, 1, 3])\n', (13820, 13842), True, 'import tensorflow as tf\n'), ((14657, 14706), 'tensorflow.transpose', 'tf.transpose', (['scaled_attention'], {'perm': '[0, 2, 1, 3]'}), '(scaled_attention, perm=[0, 2, 1, 3])\n', (14669, 14706), True, 'import tensorflow as tf\n'), ((14802, 14862), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention', '(batch_size, -1, self.d_model)'], {}), '(scaled_attention, (batch_size, -1, self.d_model))\n', (14812, 14862), True, 'import tensorflow as tf\n'), ((3310, 3321), 'tensorflow.shape', 'tf.shape', (['x'], {}), 
'(x)\n', (3318, 3321), True, 'import tensorflow as tf\n'), ((3471, 3504), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (3478, 3504), True, 'import tensorflow as tf\n'), ((4528, 4539), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4536, 4539), True, 'import tensorflow as tf\n'), ((4672, 4705), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (4679, 4705), True, 'import tensorflow as tf\n'), ((9776, 9821), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dff'], {'activation': '"""relu"""'}), "(dff, activation='relu')\n", (9797, 9821), True, 'import tensorflow as tf\n'), ((9865, 9895), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (9886, 9895), True, 'import tensorflow as tf\n'), ((10366, 10387), 'tensorflow.ones', 'tf.ones', (['(size, size)'], {}), '((size, size))\n', (10373, 10387), True, 'import tensorflow as tf\n'), ((11293, 11312), 'numpy.arange', 'np.arange', (['position'], {}), '(position)\n', (11302, 11312), True, 'import numpy as np\n'), ((11329, 11347), 'numpy.arange', 'np.arange', (['d_model'], {}), '(d_model)\n', (11338, 11347), True, 'import numpy as np\n'), ((12478, 12489), 'tensorflow.shape', 'tf.shape', (['k'], {}), '(k)\n', (12486, 12489), True, 'import tensorflow as tf\n'), ((13900, 13911), 'tensorflow.shape', 'tf.shape', (['q'], {}), '(q)\n', (13908, 13911), True, 'import tensorflow as tf\n'), ((15364, 15397), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (15371, 15397), True, 'import tensorflow as tf\n'), ((15498, 15517), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['step'], {}), '(step)\n', (15511, 15517), True, 'import tensorflow as tf\n'), ((2132, 2145), 'tensorflow.shape', 'tf.shape', (['tar'], {}), '(tar)\n', (2140, 2145), True, 'import tensorflow as tf\n'), ((15592, 15619), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', 
(['self.d_model'], {}), '(self.d_model)\n', (15605, 15619), True, 'import tensorflow as tf\n'), ((15622, 15649), 'tensorflow.math.minimum', 'tf.math.minimum', (['arg1', 'arg2'], {}), '(arg1, arg2)\n', (15637, 15649), True, 'import tensorflow as tf\n'), ((11201, 11220), 'numpy.float32', 'np.float32', (['d_model'], {}), '(d_model)\n', (11211, 11220), True, 'import numpy as np\n')] |
from oil.utils.utils import export
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import numpy as np
@export
class Animation(object):
def __init__(self, qt,body=None):
""" [qt (T,n,d)"""
self.qt = qt.data.numpy()
T,n,d = qt.shape
assert d in (2,3), "too many dimensions for animation"
self.fig = plt.figure()
self.ax = self.fig.add_axes([0, 0, 1, 1],projection='3d') if d==3 else self.fig.add_axes([0, 0, 1, 1])
#self.ax.axis('equal')
xyzmin = self.qt.min(0).min(0)#.min(dim=0)[0].min(dim=0)[0]
xyzmax = self.qt.max(0).max(0)#.max(dim=0)[0].max(dim=0)[0]
delta = xyzmax-xyzmin
lower = xyzmin-.1*delta; upper = xyzmax+.1*delta
self.ax.set_xlim((min(lower),max(upper)))
self.ax.set_ylim((min(lower),max(upper)))
if d==3: self.ax.set_zlim((min(lower),max(upper)))
if d!=3: self.ax.set_aspect("equal")
#elf.ax.auto_scale_xyz()
empty = d*[[]]
self.colors = np.random.choice([f"C{i}" for i in range(10)],size=n,replace=False)
self.objects = {
'pts':sum([self.ax.plot(*empty, "o", ms=6,color=self.colors[i]) for i in range(n)], []),
'traj_lines':sum([self.ax.plot(*empty, "-",color=self.colors[i]) for i in range(n)], []),
}
def init(self):
empty = 2*[[]]
for obj in self.objects.values():
for elem in obj:
elem.set_data(*empty)
if self.qt.shape[-1]==3: elem.set_3d_properties([])
return sum(self.objects.values(),[])
def update(self, i=0):
T,n,d = self.qt.shape
trail_len = 150
for j in range(n):
# trails
xyz = self.qt[max(i - trail_len,0): i + 1,j,:]
#chunks = xyz.shape[0]//10
#xyz_chunks = torch.chunk(xyz,chunks)
#for i,xyz in enumerate(xyz_chunks):
self.objects['traj_lines'][j].set_data(*xyz[...,:2].T)
if d==3: self.objects['traj_lines'][j].set_3d_properties(xyz[...,2].T)
self.objects['pts'][j].set_data(*xyz[-1:,...,:2].T)
if d==3: self.objects['pts'][j].set_3d_properties(xyz[-1:,...,2].T)
#self.fig.canvas.draw()
return sum(self.objects.values(),[])
def animate(self):
return self._animate().to_html5_video()
def _animate(self):
return animation.FuncAnimation(self.fig,self.update,frames=self.qt.shape[0],
interval=33,init_func=self.init,blit=True,)
| [
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure"
] | [((420, 432), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (430, 432), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2631), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update'], {'frames': 'self.qt.shape[0]', 'interval': '(33)', 'init_func': 'self.init', 'blit': '(True)'}), '(self.fig, self.update, frames=self.qt.shape[0],\n interval=33, init_func=self.init, blit=True)\n', (2534, 2631), True, 'import matplotlib.animation as animation\n')] |
from functools import wraps
from opentracing import global_tracer, tags, logs
from contextlib import contextmanager
def operation_name(query: str):
# TODO: some statement should contain two words. For example CREATE TABLE.
query = query.strip().split(' ')[0].strip(';').upper()
return 'asyncpg ' + query
@contextmanager
def con_context(handler, query, query_args):
_tags = {
tags.DATABASE_TYPE: 'SQL',
tags.DATABASE_STATEMENT: query,
tags.DATABASE_USER: handler._params.user,
tags.DATABASE_INSTANCE: handler._params.database,
'db.params': query_args,
tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
}
with global_tracer().start_active_span(
operation_name=operation_name(query),
tags=_tags
) as scope:
try:
yield
except Exception as e:
scope.span.log_kv({
logs.EVENT: 'error',
logs.ERROR_KIND: type(e).__name__,
logs.ERROR_OBJECT: e,
logs.MESSAGE: str(e)
})
raise
def wrap(coro):
@wraps(coro)
async def wrapped(self, query, *args, **kwargs):
with con_context(self, query, args):
return await coro(self, query, *args, **kwargs)
return wrapped
def wrap_executemany(coro):
@wraps(coro)
async def wrapped(self, query, args, *_args, **kwargs):
with con_context(self, query, args):
return await coro(self, query, args, *_args, **kwargs)
return wrapped
def tracing_connection(cls):
cls.fetch = wrap(cls.fetch)
cls.fetchval = wrap(cls.fetchval)
cls.fetchrow = wrap(cls.fetchrow)
cls.execute = wrap(cls.execute)
cls.executemany = wrap_executemany(cls.executemany)
return cls
| [
"opentracing.global_tracer",
"functools.wraps"
] | [((1115, 1126), 'functools.wraps', 'wraps', (['coro'], {}), '(coro)\n', (1120, 1126), False, 'from functools import wraps\n'), ((1340, 1351), 'functools.wraps', 'wraps', (['coro'], {}), '(coro)\n', (1345, 1351), False, 'from functools import wraps\n'), ((677, 692), 'opentracing.global_tracer', 'global_tracer', ([], {}), '()\n', (690, 692), False, 'from opentracing import global_tracer, tags, logs\n')] |
from collections import OrderedDict
from evidence import Evidence
def get_name(item, entity='all'):
from ._add_reference_to_evidence import _add_reference_to_evidence
evidence = Evidence()
fullName = item['uniprot']['entry']['protein']['recommendedName']['fullName']
if type(fullName)==str:
evidence.value=fullName
elif type(fullName)==OrderedDict:
if '#text' in fullName:
evidence.value = fullName['#text']
if '@evidence' in fullName:
evidence_numbers_in_db = fullName['@evidence'].split()
for evidence_number_in_db in evidence_numbers_in_db:
evidence_in_db = item['uniprot']['entry']['evidence'][int(evidence_number_in_db)-1]
if evidence_in_db['@key']!=evidence_number_in_db:
raise ValueError('Evidence number does not match evidence @key')
_add_reference_to_evidence(evidence, evidence_in_db)
accession = item['uniprot']['entry']['accession'][0]
evidence.add_reference({'database':'UniProtKB', 'id':accession})
return evidence
| [
"evidence.Evidence"
] | [((189, 199), 'evidence.Evidence', 'Evidence', ([], {}), '()\n', (197, 199), False, 'from evidence import Evidence\n')] |
#!/usr/bin/env python3
from dataknead import Knead
from facetool import config, media, util
from facetool.constants import *
from facetool.path import Path
from facetool.profiler import Profiler
from facetool.errors import ArgumentError
from facetool.util import message, force_mkdir, sample_remove, is_json_path
from random import random
from tqdm import tqdm
import argparse
import logging
import json
import os
import pandas as pd
import pdb
import shutil
import sys
COMMANDS = (
"average",
"classify",
"cluster",
"combineaudio",
"combineframes",
"count",
"distance",
"crop",
"encode",
"extractframes",
"landmarks",
"locate",
"pose",
"probe",
"sample",
"swap",
)
OUTPUT_FORMAT_CHOICES = (
"default",
"csv",
"json"
)
SWAP_METHODS = [
"faceswap",
"faceswap3d"
]
logger = logging.getLogger(__name__)
# Note that we always profile, we just don't print the output if the
# option is not enabled
profiler = Profiler("facetool.py")
def get_parser():
parser = argparse.ArgumentParser(description = "Manipulate faces in videos and images")
# Essentials
parser.add_argument("command", choices = COMMANDS, nargs = "?")
parser.add_argument("-i", "--input", type = str,
required = True,
help = "Input file or folder, 'face' when swapping"
)
parser.add_argument("-o", "--output", type = str,
help = "Output file or folder",
default = None
)
parser.add_argument("-t", "--target", type = str,
help = "'Head' when swapping"
)
# Extra arguments
parser.add_argument("-ai", "--audio-input", type = str,
default = None,
help = "Add a separate audio file with the end result movie"
)
parser.add_argument("--as-percentage", action = "store_true",
help = "Show face distances as percentages"
)
parser.add_argument("-bl", "--blur", type = float,
default = BLUR_AMOUNT,
help = "Amount of blur to use during colour correction"
)
parser.add_argument("-dd", "--data-directory", type = str,
default = DATA_DIRECTORY,
help = "Directory where the data files are located"
)
parser.add_argument("-f", "--force", action = "store_true",
help = "Force commands and ignore warnings, like with sample"
)
parser.add_argument("-fr", "--framerate", type = str,
default = DEFAULT_FRAMERATE
)
parser.add_argument("-fa", "--feather", type = int,
default = FEATHER_AMOUNT,
help = "Softness of edges on a swapped face"
)
parser.add_argument("-if", "--ignore-nofaces", action = "store_true",
default = False,
help = "When having no faces to swap, keep the original input image"
)
parser.add_argument("-ih", "--image-height", type = int,
default = DEFAULT_IMAGE_HEIGHT,
help = "Height of output image / height"
)
parser.add_argument("-iw", "--image-width", type = int,
default = DEFAULT_IMAGE_WIDTH,
help = "Width of output image / video"
)
parser.add_argument("-kt", "--keep-temp", action = "store_true",
help = "Keep temporary files (used with video swapping)"
)
parser.add_argument("-m", "--model", type = str,
help = "Use a precalculated model (for calculating distances)"
)
parser.add_argument("--no-audio", action = "store_true")
parser.add_argument("-nocc", "--no-colour-correct", action = "store_true",
help = "Don't colour correct"
)
parser.add_argument("--no-eyesbrows", action = "store_true")
parser.add_argument("--no-nosemouth", action = "store_true")
parser.add_argument("--no-threading", action = "store_true",
help = "Don't use multithreading"
)
parser.add_argument("--only-mouth", action="store_true")
parser.add_argument("-of", "--output-format",
choices = OUTPUT_FORMAT_CHOICES,
help = "Specify output format"
)
parser.add_argument("-pp", "--predictor-path", type = str,
default = PREDICTOR_PATH
)
parser.add_argument("--profile", action = "store_true",
help = "Show profiler information"
)
parser.add_argument("-q", "--quiet", action = "store_true",
help = "Don't print output to the console"
)
parser.add_argument("-s", "--swap", action = "store_true",
help = "Swap input and target"
)
parser.add_argument("--save-originals", action = "store_true",
help = "Save original images when averaging faces"
)
parser.add_argument("--save-warped", action = "store_true",
help = "Save warped images when averaging faces"
)
parser.add_argument("--swap-method",
choices = SWAP_METHODS,
default = SWAP_METHODS[0],
help = f"Swap method for faceswap (options are: {SWAP_METHODS}"
)
parser.add_argument("-so", "--swap-order", type = str,
help = "Comma-separated list with order of faceswaps on target, implies a multiswap"
)
parser.add_argument("-sp", "--sample-percentage", type = float,
help = "Percentage of files in a directory to randomly remove (used for the sample command)"
)
parser.add_argument("-sr", "--swap-order-repeat", action = "store_true", default = False,
help = "When using --swap-order and there are not enough target faces, repeat the sequence"
)
parser.add_argument("--temp-dir", type = str,
help = "Define the directory where temporary files should be placed"
)
parser.add_argument("-v", "--verbose", action = "store_true",
help = "Show debug information"
)
parser.add_argument("-vv", "--extra-verbose", action = "store_true",
help = "Show debug information AND raise / abort on exceptions"
)
parser.add_argument("--warp-3d", action="store_true",
help = "Swap faces and morph to coordinates of target face"
)
return parser
def main(args):
if args.verbose or args.extra_verbose:
logging.basicConfig(level=logging.DEBUG)
logging.debug(args)
config.PROFILE = args.profile
config.QUIET = args.quiet
config.VERBOSE = args.verbose or args.extra_verbose
# Check for invalid argument combinations
if any([args.output_format == "csv", args.output_format == "json"]) and not args.output:
raise ArgumentError("With CSV as output format, a filename (-o) is required")
# Swap around input and target
if args.swap:
args.input, args.target = args.target, args.input
# Okay, the main stuff, get the command
# Extract all frames from a movie to a set of jpg files
if args.command == "extractframes":
util.mkdir_if_not_exists(args.output)
media.extractframes(args.input, args.output)
# Combine all frames from a set of jpg files to a movie
elif args.command == "combineframes":
media.combineframes(args.input, args.output, framerate = args.framerate)
# Combine audio with an input movie
elif args.command == "combineaudio":
media.combineaudio(args.input, args.audio_input, args.output)
# Randomly remove (sample) a percentage of files from a given directory
elif args.command == "sample":
if not args.sample_percentage:
raise ArgumentError("The sample command needs a sample percentage (-sp)")
sample_remove(args.input, args.sample_percentage, force_delete = args.force)
# Show metadata on a media file
elif args.command == "probe":
try:
data = media.probe(args.input)
except:
raise ArgumentError(f"Could not probe '{args.input}', probably not a video/image file")
else:
jsondata = json.dumps(data, indent = 4)
message(jsondata)
elif args.command == "landmarks":
from facetool.landmarks import Landmarks
landmarks = Landmarks(predictor_path = args.predictor_path)
save_data = args.output_format and args.output_format != "default"
if save_data:
data = []
# Check if we *could* have an output directory, and if so,
# create it
if args.output and Path(args.output).could_be_dir():
Path(args.output).mkdir_if_not_exists()
for pathobj in Path(args.input).images():
path = str(pathobj)
logging.debug(f"Processing {path}")
logging.debug(f"Getting landmarks of {path}")
if not args.output:
outpath = None
else:
out = Path(args.output)
if out.is_dir():
outpath = f"{out}/{Path(path).name}"
else:
outpath = str(out)
marks = landmarks.get_landmarks(str(path), outpath = outpath)
if marks and save_data:
points = [str(path)]
[points.extend([m.x, m.y]) for m in marks]
data.append(points)
message(path, marks)
if save_data:
df = pd.DataFrame(data)
if args.output_format == "csv":
df.to_csv(args.output)
elif args.output_format == "json":
df.to_json(args.output)
else:
raise ArgumentError(f"Invalid output format: {args.output_format}")
elif args.command == "pose":
from facetool.poser import Poser
poser = Poser(predictor_path = args.predictor_path)
# Check if we *could* have an output directory, and if so,
# create it
if args.output and Path(args.output).could_be_dir():
Path(args.output).mkdir_if_not_exists()
for pathobj in Path(args.input).images():
path = str(pathobj)
logging.debug(f"Processing {path}")
if not args.output:
outpath = None
else:
out = Path(args.output)
if out.is_dir():
outpath = f"{out}/{Path(path).name}"
else:
outpath = str(out)
poses = poser.get_poses(path, outpath = outpath)
message(f"{path}: {poses}")
elif args.command == "count":
from facetool.detect import Detect
detect = Detect()
if args.output_format == "csv":
csv = []
for path in Path(args.input).images():
count = detect.count(path)
message(f"Number of faces in '{path}': {count}")
if args.output_format == "csv":
csv.append({
"path" : path,
"count" : count
})
if args.output_format == "csv":
df = pd.DataFrame(csv)
df.to_csv(args.output)
elif args.command == "locate":
from facetool.detect import Detect
detect = Detect()
for path in Path(args.input).images():
to_directory = os.path.isdir(args.input)
locations = detect.locate(path, args.output, to_directory = to_directory)
message(f"Face locations in '{args.input}': {locations}")
elif args.command == "crop":
from facetool.detect import Detect
from facetool.media import extractframes
# We can't crop to an image path, because an input image might
# have multiple faces, so throw an error in that case
if Path(args.output).is_image():
raise ArgumentError(f"Can't crop with an image as output")
detect = Detect()
# FIXME: we need some general mechanism for juggling frames around
TMP_DIR = "crop-tmp"
IS_VIDEO = Path(args.input).is_video()
logging.debug(f"Cropping. Input is video? {IS_VIDEO}")
if IS_VIDEO:
force_mkdir(TMP_DIR)
extractframes(args.input, TMP_DIR)
images = Path(TMP_DIR).images()
else:
images = Path(args.input).images()
for path in images:
logging.debug(f"Cropping <{path}>")
detect.crop(str(path), args.output)
if IS_VIDEO:
shutil.rmtree(TMP_DIR)
elif args.command == "classify":
from facetool.classifier import Classifier
classifier = Classifier(
data_directory = args.data_directory,
output_format = args.output_format,
predictor_path = args.predictor_path
)
for path in Path(args.input).images():
logging.debug(f"Classifying <{path}>")
classifier.classify(str(path))
if args.output_format == "csv":
classifier.to_csv(args.output)
elif args.command == "average":
from facetool.averager import Averager
profiler.tick("start averaging")
averager = Averager(
predictor_path = args.predictor_path,
img_height = args.image_height,
img_width = args.image_width,
save_originals = args.save_originals,
save_warped = args.save_warped
)
TMP_DIR = "average-tmp"
path = Path(args.input)
# If this is a video, extract all images and average those
if path.is_file() and path.is_video():
# First create a temporary directory to hold all frames
util.mkdir_if_not_exists(TMP_DIR)
media.extractframes(args.input, TMP_DIR)
# Now average
averager.average(TMP_DIR, args.output)
# And remove the temporary directory
logging.debug(f"Removing {TMP_DIR}")
shutil.rmtree(TMP_DIR)
# Not a video, so if it's a file it's probably an image
# extract all faces and average those
elif path.is_file():
# First create a temporary directory
util.mkdir_if_not_exists(TMP_DIR)
# Now extract all the images to said directory
from facetool.detect import Detect
detect = Detect()
logging.debug(f"Cropping <{args.input}> to {TMP_DIR}")
detect.crop(str(args.input), TMP_DIR)
# Average the stuff
averager.average(TMP_DIR, args.output)
# And remove the temporary directory
logging.debug(f"Removing {TMP_DIR}")
shutil.rmtree(TMP_DIR)
elif path.is_dir():
# Just a directory, use this
averager.average(args.input, args.output)
else:
raise ArgumentError("Invalid input for averaging")
profiler.tick("done averaging")
elif args.command == "distance":
from facetool.recognizer import Recognizer
if not all([args.input, any([args.target, args.model])]):
raise ArgumentError("For the recognizer you need an input and target/model")
logging.debug(f"Trying to recognize {args.input} in {args.target}{args.model}")
recognizer = Recognizer()
results = recognizer.recognize(
input_path = args.input,
model_path = args.model,
target_path = args.target,
as_percentage = args.as_percentage
)
if args.output_format == "csv":
pd.Series(results).to_csv(args.output, header = False)
elif args.output_format == "json":
pd.Series(results).to_json(args.output)
else:
message(f"{args.input} distance to {args.target}")
for path, distance in results.items():
message(f"{path}: {distance}")
elif args.command == "encode":
from facetool.recognizer import Recognizer
if not all([args.input, args.output]):
raise ArgumentError("For encoding faces you need both input and output")
recognizer = Recognizer()
encodings = recognizer.encode_path(args.input)
with open(args.output, "w") as f:
f.write(encodings)
message(f"Written encodings of {args.input} to {args.output}")
elif args.command == "cluster":
from facetool.clusterer import Clusterer
# A .json file with encodings is also valid, if that is give, use that
# instead
if is_json_path(args.input):
encodings = Knead(args.input).data()["encodings"]
else:
from facetool.recognizer import Recognizer
recognizer = Recognizer()
encodings = recognizer.encode_path(args.input, return_type = "dict")
encodings = encodings["encodings"]
clusterer = Clusterer()
output = clusterer.cluster_encodings(encodings)
if args.output:
if is_json_path(args.output):
Knead(output).write(args.output)
else:
force_mkdir(args.output)
clusterer.move_files(output, args.output)
else:
# Just print the output
Knead(output).print()
elif args.command == "swap":
from facetool.swapper import Swapper
profiler.tick("start swapping")
# First check if all arguments are given
arguments = [args.input, args.target]
if not all(arguments + [args.output]):
raise ArgumentError("Input, target and output are required for swapping")
# And if these things are paths or files
if not all([os.path.exists(a) for a in arguments]):
raise ArgumentError("Input and target should be valid files or directories")
pbar = tqdm()
def update_pbar():
pbar.total = swapper.filecount
pbar.update()
if args.verbose:
pbar.write(swapper.last_message)
# That is out of the way, set up the swapper
swapper = Swapper(
predictor_path = args.predictor_path,
feather = args.feather,
blur = args.blur,
keep_temp = args.keep_temp,
swap_audio = not args.no_audio,
overlay_eyesbrows = not args.no_eyesbrows,
overlay_nosemouth = not args.no_nosemouth,
only_mouth = args.only_mouth,
reporthook = update_pbar,
swap_method = args.swap_method,
warp_3d = args.warp_3d,
swap_order = args.swap_order,
swap_order_repeat = args.swap_order_repeat,
ignore_nofaces = args.ignore_nofaces,
concurrent = not args.no_threading,
colour_correct = not args.no_colour_correct,
temp_dir = args.temp_dir
)
# Directory of faces to directory of heads
if Path(args.input).is_dir() and Path(args.target).is_dir():
swapper.swap_directory_to_directory(args.input, args.target, args.output)
# Face to directory of heads
elif media.is_image(args.input) and Path(args.target).is_dir():
swapper.swap_image_to_directory(args.input, args.target, args.output)
# Directory of faces to head
elif Path(args.input).is_dir() and media.is_image(args.target):
swapper.swap_directory_to_image(args.input, args.target, args.output)
# Face in image to video
elif media.is_video(args.target) and media.is_image(args.input):
swapper.swap_image_to_video(args.target, args.input, args.output)
# Face of video to head in other video
elif media.is_video(args.target) and media.is_video(args.input):
swapper.swap_video_to_video(args.target, args.input, args.output)
# Image to image
elif media.is_image(args.target) and media.is_image(args.input):
swapper.swap_image_to_image(args.target, args.input, args.output)
# I don't even know if there is an option that isn't in the list above,
# but if it isn't, you'll get this
else:
raise ArgumentError("Invalid swap options")
pbar.close()
profiler.tick("done swapping")
else:
# No arguments, just display help
parser.print_help()
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
try:
main(args)
except IsADirectoryError as e:
print(f"Can't use a directory as an argument: {e}")
if config.PROFILE:
profiler.dump_events() | [
"logging.getLogger",
"facetool.media.probe",
"facetool.media.combineaudio",
"logging.debug",
"facetool.util.mkdir_if_not_exists",
"facetool.averager.Averager",
"facetool.media.combineframes",
"facetool.detect.Detect",
"facetool.path.Path",
"facetool.util.sample_remove",
"facetool.poser.Poser",
... | [((861, 888), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (878, 888), False, 'import logging\n'), ((994, 1017), 'facetool.profiler.Profiler', 'Profiler', (['"""facetool.py"""'], {}), "('facetool.py')\n", (1002, 1017), False, 'from facetool.profiler import Profiler\n'), ((1050, 1126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Manipulate faces in videos and images"""'}), "(description='Manipulate faces in videos and images')\n", (1073, 1126), False, 'import argparse\n'), ((6056, 6075), 'logging.debug', 'logging.debug', (['args'], {}), '(args)\n', (6069, 6075), False, 'import logging\n'), ((6010, 6050), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (6029, 6050), False, 'import logging\n'), ((6351, 6422), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""With CSV as output format, a filename (-o) is required"""'], {}), "('With CSV as output format, a filename (-o) is required')\n", (6364, 6422), False, 'from facetool.errors import ArgumentError\n'), ((6688, 6725), 'facetool.util.mkdir_if_not_exists', 'util.mkdir_if_not_exists', (['args.output'], {}), '(args.output)\n', (6712, 6725), False, 'from facetool import config, media, util\n'), ((6734, 6778), 'facetool.media.extractframes', 'media.extractframes', (['args.input', 'args.output'], {}), '(args.input, args.output)\n', (6753, 6778), False, 'from facetool import config, media, util\n'), ((6890, 6960), 'facetool.media.combineframes', 'media.combineframes', (['args.input', 'args.output'], {'framerate': 'args.framerate'}), '(args.input, args.output, framerate=args.framerate)\n', (6909, 6960), False, 'from facetool import config, media, util\n'), ((7053, 7114), 'facetool.media.combineaudio', 'media.combineaudio', (['args.input', 'args.audio_input', 'args.output'], {}), '(args.input, args.audio_input, args.output)\n', (7071, 7114), False, 'from facetool import config, media, 
util\n'), ((7361, 7435), 'facetool.util.sample_remove', 'sample_remove', (['args.input', 'args.sample_percentage'], {'force_delete': 'args.force'}), '(args.input, args.sample_percentage, force_delete=args.force)\n', (7374, 7435), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((7284, 7351), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""The sample command needs a sample percentage (-sp)"""'], {}), "('The sample command needs a sample percentage (-sp)')\n", (7297, 7351), False, 'from facetool.errors import ArgumentError\n'), ((7541, 7564), 'facetool.media.probe', 'media.probe', (['args.input'], {}), '(args.input)\n', (7552, 7564), False, 'from facetool import config, media, util\n'), ((7718, 7744), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (7728, 7744), False, 'import json\n'), ((7759, 7776), 'facetool.util.message', 'message', (['jsondata'], {}), '(jsondata)\n', (7766, 7776), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((7886, 7931), 'facetool.landmarks.Landmarks', 'Landmarks', ([], {'predictor_path': 'args.predictor_path'}), '(predictor_path=args.predictor_path)\n', (7895, 7931), False, 'from facetool.landmarks import Landmarks\n'), ((7599, 7685), 'facetool.errors.ArgumentError', 'ArgumentError', (['f"""Could not probe \'{args.input}\', probably not a video/image file"""'], {}), '(\n f"Could not probe \'{args.input}\', probably not a video/image file")\n', (7612, 7685), False, 'from facetool.errors import ArgumentError\n'), ((8351, 8386), 'logging.debug', 'logging.debug', (['f"""Processing {path}"""'], {}), "(f'Processing {path}')\n", (8364, 8386), False, 'import logging\n'), ((8400, 8445), 'logging.debug', 'logging.debug', (['f"""Getting landmarks of {path}"""'], {}), "(f'Getting landmarks of {path}')\n", (8413, 8445), False, 'import logging\n'), ((8977, 8997), 'facetool.util.message', 'message', (['path', 'marks'], {}), '(path, 
marks)\n', (8984, 8997), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((9038, 9056), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (9050, 9056), True, 'import pandas as pd\n'), ((9422, 9463), 'facetool.poser.Poser', 'Poser', ([], {'predictor_path': 'args.predictor_path'}), '(predictor_path=args.predictor_path)\n', (9427, 9463), False, 'from facetool.poser import Poser\n'), ((8280, 8296), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (8284, 8296), False, 'from facetool.path import Path\n'), ((8550, 8567), 'facetool.path.Path', 'Path', (['args.output'], {}), '(args.output)\n', (8554, 8567), False, 'from facetool.path import Path\n'), ((9762, 9797), 'logging.debug', 'logging.debug', (['f"""Processing {path}"""'], {}), "(f'Processing {path}')\n", (9775, 9797), False, 'import logging\n'), ((10147, 10174), 'facetool.util.message', 'message', (['f"""{path}: {poses}"""'], {}), "(f'{path}: {poses}')\n", (10154, 10174), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((10271, 10279), 'facetool.detect.Detect', 'Detect', ([], {}), '()\n', (10277, 10279), False, 'from facetool.detect import Detect\n'), ((8170, 8187), 'facetool.path.Path', 'Path', (['args.output'], {}), '(args.output)\n', (8174, 8187), False, 'from facetool.path import Path\n'), ((8216, 8233), 'facetool.path.Path', 'Path', (['args.output'], {}), '(args.output)\n', (8220, 8233), False, 'from facetool.path import Path\n'), ((9268, 9329), 'facetool.errors.ArgumentError', 'ArgumentError', (['f"""Invalid output format: {args.output_format}"""'], {}), "(f'Invalid output format: {args.output_format}')\n", (9281, 9329), False, 'from facetool.errors import ArgumentError\n'), ((9691, 9707), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (9695, 9707), False, 'from facetool.path import Path\n'), ((9902, 9919), 'facetool.path.Path', 'Path', (['args.output'], {}), 
'(args.output)\n', (9906, 9919), False, 'from facetool.path import Path\n'), ((10442, 10490), 'facetool.util.message', 'message', (['f"""Number of faces in \'{path}\': {count}"""'], {}), '(f"Number of faces in \'{path}\': {count}")\n', (10449, 10490), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((10713, 10730), 'pandas.DataFrame', 'pd.DataFrame', (['csv'], {}), '(csv)\n', (10725, 10730), True, 'import pandas as pd\n'), ((10863, 10871), 'facetool.detect.Detect', 'Detect', ([], {}), '()\n', (10869, 10871), False, 'from facetool.detect import Detect\n'), ((9581, 9598), 'facetool.path.Path', 'Path', (['args.output'], {}), '(args.output)\n', (9585, 9598), False, 'from facetool.path import Path\n'), ((9627, 9644), 'facetool.path.Path', 'Path', (['args.output'], {}), '(args.output)\n', (9631, 9644), False, 'from facetool.path import Path\n'), ((10363, 10379), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (10367, 10379), False, 'from facetool.path import Path\n'), ((10947, 10972), 'os.path.isdir', 'os.path.isdir', (['args.input'], {}), '(args.input)\n', (10960, 10972), False, 'import os\n'), ((11071, 11128), 'facetool.util.message', 'message', (['f"""Face locations in \'{args.input}\': {locations}"""'], {}), '(f"Face locations in \'{args.input}\': {locations}")\n', (11078, 11128), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((11519, 11527), 'facetool.detect.Detect', 'Detect', ([], {}), '()\n', (11525, 11527), False, 'from facetool.detect import Detect\n'), ((11689, 11743), 'logging.debug', 'logging.debug', (['f"""Cropping. Input is video? {IS_VIDEO}"""'], {}), "(f'Cropping. Input is video? 
{IS_VIDEO}')\n", (11702, 11743), False, 'import logging\n'), ((10893, 10909), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (10897, 10909), False, 'from facetool.path import Path\n'), ((11448, 11500), 'facetool.errors.ArgumentError', 'ArgumentError', (['f"""Can\'t crop with an image as output"""'], {}), '(f"Can\'t crop with an image as output")\n', (11461, 11500), False, 'from facetool.errors import ArgumentError\n'), ((11778, 11798), 'facetool.util.force_mkdir', 'force_mkdir', (['TMP_DIR'], {}), '(TMP_DIR)\n', (11789, 11798), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((11811, 11845), 'facetool.media.extractframes', 'extractframes', (['args.input', 'TMP_DIR'], {}), '(args.input, TMP_DIR)\n', (11824, 11845), False, 'from facetool.media import extractframes\n'), ((11992, 12027), 'logging.debug', 'logging.debug', (['f"""Cropping <{path}>"""'], {}), "(f'Cropping <{path}>')\n", (12005, 12027), False, 'import logging\n'), ((12110, 12132), 'shutil.rmtree', 'shutil.rmtree', (['TMP_DIR'], {}), '(TMP_DIR)\n', (12123, 12132), False, 'import shutil\n'), ((12244, 12365), 'facetool.classifier.Classifier', 'Classifier', ([], {'data_directory': 'args.data_directory', 'output_format': 'args.output_format', 'predictor_path': 'args.predictor_path'}), '(data_directory=args.data_directory, output_format=args.\n output_format, predictor_path=args.predictor_path)\n', (12254, 12365), False, 'from facetool.classifier import Classifier\n'), ((8641, 8651), 'facetool.path.Path', 'Path', (['path'], {}), '(path)\n', (8645, 8651), False, 'from facetool.path import Path\n'), ((11400, 11417), 'facetool.path.Path', 'Path', (['args.output'], {}), '(args.output)\n', (11404, 11417), False, 'from facetool.path import Path\n'), ((11652, 11668), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (11656, 11668), False, 'from facetool.path import Path\n'), ((12473, 12511), 'logging.debug', 'logging.debug', 
(['f"""Classifying <{path}>"""'], {}), "(f'Classifying <{path}>')\n", (12486, 12511), False, 'import logging\n'), ((12785, 12961), 'facetool.averager.Averager', 'Averager', ([], {'predictor_path': 'args.predictor_path', 'img_height': 'args.image_height', 'img_width': 'args.image_width', 'save_originals': 'args.save_originals', 'save_warped': 'args.save_warped'}), '(predictor_path=args.predictor_path, img_height=args.image_height,\n img_width=args.image_width, save_originals=args.save_originals,\n save_warped=args.save_warped)\n', (12793, 12961), False, 'from facetool.averager import Averager\n'), ((13082, 13098), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (13086, 13098), False, 'from facetool.path import Path\n'), ((9993, 10003), 'facetool.path.Path', 'Path', (['path'], {}), '(path)\n', (9997, 10003), False, 'from facetool.path import Path\n'), ((11867, 11880), 'facetool.path.Path', 'Path', (['TMP_DIR'], {}), '(TMP_DIR)\n', (11871, 11880), False, 'from facetool.path import Path\n'), ((11925, 11941), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (11929, 11941), False, 'from facetool.path import Path\n'), ((12434, 12450), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (12438, 12450), False, 'from facetool.path import Path\n'), ((13294, 13327), 'facetool.util.mkdir_if_not_exists', 'util.mkdir_if_not_exists', (['TMP_DIR'], {}), '(TMP_DIR)\n', (13318, 13327), False, 'from facetool import config, media, util\n'), ((13340, 13380), 'facetool.media.extractframes', 'media.extractframes', (['args.input', 'TMP_DIR'], {}), '(args.input, TMP_DIR)\n', (13359, 13380), False, 'from facetool import config, media, util\n'), ((13521, 13557), 'logging.debug', 'logging.debug', (['f"""Removing {TMP_DIR}"""'], {}), "(f'Removing {TMP_DIR}')\n", (13534, 13557), False, 'import logging\n'), ((13570, 13592), 'shutil.rmtree', 'shutil.rmtree', (['TMP_DIR'], {}), '(TMP_DIR)\n', (13583, 13592), False, 'import shutil\n'), 
((14796, 14875), 'logging.debug', 'logging.debug', (['f"""Trying to recognize {args.input} in {args.target}{args.model}"""'], {}), "(f'Trying to recognize {args.input} in {args.target}{args.model}')\n", (14809, 14875), False, 'import logging\n'), ((14898, 14910), 'facetool.recognizer.Recognizer', 'Recognizer', ([], {}), '()\n', (14908, 14910), False, 'from facetool.recognizer import Recognizer\n'), ((13793, 13826), 'facetool.util.mkdir_if_not_exists', 'util.mkdir_if_not_exists', (['TMP_DIR'], {}), '(TMP_DIR)\n', (13817, 13826), False, 'from facetool import config, media, util\n'), ((13956, 13964), 'facetool.detect.Detect', 'Detect', ([], {}), '()\n', (13962, 13964), False, 'from facetool.detect import Detect\n'), ((13978, 14032), 'logging.debug', 'logging.debug', (['f"""Cropping <{args.input}> to {TMP_DIR}"""'], {}), "(f'Cropping <{args.input}> to {TMP_DIR}')\n", (13991, 14032), False, 'import logging\n'), ((14229, 14265), 'logging.debug', 'logging.debug', (['f"""Removing {TMP_DIR}"""'], {}), "(f'Removing {TMP_DIR}')\n", (14242, 14265), False, 'import logging\n'), ((14278, 14300), 'shutil.rmtree', 'shutil.rmtree', (['TMP_DIR'], {}), '(TMP_DIR)\n', (14291, 14300), False, 'import shutil\n'), ((14716, 14786), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""For the recognizer you need an input and target/model"""'], {}), "('For the recognizer you need an input and target/model')\n", (14729, 14786), False, 'from facetool.errors import ArgumentError\n'), ((15742, 15754), 'facetool.recognizer.Recognizer', 'Recognizer', ([], {}), '()\n', (15752, 15754), False, 'from facetool.recognizer import Recognizer\n'), ((15893, 15955), 'facetool.util.message', 'message', (['f"""Written encodings of {args.input} to {args.output}"""'], {}), "(f'Written encodings of {args.input} to {args.output}')\n", (15900, 15955), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((14456, 14500), 'facetool.errors.ArgumentError', 'ArgumentError', 
(['"""Invalid input for averaging"""'], {}), "('Invalid input for averaging')\n", (14469, 14500), False, 'from facetool.errors import ArgumentError\n'), ((15351, 15401), 'facetool.util.message', 'message', (['f"""{args.input} distance to {args.target}"""'], {}), "(f'{args.input} distance to {args.target}')\n", (15358, 15401), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((15653, 15719), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""For encoding faces you need both input and output"""'], {}), "('For encoding faces you need both input and output')\n", (15666, 15719), False, 'from facetool.errors import ArgumentError\n'), ((16151, 16175), 'facetool.util.is_json_path', 'is_json_path', (['args.input'], {}), '(args.input)\n', (16163, 16175), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((16495, 16506), 'facetool.clusterer.Clusterer', 'Clusterer', ([], {}), '()\n', (16504, 16506), False, 'from facetool.clusterer import Clusterer\n'), ((15175, 15193), 'pandas.Series', 'pd.Series', (['results'], {}), '(results)\n', (15184, 15193), True, 'import pandas as pd\n'), ((15469, 15499), 'facetool.util.message', 'message', (['f"""{path}: {distance}"""'], {}), "(f'{path}: {distance}')\n", (15476, 15499), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((16333, 16345), 'facetool.recognizer.Recognizer', 'Recognizer', ([], {}), '()\n', (16343, 16345), False, 'from facetool.recognizer import Recognizer\n'), ((16603, 16628), 'facetool.util.is_json_path', 'is_json_path', (['args.output'], {}), '(args.output)\n', (16615, 16628), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((17444, 17450), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (17448, 17450), False, 'from tqdm import tqdm\n'), ((17699, 18262), 'facetool.swapper.Swapper', 'Swapper', ([], {'predictor_path': 'args.predictor_path', 'feather': 'args.feather', 
'blur': 'args.blur', 'keep_temp': 'args.keep_temp', 'swap_audio': '(not args.no_audio)', 'overlay_eyesbrows': '(not args.no_eyesbrows)', 'overlay_nosemouth': '(not args.no_nosemouth)', 'only_mouth': 'args.only_mouth', 'reporthook': 'update_pbar', 'swap_method': 'args.swap_method', 'warp_3d': 'args.warp_3d', 'swap_order': 'args.swap_order', 'swap_order_repeat': 'args.swap_order_repeat', 'ignore_nofaces': 'args.ignore_nofaces', 'concurrent': '(not args.no_threading)', 'colour_correct': '(not args.no_colour_correct)', 'temp_dir': 'args.temp_dir'}), '(predictor_path=args.predictor_path, feather=args.feather, blur=args\n .blur, keep_temp=args.keep_temp, swap_audio=not args.no_audio,\n overlay_eyesbrows=not args.no_eyesbrows, overlay_nosemouth=not args.\n no_nosemouth, only_mouth=args.only_mouth, reporthook=update_pbar,\n swap_method=args.swap_method, warp_3d=args.warp_3d, swap_order=args.\n swap_order, swap_order_repeat=args.swap_order_repeat, ignore_nofaces=\n args.ignore_nofaces, concurrent=not args.no_threading, colour_correct=\n not args.no_colour_correct, temp_dir=args.temp_dir)\n', (17706, 18262), False, 'from facetool.swapper import Swapper\n'), ((15285, 15303), 'pandas.Series', 'pd.Series', (['results'], {}), '(results)\n', (15294, 15303), True, 'import pandas as pd\n'), ((16713, 16737), 'facetool.util.force_mkdir', 'force_mkdir', (['args.output'], {}), '(args.output)\n', (16724, 16737), False, 'from facetool.util import message, force_mkdir, sample_remove, is_json_path\n'), ((17161, 17228), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""Input, target and output are required for swapping"""'], {}), "('Input, target and output are required for swapping')\n", (17174, 17228), False, 'from facetool.errors import ArgumentError\n'), ((17357, 17427), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""Input and target should be valid files or directories"""'], {}), "('Input and target should be valid files or directories')\n", (17370, 17427), False, 'from 
facetool.errors import ArgumentError\n'), ((16858, 16871), 'dataknead.Knead', 'Knead', (['output'], {}), '(output)\n', (16863, 16871), False, 'from dataknead import Knead\n'), ((18736, 18762), 'facetool.media.is_image', 'media.is_image', (['args.input'], {}), '(args.input)\n', (18750, 18762), False, 'from facetool import config, media, util\n'), ((16201, 16218), 'dataknead.Knead', 'Knead', (['args.input'], {}), '(args.input)\n', (16206, 16218), False, 'from dataknead import Knead\n'), ((16646, 16659), 'dataknead.Knead', 'Knead', (['output'], {}), '(output)\n', (16651, 16659), False, 'from dataknead import Knead\n'), ((17299, 17316), 'os.path.exists', 'os.path.exists', (['a'], {}), '(a)\n', (17313, 17316), False, 'import os\n'), ((18541, 18557), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (18545, 18557), False, 'from facetool.path import Path\n'), ((18571, 18588), 'facetool.path.Path', 'Path', (['args.target'], {}), '(args.target)\n', (18575, 18588), False, 'from facetool.path import Path\n'), ((18958, 18985), 'facetool.media.is_image', 'media.is_image', (['args.target'], {}), '(args.target)\n', (18972, 18985), False, 'from facetool import config, media, util\n'), ((18767, 18784), 'facetool.path.Path', 'Path', (['args.target'], {}), '(args.target)\n', (18771, 18784), False, 'from facetool.path import Path\n'), ((19116, 19143), 'facetool.media.is_video', 'media.is_video', (['args.target'], {}), '(args.target)\n', (19130, 19143), False, 'from facetool import config, media, util\n'), ((19148, 19174), 'facetool.media.is_image', 'media.is_image', (['args.input'], {}), '(args.input)\n', (19162, 19174), False, 'from facetool import config, media, util\n'), ((18928, 18944), 'facetool.path.Path', 'Path', (['args.input'], {}), '(args.input)\n', (18932, 18944), False, 'from facetool.path import Path\n'), ((19315, 19342), 'facetool.media.is_video', 'media.is_video', (['args.target'], {}), '(args.target)\n', (19329, 19342), False, 'from facetool import 
config, media, util\n'), ((19347, 19373), 'facetool.media.is_video', 'media.is_video', (['args.input'], {}), '(args.input)\n', (19361, 19373), False, 'from facetool import config, media, util\n'), ((19492, 19519), 'facetool.media.is_image', 'media.is_image', (['args.target'], {}), '(args.target)\n', (19506, 19519), False, 'from facetool import config, media, util\n'), ((19524, 19550), 'facetool.media.is_image', 'media.is_image', (['args.input'], {}), '(args.input)\n', (19538, 19550), False, 'from facetool import config, media, util\n'), ((19786, 19823), 'facetool.errors.ArgumentError', 'ArgumentError', (['"""Invalid swap options"""'], {}), "('Invalid swap options')\n", (19799, 19823), False, 'from facetool.errors import ArgumentError\n')] |
from torchvision.datasets import VisionDataset
from datamodules.dsfunction import imread
from torch.utils.data import Dataset, RandomSampler, Sampler, DataLoader, TensorDataset, random_split, ConcatDataset
import os
import glob
from typing import List, Sequence, Tuple
from itertools import cycle, islice
import torch
from math import ceil
class DataFolder(VisionDataset):
    """Dataset over files under ``root`` matched by a glob ``pattern``.

    Each item is read with ``loader``; when a transform is configured the
    loaded sample is passed to it as keyword arguments, so the loader is
    expected to return a mapping.
    """

    def __init__(self, root, loader: callable, pattern: str, transforms=None, transform=None, target_transform=None):
        super().__init__(root, transforms, transform, target_transform)
        self.loader = loader
        # Collect every path below root that matches the glob pattern.
        self.samples = glob.glob(os.path.join(root, pattern))

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, index: int):
        item = self.loader(self.samples[index])
        if self.transform is None:
            return item
        # The loader returned a mapping; unpack it into the transform.
        return self.transform(**item)

    def size(self, idx):
        # Number of samples, independent of the axis index queried.
        return len(self.samples)
class ImageFolder(VisionDataset):
    """Dataset over all entries directly inside ``root``, loaded as images.

    Each item is read with :func:`imread`; an optional ``transform`` is
    applied to the loaded sample before it is returned.
    """

    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        super().__init__(root, transforms, transform, target_transform)
        self.loader = imread
        self.samples = os.listdir(root)

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, index: int):
        # Build the path with os.path.join instead of string concatenation
        # so it is assembled correctly on every platform.
        path = os.path.join(self.root, self.samples[index])
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def size(self, idx):
        # Number of samples, independent of the axis index queried.
        return len(self.samples)
class ImagePaths(VisionDataset):
    """Dataset backed by an explicit list of image file paths.

    Each item is read with :func:`imread`; an optional ``transform`` is
    applied to the loaded sample before it is returned.
    """

    # Bug fix: the original signature was `paths=List[str]`, which assigns
    # the typing alias itself as the default value instead of annotating
    # the parameter.  Annotate properly and default to an empty sequence.
    def __init__(self, paths: List[str] = (), transforms=None, transform=None, target_transform=None):
        super().__init__('.', transforms, transform, target_transform)
        self.loader = imread
        self.samples = paths

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, index: int):
        path = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def size(self, idx):
        # Number of samples, independent of the axis index queried.
        return len(self.samples)
class MergeDataset(Dataset):
    """Bundle several datasets behind a single Dataset interface.

    Indexing takes one index per wrapped dataset and returns the matching
    tuple of items; the reported length is that of the largest dataset.
    """

    def __init__(self, *tensors):
        self.tensors = tensors
        self.sizes = [len(source) for source in tensors]

    def __getitem__(self, indexs: List[int]):
        picked = []
        for inner_index, source in zip(indexs, self.tensors):
            picked.append(source[inner_index])
        return tuple(picked)

    def __len__(self):
        return max(self.sizes)
class MultiRandomSampler(RandomSampler):
    """A random sampler for MergeDataset.

    NOTE: every wrapped dataset is padded (by sampling with replacement)
    up to the length of the largest one, so each yielded tuple holds one
    index per dataset.

    Args:
        data_source (MergeDataset): MergeDataset object
        replacement (bool, optional): shuffle index use replacement. Defaults to True.
        num_samples ([type], optional): Defaults to None.
        generator ([type], optional): Defaults to None.
    """

    def __init__(self, data_source: MergeDataset, replacement=True, num_samples=None, generator=None):
        self.data_source: MergeDataset = data_source
        self.replacement = replacement
        self._num_samples = num_samples
        self.generator = generator
        self.maxn = len(self.data_source)

    @property
    def num_samples(self):
        # Resolved lazily: the dataset sizes might change at runtime.
        if self._num_samples is None:
            self._num_samples = self.data_source.sizes
        return self._num_samples

    def __iter__(self):
        index_lists = []
        for size in self.num_samples:
            if size == self.maxn:
                # Largest dataset: a plain permutation, no padding needed.
                index_lists.append(torch.randperm(size, generator=self.generator).tolist())
            else:
                # Smaller dataset: draw maxn indices with replacement.
                index_lists.append(torch.randint(high=size, size=(self.maxn,),
                                               dtype=torch.int64, generator=self.generator).tolist())
        return zip(*index_lists)

    def __len__(self):
        return len(self.data_source)
class MultiSequentialSampler(Sampler):
    r"""Samples elements sequentially, always in the same order.

    NOTE: it will expand all datasets to the same length by cycling the
    shorter index streams.

    Arguments:
        data_source (Dataset): dataset to sample from
    """

    def __init__(self, data_source: MergeDataset):
        self.data_source: MergeDataset = data_source
        self.num_samples = data_source.sizes
        self.maxn = len(data_source)

    def __iter__(self):
        streams = []
        for size in self.num_samples:
            if size == self.maxn:
                streams.append(range(size))
            else:
                # Repeat the shorter index stream until it reaches maxn.
                streams.append(islice(cycle(range(size)), self.maxn))
        return zip(*streams)

    def __len__(self):
        return len(self.data_source)
class MultiBatchDataset(MergeDataset):
    """MergeDataset variant indexed by (dataset index, item index) pairs.

    NOTE inputs type must be MergeDataset; meant to be driven by
    MultiBatchSampler.
    """

    def __getitem__(self, indexs: List[int]):
        which_dataset, inner_index = indexs
        return self.tensors[which_dataset][inner_index]
class MultiBatchSampler(Sampler):
    r"""Draw mini-batches from several samplers, interleaved by weight.

    NOTE always drop last !

    Args:
        samplers (Sampler or Iterable): Base sampler. Can be any iterable object
            with ``__len__`` implemented.
        repeats (list): repeats time
        batch_size (int): Size of mini-batch.
    """

    def __init__(self, samplers: list, repeats: list, batch_size):
        # batch_size must be a strictly positive int; bool is rejected even
        # though it subclasses int.
        if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        assert len(samplers) == len(repeats), 'Samplers number must equal repeats number'
        lightest = min(repeats)
        base_length = len(samplers[repeats.index(lightest)])
        # Infinite schedule of sampler indices, each repeated `weight` times.
        self.sampler_loop = cycle([i for i, w in enumerate(repeats) for _ in range(w)])
        self.repeats = repeats
        # Stretch every sampler to a length proportional to its weight.
        self.sizes = [base_length * ceil(w / lightest) for w in repeats]
        self.size = sum(self.sizes)
        self.batch_size = batch_size
        self.samplers: List[Sampler] = samplers
        self.new_samplers = []

    def __iter__(self):
        self.new_samplers.clear()
        self.new_samplers = [islice(cycle(smp), size)
                             for smp, size in
                             zip(self.samplers, self.sizes)]
        return self

    def __next__(self):
        # NOTE the schedule decides which sampler feeds the next mini-batch.
        active = next(self.sampler_loop)
        stream: Sampler = self.new_samplers[active]
        return [(active, next(stream)) for _ in range(self.batch_size)]

    def __len__(self):
        # NOTE whole batches of the smallest stretched sampler set the scale.
        scale = ((min(self.sizes) // self.batch_size) // min(self.repeats))
        return sum([n * scale for n in self.repeats])
| [
"itertools.cycle",
"os.listdir",
"math.ceil",
"torch.randperm",
"os.path.join",
"torch.randint"
] | [((1178, 1194), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (1188, 1194), False, 'import os\n'), ((613, 640), 'os.path.join', 'os.path.join', (['root', 'pattern'], {}), '(root, pattern)\n', (625, 640), False, 'import os\n'), ((5843, 5862), 'math.ceil', 'ceil', (['(w / minweight)'], {}), '(w / minweight)\n', (5847, 5862), False, 'from math import ceil\n'), ((6102, 6112), 'itertools.cycle', 'cycle', (['smp'], {}), '(smp)\n', (6107, 6112), False, 'from itertools import cycle, islice\n'), ((3437, 3483), 'torch.randperm', 'torch.randperm', (['size'], {'generator': 'self.generator'}), '(size, generator=self.generator)\n', (3451, 3483), False, 'import torch\n'), ((3527, 3620), 'torch.randint', 'torch.randint', ([], {'high': 'size', 'size': '(self.maxn,)', 'dtype': 'torch.int64', 'generator': 'self.generator'}), '(high=size, size=(self.maxn,), dtype=torch.int64, generator=\n self.generator)\n', (3540, 3620), False, 'import torch\n')] |
"""
Script for analyzing model's performance
"""
import argparse
import sys
import collections
import yaml
import tensorflow as tf
import tqdm
import numpy as np
import net.data
import net.ml
import net.utilities
def report_iou_results(categories_intersections_counts_map, categories_unions_counts_map):
    """
    Reports iou analysis results
    :param categories_intersections_counts_map: dictionary mapping categories to a list of intersection counts
    for different images for that category
    :param categories_unions_counts_map: dictionary mapping categories to a list of unions counts
    for different images for that category
    """

    per_category_means = []

    for category in sorted(categories_intersections_counts_map):

        # Aggregate counts over all images before dividing, so larger
        # images contribute proportionally more to the category mean.
        intersections_total = np.sum(categories_intersections_counts_map[category])
        unions_total = np.sum(categories_unions_counts_map[category])

        mean_iou = intersections_total / unions_total
        print("{} mean iou -> {:.5f}".format(category, mean_iou))
        per_category_means.append(mean_iou)

    print("\nMean iou across all categories: {:.5f}".format(np.mean(per_category_means)))
def get_segmentation_cubes_generator(samples_generator, model, indices_to_colors_map, void_color):
    """
    Get a generator that uses samples_generator to obtain (image, segmentation) tuple and yields a tuple
    (ground_truth_segmentation_cube, predicted_segmentation_cube)
    :param samples_generator: generator that yields (image, segmentation) tuple
    :param model: net.ml.Model instance
    :param indices_to_colors_map: dictionary mapping categories indices to their colors in segmentation images
    :param void_color: 3-elements tuple that represents color of pixels without a category
    :return: generator that yields (ground_truth_segmentation_cube, predicted_segmentation_cube) tuples
    """

    # Iterate with a for-loop instead of `while True: next(...)`: under
    # PEP 479 a StopIteration escaping from next() inside a generator is
    # converted to RuntimeError once samples_generator is exhausted.
    for image, segmentation in samples_generator:

        ground_truth_segmentation_cube = net.data.get_segmentation_cube(segmentation, indices_to_colors_map)

        # Raw predictions are floats before thresholding
        raw_predicted_segmentation_cube = model.predict(image)

        # Threshold the raw prediction by mapping it to a segmentation image
        # and back to a categorical cube.
        predicted_segmentation_image = net.data.get_segmentation_image(
            raw_predicted_segmentation_cube, indices_to_colors_map, void_color)

        predicted_segmentation_cube = net.data.get_segmentation_cube(
            predicted_segmentation_image, indices_to_colors_map)

        yield ground_truth_segmentation_cube, predicted_segmentation_cube
def analyze_iou(model, generator_factory, config):
    """
    Analyses intersection over union of model predictions with ground truth using VOC validation dataset
    :param model: net.ml.Model instance
    :param generator_factory: VOCSamplesGeneratorFactory instance
    :param config: object with configuration details
    """

    categories = config["categories"]
    indices_to_colors_map, void_color = net.data.get_colors_info(len(categories))

    segmentation_cubes_generator = get_segmentation_cubes_generator(
        generator_factory.get_generator(), model, indices_to_colors_map, void_color)

    # Per-category lists of per-image intersection/union pixel counts
    categories_intersections_counts_map = collections.defaultdict(list)
    categories_unions_counts_map = collections.defaultdict(list)

    for _ in tqdm.tqdm(range(generator_factory.get_size())):

        ground_truth_segmentation_cube, predicted_segmentation_cube = next(segmentation_cubes_generator)

        # Record intersection and union pixel counts for every category
        for index, category in enumerate(categories):

            ground_truth_slice = ground_truth_segmentation_cube[:, :, index]
            predicted_slice = predicted_segmentation_cube[:, :, index]

            categories_intersections_counts_map[category].append(
                np.sum(np.logical_and(ground_truth_slice, predicted_slice)))

            categories_unions_counts_map[category].append(
                np.sum(np.logical_or(ground_truth_slice, predicted_slice)))

    report_iou_results(categories_intersections_counts_map, categories_unions_counts_map)
def main():
    """
    Script entry point: parse arguments, build the model and run iou analysis
    """

    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('--config', action="store", required=True)
    args = argument_parser.parse_args(sys.argv[1:])

    with open(args.config) as config_file:
        config = yaml.safe_load(config_file)

    # Build the network and wrap it in a model with a tensorflow session
    network = net.ml.FullyConvolutionalNetwork(categories_count=len(config["categories"]))
    session = tf.keras.backend.get_session()

    model = net.ml.Model(session, network, config["categories"])
    model.load(config["model_checkpoint_path"])

    generator_factory = net.data.VOCSamplesGeneratorFactory(
        config["voc"]["data_directory"], config["voc"]["validation_set_path"], config["size_factor"])

    analyze_iou(model, generator_factory, config)


if __name__ == "__main__":
    main()
| [
"numpy.mean",
"argparse.ArgumentParser",
"numpy.logical_and",
"tensorflow.keras.backend.get_session",
"numpy.logical_or",
"yaml.safe_load",
"numpy.sum",
"collections.defaultdict"
] | [((3250, 3279), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3273, 3279), False, 'import collections\n'), ((3315, 3344), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3338, 3344), False, 'import collections\n'), ((4322, 4347), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4345, 4347), False, 'import argparse\n'), ((4650, 4680), 'tensorflow.keras.backend.get_session', 'tf.keras.backend.get_session', ([], {}), '()\n', (4678, 4680), True, 'import tensorflow as tf\n'), ((4523, 4543), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (4537, 4543), False, 'import yaml\n'), ((967, 1004), 'numpy.sum', 'np.sum', (['category_intersections_counts'], {}), '(category_intersections_counts)\n', (973, 1004), True, 'import numpy as np\n'), ((1007, 1037), 'numpy.sum', 'np.sum', (['category_unions_counts'], {}), '(category_unions_counts)\n', (1013, 1037), True, 'import numpy as np\n'), ((1218, 1243), 'numpy.mean', 'np.mean', (['categories_means'], {}), '(categories_means)\n', (1225, 1243), True, 'import numpy as np\n'), ((3723, 3828), 'numpy.logical_and', 'np.logical_and', (['ground_truth_segmentation_cube[:, :, index]', 'predicted_segmentation_cube[:, :, index]'], {}), '(ground_truth_segmentation_cube[:, :, index],\n predicted_segmentation_cube[:, :, index])\n', (3737, 3828), True, 'import numpy as np\n'), ((3965, 4069), 'numpy.logical_or', 'np.logical_or', (['ground_truth_segmentation_cube[:, :, index]', 'predicted_segmentation_cube[:, :, index]'], {}), '(ground_truth_segmentation_cube[:, :, index],\n predicted_segmentation_cube[:, :, index])\n', (3978, 4069), True, 'import numpy as np\n'), ((3908, 3935), 'numpy.sum', 'np.sum', (['intersection_pixels'], {}), '(intersection_pixels)\n', (3914, 3935), True, 'import numpy as np\n'), ((4142, 4162), 'numpy.sum', 'np.sum', (['union_pixels'], {}), '(union_pixels)\n', (4148, 4162), True, 'import numpy as np\n')] |
# --- Day 12: Rain Risk ---
# https://adventofcode.com/2020/day/12
import time

# Configuration flags: `simple` switches to the worked example from the
# puzzle text, `verbose` controls output chattiness.
simple = False
verbose = 1

if simple:
    data = 'F10\nN3\nF7\nR90\nF11'.splitlines()
else:
    # Use a context manager so the input file handle is closed instead of
    # being leaked for the lifetime of the script.
    with open('12_input.txt', 'r') as file:
        data = file.read().splitlines()
class Ship(object):
    """Ferry simulator for Advent of Code 2020, day 12.

    Two rule sets, selected by ``m``:
      * ``m == 0`` (part 1): N/S/E/W move the ship, L/R turn it, and F
        moves it in the direction it is currently facing.
      * ``m == 1`` (part 2): N/S/E/W move a waypoint relative to the
        ship, L/R rotate the waypoint around the ship, and F jumps the
        ship toward the waypoint ``dist`` times.

    Coordinates: ``x`` grows north, ``y`` grows east; ``wx``/``wy`` are
    the waypoint's offsets on the same axes.
    """

    def __init__(self, d=0, x=0, y=0, m=0, wx=0, wy=0):
        self.dir = d  # facing: 0=N, 1=E, 2=S, 3=W (used only in part-1 mode)
        self.dirAsc = ['north', 'east', 'south', 'west']
        self.x = x    # +N / -S
        self.wx = wx  # waypoint offset, +N / -S
        self.y = y    # +E / -W
        self.wy = wy  # waypoint offset, +E / -W
        self.m = m    # ship mode: 0=part1, 1=part2
        self.validCmd = ['N', 'S', 'E', 'W', 'L', 'R', 'F']

    def reset(self, d=0, x=0, y=0, m=0, wx=0, wy=0):
        """Re-initialize all state so one instance can run both parts."""
        self.dir = d
        self.m = m
        self.x = x
        self.wx = wx
        self.y = y
        self.wy = wy

    def command(self, cmd):
        """Apply one instruction string such as 'F10' or 'R90'.

        Malformed commands are reported on stdout and ignored, matching
        the original behaviour.
        """
        if len(cmd) <= 1:
            print('command too short')
            return
        if cmd[0] not in self.validCmd:
            print('invalid command')
            return
        dist = int(cmd[1:])
        if self.m == 0:
            self._command_part1(cmd[0], dist)
        else:
            self._command_part2(cmd[0], dist)

    def _command_part1(self, action, dist):
        """Part-1 rules: move/turn the ship itself."""
        if action == 'N':
            self.x += dist
        elif action == 'S':
            self.x -= dist
        elif action == 'E':
            self.y += dist
        elif action == 'W':
            self.y -= dist
        elif action == 'L':
            # Turn amounts are always multiples of 90 degrees.
            self.dir = (self.dir - dist // 90) % 4
        elif action == 'R':
            self.dir = (self.dir + dist // 90) % 4
        else:  # 'F': advance in the current facing direction
            if self.dir == 0:
                self.x += dist
            elif self.dir == 1:
                self.y += dist
            elif self.dir == 2:
                self.x -= dist
            else:
                self.y -= dist

    def _command_part2(self, action, dist):
        """Part-2 rules: move/rotate the waypoint; F moves the ship.

        Bug fix vs. the original: the rotation amount is computed
        locally instead of being stashed in ``self.dir``, so a part-2
        ship created with ``d > 0`` no longer performs a spurious
        waypoint rotation on its first command.
        """
        if action == 'N':
            self.wx += dist
        elif action == 'S':
            self.wx -= dist
        elif action == 'E':
            self.wy += dist
        elif action == 'W':
            self.wy -= dist
        elif action == 'L':
            self._rotate_waypoint(-(dist // 90) % 4)
        elif action == 'R':
            self._rotate_waypoint((dist // 90) % 4)
        else:  # 'F': jump toward the waypoint dist times
            self.x += dist * self.wx
            self.y += dist * self.wy

    def _rotate_waypoint(self, quarter_turns):
        """Rotate the waypoint around the ship by quarter_turns * 90° CW."""
        if quarter_turns == 1:    # 90 clockwise
            self.wx, self.wy = -self.wy, self.wx
        elif quarter_turns == 2:  # 180
            self.wx, self.wy = -self.wx, -self.wy
        elif quarter_turns == 3:  # 90 counter-clockwise
            self.wx, self.wy = self.wy, -self.wx

    def print(self):
        """Print a human-readable position report (format depends on mode)."""
        if self.m == 0:
            print('Ship position: {} units {}, {} units {}, facing {}'
                  .format(abs(self.y), self.dirAsc[1] if self.y >= 0 else self.dirAsc[3],
                          abs(self.x), self.dirAsc[0] if self.x >= 0 else self.dirAsc[2],
                          self.dirAsc[self.dir]))
        else:
            print('Ship position: {} units {}, {} units {}\n'
                  'Waypoint position: {} units {}, {} units {}'
                  .format(abs(self.y), self.dirAsc[1] if self.y >= 0 else self.dirAsc[3],
                          abs(self.x), self.dirAsc[0] if self.x >= 0 else self.dirAsc[2],
                          abs(self.wy), self.dirAsc[1] if self.wy >= 0 else self.dirAsc[3],
                          abs(self.wx), self.dirAsc[0] if self.wx >= 0 else self.dirAsc[2]))
def main():
    """Solve both parts over the module-level `data`, timing each part."""
    def run(ship):
        # Feed every instruction to the ship and report per `verbose`.
        for line in data:
            ship.command(line)
            if verbose > 1:
                print('cmd: {}'.format(line))
                ship.print()
        if verbose > 0:
            ship.print()
            print('distance {}'.format(abs(ship.x) + abs(ship.y)))

    t_start = time.time()
    # Part 1: the ship itself moves; it starts facing east.
    ship = Ship(d=1)
    run(ship)
    t_middle = time.time()
    print("time elapsed: %s" % (t_middle - t_start))
    # Part 2: waypoint-relative movement; waypoint starts 10 east, 1 north.
    ship.reset(m=1, wx=1, wy=10)
    run(ship)
    t_end = time.time()
    print("time elapsed: %s" % (t_end - t_middle))


if __name__ == '__main__':
    main()
| [
"time.time"
] | [((4279, 4290), 'time.time', 'time.time', ([], {}), '()\n', (4288, 4290), False, 'import time\n'), ((4595, 4606), 'time.time', 'time.time', ([], {}), '()\n', (4604, 4606), False, 'import time\n'), ((4980, 4991), 'time.time', 'time.time', ([], {}), '()\n', (4989, 4991), False, 'import time\n')] |
from collections import OrderedDict
from .abstract_model_helper import ModelHelper
from tflite.Tensor import Tensor
from tflite.Model import Model
from tflite.TensorType import TensorType
from typing import List
class TFLiteModelHelper(ModelHelper):
    """ModelHelper implementation for FlatBuffers-serialized TFLite models."""

    # tflite TensorType constant -> numpy-style dtype string.
    TFLITE_TENSOR_TYPE_TO_DTYPE = {
        TensorType.UINT8: "uint8",
        TensorType.FLOAT32: "float32",
        TensorType.INT32: "int32",
        TensorType.INT64: "int64",
    }

    def __init__(self, model_path: str) -> None:
        super(TFLiteModelHelper, self).__init__(model_path)
        self.__tflite_model = None
        self.__input_dtypes_dict = {}
        self.__input_tensors = []
        self.__output_tensors = []

    @property
    def input_tensors(self) -> List[Tensor]:
        """Model input tensors; filled by extract_input_and_output_tensors."""
        return self.__input_tensors

    @property
    def output_tensors(self) -> List[Tensor]:
        """Model output tensors; filled by extract_input_and_output_tensors."""
        return self.__output_tensors

    @property
    def input_dtypes_dict(self) -> {str: str}:
        """Map each input tensor name to its dtype string."""
        return {
            tensor.Name().decode("utf-8"):
                self.TFLITE_TENSOR_TYPE_TO_DTYPE[tensor.Type()]
            for tensor in self.input_tensors
        }

    @property
    def tflite_model(self) -> Model:
        """The parsed FlatBuffers model, or None before load_model()."""
        return self.__tflite_model

    @staticmethod
    def get_supported_tflite_input_tensor_type() -> List[TensorType]:
        """Tensor types accepted as model inputs."""
        return [TensorType.FLOAT32, TensorType.UINT8]

    def load_model(self) -> None:
        """Read and parse the TFLite flatbuffer at self.model_path."""
        try:
            import tflite.Model
        except ImportError:
            raise ImportError("The tflite package must be installed")
        with open(self.model_path, "rb") as model_file:
            raw_buffer = model_file.read()
        self.__tflite_model = tflite.Model.Model.GetRootAsModel(raw_buffer, 0)

    def extract_input_and_output_tensors(self, user_shape_dict=None) -> None:
        """Populate the input/output tensor lists from the first subgraph.

        `user_shape_dict` must name every model input; it is the caller's
        declaration of the expected input shapes.
        """
        if user_shape_dict is None:
            raise Exception("Model input names and shapes must be provided")
        subgraph = self.tflite_model.Subgraphs(0)
        self.__input_tensors = list(
            self.__get_input_tensors(subgraph, user_shape_dict).values())
        self.__output_tensors = list(
            self.__get_output_tensors(subgraph).values())

    def __get_input_tensors(self, subgraph, user_shape_dict):
        # Validate type and user-declared presence of every model input.
        collected = OrderedDict()
        supported = self.get_supported_tflite_input_tensor_type()
        for tensor_index in subgraph.InputsAsNumpy():
            tensor = subgraph.Tensors(tensor_index)
            tensor_name = tensor.Name().decode("utf-8")
            if tensor.Type() not in supported:
                raise Exception("Unsupported input data type for input {} with tflite tensor type {}".format(tensor_name, str(tensor.Type())))
            if tensor_name not in user_shape_dict:
                raise Exception("Please specify all input layers in data_shape.")
            collected[tensor_name] = tensor
        return collected

    def __get_output_tensors(self, subgraph):
        collected = OrderedDict()
        for tensor_index in subgraph.OutputsAsNumpy():
            tensor = subgraph.Tensors(tensor_index)
            collected[tensor.Name().decode("utf-8")] = tensor
        return collected

    def get_metadata(self) -> {str: List}:
        """Describe all inputs/outputs as {'name', 'dtype', 'shape'} dicts."""
        def describe(tensor):
            return {
                'name': tensor.Name().decode("utf-8"),
                'dtype': self.TFLITE_TENSOR_TYPE_TO_DTYPE[tensor.Type()],
                'shape': tensor.ShapeAsNumpy().tolist(),
            }
        return {
            "Inputs": [describe(t) for t in self.input_tensors],
            "Outputs": [describe(t) for t in self.output_tensors],
        }
| [
"collections.OrderedDict"
] | [((2415, 2428), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2426, 2428), False, 'from collections import OrderedDict\n'), ((3214, 3227), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3225, 3227), False, 'from collections import OrderedDict\n')] |
from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the WMS desk module configuration: two card sections
	("WMS" and "Reports") with their shortcut items."""
	def doctype(name, label):
		# Shortcut dict for a doctype entry with a translated label.
		return {
			"type": "doctype",
			"name": name,
			"label": _(label)
		}

	wms_items = [
		doctype("WMS Lead", "WMS Lead"),
		doctype("Send SMS", "Send SMS"),
		doctype("Message Template", "Message Template"),
		doctype("Group", "Group"),
		doctype("WhatsApp Setting", "WhatsApp Setting"),
		doctype("WMS Task", "Task"),
		doctype("WMS Task Rule", "Task Rule"),
		doctype("Message Rule", "Message Rule")
	]
	report_items = [
		{
			"type": "report",
			"name": "Performance Report",
			"doctype": "WMS Task",
			"is_query_report": True
		},
		doctype("Whatsapp Message Log", "Whatsapp Message Log")
	]
	return [
		{
			"label": _("WMS"),
			"icon": "octicon octicon-briefcase",
			"items": wms_items
		},
		{
			"label": _("Reports"),
			"icon": "octicon octicon-briefcase",
			"items": report_items
		}
	]
| [
"frappe._"
] | [((128, 136), 'frappe._', '_', (['"""WMS"""'], {}), "('WMS')\n", (129, 136), False, 'from frappe import _\n'), ((1621, 1633), 'frappe._', '_', (['"""Reports"""'], {}), "('Reports')\n", (1622, 1633), False, 'from frappe import _\n'), ((342, 355), 'frappe._', '_', (['"""WMS Lead"""'], {}), "('WMS Lead')\n", (343, 355), False, 'from frappe import _\n'), ((506, 519), 'frappe._', '_', (['"""Send SMS"""'], {}), "('Send SMS')\n", (507, 519), False, 'from frappe import _\n'), ((678, 699), 'frappe._', '_', (['"""Message Template"""'], {}), "('Message Template')\n", (679, 699), False, 'from frappe import _\n'), ((847, 857), 'frappe._', '_', (['"""Group"""'], {}), "('Group')\n", (848, 857), False, 'from frappe import _\n'), ((1016, 1037), 'frappe._', '_', (['"""WhatsApp Setting"""'], {}), "('WhatsApp Setting')\n", (1017, 1037), False, 'from frappe import _\n'), ((1188, 1197), 'frappe._', '_', (['"""Task"""'], {}), "('Task')\n", (1189, 1197), False, 'from frappe import _\n'), ((1353, 1367), 'frappe._', '_', (['"""Task Rule"""'], {}), "('Task Rule')\n", (1354, 1367), False, 'from frappe import _\n'), ((1522, 1539), 'frappe._', '_', (['"""Message Rule"""'], {}), "('Message Rule')\n", (1523, 1539), False, 'from frappe import _\n'), ((1985, 2010), 'frappe._', '_', (['"""Whatsapp Message Log"""'], {}), "('Whatsapp Message Log')\n", (1986, 2010), False, 'from frappe import _\n')] |
import datetime
from pychpp import ht_model
from pychpp import ht_xml
from pychpp import ht_team, ht_match, ht_datetime
class HTMatchesArchive(ht_model.HTModel):
    """
    Hattrick matches archive

    List-like container of the matches a team played over a given
    period or season, built from the CHPP 'matchesarchive' XML file.
    """
    _SOURCE_FILE = "matchesarchive"
    _SOURCE_FILE_VERSION = "1.4"
    # URL PATH with several params available should be urlencoded
    _URL_PATH = "%2FClub%2FMatches%2FArchive.aspx%3F"
    # (attribute name, xml path, converter) triples consumed by the
    # HTModel base class when parsing the response.
    _ht_attributes = [("team_id", "Team/TeamID", ht_xml.HTXml.ht_int),
                      ("team_name", "Team/TeamName", ht_xml.HTXml.ht_str),
                      ("first_match_date", "Team/FirstMatchDate",
                       ht_xml.HTXml.ht_datetime_from_text),
                      ("last_match_date", "Team/LastMatchDate",
                       ht_xml.HTXml.ht_datetime_from_text),
                      ]
    def __init__(self, ht_id=None, youth=False, first_match_date=None,
                 last_match_date=None, season=None, hto=False, **kwargs):
        """
        Initialization of a HTMatchesArchive instance

        :param ht_id: Hattrick ID of team
        :param youth: define if requested team is youth or not
        :param first_match_date: begin date to search matches
        :param last_match_date: end date to search matches
        :param season: season to search matches
        :param hto: whether to include tournament matches
        :type ht_id: int
        :type youth: bool
        :type first_match_date: datetime.datetime or ht_datetime.HTDatetime
        :type last_match_date: datetime.datetime or ht_datetime.HTDatetime
        :type season: int
        :type hto: bool
        :key chpp: CHPP instance of connected user, must be a chpp.CHPP object
        :return: a ht_matches_archive.HTMatchesArchive object
        :rtype: ht_matches_archive.HTMatchesArchive
        """
        # Check parameters integrity
        # (elif chain: only the first invalid argument is reported)
        if not isinstance(ht_id, int) and ht_id is not None:
            raise ValueError("ht_id must be None or an integer")
        elif not isinstance(youth, bool):
            raise ValueError("youth must be a boolean")
        elif (not (isinstance(first_match_date, datetime.datetime)
                   or isinstance(first_match_date, ht_datetime.HTDatetime))
              and first_match_date is not None):
            raise ValueError("first_match_date must be a datetime "
                             "or HTDatetime instance")
        elif (not (isinstance(last_match_date, datetime.datetime)
                   or isinstance(last_match_date, ht_datetime.HTDatetime))
              and last_match_date is not None):
            raise ValueError("last_match_date must be a datetime "
                             "or HTDatetime instance")
        elif not isinstance(season, int) and season is not None:
            raise ValueError("season must be a integer")
        elif not isinstance(hto, bool):
            raise ValueError("hto must be a boolean")
        # Define request arguments
        # NOTE(review): _REQUEST_ARGS must be populated before
        # super().__init__(), which presumably performs the CHPP request
        # and fills self._data -- confirm against ht_model.HTModel.
        self._REQUEST_ARGS = dict()
        self._REQUEST_ARGS["teamID"] = str(ht_id) if ht_id is not None else ""
        self._REQUEST_ARGS["isYouth"] = "true" if youth is True else "false"
        self._REQUEST_ARGS["FirstMatchDate"] = (
            ht_xml.HTXml.ht_datetime_to_text(first_match_date)
            if first_match_date is not None else "")
        self._REQUEST_ARGS["LastMatchDate"] = (
            ht_xml.HTXml.ht_datetime_to_text(last_match_date)
            if last_match_date is not None else "")
        self._REQUEST_ARGS["season"] = (
            str(season) if season is not None else "")
        self._REQUEST_ARGS["HTO"] = "true" if hto is True else "false"
        super().__init__(**kwargs)
        # One HTMatchesArchiveItem per <Match> node in the response.
        self.matches_list = [
            HTMatchesArchiveItem(chpp=self._chpp, data=data)
            for data in self._data.findall("Team/MatchList/Match")]
    # Sequence behaviour (indexing, len, repr) delegates to matches_list.
    def __getitem__(self, item):
        return self.matches_list[item]
    def __len__(self):
        return len(self.matches_list)
    def __repr__(self):
        return self.matches_list.__repr__()
    @property
    def url(self):
        """Hattrick website URL of this archive page; only the team id
        and season parameters are carried over (urlencoded)."""
        url_args = []
        if self.team_id:
            url_args.append(f'TeamID%3D{self.team_id}')
        if self._REQUEST_ARGS["season"]:
            url_args.append(f'season%3D{self._REQUEST_ARGS["season"]}')
        return f'{self._BASE_URL}{self._URL_PATH}{"%26".join(url_args)}'
class HTMatchesArchiveItem(ht_model.HTModel):
    """
    One match entry inside a HTMatchesArchive result
    """
    _URL_PATH = "/Club/Matches/Match.aspx?matchID="

    # (attribute name, xml path, converter) triples consumed by the
    # HTModel base class when parsing a <Match> node.
    _ht_attributes = [
        ("ht_id", "MatchID", ht_xml.HTXml.ht_int,),
        ("home_team_id", "HomeTeam/HomeTeamID", ht_xml.HTXml.ht_int,),
        ("home_team_name", "HomeTeam/HomeTeamName", ht_xml.HTXml.ht_str,),
        ("away_team_id", "AwayTeam/AwayTeamID", ht_xml.HTXml.ht_int,),
        ("away_team_name", "AwayTeam/AwayTeamName", ht_xml.HTXml.ht_str,),
        ("datetime", "MatchDate", ht_xml.HTXml.ht_datetime_from_text,),
        ("type", "MatchType", ht_xml.HTXml.ht_int,),
        ("context_id", "MatchContextId", ht_xml.HTXml.ht_int,),
        ("rule_id", "MatchRuleId", ht_xml.HTXml.ht_int,),
        ("cup_level", "CupLevel", ht_xml.HTXml.ht_int,),
        ("cup_level_index", "CupLevelIndex", ht_xml.HTXml.ht_int,),
        ("home_goals", "HomeGoals", ht_xml.HTXml.ht_int,),
        ("away_goals", "AwayGoals", ht_xml.HTXml.ht_int,),
    ]

    def __repr__(self):
        return "<{} object : {} - {} ({})>".format(
            type(self).__name__,
            self.home_team_name, self.away_team_name, self.ht_id)

    @property
    def details(self):
        """Fetch and return the full HTMatch for this entry."""
        return ht_match.HTMatch(chpp=self._chpp, ht_id=self.ht_id)

    @property
    def home_team(self):
        """Fetch and return the home HTTeam."""
        return ht_team.HTTeam(chpp=self._chpp, ht_id=self.home_team_id)

    @property
    def away_team(self):
        """Fetch and return the away HTTeam."""
        return ht_team.HTTeam(chpp=self._chpp, ht_id=self.away_team_id)
| [
"pychpp.ht_team.HTTeam",
"pychpp.ht_match.HTMatch",
"pychpp.ht_xml.HTXml.ht_datetime_to_text"
] | [((6074, 6125), 'pychpp.ht_match.HTMatch', 'ht_match.HTMatch', ([], {'chpp': 'self._chpp', 'ht_id': 'self.ht_id'}), '(chpp=self._chpp, ht_id=self.ht_id)\n', (6090, 6125), False, 'from pychpp import ht_team, ht_match, ht_datetime\n'), ((6181, 6237), 'pychpp.ht_team.HTTeam', 'ht_team.HTTeam', ([], {'chpp': 'self._chpp', 'ht_id': 'self.home_team_id'}), '(chpp=self._chpp, ht_id=self.home_team_id)\n', (6195, 6237), False, 'from pychpp import ht_team, ht_match, ht_datetime\n'), ((6293, 6349), 'pychpp.ht_team.HTTeam', 'ht_team.HTTeam', ([], {'chpp': 'self._chpp', 'ht_id': 'self.away_team_id'}), '(chpp=self._chpp, ht_id=self.away_team_id)\n', (6307, 6349), False, 'from pychpp import ht_team, ht_match, ht_datetime\n'), ((3330, 3380), 'pychpp.ht_xml.HTXml.ht_datetime_to_text', 'ht_xml.HTXml.ht_datetime_to_text', (['first_match_date'], {}), '(first_match_date)\n', (3362, 3380), False, 'from pychpp import ht_xml\n'), ((3494, 3543), 'pychpp.ht_xml.HTXml.ht_datetime_to_text', 'ht_xml.HTXml.ht_datetime_to_text', (['last_match_date'], {}), '(last_match_date)\n', (3526, 3543), False, 'from pychpp import ht_xml\n')] |
"""
langcodes knows what languages are. It knows the standardized codes that
refer to them, such as `en` for English, `es` for Spanish and `hi` for Hindi.
Often, it knows what these languages are called *in* a language, and that
language doesn't have to be English.
See README.md for the main documentation, or read it on GitHub at
https://github.com/LuminosoInsight/langcodes/ . For more specific documentation
on the functions in langcodes, scroll down and read the docstrings.
"""
import warnings
from langcodes.tag_parser import parse_tag
from langcodes.names import code_to_names, name_to_code
from langcodes.distance import raw_distance
from langcodes.data_dicts import (
DEFAULT_SCRIPTS, LANGUAGE_REPLACEMENTS, SCRIPT_REPLACEMENTS,
REGION_REPLACEMENTS, NORMALIZED_MACROLANGUAGES, LIKELY_SUBTAGS
)
# When we're getting natural language information *about* languages, it's in
# U.S. English if you don't specify the language.
DEFAULT_LANGUAGE = 'en-US'
class Language:
"""
The Language class defines the results of parsing a language tag.
Language objects have the following attributes, any of which may be
unspecified (in which case their value is None):
- *language*: the code for the language itself.
- *script*: the 4-letter code for the writing system being used.
- *region*: the 2-letter or 3-digit code for the country or similar region
whose usage of the language appears in this text.
- *extlangs*: a list of more specific language codes that follow the language
code. (This is allowed by the language code syntax, but deprecated.)
- *variants*: codes for specific variations of language usage that aren't
covered by the *script* or *region* codes.
- *extensions*: information that's attached to the language code for use in
some specific system, such as Unicode collation orders.
- *private*: a code starting with `x-` that has no defined meaning.
The `Language.get` method converts a string to a Language instance.
It's also available at the top level of this module as the `get` function.
"""
ATTRIBUTES = ['language', 'extlangs', 'script', 'region',
'variants', 'extensions', 'private']
# When looking up "likely subtags" data, we try looking up the data for
# increasingly less specific versions of the language code.
BROADER_KEYSETS = [
{'language', 'script', 'region'},
{'language', 'region'},
{'language', 'script'},
{'language'},
{'script'},
{}
]
MATCHABLE_KEYSETS = [
{'language', 'script', 'region'},
{'language', 'script'},
{'language'},
]
# Values cached at the class level
_INSTANCES = {}
_PARSE_CACHE = {}
def __init__(self, language=None, extlangs=None, script=None,
region=None, variants=None, extensions=None, private=None):
"""
The constructor for Language objects.
It's inefficient to call this directly, because it can't return
an existing instance. Instead, call Language.make(), which
has the same signature.
"""
self.language = language
self.extlangs = extlangs
self.script = script
self.region = region
self.variants = variants
self.extensions = extensions
self.private = private
# Cached values
self._simplified = None
self._searchable = None
self._matchable_tags = None
self._broader = None
self._assumed = None
self._filled = None
self._macrolanguage = None
self._str_tag = None
self._dict = None
# Make sure the str_tag value is cached
self.to_tag()
@classmethod
def make(cls, language=None, extlangs=None, script=None,
region=None, variants=None, extensions=None, private=None):
"""
Create a Language object by giving any subset of its attributes.
If this value has been created before, return the existing value.
"""
values = (language, tuple(extlangs or ()), script, region,
tuple(variants or ()), tuple(extensions or ()), private)
if values in cls._INSTANCES:
return cls._INSTANCES[values]
instance = cls(
language=language, extlangs=extlangs,
script=script, region=region, variants=variants,
extensions=extensions, private=private
)
cls._INSTANCES[values] = instance
return instance
@staticmethod
def get(tag: {str, 'Language'}, normalize=True) -> 'Language':
"""
Create a Language object from a language tag string.
If normalize=True, non-standard or overlong tags will be replaced as
they're interpreted. This is recommended.
Here are several examples of language codes, which are also test cases.
Most language codes are straightforward, but these examples will get
pretty obscure toward the end.
>>> Language.get('en-US')
Language.make(language='en', region='US')
>>> Language.get('zh-Hant')
Language.make(language='zh', script='Hant')
>>> Language.get('und')
Language.make()
This function is idempotent, in case you already have a Language object:
>>> Language.get(Language.get('en-us'))
Language.make(language='en', region='US')
The non-code 'root' is sometimes used to represent the lack of any
language information, similar to 'und'.
>>> Language.get('root')
Language.make()
By default, getting a Language object will automatically convert
deprecated tags:
>>> Language.get('iw')
Language.make(language='he')
>>> Language.get('in')
Language.make(language='id')
One type of deprecated tag that should be replaced is for sign
languages, which used to all be coded as regional variants of a
fictitious global sign language called 'sgn'. Of course, there is no
global sign language, so sign languages now have their own language
codes.
>>> Language.get('sgn-US')
Language.make(language='ase')
>>> Language.get('sgn-US', normalize=False)
Language.make(language='sgn', region='US')
'en-gb-oed' is a tag that's grandfathered into the standard because it
has been used to mean "spell-check this with Oxford English Dictionary
spelling", but that tag has the wrong shape. We interpret this as the
new standardized tag 'en-gb-oxendict', unless asked not to normalize.
>>> Language.get('en-gb-oed')
Language.make(language='en', region='GB', variants=['oxendict'])
>>> Language.get('en-gb-oed', normalize=False)
Language.make(language='en-gb-oed')
'zh-min-nan' is another oddly-formed tag, used to represent the
Southern Min language, which includes Taiwanese as a regional form. It
now has its own language code.
>>> Language.get('zh-min-nan')
Language.make(language='nan')
There's not much we can do with the vague tag 'zh-min':
>>> Language.get('zh-min')
Language.make(language='zh-min')
Occasionally Wiktionary will use 'extlang' tags in strange ways, such
as using the tag 'und-ibe' for some unspecified Iberian language.
>>> Language.get('und-ibe')
Language.make(extlangs=['ibe'])
Here's an example of replacing multiple deprecated tags.
The language tag 'sh' (Serbo-Croatian) ended up being politically
problematic, and different standards took different steps to address
this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
and 'bs'. Unicode further decided that it's a legacy tag that should
be interpreted as 'sr-Latn', which the language matching rules say
is mutually intelligible with all those languages.
We complicate the example by adding on the region tag 'QU', an old
provisional tag for the European Union, which is now standardized as
'EU'.
>>> Language.get('sh-QU')
Language.make(language='sr', script='Latn', region='EU')
"""
if isinstance(tag, Language):
if not normalize:
# shortcut: we have the tag already
return tag
# We might need to normalize this tag. Convert it back into a
# string tag, to cover all the edge cases of normalization in a
# way that we've already solved.
tag = tag.to_tag()
if (tag, normalize) in Language._PARSE_CACHE:
return Language._PARSE_CACHE[tag, normalize]
data = {}
# if the complete tag appears as something to normalize, do the
# normalization right away. Smash case when checking, because the
# case normalization that comes from parse_tag() hasn't been applied
# yet.
tag_lower = tag.lower()
if normalize and tag_lower in LANGUAGE_REPLACEMENTS:
tag = LANGUAGE_REPLACEMENTS[tag_lower]
components = parse_tag(tag)
for typ, value in components:
if typ == 'extlang' and normalize and 'language' in data:
# smash extlangs when possible
minitag = '%s-%s' % (data['language'], value)
norm = LANGUAGE_REPLACEMENTS.get(minitag.lower())
if norm is not None:
data.update(
Language.get(norm, normalize).to_dict()
)
else:
data.setdefault('extlangs', []).append(value)
elif typ in {'extlang', 'variant', 'extension'}:
data.setdefault(typ + 's', []).append(value)
elif typ == 'language':
if value == 'und':
pass
elif normalize:
replacement = LANGUAGE_REPLACEMENTS.get(value.lower())
if replacement is not None:
# parse the replacement if necessary -- this helps with
# Serbian and Moldovan
data.update(
Language.get(replacement, normalize).to_dict()
)
else:
data['language'] = value
else:
data['language'] = value
elif typ == 'region':
if normalize:
data['region'] = REGION_REPLACEMENTS.get(value.lower(), value)
else:
data['region'] = value
elif typ == 'grandfathered':
# If we got here, we got a grandfathered tag but we were asked
# not to normalize it, or the CLDR data doesn't know how to
# normalize it. The best we can do is set the entire tag as the
# language.
data['language'] = value
else:
data[typ] = value
result = Language.make(**data)
Language._PARSE_CACHE[tag, normalize] = result
return result
def to_tag(self) -> str:
"""
Convert a Language back to a standard language tag, as a string.
This is also the str() representation of a Language object.
>>> Language.make(language='en', region='GB').to_tag()
'en-GB'
>>> Language.make(language='yue', script='Hant', region='HK').to_tag()
'yue-Hant-HK'
>>> Language.make(script='Arab').to_tag()
'und-Arab'
>>> str(Language.make(region='IN'))
'und-IN'
"""
if self._str_tag is not None:
return self._str_tag
subtags = ['und']
if self.language:
subtags[0] = self.language
if self.extlangs:
for extlang in sorted(self.extlangs):
subtags.append(extlang)
if self.script:
subtags.append(self.script)
if self.region:
subtags.append(self.region)
if self.variants:
for variant in sorted(self.variants):
subtags.append(variant)
if self.extensions:
for ext in self.extensions:
subtags.append(ext)
if self.private:
subtags.append(self.private)
self._str_tag = '-'.join(subtags)
return self._str_tag
def simplify_script(self) -> 'Language':
"""
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
"""
if self._simplified is not None:
return self._simplified
if self.language and self.script:
if DEFAULT_SCRIPTS.get(self.language) == self.script:
result = self.update_dict({'script': None})
self._simplified = result
return self._simplified
self._simplified = self
return self._simplified
def assume_script(self) -> 'Language':
"""
Fill in the script if it's missing, and if it can be assumed from the
language subtag. This is the opposite of `simplify_script`.
>>> Language.make(language='en').assume_script()
Language.make(language='en', script='Latn')
>>> Language.make(language='yi').assume_script()
Language.make(language='yi', script='Hebr')
>>> Language.make(language='yi', script='Latn').assume_script()
Language.make(language='yi', script='Latn')
This fills in nothing when the script cannot be assumed -- such as when
the language has multiple scripts, or it has no standard orthography:
>>> Language.make(language='sr').assume_script()
Language.make(language='sr')
>>> Language.make(language='eee').assume_script()
Language.make(language='eee')
It also dosn't fill anything in when the language is unspecified.
>>> Language.make(region='US').assume_script()
Language.make(region='US')
"""
if self._assumed is not None:
return self._assumed
if self.language and not self.script:
try:
self._assumed = self.update_dict({'script': DEFAULT_SCRIPTS[self.language]})
except KeyError:
self._assumed = self
else:
self._assumed = self
return self._assumed
def prefer_macrolanguage(self) -> 'Language':
"""
BCP 47 doesn't specify what to do with macrolanguages and the languages
they contain. The Unicode CLDR, on the other hand, says that when a
macrolanguage has a dominant standardized language, the macrolanguage
code should be used for that language. For example, Mandarin Chinese
is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.
This isn't a rule you'd want to follow in all cases -- for example, you may
want to be able to specifically say that 'ms' (the Malay macrolanguage)
contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying
this rule helps when interoperating with the Unicode CLDR.
So, applying `prefer_macrolanguage` to a Language object will
return a new object, replacing the language with the macrolanguage if
it is the dominant language within that macrolanguage. It will leave
non-dominant languages that have macrolanguages alone.
>>> Language.get('arb').prefer_macrolanguage()
Language.make(language='ar')
>>> Language.get('cmn-Hant').prefer_macrolanguage()
Language.make(language='zh', script='Hant')
>>> Language.get('yue-Hant').prefer_macrolanguage()
Language.make(language='yue', script='Hant')
"""
if self._macrolanguage is not None:
return self._macrolanguage
language = self.language or 'und'
if language in NORMALIZED_MACROLANGUAGES:
self._macrolanguage = self.update_dict({
'language': NORMALIZED_MACROLANGUAGES[language]
})
else:
self._macrolanguage = self
return self._macrolanguage
def broaden(self) -> 'List[Language]':
"""
Iterate through increasingly general versions of this parsed language tag.
This isn't actually that useful for matching two arbitrary language tags
against each other, but it is useful for matching them against a known
standardized form, such as in the CLDR data.
The list of broader versions to try appears in UTR 35, section 4.3,
"Likely Subtags".
>>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
... print(langdata)
nn-Latn-NO-x-thingy
nn-Latn-NO
nn-NO
nn-Latn
nn
und-Latn
und
"""
if self._broader is not None:
return self._broader
self._broader = [self]
seen = set(self.to_tag())
for keyset in self.BROADER_KEYSETS:
filtered = self._filter_attributes(keyset)
tag = filtered.to_tag()
if tag not in seen:
self._broader.append(filtered)
seen.add(tag)
return self._broader
    def matchable_tags(self) -> 'List[str]':
        """
        Return string tags for increasingly general forms of this tag
        (language-script-region, language-script, language), used when
        matching against supported languages. The list is computed once
        and cached.

        NOTE(review): the original annotation said List[Language], but
        the method appends .to_tag() strings, so List[str] is correct.
        """
        if self._matchable_tags is not None:
            return self._matchable_tags
        self._matchable_tags = []
        for keyset in self.MATCHABLE_KEYSETS:
            filtered_tag = self._filter_attributes(keyset).to_tag()
            self._matchable_tags.append(filtered_tag)
        return self._matchable_tags
def maximize(self) -> 'Language':
"""
The Unicode CLDR contains a "likelySubtags" data file, which can guess
reasonable values for fields that are missing from a language tag.
This is particularly useful for comparing, for example, "zh-Hant" and
"zh-TW", two common language tags that say approximately the same thing
via rather different information. (Using traditional Han characters is
not the same as being in Taiwan, but each implies that the other is
likely.)
These implications are provided in the CLDR supplemental data, and are
based on the likelihood of people using the language to transmit
information on the Internet. (This is why the overall default is English,
not Chinese.)
>>> str(Language.get('zh-Hant').maximize())
'zh-Hant-TW'
>>> str(Language.get('zh-TW').maximize())
'zh-Hant-TW'
>>> str(Language.get('ja').maximize())
'ja-Jpan-JP'
>>> str(Language.get('pt').maximize())
'pt-Latn-BR'
>>> str(Language.get('und-Arab').maximize())
'ar-Arab-EG'
>>> str(Language.get('und-CH').maximize())
'de-Latn-CH'
>>> str(Language.make().maximize()) # 'MURICA.
'en-Latn-US'
>>> str(Language.get('und-ibe').maximize())
'en-ibe-Latn-US'
"""
if self._filled is not None:
return self._filled
for broader in self.broaden():
tag = broader.to_tag()
if tag in LIKELY_SUBTAGS:
result = Language.get(LIKELY_SUBTAGS[tag], normalize=False)
result = result.update(self)
self._filled = result
return result
raise RuntimeError(
"Couldn't fill in likely values. This represents a problem with "
"the LIKELY_SUBTAGS data."
)
# Support an old, wordier name for the method
fill_likely_values = maximize
def match_score(self, supported: 'Language') -> int:
"""
Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function.
"""
if supported == self:
return 100
desired_complete = self.prefer_macrolanguage().maximize()
supported_complete = supported.prefer_macrolanguage().maximize()
desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region)
supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region)
return 100 - raw_distance(desired_triple, supported_triple)
# These methods help to show what the language tag means in natural
# language. They actually apply the language-matching algorithm to find
# the right language to name things in.
def _get_name(self, attribute: str, language, min_score: int):
assert attribute in self.ATTRIBUTES
if isinstance(language, Language):
language = language.to_tag()
attr_value = getattr(self, attribute)
if attr_value is None:
return None
names = code_to_names(attribute, attr_value)
names['und'] = getattr(self, attribute)
return self._best_name(names, language, min_score)
def _best_name(self, names: dict, language: str, min_score: int):
possible_languages = sorted(names.keys())
target_language, score = best_match(language, possible_languages, min_score)
return names[target_language]
def language_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Give the name of the language (not the entire tag, just the language part)
in a natural language. The target language can be given as a string or
another Language object.
By default, things are named in English:
>>> Language.get('fr').language_name()
'French'
>>> Language.get('el').language_name()
'Greek'
But you can ask for language names in numerous other languages:
>>> Language.get('fr').language_name('fr')
'français'
>>> Language.get('el').language_name('fr')
'grec'
Why does everyone get Slovak and Slovenian confused? Let's ask them.
>>> Language.get('sl').language_name('sl')
'slovenščina'
>>> Language.get('sk').language_name('sk')
'slovenčina'
>>> Language.get('sl').language_name('sk')
'slovinčina'
>>> Language.get('sk').language_name('sl')
'slovaščina'
"""
return self._get_name('language', language, min_score)
def autonym(self, min_score: int=95) -> str:
"""
Give the name of this language *in* this language.
>>> Language.get('fr').autonym()
'français'
>>> Language.get('es').autonym()
'español'
>>> Language.get('ja').autonym()
'日本語'
This doesn't give the name of the region or script, but in some cases
the language can name itself in multiple scripts:
>>> Language.get('sr-Latn').autonym()
'srpski'
>>> Language.get('sr-Cyrl').autonym()
'српски'
>>> Language.get('pa').autonym()
'ਪੰਜਾਬੀ'
>>> Language.get('pa-Arab').autonym()
'پنجابی'
This only works for language codes that CLDR has locale data for. You
can't ask for the autonym of 'ja-Latn' and get 'nihongo'.
"""
return self.language_name(language=self, min_score=min_score)
def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the script part of the language tag in a natural language.
"""
return self._get_name('script', language, min_score)
def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the region part of the language tag in a natural language.
"""
return self._get_name('region', language, min_score)
def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list:
"""
Describe each of the variant parts of the language tag in a natural
language.
"""
names = []
for variant in self.variants:
var_names = code_to_names('variant', variant)
names.append(self._best_name(var_names, language, min_score))
return names
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
"""
Return a dictionary that describes a given language tag in a specified
natural language.
See `language_name` and related methods for more specific versions of this.
The desired `language` will in fact be matched against the available
options using the matching technique that this module provides. We can
illustrate many aspects of this by asking for a description of Shavian
script (a script devised by author <NAME>), and where you
might find it, in various languages.
>>> from pprint import pprint
>>> shaw = Language.make(script='Shaw').maximize()
>>> pprint(shaw.describe('en'))
{'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
>>> pprint(shaw.describe('fr'))
{'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}
>>> pprint(shaw.describe('es'))
{'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('pt'))
{'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('uk'))
{'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}
>>> pprint(shaw.describe('arb'))
{'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}
>>> pprint(shaw.describe('th'))
{'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}
>>> pprint(shaw.describe('zh-Hans'))
{'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}
>>> pprint(shaw.describe('zh-Hant'))
{'language': '英文', 'region': '英國', 'script': '簫柏納字符'}
>>> pprint(shaw.describe('ja'))
{'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}
When we don't have a localization for the language, we fall back on
'und', which just shows the language codes.
>>> pprint(shaw.describe('lol'))
{'language': 'en', 'region': 'GB', 'script': 'Shaw'}
Wait, is that a real language?
>>> pprint(Language.get('lol').maximize().describe())
{'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
"""
names = {}
if self.language:
names['language'] = self.language_name(language, min_score)
if self.script:
names['script'] = self.script_name(language, min_score)
if self.region:
names['region'] = self.region_name(language, min_score)
if self.variants:
names['variants'] = self.variant_names(language, min_score)
return names
@staticmethod
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
"""
Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some langauge names resolve to more than a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak')
"""
# No matter what form of language we got, normalize it to a single
# language subtag
if isinstance(language, Language):
language = language.language
elif isinstance(language, str):
language = get(language).language
if language is None:
language = 'und'
code = name_to_code(tagtype, name, language)
if code is None:
raise LookupError("Can't find any %s named %r" % (tagtype, name))
if '-' in code:
return Language.get(code)
else:
data = {tagtype: code}
return Language.make(**data)
@staticmethod
def find(name: str, language: {str, 'Language', None}=None):
"""
A concise version of `find_name`, used to get a language tag by its
name in a natural language. The language can be omitted in the large
majority of cases, where the language name is not ambiguous.
>>> Language.find('Türkçe')
Language.make(language='tr')
>>> Language.find('brazilian portuguese')
Language.make(language='pt', region='BR')
>>> Language.find('simplified chinese')
Language.make(language='zh', script='Hans')
Some language names are ambiguous: for example, there is a language
named 'Fala' in English (with code 'fax'), but 'Fala' is also the
Kwasio word for French. In this case, specifying the language that
the name is in is necessary for disambiguation.
>>> Language.find('fala')
Language.make(language='fr')
>>> Language.find('fala', 'en')
Language.make(language='fax')
"""
return Language.find_name('language', name, language)
def to_dict(self):
"""
Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object.
"""
if self._dict is not None:
return self._dict
result = {}
for key in self.ATTRIBUTES:
value = getattr(self, key)
if value:
result[key] = value
self._dict = result
return result
def update(self, other: 'Language') -> 'Language':
"""
Update this Language with the fields of another Language.
"""
return Language.make(
language=other.language or self.language,
extlangs=other.extlangs or self.extlangs,
script=other.script or self.script,
region=other.region or self.region,
variants=other.variants or self.variants,
extensions=other.extensions or self.extensions,
private=other.private or self.private
)
def update_dict(self, newdata: dict) -> 'Language':
"""
Update the attributes of this Language from a dictionary.
"""
return Language.make(
language=newdata.get('language', self.language),
extlangs=newdata.get('extlangs', self.extlangs),
script=newdata.get('script', self.script),
region=newdata.get('region', self.region),
variants=newdata.get('variants', self.variants),
extensions=newdata.get('extensions', self.extensions),
private=newdata.get('private', self.private)
)
@staticmethod
def _filter_keys(d: dict, keys: set) -> dict:
"""
Select a subset of keys from a dictionary.
"""
return {key: d[key] for key in keys if key in d}
def _filter_attributes(self, keyset):
"""
Return a copy of this object with a subset of its attributes set.
"""
filtered = self._filter_keys(self.to_dict(), keyset)
return Language.make(**filtered)
def _searchable_form(self) -> 'Language':
"""
Convert a parsed language tag so that the information it contains is in
the best form for looking up information in the CLDR.
"""
if self._searchable is not None:
return self._searchable
self._searchable = self._filter_attributes(
{'language', 'script', 'region'}
).simplify_script().prefer_macrolanguage()
return self._searchable
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Language):
return False
return self._str_tag == other._str_tag
def __hash__(self):
return hash(id(self))
def __getitem__(self, key):
if key in self.ATTRIBUTES:
return getattr(self, key)
else:
raise KeyError(key)
def __contains__(self, key):
return key in self.ATTRIBUTES and getattr(self, key)
def __repr__(self):
items = []
for attr in self.ATTRIBUTES:
if getattr(self, attr):
items.append('{0}={1!r}'.format(attr, getattr(self, attr)))
return "Language.make({})".format(', '.join(items))
    def __str__(self):
        # The string form of a Language is its BCP 47 tag.
        return self.to_tag()
# Make the get(), find(), and find_name() functions available at the top level
get = Language.get
find = Language.find
find_name = Language.find_name
# Make the Language object available under the old name LanguageData
# (kept for backward compatibility with earlier releases)
LanguageData = Language
def standardize_tag(tag: {str, Language}, macro: bool=False) -> str:
    """
    Normalize a language tag: replace deprecated subtags with their
    updated versions, drop scripts that are redundant with the language,
    and format the result by the conventions of BCP 47. With
    ``macro=True``, also represent standardized languages by their
    macrolanguage ('cmn' becomes 'zh'), as the Unicode CLDR requires.

    >>> standardize_tag('en_US')
    'en-US'
    >>> standardize_tag('en-Latn')
    'en'
    >>> standardize_tag('en-uk')
    'en-GB'
    >>> standardize_tag('sgn-US')
    'ase'
    >>> standardize_tag('arb-Arab', macro=True)
    'ar'
    >>> standardize_tag('zh-cmn-hans-cn')
    'cmn-Hans-CN'
    >>> standardize_tag('zh-cmn-hans-cn', macro=True)
    'zh-Hans-CN'
    >>> standardize_tag('spa-latn-mx')
    'es-MX'

    Raises LanguageTagError (a subclass of ValueError) when the tag can't
    be parsed according to BCP 47:

    >>> standardize_tag('spa-mx-latn')
    Traceback (most recent call last):
    ...
    langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
    """
    parsed = Language.get(tag, normalize=True)
    if macro:
        parsed = parsed.prefer_macrolanguage()
    return parsed.simplify_script().to_tag()
def tag_match_score(desired: {str, Language}, supported: {str, Language}) -> int:
    """
    Score, from 0 to 100, how well a `supported` language serves a user
    who wants the `desired` language. Higher is better, and the relation
    is not symmetric. Accepting only scores of 75 or more is a reasonable
    policy.

    Rough bands: 100 means the tags are equivalent after normalization
    and filling in likely values; 92-97 a regional difference; 86-90 a
    demographic likelihood of understanding; 80-85 a contentious script
    difference (Chinese); about 60 or less a different script; 20 or less
    essentially no match. Two languages whose only connection is a shared
    macrolanguage score 20 below a language match.

    >>> tag_match_score('en', 'en-US')
    100
    >>> tag_match_score('zh-Hant', 'zh-TW')
    100
    >>> tag_match_score('en-AU', 'en-GB')
    96
    >>> tag_match_score('es-MX', 'es-ES')
    92
    >>> tag_match_score('ta', 'en')
    86
    >>> tag_match_score('zh-Hans', 'zh-Hant')
    85
    >>> tag_match_score('arz', 'ar')
    80
    >>> tag_match_score('ja', 'ja-Latn-US-hepburn')
    56
    >>> tag_match_score('es', 'fr')
    16
    >>> tag_match_score('en', 'ta')
    0

    The asymmetry can be stark: Swiss German speakers understand German,
    but not the other way around.

    >>> tag_match_score('gsw', 'de')
    92
    >>> tag_match_score('de', 'gsw')
    16
    """
    return Language.get(desired).match_score(Language.get(supported))
def best_match(desired_language: {str, Language}, supported_languages: list,
               min_score: int=75) -> (str, int):
    """
    Choose the best language to use from `supported_languages` for a user
    who wants `desired_language`, even without an exact match.

    Returns a pair of (language code, score). The code is one of the
    supported languages, or 'und' (with score 0) when nothing matches at
    `min_score` or better. Ties go to the earlier entry in
    `supported_languages`. Lowering `min_score` lets more things match,
    at the risk of mis-handling data; see :func:`tag_match_score` for
    what the scores mean.

    >>> best_match('fr', ['de', 'en', 'fr'])
    ('fr', 100)
    >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
    ('sr-Latn', 100)
    >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
    ('zh-Hans', 100)
    >>> best_match('en-AU', ['en-GB', 'en-US'])
    ('en-GB', 96)
    >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
    ('es-419', 96)
    >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
    ('es-PU', 95)
    >>> best_match('eu', ['el', 'en', 'es'])
    ('es', 90)
    >>> best_match('eu', ['el', 'en', 'es'], min_score=92)
    ('und', 0)
    """
    # An exactly supported language needs no scoring.
    if desired_language in supported_languages:
        return desired_language, 100
    # Try again with the standardized form of the desired tag.
    desired_language = standardize_tag(desired_language)
    if desired_language in supported_languages:
        return desired_language, 100
    scored = [
        (supported, tag_match_score(desired_language, supported))
        for supported in supported_languages
    ]
    acceptable = [pair for pair in scored if pair[1] >= min_score]
    acceptable.append(('und', 0))
    # max() keeps the first of any tied entries, matching the stable
    # descending sort the original algorithm used.
    return max(acceptable, key=lambda pair: pair[1])
| [
"langcodes.names.name_to_code",
"langcodes.data_dicts.DEFAULT_SCRIPTS.get",
"langcodes.distance.raw_distance",
"langcodes.tag_parser.parse_tag",
"langcodes.names.code_to_names"
] | [((9235, 9249), 'langcodes.tag_parser.parse_tag', 'parse_tag', (['tag'], {}), '(tag)\n', (9244, 9249), False, 'from langcodes.tag_parser import parse_tag\n'), ((22078, 22114), 'langcodes.names.code_to_names', 'code_to_names', (['attribute', 'attr_value'], {}), '(attribute, attr_value)\n', (22091, 22114), False, 'from langcodes.names import code_to_names, name_to_code\n'), ((31480, 31517), 'langcodes.names.name_to_code', 'name_to_code', (['tagtype', 'name', 'language'], {}), '(tagtype, name, language)\n', (31492, 31517), False, 'from langcodes.names import code_to_names, name_to_code\n'), ((21524, 21570), 'langcodes.distance.raw_distance', 'raw_distance', (['desired_triple', 'supported_triple'], {}), '(desired_triple, supported_triple)\n', (21536, 21570), False, 'from langcodes.distance import raw_distance\n'), ((25262, 25295), 'langcodes.names.code_to_names', 'code_to_names', (['"""variant"""', 'variant'], {}), "('variant', variant)\n", (25275, 25295), False, 'from langcodes.names import code_to_names, name_to_code\n'), ((13224, 13258), 'langcodes.data_dicts.DEFAULT_SCRIPTS.get', 'DEFAULT_SCRIPTS.get', (['self.language'], {}), '(self.language)\n', (13243, 13258), False, 'from langcodes.data_dicts import DEFAULT_SCRIPTS, LANGUAGE_REPLACEMENTS, SCRIPT_REPLACEMENTS, REGION_REPLACEMENTS, NORMALIZED_MACROLANGUAGES, LIKELY_SUBTAGS\n')] |
import torch
from torch.utils.data import Dataset
import os
import pickle
class GNNdataset(Dataset):  # train and test
    """Dataset of pickled GNN sample pairs, one sample per file in a directory."""

    def __init__(self, data_dir):
        super().__init__()
        self.data_dir = data_dir
        # One pickle file per sample; the listing order fixes the indexing.
        self.file_list = os.listdir(self.data_dir)

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, index):
        sample_path = os.path.join(self.data_dir, self.file_list[index])
        with open(sample_path, 'rb') as handle:
            gnn_pair = pickle.load(handle)
        # Unpack in the order callers expect:
        # (pred_single, pred_3d, gt_3d, gt_2d, cam)
        return (
            gnn_pair['pred_single'],
            gnn_pair['pred_3d'],
            gnn_pair['gt_3d'],
            gnn_pair['gt_2d'],
            gnn_pair['cam'],
        )
| [
"os.path.join",
"os.listdir",
"pickle.load"
] | [((238, 263), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (248, 263), False, 'import os\n'), ((505, 519), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (516, 519), False, 'import pickle\n'), ((429, 469), 'os.path.join', 'os.path.join', (['self.data_dir', 'single_file'], {}), '(self.data_dir, single_file)\n', (441, 469), False, 'import os\n')] |
import asyncio
import base64
import os
from telethon import functions, types
from telethon.tl.functions.messages import ImportChatInviteRequest as Get
from userbot import CMD_HELP
from userbot.plugins import BOTLOG, BOTLOG_CHATID
from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd
@bot.on(lightning_cmd(pattern="spam (.*)"))
@bot.on(sudo_cmd(pattern="spam (.*)", allow_sudo=True))
async def spammer(e):
    """Spam the current chat.

    Usage:
        .spam <count> <message>      send <message> <count> times
        .spam <count>  (as a reply)  repeat the replied text or media <count> times

    Counts above 50 are rejected (use `.bigspam`). Actions are reported
    to the BOTLOG group when logging is enabled.
    """
    if e.fwd_from:
        return
    await e.get_chat()
    reply_to_id = e.message
    if e.reply_to_msg_id:
        reply_to_id = await e.get_reply_message()
    if not os.path.isdir(Config.TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Config.TEMP_DOWNLOAD_DIRECTORY)
    try:
        # Silently join the support channel; any failure is ignored.
        hmm = base64.b64decode("QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw==")
        await e.client(Get(hmm))
    except BaseException:
        pass

    async def _unsave_gif(sent):
        # Sending a GIF auto-saves it to "Saved GIFs"; undo that silently.
        try:
            await e.client(
                functions.messages.SaveGifRequest(
                    id=types.InputDocument(
                        id=sent.media.document.id,
                        access_hash=sent.media.document.access_hash,
                        file_reference=sent.media.document.file_reference,
                    ),
                    unsave=True,
                )
            )
        except Exception:
            pass

    async def _log_text_spam(spam_message):
        # Report a text spam to the log group, if logging is enabled.
        if not BOTLOG:
            return
        if e.is_private:
            where = f"[User](tg://user?id={e.chat_id})"
        else:
            where = f"{e.chat.title}(`{e.chat_id}`)"
        await e.client.send_message(
            BOTLOG_CHATID,
            "#SPAM\n"
            + f"Spam was executed successfully in {where} chat with {counter} messages of \n"
            + f"`{spam_message}`",
        )

    cat = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)
    counter = int(cat[0])
    if counter > 50:
        return await edit_or_reply(e, "Use `.bigspam` for spam greater than 50")
    if len(cat) == 2:
        # An inline message was given: spam that text.
        spam_message = str(cat[1])
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.1)
        await _log_text_spam(spam_message)
    elif reply_to_id.media:
        # Replying to media: download it once, then re-send it.
        downloaded_file_name = os.path.join(Config.TEMP_DOWNLOAD_DIRECTORY, "spam")
        downloaded_file_name = await e.client.download_media(
            reply_to_id.media, downloaded_file_name
        )
        await e.delete()
        if os.path.exists(downloaded_file_name):
            sandy = None
            for _ in range(counter):
                # After the first send, re-send the already-uploaded
                # message to avoid re-uploading the file each iteration.
                if sandy:
                    sandy = await e.client.send_file(e.chat_id, sandy)
                else:
                    sandy = await e.client.send_file(e.chat_id, downloaded_file_name)
                await _unsave_gif(sandy)
                await asyncio.sleep(0.5)
            if BOTLOG:
                if e.is_private:
                    where = f"[User](tg://user?id={e.chat_id})"
                else:
                    where = f"{e.chat.title}(`{e.chat_id}`)"
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {where} chat with {counter} times with below message",
                )
                sandy = await e.client.send_file(BOTLOG_CHATID, downloaded_file_name)
                await _unsave_gif(sandy)
            # BUG FIX: the original removed the temp file only inside the
            # BOTLOG branches, and the non-private branch misspelled the
            # variable (`downloaded_file_nam`), raising NameError.
            # Always clean up the downloaded file.
            os.remove(downloaded_file_name)
    elif reply_to_id.text and e.reply_to_msg_id:
        # Replying to a text message: spam its text.
        spam_message = reply_to_id.text
        await e.delete()
        for _ in range(counter):
            await reply_to_id.reply(spam_message)
            await asyncio.sleep(0.5)
        await _log_text_spam(spam_message)
    else:
        await edit_or_reply(e, "try again something went wrong or check `.info spam`")
@bot.on(lightning_cmd(pattern="bigspam (.*)"))
async def bigspam(e):
    """Spam without the 50-message cap: `.bigspam <count> <message>`."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        # BUG FIX: the original read the count from fixed character
        # positions (e.text[9:13]), which raises ValueError for any count
        # that is not exactly 4 digits; split on whitespace instead.
        try:
            _cmd, count_str, spam_message = e.text.split(maxsplit=2)
            counter = int(count_str)
        except ValueError:
            return await edit_or_reply(e, "Usage: `.bigspam <count> <message>`")
        # Was `range(1, counter)`, which sent one message too few.
        for _ in range(counter):
            await e.respond(spam_message)
        await e.delete()
        # The original referenced undefined names LOGGER / LOGGER_GROUP
        # (NameError at runtime); use BOTLOG / BOTLOG_CHATID like the
        # other handlers in this file.
        if BOTLOG:
            await e.client.send_message(
                BOTLOG_CHATID, "#BIGSPAM \n\n" "Bigspam was executed successfully"
            )
@bot.on(lightning_cmd("wspam (.*)"))
@bot.on(sudo_cmd(pattern="wspam (.*)", allow_sudo=True))
async def tmeme(e):
    """Send each word of the argument as its own message: `.wspam <text>`."""
    payload = str("".join(e.text.split(maxsplit=1)[1:]))
    message = payload.split()
    await e.delete()
    for word in message:
        await e.respond(word)
    if BOTLOG:
        # Report to the log group, labeling the chat the spam ran in.
        if e.is_private:
            where = f"[User](tg://user?id={e.chat_id})"
        else:
            where = f"{e.chat.title}(`{e.chat_id}`)"
        await e.client.send_message(
            BOTLOG_CHATID,
            "#WSPAM\n"
            + f"Word Spam was executed successfully in {where} chat with : `{message}`",
        )
@bot.on(lightning_cmd(pattern="mspam (.*)"))
async def tiny_pic_spam(e):
    """Spam a media link: `.mspam <count> <link>`."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        text = e.text.split()
        counter = int(text[1])
        link = str(text[2])
        # Was `range(1, counter)`, which sent one item too few.
        for _ in range(counter):
            await e.client.send_file(e.chat_id, link)
        await e.delete()
        # The original referenced undefined names LOGGER / LOGGER_GROUP
        # (NameError at runtime); use BOTLOG / BOTLOG_CHATID like the
        # other handlers in this file.
        if BOTLOG:
            await e.client.send_message(
                BOTLOG_CHATID, "#PICSPAM \n\n" "PicSpam was executed successfully"
            )
@bot.on(lightning_cmd("delayspam (.*)"))
async def spammer(e):
    """Spam with a delay between messages: `.delayspam <seconds> <count> <message>`."""
    # Parse the argument once instead of re-splitting it three times.
    args = e.pattern_match.group(1).split(" ", 2)
    spam_delay = float(args[0])
    counter = int(args[1])
    spam_message = str(args[2])
    await e.delete()
    # Was `range(1, counter)`, which sent one message too few.
    for _ in range(counter):
        await e.respond(spam_message)
        await asyncio.sleep(spam_delay)
    # The original referenced undefined names LOGGER / LOGGER_GROUP
    # (NameError at runtime); use BOTLOG / BOTLOG_CHATID like the other
    # handlers in this file.
    if BOTLOG:
        await e.client.send_message(
            BOTLOG_CHATID, "#DelaySPAM\n" "DelaySpam was executed successfully"
        )
@bot.on(lightning_cmd(pattern="spam (.*)"))
@bot.on(sudo_cmd(pattern="spam (.*)", allow_sudo=True))
async def spammer(e):
    """`.spam <count> <text>` / `.spam <count>` as a reply.

    Sends the given text -- or the replied-to message/media -- <count>
    times (use `.bigspam` above 50) and reports the run to BOTLOG.

    Fixes: `os.remove(downloaded_file_nam)` typo in the group-chat log
    branch (NameError at runtime); bare `except:` clauses narrowed to
    `except Exception:` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    if e.fwd_from:
        return
    await e.get_chat()
    reply_to_id = e.message
    if e.reply_to_msg_id:
        reply_to_id = await e.get_reply_message()
    if not os.path.isdir(Config.TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Config.TEMP_DOWNLOAD_DIRECTORY)
    try:
        hmm = base64.b64decode("QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw==")
        hmm = Get(hmm)
        await e.client(hmm)
    except BaseException:
        pass
    cat = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)
    counter = int(cat[0])
    if counter > 50:
        return await edit_or_reply(e, "Use `.bigspam` for spam greater than 50")
    if len(cat) == 2:
        # Text spam: ".spam <count> <text>".
        spam_message = str(("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)[1])
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.1)
        if BOTLOG:
            if e.is_private:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
            else:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
    elif reply_to_id.media:
        # Media spam: download the replied-to media once, then resend it.
        to_download_directory = Config.TEMP_DOWNLOAD_DIRECTORY
        downloaded_file_name = os.path.join(to_download_directory, "spam")
        downloaded_file_name = await e.client.download_media(
            reply_to_id.media, downloaded_file_name
        )
        await e.delete()
        if os.path.exists(downloaded_file_name):
            sandy = None
            for _ in range(counter):
                # After the first upload, reuse the already-uploaded media.
                if sandy:
                    sandy = await e.client.send_file(e.chat_id, sandy)
                else:
                    sandy = await e.client.send_file(e.chat_id, downloaded_file_name)
                try:
                    # Unsave so spammed GIFs don't pollute the saved-GIFs list.
                    await e.client(
                        functions.messages.SaveGifRequest(
                            id=types.InputDocument(
                                id=sandy.media.document.id,
                                access_hash=sandy.media.document.access_hash,
                                file_reference=sandy.media.document.file_reference,
                            ),
                            unsave=True,
                        )
                    )
                except Exception:
                    pass
                await asyncio.sleep(0.5)
            if BOTLOG:
                if e.is_private:
                    await e.client.send_message(
                        BOTLOG_CHATID,
                        "#SPAM\n"
                        + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} times with below message",
                    )
                    sandy = await e.client.send_file(
                        BOTLOG_CHATID, downloaded_file_name
                    )
                    try:
                        await e.client(
                            functions.messages.SaveGifRequest(
                                id=types.InputDocument(
                                    id=sandy.media.document.id,
                                    access_hash=sandy.media.document.access_hash,
                                    file_reference=sandy.media.document.file_reference,
                                ),
                                unsave=True,
                            )
                        )
                    except Exception:
                        pass
                    os.remove(downloaded_file_name)
                else:
                    await e.client.send_message(
                        BOTLOG_CHATID,
                        "#SPAM\n"
                        + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) with {counter} times with below message",
                    )
                    sandy = await e.client.send_file(
                        BOTLOG_CHATID, downloaded_file_name
                    )
                    try:
                        await e.client(
                            functions.messages.SaveGifRequest(
                                id=types.InputDocument(
                                    id=sandy.media.document.id,
                                    access_hash=sandy.media.document.access_hash,
                                    file_reference=sandy.media.document.file_reference,
                                ),
                                unsave=True,
                            )
                        )
                    except Exception:
                        pass
                    # Fixed typo: was `downloaded_file_nam` (NameError).
                    os.remove(downloaded_file_name)
    elif reply_to_id.text and e.reply_to_msg_id:
        # Reply spam: repeat the replied-to text message.
        spam_message = reply_to_id.text
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.5)
        if BOTLOG:
            if e.is_private:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
            else:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
    else:
        await edit_or_reply(e, "try again something went wrong or check `.info spam`")
@bot.on(lightning_cmd(pattern="bigspam (.*)"))
async def bigspam(e):
    """`.bigspam <count> <text>` -- flood the chat with <text>.

    Fixes the original fixed-offset parsing (`int(message[9:13])`), which
    only worked when <count> was exactly three digits wide; any other width
    pulled message characters into int() and raised ValueError.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        # ".bigspam", "<count>", "<text...>" -- maxsplit keeps the text whole.
        parts = e.text.split(maxsplit=2)
        counter = int(parts[1])
        spam_message = parts[2] if len(parts) > 2 else ""
        # NOTE: range(1, counter) sends counter-1 messages; preserved for
        # backward compatibility with the original behaviour.
        for i in range(1, counter):
            await e.respond(spam_message)
        await e.delete()
        if LOGGER:
            await e.client.send_message(
                LOGGER_GROUP, "#BIGSPAM \n\n" "Bigspam was executed successfully"
            )
@bot.on(lightning_cmd("wspam (.*)"))
@bot.on(sudo_cmd(pattern="wspam (.*)", allow_sudo=True))
async def tmeme(e):
    """`.wspam <text>` -- repost the given text one word per message."""
    # Drop the command itself; everything after it is the payload.
    payload = "".join(e.text.split(maxsplit=1)[1:])
    message = payload.split()
    await e.delete()
    for word in message:
        await e.respond(word)
    if not BOTLOG:
        return
    if e.is_private:
        await e.client.send_message(
            BOTLOG_CHATID,
            "#WSPAM\n"
            + f"Word Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with : `{message}`",
        )
    else:
        await e.client.send_message(
            BOTLOG_CHATID,
            "#WSPAM\n"
            + f"Word Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with : `{message}`",
        )
@bot.on(lightning_cmd(pattern="mspam (.*)"))
async def tiny_pic_spam(e):
    """`.mspam <count> <link>` -- send the media at <link> repeatedly."""
    first = e.text[0]
    # Guard clause (De Morgan of the original condition): ignore messages
    # that look like ordinary text or other bots' commands.
    if first.isalpha() or first in ("/", "#", "@", "!"):
        return
    parts = e.text.split()
    counter = int(parts[1])
    link = str(parts[2])
    for _ in range(1, counter):
        await e.client.send_file(e.chat_id, link)
    await e.delete()
    if LOGGER:
        await e.client.send_message(
            LOGGER_GROUP, "#PICSPAM \n\n" "PicSpam was executed successfully"
        )
@bot.on(lightning_cmd("delayspam (.*)"))
async def spammer(e):
    """`.delayspam <delay> <count> <text>` -- spam with a pause between sends.

    Fixed: the pause used a bare `sleep()` (blocking / not awaitable),
    inconsistent with the sibling handler that uses `asyncio.sleep`.
    """
    spamDelay = float(e.pattern_match.group(1).split(" ", 2)[0])
    counter = int(e.pattern_match.group(1).split(" ", 2)[1])
    spam_message = str(e.pattern_match.group(1).split(" ", 2)[2])
    await e.delete()
    for i in range(1, counter):
        await e.respond(spam_message)
        # asyncio.sleep keeps the event loop responsive between sends.
        await asyncio.sleep(spamDelay)
    if LOGGER:
        await e.client.send_message(
            LOGGER_GROUP, "#DelaySPAM\n" "DelaySpam was executed successfully"
        )
CMD_HELP.update(
{
"spam": "**Plugin : **`spam`\
\n\n**Syntax : **`.spam <count> <text>`\
\n**Function : **__ Floods text in the chat !!__\
\n\n**Syntax : **`.spam <count> reply to media`\
\n**Function : **__Sends the replied media <count> times !!__\
\nFor above two commands use `.bigspam` instead of spam for spamming more than 50 messages\
\n\n**Syntax : **`.cspam <text>`\
\n**Function : **__ Spam the text letter by letter.__\
\n\n**Syntax : **`.wspam <text>`\
\n**Function : **__ Spam the text word by word.__\
\n\n**Syntax : **`.mspam \ <count> >reply to media> \`\
\n**Function : **__ .mspam but with media.__\
\n\n\n**NOTE : Spam at your own risk !!**"
}
)
| [
"os.path.exists",
"telethon.tl.functions.messages.ImportChatInviteRequest",
"userbot.utils.edit_or_reply",
"os.makedirs",
"os.path.join",
"base64.b64decode",
"telethon.types.InputDocument",
"os.path.isdir",
"asyncio.sleep",
"userbot.utils.sudo_cmd",
"userbot.CMD_HELP.update",
"userbot.utils.li... | [((17757, 18488), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'spam\':\n """**Plugin : **`spam` \n\n**Syntax : **`.spam <count> <text>` \n**Function : **__ Floods text in the chat !!__ \n\n**Syntax : **`.spam <count> reply to media` \n**Function : **__Sends the replied media <count> times !!__ \nFor above two commands use `.bigspam` instead of spam for spamming more than 50 messages \n\n**Syntax : **`.cspam <text>` \n**Function : **__ Spam the text letter by letter.__ \n\n**Syntax : **`.wspam <text>` \n**Function : **__ Spam the text word by word.__ \n\n**Syntax : **`.mspam \\\\ <count> >reply to media> \\\\` \n**Function : **__ .mspam but with media.__ \n\n\n**NOTE : Spam at your own risk !!**"""\n }'], {}), '({\'spam\':\n """**Plugin : **`spam` \n\n**Syntax : **`.spam <count> <text>` \n**Function : **__ Floods text in the chat !!__ \n\n**Syntax : **`.spam <count> reply to media` \n**Function : **__Sends the replied media <count> times !!__ \nFor above two commands use `.bigspam` instead of spam for spamming more than 50 messages \n\n**Syntax : **`.cspam <text>` \n**Function : **__ Spam the text letter by letter.__ \n\n**Syntax : **`.wspam <text>` \n**Function : **__ Spam the text word by word.__ \n\n**Syntax : **`.mspam \\\\ <count> >reply to media> \\\\` \n**Function : **__ .mspam but with media.__ \n\n\n**NOTE : Spam at your own risk !!**"""\n })\n', (17772, 18488), False, 'from userbot import CMD_HELP\n'), ((307, 341), 'userbot.utils.lightning_cmd', 'lightning_cmd', ([], {'pattern': '"""spam (.*)"""'}), "(pattern='spam (.*)')\n", (320, 341), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((351, 397), 'userbot.utils.sudo_cmd', 'sudo_cmd', ([], {'pattern': '"""spam (.*)"""', 'allow_sudo': '(True)'}), "(pattern='spam (.*)', allow_sudo=True)\n", (359, 397), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((6683, 6720), 'userbot.utils.lightning_cmd', 'lightning_cmd', ([], 
{'pattern': '"""bigspam (.*)"""'}), "(pattern='bigspam (.*)')\n", (6696, 6720), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((7189, 7216), 'userbot.utils.lightning_cmd', 'lightning_cmd', (['"""wspam (.*)"""'], {}), "('wspam (.*)')\n", (7202, 7216), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((7226, 7273), 'userbot.utils.sudo_cmd', 'sudo_cmd', ([], {'pattern': '"""wspam (.*)"""', 'allow_sudo': '(True)'}), "(pattern='wspam (.*)', allow_sudo=True)\n", (7234, 7273), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((7977, 8012), 'userbot.utils.lightning_cmd', 'lightning_cmd', ([], {'pattern': '"""mspam (.*)"""'}), "(pattern='mspam (.*)')\n", (7990, 8012), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((8512, 8543), 'userbot.utils.lightning_cmd', 'lightning_cmd', (['"""delayspam (.*)"""'], {}), "('delayspam (.*)')\n", (8525, 8543), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((9040, 9074), 'userbot.utils.lightning_cmd', 'lightning_cmd', ([], {'pattern': '"""spam (.*)"""'}), "(pattern='spam (.*)')\n", (9053, 9074), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((9084, 9130), 'userbot.utils.sudo_cmd', 'sudo_cmd', ([], {'pattern': '"""spam (.*)"""', 'allow_sudo': '(True)'}), "(pattern='spam (.*)', allow_sudo=True)\n", (9092, 9130), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((15416, 15453), 'userbot.utils.lightning_cmd', 'lightning_cmd', ([], {'pattern': '"""bigspam (.*)"""'}), "(pattern='bigspam (.*)')\n", (15429, 15453), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((15922, 15949), 'userbot.utils.lightning_cmd', 'lightning_cmd', (['"""wspam (.*)"""'], {}), "('wspam (.*)')\n", (15935, 15949), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((15959, 16006), 'userbot.utils.sudo_cmd', 
'sudo_cmd', ([], {'pattern': '"""wspam (.*)"""', 'allow_sudo': '(True)'}), "(pattern='wspam (.*)', allow_sudo=True)\n", (15967, 16006), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((16710, 16745), 'userbot.utils.lightning_cmd', 'lightning_cmd', ([], {'pattern': '"""mspam (.*)"""'}), "(pattern='mspam (.*)')\n", (16723, 16745), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((17245, 17276), 'userbot.utils.lightning_cmd', 'lightning_cmd', (['"""delayspam (.*)"""'], {}), "('delayspam (.*)')\n", (17258, 17276), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((593, 638), 'os.path.isdir', 'os.path.isdir', (['Config.TEMP_DOWNLOAD_DIRECTORY'], {}), '(Config.TEMP_DOWNLOAD_DIRECTORY)\n', (606, 638), False, 'import os\n'), ((648, 691), 'os.makedirs', 'os.makedirs', (['Config.TEMP_DOWNLOAD_DIRECTORY'], {}), '(Config.TEMP_DOWNLOAD_DIRECTORY)\n', (659, 691), False, 'import os\n'), ((715, 767), 'base64.b64decode', 'base64.b64decode', (['"""QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw=="""'], {}), "('QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw==')\n", (731, 767), False, 'import base64\n'), ((782, 790), 'telethon.tl.functions.messages.ImportChatInviteRequest', 'Get', (['hmm'], {}), '(hmm)\n', (785, 790), True, 'from telethon.tl.functions.messages import ImportChatInviteRequest as Get\n'), ((9326, 9371), 'os.path.isdir', 'os.path.isdir', (['Config.TEMP_DOWNLOAD_DIRECTORY'], {}), '(Config.TEMP_DOWNLOAD_DIRECTORY)\n', (9339, 9371), False, 'import os\n'), ((9381, 9424), 'os.makedirs', 'os.makedirs', (['Config.TEMP_DOWNLOAD_DIRECTORY'], {}), '(Config.TEMP_DOWNLOAD_DIRECTORY)\n', (9392, 9424), False, 'import os\n'), ((9448, 9500), 'base64.b64decode', 'base64.b64decode', (['"""QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw=="""'], {}), "('QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw==')\n", (9464, 9500), False, 'import base64\n'), ((9515, 9523), 'telethon.tl.functions.messages.ImportChatInviteRequest', 'Get', (['hmm'], {}), '(hmm)\n', (9518, 9523), 
True, 'from telethon.tl.functions.messages import ImportChatInviteRequest as Get\n'), ((990, 1049), 'userbot.utils.edit_or_reply', 'edit_or_reply', (['e', '"""Use `.bigspam` for spam greater than 50"""'], {}), "(e, 'Use `.bigspam` for spam greater than 50')\n", (1003, 1049), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((2209, 2252), 'os.path.join', 'os.path.join', (['to_download_directory', '"""spam"""'], {}), "(to_download_directory, 'spam')\n", (2221, 2252), False, 'import os\n'), ((2413, 2449), 'os.path.exists', 'os.path.exists', (['downloaded_file_name'], {}), '(downloaded_file_name)\n', (2427, 2449), False, 'import os\n'), ((8864, 8888), 'asyncio.sleep', 'asyncio.sleep', (['spamDelay'], {}), '(spamDelay)\n', (8877, 8888), False, 'import asyncio\n'), ((9723, 9782), 'userbot.utils.edit_or_reply', 'edit_or_reply', (['e', '"""Use `.bigspam` for spam greater than 50"""'], {}), "(e, 'Use `.bigspam` for spam greater than 50')\n", (9736, 9782), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((10942, 10985), 'os.path.join', 'os.path.join', (['to_download_directory', '"""spam"""'], {}), "(to_download_directory, 'spam')\n", (10954, 10985), False, 'import os\n'), ((11146, 11182), 'os.path.exists', 'os.path.exists', (['downloaded_file_name'], {}), '(downloaded_file_name)\n', (11160, 11182), False, 'import os\n'), ((1408, 1426), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (1421, 1426), False, 'import asyncio\n'), ((10141, 10159), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (10154, 10159), False, 'import asyncio\n'), ((6600, 6672), 'userbot.utils.edit_or_reply', 'edit_or_reply', (['e', '"""try again something went wrong or check `.info spam`"""'], {}), "(e, 'try again something went wrong or check `.info spam`')\n", (6613, 6672), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((15333, 15405), 'userbot.utils.edit_or_reply', 'edit_or_reply', 
(['e', '"""try again something went wrong or check `.info spam`"""'], {}), "(e, 'try again something went wrong or check `.info spam`')\n", (15346, 15405), False, 'from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd\n'), ((3299, 3317), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (3312, 3317), False, 'import asyncio\n'), ((4427, 4458), 'os.remove', 'os.remove', (['downloaded_file_name'], {}), '(downloaded_file_name)\n', (4436, 4458), False, 'import os\n'), ((5526, 5556), 'os.remove', 'os.remove', (['downloaded_file_nam'], {}), '(downloaded_file_nam)\n', (5535, 5556), False, 'import os\n'), ((5897, 5915), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (5910, 5915), False, 'import asyncio\n'), ((12032, 12050), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (12045, 12050), False, 'import asyncio\n'), ((13160, 13191), 'os.remove', 'os.remove', (['downloaded_file_name'], {}), '(downloaded_file_name)\n', (13169, 13191), False, 'import os\n'), ((14259, 14289), 'os.remove', 'os.remove', (['downloaded_file_nam'], {}), '(downloaded_file_nam)\n', (14268, 14289), False, 'import os\n'), ((14630, 14648), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (14643, 14648), False, 'import asyncio\n'), ((2865, 3015), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'sandy.media.document.id', 'access_hash': 'sandy.media.document.access_hash', 'file_reference': 'sandy.media.document.file_reference'}), '(id=sandy.media.document.id, access_hash=sandy.media.\n document.access_hash, file_reference=sandy.media.document.file_reference)\n', (2884, 3015), False, 'from telethon import functions, types\n'), ((11598, 11748), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'sandy.media.document.id', 'access_hash': 'sandy.media.document.access_hash', 'file_reference': 'sandy.media.document.file_reference'}), '(id=sandy.media.document.id, access_hash=sandy.media.\n document.access_hash, 
file_reference=sandy.media.document.file_reference)\n', (11617, 11748), False, 'from telethon import functions, types\n'), ((3959, 4109), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'sandy.media.document.id', 'access_hash': 'sandy.media.document.access_hash', 'file_reference': 'sandy.media.document.file_reference'}), '(id=sandy.media.document.id, access_hash=sandy.media.\n document.access_hash, file_reference=sandy.media.document.file_reference)\n', (3978, 4109), False, 'from telethon import functions, types\n'), ((5058, 5208), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'sandy.media.document.id', 'access_hash': 'sandy.media.document.access_hash', 'file_reference': 'sandy.media.document.file_reference'}), '(id=sandy.media.document.id, access_hash=sandy.media.\n document.access_hash, file_reference=sandy.media.document.file_reference)\n', (5077, 5208), False, 'from telethon import functions, types\n'), ((12692, 12842), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'sandy.media.document.id', 'access_hash': 'sandy.media.document.access_hash', 'file_reference': 'sandy.media.document.file_reference'}), '(id=sandy.media.document.id, access_hash=sandy.media.\n document.access_hash, file_reference=sandy.media.document.file_reference)\n', (12711, 12842), False, 'from telethon import functions, types\n'), ((13791, 13941), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'sandy.media.document.id', 'access_hash': 'sandy.media.document.access_hash', 'file_reference': 'sandy.media.document.file_reference'}), '(id=sandy.media.document.id, access_hash=sandy.media.\n document.access_hash, file_reference=sandy.media.document.file_reference)\n', (13810, 13941), False, 'from telethon import functions, types\n')] |
from random import choice
from string import Template
from . import BaseGenerator
class Name(BaseGenerator):
    """Generates a company name by filling a random template with words."""

    def __init__(self, company):
        self.company = company
        self.data = self._load_json('name.json')
        # Templates live alongside the word lists in name.json; pop them so
        # the remaining keys can be filled in generically.
        self.templates = self.data.pop('templates')
        self.nouns = self._load_txt('nouns.txt')
        self.adjectives = self._load_txt('adjectives.txt')
        self.founder_data = self._load_json('founder.json')

    def generate(self):
        """Return one generated company name."""
        tmpl = Template(self._choose(self.templates))
        # Generic slots declared in name.json.
        elements = {key: self._choose(options)
                    for key, options in self.data.items()}
        for slot in ('noun', 'noun2'):
            word = choice(self.nouns)
            # Keep acronyms as-is, title-case everything else.
            elements[slot] = word if word.isupper() else word.title()
        elements['adjective'] = choice(self.adjectives).title()
        elements['adjective2'] = choice(self.adjectives).title()
        fname, lname = self.company.founder.split(' ')
        fake = self.company._fake
        elements['lname'] = lname
        elements['lname2'] = self._choose(self.founder_data['last_name'])
        elements['lname3'] = self._choose(self.founder_data['last_name'])
        elements['fname'] = fname
        elements['place'] = choice([self.company.city, self.company.state_name])
        fakeword = fake.word().title()
        # Very short fake words read better as acronyms.
        elements['fakeword'] = fakeword.upper() if len(fakeword) <= 3 else fakeword
        gender = self.company.founder_gender
        elements['family'] = (elements['family_male'] if gender == 'male'
                              else elements['family_female'])
        return tmpl.substitute(elements)
| [
"random.choice"
] | [((1376, 1428), 'random.choice', 'choice', (['[self.company.city, self.company.state_name]'], {}), '([self.company.city, self.company.state_name])\n', (1382, 1428), False, 'from random import choice\n'), ((767, 785), 'random.choice', 'choice', (['self.nouns'], {}), '(self.nouns)\n', (773, 785), False, 'from random import choice\n'), ((919, 942), 'random.choice', 'choice', (['self.adjectives'], {}), '(self.adjectives)\n', (925, 942), False, 'from random import choice\n'), ((984, 1007), 'random.choice', 'choice', (['self.adjectives'], {}), '(self.adjectives)\n', (990, 1007), False, 'from random import choice\n')] |
#!/usr/bin/env python3
import os.path
import subprocess
import shutil
def get_compiler_path():
    """Return the absolute path of the llvm_4 compiler.

    Raises ValueError when the expected sibling checkout is missing.
    """
    path = os.path.abspath("../../debug-compiler-theory-samples/llvm_4")
    if not os.path.exists(path):
        raise ValueError('compiler llvm_4 not found')
    return path
class Runner:
    """Compiles sample programs from data/ into object files under out/."""

    def __init__(self, compiler_path):
        self.compiler_path = compiler_path

    def run(self, input_name):
        """Feed data/<input_name> to the compiler and move program.o to out/."""
        source_path = os.path.abspath(os.path.join("data", input_name))
        with open(source_path, "r") as source:
            print("Running", input_name)
            # The compiler reads the program from stdin and emits program.o
            # in the current directory.
            subprocess.check_call([self.compiler_path], stdin=source)
        obj_file_name = os.path.splitext(input_name)[0] + ".o"
        shutil.move("program.o", os.path.join("out", obj_file_name))
def main():
    """Compile every bundled sample program."""
    runner = Runner(get_compiler_path())
    for sample in (
        "first-space-velocity.txt",
        "if_branching.txt",
        "simple_strings_concat.txt",
        "square.txt",
        "advanced_strings_concat.txt",
    ):
        runner.run(sample)
# Script entry point: compile all sample programs.
if __name__ == "__main__":
    main()
| [
"subprocess.check_call"
] | [((603, 664), 'subprocess.check_call', 'subprocess.check_call', (['[self.compiler_path]'], {'stdin': 'input_file'}), '([self.compiler_path], stdin=input_file)\n', (624, 664), False, 'import subprocess\n')] |
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from cropwatch.apps.ioTank.models import ioTank, SensorReading
from cropwatch.apps.metrics.tasks import *
class Command(BaseCommand):
    """Email owners whose ioTanks have been silent for over 15 minutes."""

    help = 'Performs uptime validation every 5'

    def handle(self, *args, **options):
        accounts = AccountSettings.objects.filter(notify_iotank_emergency=True)
        email_subject = "ioTank offline."
        for account in accounts:
            bots = ioTank.objects.filter(owner=account.user)
            for bot in bots:
                try:
                    reading = (SensorReading.objects.filter(bot=bot)
                               .order_by('-timestamp').first())
                    # first() returns None when the bot has never reported;
                    # the original relied on a bare except to skip that case.
                    if reading is None:
                        continue
                    if reading.timestamp < timezone.now() - relativedelta(minutes=15):
                        msg = "ioTank:" + str(bot.name) + " has not communicated with the server in over 15 minutes"
                        print(msg)
                        if account.notify_email is True and account.email_daily > 0:
                            send_email.apply_async((email_subject, msg, account.user.email, account.user.id))
                except Exception:
                    # Narrowed from a bare except: keep the best-effort debug
                    # output, but don't swallow KeyboardInterrupt/SystemExit.
                    print(bot)
                    print(SensorReading.objects.filter(bot=bot))
| [
"django.utils.timezone.now",
"cropwatch.apps.ioTank.models.ioTank.objects.filter",
"dateutil.relativedelta.relativedelta",
"cropwatch.apps.ioTank.models.SensorReading.objects.filter"
] | [((537, 578), 'cropwatch.apps.ioTank.models.ioTank.objects.filter', 'ioTank.objects.filter', ([], {'owner': 'account.user'}), '(owner=account.user)\n', (558, 578), False, 'from cropwatch.apps.ioTank.models import ioTank, SensorReading\n'), ((771, 785), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (783, 785), False, 'from django.utils import timezone\n'), ((788, 813), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (801, 813), False, 'from dateutil.relativedelta import relativedelta\n'), ((1243, 1280), 'cropwatch.apps.ioTank.models.SensorReading.objects.filter', 'SensorReading.objects.filter', ([], {'bot': 'bot'}), '(bot=bot)\n', (1271, 1280), False, 'from cropwatch.apps.ioTank.models import ioTank, SensorReading\n'), ((659, 696), 'cropwatch.apps.ioTank.models.SensorReading.objects.filter', 'SensorReading.objects.filter', ([], {'bot': 'bot'}), '(bot=bot)\n', (687, 696), False, 'from cropwatch.apps.ioTank.models import ioTank, SensorReading\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Asynchronous task support for discovery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime, timedelta
from functools import partial
import random
import re
import textwrap
import traceback
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import django_rq
from ipaddr import IPv4Network, IPv6Network
from ralph.discovery.models import Network, IPAddress
from ralph.util.network import ping
from ralph.util import output, plugin
# Matches "attribute: value" pairs found in DNS TXT records.
DNS_TXT_ATTRIBUTE_REGEX = re.compile(r'(?P<attribute>[^:]+): (?P<value>.*)')
# Maximum number of times a plugin may request a restart before giving up.
MAX_RESTARTS = 3
# Host pinged by sanity_check() to verify the network configuration.
SANITY_CHECK_PING_ADDRESS = settings.SANITY_CHECK_PING_ADDRESS
# Timeout (seconds) applied to every queued discovery task.
SINGLE_DISCOVERY_TIMEOUT = settings.SINGLE_DISCOVERY_TIMEOUT
class Error(Exception):
    """Base class for all errors raised during discovery tasks."""
class NoQueueError(Error):
    """Raised when no discovery queue is defined for a network."""
def set_queue(context):
    """Route the discovery tasks to the right data center for them.

    Use the default queue if no network matches the IP address.
    """
    if 'queue' in context:
        # A queue was already chosen upstream; leave it alone.
        return
    try:
        net = Network.from_ip(context['ip'])
    except KeyError:
        # No IP in the context -- nothing to match against.
        context['queue'] = 'default'
    else:
        context['queue'] = net.queue.name if net.queue else 'default'
def sanity_check(perform_network_checks=True):
    """Checks configuration integrity by pinging the SANITY_CHECK_PING_ADDRESS.
    """
    if not perform_network_checks:
        return
    if ping(SANITY_CHECK_PING_ADDRESS) is None:
        # Dedent strips the literal's common indentation, so the message
        # renders flush-left regardless of this block's nesting.
        message = textwrap.dedent(
            """
            fatal: {} is not pingable.

            Things you might want to check:
             * is this host connected to network
             * is this domain pingable from your terminal
             * is your python binary capped with setcap CAP_NET_RAW
            or
             * are you running tests from root
            or
             * are you using setuid bin/python
            """
        ).strip().format(SANITY_CHECK_PING_ADDRESS)
        raise ImproperlyConfigured(message)
def dummy_task(interactive=False, index=None):
    """Queue smoke-test task: prints a ping, fails on every 25th index."""
    stdout = output.get(interactive)
    if not index:
        stdout("Ping.")
        return
    if not index % 25:
        # Deliberate failure so the queue's error handling can be observed.
        raise LookupError(
            "You called {} and it failed on purpose.".format(index),
        )
    stdout("Ping {}.".format(index))
def dummy_horde(interactive=False, how_many=1000):
    """Fire `how_many` dummy tasks: inline if interactive, queued otherwise."""
    if interactive:
        for i in xrange(how_many):
            dummy_task(interactive=interactive, index=i + 1)
        return
    queue = django_rq.get_queue()
    for i in xrange(how_many):
        queue.enqueue_call(
            func=dummy_task,
            kwargs=dict(interactive=interactive, index=i + 1),
            timeout=60,
            result_ttl=0,
        )
def run_next_plugin(context, chains, requirements=None, interactive=False,
                    done_requirements=None, outputs=None, after=None):
    """Runs the next plugin, asynchronously if interactive=False is given."""
    if requirements is None:
        requirements = set()
    if done_requirements is None:
        done_requirements = set()
    # Direct call, immediate enqueue, or delayed enqueue when `after` is
    # given -- see _select_run_method.
    run = _select_run_method(context, interactive, run_plugin, after)
    for index, chain in enumerate(chains):
        # Plugins in this chain that are unlocked by `requirements` and
        # have not been attempted yet.
        to_run = plugin.next(chain, requirements) - done_requirements
        if to_run:
            plugin_name = plugin.highest_priority(chain, to_run)
            # Pass the remaining chains (chains[index:]) so the scheduler
            # can fall through to later chains once this one is exhausted.
            run(context, chains[index:], plugin_name, requirements,
                interactive, done_requirements, outputs)
            return
def run_chain(context, chain_name, requirements=None, interactive=False,
              done_requirements=None, outputs=None,
              after=None):
    """Runs a single chain in its entirety at once, asynchronously if
    interactive=False is given.
    """
    # _run_chain does the actual dispatch; here we only pick how it runs.
    runner = _select_run_method(context, interactive, _run_chain, after)
    runner(context, chain_name, requirements, interactive, done_requirements,
           outputs)
def run_plugin(context, chains, plugin_name,
               requirements=None, interactive=False, done_requirements=None,
               restarts=MAX_RESTARTS, outputs=None):
    """Synchronously runs a plugin named `plugin_name` from the first of the
    specified `chains` using a given `context`. Automatically advances the
    chain scheduling the next plugin to be run. When no plugins are left in the
    current chain, advances to the next in the list.

    If `interactive` is True, returns output on stdout and runs the next plugin
    synchronously."""

    if requirements is None:
        requirements = set()
    if done_requirements is None:
        done_requirements = set()
    restarted = False
    if isinstance(chains, basestring):
        raise NotImplementedError("API changed.")
    chain = chains[0]
    try:
        _run_plugin(context, chain, plugin_name, requirements, interactive,
                    done_requirements, outputs)
    except plugin.Restart as e:
        if restarts > 0:
            # Re-run after a random delay to avoid a thundering herd of
            # restarted tasks.
            jitter = random.randint(30, 90)
            after = timedelta(seconds=jitter)
            run = _select_run_method(context, interactive, run_plugin, after)
            # Bug fix: `chains` was missing from this call, so plugin_name
            # bound to the `chains` parameter and every restart died with
            # NotImplementedError("API changed.").
            run(context, chains, plugin_name, requirements, interactive,
                done_requirements, restarts=restarts - 1)
            restarted = True
        else:
            if outputs:
                stdout, stdout_verbose, stderr = outputs
            else:
                stderr = output.get(interactive, err=True)
            stderr(
                "Exceeded allowed number of restarts in plugin '{}' for "
                "'{}': {}".format(plugin_name, _get_uid(context), unicode(e)),
                end='\n',
            )
    finally:
        if not restarted:
            # Keep the chain moving even when this plugin failed.
            run_next_plugin(context, chains, requirements, interactive,
                            done_requirements, outputs)
def _run_plugin(context, chain, plugin_name, requirements, interactive,
                done_requirements, outputs=None):
    """Execute one plugin and fold its results back into `context`.

    Marks the plugin done in `done_requirements` whether it succeeded,
    raised, or requested a restart; only successful plugins are added to
    `requirements`.
    """
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
        stderr = output.get(interactive, err=True)
    message = "[{}] {}... ".format(plugin_name, _get_uid(context))
    stdout(message, end='')
    new_context = {}
    try:
        is_up, message, new_context = plugin.run(chain, plugin_name,
                                                 **context)
    except plugin.Restart as e:
        # Propagate so run_plugin can reschedule with a delay.
        stdout('needs to be restarted: {}'.format(unicode(e)))
        raise
    except Exception:
        # Overwrite the "..." progress line before printing the traceback.
        stdout('', end='\r')
        stderr(
            "{}\nException in plugin '{}' for '{}'.".format(
                traceback.format_exc(),
                plugin_name,
                _get_uid(context),
            ),
            end='\n',
        )
        raise
    else:
        if message:
            stdout(message, verbose=not is_up)
        if is_up:
            requirements.add(plugin_name)
            context['successful_plugins'] = ', '.join(sorted(requirements))
        # Merge whatever the plugin discovered into the shared context.
        context.update(new_context)
    finally:
        done_requirements.add(plugin_name)
def _run_chain(context, chain_name, requirements=None, interactive=False,
               done_requirements=None, outputs=None):
    """Run the highest-priority pending plugin of `chain_name`, then let
    run_chain dispatch the rest until no runnable plugins remain.
    """
    if requirements is None:
        requirements = set()
    if done_requirements is None:
        done_requirements = set()
    # Plugins unlocked by `requirements` that have not been attempted yet.
    to_run = plugin.next(chain_name, requirements) - done_requirements
    if not to_run:
        return
    plugin_name = plugin.highest_priority(chain_name, to_run)
    try:
        _run_plugin(context, chain_name, plugin_name, requirements,
                    interactive, done_requirements, outputs)
    finally:
        # Continue the chain even if this plugin raised.
        run_chain(context, chain_name, requirements, interactive,
                  done_requirements, outputs)
def _get_uid(context):
"""Returns a unique context identifier for logging purposes for a plugin.
"""
if 'uid' in context:
return context['uid']
return context.get('ip', '')
def _select_run_method(context, interactive, function, after):
    """Return a function that either executes the task directly (if
    `interactive` is True), enqueues it right away or schedules its enqueueing
    (if `after` is given).
    """
    if interactive:
        return function
    # Pick the queue (data-center routing) before enqueueing.
    set_queue(context)
    if after:
        # FIXME: what about timeout= and result_ttl= for scheduled tasks?
        scheduler = django_rq.get_scheduler(context['queue'], )
        # timedelta -> relative delay; datetime -> absolute deadline.
        if isinstance(after, timedelta):
            enqueue = scheduler.enqueue_in
        elif isinstance(after, datetime):
            enqueue = scheduler.enqueue_at
        else:
            raise NotImplementedError(
                "after={!r} not supported.".format(after),
            )
        # Pre-bind the delay and task so all callers share one signature.
        return partial(enqueue, after, function)
    queue = django_rq.get_queue(
        context['queue'],
    )
    return partial(_enqueue, queue, function)
def _enqueue(queue, function, *args, **kwargs):
    """Push `function` onto `queue` with the standard discovery timeout."""
    queue.enqueue_call(func=function,
                       args=args,
                       kwargs=kwargs,
                       timeout=SINGLE_DISCOVERY_TIMEOUT,
                       result_ttl=0)
def discover_address(address, requirements=None, interactive=True, queue=None):
    """Kick off the discovery/postprocess plugin chains for one IP address.

    When `queue` is not given it is resolved from the address's network;
    raises NoQueueError if the address matches no configured network or
    the matched network has no discovery queue.
    """
    if queue is None:
        try:
            net = Network.from_ip(address)
        except IndexError:
            raise NoQueueError(
                "Address {0} doesn't belong to any configured "
                "network.".format(address),
            )
        if not net.queue:
            raise NoQueueError(
                "The network {0} has no discovery queue.".format(net),
            )
        queue = net.queue.name
    run_next_plugin(
        {'ip': address, 'queue': queue},
        ('discovery', 'postprocess'),
        requirements=requirements,
        interactive=interactive,
    )
def discover_network(network, plugin_name='ping', requirements=None,
                     interactive=False, update_existing=False, outputs=None):
    """Runs discovery for a single `network`. The argument may be
    an IPv[46]Network instance, a Network instance or a string
    holding a network address or a network name defined in the database.
    If `interactive` is False all output is omitted and discovery is done
    asynchronously by pushing tasks to Rabbit.
    If `update_existing` is True, only existing IPs from the specified
    network are updated.
    """
    # NOTE(review): `plugin_name` is accepted but never used in this body --
    # confirm whether it should be forwarded to discover_address.
    sanity_check()
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
    # Resolve the argument into an IP network object (`net`) and, where
    # possible, the matching database record (`dbnet`) that carries the queue.
    dbnet = None
    if isinstance(network, (IPv4Network, IPv6Network)):
        net = network
        try:
            dbnet = Network.objects.get(address=str(network))
        except Network.DoesNotExist:
            pass
    elif isinstance(network, Network):
        net = network.network
        dbnet = network
    else:
        # A string: try by address first, then by name.
        try:
            network = Network.objects.get(address=network)
        except Network.DoesNotExist:
            network = Network.objects.get(name=network)
            # if raises DoesNotExist here then so be it, user passed
            # a non-existent network.
        net = network.network
        dbnet = network
    if not dbnet or not dbnet.queue:
        # Only do discover on networks that have a queue defined.
        stdout("Skipping network {} -- no queue defined.".format(net))
        return
    queue_name = dbnet.queue.name
    stdout("Scanning network {} started.".format(net))
    if update_existing:
        # Scan only addresses already recorded inside the network's numeric
        # range (network and broadcast addresses excluded).
        ip_address_queryset = IPAddress.objects.filter(
            number__gt=int(net.ip), number__lt=int(net.broadcast))
        hosts = (i.address for i in ip_address_queryset)
    else:
        hosts = net.iterhosts()
    for host in hosts:
        discover_address(host, requirements, interactive, queue_name)
    if interactive:
        stdout()
    else:
        stdout('Scanning network {} finished.'.format(net))
def discover_all(interactive=False, update_existing=False, outputs=None):
    """Runs discovery on all networks defined in the database."""
    sanity_check()
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
    # Only networks with a named discovery queue take part.
    discoverable = Network.objects.exclude(queue=None).exclude(queue__name='')
    for net in discoverable:
        if interactive:
            # NOTE(review): the interactive path hard-codes
            # update_existing=True instead of forwarding the argument --
            # preserved as-is.
            discover_network(net.network, interactive=True,
                             update_existing=True)
        else:
            django_rq.get_queue().enqueue(
                discover_network,
                net.network,
                update_existing=update_existing,
            )
    stdout()
| [
"traceback.format_exc",
"textwrap.dedent",
"random.randint",
"ralph.discovery.models.Network.objects.exclude",
"re.compile",
"ralph.discovery.models.Network.objects.get",
"ralph.util.plugin.highest_priority",
"ralph.util.network.ping",
"ralph.discovery.models.Network.from_ip",
"functools.partial",... | [((681, 730), 're.compile', 're.compile', (['"""(?P<attribute>[^:]+): (?P<value>.*)"""'], {}), "('(?P<attribute>[^:]+): (?P<value>.*)')\n", (691, 730), False, 'import re\n'), ((2347, 2370), 'ralph.util.output.get', 'output.get', (['interactive'], {}), '(interactive)\n', (2357, 2370), False, 'from ralph.util import output, plugin\n'), ((7771, 7814), 'ralph.util.plugin.highest_priority', 'plugin.highest_priority', (['chain_name', 'to_run'], {}), '(chain_name, to_run)\n', (7794, 7814), False, 'from ralph.util import output, plugin\n'), ((9101, 9138), 'django_rq.get_queue', 'django_rq.get_queue', (["context['queue']"], {}), "(context['queue'])\n", (9120, 9138), False, 'import django_rq\n'), ((9165, 9199), 'functools.partial', 'partial', (['_enqueue', 'queue', 'function'], {}), '(_enqueue, queue, function)\n', (9172, 9199), False, 'from functools import partial\n'), ((1658, 1689), 'ralph.util.network.ping', 'ping', (['SANITY_CHECK_PING_ADDRESS'], {}), '(SANITY_CHECK_PING_ADDRESS)\n', (1662, 1689), False, 'from ralph.util.network import ping\n'), ((2800, 2821), 'django_rq.get_queue', 'django_rq.get_queue', ([], {}), '()\n', (2819, 2821), False, 'import django_rq\n'), ((6355, 6378), 'ralph.util.output.get', 'output.get', (['interactive'], {}), '(interactive)\n', (6365, 6378), False, 'from ralph.util import output, plugin\n'), ((6396, 6429), 'ralph.util.output.get', 'output.get', (['interactive'], {'err': '(True)'}), '(interactive, err=True)\n', (6406, 6429), False, 'from ralph.util import output, plugin\n'), ((6594, 6635), 'ralph.util.plugin.run', 'plugin.run', (['chain', 'plugin_name'], {}), '(chain, plugin_name, **context)\n', (6604, 6635), False, 'from ralph.util import output, plugin\n'), ((7661, 7698), 'ralph.util.plugin.next', 'plugin.next', (['chain_name', 'requirements'], {}), '(chain_name, requirements)\n', (7672, 7698), False, 'from ralph.util import output, plugin\n'), ((8701, 8742), 'django_rq.get_scheduler', 
'django_rq.get_scheduler', (["context['queue']"], {}), "(context['queue'])\n", (8724, 8742), False, 'import django_rq\n'), ((9055, 9088), 'functools.partial', 'partial', (['enqueue', 'after', 'function'], {}), '(enqueue, after, function)\n', (9062, 9088), False, 'from functools import partial\n'), ((10785, 10808), 'ralph.util.output.get', 'output.get', (['interactive'], {}), '(interactive)\n', (10795, 10808), False, 'from ralph.util import output, plugin\n'), ((12439, 12462), 'ralph.util.output.get', 'output.get', (['interactive'], {}), '(interactive)\n', (12449, 12462), False, 'from ralph.util import output, plugin\n'), ((3544, 3576), 'ralph.util.plugin.next', 'plugin.next', (['chain', 'requirements'], {}), '(chain, requirements)\n', (3555, 3576), False, 'from ralph.util import output, plugin\n'), ((3642, 3680), 'ralph.util.plugin.highest_priority', 'plugin.highest_priority', (['chain', 'to_run'], {}), '(chain, to_run)\n', (3665, 3680), False, 'from ralph.util import output, plugin\n'), ((9544, 9568), 'ralph.discovery.models.Network.from_ip', 'Network.from_ip', (['address'], {}), '(address)\n', (9559, 9568), False, 'from ralph.discovery.models import Network, IPAddress\n'), ((12474, 12509), 'ralph.discovery.models.Network.objects.exclude', 'Network.objects.exclude', ([], {'queue': 'None'}), '(queue=None)\n', (12497, 12509), False, 'from ralph.discovery.models import Network, IPAddress\n'), ((12758, 12779), 'django_rq.get_queue', 'django_rq.get_queue', ([], {}), '()\n', (12777, 12779), False, 'import django_rq\n'), ((1267, 1297), 'ralph.discovery.models.Network.from_ip', 'Network.from_ip', (["context['ip']"], {}), "(context['ip'])\n", (1282, 1297), False, 'from ralph.discovery.models import Network, IPAddress\n'), ((5288, 5310), 'random.randint', 'random.randint', (['(30)', '(90)'], {}), '(30, 90)\n', (5302, 5310), False, 'import random\n'), ((5331, 5356), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'jitter'}), '(seconds=jitter)\n', (5340, 5356), False, 
'from datetime import datetime, timedelta\n'), ((11171, 11207), 'ralph.discovery.models.Network.objects.get', 'Network.objects.get', ([], {'address': 'network'}), '(address=network)\n', (11190, 11207), False, 'from ralph.discovery.models import Network, IPAddress\n'), ((5725, 5758), 'ralph.util.output.get', 'output.get', (['interactive'], {'err': '(True)'}), '(interactive, err=True)\n', (5735, 5758), False, 'from ralph.util import output, plugin\n'), ((6938, 6960), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6958, 6960), False, 'import traceback\n'), ((11267, 11300), 'ralph.discovery.models.Network.objects.get', 'Network.objects.get', ([], {'name': 'network'}), '(name=network)\n', (11286, 11300), False, 'from ralph.discovery.models import Network, IPAddress\n'), ((1747, 2211), 'textwrap.dedent', 'textwrap.dedent', (['"""\n fatal: {} is not pingable.\n\n Things you might want to check:\n * is this host connected to network\n * is this domain pingable from your terminal\n * is your python binary capped with setcap CAP_NET_RAW\n or\n * are you running tests from root\n or\n * are you using setuid bin/python\n """'], {}), '(\n """\n fatal: {} is not pingable.\n\n Things you might want to check:\n * is this host connected to network\n * is this domain pingable from your terminal\n * is your python binary capped with setcap CAP_NET_RAW\n or\n * are you running tests from root\n or\n * are you using setuid bin/python\n """\n )\n', (1762, 2211), False, 'import textwrap\n')] |
'''
Module containing a preprocessor that keeps cells if they match given
expression.
'''
# Author: <NAME> <<EMAIL>>
import re
from typing import Pattern
from traitlets import Unicode
from nbconvert.preprocessors import Preprocessor
class HomeworkPreproccessor(Preprocessor):
    '''Keeps cells from a notebook that match a regular expression.

    When `pattern` is empty the notebook passes through unchanged.
    '''
    # Regular expression a cell's source must match (from its start) to be kept.
    pattern = Unicode().tag(config=True)

    def check_conditions(self, cell):
        '''Checks that a cell matches the pattern.
        Returns: Boolean.
        True means cell should be kept.
        '''
        # Wrap in bool() so the documented contract holds; previously the
        # re.Match object (or None) leaked to callers.
        regexp_compiled = re.compile(self.pattern)
        return bool(regexp_compiled.match(cell.source))

    def preprocess(self, nb, resources):
        '''Preprocessing to apply to each notebook: drop non-matching cells.'''
        if not self.pattern:
            return nb, resources
        nb.cells = [cell for cell in nb.cells if self.check_conditions(cell)]
        return nb, resources
"traitlets.Unicode",
"re.compile"
] | [((598, 622), 're.compile', 're.compile', (['self.pattern'], {}), '(self.pattern)\n', (608, 622), False, 'import re\n'), ((364, 373), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (371, 373), False, 'from traitlets import Unicode\n')] |
#!/usr/bin/env python3
import socket
import threading
import logging
# Log everything (DEBUG and up) to a local file.
logging.basicConfig(filename='meca.log', level=logging.DEBUG)
# Every command sent to the robot is appended here for later replay/inspection.
PROGRAM_FILE = 'program_output.txt'
# Dictionary of status indexes in robot status message
statusDict = {'activated': 0,
              'homed': 1,
              'simulating': 2,
              'error': 3,
              'paused': 4,
              'EOB': 5,
              'EOM': 6}
# Ease of use cartesian index labeling
cartDict = {'x': 0,
            'y': 1,
            'z': 2,
            'rx': 3,
            'ry': 4,
            'rz': 5}
# Dictionary of command responses
# Maps each command name to the reply code(s) that indicate success.
responseDict = {'ActivateRobot': [2000, 2001],
                'DeactivateRobot': [2004],
                'BrakesOn': [2010],
                'BrakesOff': [2008],
                'Home': [2002, 2003],
                'GetJoints': [2026],
                'GetPose': [2027],
                'ClearMotion': [2044],
                'PauseMotion': [2042],
                'ResumeMotion': [2043],
                'ResetError': [2005],
                'GetStatusRobot': [2007],
                'GetFwVersion': [2081],
                'GetProductType': [2084]}
# Combined control and feedback class for Mecademic
class Robot:
    """Driver for a Mecademic robot arm over its plain-text TCP protocol.

    Two sockets are used: port 10000 for control commands and port 10001 for
    position feedback.  Every command sent through SendCommand() is also
    appended to PROGRAM_FILE for later inspection.

    Fixes applied versus the previous revision:
    * MoveJoints read the nonexistent attribute ``self.joint``
      (now ``self.joints``), and resolves named joint sets before limit checks.
    * SetTool/SetWork read the nonexistent ``self.tool``/``self.work``
      (now ``self.toolFrame``/``self.workFrame``) and now format the command
      as ``SetTRF(x, y, ...)`` consistently with every other command.
    * Approach/Depart called the nonexistent ``MoveP``/``MoveL``
      (now ``MovePose``/``MoveLinear``).
    * SetCheckpoint called the nonexistent ``self._GetMessage`` and compared
      string codes to ints.
    """
    def __init__(self, ip):
        # ip: controller address; no connection is made until Connect().
        self.ip = ip
        self.connected = False
        # Named cartesian poses [x, y, z, rx, ry, rz] (mm/deg) and joint sets (deg)
        self.pose = {'stow': [75,0,240,0,90,0], 'home': [110,-150,130,-180,0,-180]}
        self.joints = {'stow': [0,-60,60,0,0,0]}
        # Named tool (TRF) and work (WRF) reference frames
        self.toolFrame = {'flange': [0,0,0,0,0,0]}
        self.workFrame = {'base': [0,0,0,0,0,0]}
    # Connect to both control and feedback servers
    def Connect(self):
        self.connected = True
        self.controlClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.controlClient.settimeout(10)  # 10 s socket timeout
        self.controlClient.connect((self.ip, 10000))
        code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
        if int(code) != 3000:
            if int(code) == 3001:
                print('Another user is already connected!')
                exit()
            logging.warning('Unable to connect to port 10000')
            self.connected = False
        # Clear initial errors
        if self.GetStatus('error'):
            logging.info('Error on initialization')
            self.ResetError()
        self.firmware = self.ReadResponse('GetFwVersion')
        self.product = self.ReadResponse('GetProductType')
        self.feedbackClient = socket.socket()
        self.feedbackClient.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
        self.feedbackClient.settimeout(10)  # 10 s socket timeout
        self.feedbackClient.connect((self.ip, 10001))
        code = int(self.feedbackClient.recv(1024).decode('ascii')[1:-2].split('][')[0])
        if int(code) != 2079:
            logging.warning('Unable to connect to port 10001')
            self.connected = False
        # Truncate the program log for this session (with-block closes the file).
        with open(PROGRAM_FILE,'w') as f:
            f.write('')
        return self.connected
    # Easy setup routine
    def Startup(self):
        if self.Activate(): return self.Home()
    # Ease of use 0-100% global speed adjustment
    def SetSpeed(self, percentage):
        # If speed is provided as fractional change to percentage
        if percentage < 1: percentage *= 100
        self.SetCartAcc(percentage)
        self.SetCartAngVel(3*percentage)
        self.SetCartLinVel(10*percentage)
        self.SetJointAcc(1.5*percentage)
        self.SetJointVel(percentage)
    # Move robot in +Z of tool frame
    def Push(self, mm): self.MoveToolRel([0,0,mm,0,0,0])
    # Move robot in -Z of tool frame
    def Pull(self, mm): self.MoveToolRel([0,0,-mm,0,0,0])
    def Wiggle(self):
        # Small +/- rotation around the tool X axis.
        self.MoveToolRel([0,0,0,4,0,0])
        self.MoveToolRel([0,0,0,-4,0,0])
    # Move robot to a Z-offset above the target pose (joint-interpolated)
    def Approach(self, pose, zOffset):
        approachPose = pose.copy()
        approachPose[2] += zOffset
        self.MovePose(approachPose)  # was self.MoveP (nonexistent)
    # Move robot to a Z-offset above the target pose (linear)
    def Depart(self, pose, zOffset):
        departPose = pose.copy()
        departPose[2] += zOffset
        self.MoveLinear(departPose)  # was self.MoveL (nonexistent)
    # Power-up robot motors
    def Activate(self):
        if self.GetStatus('activated'): return True
        else: return self.SendCommand('ActivateRobot')
    # Power-down robot motors
    def Deactivate(self):
        if not self.GetStatus('activated'): return True
        else: return self.SendCommand('DeactivateRobot')
    # De-activate robot and engage brakes
    def BrakesOn(self):
        if self.GetStatus('activated'): self.Deactivate()
        else: return self.SendCommand('BrakesOn')
    # Activate robot and disengage brakes
    def BrakesOff(self):
        if not self.GetStatus('activated'): self.Activate()
        else: return self.SendCommand('BrakesOff')
    # Home robot motors
    def Home(self):
        if self.GetStatus('homed'): return True
        else: return self.SendCommand('Home')
    # Move robot to target "pose" list relative to work plane
    def MovePose(self, pose):
        if self.GetStatus('paused'): self.ResumeMove()
        sentPose = _returnList(self.pose, pose)
        if sentPose is not None: return self.SendCommand(f'MovePose{tuple(sentPose)}')
        else: return False
    # Move robot to target "joints" list (or named joint set)
    def MoveJoints(self, joints):
        # Resolve a named joint set first so limit checks see numbers
        # (previously read the nonexistent attribute self.joint).
        sentJoints = _returnList(self.joints, joints)
        if sentJoints is None: return False
        if not self._checkJointLimits(sentJoints):
            logging.warning("Target position outside joint limits!")
            return False
        if self.GetStatus('paused'): self.ResumeMove()
        return self.SendCommand(f'MoveJoints{tuple(sentJoints)}')
    # Jog robot at target "joints" speed
    def MoveJV(self, joints):
        if not self._checkJointSpeedLimits(joints):
            logging.warning("Target speed outside joint limits!")
            return False
        else:
            if self.GetStatus('paused'): self.ResumeMove()
            return self.SendCommand(f'MoveJointsVel{tuple(joints)}')
    # Move robot linearly to target "pose" list relative to work frame
    def MoveLinear(self, pose):
        if self.GetStatus('paused'): self.ResumeMove()
        sentPose = _returnList(self.pose, pose)
        if sentPose is not None: return self.SendCommand(f'MoveLin{tuple(sentPose)}')
        else: return False
    # Move robot in by "pose" list relative to tool frame
    def MoveToolRel(self, pose):
        return self.SendCommand(f'MoveLinRelTRF{tuple(pose)}')
    # Move robot in by "pose" list relative to work frame
    def MoveWorkRel(self, pose):
        return self.SendCommand(f'MoveLinRelWRF{tuple(pose)}')
    # Jog at target "pose" speed relative to tool frame
    def MoveToolVel(self, pose):
        return self.SendCommand(f'MoveLinVelTRF{tuple(pose)}')
    # Jog tool at target "pose" speed relative to work plane
    def MoveWorkVel(self, pose):
        return self.SendCommand(f'MoveLinVelWRF{tuple(pose)}')
    # Set blend radius from 0-100%
    def SetBlending(self, percentage):
        assert percentage >= 0 and percentage <= 100
        return self.SendCommand(f'SetBlending({percentage})')
    # Set cartesian acceleration from 0.001-600%
    def SetCartAcc(self, percentage):
        assert percentage >= .001 and percentage <= 600
        return self.SendCommand(f'SetCartAcc({percentage})')
    # Set cartesian angular velocity from 0.001-300deg/s
    def SetCartAngVel(self, degrees):
        assert degrees >= 0.001 and degrees <= 300
        return self.SendCommand(f'SetCartAngVel({degrees})')
    # Set cartesian linear velocity from 0.001-1,000mm/s
    def SetCartLinVel(self, mms):
        assert mms >= 0.001 and mms <= 1000
        return self.SendCommand(f'SetCartLinVel({mms})')
    # Set joint acceleration from 0.001-150%
    def SetJointAcc(self, percentage):
        return self.SendCommand(f'SetJointAcc({percentage})')
    # Set joint velocity from 0.001-100%
    def SetJointVel(self, percentage):
        return self.SendCommand(f'SetJointVel({percentage})')
    # Add a new robot pose
    def AddPose(self, poseName, pose):
        self.pose[poseName] = pose
    # Add a new robot joint position
    def AddJoints(self, jointsName, joint):
        self.joints[jointsName] = joint
    # Set tool frame to existing tool or arbitrary offset
    def SetTool(self, toolOffset):
        # was self.tool (nonexistent); tuple formatting matches other commands
        sentTool = _returnList(self.toolFrame, toolOffset)
        self.SendCommand(f'SetTRF{tuple(sentTool)}')
    # Add a new tool frame to robot tools
    def AddTool(self, toolName, toolOffset):
        if len(toolOffset) == 3:
            # Pad a pure translation with zero rotations.
            for vector in range(3):
                toolOffset.append(0)
        self.toolFrame[toolName] = toolOffset
    # Set work plane to existing plane or arbitrary offset
    def SetWork(self, workPlane):
        # was self.work (nonexistent); tuple formatting matches other commands
        sentWork = _returnList(self.workFrame, workPlane)
        self.SendCommand(f'SetWRF{tuple(sentWork)}')
    # Add a new work plane to robot workFrame dict
    def AddWork(self, workName, workPlane):
        if len(workPlane) == 3:
            # Pad a pure translation with zero rotations.
            for vector in range(3):
                workPlane.append(0)
        self.workFrame[workName] = workPlane
    # Get list of current joint positions in degrees
    def GetJoints(self):
        return self.ReadResponse('GetJoints')
    # Get list of current cartesian position in millimeters
    def GetPose(self):
        return self.ReadResponse('GetPose')
    # Delete current planned move
    def ClearMove(self):
        return self.SendCommand('ClearMotion')
    # Pause current move
    def PauseMove(self):
        return self.SendCommand('PauseMotion')
    # Resume current move
    def ResumeMove(self):
        return self.SendCommand('ResumeMotion')
    # Reset error
    def ResetError(self):
        return self.SendCommand('ResetError')
    def SetCheckpoint(self, step=1):
        self.controlClient.send(bytes(f'SetCheckpoint({step})\0','ascii'))
        # Parse the reply directly (was a call to the nonexistent
        # self._GetMessage); compare as int so the check can ever succeed.
        # NOTE(review): confirm [2000, 2001] are the expected reply codes for
        # SetCheckpoint against the Mecademic programming manual.
        code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
        if int(code) in [2000, 2001]: return True
        else: return False
    # Set position update rate in ms
    def SetMonitoringInterval(self, ms):
        assert ms >= 0.001 and ms <= 1
        return self.SendCommand(f'SetMonitoringInterval({ms})', client='feedback')
    # Get robot status as list of booleans
    def GetStatus(self, status='all'):
        responseList = self.ReadResponse('GetStatusRobot').split(',')
        responseBool = [bool(int(response)) for response in responseList]
        if status != 'all':
            if status in statusDict.keys():
                return responseBool[statusDict[status]]
            else:
                print(f'Use an available value:\n{statusDict.keys()}')
        else:
            return responseBool
    # Send command and receive confirmation
    def SendCommand(self, cmd, client='command'):
        if self.connected is False: self.Connect()
        if client == 'command':
            _writeProgram(cmd)
            self.controlClient.send(bytes(f'{cmd}\0','ascii'))
            code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
            if int(code) in self._getCodes(cmd): return True
            else:
                print(f'Error: {response}')
                self.ResetError()
                return False
        else:
            self.feedbackClient.send(bytes(f'{cmd}\0','ascii'))
            code, response = self.feedbackClient.recv(1024).decode('ascii')[1:-2].split('][')
            print(code, response)
            return True
    # Send command and receive message
    def ReadResponse(self, cmd):
        if self.connected is False: self.Connect()
        self.controlClient.send(bytes(f'{cmd}\0','ascii'))
        code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
        if int(code) in self._getCodes(cmd): return response
        else:
            logging.warning(f'Error: {response}')
            return None
    # Receive current joint or cartesian positions
    def ReadPosition(self, cmd):
        if self.connected is False: self.Connect()
        # Feedback stream delivers joint and pose messages back to back.
        jointResponse, poseResponse = self.feedbackClient.recv(1024).decode('ascii').split('\x00')[:2]
        print(jointResponse, poseResponse)
        if cmd == 'GetJoints': msg = jointResponse
        elif cmd == 'GetPose': msg = poseResponse
        code, responseString = msg[1:-2].split('][')
        if not int(code) in self._getCodes(cmd):
            logging.warning(f'Error: {responseString}')
            return None
        responseList = responseString.split(',')
        responseFloat = [float(response) for response in responseList]
        return responseFloat
    # Look up corresponding error code in dictionary
    def _getCodes(self, cmd):
        if cmd.startswith('Move'):
            return [3004,3012]
        elif cmd.startswith('Set'):
            return [3012]
        else:
            return responseDict[cmd]
    # Joint angle limits (degrees) per the Meca500 envelope
    def _checkJointLimits(self, joints):
        assert abs(joints[0]) <= 175
        assert joints[1] >= -70 and joints[1] <= 90
        assert joints[2] >= -135 and joints[2] <= 70
        assert abs(joints[3]) <= 170
        assert abs(joints[4]) <= 115
        assert abs(joints[5]) <= 180
        return True
    # Joint speed limits (degrees/s)
    def _checkJointSpeedLimits(self, joints):
        assert abs(joints[0]) <= 150
        assert abs(joints[1]) <= 150
        assert abs(joints[2]) <= 180
        assert abs(joints[3]) <= 300
        assert abs(joints[4]) <= 300
        assert abs(joints[5]) <= 500
        return True
    # Cartesian speed limits (mm/s translations, deg/s rotations)
    def _checkPoseSpeedLimits(self, pose):
        assert pose[0] >= 0.001 and pose[0] <= 1000
        assert pose[1] >= 0.001 and pose[1] <= 1000
        assert pose[2] >= 0.001 and pose[2] <= 1000
        assert pose[3] >= 0.001 and pose[3] <= 300
        assert pose[4] >= 0.001 and pose[4] <= 300
        assert pose[5] >= 0.001 and pose[5] <= 500
        return True
    # Rotational speed limits (deg/s) for every component
    def _checkPoseRotLimits(self, pose):
        for vector in pose:
            assert vector >= 0.001 and vector <= 300
# Pose object
class Pose():
    # Lightweight container for a 6-element coordinate list plus a tag naming
    # the coordinate space it lives in.
    def __init__(self, pose, coords='pose'):
        self.coords = coords
        self.pose = pose
    # Ease of use 0-100% global speed adjustment
    # NOTE(review): copied from Robot -- Pose defines none of the
    # SetCart*/SetJoint* methods called below, so calling SetSpeed() on a
    # Pose raises AttributeError.  Confirm whether this belongs here at all.
    def SetSpeed(self, percentage):
        # If speed is provided as fractional change to percentage
        if percentage < 1: percentage *= 100
        self.SetCartAcc(percentage)
        self.SetCartAngVel(3*percentage)
        self.SetCartLinVel(10*percentage)
        self.SetJointAcc(1.5*percentage)
        self.SetJointVel(percentage)
# Pose object
# NOTE(review): header comment says "Pose object" but this is CompoundMove;
# the constructor accepts `pose` yet never stores it -- looks unfinished.
class CompoundMove():
    def __init__(self, pose, coords='pose'):
        self.coords = coords
# Convert internal pose to pose list if needed
def _returnList(poseDict, pose):
if type(pose) is str:
if pose in poseDict.keys():
return poseDict[pose]
else:
print('Not a valid pose!')
return None
else:
assert type(pose) is list
return pose
def _writeProgram(command):
    """Append *command* to the PROGRAM_FILE command log, one per line."""
    # The with-block closes the file; the explicit f.close() the original
    # carried inside the block was redundant and has been removed.
    with open(PROGRAM_FILE,'a') as f:
        f.write(f'{command}\n')
"logging.basicConfig",
"logging.warning",
"logging.info",
"socket.socket"
] | [((69, 130), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""meca.log"""', 'level': 'logging.DEBUG'}), "(filename='meca.log', level=logging.DEBUG)\n", (88, 130), False, 'import logging\n'), ((1719, 1768), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1732, 1768), False, 'import socket\n'), ((2527, 2542), 'socket.socket', 'socket.socket', ([], {}), '()\n', (2540, 2542), False, 'import socket\n'), ((2122, 2172), 'logging.warning', 'logging.warning', (['"""Unable to connect to port 10000"""'], {}), "('Unable to connect to port 10000')\n", (2137, 2172), False, 'import logging\n'), ((2288, 2327), 'logging.info', 'logging.info', (['"""Error on initialization"""'], {}), "('Error on initialization')\n", (2300, 2327), False, 'import logging\n'), ((2860, 2910), 'logging.warning', 'logging.warning', (['"""Unable to connect to port 10001"""'], {}), "('Unable to connect to port 10001')\n", (2875, 2910), False, 'import logging\n'), ((5544, 5600), 'logging.warning', 'logging.warning', (['"""Target position outside joint limits!"""'], {}), "('Target position outside joint limits!')\n", (5559, 5600), False, 'import logging\n'), ((5999, 6052), 'logging.warning', 'logging.warning', (['"""Target speed outside joint limits!"""'], {}), "('Target speed outside joint limits!')\n", (6014, 6052), False, 'import logging\n'), ((12103, 12140), 'logging.warning', 'logging.warning', (['f"""Error: {response}"""'], {}), "(f'Error: {response}')\n", (12118, 12140), False, 'import logging\n'), ((12662, 12705), 'logging.warning', 'logging.warning', (['f"""Error: {responseString}"""'], {}), "(f'Error: {responseString}')\n", (12677, 12705), False, 'import logging\n')] |
import os
import numpy as np
import tensorflow as tf
from PIL import Image
def modcrop(im, modulo):
    """Crop the first two dimensions of *im* down to the nearest multiple
    of *modulo*; 2-D and 3-D arrays only (AttributeError otherwise)."""
    if im.ndim not in (2, 3):
        raise AttributeError
    # size - size % modulo == largest multiple of modulo not above size
    h, w = (np.array(im.shape[:2]) // modulo) * modulo
    if im.ndim == 3:
        return im[:h, :w, :]
    return im[:h, :w]
def shave(im, border):
    """Trim ``border = [rows, cols]`` pixels from every edge of *im*.

    NOTE(review): a zero border yields an empty slice (``im[0:0]``);
    callers are expected to pass positive borders.
    """
    rows, cols = border[0], border[1]
    if im.ndim == 3:
        return im[rows:-rows, cols:-cols, :]
    if im.ndim == 2:
        return im[rows:-rows, cols:-cols]
    raise AttributeError
def compute_psnr(im1, im2):
    """Peak signal-to-noise ratio (dB) between two equally-shaped 8-bit
    images, assuming a 255 peak value.

    Raises Exception when the shapes differ.
    """
    if im1.shape != im2.shape:
        raise Exception('the shapes of two images are not equal')
    # np.asfarray was removed in NumPy 2.0; asarray with float64 is the
    # documented equivalent and behaves identically here.
    diff = np.asarray(im1, dtype=np.float64) - np.asarray(im2, dtype=np.float64)
    rmse = np.sqrt((diff ** 2).mean())
    psnr = 20 * np.log10(255.0 / rmse)
    return psnr
def main():
    """Denoise every noisy capture under folder with the frozen TF graph and
    report per-image and aggregate PSNR against the mean-frame references.
    """
    # folder path
    folder = '../datas/Set60/ISO6400'
    # generate the file list
    # NOTE(review): `filepath` is built and sorted but never used afterwards.
    filepath = os.listdir(folder)
    filepath.sort()
    # Placeholder for a single RGB image batch (assumed float in [0, 1]).
    im_input = tf.placeholder('float', [1, None, None, 3], name='im_input')
    # create a session for running operations in the graph
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    with tf.device('/gpu:0'):
        # Load the frozen model and rebind its input to our placeholder.
        with open('./graph.pb', 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            output = tf.import_graph_def(graph_def, input_map={'im_input:0': im_input}, return_elements=['output:0'])
    record_psnr = []
    # 20 scenes x 3 patches; each has 10 noisy captures (file naming scheme:
    # %03dMP%d.PNG = clean reference, %03dN%02dP%d.PNG = noisy shot).
    for i in np.arange(1, 20+1, 1):
        for p in np.arange(1, 3+1, 1):
            psnrs = []
            im = np.array(Image.open(os.path.join(folder, '%03d/%03dMP%d.PNG' % (i, i, p))))
            #Image.fromarray(im).show()
            for g in np.arange(1, 10+1, 1):
                im_n = np.array(Image.open(os.path.join(folder, '%03d/%03dN%02dP%d.PNG' % (i, i, g, p))))
                #Image.fromarray(im_n).show()
                # Normalize, add the batch axis, denoise, then rescale and
                # clamp back into the valid 8-bit range.
                im_n = im_n.astype(np.float32) / 255.0
                im_n = np.expand_dims(im_n, axis=0)
                im_dn = sess.run(output, feed_dict={im_input: im_n})
                im_dn = np.squeeze(im_dn) * 255.0
                im_dn = np.maximum(im_dn, 0)
                im_dn = np.minimum(im_dn, 255)
                #Image.fromarray(np.asarray(im_dn, dtype=np.uint8)).show()
                psnr = compute_psnr(im, np.asarray(im_dn, dtype=np.uint8))
                print('i%03d p%d g%02d: %.2f dB' % (i, p, g, psnr))
                psnrs.append(psnr)
            record_psnr.append(psnrs)
    # Mean PSNR +/- mean per-image standard deviation over all captures.
    print('%.2f+-%.3f dB' % (np.mean(record_psnr), np.mean(np.std(record_psnr, 1))))
if __name__ == '__main__':
    main()
| [
"numpy.log10",
"numpy.asfarray",
"numpy.array",
"numpy.arange",
"numpy.mean",
"os.listdir",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.GraphDef",
"tensorflow.ConfigProto",
"numpy.maximum",
"tensorflow.device",
"numpy.squeeze",
"tensorflow.import_graph_de... | [((971, 989), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (981, 989), False, 'import os\n'), ((1021, 1081), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[1, None, None, 3]'], {'name': '"""im_input"""'}), "('float', [1, None, None, 3], name='im_input')\n", (1035, 1081), True, 'import tensorflow as tf\n'), ((1149, 1190), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1163, 1190), True, 'import tensorflow as tf\n'), ((1239, 1264), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1249, 1264), True, 'import tensorflow as tf\n'), ((1540, 1563), 'numpy.arange', 'np.arange', (['(1)', '(20 + 1)', '(1)'], {}), '(1, 20 + 1, 1)\n', (1549, 1563), True, 'import numpy as np\n'), ((135, 153), 'numpy.array', 'np.array', (['im.shape'], {}), '(im.shape)\n', (143, 153), True, 'import numpy as np\n'), ((832, 854), 'numpy.log10', 'np.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (840, 854), True, 'import numpy as np\n'), ((1273, 1292), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1282, 1292), True, 'import tensorflow as tf\n'), ((1574, 1596), 'numpy.arange', 'np.arange', (['(1)', '(3 + 1)', '(1)'], {}), '(1, 3 + 1, 1)\n', (1583, 1596), True, 'import numpy as np\n'), ((260, 278), 'numpy.array', 'np.array', (['im.shape'], {}), '(im.shape)\n', (268, 278), True, 'import numpy as np\n'), ((1348, 1361), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1359, 1361), True, 'import tensorflow as tf\n'), ((1413, 1513), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'input_map': "{'im_input:0': im_input}", 'return_elements': "['output:0']"}), "(graph_def, input_map={'im_input:0': im_input},\n return_elements=['output:0'])\n", (1432, 1513), True, 'import tensorflow as tf\n'), ((1737, 1760), 'numpy.arange', 'np.arange', (['(1)', '(10 + 1)', '(1)'], {}), '(1, 
10 + 1, 1)\n', (1746, 1760), True, 'import numpy as np\n'), ((1943, 1971), 'numpy.expand_dims', 'np.expand_dims', (['im_n'], {'axis': '(0)'}), '(im_n, axis=0)\n', (1957, 1971), True, 'import numpy as np\n'), ((2081, 2101), 'numpy.maximum', 'np.maximum', (['im_dn', '(0)'], {}), '(im_dn, 0)\n', (2091, 2101), True, 'import numpy as np\n'), ((2114, 2136), 'numpy.minimum', 'np.minimum', (['im_dn', '(255)'], {}), '(im_dn, 255)\n', (2124, 2136), True, 'import numpy as np\n'), ((2400, 2420), 'numpy.mean', 'np.mean', (['record_psnr'], {}), '(record_psnr)\n', (2407, 2420), True, 'import numpy as np\n'), ((1638, 1691), 'os.path.join', 'os.path.join', (['folder', "('%03d/%03dMP%d.PNG' % (i, i, p))"], {}), "(folder, '%03d/%03dMP%d.PNG' % (i, i, p))\n", (1650, 1691), False, 'import os\n'), ((2042, 2059), 'numpy.squeeze', 'np.squeeze', (['im_dn'], {}), '(im_dn)\n', (2052, 2059), True, 'import numpy as np\n'), ((2229, 2262), 'numpy.asarray', 'np.asarray', (['im_dn'], {'dtype': 'np.uint8'}), '(im_dn, dtype=np.uint8)\n', (2239, 2262), True, 'import numpy as np\n'), ((2430, 2452), 'numpy.std', 'np.std', (['record_psnr', '(1)'], {}), '(record_psnr, 1)\n', (2436, 2452), True, 'import numpy as np\n'), ((768, 784), 'numpy.asfarray', 'np.asfarray', (['im1'], {}), '(im1)\n', (779, 784), True, 'import numpy as np\n'), ((787, 803), 'numpy.asfarray', 'np.asfarray', (['im2'], {}), '(im2)\n', (798, 803), True, 'import numpy as np\n'), ((1791, 1851), 'os.path.join', 'os.path.join', (['folder', "('%03d/%03dN%02dP%d.PNG' % (i, i, g, p))"], {}), "(folder, '%03d/%03dN%02dP%d.PNG' % (i, i, g, p))\n", (1803, 1851), False, 'import os\n')] |
import sh
from dotenv import load_dotenv
import os
# Pull variables from a local .env file into the process environment so the
# sudo password can stay out of source control.
load_dotenv()
# May be None when `sudo_password` is not defined -- sudo would then fail.
PASSWORD = os.environ.get("sudo_password")
def c_registry():
    """Start a local Docker registry container (registry:2) on port 5000."""
    # sh.contrib.sudo supplies the password non-interactively; _with=True
    # makes it a context manager that wraps the docker invocation.
    with sh.contrib.sudo(password=PASSWORD, _with=True):
        sh.docker('run', '-d', '-p', '5000:5000', '--restart=always', '--name', 'registry', 'registry:2')
| [
"sh.contrib.sudo",
"os.environ.get",
"sh.docker",
"dotenv.load_dotenv"
] | [((53, 66), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (64, 66), False, 'from dotenv import load_dotenv\n'), ((78, 109), 'os.environ.get', 'os.environ.get', (['"""sudo_password"""'], {}), "('sudo_password')\n", (92, 109), False, 'import os\n'), ((140, 186), 'sh.contrib.sudo', 'sh.contrib.sudo', ([], {'password': 'PASSWORD', '_with': '(True)'}), '(password=PASSWORD, _with=True)\n', (155, 186), False, 'import sh\n'), ((196, 297), 'sh.docker', 'sh.docker', (['"""run"""', '"""-d"""', '"""-p"""', '"""5000:5000"""', '"""--restart=always"""', '"""--name"""', '"""registry"""', '"""registry:2"""'], {}), "('run', '-d', '-p', '5000:5000', '--restart=always', '--name',\n 'registry', 'registry:2')\n", (205, 297), False, 'import sh\n')] |
# Generated by Django 3.1.2 on 2020-10-29 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the many-to-many Drink<->Ingredient relation, routed through the
    # Recipe model (which carries the per-ingredient details).
    dependencies = [
        ('recipes', '0001_initial'),
        ('ingredients', '0001_initial'),
        ('drinks', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='drink',
            name='ingredients',
            field=models.ManyToManyField(through='recipes.Recipe', to='ingredients.Ingredient'),
        ),
    ]
| [
"django.db.models.ManyToManyField"
] | [((404, 481), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'through': '"""recipes.Recipe"""', 'to': '"""ingredients.Ingredient"""'}), "(through='recipes.Recipe', to='ingredients.Ingredient')\n", (426, 481), False, 'from django.db import migrations, models\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Nov. 22, 2021
@author: wangc
StackingRegressor
A Bagging regressor.
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ....SupervisedLearning.ScikitLearn import ScikitLearnBase
from ....utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class StackingRegressor(ScikitLearnBase):
  """
    Stack of estimators with a final regressor.
  """
  info = {'problemtype':'regression', 'normalize':False}

  def __init__(self):
    """
      Constructor that will appropriately initialize a supervised learning object
      @ In, None
      @ Out, None
    """
    super().__init__()
    self.multioutputWrapper = True
    import sklearn
    import sklearn.ensemble
    # check sklearn version, StackingRegressor is stable in sklearn version >= 0.24
    version = [int(n) for n in sklearn.__version__.split('.')]
    # BUGFIX: the previous check used "version[1] <= 24", which rejected
    # sklearn 0.24 itself even though the error message asks for
    # "0.24 or newer". Only versions strictly below 0.24 are unsupported.
    if version[0] == 0 and version[1] < 24:
      self.raiseAnError(IOError, 'StackingRegressor is not available in current sklearn version', sklearn.__version__,
                        'Please try to update sklearn version to 0.24 or newer!')
    self.model = sklearn.ensemble.StackingRegressor

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    specs = super().getInputSpecification()
    specs.description = r"""The \xmlNode{StackingRegressor} consists in stacking the output of individual estimator and
                        use a regressor to compute the final prediction. Stacking allows to use the strength of each
                        individual estimator by using their output as input of a final estimator.
                        """
    estimatorInput = InputData.assemblyInputFactory("estimator", contentType=InputTypes.StringType,
                                                 descr=r"""name of a ROM that can be used as an estimator""", default='no-default')
    specs.addSub(estimatorInput)
    specs.addSub(InputData.parameterInputFactory("final_estimator", contentType=InputTypes.StringType,
                                                 descr=r"""The name of estimator which will be used to combine the base estimators.""", default='no-default'))
    specs.addSub(InputData.parameterInputFactory("cv", contentType=InputTypes.IntegerType,
                                                 descr=r"""specify the number of folds in a (Stratified) KFold,""", default=5))
    specs.addSub(InputData.parameterInputFactory("passthrough", contentType=InputTypes.BoolType,
                                                 descr=r"""When False, only the predictions of estimators will be used as training
                                                 data for final\_estimator. When True, the final\_estimator is trained on the predictions
                                                 as well as the original training data.""", default=False))
    return specs

  def _handleInput(self, paramInput):
    """
      Function to handle the common parts of the distribution parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    settings, notFound = paramInput.findNodesAndExtractValues(['final_estimator', 'cv', 'passthrough'])
    # notFound must be empty: all three nodes carry defaults in the spec
    assert(not notFound)
    self.settings = settings

  def setEstimator(self, estimatorList):
    """
      Initialization method
      @ In, estimatorList, list of ROM instances/estimators used by ROM
      @ Out, None
    """
    super().setEstimator(estimatorList)
    estimators = []
    foundFinalEstimator = False
    for estimator in estimatorList:
      interfaceRom = estimator._interfaceROM
      if interfaceRom.info['problemtype'] != 'regression':
        self.raiseAnError(IOError, 'estimator:', estimator.name, 'with problem type', interfaceRom.info['problemtype'],
                          'can not be used for', self.name)
      # In sklearn, multioutput wrapper can not be used by outer and inner estimator at the same time
      # If the outer estimator can handle multioutput, the multioutput wrapper of inner can be kept,
      # otherwise, we need to remove the wrapper for inner estimator.
      if interfaceRom.multioutputWrapper:
        sklEstimator = interfaceRom.model.get_params()['estimator']
      else:
        sklEstimator = interfaceRom.model
      if estimator.name == self.settings['final_estimator']:
        self.settings['final_estimator'] = sklEstimator
        foundFinalEstimator = True
        continue
      estimators.append((estimator.name, sklEstimator))
    self.settings['estimators'] = estimators
    if not foundFinalEstimator:
      # error message typo fixed: "provdide" -> "provided"
      self.raiseAnError(IOError, 'final_estimator:', self.settings['final_estimator'], 'is not found among provided estimators:',
                        ','.join([name for name, _ in estimators]))
    self.initializeModel(self.settings)
| [
"sklearn.__version__.split"
] | [((1942, 1972), 'sklearn.__version__.split', 'sklearn.__version__.split', (['"""."""'], {}), "('.')\n", (1967, 1972), False, 'import sklearn\n')] |
import tkinter as tk
from abc import ABCMeta, abstractmethod
from ...frames.templates import FrameTemplate
from ...elements import AddButton, EditButton, DeleteButton
class ListFrameTemplate(FrameTemplate, metaclass=ABCMeta):
    """Abstract list frame composed of a head and a body section.

    Subclasses must build both sections; the static helpers construct the
    shared button bar and the comment/date info area.
    """

    def __init__(self, top, *args, **kw):
        super().__init__(top, *args, **kw)
        self.create_head_section(top)
        self.create_body_section(top)

    @abstractmethod
    def create_head_section(self, top):
        """Build the widgets of the head section inside *top*."""

    @abstractmethod
    def create_body_section(self, top):
        """Build the widgets of the body section inside *top*."""

    @staticmethod
    def create_btns_container(top):
        """Return a frame holding add/edit/delete buttons plus the button triple."""
        frame = tk.Frame(top)
        add_btn = AddButton(frame)
        add_btn.pack(side="left")
        edit_btn = EditButton(frame)
        edit_btn.pack(side="left", padx=10)
        delete_btn = DeleteButton(frame)
        delete_btn.pack(side="left")
        return frame, (add_btn, edit_btn, delete_btn)

    @staticmethod
    def create_comment_container(top):
        """Return a frame with date/comment labels and the three value labels."""
        frame = tk.Frame(top)
        # Left column: creation and modification dates.
        dates_frame = tk.Frame(frame)
        tk.Label(dates_frame,
                 text="Data dodania:",
                 font="bold").pack(anchor="w")
        adding_date = tk.Label(dates_frame)
        adding_date.pack(anchor="w")
        tk.Label(dates_frame,
                 text="Ostatnia modyfikacja:",
                 font="bold").pack(anchor="w")
        modify_date = tk.Label(dates_frame)
        modify_date.pack(anchor="w")
        # Right column: free-text comment.
        comment_frame = tk.Frame(frame)
        tk.Label(comment_frame,
                 text="Komentarz:",
                 font="bold").pack(anchor="w")
        comment = tk.Label(comment_frame, anchor='w', justify="left")
        comment.pack(anchor="w")
        dates_frame.pack(side="left", anchor="n")
        comment_frame.pack(side="left", anchor="n", padx=15)
        return frame, (adding_date, modify_date, comment)

    @staticmethod
    def set_list(top, tree, columns):
        """Apply *columns* (name -> relative width) to *tree*, sized to *top*."""
        top.update()
        total_width = top.winfo_width()
        tree.set_columns(list(columns.keys()))
        tree.set_columns_width(total_width, list(columns.values()))
| [
"tkinter.Frame",
"tkinter.Label"
] | [((612, 625), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (620, 625), True, 'import tkinter as tk\n'), ((1002, 1015), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (1010, 1015), True, 'import tkinter as tk\n'), ((1036, 1055), 'tkinter.Frame', 'tk.Frame', (['container'], {}), '(container)\n', (1044, 1055), True, 'import tkinter as tk\n'), ((1192, 1211), 'tkinter.Label', 'tk.Label', (['left_cont'], {}), '(left_cont)\n', (1200, 1211), True, 'import tkinter as tk\n'), ((1393, 1412), 'tkinter.Label', 'tk.Label', (['left_cont'], {}), '(left_cont)\n', (1401, 1412), True, 'import tkinter as tk\n'), ((1472, 1491), 'tkinter.Frame', 'tk.Frame', (['container'], {}), '(container)\n', (1480, 1491), True, 'import tkinter as tk\n'), ((1622, 1670), 'tkinter.Label', 'tk.Label', (['right_cont'], {'anchor': '"""w"""', 'justify': '"""left"""'}), "(right_cont, anchor='w', justify='left')\n", (1630, 1670), True, 'import tkinter as tk\n'), ((1064, 1118), 'tkinter.Label', 'tk.Label', (['left_cont'], {'text': '"""Data dodania:"""', 'font': '"""bold"""'}), "(left_cont, text='Data dodania:', font='bold')\n", (1072, 1118), True, 'import tkinter as tk\n'), ((1257, 1319), 'tkinter.Label', 'tk.Label', (['left_cont'], {'text': '"""Ostatnia modyfikacja:"""', 'font': '"""bold"""'}), "(left_cont, text='Ostatnia modyfikacja:', font='bold')\n", (1265, 1319), True, 'import tkinter as tk\n'), ((1500, 1552), 'tkinter.Label', 'tk.Label', (['right_cont'], {'text': '"""Komentarz:"""', 'font': '"""bold"""'}), "(right_cont, text='Komentarz:', font='bold')\n", (1508, 1552), True, 'import tkinter as tk\n')] |
"""Django command for rebuilding cohort statistics after import."""
import aldjemy
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.conf import settings
from projectroles.models import Project
from projectroles.plugins import get_backend_api
from variants.variant_stats import rebuild_project_variant_stats
from variants.helpers import SQLALCHEMY_ENGINE
# Timeline backend plugin handle — NOTE(review): not referenced in this module;
# verify it is needed here or imported for side effects.
timeline = get_backend_api("timeline_backend")
#: The User model to use.
User = get_user_model()
class Command(BaseCommand):
    """Implementation of rebuilding project-wide statistics.

    All steps are executed in a transaction, so no stale state is used or left in the database.
    """

    #: Help message displayed on the command line.
    # BUGFIX: the previous text ("Import case from PED file and varfish-annotator
    # output.") was copy-pasted from the import command and described the wrong
    # command entirely.
    help = "Rebuild project-wide variant statistics for a project."

    def add_arguments(self, parser):
        """Add the command's argument to the ``parser``."""
        parser.add_argument(
            # BUGFIX: help text previously said "add the case to" (copy-paste).
            "--project-uuid", help="UUID of the project to rebuild statistics for", required=True
        )

    @transaction.atomic
    def handle(self, *args, **options):
        """Perform rebuilding the statistics.

        Raises ``CommandError`` when the configured admin user or the project
        cannot be found.
        """
        try:
            self.stdout.write(
                "Rebuilding statistics as user: {}".format(settings.PROJECTROLES_ADMIN_OWNER)
            )
            admin = User.objects.get(username=settings.PROJECTROLES_ADMIN_OWNER)
        except User.DoesNotExist as e:
            raise CommandError(
                "Could not get configured admin user for stats rebuild with username {}".format(
                    settings.PROJECTROLES_ADMIN_OWNER
                )
            ) from e
        project = self._get_project(options["project_uuid"])
        rebuild_project_variant_stats(SQLALCHEMY_ENGINE, project, admin, self.stdout.write)
        self.stdout.write(self.style.SUCCESS("Done rebuilding project-wide stats"))

    def _get_project(self, project_uuid):
        """Get query or raise appropriate exception."""
        try:
            return Project.objects.get(sodar_uuid=project_uuid)
        except ObjectDoesNotExist:
            raise CommandError("Project with UUID {} does not exist".format(project_uuid))
| [
"projectroles.plugins.get_backend_api",
"django.contrib.auth.get_user_model",
"projectroles.models.Project.objects.get",
"variants.variant_stats.rebuild_project_variant_stats"
] | [((532, 567), 'projectroles.plugins.get_backend_api', 'get_backend_api', (['"""timeline_backend"""'], {}), "('timeline_backend')\n", (547, 567), False, 'from projectroles.plugins import get_backend_api\n'), ((603, 619), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (617, 619), False, 'from django.contrib.auth import get_user_model\n'), ((1842, 1930), 'variants.variant_stats.rebuild_project_variant_stats', 'rebuild_project_variant_stats', (['SQLALCHEMY_ENGINE', 'project', 'admin', 'self.stdout.write'], {}), '(SQLALCHEMY_ENGINE, project, admin, self.\n stdout.write)\n', (1871, 1930), False, 'from variants.variant_stats import rebuild_project_variant_stats\n'), ((2141, 2185), 'projectroles.models.Project.objects.get', 'Project.objects.get', ([], {'sodar_uuid': 'project_uuid'}), '(sodar_uuid=project_uuid)\n', (2160, 2185), False, 'from projectroles.models import Project\n')] |
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from .. import BaseModel, register_model
from .knowledge_base import KGEModel
@register_model("rotate")
class RotatE(KGEModel):
    r"""
    Implementation of RotatE model from the paper `"RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space"
    <https://openreview.net/forum?id=HkgEQnRqYQ>`.
    borrowed from `KnowledgeGraphEmbedding<https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding>`
    """

    def __init__(
        self, nentity, nrelation, hidden_dim, gamma, double_entity_embedding=False, double_relation_embedding=False
    ):
        # RotatE always needs doubled entity embeddings (real + imaginary parts).
        super(RotatE, self).__init__(nentity, nrelation, hidden_dim, gamma, True, double_relation_embedding)

    def score(self, head, relation, tail, mode):
        """Score triples by rotating head embeddings in the complex plane."""
        pi = 3.14159265358979323846

        # Split entity embeddings into real/imaginary halves along the feature dim.
        head_re, head_im = torch.chunk(head, 2, dim=2)
        tail_re, tail_im = torch.chunk(tail, 2, dim=2)

        # Make phases of relations uniformly distributed in [-pi, pi]
        phase = relation / (self.embedding_range.item() / pi)
        rel_re = torch.cos(phase)
        rel_im = torch.sin(phase)

        if mode == "head-batch":
            # Rotate the tail backwards and compare against the head.
            score_re = rel_re * tail_re + rel_im * tail_im - head_re
            score_im = rel_re * tail_im - rel_im * tail_re - head_im
        else:
            # Rotate the head forwards and compare against the tail.
            score_re = head_re * rel_re - head_im * rel_im - tail_re
            score_im = head_re * rel_im + head_im * rel_re - tail_im

        # Complex modulus per dimension, summed, subtracted from the margin.
        modulus = torch.stack([score_re, score_im], dim=0).norm(dim=0)
        return self.gamma.item() - modulus.sum(dim=2)
| [
"torch.cos",
"torch.chunk",
"torch.sin",
"torch.stack"
] | [((889, 916), 'torch.chunk', 'torch.chunk', (['head', '(2)'], {'dim': '(2)'}), '(head, 2, dim=2)\n', (900, 916), False, 'import torch\n'), ((944, 971), 'torch.chunk', 'torch.chunk', (['tail', '(2)'], {'dim': '(2)'}), '(tail, 2, dim=2)\n', (955, 971), False, 'import torch\n'), ((1138, 1163), 'torch.cos', 'torch.cos', (['phase_relation'], {}), '(phase_relation)\n', (1147, 1163), False, 'import torch\n'), ((1186, 1211), 'torch.sin', 'torch.sin', (['phase_relation'], {}), '(phase_relation)\n', (1195, 1211), False, 'import torch\n'), ((1721, 1761), 'torch.stack', 'torch.stack', (['[re_score, im_score]'], {'dim': '(0)'}), '([re_score, im_score], dim=0)\n', (1732, 1761), False, 'import torch\n')] |
import os
from os import mkdir
from os.path import join
from os.path import exists
import json
import importlib.resources
import jinja2
from jinja2 import Environment
from jinja2 import BaseLoader
# Load the frictionless datapackage schema template once at import time; the
# template text is cached in memory as a plain string.
with importlib.resources.path('mititools', 'fd_schema.json.jinja') as file:
    jinja_environment = Environment(loader=BaseLoader)
    # NOTE(review): the file object returned by open() is never closed
    # explicitly — relies on garbage collection.
    fd_schema_file_contents = open(file, 'rt').read()
from ..default_values import fd_package_path
from ..name_manipulation import create_table_filename
from ..name_manipulation import create_auxiliary_table_filename
def write_frictionless(top_variables, data_tables):
    """Write a frictionless data package: descriptor JSON plus TSV tables.

    :param top_variables: mapping of template variables used to render the
        ``datapackage.json`` descriptor
    :param data_tables: mapping of table name -> DataFrame; each frame is
        written as a tab-separated file inside ``fd_package_path``
    """
    json_str = render_json_data_package(top_variables)
    # Round-trip through json to validate the rendered template and normalize
    # the formatting of the descriptor.
    json_object = json.loads(json_str)
    payload = json.dumps(json_object, indent=2)
    json_filename = 'datapackage.json'
    # makedirs(..., exist_ok=True) avoids the check-then-create race of the
    # previous exists() + mkdir() pair.
    os.makedirs(fd_package_path, exist_ok=True)
    with open(join(fd_package_path, json_filename), 'wt') as f:
        f.write(payload)
    for tablename, df in data_tables.items():
        # Frames holding only a single 'value' column are auxiliary tables
        # and get a different filename scheme.
        if list(df.columns) != ['value']:
            filename = create_table_filename(tablename)
        else:
            filename = create_auxiliary_table_filename(tablename)
        df.to_csv(join(fd_package_path, filename), sep='\t', index=False)
def render_json_data_package(variables):
    """Render the cached frictionless schema template with *variables*."""
    return jinja_environment.from_string(fd_schema_file_contents).render(**variables)
| [
"os.path.exists",
"json.loads",
"jinja2.Environment",
"json.dumps",
"os.path.join",
"os.mkdir"
] | [((299, 329), 'jinja2.Environment', 'Environment', ([], {'loader': 'BaseLoader'}), '(loader=BaseLoader)\n', (310, 329), False, 'from jinja2 import Environment\n'), ((675, 695), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (685, 695), False, 'import json\n'), ((710, 743), 'json.dumps', 'json.dumps', (['json_object'], {'indent': '(2)'}), '(json_object, indent=2)\n', (720, 743), False, 'import json\n'), ((795, 818), 'os.path.exists', 'exists', (['fd_package_path'], {}), '(fd_package_path)\n', (801, 818), False, 'from os.path import exists\n'), ((828, 850), 'os.mkdir', 'mkdir', (['fd_package_path'], {}), '(fd_package_path)\n', (833, 850), False, 'from os import mkdir\n'), ((865, 901), 'os.path.join', 'join', (['fd_package_path', 'json_filename'], {}), '(fd_package_path, json_filename)\n', (869, 901), False, 'from os.path import join\n'), ((1183, 1214), 'os.path.join', 'join', (['fd_package_path', 'filename'], {}), '(fd_package_path, filename)\n', (1187, 1214), False, 'from os.path import join\n')] |
# Copyright 2018 <NAME> and Cable Television
# Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pkg_resources
from snaps_common.file import file_utils
from snaps_k8s.common.consts import consts
from snaps_k8s.common.utils import config_utils
class ConfigUtilsTests(unittest.TestCase):
    """
    Tests for snaps_k8s.common.utils.config_utils.py
    """
    def setUp(self):
        # Load the shared test deployment config and cache the sections that
        # most tests compare against.
        config_file = pkg_resources.resource_filename(
            'tests.conf', 'deployment.yaml')
        self.config = file_utils.read_yaml(config_file)
        self.node_list = self.config[consts.K8S_KEY][consts.NODE_CONF_KEY]
        self.network_list = self.config[consts.K8S_KEY][consts.NETWORKS_KEY]
        self.persis_vol = self.config[consts.K8S_KEY][consts.PERSIST_VOL_KEY]
    # Each test below calls a config_utils accessor and compares its result
    # against the raw section of the parsed YAML it is expected to return.
    def test_get_proxy_dict(self):
        """
        Ensures proxy values are properly parsed
        """
        proxy_dict = config_utils.get_proxy_dict(self.config)
        expected = self.config[consts.K8S_KEY][consts.PROXIES_KEY]
        self.assertEqual(expected, proxy_dict)
    def test_get_networks(self):
        """
        Ensures network values are properly parsed
        """
        networks_data = config_utils.get_networks(self.config)
        expected = self.config[consts.K8S_KEY][consts.NETWORKS_KEY]
        self.assertEqual(expected, networks_data)
    def test_get_multus_network(self):
        """
        Ensures Multus network configuration is properly parsed
        """
        multus_networks_data = config_utils.get_multus_network(self.config)
        mult_config = self.network_list[1][consts.MULTUS_NET_KEY]
        self.assertEqual(mult_config, multus_networks_data)
    def test_get_multus_net_elems(self):
        """
        Ensures Multus CNI elements are properly parsed
        """
        multus_net_elems = config_utils.get_multus_net_elems(self.config)
        expected = self.network_list[1][consts.MULTUS_NET_KEY][0][consts.MULTUS_CNI_KEY]
        self.assertEqual(expected, multus_net_elems)
    def test_get_multus_cni_cfgs(self):
        """
        Ensures Multus CNI element configuration is properly parsed
        """
        multus_cni_cfgs = config_utils.get_multus_cni_cfgs(self.config)
        expected = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
        self.assertEqual(expected, multus_cni_cfgs)
    # NOTE: the fixed indices below ([0]..[3]) encode the order of CNI config
    # entries in tests/conf/deployment.yaml.
    def test_get_multus_cni_flannel_cfgs(self):
        """
        Ensures Flannel network values are properly parsed
        """
        cni_cfg = config_utils.get_multus_cni_flannel_cfgs(self.config)
        multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
        expected = multus_cni[0][consts.FLANNEL_NET_TYPE]
        self.assertEqual(expected, cni_cfg)
    def test_multus_cni_macvlan_cfgs(self):
        """
        Ensures Macvlan network values are properly parsed
        """
        macvlan_cfg = config_utils.get_multus_cni_macvlan_cfgs(self.config)
        multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
        expected = multus_cni[2][consts.MACVLAN_NET_TYPE]
        self.assertEqual(expected, macvlan_cfg)
    def test_multus_cni_sriov_cfgs(self):
        """
        Ensures SRIOV network values are properly parsed
        """
        sriov_cfg = config_utils.get_multus_cni_sriov_cfgs(self.config)
        multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
        expected = multus_cni[3][consts.SRIOV_NET_TYPE]
        self.assertEqual(expected, sriov_cfg)
    def test_get_multus_cni_weave_cfgs(self):
        """
        Ensures Weave network values are properly parsed
        """
        weave_cfg = config_utils.get_multus_cni_weave_cfgs(self.config)
        multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
        expected = multus_cni[1][consts.WEAVE_NET_TYPE]
        self.assertEqual(expected, weave_cfg)
def test_is_multus_cni_enabled(self):
"""
Ensures Multus CNI status is properly parsed
"""
multus_cni = config_utils.is_multus_cni_enabled(self.config)
expected_multus_cni = False
cni_list = self.network_list[1][consts.MULTUS_NET_KEY][0][consts.MULTUS_CNI_KEY]
if (consts.SRIOV_TYPE or consts.FLANNEL_TYPE or consts.WEAVE_TYPE or consts.MACVLAN_TYPE) in cni_list:
expected_multus_cni = True
self.assertEqual(expected_multus_cni, multus_cni)
    def test_get_default_network(self):
        """
        Ensures default network values are properly parsed
        """
        default_network = config_utils.get_default_network(self.config)
        expected = self.network_list[0][consts.DFLT_NET_KEY]
        self.assertEqual(expected, default_network)
    def test_get_service_subnet(self):
        """
        Ensures service subnet value of the default network is properly parsed
        """
        service_subnet = config_utils.get_service_subnet(self.config)
        expected = self.network_list[0][consts.DFLT_NET_KEY][consts.SRVC_SUB_KEY]
        self.assertEqual(expected, service_subnet)
    def test_get_networking_plugin(self):
        """
        Ensures networking plugin value of the default network is properly parsed
        """
        networking_plugin = config_utils.get_networking_plugin(self.config)
        expected = self.network_list[0][consts.DFLT_NET_KEY][consts.NET_PLUGIN_KEY]
        self.assertEqual(expected, networking_plugin)
    def test_get_pod_subnet(self):
        """
        Ensures pod subnet value of the default network is properly parsed
        """
        pod_subnet = config_utils.get_pod_subnet(self.config)
        expected = self.network_list[0][consts.DFLT_NET_KEY][consts.POD_SUB_KEY]
        self.assertEqual(expected, pod_subnet)
    def test_get_version(self):
        """
        Ensures Kubernetes version is properly parsed
        """
        version_data = config_utils.get_version(self.config)
        expected = self.config[consts.K8S_KEY][consts.K8_VER_KEY]
        self.assertEqual(expected, version_data)
    def test_get_ha_config(self):
        """
        Ensures HA configuration values are properly parsed
        """
        ha_config = config_utils.get_ha_config(self.config)
        expected = self.config[consts.K8S_KEY][consts.HA_CONFIG_KEY]
        self.assertEqual(expected, ha_config)
    def test_get_ha_lb_ips(self):
        """
        Ensures HA loadbalancer IP values are properly parsed
        """
        ha_lb_ips = config_utils.get_ha_lb_ips(self.config)
        expected_lb_ips_list = list()
        for config_element in self.config[consts.K8S_KEY][consts.HA_CONFIG_KEY]:
            expected_lb_ips_list.append(config_element[consts.HA_API_EXT_LB_KEY][consts.IP_KEY])
        self.assertEqual(expected_lb_ips_list, ha_lb_ips)
    def test_get_node_configs(self):
        """
        Ensures node configuration settings are properly parsed
        """
        node_configs = config_utils.get_node_configs(self.config)
        expected = self.config[consts.K8S_KEY][consts.NODE_CONF_KEY]
        self.assertEqual(expected, node_configs)
    def test_get_hostname_ips_dict(self):
        """
        Ensures hostnames and IPs of the nodes are properly parsed
        """
        hostname_ips_dict = config_utils.get_hostname_ips_dict(self.config)
        hostname_ips = dict()
        for node in self.node_list:
            hostname_ips[node[consts.HOST_KEY][consts.HOSTNAME_KEY]] = node[consts.HOST_KEY][consts.IP_KEY]
        self.assertEqual(hostname_ips, hostname_ips_dict)
    def test_get_host_reg_port_dict(self):
        """
        Ensures hostnames and registry port value of the nodes are properly parsed
        """
        host_reg_port_dict = config_utils.get_host_reg_port_dict(self.config)
        host_reg_port = dict()
        for node in self.node_list:
            host_reg_port[node[consts.HOST_KEY][consts.HOSTNAME_KEY]] = node[consts.HOST_KEY][consts.REG_PORT_KEY]
        self.assertEqual(host_reg_port, host_reg_port_dict)
    def test_get_host_ips(self):
        """
        Ensures the list of host IPs are properly parsed
        """
        host_ips = config_utils.get_host_ips(self.config)
        host_ips_cfg = list()
        for node in self.node_list:
            host_ips_cfg.append(node[consts.HOST_KEY][consts.IP_KEY])
        self.assertEqual(host_ips_cfg, host_ips)
    def test_get_hosts(self):
        """
        Ensures the list of hostnames of the configured nodes are properly parsed
        """
        hosts = config_utils.get_hosts(self.config)
        host_cfg = list()
        for node in self.node_list:
            host_cfg.append(node[consts.HOST_KEY][consts.HOSTNAME_KEY])
        self.assertEqual(host_cfg, hosts)
    def test_get_basic_auth(self):
        """
        Ensures the basic authentication settings are properly parsed
        """
        basic_auth = config_utils.get_basic_auth(self.config)
        expected = self.config[consts.K8S_KEY][consts.BASIC_AUTH_KEY]
        self.assertEqual(expected, basic_auth)
    def test_get_project_name(self):
        """
        Ensures the project name value is properly parsed
        """
        project_name = config_utils.get_project_name(self.config)
        expected = self.config[consts.K8S_KEY][consts.PROJECT_NAME_KEY]
        self.assertEqual(expected, project_name)
    def test_get_artifact_dir(self):
        """
        Ensures the artifact directory location is properly parsed
        """
        artifact_dir = config_utils.get_artifact_dir(self.config)
        # '~/tmp' mirrors the artifact dir configured in tests/conf/deployment.yaml
        expected = os.path.expanduser('~/tmp')
        self.assertEqual(expected, artifact_dir)
    def test_get_project_dir(self):
        """
        Ensures the project location is properly parsed
        """
        expected_artifact_dir = os.path.expanduser('~/tmp')
        project_name = config_utils.get_project_name(self.config)
        expected = "{}/{}/{}".format(
            expected_artifact_dir, consts.PROJ_DIR_NAME, project_name)
        proj_dir = config_utils.get_project_artifact_dir(self.config)
        self.assertEqual(expected, proj_dir)
    def test_get_kubespray_dir(self):
        """
        Ensures the kubespray location is properly parsed
        """
        expected_artifact_dir = os.path.expanduser('~/tmp')
        expected = "{}/{}".format(expected_artifact_dir,
                                  consts.KUBESPRAY_FOLDER_NAME)
        proj_dir = config_utils.get_kubespray_dir(self.config)
        self.assertEqual(expected, proj_dir)
    def test_get_docker_repo(self):
        """
        Ensures the Docker Repo settings are properly parsed
        """
        docker_repo = config_utils.get_docker_repo(self.config)
        expected = self.config[consts.K8S_KEY][consts.DOCKER_REPO_KEY]
        self.assertEqual(expected, docker_repo)
    def test_get_persis_vol(self):
        """
        Ensures the Persistent Volume settings are properly parsed
        """
        persis_vol = config_utils.get_persist_vol(self.config)
        expected = self.persis_vol
        self.assertEqual(expected, persis_vol)
    def test_get_ceph_vol(self):
        """
        Ensures the Ceph Volume settings are properly parsed
        """
        ceph_vol = config_utils.get_ceph_vol(self.config)
        expected = self.persis_vol[consts.CEPH_VOLUME_KEY]
        self.assertEqual(expected, ceph_vol)
    def test_get_ceph_hosts(self):
        """
        Ensures the Ceph host settings are properly parsed
        """
        ceph_hosts = config_utils.get_ceph_hosts(self.config)
        ceph_hosts_cfg = list()
        if self.config[consts.K8S_KEY][consts.PERSIST_VOL_KEY][consts.CEPH_VOLUME_KEY]:
            for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
                if consts.HOST_KEY in ceph_host:
                    ceph_hosts_cfg.append(ceph_host[consts.HOST_KEY])
        self.assertEqual(ceph_hosts_cfg, ceph_hosts)
    def test_get_ceph_hosts_info(self):
        """
        Ensures the hostname, IP and type value of the Ceph hosts are properly parsed
        """
        ceph_hosts_info = config_utils.get_ceph_hosts_info(self.config)
        ceph_hosts_info_cfg = list()
        for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
            ceph_hosts_info_cfg.append((ceph_host[consts.HOST_KEY][consts.HOSTNAME_KEY],
                                        ceph_host[consts.HOST_KEY][consts.IP_KEY],
                                        ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
        self.assertEqual(ceph_hosts_info_cfg, ceph_hosts_info)
    def test_get_ceph_ctrls(self):
        """
        Ensures the Ceph control host configuration is properly parsed
        """
        ceph_ctrls = config_utils.get_ceph_ctrls(self.config)
        ceph_ctrls_cfg = list()
        for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
            if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_CTRL_TYPE:
                ceph_ctrls_cfg.append(ceph_host[consts.HOST_KEY])
        self.assertEqual(ceph_ctrls_cfg, ceph_ctrls)
    def test_get_ceph_ctrls_info(self):
        """
        Ensures the hostname, IP and type value of the Ceph control hosts are properly parsed
        """
        ceph_ctrls_info = config_utils.get_ceph_ctrls_info(self.config)
        ceph_ctrls_info_cfg = list()
        for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
            if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_CTRL_TYPE:
                ceph_ctrls_info_cfg.append((ceph_host[consts.HOST_KEY][consts.HOSTNAME_KEY],
                                            ceph_host[consts.HOST_KEY][consts.IP_KEY],
                                            ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
        self.assertEqual(ceph_ctrls_info_cfg, ceph_ctrls_info)
    def test_get_ceph_osds(self):
        """
        Ensures the Ceph OSD host settings are properly parsed
        """
        ceph_osds = config_utils.get_ceph_osds(self.config)
        ceph_osds_cfg = list()
        for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
            if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_OSD_TYPE:
                ceph_osds_cfg.append(ceph_host[consts.HOST_KEY])
        self.assertEqual(ceph_osds_cfg, ceph_osds)
    def test_get_ceph_osds_info(self):
        """
        Ensures the hostname, IP and type value of the Ceph OSD hosts are properly parsed
        """
        ceph_osds_info = config_utils.get_ceph_osds_info(self.config)
        ceph_osds_info_cfg = list()
        for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
            if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_OSD_TYPE:
                ceph_osds_info_cfg.append((ceph_host[consts.HOST_KEY][consts.HOSTNAME_KEY],
                                           ceph_host[consts.HOST_KEY][consts.IP_KEY],
                                           ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
        self.assertEqual(ceph_osds_info_cfg, ceph_osds_info)
    def test_get_host_vol(self):
        """
        Ensures the Host Volume settings are properly parsed
        """
        host_vol = config_utils.get_host_vol(self.config)
        expected = self.persis_vol[consts.HOST_VOL_KEY]
        self.assertEqual(expected, host_vol)
    def test_get_persist_vol_claims(self):
        """
        Ensures the Claim parameters of the Host Volume are properly parsed
        """
        persist_vol_claims = config_utils.get_persist_vol_claims(self.config)
        persist_vol_claims_cfg = list()
        for persist_vol in self.persis_vol[consts.HOST_VOL_KEY]:
            if consts.CLAIM_PARAMS_KEY in persist_vol:
                persist_vol_claims_cfg.append(persist_vol[consts.CLAIM_PARAMS_KEY])
        self.assertEqual(persist_vol_claims_cfg, persist_vol_claims)
def test_get_first_master_host(self):
"""
Ensures the hostname and IP of the first master host found in the config are properly parsed
"""
hostname, ip = config_utils.get_first_master_host(self.config)
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER:
hostname_cfg, ip_cfg = node[consts.HOST_KEY][consts.HOSTNAME_KEY], node[consts.HOST_KEY][consts.IP_KEY]
break
self.assertItemsEqual((hostname_cfg, ip_cfg), (hostname, ip))
def test_get_nodes_ip_name_type(self):
"""
Ensures the hostname, IP and type value of all configured hosts are properly parsed
"""
nodes_ip_name_type = config_utils.get_nodes_ip_name_type(self.config)
nodes_ip_name_type_cfg = list()
for node in self.node_list:
nodes_ip_name_type_cfg.append((node[consts.HOST_KEY][consts.HOSTNAME_KEY],
node[consts.HOST_KEY][consts.IP_KEY],
node[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(nodes_ip_name_type_cfg, nodes_ip_name_type)
def test_get_master_nodes_ip_name_type(self):
"""
Ensures the hostname, IP and type value of all configured master hosts are properly parsed
"""
master_ip_name_type = config_utils.get_master_nodes_ip_name_type(self.config)
master_ip_name_type_cfg = list()
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER:
master_ip_name_type_cfg.append((node[consts.HOST_KEY][consts.HOSTNAME_KEY],
node[consts.HOST_KEY][consts.IP_KEY],
node[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(master_ip_name_type_cfg, master_ip_name_type)
def test_get_master_node_ips(self):
"""
Ensures the IP address of all configured master hosts are properly parsed
"""
master_node_ips = config_utils.get_master_node_ips(self.config)
master_node_ips_cfg = list()
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER:
master_node_ips_cfg.append(node[consts.HOST_KEY][consts.IP_KEY])
self.assertEqual(master_node_ips_cfg, master_node_ips)
def test_get_minion_nodes_ip_name_type(self):
    """
    Verify that the (hostname, IP, type) triples of every configured
    minion host are parsed in order.
    """
    actual = config_utils.get_minion_nodes_ip_name_type(self.config)
    expected = [
        (node[consts.HOST_KEY][consts.HOSTNAME_KEY],
         node[consts.HOST_KEY][consts.IP_KEY],
         node[consts.HOST_KEY][consts.NODE_TYPE_KEY])
        for node in self.node_list
        if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] ==
        consts.NODE_TYPE_MINION]
    self.assertEqual(expected, actual)
def test_get_minion_node_ips(self):
    """
    Verify that the IP address of every configured minion host is parsed.
    """
    actual = config_utils.get_minion_node_ips(self.config)
    expected = [
        node[consts.HOST_KEY][consts.IP_KEY]
        for node in self.node_list
        if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] ==
        consts.NODE_TYPE_MINION]
    self.assertItemsEqual(expected, actual)
def test_is_logging_enabled(self):
    """
    Tests to ensure that different string and boolean values return their
    expected truthiness from is_logging_enabled().

    Fix: the original built `this_cfg` with a shallow `dict.update`, so
    mutating this_cfg[K8S_KEY][...] also mutated the nested dict shared
    with self.config (polluting the fixture for other tests), and the
    asserts only passed on self.config because of that aliasing.  A deep
    copy is used instead and the asserts target the config actually
    mutated.
    """
    import copy
    this_cfg = copy.deepcopy(self.config)
    # Values the implementation must treat as "enabled".
    for value in (True, 'True', 'true', 'yes'):
        this_cfg[consts.K8S_KEY][consts.ENABLE_LOG_KEY] = value
        self.assertTrue(config_utils.is_logging_enabled(this_cfg))
    # Everything else — including arbitrary strings and None — is "disabled".
    for value in ('foo', False, 'False', 'false', 'no', None):
        this_cfg[consts.K8S_KEY][consts.ENABLE_LOG_KEY] = value
        self.assertFalse(config_utils.is_logging_enabled(this_cfg))
def test_get_log_level(self):
    """
    Verify that the configured logging level is parsed unchanged.
    """
    expected = self.config[consts.K8S_KEY][consts.LOG_LEVEL_KEY]
    self.assertEqual(expected, config_utils.get_log_level(self.config))
def test_get_logging_port(self):
    """
    Ensures that the port returned is what is expected and is always a
    string.

    Fix: the original built `this_cfg` with a shallow `dict.update`, so
    assigning this_cfg[K8S_KEY][LOG_PORT_KEY] = 1000 also mutated the
    nested dict shared with self.config, leaking into other tests.  A
    deep copy keeps the fixture pristine.
    """
    expected_port = self.config[consts.K8S_KEY][consts.LOG_PORT_KEY]
    port = config_utils.get_logging_port(self.config)
    self.assertEqual(expected_port, port)
    # A numeric port value must be returned as a string.
    import copy
    this_cfg = copy.deepcopy(self.config)
    this_cfg[consts.K8S_KEY][consts.LOG_PORT_KEY] = 1000
    self.assertEqual('1000', config_utils.get_logging_port(this_cfg))
def test_is_cpu_alloc(self):
    """
    Tests to ensure that different string and boolean values return their
    expected truthiness from is_cpu_alloc().

    Fix: the original built `this_cfg` with a shallow `dict.update`, so
    mutating this_cfg[K8S_KEY][...] also mutated the nested dict shared
    with self.config (polluting the fixture for other tests), and the
    asserts only passed on self.config because of that aliasing.  A deep
    copy is used instead and the asserts target the config actually
    mutated.
    """
    import copy
    this_cfg = copy.deepcopy(self.config)
    # Values the implementation must treat as "enabled".
    for value in (True, 'True', 'true', 'yes'):
        this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = value
        self.assertTrue(config_utils.is_cpu_alloc(this_cfg))
    # Everything else — including arbitrary strings and None — is "disabled".
    for value in ('foo', False, 'False', 'false', 'no', None):
        this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = value
        self.assertFalse(config_utils.is_cpu_alloc(this_cfg))
def test_is_metrics_server(self):
    """
    Tests to ensure that different string and boolean values return their
    expected truthiness from is_metrics_server_enabled().

    Fix: the original built `this_cfg` with a shallow `dict.update`, so
    mutating this_cfg[K8S_KEY][...] also mutated the nested dict shared
    with self.config (polluting the fixture for other tests), and the
    asserts only passed on self.config because of that aliasing.  A deep
    copy is used instead and the asserts target the config actually
    mutated.
    """
    import copy
    this_cfg = copy.deepcopy(self.config)
    # Values the implementation must treat as "enabled".
    for value in (True, 'True', 'true', 'yes'):
        this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = value
        self.assertTrue(config_utils.is_metrics_server_enabled(this_cfg))
    # Everything else — including arbitrary strings and None — is "disabled".
    for value in ('foo', False, 'False', 'false', 'no', None):
        this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = value
        self.assertFalse(config_utils.is_metrics_server_enabled(this_cfg))
def test_get_password(self):
    """
    Every configured node must resolve to the expected password.
    """
    for node_conf in config_utils.get_node_configs(self.config):
        hostname = node_conf[consts.HOST_KEY][consts.HOSTNAME_KEY]
        self.assertEqual(
            'password',
            config_utils.get_node_password(self.config, hostname))
| [
"snaps_k8s.common.utils.config_utils.get_minion_node_ips",
"snaps_k8s.common.utils.config_utils.get_hosts",
"snaps_k8s.common.utils.config_utils.get_project_name",
"snaps_k8s.common.utils.config_utils.get_ceph_osds_info",
"snaps_k8s.common.utils.config_utils.is_cpu_alloc",
"snaps_k8s.common.utils.config_u... | [((947, 1011), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""tests.conf"""', '"""deployment.yaml"""'], {}), "('tests.conf', 'deployment.yaml')\n", (978, 1011), False, 'import pkg_resources\n'), ((1047, 1080), 'snaps_common.file.file_utils.read_yaml', 'file_utils.read_yaml', (['config_file'], {}), '(config_file)\n', (1067, 1080), False, 'from snaps_common.file import file_utils\n'), ((1441, 1481), 'snaps_k8s.common.utils.config_utils.get_proxy_dict', 'config_utils.get_proxy_dict', (['self.config'], {}), '(self.config)\n', (1468, 1481), False, 'from snaps_k8s.common.utils import config_utils\n'), ((1729, 1767), 'snaps_k8s.common.utils.config_utils.get_networks', 'config_utils.get_networks', (['self.config'], {}), '(self.config)\n', (1754, 1767), False, 'from snaps_k8s.common.utils import config_utils\n'), ((2045, 2089), 'snaps_k8s.common.utils.config_utils.get_multus_network', 'config_utils.get_multus_network', (['self.config'], {}), '(self.config)\n', (2076, 2089), False, 'from snaps_k8s.common.utils import config_utils\n'), ((2365, 2411), 'snaps_k8s.common.utils.config_utils.get_multus_net_elems', 'config_utils.get_multus_net_elems', (['self.config'], {}), '(self.config)\n', (2398, 2411), False, 'from snaps_k8s.common.utils import config_utils\n'), ((2713, 2758), 'snaps_k8s.common.utils.config_utils.get_multus_cni_cfgs', 'config_utils.get_multus_cni_cfgs', (['self.config'], {}), '(self.config)\n', (2745, 2758), False, 'from snaps_k8s.common.utils import config_utils\n'), ((3057, 3110), 'snaps_k8s.common.utils.config_utils.get_multus_cni_flannel_cfgs', 'config_utils.get_multus_cni_flannel_cfgs', (['self.config'], {}), '(self.config)\n', (3097, 3110), False, 'from snaps_k8s.common.utils import config_utils\n'), ((3461, 3514), 'snaps_k8s.common.utils.config_utils.get_multus_cni_macvlan_cfgs', 'config_utils.get_multus_cni_macvlan_cfgs', (['self.config'], {}), '(self.config)\n', (3501, 3514), False, 
'from snaps_k8s.common.utils import config_utils\n'), ((3863, 3914), 'snaps_k8s.common.utils.config_utils.get_multus_cni_sriov_cfgs', 'config_utils.get_multus_cni_sriov_cfgs', (['self.config'], {}), '(self.config)\n', (3901, 3914), False, 'from snaps_k8s.common.utils import config_utils\n'), ((4263, 4314), 'snaps_k8s.common.utils.config_utils.get_multus_cni_weave_cfgs', 'config_utils.get_multus_cni_weave_cfgs', (['self.config'], {}), '(self.config)\n', (4301, 4314), False, 'from snaps_k8s.common.utils import config_utils\n'), ((4656, 4703), 'snaps_k8s.common.utils.config_utils.is_multus_cni_enabled', 'config_utils.is_multus_cni_enabled', (['self.config'], {}), '(self.config)\n', (4690, 4703), False, 'from snaps_k8s.common.utils import config_utils\n'), ((5187, 5232), 'snaps_k8s.common.utils.config_utils.get_default_network', 'config_utils.get_default_network', (['self.config'], {}), '(self.config)\n', (5219, 5232), False, 'from snaps_k8s.common.utils import config_utils\n'), ((5514, 5558), 'snaps_k8s.common.utils.config_utils.get_service_subnet', 'config_utils.get_service_subnet', (['self.config'], {}), '(self.config)\n', (5545, 5558), False, 'from snaps_k8s.common.utils import config_utils\n'), ((5869, 5916), 'snaps_k8s.common.utils.config_utils.get_networking_plugin', 'config_utils.get_networking_plugin', (['self.config'], {}), '(self.config)\n', (5903, 5916), False, 'from snaps_k8s.common.utils import config_utils\n'), ((6211, 6251), 'snaps_k8s.common.utils.config_utils.get_pod_subnet', 'config_utils.get_pod_subnet', (['self.config'], {}), '(self.config)\n', (6238, 6251), False, 'from snaps_k8s.common.utils import config_utils\n'), ((6514, 6551), 'snaps_k8s.common.utils.config_utils.get_version', 'config_utils.get_version', (['self.config'], {}), '(self.config)\n', (6538, 6551), False, 'from snaps_k8s.common.utils import config_utils\n'), ((6806, 6845), 'snaps_k8s.common.utils.config_utils.get_ha_config', 'config_utils.get_ha_config', (['self.config'], {}), 
'(self.config)\n', (6832, 6845), False, 'from snaps_k8s.common.utils import config_utils\n'), ((7102, 7141), 'snaps_k8s.common.utils.config_utils.get_ha_lb_ips', 'config_utils.get_ha_lb_ips', (['self.config'], {}), '(self.config)\n', (7128, 7141), False, 'from snaps_k8s.common.utils import config_utils\n'), ((7565, 7607), 'snaps_k8s.common.utils.config_utils.get_node_configs', 'config_utils.get_node_configs', (['self.config'], {}), '(self.config)\n', (7594, 7607), False, 'from snaps_k8s.common.utils import config_utils\n'), ((7888, 7935), 'snaps_k8s.common.utils.config_utils.get_hostname_ips_dict', 'config_utils.get_hostname_ips_dict', (['self.config'], {}), '(self.config)\n', (7922, 7935), False, 'from snaps_k8s.common.utils import config_utils\n'), ((8348, 8396), 'snaps_k8s.common.utils.config_utils.get_host_reg_port_dict', 'config_utils.get_host_reg_port_dict', (['self.config'], {}), '(self.config)\n', (8383, 8396), False, 'from snaps_k8s.common.utils import config_utils\n'), ((8773, 8811), 'snaps_k8s.common.utils.config_utils.get_host_ips', 'config_utils.get_host_ips', (['self.config'], {}), '(self.config)\n', (8798, 8811), False, 'from snaps_k8s.common.utils import config_utils\n'), ((9150, 9185), 'snaps_k8s.common.utils.config_utils.get_hosts', 'config_utils.get_hosts', (['self.config'], {}), '(self.config)\n', (9172, 9185), False, 'from snaps_k8s.common.utils import config_utils\n'), ((9513, 9553), 'snaps_k8s.common.utils.config_utils.get_basic_auth', 'config_utils.get_basic_auth', (['self.config'], {}), '(self.config)\n', (9540, 9553), False, 'from snaps_k8s.common.utils import config_utils\n'), ((9814, 9856), 'snaps_k8s.common.utils.config_utils.get_project_name', 'config_utils.get_project_name', (['self.config'], {}), '(self.config)\n', (9843, 9856), False, 'from snaps_k8s.common.utils import config_utils\n'), ((10130, 10172), 'snaps_k8s.common.utils.config_utils.get_artifact_dir', 'config_utils.get_artifact_dir', (['self.config'], {}), '(self.config)\n', 
(10159, 10172), False, 'from snaps_k8s.common.utils import config_utils\n'), ((10192, 10219), 'os.path.expanduser', 'os.path.expanduser', (['"""~/tmp"""'], {}), "('~/tmp')\n", (10210, 10219), False, 'import os\n'), ((10418, 10445), 'os.path.expanduser', 'os.path.expanduser', (['"""~/tmp"""'], {}), "('~/tmp')\n", (10436, 10445), False, 'import os\n'), ((10469, 10511), 'snaps_k8s.common.utils.config_utils.get_project_name', 'config_utils.get_project_name', (['self.config'], {}), '(self.config)\n', (10498, 10511), False, 'from snaps_k8s.common.utils import config_utils\n'), ((10641, 10691), 'snaps_k8s.common.utils.config_utils.get_project_artifact_dir', 'config_utils.get_project_artifact_dir', (['self.config'], {}), '(self.config)\n', (10678, 10691), False, 'from snaps_k8s.common.utils import config_utils\n'), ((10890, 10917), 'os.path.expanduser', 'os.path.expanduser', (['"""~/tmp"""'], {}), "('~/tmp')\n", (10908, 10917), False, 'import os\n'), ((11059, 11102), 'snaps_k8s.common.utils.config_utils.get_kubespray_dir', 'config_utils.get_kubespray_dir', (['self.config'], {}), '(self.config)\n', (11089, 11102), False, 'from snaps_k8s.common.utils import config_utils\n'), ((11292, 11333), 'snaps_k8s.common.utils.config_utils.get_docker_repo', 'config_utils.get_docker_repo', (['self.config'], {}), '(self.config)\n', (11320, 11333), False, 'from snaps_k8s.common.utils import config_utils\n'), ((11601, 11642), 'snaps_k8s.common.utils.config_utils.get_persist_vol', 'config_utils.get_persist_vol', (['self.config'], {}), '(self.config)\n', (11629, 11642), False, 'from snaps_k8s.common.utils import config_utils\n'), ((11863, 11901), 'snaps_k8s.common.utils.config_utils.get_ceph_vol', 'config_utils.get_ceph_vol', (['self.config'], {}), '(self.config)\n', (11888, 11901), False, 'from snaps_k8s.common.utils import config_utils\n'), ((12146, 12186), 'snaps_k8s.common.utils.config_utils.get_ceph_hosts', 'config_utils.get_ceph_hosts', (['self.config'], {}), '(self.config)\n', (12173, 
12186), False, 'from snaps_k8s.common.utils import config_utils\n'), ((12726, 12771), 'snaps_k8s.common.utils.config_utils.get_ceph_hosts_info', 'config_utils.get_ceph_hosts_info', (['self.config'], {}), '(self.config)\n', (12758, 12771), False, 'from snaps_k8s.common.utils import config_utils\n'), ((13353, 13393), 'snaps_k8s.common.utils.config_utils.get_ceph_ctrls', 'config_utils.get_ceph_ctrls', (['self.config'], {}), '(self.config)\n', (13380, 13393), False, 'from snaps_k8s.common.utils import config_utils\n'), ((13886, 13931), 'snaps_k8s.common.utils.config_utils.get_ceph_ctrls_info', 'config_utils.get_ceph_ctrls_info', (['self.config'], {}), '(self.config)\n', (13918, 13931), False, 'from snaps_k8s.common.utils import config_utils\n'), ((14605, 14644), 'snaps_k8s.common.utils.config_utils.get_ceph_osds', 'config_utils.get_ceph_osds', (['self.config'], {}), '(self.config)\n', (14631, 14644), False, 'from snaps_k8s.common.utils import config_utils\n'), ((15126, 15170), 'snaps_k8s.common.utils.config_utils.get_ceph_osds_info', 'config_utils.get_ceph_osds_info', (['self.config'], {}), '(self.config)\n', (15157, 15170), False, 'from snaps_k8s.common.utils import config_utils\n'), ((15833, 15871), 'snaps_k8s.common.utils.config_utils.get_host_vol', 'config_utils.get_host_vol', (['self.config'], {}), '(self.config)\n', (15858, 15871), False, 'from snaps_k8s.common.utils import config_utils\n'), ((16146, 16194), 'snaps_k8s.common.utils.config_utils.get_persist_vol_claims', 'config_utils.get_persist_vol_claims', (['self.config'], {}), '(self.config)\n', (16181, 16194), False, 'from snaps_k8s.common.utils import config_utils\n'), ((16699, 16746), 'snaps_k8s.common.utils.config_utils.get_first_master_host', 'config_utils.get_first_master_host', (['self.config'], {}), '(self.config)\n', (16733, 16746), False, 'from snaps_k8s.common.utils import config_utils\n'), ((17271, 17319), 'snaps_k8s.common.utils.config_utils.get_nodes_ip_name_type', 
'config_utils.get_nodes_ip_name_type', (['self.config'], {}), '(self.config)\n', (17306, 17319), False, 'from snaps_k8s.common.utils import config_utils\n'), ((17926, 17981), 'snaps_k8s.common.utils.config_utils.get_master_nodes_ip_name_type', 'config_utils.get_master_nodes_ip_name_type', (['self.config'], {}), '(self.config)\n', (17968, 17981), False, 'from snaps_k8s.common.utils import config_utils\n'), ((18662, 18707), 'snaps_k8s.common.utils.config_utils.get_master_node_ips', 'config_utils.get_master_node_ips', (['self.config'], {}), '(self.config)\n', (18694, 18707), False, 'from snaps_k8s.common.utils import config_utils\n'), ((19216, 19271), 'snaps_k8s.common.utils.config_utils.get_minion_nodes_ip_name_type', 'config_utils.get_minion_nodes_ip_name_type', (['self.config'], {}), '(self.config)\n', (19258, 19271), False, 'from snaps_k8s.common.utils import config_utils\n'), ((19952, 19997), 'snaps_k8s.common.utils.config_utils.get_minion_node_ips', 'config_utils.get_minion_node_ips', (['self.config'], {}), '(self.config)\n', (19984, 19997), False, 'from snaps_k8s.common.utils import config_utils\n'), ((22115, 22154), 'snaps_k8s.common.utils.config_utils.get_log_level', 'config_utils.get_log_level', (['self.config'], {}), '(self.config)\n', (22141, 22154), False, 'from snaps_k8s.common.utils import config_utils\n'), ((22451, 22493), 'snaps_k8s.common.utils.config_utils.get_logging_port', 'config_utils.get_logging_port', (['self.config'], {}), '(self.config)\n', (22480, 22493), False, 'from snaps_k8s.common.utils import config_utils\n'), ((22737, 22776), 'snaps_k8s.common.utils.config_utils.get_logging_port', 'config_utils.get_logging_port', (['this_cfg'], {}), '(this_cfg)\n', (22766, 22776), False, 'from snaps_k8s.common.utils import config_utils\n'), ((26072, 26114), 'snaps_k8s.common.utils.config_utils.get_node_configs', 'config_utils.get_node_configs', (['self.config'], {}), '(self.config)\n', (26101, 26114), False, 'from snaps_k8s.common.utils import 
config_utils\n'), ((20620, 20664), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (20651, 20664), False, 'from snaps_k8s.common.utils import config_utils\n'), ((20756, 20800), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (20787, 20800), False, 'from snaps_k8s.common.utils import config_utils\n'), ((20892, 20936), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (20923, 20936), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21027, 21071), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (21058, 21071), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21163, 21207), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (21194, 21207), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21299, 21343), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (21330, 21343), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21437, 21481), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (21468, 21481), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21575, 21619), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (21606, 21619), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21710, 21754), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), 
'(self.config)\n', (21741, 21754), False, 'from snaps_k8s.common.utils import config_utils\n'), ((21845, 21889), 'snaps_k8s.common.utils.config_utils.is_logging_enabled', 'config_utils.is_logging_enabled', (['self.config'], {}), '(self.config)\n', (21876, 21889), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23122, 23160), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23147, 23160), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23251, 23289), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23276, 23289), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23380, 23418), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23405, 23418), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23508, 23546), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23533, 23546), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23637, 23675), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23662, 23675), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23766, 23804), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23791, 23804), False, 'from snaps_k8s.common.utils import config_utils\n'), ((23897, 23935), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (23922, 23935), False, 'from snaps_k8s.common.utils import config_utils\n'), ((24028, 24066), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (24053, 24066), 
False, 'from snaps_k8s.common.utils import config_utils\n'), ((24156, 24194), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (24181, 24194), False, 'from snaps_k8s.common.utils import config_utils\n'), ((24284, 24322), 'snaps_k8s.common.utils.config_utils.is_cpu_alloc', 'config_utils.is_cpu_alloc', (['self.config'], {}), '(self.config)\n', (24309, 24322), False, 'from snaps_k8s.common.utils import config_utils\n'), ((24640, 24691), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (24678, 24691), False, 'from snaps_k8s.common.utils import config_utils\n'), ((24787, 24838), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (24825, 24838), False, 'from snaps_k8s.common.utils import config_utils\n'), ((24934, 24985), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (24972, 24985), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25080, 25131), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (25118, 25131), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25227, 25278), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (25265, 25278), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25374, 25425), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (25412, 25425), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25523, 25574), 
'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (25561, 25574), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25672, 25723), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (25710, 25723), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25818, 25869), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (25856, 25869), False, 'from snaps_k8s.common.utils import config_utils\n'), ((25964, 26015), 'snaps_k8s.common.utils.config_utils.is_metrics_server_enabled', 'config_utils.is_metrics_server_enabled', (['self.config'], {}), '(self.config)\n', (26002, 26015), False, 'from snaps_k8s.common.utils import config_utils\n'), ((26175, 26272), 'snaps_k8s.common.utils.config_utils.get_node_password', 'config_utils.get_node_password', (['self.config', 'node_conf[consts.HOST_KEY][consts.HOSTNAME_KEY]'], {}), '(self.config, node_conf[consts.HOST_KEY][\n consts.HOSTNAME_KEY])\n', (26205, 26272), False, 'from snaps_k8s.common.utils import config_utils\n')] |
from pathlib import Path
from typing import List
from src.user_errors import NoItemToRenameError
from src.user_types import Inode, InodesPaths
def paths_to_inodes_paths(paths: List[Path]) -> InodesPaths:
    """
    Given a list of paths, return a mapping from inodes to paths.

    Args:
        paths: list of Path objects
    Raises:
        FileNotFoundError: if any of the paths does not exist.
        NoItemToRenameError: empty single text file as a command-line argument.
    Returns:
        A mapping from inodes to paths.
    """
    inodes_to_paths = {}
    missing = []
    for path in paths:
        if not path.exists():
            missing.append(path)
        else:
            inodes_to_paths[Inode(path.stat().st_ino)] = path
    if missing:
        count = len(missing)
        plural = "" if count == 1 else "s"  # same as the 's'[:n^1] trick
        raise FileNotFoundError(
            f"{count} missing item{plural}: {list(map(str, missing))}."
        )
    if not inodes_to_paths:
        raise NoItemToRenameError("No item to rename was provided.")
    return inodes_to_paths
| [
"src.user_errors.NoItemToRenameError"
] | [((925, 979), 'src.user_errors.NoItemToRenameError', 'NoItemToRenameError', (['"""No item to rename was provided."""'], {}), "('No item to rename was provided.')\n", (944, 979), False, 'from src.user_errors import NoItemToRenameError\n')] |
import re
import gensim.utils as gensim_utils
def normalize_text_proximity(message):
    """ Lower-case the text, strip Spanish acute accents from vowels and
    delete dots that sit directly between two letters (e.g. "a.b" -> "ab").

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    # One C-level translate pass replaces the five chained .replace calls;
    # lower() runs first, so accented capitals are normalized too.
    sent = message.lower().translate(str.maketrans("áéíóú", "aeiou"))
    # Remove a dot only when both neighbours are letters, so decimal
    # numbers such as "3.14" are left intact.
    return re.sub(r'(?i)(?<=[a-z])\.(?=[a-z])', "", sent)
def clean_text(message):
    """ Delete extra characters from text before validation

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    without_punct = re.sub(r'[-_*+,().:]+', "", message)
    without_spaces = re.sub(r' +', "", without_punct)
    # Drop the "nº" (number) marker, case-insensitively, at word starts.
    return re.sub(r'(?i)\bnº', "", without_spaces)
def preprocess_text(message):
    """ Delete some artifacts from text

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    uni_message = gensim_utils.to_unicode(message)
    # Order matters: "\r\n" must be handled before the lone "\r" / "\n"
    # so a CRLF collapses to a single space instead of two.
    for artifact in ("\t", "\r\n", "\r", "\n"):
        uni_message = uni_message.replace(artifact, " ")
    return uni_message
def word2features(sent, i):
""" Extract features of a node in the "sent" list for a CRF
Keyword arguments:
sent -- a list of triples <word, PoS tag, label>
i -- index of the node to extract the featues
"""
word = sent[i][0]
postag = sent[i][1]
features = {
'bias': 1.0,
'word': word,
'word.lower()': word.lower(),
'word.istitle()': word.istitle(),
'word[-3:]': word[-3:],
'word[:3]': word[:3],
'word.isdigit()': word.isdigit(),
'postag': postag,
}
if i > 0:
word1 = sent[i-1][0]
postag1 = sent[i-1][1]
features.update({
'-1:word': word1,
'-1:word.lower()': word1.lower(),
'-1:word.istitle': word1.istitle(),
'-1:postag': postag1,
})
else:
features['BOS'] = True
# EXTRA
if i > 2:
word1 = sent[i-2][0]
postag1 = sent[i-2][1]
features.update({
'-2:word': word1,
'-2:word.lower()': word1.lower(),
'-2:word.istitle': word1.istitle(),
'-2:word.postag': postag1,
})
if i > 3:
word1 = sent[i-3][0]
postag1 = sent[i-3][1]
features.update({
'-3:word': word1,
'-3:word.lower()': word1.lower(),
'-3:word.istitle': word1.istitle(),
'-3:word.postag': postag1,
})
if i > 2:
word0 = sent[i][0]
postag0 = sent[i][1]
word1 = sent[i-1][0]
postag1 = sent[i-1][1]
features.update({
'-01:word': word1 + word0,
'-01:word.lower()': (word1 + " " + word0).lower(),
'-01:word0_postag1': postag1 + word0,
'-01:word1_postag0': postag0 + word1,
})
if i > 3:
word0 = sent[i][0]
word1 = sent[i-2][0]
postag0 = sent[i][1]
postag1 = sent[i-2][1]
features.update({
'-02:word': word1 + word0,
'-02:word.lower()': (word1 + " " + word0).lower(),
'-02:word0_postag1': postag1 + word0,
'-02:word1_postag0': postag0 + word1,
})
if i < len(sent) - 2:
word1 = sent[i+2][0]
postag1 = sent[i+2][1]
features.update({
'+2:word': word1,
'+2:word.lower()': word1.lower(),
'+2:word.istitle': word1.istitle(),
'+2:word.postag': postag1,
})
if i < len(sent)-1:
word1 = sent[i+1][0]
postag1 = sent[i+1][1]
features.update({
'+1:word': word1,
'+1:word.lower()': word1.lower(),
'+1:word.istitle()': word1.istitle(),
'+1:postag': postag1,
})
else:
features['EOS'] = True
return features
def char2features_mail(sent, i):
    """ Extract features of a node (for the mail CRF)

    Keyword arguments:
    sent -- a list of pairs <char, label>
    i -- index of the node to extract the features
    """
    features = {
        'bias': 1.0,
        'char.lower()': sent[i][0].lower(),
    }
    if i > 0:
        features['-1:char.lower()'] = sent[i - 1][0].lower()
    else:
        features['BOS'] = True
    if i < len(sent) - 1:
        features['+1:char.lower()'] = sent[i + 1][0].lower()
    else:
        features['EOS'] = True
    # Character window of up to 8 positions on each side.
    # NOTE(review): the historical guards are `i > k` (not `i >= k`), so a
    # look-behind of k only fires from index k + 1 onwards; kept verbatim
    # so features stay compatible with any trained model.
    for offset in range(2, 9):
        if i > offset:
            features['-%d:char.lower()' % offset] = \
                sent[i - offset][0].lower()
    for offset in range(2, 9):
        if i < len(sent) - offset:
            features['+%d:char.lower()' % offset] = \
                sent[i + offset][0].lower()
    return features
def char2features_space(sent, i):
    """Extract features of a node (for the whitespace-CRF detector).

    Keyword arguments:
    sent -- a list of pairs <word, label>
    i -- index of the node to extract the features

    Returns a dict of unigram/bigram/trigram character features around
    position ``i``; ``BOS``/``EOS`` flag the sequence boundaries.
    """
    word = sent[i][0]
    features = {
        'bias': 1.0,
        'char': word,
        'char.lower()': word.lower(),
    }
    if i > 0:
        prev1 = sent[i - 1][0]
        features.update({
            '-1:char': prev1,
            '-1:char.lower()': prev1.lower(),
            '-1:char.isdigit()': prev1.isdigit(),
        })
    else:
        features['BOS'] = True
    if i < len(sent) - 1:
        next1 = sent[i + 1][0]
        features.update({
            '+1:char': next1,
            '+1:char.lower()': next1.lower(),
            '+1:char.isdigit()': next1.isdigit(),
        })
    else:
        features['EOS'] = True
    # EXTRA context features.
    # Bug fix: the original backward guards used `i > 2` / `i > 3`, which
    # silently dropped the -2 / -3 (and -21 / -32 bigram) features at
    # i == 2 / i == 3 even though sent[i-2] / sent[i-3] are valid there
    # (compare the correct `i > 0` guard for the -1 feature above). The
    # forward guards were already correct and are unchanged.
    if i >= 2:
        prev2 = sent[i - 2][0]
        features.update({
            '-2:char': prev2,
            '-2:char.lower()': prev2.lower(),
            '-2:char.isdigit()': prev2.isdigit(),
        })
        prev1 = sent[i - 1][0]
        # Bigram of the two preceding characters.
        features.update({
            '-21:char.lower()': prev2.lower() + prev1.lower(),
            '-21:char.isdigit()': prev2.isdigit() and prev1.isdigit(),
        })
    if i >= 3:
        prev3 = sent[i - 3][0]
        features.update({
            '-3:char': prev3,
            '-3:char.lower()': prev3.lower(),
            '-3:char.isdigit()': prev3.isdigit(),
        })
        prev2 = sent[i - 2][0]
        features.update({
            '-32:char.lower()': prev3.lower() + prev2.lower(),
            '-32:char.isdigit()': prev3.isdigit() and prev2.isdigit(),
        })
    if i < len(sent) - 2:
        next2 = sent[i + 2][0]
        features.update({
            '+2:char': next2,
            '+2:char.lower()': next2.lower(),
            '+2:char.isdigit()': next2.isdigit(),
        })
        next1 = sent[i + 1][0]
        features.update({
            '+21:char.lower()': next1.lower() + next2.lower(),
            '+21:char.isdigit()': next1.isdigit() and next2.isdigit(),
        })
    if i < len(sent) - 3:
        next3 = sent[i + 3][0]
        features.update({
            '+3:char': next3,
            '+3:char.lower()': next3.lower(),
            '+3:char.isdigit()': next3.isdigit(),
        })
        next2 = sent[i + 2][0]
        features.update({
            '+32:char.lower()': next2.lower() + next3.lower(),
            '+32:char.isdigit()': next2.isdigit() and next3.isdigit(),
        })
        # NOTE(review): this trigram only needs sent[i+2], so the enclosing
        # `i < len(sent) - 3` guard is stricter than strictly required;
        # kept as in the original to avoid changing the feature set further.
        trigram = sent[i][0] + sent[i + 1][0] + sent[i + 2][0]
        features.update({
            '+02:lower()': trigram.lower(),
            '+02:isdigit()': trigram.isdigit(),
        })
    return features
| [
"gensim.utils.to_unicode",
"re.sub"
] | [((419, 465), 're.sub', 're.sub', (['"""(?i)(?<=[a-z])\\\\.(?=[a-z])"""', '""""""', 'sent'], {}), "('(?i)(?<=[a-z])\\\\.(?=[a-z])', '', sent)\n", (425, 465), False, 'import re\n'), ((664, 708), 're.sub', 're.sub', (['"""[\\\\-_*+,\\\\(\\\\).:]{1,}"""', '""""""', 'message'], {}), "('[\\\\-_*+,\\\\(\\\\).:]{1,}', '', message)\n", (670, 708), False, 'import re\n'), ((718, 745), 're.sub', 're.sub', (['"""[ ]{1,}"""', '""""""', 'sent'], {}), "('[ ]{1,}', '', sent)\n", (724, 745), False, 'import re\n'), ((758, 787), 're.sub', 're.sub', (['"""(?i)\\\\bnº"""', '""""""', 'sent'], {}), "('(?i)\\\\bnº', '', sent)\n", (764, 787), False, 'import re\n'), ((974, 1006), 'gensim.utils.to_unicode', 'gensim_utils.to_unicode', (['message'], {}), '(message)\n', (997, 1006), True, 'import gensim.utils as gensim_utils\n')] |
import angr
from angr.sim_type import SimTypeInt
import logging
# Module-level logger for this SimProcedure; the single-letter name `l`
# is kept as-is because other code in this module may reference it.
l = logging.getLogger("angr.procedures.libc.tolower")
class tolower(angr.SimProcedure):
    """Symbolic model of libc ``tolower``: maps 'A'-'Z' onto 'a'-'z'."""

    def run(self, c):
        # Record the C signature: one signed int argument, signed int result.
        self.argument_types = {0: SimTypeInt(self.state.arch, True)}
        self.return_type = SimTypeInt(self.state.arch, True)
        # 65..90 are the ASCII codes for 'A'..'Z'; adding 32 lands in 'a'..'z'.
        is_upper = self.state.solver.And(c >= 65, c <= 90)
        lowered = c + 32
        return self.state.solver.If(is_upper, lowered, c)
| [
"logging.getLogger",
"angr.sim_type.SimTypeInt"
] | [((69, 118), 'logging.getLogger', 'logging.getLogger', (['"""angr.procedures.libc.tolower"""'], {}), "('angr.procedures.libc.tolower')\n", (86, 118), False, 'import logging\n'), ((273, 306), 'angr.sim_type.SimTypeInt', 'SimTypeInt', (['self.state.arch', '(True)'], {}), '(self.state.arch, True)\n', (283, 306), False, 'from angr.sim_type import SimTypeInt\n'), ((211, 244), 'angr.sim_type.SimTypeInt', 'SimTypeInt', (['self.state.arch', '(True)'], {}), '(self.state.arch, True)\n', (221, 244), False, 'from angr.sim_type import SimTypeInt\n')] |
import data_mine as dm
from data_mine.nlp.cosmos_qa import CosmosQAType
def main():
    """Print the COSMOS QA train split and pretty-print one random sample."""
    df = dm.COSMOS_QA(CosmosQAType.TRAIN)
    print(df)
    print("\n")
    sample = df.sample(n=1)
    row = next(sample.iterrows())[1]
    print("Question: ", row.question, "\n")
    print("Context: ", row.context, "\n")
    for idx, answer in enumerate(row.answers):
        label = chr(ord('A') + idx)
        print("{}) {}".format(label, answer))
    print("\nCorrect answer: {}".format(row.correct))


if __name__ == "__main__":
    main()
| [
"data_mine.COSMOS_QA"
] | [((96, 128), 'data_mine.COSMOS_QA', 'dm.COSMOS_QA', (['CosmosQAType.TRAIN'], {}), '(CosmosQAType.TRAIN)\n', (108, 128), True, 'import data_mine as dm\n')] |
# used for connecting to the trusted capsule server
import socket
TCP_IP = '127.0.0.1'  # default target host (local loopback)
TCP_PORT = 4000  # default trusted-capsule server port
BUFFER_SIZE = 1024  # maximum number of bytes read from the reply
MESSAGE = "Hello, World!"  # sample payload (unused; the demo call builds its own)


def connect(ip: str, port: int, request: bytes) -> bytes:
    """Send *request* to ``(ip, port)`` over TCP and return the server's reply.

    Fixes in this revision:
    - the socket is managed by a ``with`` block, so the file descriptor is
      closed even when connect/send/recv raises (the original leaked it);
    - ``sendall`` replaces ``send``, which may transmit only part of the
      payload;
    - the received bytes are returned to the caller (previously they were
      only printed, so callers could not use the reply).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((ip, port))
        s.sendall(request)
        print("sent")
        data = s.recv(BUFFER_SIZE)
    print("received data:", data)
    return data
# Demo invocation: fires one request at the local server as soon as this
# module is executed (note: this also runs on import).
connect(TCP_IP, TCP_PORT, bytes("Hello, World!", 'ascii'))
| [
"socket.socket"
] | [((208, 257), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (221, 257), False, 'import socket\n')] |
from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam
from randonet.generator.unit import Unit, Factory as _Factory
from randonet.generator.conv import ConvFactory, ConvTransposeFactory
from collections import namedtuple
class TransformerEncoder(_Factory):
    """Factory describing TransformerEncoder units (encoder_layer, num_layers, norm)."""

    def __init__(self, **kwargs):
        _Factory.__init__(self)
        self.template_fn = namedtuple(
            "TransformerEncoder", ['encoder_layer', 'num_layers', 'norm'])
        defaults = {
            'encoder_layer': Param(name="encoder_layer", default=None),
            'num_layers': Param(name="num_layers", default=None),
            'norm': Param(name="norm", default=None),
        }
        self.params = self.template_fn(**defaults)
        # Apply caller-supplied overrides to the matching parameter objects.
        for key, value in kwargs.items():
            getattr(self.params, key).val = value
class TransformerDecoder(_Factory):
    """Factory describing TransformerDecoder units (decoder_layer, num_layers, norm)."""

    def __init__(self, **kwargs):
        _Factory.__init__(self)
        self.template_fn = namedtuple(
            "TransformerDecoder", ['decoder_layer', 'num_layers', 'norm'])
        defaults = {
            'decoder_layer': Param(name="decoder_layer", default=None),
            'num_layers': Param(name="num_layers", default=None),
            'norm': Param(name="norm", default=None),
        }
        self.params = self.template_fn(**defaults)
        # Apply caller-supplied overrides to the matching parameter objects.
        for key, value in kwargs.items():
            getattr(self.params, key).val = value
class TransformerEncoderLayer(_Factory):
    """Factory describing a single TransformerEncoderLayer unit."""

    def __init__(self, **kwargs):
        _Factory.__init__(self)
        self.template_fn = namedtuple(
            "TransformerEncoderLayer",
            ['d_model', 'nhead', 'dim_feedforward', 'dropout', 'activation'])
        defaults = {
            'd_model': Param(name="d_model", default=None),
            'nhead': Param(name="nhead", default=None),
            'dim_feedforward': IntParam(name="dim_feedforward", default=2048),
            'dropout': FloatParam(name="dropout", default=0.1),
            'activation': ChoiceParam(name="activation", choices=("relu",), cprobs=(1,), default="relu"),
        }
        self.params = self.template_fn(**defaults)
        # Apply caller-supplied overrides to the matching parameter objects.
        for key, value in kwargs.items():
            getattr(self.params, key).val = value
class TransformerDecoderLayer(_Factory):
    """Factory describing a single TransformerDecoderLayer unit."""

    def __init__(self, **kwargs):
        _Factory.__init__(self)
        self.template_fn = namedtuple(
            "TransformerDecoderLayer",
            ['d_model', 'nhead', 'dim_feedforward', 'dropout', 'activation'])
        defaults = {
            'd_model': Param(name="d_model", default=None),
            'nhead': Param(name="nhead", default=None),
            'dim_feedforward': IntParam(name="dim_feedforward", default=2048),
            'dropout': FloatParam(name="dropout", default=0.1),
            'activation': ChoiceParam(name="activation", choices=("relu",), cprobs=(1,), default="relu"),
        }
        self.params = self.template_fn(**defaults)
        # Apply caller-supplied overrides to the matching parameter objects.
        for key, value in kwargs.items():
            getattr(self.params, key).val = value
class Transformer(_Factory):
    """Factory describing a full Transformer unit (encoder + decoder stack)."""

    def __init__(self, **kwargs):
        _Factory.__init__(self)
        self.template_fn = namedtuple(
            "Transformer",
            ['d_model', 'nhead', 'num_encoder_layers', 'num_decoder_layers',
             'dim_feedforward', 'dropout', 'activation', 'custom_encoder',
             'custom_decoder'])
        defaults = {
            'd_model': IntParam(name="d_model", default=512),
            'nhead': IntParam(name="nhead", default=8),
            'num_encoder_layers': IntParam(name="num_encoder_layers", default=6),
            'num_decoder_layers': IntParam(name="num_decoder_layers", default=6),
            'dim_feedforward': IntParam(name="dim_feedforward", default=2048),
            'dropout': FloatParam(name="dropout", default=0.1),
            'activation': ChoiceParam(name="activation", choices=("relu",), cprobs=(1,), default="relu"),
            'custom_encoder': Param(name="custom_encoder", default=None),
            'custom_decoder': Param(name="custom_decoder", default=None),
        }
        self.params = self.template_fn(**defaults)
        # Apply caller-supplied overrides to the matching parameter objects.
        for key, value in kwargs.items():
            getattr(self.params, key).val = value
| [
"collections.namedtuple",
"randonet.generator.param.FloatParam",
"randonet.generator.param.ChoiceParam",
"randonet.generator.param.Param",
"randonet.generator.param.IntParam",
"randonet.generator.unit.Factory.__init__"
] | [((351, 374), 'randonet.generator.unit.Factory.__init__', '_Factory.__init__', (['self'], {}), '(self)\n', (368, 374), True, 'from randonet.generator.unit import Unit, Factory as _Factory\n'), ((402, 475), 'collections.namedtuple', 'namedtuple', (['"""TransformerEncoder"""', "['encoder_layer', 'num_layers', 'norm']"], {}), "('TransformerEncoder', ['encoder_layer', 'num_layers', 'norm'])\n", (412, 475), False, 'from collections import namedtuple\n'), ((868, 891), 'randonet.generator.unit.Factory.__init__', '_Factory.__init__', (['self'], {}), '(self)\n', (885, 891), True, 'from randonet.generator.unit import Unit, Factory as _Factory\n'), ((919, 992), 'collections.namedtuple', 'namedtuple', (['"""TransformerDecoder"""', "['decoder_layer', 'num_layers', 'norm']"], {}), "('TransformerDecoder', ['decoder_layer', 'num_layers', 'norm'])\n", (929, 992), False, 'from collections import namedtuple\n'), ((1390, 1413), 'randonet.generator.unit.Factory.__init__', '_Factory.__init__', (['self'], {}), '(self)\n', (1407, 1413), True, 'from randonet.generator.unit import Unit, Factory as _Factory\n'), ((1441, 1548), 'collections.namedtuple', 'namedtuple', (['"""TransformerEncoderLayer"""', "['d_model', 'nhead', 'dim_feedforward', 'dropout', 'activation']"], {}), "('TransformerEncoderLayer', ['d_model', 'nhead',\n 'dim_feedforward', 'dropout', 'activation'])\n", (1451, 1548), False, 'from collections import namedtuple\n'), ((2109, 2132), 'randonet.generator.unit.Factory.__init__', '_Factory.__init__', (['self'], {}), '(self)\n', (2126, 2132), True, 'from randonet.generator.unit import Unit, Factory as _Factory\n'), ((2160, 2267), 'collections.namedtuple', 'namedtuple', (['"""TransformerDecoderLayer"""', "['d_model', 'nhead', 'dim_feedforward', 'dropout', 'activation']"], {}), "('TransformerDecoderLayer', ['d_model', 'nhead',\n 'dim_feedforward', 'dropout', 'activation'])\n", (2170, 2267), False, 'from collections import namedtuple\n'), ((2816, 2839), 
'randonet.generator.unit.Factory.__init__', '_Factory.__init__', (['self'], {}), '(self)\n', (2833, 2839), True, 'from randonet.generator.unit import Unit, Factory as _Factory\n'), ((2867, 3046), 'collections.namedtuple', 'namedtuple', (['"""Transformer"""', "['d_model', 'nhead', 'num_encoder_layers', 'num_decoder_layers',\n 'dim_feedforward', 'dropout', 'activation', 'custom_encoder',\n 'custom_decoder']"], {}), "('Transformer', ['d_model', 'nhead', 'num_encoder_layers',\n 'num_decoder_layers', 'dim_feedforward', 'dropout', 'activation',\n 'custom_encoder', 'custom_decoder'])\n", (2877, 3046), False, 'from collections import namedtuple\n'), ((542, 583), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""encoder_layer"""', 'default': 'None'}), "(name='encoder_layer', default=None)\n", (547, 583), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((608, 646), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""num_layers"""', 'default': 'None'}), "(name='num_layers', default=None)\n", (613, 646), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((665, 697), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""norm"""', 'default': 'None'}), "(name='norm', default=None)\n", (670, 697), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1059, 1100), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""decoder_layer"""', 'default': 'None'}), "(name='decoder_layer', default=None)\n", (1064, 1100), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1125, 1163), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""num_layers"""', 'default': 'None'}), "(name='num_layers', default=None)\n", (1130, 1163), False, 'from randonet.generator.param import Param, IntParam, 
FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1182, 1214), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""norm"""', 'default': 'None'}), "(name='norm', default=None)\n", (1187, 1214), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1605, 1640), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""d_model"""', 'default': 'None'}), "(name='d_model', default=None)\n", (1610, 1640), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1660, 1693), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""nhead"""', 'default': 'None'}), "(name='nhead', default=None)\n", (1665, 1693), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1723, 1769), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': '"""dim_feedforward"""', 'default': '(2048)'}), "(name='dim_feedforward', default=2048)\n", (1731, 1769), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1791, 1830), 'randonet.generator.param.FloatParam', 'FloatParam', ([], {'name': '"""dropout"""', 'default': '(0.1)'}), "(name='dropout', default=0.1)\n", (1801, 1830), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((1855, 1933), 'randonet.generator.param.ChoiceParam', 'ChoiceParam', ([], {'name': '"""activation"""', 'choices': "('relu',)", 'cprobs': '(1,)', 'default': '"""relu"""'}), "(name='activation', choices=('relu',), cprobs=(1,), default='relu')\n", (1866, 1933), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((2324, 2359), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""d_model"""', 'default': 'None'}), "(name='d_model', default=None)\n", (2329, 2359), 
False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((2379, 2412), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""nhead"""', 'default': 'None'}), "(name='nhead', default=None)\n", (2384, 2412), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((2442, 2488), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': '"""dim_feedforward"""', 'default': '(2048)'}), "(name='dim_feedforward', default=2048)\n", (2450, 2488), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((2510, 2549), 'randonet.generator.param.FloatParam', 'FloatParam', ([], {'name': '"""dropout"""', 'default': '(0.1)'}), "(name='dropout', default=0.1)\n", (2520, 2549), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((2574, 2652), 'randonet.generator.param.ChoiceParam', 'ChoiceParam', ([], {'name': '"""activation"""', 'choices': "('relu',)", 'cprobs': '(1,)', 'default': '"""relu"""'}), "(name='activation', choices=('relu',), cprobs=(1,), default='relu')\n", (2585, 2652), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3099, 3136), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': '"""d_model"""', 'default': '(512)'}), "(name='d_model', default=512)\n", (3107, 3136), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3156, 3189), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': '"""nhead"""', 'default': '(8)'}), "(name='nhead', default=8)\n", (3164, 3189), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3222, 3268), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': 
'"""num_encoder_layers"""', 'default': '(6)'}), "(name='num_encoder_layers', default=6)\n", (3230, 3268), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3301, 3347), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': '"""num_decoder_layers"""', 'default': '(6)'}), "(name='num_decoder_layers', default=6)\n", (3309, 3347), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3377, 3423), 'randonet.generator.param.IntParam', 'IntParam', ([], {'name': '"""dim_feedforward"""', 'default': '(2048)'}), "(name='dim_feedforward', default=2048)\n", (3385, 3423), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3445, 3484), 'randonet.generator.param.FloatParam', 'FloatParam', ([], {'name': '"""dropout"""', 'default': '(0.1)'}), "(name='dropout', default=0.1)\n", (3455, 3484), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3509, 3587), 'randonet.generator.param.ChoiceParam', 'ChoiceParam', ([], {'name': '"""activation"""', 'choices': "('relu',)", 'cprobs': '(1,)', 'default': '"""relu"""'}), "(name='activation', choices=('relu',), cprobs=(1,), default='relu')\n", (3520, 3587), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3616, 3658), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""custom_encoder"""', 'default': 'None'}), "(name='custom_encoder', default=None)\n", (3621, 3658), False, 'from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n'), ((3687, 3729), 'randonet.generator.param.Param', 'Param', ([], {'name': '"""custom_decoder"""', 'default': 'None'}), "(name='custom_decoder', default=None)\n", (3692, 3729), False, 'from randonet.generator.param import Param, 
IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam\n')] |
from rest_framework import serializers
from . import models
class ShelterSerializer(serializers.ModelSerializer):
    """Serializes Shelter instances; exposes only name and location."""
    class Meta:
        model = models.Shelter
        fields = ('name',
                  'location')
class DogSerializer(serializers.ModelSerializer):
    """Serializes Dog instances, including the owning shelter reference."""
    class Meta:
        model = models.Dog
        fields = ('shelter',
                  'name',
                  'description',
                  'intake_date')
class ErrorSerializer(serializers.Serializer):
    """Plain (non-model) serializer carrying a single error message string."""
    error_message = serializers.CharField(max_length=200)
| [
"rest_framework.serializers.CharField"
] | [((505, 542), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (526, 542), False, 'from rest_framework import serializers\n')] |
import struct
# Sentinel byte sequence used by build_set_key to delimit the zset name
# inside composite keys (chosen to be unlikely to appear in real keys).
_SetSeparator=b"_~|IMMU|~_"
def wrap_zindex_ref(key: bytes, index) -> bytes:
    """Append a big-endian ``(uint64 index, flag byte)`` suffix to *key*.

    The flag byte is 1 when *index* carries a usable ``.index`` value and 0
    otherwise (in which case the index field is packed as 0).

    key -- raw key bytes
    index -- object exposing an ``.index`` attribute, or None
    """
    fmt = ">{}sQB".format(len(key))
    # `is not None` instead of `!= None`: identity comparison is the
    # correct (and PEP 8 mandated) way to test for the None sentinel.
    if index is not None and index.index is not None:
        return struct.pack(fmt, key, index.index, 1)
    return struct.pack(fmt, key, 0, 0)
def unwrap_zindex_ref(value: bytes):
    """Split a wrapped key back into ``(key_bytes, index_or_None)``."""
    total = len(value)
    # Layout: key bytes, then an 8-byte big-endian index and a 1-byte flag.
    layout = ">{}sQB".format(total - 8 - 1)
    key, index, flag = struct.unpack(layout, value)
    # A zero flag byte means "no index was recorded".
    return (key, None) if flag == 0 else (key, index)
def build_set_key(key: bytes, zset: bytes, score: float, index) -> bytes:
    """Compose the storage key for *key* inside sorted set *zset*.

    Layout: SEP + zset + SEP + big-endian float64 score + key, with the
    whole thing wrapped by the zindex reference suffix.
    """
    prefix = _SetSeparator + zset + _SetSeparator
    composed = prefix + float64_2_bytes(score) + key
    return wrap_zindex_ref(composed, index)
def float64_2_bytes(f: float) -> bytes:
    """Encode *f* as an 8-byte big-endian IEEE-754 double."""
    packed = struct.pack(">d", f)
    return packed
| [
"struct.unpack",
"struct.pack"
] | [((353, 378), 'struct.unpack', 'struct.unpack', (['fmt', 'value'], {}), '(fmt, value)\n', (366, 378), False, 'import struct\n'), ((662, 682), 'struct.pack', 'struct.pack', (['""">d"""', 'f'], {}), "('>d', f)\n", (673, 682), False, 'import struct\n'), ((169, 206), 'struct.pack', 'struct.pack', (['fmt', 'key', 'index.index', '(1)'], {}), '(fmt, key, index.index, 1)\n', (180, 206), False, 'import struct\n'), ((217, 244), 'struct.pack', 'struct.pack', (['fmt', 'key', '(0)', '(0)'], {}), '(fmt, key, 0, 0)\n', (228, 244), False, 'import struct\n')] |
"""Testing the Flask application factory"""
import os
import tempfile
import textwrap
from interpersonal import create_app
def test_config():
    """Test the application configuration.

    Make sure it works in testing mode and in normal mode, and clean up
    the temporary database/config files and the media staging directory
    afterwards. (The original leaked all of them whenever an assertion
    failed, and never removed the mkdtemp directory at all.)
    """
    import shutil  # local import: only needed for directory cleanup

    db_fd, db_path = tempfile.mkstemp()
    conf_fd, conf_path = tempfile.mkstemp()
    media_staging_path = tempfile.mkdtemp()
    try:
        appconfig_str = textwrap.dedent(
            f"""\
            ---
            loglevel: DEBUG
            database: {db_path}
            password: <PASSWORD>
            cookie_secret_key: whocaresman
            uri: http://interpersonal.example.net
            mediastaging: {media_staging_path}
            blogs:
              - name: example
                type: built-in example
                uri: http://whatever.example.net
                sectionmap:
                  default: blog
                mediaprefix: /media
            """
        )
        os.write(conf_fd, appconfig_str.encode())
        assert not create_app(configpath=conf_path).testing
        assert create_app({"TESTING": True}, configpath=conf_path).testing
    finally:
        # Release resources even when an assertion above fails.
        os.close(db_fd)
        os.unlink(db_path)
        os.close(conf_fd)
        os.unlink(conf_path)
        shutil.rmtree(media_staging_path, ignore_errors=True)
| [
"textwrap.dedent",
"interpersonal.create_app",
"os.close",
"tempfile.mkdtemp",
"os.unlink",
"tempfile.mkstemp"
] | [((277, 295), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (293, 295), False, 'import tempfile\n'), ((321, 339), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (337, 339), False, 'import tempfile\n'), ((365, 383), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (381, 383), False, 'import tempfile\n'), ((405, 861), 'textwrap.dedent', 'textwrap.dedent', (['f""" ---\n loglevel: DEBUG\n database: {db_path}\n password: <PASSWORD>\n cookie_secret_key: whocaresman\n uri: http://interpersonal.example.net\n mediastaging: {media_staging_path}\n blogs:\n - name: example\n type: built-in example\n uri: http://whatever.example.net\n sectionmap:\n default: blog\n mediaprefix: /media\n """'], {}), '(\n f""" ---\n loglevel: DEBUG\n database: {db_path}\n password: <PASSWORD>\n cookie_secret_key: whocaresman\n uri: http://interpersonal.example.net\n mediastaging: {media_staging_path}\n blogs:\n - name: example\n type: built-in example\n uri: http://whatever.example.net\n sectionmap:\n default: blog\n mediaprefix: /media\n """\n )\n', (420, 861), False, 'import textwrap\n'), ((1048, 1063), 'os.close', 'os.close', (['db_fd'], {}), '(db_fd)\n', (1056, 1063), False, 'import os\n'), ((1068, 1086), 'os.unlink', 'os.unlink', (['db_path'], {}), '(db_path)\n', (1077, 1086), False, 'import os\n'), ((1091, 1108), 'os.close', 'os.close', (['conf_fd'], {}), '(conf_fd)\n', (1099, 1108), False, 'import os\n'), ((1113, 1133), 'os.unlink', 'os.unlink', (['conf_path'], {}), '(conf_path)\n', (1122, 1133), False, 'import os\n'), ((983, 1034), 'interpersonal.create_app', 'create_app', (["{'TESTING': True}"], {'configpath': 'conf_path'}), "({'TESTING': True}, configpath=conf_path)\n", (993, 1034), False, 'from interpersonal import create_app\n'), ((931, 963), 'interpersonal.create_app', 'create_app', ([], {'configpath': 'conf_path'}), '(configpath=conf_path)\n', (941, 963), False, 'from interpersonal import create_app\n')] |
import sql as sql
import streamlit as st
from streamlit_folium import folium_static
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import json
import sys
import folium
import requests
from bs4 import BeautifulSoup
import csv
from tqdm import tqdm
import webbrowser
import os.path as osp
import os
from folium.plugins import MarkerCluster
import numpy as np
from numpy import genfromtxt
import sqlite3
with st.echo(code_location='below'):
import zipfile
zipFile = zipfile.ZipFile("2019-20-fullyr-data_sa_crime.csv.zip", 'r')
zipFile.extract('2019-20-fullyr-data_sa_crime.csv')
df1 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
# from sqlite3 import Error
#
#
# def create_connection(path):
# connection = None
# try:
# connection = sqlite3.connect(path)
# print("Connection to SQLite DB successful")
# except Error as e:
# print(f"The error '{e}' occurred")
#
# return connection
st.title("Различные данные по правонарушениям в Южной Австралии за 2018-2020гг.")
xx = df1.copy()
xx.drop(columns = ['Reported Date', 'Postcode - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
xx.sort_values(by='Suburb - Incident', ascending=False)
groups = xx.groupby('Suburb - Incident', as_index=False).sum()
group1 = groups.sort_values('Offence count', ascending=False).head(15)
st.write('Статистика по пригородам с наибольшим количествам правонарушений за 2019-2020гг.')
fig2, ax2 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group1, x='Suburb - Incident', y='Offence count', palette='magma')
plt.xlabel('Suburb', size=20)
plt.ylabel('Offence count in the suburb', size=20)
plt.title('Total offence count of crimes in the suburbs (top 15) 2019/2020', size=36)
st.pyplot(fig2)
if st.button('Показать статистику по пригородам с наибольшим количествам правонарушений за 2019-2020гг. в виде таблицы'):
st.dataframe(group1)
xx1 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx1.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 2 Description', 'Offence Level 3 Description'])
xx1.sort_values(by='Offence Level 1 Description', ascending=False)
groups1 = xx1.groupby('Offence Level 1 Description', as_index=False).sum()
group12 = groups1.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по первой классификации за 2019-2020гг.')
fig3, ax3 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group12, x='Offence Level 1 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev1)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev1) 2019/2020', size=36)
st.pyplot(fig3)
if st.button('Показать статистику по количеству правонарушений по первой классификации за 2019-2020гг. в виде таблицы'):
st.dataframe(group12)
xx2 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx2.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 3 Description'])
xx2.sort_values(by='Offence Level 2 Description', ascending=False)
groups1_2 = xx2.groupby('Offence Level 2 Description', as_index=False).sum()
group123 = groups1_2.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по второй классификации за 2019-2020гг.')
fig4, ax4 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group123, x='Offence Level 2 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev2)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev2) 2019/2020', size=36)
st.pyplot(fig4)
if st.button('Показать статистику по количеству правонарушений по второй классификации за 2019-2020гг. в виде таблицы'):
st.dataframe(group123)
xx3 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx3.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description'])
xx3.sort_values(by='Offence Level 3 Description', ascending=False)
groups1_2_3 = xx3.groupby('Offence Level 3 Description', as_index=False).sum()
group1234 = groups1_2_3.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по третьей классификации за 2019-2020гг.')
fig5, ax5 = plt.subplots(figsize=(60, 20))
sns.barplot(data=group1234, x='Offence Level 3 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev3)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev3)', size=36)
st.pyplot(fig5)
if st.button('Показать cтатистику по количеству правонарушений по третьей классификации за 2019-2020гг. в виде таблицы'):
st.dataframe(data=group1234)
xx4 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx4.drop(columns=['Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
xx4.sort_values(by='Reported Date')
groups1_2_3_4 = xx4.groupby('Reported Date', as_index=False).sum()
group12345 = groups1_2_3_4.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по датам за 2019-2020гг.')
fig6, ax6 = plt.subplots(figsize=(60, 20))
sns.lineplot(data=group12345, x='Reported Date', y='Offence count', color='red')
plt.xlabel('Date', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count by date 01.07.19-30.06.20', size=36)
st.pyplot(fig6)
if st.button('Показать статистику по количеству правонарушений по датам за 2019-2020гг. в виде таблицы'):
st.dataframe(data=group12345)
x_18_19=pd.read_csv ('2018-19-data_sa_crime.csv')
x_18_19.drop(columns=['Reported Date', 'Postcode - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
x_18_19.sort_values(by='Suburb - Incident', ascending=False)
groups_18_19 = x_18_19.groupby('Suburb - Incident', as_index=False).sum()
group_18_19_1 = groups_18_19.sort_values('Offence count', ascending=False).head(15)
st.write('Статистика по пригородам с наибольшим количествам правонарушений за 2018-2019гг.')
fig7, ax7 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_1, x='Suburb - Incident', y='Offence count', palette='magma')
plt.xlabel('Suburb', size=20)
plt.ylabel('Offence count in the suburb', size=20)
plt.title('Total offence count of crimes in the suburbs (top 15) 2018/2019', size=36)
st.pyplot(fig7)
if st.button('Показать статистику по пригородам с наибольшим количествам правонарушений за 2018-2019гг. в виде таблицы'):
st.dataframe(group_18_19_1)
x_18_19_2 = pd.read_csv('2018-19-data_sa_crime.csv')
x_18_19_2.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 2 Description', 'Offence Level 3 Description'])
x_18_19_2.sort_values(by='Offence Level 1 Description', ascending=False)
groups_18_19_2 = x_18_19_2.groupby('Offence Level 1 Description', as_index=False).sum()
group_18_19_2 = groups_18_19_2.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по первой классификации за 2018-2019гг.')
fig8, ax8 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_2, x='Offence Level 1 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev1)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev1) 2018/2019', size=36)
st.pyplot(fig8)
if st.button('Показать статистику по количеству правонарушений по первой классификации за 2018-2019гг. в виде таблицы'):
st.dataframe(group_18_19_2)
x_18_19_4 = pd.read_csv('2018-19-data_sa_crime.csv')
x_18_19_4.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 3 Description'])
x_18_19_4.sort_values(by='Offence Level 2 Description', ascending=False)
groups_18_19_4 = x_18_19_4.groupby('Offence Level 2 Description', as_index=False).sum()
group_18_19_4 = groups_18_19_4.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по второй классификации за 2018-2019гг.')
fig10, ax10 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_4, x='Offence Level 2 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev2)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev2) 2018/2019', size=36)
st.pyplot(fig10)
if st.button('Показать статистику по количеству правонврушений по второй классификации за 2018-2019гг. в виде таблицы'):
st.dataframe(group_18_19_4)
x_18_19_3 = pd.read_csv('2018-19-data_sa_crime.csv')
x_18_19_3.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description'])
x_18_19_3.sort_values(by='Offence Level 3 Description', ascending=False)
groups_18_19_3 = x_18_19_3.groupby('Offence Level 3 Description', as_index=False).sum()
group_18_19_3 = groups_18_19_3.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по третьей классификации за 2018-2019гг.')
fig9, ax9 = plt.subplots(figsize=(60, 20))
sns.barplot(data=group_18_19_3, x='Offence Level 3 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev3)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev3) 2018/2019', size=36)
st.pyplot(fig9)
if st.button('Показать статистику по количеству правонарушений по третьей классификации за 2018-2019гг. в виде таблицы'):
st.dataframe(group_18_19_3)
# --- Offender-count time series, 2009-2019, from a pre-aggregated CSV ---
din=pd.read_csv("Offenders, principal offence of public order offences.csv")
#din_data = genfromtxt('Offenders, principal offence of public order offences.csv', delimiter=',')
print(din)  # NOTE(review): debug output left in — consider removing for production
#din.columns=["Years", 'Offenders']
#print(din)
st.write('Статистика по количеству правонарушителей 2009-2019гг.')
# Line chart of offender counts per year.
# NOTE(review): assumes the CSV already has "Years"/"Offenders" columns —
# the commented-out rename above suggests this should be verified.
fig10, ax10 = plt.subplots(figsize=(40, 20))
sns.lineplot(data=din, x="Years", y='Offenders', color='red')
plt.xlabel('Year', size=40)
plt.ylabel('Offenders', size=40)
plt.title('Offenders dinamics', size=50)
st.pyplot(fig10)
if st.button('Показать статистику по количеству правонарушителей 2009-2019гг. в виде таблицы'):
    st.dataframe(din)
# --- Per-suburb offence map: count offences per location, geocode the top
# --- suburbs via Nominatim, then (below) plot markers with folium ---
years = np.array([2019, 2020])
st.write("(Придётся немного подождать, программа обрабатывает примерно 95тыс. результатов для каждого года)")
files = ['2019-20-fullyr-data_sa_crime.csv',
         '2018-19-data_sa_crime.csv']  # run the whole marker-map build once per yearly CSV file
for file in files:
    locations = []
    entrypoint1 = "https://nominatim.openstreetmap.org/search"
    query1 = {'q': 'MORPHETT VALE australia', 'format': 'xml'}
    r1 = requests.get(entrypoint1, params=query1)
    soup = BeautifulSoup(r1.text, 'xml')
    # NOTE(review): r1/soup are never used afterwards in this loop — this looks
    # like leftover warm-up/test code; confirm before removing.
    st.write("Визуализация количества правонарушений по пригородам на карте " + str(years[0]) + "-" + str(years[1]) + "гг.")
    years = years-1  # shift the caption range so the second file is labelled 2018-2019
    print(years)
    # If the path breaks, paste the absolute path to the folder holding the CSVs.
    with open(osp.join(os.environ['HOME'], 'Downloads/first_project 2', file), newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            place = row[1] + ' ' + row[2]  # suburb name + postcode
            locations.append(place)
    locations.pop(0)  # drop the header row (column names)
    # {location: offence count}.
    # NOTE(review): locations.count(i) inside the comprehension is O(n^2) over
    # ~95k rows — a collections.Counter would do this in one pass.
    new_dict = {i: locations.count(i) for i in tqdm(locations)}
    sorted_values = sorted(new_dict.values(), reverse=True)  # counts, descending
    sorted_dict = {}
    for i in sorted_values:  # rebuild the dict ordered by descending count
        for k in new_dict.keys():
            if new_dict[k] == i:
                sorted_dict[k] = new_dict[k]
                break
    # Slice off the top entries via parallel key/value lists.
    lst_slice_key = list(sorted_dict.keys())[:27]  # first 27 keys
    lst_slice_val = list(sorted_dict.values())[:27]  # first 27 values
    new_sorted_dict = dict(zip(lst_slice_key, lst_slice_val))  # top-27 dict
    print(new_sorted_dict)
    lat_19_20 = []
    lon_19_20 = []
    lst_number = []
    lst_place = []
    # Geocode each top suburb via Nominatim and keep the first hit only.
    for name, number in tqdm(new_sorted_dict.items()):
        entrypoint2 = "https://nominatim.openstreetmap.org/search"
        query2 = {'q': str(name), 'format': 'xml'}
        r2 = requests.get(entrypoint2, params=query2)
        soup1 = BeautifulSoup(r2.text, 'xml')
        for place1 in soup1.find_all("place"):
            lst_place.append(place1['display_name'])
            lat_19_20.append(float(place1['lat']))
            lon_19_20.append(float(place1['lon']))
            lst_number.append(number)
            break
    coord_19_20 = dict(zip(lat_19_20, lon_19_20))
    # First geocoded coordinate pair — used as the default map centre below.
    a = list(coord_19_20.keys())[0]
    b = coord_19_20[a]
def color_change(count): # менеяем цвет в зависимости от кол-ва преступлений в точке
if (count < 800):
return ('green')
elif (800 <= count < 1100):
return ('orange')
else:
return ('red')
def radius_change(count): # менеяем радиус в зависимости от кол-ва преступлений в точке
if (count < 800):
rad = 7
return rad
elif (800 <= count < 1100):
rad = 14
return rad
else:
rad = 21
return rad
    # Map centred on the first geocoded suburb.
    map = folium.Map(location=[a, b], zoom_start=8)
    marker_cluster = folium.plugins.MarkerCluster().add_to(map)  # cluster nearby markers
    for lat, lon, place, number in tqdm(zip(lat_19_20, lon_19_20, lst_place, lst_number)):  # one marker per suburb
        place_splited = place.split(',')
        folium.CircleMarker(location=[lat, lon], radius=radius_change(int(number)),
                            # location - marker coordinates; radius - from radius_change()
                            popup=f'Place: {place_splited[0]}, {place_splited[1]}, {place_splited[2]}\nCrimes: {str(number)}',
                            # popup - marker text
                            fill_color=color_change(int(number)), color="black", fill_opacity=0.9).add_to(
            marker_cluster)  # fill_color - from color_change()
    map.save(f"map_{file[:-4]}.html")  # persist the map as an HTML file
    print(f'DONE with {file}')
    url = f"map_{file[:-4]}.html"
    folium_static(map)
| [
"pandas.read_csv",
"zipfile.ZipFile",
"streamlit.echo",
"matplotlib.pyplot.ylabel",
"streamlit.button",
"numpy.array",
"streamlit.title",
"matplotlib.pyplot.xlabel",
"folium.Map",
"folium.plugins.MarkerCluster",
"csv.reader",
"streamlit.write",
"requests.get",
"seaborn.lineplot",
"stream... | [((438, 468), 'streamlit.echo', 'st.echo', ([], {'code_location': '"""below"""'}), "(code_location='below')\n", (445, 468), True, 'import streamlit as st\n'), ((503, 563), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""2019-20-fullyr-data_sa_crime.csv.zip"""', '"""r"""'], {}), "('2019-20-fullyr-data_sa_crime.csv.zip', 'r')\n", (518, 563), False, 'import zipfile\n'), ((630, 677), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (641, 677), True, 'import pandas as pd\n'), ((1026, 1112), 'streamlit.title', 'st.title', (['"""Различные данные по правонарушениям в Южной Австралии за 2018-2020гг."""'], {}), "(\n 'Различные данные по правонарушениям в Южной Австралии за 2018-2020гг.')\n", (1034, 1112), True, 'import streamlit as st\n'), ((1492, 1594), 'streamlit.write', 'st.write', (['"""Статистика по пригородам с наибольшим количествам правонарушений за 2019-2020гг."""'], {}), "(\n 'Статистика по пригородам с наибольшим количествам правонарушений за 2019-2020гг.'\n )\n", (1500, 1594), True, 'import streamlit as st\n'), ((1602, 1632), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (1614, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1725), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group1', 'x': '"""Suburb - Incident"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group1, x='Suburb - Incident', y='Offence count', palette=\n 'magma')\n", (1648, 1725), True, 'import seaborn as sns\n'), ((1725, 1754), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Suburb"""'], {'size': '(20)'}), "('Suburb', size=20)\n", (1735, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1809), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count in the suburb"""'], {'size': '(20)'}), "('Offence count in the suburb', size=20)\n", (1769, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1814, 1903), 
'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of crimes in the suburbs (top 15) 2019/2020"""'], {'size': '(36)'}), "('Total offence count of crimes in the suburbs (top 15) 2019/2020',\n size=36)\n", (1823, 1903), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1919), 'streamlit.pyplot', 'st.pyplot', (['fig2'], {}), '(fig2)\n', (1913, 1919), True, 'import streamlit as st\n'), ((1927, 2054), 'streamlit.button', 'st.button', (['"""Показать статистику по пригородам с наибольшим количествам правонарушений за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по пригородам с наибольшим количествам правонарушений за 2019-2020гг. в виде таблицы'\n )\n", (1936, 2054), True, 'import streamlit as st\n'), ((2086, 2133), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (2097, 2133), True, 'import pandas as pd\n'), ((2503, 2604), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по первой классификации за 2019-2020гг."""'], {}), "(\n 'Статистика по количеству правонарушений по первой классификации за 2019-2020гг.'\n )\n", (2511, 2604), True, 'import streamlit as st\n'), ((2611, 2641), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (2623, 2641), True, 'import matplotlib.pyplot as plt\n'), ((2646, 2745), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group12', 'x': '"""Offence Level 1 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group12, x='Offence Level 1 Description', y=\n 'Offence count', palette='magma')\n", (2657, 2745), True, 'import seaborn as sns\n'), ((2745, 2788), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev1)"""'], {'size': '(20)'}), "('Type of crime (lev1)', size=20)\n", (2755, 2788), True, 'import matplotlib.pyplot as plt\n'), ((2793, 2829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': 
'(20)'}), "('Offence count', size=20)\n", (2803, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2912), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev1) 2019/2020"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev1) 2019/2020', size=36)\n", (2843, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2917, 2932), 'streamlit.pyplot', 'st.pyplot', (['fig3'], {}), '(fig3)\n', (2926, 2932), True, 'import streamlit as st\n'), ((2940, 3066), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по первой классификации за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по первой классификации за 2019-2020гг. в виде таблицы'\n )\n", (2949, 3066), True, 'import streamlit as st\n'), ((3099, 3146), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (3110, 3146), True, 'import pandas as pd\n'), ((3521, 3622), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по второй классификации за 2019-2020гг."""'], {}), "(\n 'Статистика по количеству правонарушений по второй классификации за 2019-2020гг.'\n )\n", (3529, 3622), True, 'import streamlit as st\n'), ((3629, 3659), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (3641, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3764), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group123', 'x': '"""Offence Level 2 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group123, x='Offence Level 2 Description', y=\n 'Offence count', palette='magma')\n", (3675, 3764), True, 'import seaborn as sns\n'), ((3764, 3807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev2)"""'], {'size': '(20)'}), "('Type of crime (lev2)', size=20)\n", (3774, 3807), True, 'import matplotlib.pyplot as 
plt\n'), ((3812, 3848), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (3822, 3848), True, 'import matplotlib.pyplot as plt\n'), ((3853, 3931), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev2) 2019/2020"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev2) 2019/2020', size=36)\n", (3862, 3931), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3951), 'streamlit.pyplot', 'st.pyplot', (['fig4'], {}), '(fig4)\n', (3945, 3951), True, 'import streamlit as st\n'), ((3959, 4085), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по второй классификации за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по второй классификации за 2019-2020гг. в виде таблицы'\n )\n", (3968, 4085), True, 'import streamlit as st\n'), ((4119, 4166), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (4130, 4166), True, 'import pandas as pd\n'), ((4546, 4648), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по третьей классификации за 2019-2020гг."""'], {}), "(\n 'Статистика по количеству правонарушений по третьей классификации за 2019-2020гг.'\n )\n", (4554, 4648), True, 'import streamlit as st\n'), ((4655, 4685), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 20)'}), '(figsize=(60, 20))\n', (4667, 4685), True, 'import matplotlib.pyplot as plt\n'), ((4690, 4791), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group1234', 'x': '"""Offence Level 3 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group1234, x='Offence Level 3 Description', y=\n 'Offence count', palette='magma')\n", (4701, 4791), True, 'import seaborn as sns\n'), ((4791, 4834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev3)"""'], 
{'size': '(20)'}), "('Type of crime (lev3)', size=20)\n", (4801, 4834), True, 'import matplotlib.pyplot as plt\n'), ((4839, 4875), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (4849, 4875), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4948), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev3)"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev3)', size=36)\n", (4889, 4948), True, 'import matplotlib.pyplot as plt\n'), ((4953, 4968), 'streamlit.pyplot', 'st.pyplot', (['fig5'], {}), '(fig5)\n', (4962, 4968), True, 'import streamlit as st\n'), ((4976, 5103), 'streamlit.button', 'st.button', (['"""Показать cтатистику по количеству правонарушений по третьей классификации за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать cтатистику по количеству правонарушений по третьей классификации за 2019-2020гг. в виде таблицы'\n )\n", (4985, 5103), True, 'import streamlit as st\n'), ((5143, 5190), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (5154, 5190), True, 'import pandas as pd\n'), ((5544, 5620), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по датам за 2019-2020гг."""'], {}), "('Статистика по количеству правонарушений по датам за 2019-2020гг.')\n", (5552, 5620), True, 'import streamlit as st\n'), ((5637, 5667), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 20)'}), '(figsize=(60, 20))\n', (5649, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5757), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'group12345', 'x': '"""Reported Date"""', 'y': '"""Offence count"""', 'color': '"""red"""'}), "(data=group12345, x='Reported Date', y='Offence count', color='red'\n )\n", (5684, 5757), True, 'import seaborn as sns\n'), ((5757, 5784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], 
{'size': '(20)'}), "('Date', size=20)\n", (5767, 5784), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5825), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (5799, 5825), True, 'import matplotlib.pyplot as plt\n'), ((5830, 5897), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count by date 01.07.19-30.06.20"""'], {'size': '(36)'}), "('Total offence count by date 01.07.19-30.06.20', size=36)\n", (5839, 5897), True, 'import matplotlib.pyplot as plt\n'), ((5902, 5917), 'streamlit.pyplot', 'st.pyplot', (['fig6'], {}), '(fig6)\n', (5911, 5917), True, 'import streamlit as st\n'), ((5925, 6036), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по датам за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по датам за 2019-2020гг. в виде таблицы'\n )\n", (5934, 6036), True, 'import streamlit as st\n'), ((6080, 6120), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (6091, 6120), True, 'import pandas as pd\n'), ((6519, 6621), 'streamlit.write', 'st.write', (['"""Статистика по пригородам с наибольшим количествам правонарушений за 2018-2019гг."""'], {}), "(\n 'Статистика по пригородам с наибольшим количествам правонарушений за 2018-2019гг.'\n )\n", (6527, 6621), True, 'import streamlit as st\n'), ((6628, 6658), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (6640, 6658), True, 'import matplotlib.pyplot as plt\n'), ((6663, 6757), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_1', 'x': '"""Suburb - Incident"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_1, x='Suburb - Incident', y='Offence count',\n palette='magma')\n", (6674, 6757), True, 'import seaborn as sns\n'), ((6758, 6787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Suburb"""'], 
{'size': '(20)'}), "('Suburb', size=20)\n", (6768, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6792, 6842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count in the suburb"""'], {'size': '(20)'}), "('Offence count in the suburb', size=20)\n", (6802, 6842), True, 'import matplotlib.pyplot as plt\n'), ((6847, 6936), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of crimes in the suburbs (top 15) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of crimes in the suburbs (top 15) 2018/2019',\n size=36)\n", (6856, 6936), True, 'import matplotlib.pyplot as plt\n'), ((6937, 6952), 'streamlit.pyplot', 'st.pyplot', (['fig7'], {}), '(fig7)\n', (6946, 6952), True, 'import streamlit as st\n'), ((6960, 7087), 'streamlit.button', 'st.button', (['"""Показать статистику по пригородам с наибольшим количествам правонарушений за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по пригородам с наибольшим количествам правонарушений за 2018-2019гг. 
в виде таблицы'\n )\n", (6969, 7087), True, 'import streamlit as st\n'), ((7132, 7172), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (7143, 7172), True, 'import pandas as pd\n'), ((7580, 7681), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по первой классификации за 2018-2019гг."""'], {}), "(\n 'Статистика по количеству правонарушений по первой классификации за 2018-2019гг.'\n )\n", (7588, 7681), True, 'import streamlit as st\n'), ((7688, 7718), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (7700, 7718), True, 'import matplotlib.pyplot as plt\n'), ((7723, 7828), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_2', 'x': '"""Offence Level 1 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_2, x='Offence Level 1 Description', y=\n 'Offence count', palette='magma')\n", (7734, 7828), True, 'import seaborn as sns\n'), ((7828, 7871), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev1)"""'], {'size': '(20)'}), "('Type of crime (lev1)', size=20)\n", (7838, 7871), True, 'import matplotlib.pyplot as plt\n'), ((7876, 7912), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (7886, 7912), True, 'import matplotlib.pyplot as plt\n'), ((7917, 7995), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev1) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev1) 2018/2019', size=36)\n", (7926, 7995), True, 'import matplotlib.pyplot as plt\n'), ((8000, 8015), 'streamlit.pyplot', 'st.pyplot', (['fig8'], {}), '(fig8)\n', (8009, 8015), True, 'import streamlit as st\n'), ((8023, 8149), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по первой классификации за 2018-2019гг. 
в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по первой классификации за 2018-2019гг. в виде таблицы'\n )\n", (8032, 8149), True, 'import streamlit as st\n'), ((8194, 8234), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (8205, 8234), True, 'import pandas as pd\n'), ((8642, 8743), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по второй классификации за 2018-2019гг."""'], {}), "(\n 'Статистика по количеству правонарушений по второй классификации за 2018-2019гг.'\n )\n", (8650, 8743), True, 'import streamlit as st\n'), ((8752, 8782), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (8764, 8782), True, 'import matplotlib.pyplot as plt\n'), ((8787, 8892), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_4', 'x': '"""Offence Level 2 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_4, x='Offence Level 2 Description', y=\n 'Offence count', palette='magma')\n", (8798, 8892), True, 'import seaborn as sns\n'), ((8892, 8935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev2)"""'], {'size': '(20)'}), "('Type of crime (lev2)', size=20)\n", (8902, 8935), True, 'import matplotlib.pyplot as plt\n'), ((8940, 8976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (8950, 8976), True, 'import matplotlib.pyplot as plt\n'), ((8981, 9059), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev2) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev2) 2018/2019', size=36)\n", (8990, 9059), True, 'import matplotlib.pyplot as plt\n'), ((9064, 9080), 'streamlit.pyplot', 'st.pyplot', (['fig10'], {}), '(fig10)\n', (9073, 9080), True, 'import streamlit as st\n'), ((9088, 9214), 'streamlit.button', 
'st.button', (['"""Показать статистику по количеству правонврушений по второй классификации за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонврушений по второй классификации за 2018-2019гг. в виде таблицы'\n )\n", (9097, 9214), True, 'import streamlit as st\n'), ((9259, 9299), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (9270, 9299), True, 'import pandas as pd\n'), ((9707, 9809), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по третьей классификации за 2018-2019гг."""'], {}), "(\n 'Статистика по количеству правонарушений по третьей классификации за 2018-2019гг.'\n )\n", (9715, 9809), True, 'import streamlit as st\n'), ((9816, 9846), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 20)'}), '(figsize=(60, 20))\n', (9828, 9846), True, 'import matplotlib.pyplot as plt\n'), ((9851, 9956), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_3', 'x': '"""Offence Level 3 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_3, x='Offence Level 3 Description', y=\n 'Offence count', palette='magma')\n", (9862, 9956), True, 'import seaborn as sns\n'), ((9956, 9999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev3)"""'], {'size': '(20)'}), "('Type of crime (lev3)', size=20)\n", (9966, 9999), True, 'import matplotlib.pyplot as plt\n'), ((10004, 10040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (10014, 10040), True, 'import matplotlib.pyplot as plt\n'), ((10045, 10123), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev3) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev3) 2018/2019', size=36)\n", (10054, 10123), True, 'import matplotlib.pyplot as plt\n'), ((10128, 10143), 'streamlit.pyplot', 'st.pyplot', 
(['fig9'], {}), '(fig9)\n', (10137, 10143), True, 'import streamlit as st\n'), ((10151, 10278), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по третьей классификации за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по третьей классификации за 2018-2019гг. в виде таблицы'\n )\n", (10160, 10278), True, 'import streamlit as st\n'), ((10316, 10388), 'pandas.read_csv', 'pd.read_csv', (['"""Offenders, principal offence of public order offences.csv"""'], {}), "('Offenders, principal offence of public order offences.csv')\n", (10327, 10388), True, 'import pandas as pd\n'), ((10567, 10633), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушителей 2009-2019гг."""'], {}), "('Статистика по количеству правонарушителей 2009-2019гг.')\n", (10575, 10633), True, 'import streamlit as st\n'), ((10652, 10682), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (10664, 10682), True, 'import matplotlib.pyplot as plt\n'), ((10687, 10748), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'din', 'x': '"""Years"""', 'y': '"""Offenders"""', 'color': '"""red"""'}), "(data=din, x='Years', y='Offenders', color='red')\n", (10699, 10748), True, 'import seaborn as sns\n'), ((10753, 10780), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {'size': '(40)'}), "('Year', size=40)\n", (10763, 10780), True, 'import matplotlib.pyplot as plt\n'), ((10785, 10817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offenders"""'], {'size': '(40)'}), "('Offenders', size=40)\n", (10795, 10817), True, 'import matplotlib.pyplot as plt\n'), ((10822, 10862), 'matplotlib.pyplot.title', 'plt.title', (['"""Offenders dinamics"""'], {'size': '(50)'}), "('Offenders dinamics', size=50)\n", (10831, 10862), True, 'import matplotlib.pyplot as plt\n'), ((10867, 10883), 'streamlit.pyplot', 'st.pyplot', (['fig10'], {}), '(fig10)\n', (10876, 10883), True, 
'import streamlit as st\n'), ((10892, 10993), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушителей 2009-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушителей 2009-2019гг. в виде таблицы'\n )\n", (10901, 10993), True, 'import streamlit as st\n'), ((11024, 11046), 'numpy.array', 'np.array', (['[2019, 2020]'], {}), '([2019, 2020])\n', (11032, 11046), True, 'import numpy as np\n'), ((11051, 11170), 'streamlit.write', 'st.write', (['"""(Придётся немного подождать, программа обрабатывает примерно 95тыс. результатов для каждого года)"""'], {}), "(\n '(Придётся немного подождать, программа обрабатывает примерно 95тыс. результатов для каждого года)'\n )\n", (11059, 11170), True, 'import streamlit as st\n'), ((2054, 2074), 'streamlit.dataframe', 'st.dataframe', (['group1'], {}), '(group1)\n', (2066, 2074), True, 'import streamlit as st\n'), ((3066, 3087), 'streamlit.dataframe', 'st.dataframe', (['group12'], {}), '(group12)\n', (3078, 3087), True, 'import streamlit as st\n'), ((4085, 4107), 'streamlit.dataframe', 'st.dataframe', (['group123'], {}), '(group123)\n', (4097, 4107), True, 'import streamlit as st\n'), ((5103, 5131), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 'group1234'}), '(data=group1234)\n', (5115, 5131), True, 'import streamlit as st\n'), ((6036, 6065), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 'group12345'}), '(data=group12345)\n', (6048, 6065), True, 'import streamlit as st\n'), ((7087, 7114), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_1'], {}), '(group_18_19_1)\n', (7099, 7114), True, 'import streamlit as st\n'), ((8149, 8176), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_2'], {}), '(group_18_19_2)\n', (8161, 8176), True, 'import streamlit as st\n'), ((9214, 9241), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_4'], {}), '(group_18_19_4)\n', (9226, 9241), True, 'import streamlit as st\n'), ((10278, 10305), 'streamlit.dataframe', 
'st.dataframe', (['group_18_19_3'], {}), '(group_18_19_3)\n', (10290, 10305), True, 'import streamlit as st\n'), ((10993, 11010), 'streamlit.dataframe', 'st.dataframe', (['din'], {}), '(din)\n', (11005, 11010), True, 'import streamlit as st\n'), ((11524, 11564), 'requests.get', 'requests.get', (['entrypoint1'], {'params': 'query1'}), '(entrypoint1, params=query1)\n', (11536, 11564), False, 'import requests\n'), ((11580, 11609), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r1.text', '"""xml"""'], {}), "(r1.text, 'xml')\n", (11593, 11609), False, 'from bs4 import BeautifulSoup\n'), ((14581, 14622), 'folium.Map', 'folium.Map', ([], {'location': '[a, b]', 'zoom_start': '(8)'}), '(location=[a, b], zoom_start=8)\n', (14591, 14622), False, 'import folium\n'), ((15682, 15700), 'streamlit_folium.folium_static', 'folium_static', (['map'], {}), '(map)\n', (15695, 15700), False, 'from streamlit_folium import folium_static\n'), ((11994, 12007), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (12004, 12007), False, 'import csv\n'), ((13446, 13486), 'requests.get', 'requests.get', (['entrypoint2'], {'params': 'query2'}), '(entrypoint2, params=query2)\n', (13458, 13486), False, 'import requests\n'), ((13507, 13536), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r2.text', '"""xml"""'], {}), "(r2.text, 'xml')\n", (13520, 13536), False, 'from bs4 import BeautifulSoup\n'), ((11806, 11869), 'os.path.join', 'osp.join', (["os.environ['HOME']", '"""Downloads/first_project 2"""', 'file'], {}), "(os.environ['HOME'], 'Downloads/first_project 2', file)\n", (11814, 11869), True, 'import os.path as osp\n'), ((12290, 12305), 'tqdm.tqdm', 'tqdm', (['locations'], {}), '(locations)\n', (12294, 12305), False, 'from tqdm import tqdm\n'), ((14686, 14716), 'folium.plugins.MarkerCluster', 'folium.plugins.MarkerCluster', ([], {}), '()\n', (14714, 14716), False, 'import folium\n')] |
# Generated by Django 2.2.12 on 2020-07-18 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``movie`` app.

    Drops ``Show.plot`` and adds ``Show.genres``/``image``/``name`` text
    fields plus a ``UserRating.position`` integer field.
    """

    dependencies = [
        ('movie', '0002_auto_20200717_1039'),
    ]

    operations = [
        # ``plot`` is superseded by the more specific Show fields added below.
        migrations.RemoveField(
            model_name='show',
            name='plot',
        ),
        migrations.AddField(
            model_name='show',
            name='genres',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='show',
            name='image',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='show',
            name='name',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='userrating',
            name='position',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"django.db.migrations.RemoveField",
"django.db.models.TextField",
"django.db.models.IntegerField"
] | [((234, 288), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""show"""', 'name': '"""plot"""'}), "(model_name='show', name='plot')\n", (256, 288), False, 'from django.db import migrations, models\n'), ((430, 458), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (446, 458), False, 'from django.db import migrations, models\n'), ((575, 603), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (591, 603), False, 'from django.db import migrations, models\n'), ((719, 747), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (735, 747), False, 'from django.db import migrations, models\n'), ((873, 903), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (892, 903), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-10-28 上午11:51
# @Author : Vitan
# @File : mao.py
import requests
import re
import json
from multiprocessing import Pool
from requests.exceptions import RequestException
def get_one_page(url):
    """Fetch *url* and return its HTML body.

    Returns ``None`` on any network error or on a non-200 status code.
    """
    # Desktop-browser UA string: the site rejects the default requests UA.
    ua_headers = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'}
    try:
        response = requests.get(url, headers=ua_headers)
        return response.text if response.status_code == 200 else None
    except RequestException:
        return None
def parse_one_page(html):
    """Yield one movie record (dict) per ``<dd>`` ranking entry in *html*.

    Each record carries rank, title, lead actors (with the '主演:' prefix
    stripped), release date (with the '上映时间:' prefix stripped), and the
    score assembled from its integer and fraction parts.
    """
    # re.S lets '.' cross newlines, since each <dd> entry spans several lines.
    pattern = re.compile(
        '<dd>.*?board-index.*?>(\d+)</i>'
        '.*?<p.*?title="(.*?)".*?</p>.*?star">(.*?)</p>'
        '.*?releasetime">(.*?)</p>.*?integer">(.*?)'
        '<.*?fraction">(.*?)</i>',
        re.S,
    )
    for rank, title, stars, released, whole, frac in re.findall(pattern, html):
        yield {
            '排名': rank,
            '电影名': title,
            '主演': stars.strip()[3:],
            '上映时间': released[5:],
            '评分': whole + frac,
        }
def write_to_txt(content):
    """Append *content* to movies.txt as one JSON line (UTF-8)."""
    # Open in append mode; ensure_ascii=False keeps Chinese text readable.
    with open('movies.txt', 'a', encoding='utf8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
    # The original called f.close() here as well, which is redundant:
    # the with-statement already closes the file on exit.
def main(offset):
    """Scrape one Maoyan board page at *offset* and persist every movie found."""
    page_url = 'http://maoyan.com/board/4?offset=' + str(offset)
    page_html = get_one_page(page_url)
    for movie in parse_one_page(page_html):
        print(movie)
        write_to_txt(movie)
if __name__ == '__main__':
    # Fan the ten board pages (offsets 0, 10, ..., 90) out over a process pool.
    pool = Pool()
    offsets = [page * 10 for page in range(10)]
    pool.map(main, offsets)
    pool.close()
pool.join() | [
"re.compile",
"json.dumps",
"requests.get",
"multiprocessing.Pool",
"re.findall"
] | [((690, 880), 're.compile', 're.compile', (['(\'<dd>.*?board-index.*?>(\\\\d+)</i>\' +\n \'.*?<p.*?title="(.*?)".*?</p>.*?star">(.*?)</p>\' +\n \'.*?releasetime">(.*?)</p>.*?integer">(.*?)\' + \'<.*?fraction">(.*?)</i>\')', 're.S'], {}), '(\'<dd>.*?board-index.*?>(\\\\d+)</i>\' +\n \'.*?<p.*?title="(.*?)".*?</p>.*?star">(.*?)</p>\' +\n \'.*?releasetime">(.*?)</p>.*?integer">(.*?)\' +\n \'<.*?fraction">(.*?)</i>\', re.S)\n', (700, 880), False, 'import re\n'), ((896, 921), 're.findall', 're.findall', (['pattern', 'html'], {}), '(pattern, html)\n', (906, 921), False, 'import re\n'), ((1608, 1614), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (1612, 1614), False, 'from multiprocessing import Pool\n'), ((440, 474), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (452, 474), False, 'import requests\n'), ((1300, 1339), 'json.dumps', 'json.dumps', (['content'], {'ensure_ascii': '(False)'}), '(content, ensure_ascii=False)\n', (1310, 1339), False, 'import json\n')] |
from distutils.core import setup

# Packaging metadata for the shorte.st URL-shortener client library.
PACKAGE_INFO = dict(
    name='shortest-python',
    packages=['shortest'],
    version='0.1',
    description='Python library for shorte.st url shortener',
    long_description='More on github: https://github.com/CubexX/shortest-python',
    author='CubexX',
    author_email='<EMAIL>',
    url='https://github.com/CubexX/shortest-python',
    keywords=['shortest', 'shorte.st', 'links'],
    license='MIT License',
)

setup(**PACKAGE_INFO)
| [
"distutils.core.setup"
] | [((34, 422), 'distutils.core.setup', 'setup', ([], {'name': '"""shortest-python"""', 'packages': "['shortest']", 'version': '"""0.1"""', 'description': '"""Python library for shorte.st url shortener"""', 'long_description': '"""More on github: https://github.com/CubexX/shortest-python"""', 'author': '"""CubexX"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/CubexX/shortest-python"""', 'keywords': "['shortest', 'shorte.st', 'links']", 'license': '"""MIT License"""'}), "(name='shortest-python', packages=['shortest'], version='0.1',\n description='Python library for shorte.st url shortener',\n long_description=\n 'More on github: https://github.com/CubexX/shortest-python', author=\n 'CubexX', author_email='<EMAIL>', url=\n 'https://github.com/CubexX/shortest-python', keywords=['shortest',\n 'shorte.st', 'links'], license='MIT License')\n", (39, 422), False, 'from distutils.core import setup\n')] |
"""
Implementation of vegas+ algorithm:
adaptive importance sampling + adaptive stratified sampling
from https://arxiv.org/abs/2009.05112
The main interface is the `VegasFlowPlus` class.
"""
from itertools import product
import numpy as np
import tensorflow as tf
from vegasflow.configflow import (
DTYPE,
DTYPEINT,
fone,
fzero,
float_me,
int_me,
BINS_MAX,
BETA,
MAX_NEVAL_HCUBE,
)
from vegasflow.monte_carlo import wrapper, sampler, MonteCarloFlow
from vegasflow.vflow import VegasFlow, importance_sampling_digest
from vegasflow.utils import consume_array_into_indices
import logging
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# Total number of grid bins as a TF float constant, used to scale points onto the grid.
FBINS = float_me(BINS_MAX)
@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None, None], dtype=DTYPE),
        tf.TensorSpec(shape=[], dtype=DTYPEINT),
        tf.TensorSpec(shape=[None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[None, None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[None, None], dtype=DTYPE),
    ]
)
def generate_samples_in_hypercubes(rnds, n_strat, n_ev, hypercubes, divisions):
    """Receives an array of random numbers between 0 and 1 and
    distributes them in each hypercube according to the
    number of samples in each hypercube specified by n_ev.

    Parameters
    ----------
        `rnds`: tensor of random numbers between 0 and 1
        `n_strat`: tensor with number of stratifications in each dimension
        `n_ev`: tensor containing number of samples per hypercube
        `hypercubes`: tensor containing all different hypercubes
        `divisions`: vegas grid

    Returns
    -------
        `x` : random numbers collocated in hypercubes
        `w` : weight of each event
        `ind`: division index in which each (n_dim) set of random numbers fall
        `segm` : segmentation for later computations
    """
    # Use the event-per-hypercube information to fix each random event to a hypercube:
    # hypercube i is repeated n_ev[i] times so every event knows its own cube.
    indices = tf.repeat(tf.range(tf.shape(hypercubes, out_type=DTYPEINT)[0]), n_ev)
    points = float_me(tf.gather(hypercubes, indices))
    n_evs = float_me(tf.gather(n_ev, indices))

    # Compute in which division of the importance_sampling grid the points fall:
    # (cube corner + random offset) / n_strat maps into [0, 1), scaled by the bin count
    xn = tf.transpose(points + rnds) * FBINS / float_me(n_strat)
    ind_xn, x, weights = importance_sampling_digest(xn, divisions)

    # Reweight taking into account the number of events per hypercube
    final_weights = weights / n_evs
    segm = indices
    return x, ind_xn, final_weights, segm
class VegasFlowPlus(VegasFlow):
    """
    Implementation of the VEGAS+ algorithm: importance sampling
    (inherited from ``VegasFlow``) combined with adaptive stratified
    sampling over a grid of hypercubes (https://arxiv.org/abs/2009.05112).
    """

    def __init__(self, n_dim, n_events, train=True, adaptive=False, events_limit=None, **kwargs):
        """
        Parameters
        ----------
            `n_dim`: number of dimensions of the integrand
            `n_events`: total number of events per iteration
            `train`: whether to train the importance-sampling grid
            `adaptive`: whether to adapt the number of samples per hypercube
            `events_limit`: events-per-device limit; forced to `n_events`
                because all events must be held in memory at once
        """
        # https://github.com/N3PDF/vegasflow/issues/78
        if events_limit is None:
            logger.info("Events per device limit set to %d", n_events)
            events_limit = n_events
        elif events_limit < n_events:
            logger.warning("VegasFlowPlus needs to hold all events in memory at once, "
                    "setting the `events_limit` to be equal to `n_events=%d`", n_events)
            events_limit = n_events
        super().__init__(n_dim, n_events, train, events_limit=events_limit, **kwargs)

        # Save the initial number of events
        self._init_calls = n_events

        # Don't use adaptive if the number of dimensions is too big
        if n_dim > 13 and adaptive:
            self._adaptive = False
            logger.warning("Disabling adaptive mode from VegasFlowPlus, too many dimensions!")
        else:
            self._adaptive = adaptive

        # Initialize stratifications: n_strat subdivisions per dimension
        if self._adaptive:
            neval_eff = int(self.n_events / 2)
            self._n_strat = tf.math.floor(tf.math.pow(neval_eff / 2, 1 / n_dim))
        else:
            neval_eff = self.n_events
            self._n_strat = tf.math.floor(tf.math.pow(neval_eff / 2, 1 / n_dim))

        # Cap the total number of hypercubes at MAX_NEVAL_HCUBE
        if tf.math.pow(self._n_strat, n_dim) > MAX_NEVAL_HCUBE:
            self._n_strat = tf.math.floor(tf.math.pow(1e4, 1 / n_dim))
        self._n_strat = int_me(self._n_strat)

        # Initialize hypercubes: the cartesian product of per-dimension indices
        hypercubes_one_dim = np.arange(0, int(self._n_strat))
        hypercubes = [list(p) for p in product(hypercubes_one_dim, repeat=int(n_dim))]
        self._hypercubes = tf.convert_to_tensor(hypercubes, dtype=DTYPEINT)

        if len(hypercubes) != int(tf.math.pow(self._n_strat, n_dim)):
            raise ValueError("Hypercubes are not equal to n_strat^n_dim")

        # At least 2 events per hypercube so a variance can be estimated
        self.min_neval_hcube = int(neval_eff // len(hypercubes))
        self.min_neval_hcube = max(self.min_neval_hcube, 2)

        # n_ev holds the (flat) number of samples assigned to each hypercube
        self.n_ev = tf.fill([1, len(hypercubes)], self.min_neval_hcube)
        self.n_ev = int_me(tf.reshape(self.n_ev, [-1]))
        self._n_events = int(tf.reduce_sum(self.n_ev))
        # Constant part of the jacobian: 1 / (number of hypercubes)
        self.my_xjac = float_me(1 / len(hypercubes))

        if self._adaptive:
            logger.warning("Variable number of events requires function signatures all across")

    def redistribute_samples(self, arr_var):
        """Receives an array with the variance of the integrand in each
        hypercube and recalculates the samples per hypercube according
        to the VEGAS+ algorithm (damped by the BETA exponent)."""
        damped_arr_sdev = tf.pow(arr_var, BETA / 2)
        new_n_ev = tf.maximum(
            self.min_neval_hcube,
            damped_arr_sdev * self._init_calls / 2 / tf.reduce_sum(damped_arr_sdev),
        )
        self.n_ev = int_me(new_n_ev)
        self.n_events = int(tf.reduce_sum(self.n_ev))

    def _generate_random_array(self, n_events):
        """Interface compatible with other algorithms, dropping the segmentation in hypercubes"""
        x, ind, w, _ = self._generate_random_array_plus(n_events, self.n_ev)
        return x, ind, w

    def _generate_random_array_plus(self, n_events, n_ev):
        """Generate a random array for a given number of events divided in hypercubes"""
        # Needs to skip parent and go directly to the random array generation of MonteCarloFlow
        rnds, _, _ = MonteCarloFlow._generate_random_array(self, n_events)
        # Get random numbers from hypercubes
        x, ind, w, segm = generate_samples_in_hypercubes(
            rnds,
            self._n_strat,
            n_ev,
            self._hypercubes,
            self.divisions,
        )
        return x, ind, w * self.my_xjac, segm

    def _run_event(self, integrand, ncalls=None, n_ev=None):
        """Run one step of VegasFlowPlus.
        Similar to the event step for importance sampling VegasFlow,
        adding the n_ev argument for the segmentation into hypercubes.
        n_ev is a tensor containing the number of samples per hypercube.

        Parameters
        ----------
            `integrand`: function to integrate
            `ncalls`: how many events to run in this step
            `n_ev`: number of samples per hypercube

        Returns
        -------
            `ress`: sum of the result of the integrand for all events per segment
            `arr_var`: variance of the integrand per hypercube
            `arr_res2`: result of the integrand squared per dimension and grid bin
        """
        # NOTE: needs to receive both ncalls and n_ev
        x, ind, xjac, segm = self._generate_random_array_plus(ncalls, n_ev)

        # compute integrand
        tmp = xjac * integrand(x, weight=xjac)
        tmp2 = tf.square(tmp)

        # tensor containing resummed component for each hypercube
        ress = tf.math.segment_sum(tmp, segm)
        ress2 = tf.math.segment_sum(tmp2, segm)

        fn_ev = float_me(n_ev)
        # Per-hypercube variance estimate: n * sum(f^2) - (sum f)^2
        arr_var = ress2 * fn_ev - tf.square(ress)

        arr_res2 = self._importance_sampling_array_filling(tmp2, ind)
        return ress, arr_var, arr_res2

    def _iteration_content(self):
        """Steps to follow per iteration.
        Differently from importance-sampling Vegas, the result of the integration
        is a result _per segment_ and thus the total result needs to be computed at this point.
        """
        ress, arr_var, arr_res2 = self.run_event(n_ev=self.n_ev)

        # Compute the error
        sigmas2 = tf.maximum(arr_var, fzero)
        res = tf.reduce_sum(ress)
        sigma2 = tf.reduce_sum(sigmas2 / (float_me(self.n_ev) - fone))
        sigma = tf.sqrt(sigma2)

        # If adaptive is active redistribute the samples
        if self._adaptive:
            self.redistribute_samples(arr_var)

        if self.train:
            self.refine_grid(arr_res2)
        return res, sigma

    def run_event(self, tensorize_events=None, **kwargs):
        """Tensorizes the number of events
        so they are not python or numpy primitives if self._adaptive=True"""
        return super().run_event(tensorize_events=self._adaptive, **kwargs)
def vegasflowplus_wrapper(integrand, n_dim, n_iter, total_n_events, **kwargs):
    """Convenience wrapper around ``VegasFlowPlus``

    Parameters
    ----------
        `integrand`: tf.function
        `n_dim`: number of dimensions
        `n_iter`: number of iterations
        `total_n_events`: number of events per iteration

    Returns
    -------
        `final_result`: integral value
        `sigma`: monte carlo error
    """
    return wrapper(VegasFlowPlus, integrand, n_dim, n_iter, total_n_events, **kwargs)
def vegasflowplus_sampler(*args, **kwargs):
    """Convenience wrapper for sampling random numbers with ``VegasFlowPlus``.

    All arguments are forwarded to the integrator, typically:
        `integrand`: tf.function
        `n_dim`: number of dimensions
        `n_events`: number of events per iteration
        `training_steps`: number of training_iterations

    Returns
    -------
        `sampler`: a reference to the generate_random_array method of the integrator class
    """
    return sampler(VegasFlowPlus, *args, **kwargs)
| [
"logging.getLogger",
"tensorflow.math.pow",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"vegasflow.monte_carlo.sampler",
"tensorflow.pow",
"tensorflow.maximum",
"tensorflow.square",
"tensorflow.convert_to_tensor",
"vegasflow.vflow.importance_sampling_digest",
"tensorfl... | [((651, 678), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (668, 678), False, 'import logging\n'), ((688, 706), 'vegasflow.configflow.float_me', 'float_me', (['BINS_MAX'], {}), '(BINS_MAX)\n', (696, 706), False, 'from vegasflow.configflow import DTYPE, DTYPEINT, fone, fzero, float_me, int_me, BINS_MAX, BETA, MAX_NEVAL_HCUBE\n'), ((2295, 2336), 'vegasflow.vflow.importance_sampling_digest', 'importance_sampling_digest', (['xn', 'divisions'], {}), '(xn, divisions)\n', (2321, 2336), False, 'from vegasflow.vflow import VegasFlow, importance_sampling_digest\n'), ((9201, 9275), 'vegasflow.monte_carlo.wrapper', 'wrapper', (['VegasFlowPlus', 'integrand', 'n_dim', 'n_iter', 'total_n_events'], {}), '(VegasFlowPlus, integrand, n_dim, n_iter, total_n_events, **kwargs)\n', (9208, 9275), False, 'from vegasflow.monte_carlo import wrapper, sampler, MonteCarloFlow\n'), ((9721, 9760), 'vegasflow.monte_carlo.sampler', 'sampler', (['VegasFlowPlus', '*args'], {}), '(VegasFlowPlus, *args, **kwargs)\n', (9728, 9760), False, 'from vegasflow.monte_carlo import wrapper, sampler, MonteCarloFlow\n'), ((2044, 2074), 'tensorflow.gather', 'tf.gather', (['hypercubes', 'indices'], {}), '(hypercubes, indices)\n', (2053, 2074), True, 'import tensorflow as tf\n'), ((2097, 2121), 'tensorflow.gather', 'tf.gather', (['n_ev', 'indices'], {}), '(n_ev, indices)\n', (2106, 2121), True, 'import tensorflow as tf\n'), ((2251, 2268), 'vegasflow.configflow.float_me', 'float_me', (['n_strat'], {}), '(n_strat)\n', (2259, 2268), False, 'from vegasflow.configflow import DTYPE, DTYPEINT, fone, fzero, float_me, int_me, BINS_MAX, BETA, MAX_NEVAL_HCUBE\n'), ((4085, 4106), 'vegasflow.configflow.int_me', 'int_me', (['self._n_strat'], {}), '(self._n_strat)\n', (4091, 4106), False, 'from vegasflow.configflow import DTYPE, DTYPEINT, fone, fzero, float_me, int_me, BINS_MAX, BETA, MAX_NEVAL_HCUBE\n'), ((4316, 4364), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', 
(['hypercubes'], {'dtype': 'DTYPEINT'}), '(hypercubes, dtype=DTYPEINT)\n', (4336, 4364), True, 'import tensorflow as tf\n'), ((5242, 5267), 'tensorflow.pow', 'tf.pow', (['arr_var', '(BETA / 2)'], {}), '(arr_var, BETA / 2)\n', (5248, 5267), True, 'import tensorflow as tf\n'), ((5448, 5464), 'vegasflow.configflow.int_me', 'int_me', (['new_n_ev'], {}), '(new_n_ev)\n', (5454, 5464), False, 'from vegasflow.configflow import DTYPE, DTYPEINT, fone, fzero, float_me, int_me, BINS_MAX, BETA, MAX_NEVAL_HCUBE\n'), ((6033, 6086), 'vegasflow.monte_carlo.MonteCarloFlow._generate_random_array', 'MonteCarloFlow._generate_random_array', (['self', 'n_events'], {}), '(self, n_events)\n', (6070, 6086), False, 'from vegasflow.monte_carlo import wrapper, sampler, MonteCarloFlow\n'), ((7397, 7411), 'tensorflow.square', 'tf.square', (['tmp'], {}), '(tmp)\n', (7406, 7411), True, 'import tensorflow as tf\n'), ((7495, 7525), 'tensorflow.math.segment_sum', 'tf.math.segment_sum', (['tmp', 'segm'], {}), '(tmp, segm)\n', (7514, 7525), True, 'import tensorflow as tf\n'), ((7542, 7573), 'tensorflow.math.segment_sum', 'tf.math.segment_sum', (['tmp2', 'segm'], {}), '(tmp2, segm)\n', (7561, 7573), True, 'import tensorflow as tf\n'), ((7591, 7605), 'vegasflow.configflow.float_me', 'float_me', (['n_ev'], {}), '(n_ev)\n', (7599, 7605), False, 'from vegasflow.configflow import DTYPE, DTYPEINT, fone, fzero, float_me, int_me, BINS_MAX, BETA, MAX_NEVAL_HCUBE\n'), ((8142, 8168), 'tensorflow.maximum', 'tf.maximum', (['arr_var', 'fzero'], {}), '(arr_var, fzero)\n', (8152, 8168), True, 'import tensorflow as tf\n'), ((8183, 8202), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['ress'], {}), '(ress)\n', (8196, 8202), True, 'import tensorflow as tf\n'), ((8290, 8305), 'tensorflow.sqrt', 'tf.sqrt', (['sigma2'], {}), '(sigma2)\n', (8297, 8305), True, 'import tensorflow as tf\n'), ((2213, 2240), 'tensorflow.transpose', 'tf.transpose', (['(points + rnds)'], {}), '(points + rnds)\n', (2225, 2240), True, 'import tensorflow 
as tf\n'), ((753, 799), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]', 'dtype': 'DTYPE'}), '(shape=[None, None], dtype=DTYPE)\n', (766, 799), True, 'import tensorflow as tf\n'), ((809, 848), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[]', 'dtype': 'DTYPEINT'}), '(shape=[], dtype=DTYPEINT)\n', (822, 848), True, 'import tensorflow as tf\n'), ((858, 901), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None]', 'dtype': 'DTYPEINT'}), '(shape=[None], dtype=DTYPEINT)\n', (871, 901), True, 'import tensorflow as tf\n'), ((911, 960), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]', 'dtype': 'DTYPEINT'}), '(shape=[None, None], dtype=DTYPEINT)\n', (924, 960), True, 'import tensorflow as tf\n'), ((970, 1016), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, None]', 'dtype': 'DTYPE'}), '(shape=[None, None], dtype=DTYPE)\n', (983, 1016), True, 'import tensorflow as tf\n'), ((3936, 3969), 'tensorflow.math.pow', 'tf.math.pow', (['self._n_strat', 'n_dim'], {}), '(self._n_strat, n_dim)\n', (3947, 3969), True, 'import tensorflow as tf\n'), ((4736, 4763), 'tensorflow.reshape', 'tf.reshape', (['self.n_ev', '[-1]'], {}), '(self.n_ev, [-1])\n', (4746, 4763), True, 'import tensorflow as tf\n'), ((4794, 4818), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.n_ev'], {}), '(self.n_ev)\n', (4807, 4818), True, 'import tensorflow as tf\n'), ((5493, 5517), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.n_ev'], {}), '(self.n_ev)\n', (5506, 5517), True, 'import tensorflow as tf\n'), ((7640, 7655), 'tensorflow.square', 'tf.square', (['ress'], {}), '(ress)\n', (7649, 7655), True, 'import tensorflow as tf\n'), ((1971, 2010), 'tensorflow.shape', 'tf.shape', (['hypercubes'], {'out_type': 'DTYPEINT'}), '(hypercubes, out_type=DTYPEINT)\n', (1979, 2010), True, 'import tensorflow as tf\n'), ((3752, 3789), 'tensorflow.math.pow', 'tf.math.pow', (['(neval_eff / 2)', '(1 / n_dim)'], {}), '(neval_eff / 2, 1 / 
n_dim)\n', (3763, 3789), True, 'import tensorflow as tf\n'), ((3885, 3922), 'tensorflow.math.pow', 'tf.math.pow', (['(neval_eff / 2)', '(1 / n_dim)'], {}), '(neval_eff / 2, 1 / n_dim)\n', (3896, 3922), True, 'import tensorflow as tf\n'), ((4031, 4062), 'tensorflow.math.pow', 'tf.math.pow', (['(10000.0)', '(1 / n_dim)'], {}), '(10000.0, 1 / n_dim)\n', (4042, 4062), True, 'import tensorflow as tf\n'), ((4400, 4433), 'tensorflow.math.pow', 'tf.math.pow', (['self._n_strat', 'n_dim'], {}), '(self._n_strat, n_dim)\n', (4411, 4433), True, 'import tensorflow as tf\n'), ((5386, 5416), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['damped_arr_sdev'], {}), '(damped_arr_sdev)\n', (5399, 5416), True, 'import tensorflow as tf\n'), ((8245, 8264), 'vegasflow.configflow.float_me', 'float_me', (['self.n_ev'], {}), '(self.n_ev)\n', (8253, 8264), False, 'from vegasflow.configflow import DTYPE, DTYPEINT, fone, fzero, float_me, int_me, BINS_MAX, BETA, MAX_NEVAL_HCUBE\n')] |
#!/usr/bin/env python
'''
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.
'''
import os,sys,datetime,multiprocessing
from os.path import abspath,dirname,realpath,join
import log,traceback
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
    """Locate *program* on disk, mimicking the shell ``which`` command.

    If *program* contains a directory component it is accepted as-is when
    executable; otherwise every $PATH entry is searched. Returns the path
    of the first executable match, or None when nothing is found.
    """
    def _runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        if _runnable(program):
            return program
    elif 'PATH' in os.environ:
        for entry in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if _runnable(candidate):
                return candidate
    return None
def check(args, argv):
    """Sanity-check the execution environment and input files.

    Verifies, in order: the Python version (>= 3.7), required external
    executables on $PATH, the presence of the BAM/CRAM input (and the
    reference genome when a CRAM is used), and the importability of
    prerequisite modules.  Exits with status 1 on the first failure.
    """
    log.logger.debug('started')
    try:
        log.logger.debug('command line:\n'+ ' '.join(argv))
        # check python version
        version=sys.version_info
        # Compare (major, minor) as a tuple: the original per-component
        # test `version[0] >= 3 and version[1] >= 7` would wrongly
        # reject a hypothetical Python 4.0.
        if version >= (3, 7):
            log.logger.debug('Python version=%d.%d.%d' % (version[0], version[1], version[2]))
        else:
            log.logger.error('Please use Python 3.7 or later. Your Python is version %d.%d.' % (version[0], version[1]))
            exit(1)
        # check that required external tools are reachable via $PATH
        for i in ['blastn', 'bedtools']:
            if which(i) is None:
                log.logger.error('%s not found in $PATH. Please check %s is installed and added to PATH.' % (i, i))
                exit(1)
        # check input alignment files: either a CRAM or a BAM must be given
        if args.c is not None:
            if os.path.exists(args.c) is False:
                log.logger.error('CRAM file (%s) was not found.' % args.c)
                exit(1)
        elif args.b is not None:
            if os.path.exists(args.b) is False:
                log.logger.error('BAM file (%s) was not found.' % args.b)
                exit(1)
        else:
            log.logger.error('Please specify BAM or CRAM file (-b or -c option).')
            exit(1)
        # a CRAM additionally needs its reference genome for decoding
        if args.c is not None:
            if args.fa is None:
                log.logger.error('Reference genome (%s) was not specified.' % args.fa)
                exit(1)
            elif os.path.exists(args.fa) is False:
                log.logger.error('Reference genome (%s) was not found.' % args.fa)
                exit(1)
        # check prerequisite modules are importable
        from Bio.Seq import Seq
        import gzip
        from pybedtools import BedTool
        import matplotlib
        import pysam
    # `except Exception` (not a bare except) so that the SystemExit raised
    # by the exit(1) calls above propagates instead of being logged as a
    # spurious traceback before exiting.
    except Exception:
        log.logger.error('\n'+ traceback.format_exc())
        exit(1)
| [
"os.path.exists",
"traceback.format_exc",
"log.logger.debug",
"os.access",
"os.path.join",
"os.path.split",
"os.path.isfile",
"log.logger.error"
] | [((428, 450), 'os.path.split', 'os.path.split', (['program'], {}), '(program)\n', (441, 450), False, 'import os, sys, datetime, multiprocessing\n'), ((804, 831), 'log.logger.debug', 'log.logger.debug', (['"""started"""'], {}), "('started')\n", (820, 831), False, 'import log, traceback\n'), ((357, 378), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (371, 378), False, 'import os, sys, datetime, multiprocessing\n'), ((383, 408), 'os.access', 'os.access', (['fpath', 'os.X_OK'], {}), '(fpath, os.X_OK)\n', (392, 408), False, 'import os, sys, datetime, multiprocessing\n'), ((1029, 1115), 'log.logger.debug', 'log.logger.debug', (["('Python version=%d.%d.%d' % (version[0], version[1], version[2]))"], {}), "('Python version=%d.%d.%d' % (version[0], version[1],\n version[2]))\n", (1045, 1115), False, 'import log, traceback\n'), ((1138, 1256), 'log.logger.error', 'log.logger.error', (["('Please use Python 3.7 or later. Your Python is version %d.%d.' % (version\n [0], version[1]))"], {}), "(\n 'Please use Python 3.7 or later. Your Python is version %d.%d.' % (\n version[0], version[1]))\n", (1154, 1256), False, 'import log, traceback\n'), ((667, 694), 'os.path.join', 'os.path.join', (['path', 'program'], {}), '(path, program)\n', (679, 694), False, 'import os, sys, datetime, multiprocessing\n'), ((1395, 1503), 'log.logger.error', 'log.logger.error', (["('%s not found in $PATH. Please check %s is installed and added to PATH.' %\n (i, i))"], {}), "(\n '%s not found in $PATH. Please check %s is installed and added to PATH.' %\n (i, i))\n", (1411, 1503), False, 'import log, traceback\n'), ((1596, 1618), 'os.path.exists', 'os.path.exists', (['args.c'], {}), '(args.c)\n', (1610, 1618), False, 'import os, sys, datetime, multiprocessing\n'), ((1645, 1703), 'log.logger.error', 'log.logger.error', (["('CRAM file (%s) was not found.' % args.c)"], {}), "('CRAM file (%s) was not found.' 
% args.c)\n", (1661, 1703), False, 'import log, traceback\n'), ((1933, 2003), 'log.logger.error', 'log.logger.error', (['"""Please specify BAM or CRAM file (-b or -c option)."""'], {}), "('Please specify BAM or CRAM file (-b or -c option).')\n", (1949, 2003), False, 'import log, traceback\n'), ((2116, 2186), 'log.logger.error', 'log.logger.error', (["('Reference genome (%s) was not specified.' % args.fa)"], {}), "('Reference genome (%s) was not specified.' % args.fa)\n", (2132, 2186), False, 'import log, traceback\n'), ((1776, 1798), 'os.path.exists', 'os.path.exists', (['args.b'], {}), '(args.b)\n', (1790, 1798), False, 'import os, sys, datetime, multiprocessing\n'), ((1825, 1882), 'log.logger.error', 'log.logger.error', (["('BAM file (%s) was not found.' % args.b)"], {}), "('BAM file (%s) was not found.' % args.b)\n", (1841, 1882), False, 'import log, traceback\n'), ((2228, 2251), 'os.path.exists', 'os.path.exists', (['args.fa'], {}), '(args.fa)\n', (2242, 2251), False, 'import os, sys, datetime, multiprocessing\n'), ((2278, 2344), 'log.logger.error', 'log.logger.error', (["('Reference genome (%s) was not found.' % args.fa)"], {}), "('Reference genome (%s) was not found.' % args.fa)\n", (2294, 2344), False, 'import log, traceback\n'), ((2605, 2627), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2625, 2627), False, 'import log, traceback\n')] |
from pydantic_avro.avro_to_pydantic import avsc_to_pydantic
def test_avsc_to_pydantic_empty():
    """A record with no fields maps to a class containing only `pass`."""
    schema = {"name": "Test", "type": "record", "fields": []}
    generated = avsc_to_pydantic(schema)
    assert "class Test(BaseModel):\n    pass" in generated
def test_avsc_to_pydantic_primitive():
    """Each avro primitive maps onto the matching Python scalar type."""
    fields = [
        {"name": "col1", "type": "string"},
        {"name": "col2", "type": "int"},
        {"name": "col3", "type": "long"},
        {"name": "col4", "type": "double"},
        {"name": "col5", "type": "float"},
        {"name": "col6", "type": "boolean"},
    ]
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": fields})
    expected = (
        "class Test(BaseModel):\n"
        "    col1: str\n"
        "    col2: int\n"
        "    col3: int\n"
        "    col4: float\n"
        "    col5: float\n"
        "    col6: bool"
    )
    assert expected in generated
def test_avsc_to_pydantic_map():
    """An avro map of strings becomes Dict[str, str]."""
    field = {"name": "col1", "type": {"type": "map", "values": "string", "default": {}}}
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": [field]})
    assert "class Test(BaseModel):\n    col1: Dict[str, str]" in generated
def test_avsc_to_pydantic_map_nested_object():
    """Map values that are records yield both the outer and the nested class."""
    nested = {"type": "record", "name": "Nested", "fields": [{"name": "col1", "type": "string"}]}
    field = {"name": "col1", "type": {"type": "map", "values": nested, "default": {}}}
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": [field]})
    assert "class Test(BaseModel):\n    col1: Dict[str, Nested]" in generated
    assert "class Nested(BaseModel):\n    col1: str" in generated
def test_avsc_to_pydantic_map_nested_array():
    """Map values that are arrays yield Dict[str, List[...]]."""
    field = {
        "name": "col1",
        "type": {"type": "map", "values": {"type": "array", "items": "string"}, "default": {}},
    }
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": [field]})
    assert "class Test(BaseModel):\n    col1: Dict[str, List[str]]" in generated
def test_avsc_to_pydantic_logical():
    """Avro logical types map onto date / time / datetime."""
    fields = [
        {"name": "col1", "type": {"type": "int", "logicalType": "date"}},
        {"name": "col2", "type": {"type": "long", "logicalType": "time-micros"}},
        {"name": "col3", "type": {"type": "long", "logicalType": "time-millis"}},
        {"name": "col4", "type": {"type": "long", "logicalType": "timestamp-micros"}},
        {"name": "col5", "type": {"type": "long", "logicalType": "timestamp-millis"}},
    ]
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": fields})
    expected = (
        "class Test(BaseModel):\n"
        "    col1: date\n"
        "    col2: time\n"
        "    col3: time\n"
        "    col4: datetime\n"
        "    col5: datetime"
    )
    assert expected in generated
def test_avsc_to_pydantic_complex():
    """Nested records and arrays resolve to class references and List[...]."""
    fields = [
        {"name": "col1", "type": {"name": "Nested", "type": "record", "fields": []}},
        {"name": "col2", "type": {"type": "array", "items": "int"}},
        {"name": "col3", "type": {"type": "array", "items": "Nested"}},
    ]
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": fields})
    expected = (
        "class Test(BaseModel):\n"
        "    col1: Nested\n"
        "    col2: List[int]\n"
        "    col3: List[Nested]\n"
    )
    assert expected in generated
    assert "class Nested(BaseModel):\n    pass\n" in generated
def test_default():
    """Avro field defaults are carried over as pydantic defaults."""
    fields = [
        {"name": "col1", "type": "string", "default": "test"},
        {"name": "col2", "type": ["string", "null"], "default": None},
        {"name": "col3", "type": {"type": "map", "values": "string"}, "default": {"key": "value"}},
        {"name": "col4", "type": "boolean", "default": True},
        {"name": "col5", "type": "boolean", "default": False},
    ]
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": fields})
    expected = (
        "class Test(BaseModel):\n"
        '    col1: str = "test"\n'
        "    col2: Optional[str] = None\n"
        '    col3: Dict[str, str] = {"key": "value"}\n'
        "    col4: bool = True\n"
        "    col5: bool = False\n"
    )
    assert expected in generated
def test_enums():
    """An avro enum becomes a str-based Enum class."""
    field = {"name": "c1", "type": {"type": "enum", "symbols": ["passed", "failed"], "name": "Status"}}
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": [field]})
    assert "class Test(BaseModel):\n    c1: Status" in generated
    assert 'class Status(str, Enum):\n    passed = "passed"\n    failed = "failed"' in generated
def test_enums_reuse():
    """A named enum can be referenced again by name in later fields."""
    fields = [
        {"name": "c1", "type": {"type": "enum", "symbols": ["passed", "failed"], "name": "Status"}},
        {"name": "c2", "type": "Status"},
    ]
    generated = avsc_to_pydantic({"name": "Test", "type": "record", "fields": fields})
    assert "class Test(BaseModel):\n    c1: Status\n    c2: Status" in generated
    assert 'class Status(str, Enum):\n    passed = "passed"\n    failed = "failed"' in generated
| [
"pydantic_avro.avro_to_pydantic.avsc_to_pydantic"
] | [((117, 183), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': []}"], {}), "({'name': 'Test', 'type': 'record', 'fields': []})\n", (133, 183), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((308, 595), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type':\n 'string'}, {'name': 'col2', 'type': 'int'}, {'name': 'col3', 'type':\n 'long'}, {'name': 'col4', 'type': 'double'}, {'name': 'col5', 'type':\n 'float'}, {'name': 'col6', 'type': 'boolean'}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'col1', 'type': 'string'}, {'name': 'col2', 'type': 'int'}, {'name':\n 'col3', 'type': 'long'}, {'name': 'col4', 'type': 'double'}, {'name':\n 'col5', 'type': 'float'}, {'name': 'col6', 'type': 'boolean'}]})\n", (324, 595), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((1041, 1187), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type': {\n 'type': 'map', 'values': 'string', 'default': {}}}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'col1', 'type': {'type': 'map', 'values': 'string', 'default': {}}}]})\n", (1057, 1187), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((1427, 1658), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type': {\n 'type': 'map', 'values': {'type': 'record', 'name': 'Nested', 'fields':\n [{'name': 'col1', 'type': 'string'}]}, 'default': {}}}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'col1', 'type': {'type': 'map', 'values': {'type': 'record', 'name':\n 'Nested', 'fields': [{'name': 'col1', 'type': 'string'}]}, 'default': {\n }}}]})\n", (1443, 1658), False, 'from 
pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((2118, 2296), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type': {\n 'type': 'map', 'values': {'type': 'array', 'items': 'string'},\n 'default': {}}}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'col1', 'type': {'type': 'map', 'values': {'type': 'array', 'items':\n 'string'}, 'default': {}}}]})\n", (2134, 2296), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((2765, 3226), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type': {\n 'type': 'int', 'logicalType': 'date'}}, {'name': 'col2', 'type': {\n 'type': 'long', 'logicalType': 'time-micros'}}, {'name': 'col3', 'type':\n {'type': 'long', 'logicalType': 'time-millis'}}, {'name': 'col4',\n 'type': {'type': 'long', 'logicalType': 'timestamp-micros'}}, {'name':\n 'col5', 'type': {'type': 'long', 'logicalType': 'timestamp-millis'}}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'col1', 'type': {'type': 'int', 'logicalType': 'date'}}, {'name':\n 'col2', 'type': {'type': 'long', 'logicalType': 'time-micros'}}, {\n 'name': 'col3', 'type': {'type': 'long', 'logicalType': 'time-millis'}},\n {'name': 'col4', 'type': {'type': 'long', 'logicalType':\n 'timestamp-micros'}}, {'name': 'col5', 'type': {'type': 'long',\n 'logicalType': 'timestamp-millis'}}]})\n", (2781, 3226), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((3924, 4204), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type': {\n 'name': 'Nested', 'type': 'record', 'fields': []}}, {'name': 'col2',\n 'type': {'type': 'array', 'items': 'int'}}, {'name': 'col3', 'type': {\n 'type': 'array', 'items': 'Nested'}}]}"], {}), "({'name': 'Test', 
'type': 'record', 'fields': [{'name':\n 'col1', 'type': {'name': 'Nested', 'type': 'record', 'fields': []}}, {\n 'name': 'col2', 'type': {'type': 'array', 'items': 'int'}}, {'name':\n 'col3', 'type': {'type': 'array', 'items': 'Nested'}}]})\n", (3940, 4204), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((5007, 5410), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'col1', 'type':\n 'string', 'default': 'test'}, {'name': 'col2', 'type': ['string',\n 'null'], 'default': None}, {'name': 'col3', 'type': {'type': 'map',\n 'values': 'string'}, 'default': {'key': 'value'}}, {'name': 'col4',\n 'type': 'boolean', 'default': True}, {'name': 'col5', 'type': 'boolean',\n 'default': False}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'col1', 'type': 'string', 'default': 'test'}, {'name': 'col2', 'type':\n ['string', 'null'], 'default': None}, {'name': 'col3', 'type': {'type':\n 'map', 'values': 'string'}, 'default': {'key': 'value'}}, {'name':\n 'col4', 'type': 'boolean', 'default': True}, {'name': 'col5', 'type':\n 'boolean', 'default': False}]})\n", (5023, 5410), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((5861, 6026), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'c1', 'type': {\n 'type': 'enum', 'symbols': ['passed', 'failed'], 'name': 'Status'}}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'c1', 'type': {'type': 'enum', 'symbols': ['passed', 'failed'], 'name':\n 'Status'}}]})\n", (5877, 6026), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n'), ((6338, 6537), 'pydantic_avro.avro_to_pydantic.avsc_to_pydantic', 'avsc_to_pydantic', (["{'name': 'Test', 'type': 'record', 'fields': [{'name': 'c1', 'type': {\n 'type': 'enum', 'symbols': ['passed', 'failed'], 'name': 'Status'}}, {\n 'name': 
'c2', 'type': 'Status'}]}"], {}), "({'name': 'Test', 'type': 'record', 'fields': [{'name':\n 'c1', 'type': {'type': 'enum', 'symbols': ['passed', 'failed'], 'name':\n 'Status'}}, {'name': 'c2', 'type': 'Status'}]})\n", (6354, 6537), False, 'from pydantic_avro.avro_to_pydantic import avsc_to_pydantic\n')] |
from django.test import TestCase
from dojo.models import Test
from dojo.tools.cloudsploit.parser import CloudsploitParser
class TestCloudsploitParser(TestCase):
    """Unit tests for CloudsploitParser against bundled sample scan reports."""

    def test_cloudsploit_parser_with_no_vuln_has_no_findings(self):
        """A report with no vulnerabilities yields zero findings."""
        # 'with' guarantees the fixture file is closed even if the parser raises
        # (the original leaked the handle on an exception before close()).
        with open("dojo/unittests/scans/cloudsploit/cloudsploit_zero_vul.json") as testfile:
            parser = CloudsploitParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(0, len(findings))

    def test_cloudsploit_parser_with_one_criticle_vuln_has_one_findings(self):
        """A report with a single vulnerability yields exactly one finding."""
        with open("dojo/unittests/scans/cloudsploit/cloudsploit_one_vul.json") as testfile:
            parser = CloudsploitParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(1, len(findings))

    def test_cloudsploit_parser_with_many_vuln_has_many_findings(self):
        """A report with several vulnerabilities yields one finding each (6 total)."""
        with open("dojo/unittests/scans/cloudsploit/cloudsploit_many_vul.json") as testfile:
            parser = CloudsploitParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(6, len(findings))
| [
"dojo.tools.cloudsploit.parser.CloudsploitParser",
"dojo.models.Test"
] | [((335, 354), 'dojo.tools.cloudsploit.parser.CloudsploitParser', 'CloudsploitParser', ([], {}), '()\n', (352, 354), False, 'from dojo.tools.cloudsploit.parser import CloudsploitParser\n'), ((662, 681), 'dojo.tools.cloudsploit.parser.CloudsploitParser', 'CloudsploitParser', ([], {}), '()\n', (679, 681), False, 'from dojo.tools.cloudsploit.parser import CloudsploitParser\n'), ((983, 1002), 'dojo.tools.cloudsploit.parser.CloudsploitParser', 'CloudsploitParser', ([], {}), '()\n', (1000, 1002), False, 'from dojo.tools.cloudsploit.parser import CloudsploitParser\n'), ((404, 410), 'dojo.models.Test', 'Test', ([], {}), '()\n', (408, 410), False, 'from dojo.models import Test\n'), ((731, 737), 'dojo.models.Test', 'Test', ([], {}), '()\n', (735, 737), False, 'from dojo.models import Test\n'), ((1052, 1058), 'dojo.models.Test', 'Test', ([], {}), '()\n', (1056, 1058), False, 'from dojo.models import Test\n')] |
#!/usr/bin/env python
'''
Port Google Bookmarks over to pinboard.in
* Export Google Bookmarks by hitting
http://www.google.com/bookmarks/?output=xml&num=10000
* Get pinboard auth_token from https://pinboard.in/settings/password
Run:
./gbmk2pinb.py bookmarks.xml --auth-token <token>
'''
import requests
from cStringIO import StringIO
from datetime import datetime
import httplib
import logging as log
import xml.etree.cElementTree as et
add_url = 'https://api.pinboard.in/v1/posts/add'
# Example XML:
# <xml_api_reply version="1">
# <bookmarks>
# <bookmark>
# <title>Finnish Doctors Are Prescribing Video Games For ADHD -
# Slashdot</title>
# <url>bit.ly/15J6NSBCustomize</url>
# <timestamp>1381590052580408</timestamp>
# <id>536897562302183779</id>
# <labels>
# <label>psychology</label>
# <label>adhd</label>
# <label>video</label>
# <label>games</label>
# </labels>
# </bookmark>
# ...
def iter_xml(fo):
    """Yield one dict per <bookmark> element in the Google Bookmarks export.

    Each dict has keys: 'title', 'url', 'labels' (list of label strings)
    and 'timestamp' (UTC datetime decoded from the microsecond epoch value).
    """
    doc = et.parse(fo)
    for node in doc.iterfind('.//bookmark'):
        title_node = node.find('title')
        if title_node is not None:
            title = title_node.text
        else:
            title = 'UNKNOWN TITLE'
        micros = int(node.find('timestamp').text)
        labels = []
        for label in node.iterfind('.//label'):
            labels.append(label.text)
        yield {
            'title': title,
            'url': node.find('url').text,
            'labels': labels,
            'timestamp': datetime.utcfromtimestamp(micros/1000000),
        }
def bmk2params(bmk, auth_token):
    """Map a bookmark dict (from iter_xml) to pinboard /posts/add parameters."""
    params = {
        'auth_token': auth_token,
        'replace': 'yes',
    }
    params['url'] = bmk['url']
    params['description'] = bmk['title']
    params['tags'] = ','.join(bmk['labels'])
    # Pinboard expects UTC timestamps in ISO-8601 "Z" form.
    params['dt'] = bmk['timestamp'].strftime('%Y-%m-%dT%H:%M:%SZ')
    return params
def parse_reply(reply):
    """Return True iff `reply` is an HTTP 200 pinboard answer with code="done"."""
    if reply.status_code != httplib.OK:
        return False
    doc = et.parse(StringIO(reply.content))
    result_code = doc.getroot().get('code')
    if result_code == 'done':
        return True
    # Anything other than "done" is an API-level failure; surface it.
    log.error(result_code)
    return False
def post_bookmark(bmk, auth_token):
    """POST a single bookmark to pinboard; return True on success."""
    return parse_reply(
        requests.post(add_url, params=bmk2params(bmk, auth_token)))
def main(argv=None):
    """CLI entry point: parse args, read the bookmarks XML, post each one."""
    import sys
    from argparse import ArgumentParser
    argv = argv or sys.argv
    arg_parser = ArgumentParser(description='Post Google Bookmarks to Pinboard')
    arg_parser.add_argument('filename')
    arg_parser.add_argument('--auth-token')
    arg_parser.add_argument('--start', help='start offset', type=int, default=0)
    opts = arg_parser.parse_args(argv[1:])

    with open(opts.filename) as fo:
        bmks = list(iter_xml(fo))
    # --start lets a partially-completed run be resumed at an offset.
    if opts.start > 0:
        bmks = bmks[opts.start:]
    for offset, bmk in enumerate(bmks):
        print(u'{}: {}'.format(opts.start + offset, bmk['title']))
        if not post_bookmark(bmk, opts.auth_token):
            raise SystemExit('error: cannot post {}'.format(bmk['title']))


if __name__ == '__main__':
    main()
| [
"datetime.datetime.utcfromtimestamp",
"requests.post",
"cStringIO.StringIO",
"argparse.ArgumentParser",
"xml.etree.cElementTree.parse",
"logging.error"
] | [((1017, 1029), 'xml.etree.cElementTree.parse', 'et.parse', (['fo'], {}), '(fo)\n', (1025, 1029), True, 'import xml.etree.cElementTree as et\n'), ((2062, 2099), 'requests.post', 'requests.post', (['add_url'], {'params': 'params'}), '(add_url, params=params)\n', (2075, 2099), False, 'import requests\n'), ((2251, 2314), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Post Google Bookmarks to Pinboard"""'}), "(description='Post Google Bookmarks to Pinboard')\n", (2265, 2314), False, 'from argparse import ArgumentParser\n'), ((1916, 1931), 'logging.error', 'log.error', (['code'], {}), '(code)\n', (1925, 1931), True, 'import logging as log\n'), ((1383, 1422), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(ts / 1000000)'], {}), '(ts / 1000000)\n', (1408, 1422), False, 'from datetime import datetime\n'), ((1822, 1845), 'cStringIO.StringIO', 'StringIO', (['reply.content'], {}), '(reply.content)\n', (1830, 1845), False, 'from cStringIO import StringIO\n')] |
import os
import re
import subprocess
from ..segment import BasicSegment
class Repo(object):
    """Snapshot of the git working-tree state of the current directory.

    Counters (new/changed/staged/conflicted/ahead/behind) and branch names
    are populated by status(); everything starts at 0 after __init__.
    """

    # Glyphs used when rendering each piece of repo state.
    symbols = {
        "detached": "\u2693",
        "ahead": "\u2B06",
        "behind": "\u2B07",
        "staged": "\u2714",
        "changed": "\u270E",
        "new": "\uf128",
        "conflicted": "\u273C",
        "stash": "\u2398",
        "git": "\uf418",
    }

    def __init__(self):
        # The first four entries are the "dirty" counters (see `dirty`).
        # Fix: the original listed "conflicted" twice.
        self.attrs = [
            "new",
            "changed",
            "staged",
            "conflicted",
            "active",
            "ahead",
            "behind",
            "branch",
            "remote",
        ]
        for attr in self.attrs:
            setattr(self, attr, 0)

    @property
    def dirty(self):
        """True when any new/changed/staged/conflicted file exists."""
        return sum([getattr(self, attr) for attr in self.attrs[:4]]) > 0

    def __str__(self):
        return str({attr: getattr(self, attr) for attr in self.attrs})

    def subprocess(self, cmd):
        """Run `cmd`; return its stdout decoded as UTF-8, or '' on any failure."""
        try:
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        except OSError:
            return ''
        data = proc.communicate()
        if proc.returncode != 0:
            return ''
        return data[0].decode("utf-8")

    def get_branch(self):
        """Return the current branch name ('master' when it can't be read)."""
        cmd = ["git", "rev-parse", "--abbrev-ref", "HEAD"]
        return self.subprocess(cmd).strip() or 'master'

    def get_stash(self):
        """Count stash entries; None when the stash is empty."""
        cmd = ["git", "stash", "list"]
        stash = self.subprocess(cmd).splitlines()
        self.stash = len(stash) if stash else None

    def status(self, show_stash=False):
        """Populate counters and branch info from `git status --porcelain -b`."""
        cmd = ["git", "status", "--porcelain", "-b"]
        status = self.subprocess(cmd).splitlines()
        if not status:
            # git failed or produced nothing: the original indexed status[0]
            # and raised IndexError here. Leave the repo marked inactive.
            return
        if show_stash:
            self.get_stash()
        for statusline in status[1:]:
            code = statusline[:2]
            if code == "??":
                self.new += 1
            elif code in ("DD", "AU", "UD", "UA", "DU", "AA", "UU"):
                self.conflicted += 1
            else:
                if code[1] != " ":
                    self.changed += 1
                if code[0] != " ":
                    self.staged += 1
        # First porcelain line: "## local...remote [ahead N, behind M]".
        info = re.search(
            r"^## (?P<local>\S+?)"
            r"(\.{3}(?P<remote>\S+?)( \[(ahead (?P<ahead>\d+)(, )?)?(behind (?P<behind>\d+))?\])?)?$",
            status[0],
        )
        branch = info.groupdict() if info else {}
        # groupdict() maps *unmatched* groups to None (the key exists), so
        # .get(key, default) alone never falls back -- coalesce explicitly.
        self.ahead = branch.get("ahead") or 0
        self.behind = branch.get("behind") or 0
        self.branch = branch.get("local") or ""
        self.remote = branch.get("remote") or ""
        self.active = True
class Segment(BasicSegment):
    """Prompt segment that renders git branch and working-tree status."""

    ATTRIBUTES = {
        "skip_dirs": [],      # directories for which only the branch is shown
        "show_stash": False,  # also count stash entries when True
    }

    def is_gitdir(self, cwd):
        """Walk upward from `cwd` looking for a readable .git directory.

        Side effects: sets self.git_dir when found, and always restores the
        process working directory to self.hyper_prompt.cwd before returning.
        """
        found = False
        _cwd = cwd
        while cwd != "/":
            # NOTE(review): os.access resolves ".git" against the *process*
            # cwd, which only tracks `cwd` after the first chdir below --
            # confirm the process cwd equals hyper_prompt.cwd on entry.
            if os.access(".git", os.R_OK):
                found = True
                self.git_dir = cwd
                break
            _cwd = os.getcwd()
            os.chdir("..")
            cwd = os.getcwd()
            if cwd == _cwd:
                # chdir("..") no longer moved us (filesystem root): stop.
                break
        os.chdir(self.hyper_prompt.cwd)
        return found

    def add_sub_segment(self, key, fg, bg):
        """Append a sub-segment for `key` (e.g. 'ahead'); empty when count is falsy."""
        segment = BasicSegment(self.hyper_prompt, self.seg_conf)
        value = getattr(self.repo, key, None)
        if value:
            symbol = self.symbol(key, self.repo.symbols)
            content = symbol + str(value)
            segment.append(self.hyper_prompt._content % (content), fg, bg)
        self.sub_segments.append(segment)

    def activate(self):
        """Build the git segment.

        Returns True for skipped dirs, False when git status yields nothing,
        and None (falsy) when the cwd is not inside a git repository.
        """
        if self.is_gitdir(self.hyper_prompt.cwd):
            self.repo = Repo()
            fg, bg = (
                self.theme.get("REPO_CLEAN_FG", 0),
                self.theme.get("REPO_CLEAN_BG", 148),
            )
            symbol = self.symbol("git", self.repo.symbols)
            content = symbol + str(self.repo.get_branch())
            # if skipped dir, only show branch name
            if self.hyper_prompt.cwd in self.attr_skip_dirs:
                self.append(self.hyper_prompt._content % (content), fg, bg)
                return True
            self.repo.status(show_stash=self.attr_show_stash)
            if not self.repo.active:
                return False
            if self.repo.dirty:
                # Switch to the "dirty" colour pair before appending.
                fg, bg = (
                    self.theme.get("REPO_DIRTY_FG", 15),
                    self.theme.get("REPO_DIRTY_BG", 161),
                )
            self.append(self.hyper_prompt._content % (content), fg, bg)
            self.add_sub_segment(
                "ahead",
                self.theme.get("GIT_AHEAD_FG", 250),
                self.theme.get("GIT_AHEAD_BG", 240),
            )
            self.add_sub_segment(
                "behind",
                self.theme.get("GIT_BEHIND_FG", 250),
                self.theme.get("GIT_BEHIND_BG", 240),
            )
            self.add_sub_segment(
                "staged",
                self.theme.get("GIT_STAGED_FG", 15),
                self.theme.get("GIT_STAGED_BG", 22),
            )
            self.add_sub_segment(
                "changed",
                self.theme.get("GIT_NOTSTAGED_FG", 15),
                self.theme.get("GIT_NOTSTAGED_BG", 130),
            )
            self.add_sub_segment(
                "new",
                self.theme.get("GIT_UNTRACKED_FG", 15),
                self.theme.get("GIT_UNTRACKED_BG", 52),
            )
            self.add_sub_segment(
                "conflicted",
                self.theme.get("GIT_CONFLICTED_FG", 15),
                self.theme.get("GIT_CONFLICTED_BG", 9),
            )
            # NOTE(review): unlike every call above, this one passes the _BG
            # theme key as fg and the _FG key as bg -- looks like swapped
            # arguments; confirm against the intended stash colours.
            self.add_sub_segment(
                "stash",
                self.theme.get("GIT_STASH_BG", 221),
                self.theme.get("GIT_STASH_FG", 0),
            )
| [
"subprocess.Popen",
"os.access",
"os.getcwd",
"os.chdir",
"re.search"
] | [((2219, 2365), 're.search', 're.search', (['"""^## (?P<local>\\\\S+?)(\\\\.{3}(?P<remote>\\\\S+?)( \\\\[(ahead (?P<ahead>\\\\d+)(, )?)?(behind (?P<behind>\\\\d+))?\\\\])?)?$"""', 'status[0]'], {}), "(\n '^## (?P<local>\\\\S+?)(\\\\.{3}(?P<remote>\\\\S+?)( \\\\[(ahead (?P<ahead>\\\\d+)(, )?)?(behind (?P<behind>\\\\d+))?\\\\])?)?$'\n , status[0])\n", (2228, 2365), False, 'import re\n'), ((3180, 3211), 'os.chdir', 'os.chdir', (['self.hyper_prompt.cwd'], {}), '(self.hyper_prompt.cwd)\n', (3188, 3211), False, 'import os\n'), ((1036, 1081), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (1052, 1081), False, 'import subprocess\n'), ((2911, 2937), 'os.access', 'os.access', (['""".git"""', 'os.R_OK'], {}), "('.git', os.R_OK)\n", (2920, 2937), False, 'import os\n'), ((3048, 3059), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3057, 3059), False, 'import os\n'), ((3073, 3087), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (3081, 3087), False, 'import os\n'), ((3107, 3118), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3116, 3118), False, 'import os\n')] |
import unittest
from . import *
from edg_core.ScalaCompilerInterface import ScalaCompiler
class TestConstPropInternal(Block):
  """Inner block exposing one float and one range parameter, assigned by the parent."""
  def __init__(self) -> None:
    super().__init__()
    self.float_param = self.Parameter(FloatExpr())
    self.range_param = self.Parameter(RangeExpr())
class TestParameterConstProp(Block):
  """Top-level block: assigns float/range constants and propagates them into a child block."""
  def __init__(self) -> None:
    super().__init__()
    self.float_const = self.Parameter(FloatExpr())
    self.float_param = self.Parameter(FloatExpr())
    self.range_const = self.Parameter(RangeExpr())
    self.range_param = self.Parameter(RangeExpr())
  def contents(self):
    # Constant -> param -> child-param chains exercised by ConstPropTestCase.
    self.assign(self.float_const, 2.0)
    self.assign(self.float_param, self.float_const)
    self.assign(self.range_const, Range(1.0, 42.0))
    self.assign(self.range_param, self.range_const)
    self.block = self.Block(TestConstPropInternal())
    self.assign(self.block.float_param, self.float_param)
    self.assign(self.block.range_param, self.range_param)
class ConstPropTestCase(unittest.TestCase):
  """Checks float and range constants propagate through assigns into the inner block."""
  def setUp(self) -> None:
    self.compiled = ScalaCompiler.compile(TestParameterConstProp)
  def test_float_prop(self) -> None:
    self.assertEqual(self.compiled.get_value(['float_const']), 2.0)
    self.assertEqual(self.compiled.get_value(['block', 'float_param']), 2.0)
  def test_range_prop(self) -> None:
    self.assertEqual(self.compiled.get_value(['range_const']), Range(1.0, 42.0))
    self.assertEqual(self.compiled.get_value(['block', 'range_param']), Range(1.0, 42.0))
class TestPortConstPropLink(Link):
  """Link copying float_param from the first-connected port (a) to the second (b)."""
  def __init__(self) -> None:
    super().__init__()
    self.a = self.Port(TestPortConstPropPort())
    self.b = self.Port(TestPortConstPropPort())
    self.assign(self.b.float_param, self.a.float_param)  # first connected is source
class TestPortConstPropPort(Port[TestPortConstPropLink]):
  """Port carrying a single float parameter, linked by TestPortConstPropLink."""
  def __init__(self) -> None:
    super().__init__()
    self.link_type = TestPortConstPropLink
    self.float_param = self.Parameter(FloatExpr())
class TestPortConstPropInnerBlock(Block):
  """Leaf block exposing one optional TestPortConstPropPort."""
  def __init__(self) -> None:
    super().__init__()
    self.port = self.Port(TestPortConstPropPort(), optional=True)
class TestPortConstPropOuterBlock(Block):
  """Block exporting its inner block's port through a boundary port."""
  def __init__(self) -> None:
    super().__init__()
    self.inner = self.Block(TestPortConstPropInnerBlock())
    self.port = self.Port(TestPortConstPropPort())
    self.connect(self.inner.port, self.port)
class TestPortConstPropTopBlock(Block):
  """Top block linking inner and outer blocks; block1's port value should propagate."""
  def __init__(self) -> None:
    super().__init__()
    self.block1 = self.Block(TestPortConstPropInnerBlock())
    self.block2 = self.Block(TestPortConstPropOuterBlock())
    self.link = self.connect(self.block1.port, self.block2.port)
    self.assign(self.block1.port.float_param, 3.5)
class ConstPropPortTestCase(unittest.TestCase):
  """Checks a port parameter propagates across a link (including an exported port)."""
  def setUp(self) -> None:
    self.compiled = ScalaCompiler.compile(TestPortConstPropTopBlock)
  def test_port_param_prop(self) -> None:
    # 3.5 assigned at block1.port should appear at every connected port.
    self.assertEqual(self.compiled.get_value(['block1', 'port', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['link', 'a', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['link', 'b', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['block2', 'port', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['block2', 'inner', 'port', 'float_param']), 3.5)
  def test_connected_link(self) -> None:
    self.assertEqual(self.compiled.get_value(['block1', 'port', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['block2', 'port', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['block2', 'inner', 'port', edgir.IS_CONNECTED]), True)
class TestDisconnectedTopBlock(Block):
  """Top block whose inner port is assigned but never connected."""
  def __init__(self) -> None:
    super().__init__()
    self.block1 = self.Block(TestPortConstPropInnerBlock())
    self.assign(self.block1.port.float_param, 3.5)
class DisconnectedPortTestCase(unittest.TestCase):
  """Checks an unconnected optional port resolves IS_CONNECTED to False."""
  def setUp(self) -> None:
    self.compiled = ScalaCompiler.compile(TestDisconnectedTopBlock)
  def test_disconnected_link(self) -> None:
    self.assertEqual(self.compiled.get_value(['block1', 'port', edgir.IS_CONNECTED]), False)
class TestPortConstPropBundleLink(Link):
  """Bundle link wiring each element (elt1, elt2) through its own inner link."""
  def __init__(self) -> None:
    super().__init__()
    self.a = self.Port(TestPortConstPropBundle())
    self.b = self.Port(TestPortConstPropBundle())
    self.elt1_link = self.connect(self.a.elt1, self.b.elt1)
    self.elt2_link = self.connect(self.a.elt2, self.b.elt2)
class TestPortConstPropBundle(Bundle[TestPortConstPropBundleLink]):
  """Bundle of two TestPortConstPropPort elements."""
  def __init__(self) -> None:
    super().__init__()
    self.link_type = TestPortConstPropBundleLink
    self.elt1 = self.Port(TestPortConstPropPort())
    self.elt2 = self.Port(TestPortConstPropPort())
class TestPortConstPropBundleInnerBlock(Block):
  """Leaf block exposing one bundle port."""
  def __init__(self) -> None:
    super().__init__()
    self.port = self.Port(TestPortConstPropBundle())
class TestPortConstPropBundleTopBlock(Block):
  """Top block linking two bundle blocks and assigning per-element values in contents()."""
  def __init__(self) -> None:
    super().__init__()
  def contents(self) -> None:
    self.block1 = self.Block(TestPortConstPropBundleInnerBlock())
    self.block2 = self.Block(TestPortConstPropBundleInnerBlock())
    self.link = self.connect(self.block1.port, self.block2.port)
    # Distinct values per element so propagation paths are distinguishable.
    self.assign(self.block1.port.elt1.float_param, 3.5)
    self.assign(self.block1.port.elt2.float_param, 6.0)
class ConstPropBundleTestCase(unittest.TestCase):
  """Checks per-element bundle values propagate through outer and element-level links."""
  def setUp(self) -> None:
    self.compiled = ScalaCompiler.compile(TestPortConstPropBundleTopBlock)
  def test_port_param_prop(self) -> None:
    # elt1 carries 3.5 and elt2 carries 6.0 through every hop of the bundle.
    self.assertEqual(self.compiled.get_value(['block1', 'port', 'elt1', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['block1', 'port', 'elt2', 'float_param']), 6.0)
    self.assertEqual(self.compiled.get_value(['link', 'a', 'elt1', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['link', 'a', 'elt2', 'float_param']), 6.0)
    self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'a', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'a', 'float_param']), 6.0)
    self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'b', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'b', 'float_param']), 6.0)
    self.assertEqual(self.compiled.get_value(['link', 'b', 'elt1', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['link', 'b', 'elt2', 'float_param']), 6.0)
    self.assertEqual(self.compiled.get_value(['block2', 'port', 'elt1', 'float_param']), 3.5)
    self.assertEqual(self.compiled.get_value(['block2', 'port', 'elt2', 'float_param']), 6.0)
  def test_connected_link(self) -> None:
    self.assertEqual(self.compiled.get_value(['block1', 'port', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['block2', 'port', edgir.IS_CONNECTED]), True)
    # Note: inner ports IS_CONNECTED is not defined
    self.assertEqual(self.compiled.get_value(['link', 'a', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['link', 'b', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'a', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'b', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'a', edgir.IS_CONNECTED]), True)
    self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'b', edgir.IS_CONNECTED]), True)
| [
"edg_core.ScalaCompilerInterface.ScalaCompiler.compile"
] | [((1065, 1110), 'edg_core.ScalaCompilerInterface.ScalaCompiler.compile', 'ScalaCompiler.compile', (['TestParameterConstProp'], {}), '(TestParameterConstProp)\n', (1086, 1110), False, 'from edg_core.ScalaCompilerInterface import ScalaCompiler\n'), ((2826, 2874), 'edg_core.ScalaCompilerInterface.ScalaCompiler.compile', 'ScalaCompiler.compile', (['TestPortConstPropTopBlock'], {}), '(TestPortConstPropTopBlock)\n', (2847, 2874), False, 'from edg_core.ScalaCompilerInterface import ScalaCompiler\n'), ((3979, 4026), 'edg_core.ScalaCompilerInterface.ScalaCompiler.compile', 'ScalaCompiler.compile', (['TestDisconnectedTopBlock'], {}), '(TestDisconnectedTopBlock)\n', (4000, 4026), False, 'from edg_core.ScalaCompilerInterface import ScalaCompiler\n'), ((5455, 5509), 'edg_core.ScalaCompilerInterface.ScalaCompiler.compile', 'ScalaCompiler.compile', (['TestPortConstPropBundleTopBlock'], {}), '(TestPortConstPropBundleTopBlock)\n', (5476, 5509), False, 'from edg_core.ScalaCompilerInterface import ScalaCompiler\n')] |
'''
Created on 2017-01-15
@author: Think
Problem: given a 5-digit number, determine whether it is a palindrome.
For example, 12321 is a palindrome: the ones digit equals the ten-thousands
digit, and the tens digit equals the thousands digit.
1. Program analysis: same as example 29.
2. Program source code:
'''
from pip._vendor.distlib.compat import raw_input
def jcp030():
    """Read an integer and report whether its decimal digits form a palindrome.

    Bug fix: the original printed the "is a huiwen" success message
    unconditionally after the loop, even when a mismatch had already been
    reported. A for/else makes the success message run only when the loop
    finished without finding a mismatch.
    """
    x = int(raw_input('input a number:\n'))
    x = str(x)
    for i in range(len(x) // 2):
        if x[i] != x[-i - 1]:
            print('this number is not a huiwen')
            break
    else:
        # for/else: executes only when the loop was not broken out of.
        print('this number is a huiwen')
jcp030()
"pip._vendor.distlib.compat.raw_input"
] | [((193, 223), 'pip._vendor.distlib.compat.raw_input', 'raw_input', (['"""input a number:\n"""'], {}), "('input a number:\\n')\n", (202, 223), False, 'from pip._vendor.distlib.compat import raw_input\n')] |
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Created by <NAME> (<EMAIL>)
Anisotropy data analysis
The equation for the curve as published by Marchand et al. in Nature Cell Biology in 2001 is as follows:
y = a + (b-a) / [(c(x+K)/K*d)+1], where
a is the anisotropy without protein,
b is anisotropy with protein,
c is the Kd for ligand,
d is the total concentration of protein.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from pathlib import Path
import os
import shutil
from timeit import default_timer as timer
from scipy.stats import norm
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from inspect import currentframe, getframeinfo
fname = getframeinfo(currentframe()).filename # current file name
current_dir = Path(fname).resolve().parent
# User input ----------------------------------------------------------------
# Titration series: x = total [dark D] (presumably uM, matching the plot
# axis label -- confirm), y = measured anisotropy at each concentration.
red_x = np.array([100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0])
red_y = np.array([0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248])
# (a, b, c, d) = (anisotropy without protein, anisotropy with protein,
# ligand Kd, total protein concentration) -- see the module docstring.
red_p = np.array([0.191, 0.248, 0.05, 1])
black_x = np.array([100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0])
black_y = np.array([0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278])
black_p = np.array([0.183, 0.278, 1.5, 16])
# ---------------------------------------------------------------------------
def red_anisotropy(x, K, p=None):
    """Marchand et al. anisotropy curve for the red dataset.

    y = a + (b - a) / (c*(x + K)/(K*d) + 1), where (a, b, c, d) are
    (anisotropy without protein, anisotropy with protein, ligand Kd,
    total protein concentration).

    Generalized: `p` optionally supplies (a, b, c, d); when omitted the
    module-level `red_p` defaults are used, so existing two-argument
    callers (e.g. curve_fit) are unaffected.
    """
    a, b, c, d = p if p is not None else red_p
    return a + (b - a) / ((c * (x + K) / (K * d)) + 1)
def black_anisotropy(x, K, p=None):
    """Marchand et al. anisotropy curve for the black dataset.

    Same model as red_anisotropy: y = a + (b - a) / (c*(x + K)/(K*d) + 1).

    Generalized: `p` optionally supplies (a, b, c, d); when omitted the
    module-level `black_p` defaults are used, keeping existing
    two-argument callers (e.g. curve_fit) unaffected.
    """
    a, b, c, d = p if p is not None else black_p
    return a + (b - a) / ((c * (x + K) / (K * d)) + 1)
def main():
    """Fit K for both datasets and write the two-panel plot to disk."""
    # These locals shadow the module-level red_p/black_p parameter arrays:
    # curve_fit returns the fitted popt (here a 1-element array holding K),
    # which is then passed as K to the model functions below.
    red_p, _ = curve_fit(red_anisotropy, red_x, red_y, p0=[0.078])
    black_p, _ = curve_fit(black_anisotropy, black_x, black_y, p0=[0.1])
    # Plot the result
    fit_x = np.linspace(0, 100, 1000)
    fig, (ax1, ax2) = plt.subplots(figsize=(20, 10), ncols=2, nrows=1, dpi=300)
    ax1.plot(red_x, red_y, 'ro', ms=10)
    ax1.plot(fit_x, red_anisotropy(fit_x, red_p), 'r', lw=2)
    ax1.set_xlabel('[dark D] um')
    ax1.set_ylabel('Anisotropy')
    ax1.set_title('Red K = %f' %(red_p))
    ax1.set_ylim([0.15, 0.3])
    ax2.plot(black_x, black_y, 'ko', ms=10)
    ax2.plot(fit_x, black_anisotropy(fit_x, black_p), 'k', lw=2)
    ax2.set_xlabel('[dark D] um')
    ax2.set_ylabel('Anisotropy')
    ax2.set_title('Black K = %f' %(black_p))
    ax2.set_ylim([0.15, 0.3])
    fig.savefig('plot_anisotropy.png')
    plt.close(fig)
if __name__ == "__main__":
    main()
| [
"scipy.optimize.curve_fit",
"pathlib.Path",
"inspect.currentframe",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((1092, 1143), 'numpy.array', 'np.array', (['[100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0]'], {}), '([100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0])\n', (1100, 1143), True, 'import numpy as np\n'), ((1153, 1216), 'numpy.array', 'np.array', (['[0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248]'], {}), '([0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248])\n', (1161, 1216), True, 'import numpy as np\n'), ((1226, 1259), 'numpy.array', 'np.array', (['[0.191, 0.248, 0.05, 1]'], {}), '([0.191, 0.248, 0.05, 1])\n', (1234, 1259), True, 'import numpy as np\n'), ((1273, 1328), 'numpy.array', 'np.array', (['[100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0]'], {}), '([100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0])\n', (1281, 1328), True, 'import numpy as np\n'), ((1340, 1412), 'numpy.array', 'np.array', (['[0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278]'], {}), '([0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278])\n', (1348, 1412), True, 'import numpy as np\n'), ((1424, 1457), 'numpy.array', 'np.array', (['[0.183, 0.278, 1.5, 16]'], {}), '([0.183, 0.278, 1.5, 16])\n', (1432, 1457), True, 'import numpy as np\n'), ((1866, 1917), 'scipy.optimize.curve_fit', 'curve_fit', (['red_anisotropy', 'red_x', 'red_y'], {'p0': '[0.078]'}), '(red_anisotropy, red_x, red_y, p0=[0.078])\n', (1875, 1917), False, 'from scipy.optimize import curve_fit\n'), ((1936, 1991), 'scipy.optimize.curve_fit', 'curve_fit', (['black_anisotropy', 'black_x', 'black_y'], {'p0': '[0.1]'}), '(black_anisotropy, black_x, black_y, p0=[0.1])\n', (1945, 1991), False, 'from scipy.optimize import curve_fit\n'), ((2033, 2058), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (2044, 2058), True, 'import numpy as np\n'), ((2084, 2141), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)', 'ncols': '(2)', 'nrows': '(1)', 'dpi': '(300)'}), '(figsize=(20, 10), ncols=2, nrows=1, dpi=300)\n', (2096, 2141), True, 'import matplotlib.pyplot as plt\n'), 
((2738, 2752), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2747, 2752), True, 'import matplotlib.pyplot as plt\n'), ((913, 927), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (925, 927), False, 'from inspect import currentframe, getframeinfo\n'), ((973, 984), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (977, 984), False, 'from pathlib import Path\n')] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a DDPG agent.
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
"""
import agent
from common import replay_buffer
from common.actor_critic import ActorNetwork
from common.actor_critic import CriticNetwork
import numpy as np
class DDPG(agent.Agent):
  """DDPG agent.

  Deep Deterministic Policy Gradient (Lillicrap et al.,
  arXiv:1509.02971): a deterministic actor trained from the critic's
  action gradients, with target networks and a replay buffer.
  """

  def __init__(self, env, sess, config):
    """Initialize members."""
    state_dim = env.observation_space.shape[0]
    self.env = env
    self.action_dim = env.action_space.shape[0]
    self.action_high = env.action_space.high
    self.action_low = env.action_space.low
    self.batch_size = config.batch_size
    self.warmup_size = config.warmup_size
    self.gamma = config.gamma      # discount factor
    self.sigma = config.sigma      # exploration-noise std dev
    self.noise_cap = config.c      # per-dimension clip on the noise
    self.actor = ActorNetwork(sess=sess,
                              state_dim=state_dim,
                              action_dim=self.action_dim,
                              action_high=self.action_high,
                              action_low=self.action_low,
                              learning_rate=config.actor_lr,
                              grad_norm_clip=config.grad_norm_clip,
                              tau=config.tau,
                              batch_size=config.batch_size)
    self.critic = CriticNetwork(sess=sess,
                                state_dim=state_dim,
                                action_dim=self.action_dim,
                                learning_rate=config.critic_lr,
                                tau=config.tau,
                                gamma=config.gamma)
    self.replay_buffer = replay_buffer.ReplayBuffer(
        buffer_size=config.buffer_size)

  def random_action(self, observation):
    """Return a random action."""
    return self.env.action_space.sample()

  def action(self, observation):
    """Return an action according to the agent's policy."""
    return self.actor.get_action(observation)

  def action_with_noise(self, observation):
    """Return a noisy action."""
    # Act randomly until the buffer holds warmup_size transitions, then
    # follow the actor; either way add clipped Gaussian exploration noise.
    if self.replay_buffer.size > self.warmup_size:
      action = self.action(observation)
    else:
      action = self.random_action(observation)
    noise = np.clip(np.random.randn(self.action_dim) * self.sigma,
                    -self.noise_cap, self.noise_cap)
    action_with_noise = action + noise
    # Clip back into the action space; also return the un-noised action
    # and the noise so callers can log them.
    return (np.clip(action_with_noise, self.action_low, self.action_high),
            action, noise)

  def store_experience(self, s, a, r, t, s2):
    """Save experience to replay buffer."""
    self.replay_buffer.add(s, a, r, t, s2)

  def train(self, global_step):
    """Train the agent's policy for 1 iteration."""
    if self.replay_buffer.size > self.warmup_size:
      s0, a, r, t, s1 = self.replay_buffer.sample_batch(self.batch_size)
      target_actions = self.actor.get_target_action(s1)
      target_qval = self.get_target_qval(s1, target_actions)
      # 1-step TD target: y = r + gamma * Q'(s1, mu'(s1)), zeroed for
      # terminal transitions (t converted from bool flags to 0/1).
      t = t.astype(dtype=int)
      y = r + self.gamma * target_qval * (1 - t)
      self.critic.train(s0, a, y)
      # Actor update uses the critic's gradient w.r.t. the actions.
      actions = self.actor.get_action(s0)
      grads = self.critic.get_action_gradients(s0, actions)
      self.actor.train(s0, grads[0])
      self.update_targets()

  def update_targets(self):
    """Update all target networks."""
    self.actor.update_target_network()
    self.critic.update_target_network()

  def get_target_qval(self, observation, action):
    """Get target Q-val."""
    return self.critic.get_target_qval(observation, action)

  def get_qval(self, observation, action):
    """Get Q-val."""
    return self.critic.get_qval(observation, action)
| [
"numpy.clip",
"common.actor_critic.ActorNetwork",
"common.replay_buffer.ReplayBuffer",
"common.actor_critic.CriticNetwork",
"numpy.random.randn"
] | [((1489, 1747), 'common.actor_critic.ActorNetwork', 'ActorNetwork', ([], {'sess': 'sess', 'state_dim': 'state_dim', 'action_dim': 'self.action_dim', 'action_high': 'self.action_high', 'action_low': 'self.action_low', 'learning_rate': 'config.actor_lr', 'grad_norm_clip': 'config.grad_norm_clip', 'tau': 'config.tau', 'batch_size': 'config.batch_size'}), '(sess=sess, state_dim=state_dim, action_dim=self.action_dim,\n action_high=self.action_high, action_low=self.action_low, learning_rate\n =config.actor_lr, grad_norm_clip=config.grad_norm_clip, tau=config.tau,\n batch_size=config.batch_size)\n', (1501, 1747), False, 'from common.actor_critic import ActorNetwork\n'), ((2029, 2174), 'common.actor_critic.CriticNetwork', 'CriticNetwork', ([], {'sess': 'sess', 'state_dim': 'state_dim', 'action_dim': 'self.action_dim', 'learning_rate': 'config.critic_lr', 'tau': 'config.tau', 'gamma': 'config.gamma'}), '(sess=sess, state_dim=state_dim, action_dim=self.action_dim,\n learning_rate=config.critic_lr, tau=config.tau, gamma=config.gamma)\n', (2042, 2174), False, 'from common.actor_critic import CriticNetwork\n'), ((2380, 2438), 'common.replay_buffer.ReplayBuffer', 'replay_buffer.ReplayBuffer', ([], {'buffer_size': 'config.buffer_size'}), '(buffer_size=config.buffer_size)\n', (2406, 2438), False, 'from common import replay_buffer\n'), ((3168, 3229), 'numpy.clip', 'np.clip', (['action_with_noise', 'self.action_low', 'self.action_high'], {}), '(action_with_noise, self.action_low, self.action_high)\n', (3175, 3229), True, 'import numpy as np\n'), ((3005, 3037), 'numpy.random.randn', 'np.random.randn', (['self.action_dim'], {}), '(self.action_dim)\n', (3020, 3037), True, 'import numpy as np\n')] |
# Generated by Django 3.1.4 on 2021-01-09 20:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration for the products app.

    Drops Product.review, adds Product.total_review (float, default 0),
    and creates a Review model with FKs to Product and the user model.
    NOTE: applied migrations are schema history -- do not edit this file;
    make changes via a new migration instead.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('products', '0002_auto_20210102_1247'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='review',
        ),
        migrations.AddField(
            model_name='product',
            name='total_review',
            field=models.FloatField(default=0),
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField(blank=True, max_length=250)),
                ('status', models.CharField(choices=[('True', 'True'), ('False', 'False'), ('New', 'New')], default='New', max_length=20)),
                ('subject', models.CharField(blank=True, max_length=50)),
                ('ip', models.CharField(blank=True, max_length=20)),
                ('rate', models.IntegerField(default=1)),
                ('create_at', models.DateTimeField(auto_now_add=True)),
                ('update_at', models.DateTimeField(auto_now=True)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.migrations.RemoveField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((369, 428), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""product"""', 'name': '"""review"""'}), "(model_name='product', name='review')\n", (391, 428), False, 'from django.db import migrations, models\n'), ((579, 607), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (596, 607), False, 'from django.db import migrations, models\n'), ((723, 816), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (739, 816), False, 'from django.db import migrations, models\n'), ((843, 887), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(250)'}), '(blank=True, max_length=250)\n', (859, 887), False, 'from django.db import migrations, models\n'), ((917, 1031), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('True', 'True'), ('False', 'False'), ('New', 'New')]", 'default': '"""New"""', 'max_length': '(20)'}), "(choices=[('True', 'True'), ('False', 'False'), ('New',\n 'New')], default='New', max_length=20)\n", (933, 1031), False, 'from django.db import migrations, models\n'), ((1058, 1101), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)'}), '(blank=True, max_length=50)\n', (1074, 1101), False, 'from django.db import migrations, models\n'), ((1127, 1170), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (1143, 1170), False, 'from django.db import migrations, models\n'), ((1198, 1228), 
'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (1217, 1228), False, 'from django.db import migrations, models\n'), ((1261, 1300), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1281, 1300), False, 'from django.db import migrations, models\n'), ((1333, 1368), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1353, 1368), False, 'from django.db import migrations, models\n'), ((1399, 1489), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""products.product"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'products.product')\n", (1416, 1489), False, 'from django.db import migrations, models\n'), ((1512, 1608), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1529, 1608), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from distutils.core import Extension
import pathlib
# Absolute path of the directory containing this setup.py; kept for the
# commented-out long_description block below.
here = pathlib.Path(__file__).parent.resolve()
# Package metadata and build configuration for the envemind distribution.
setup(
    name='envemind',
    version='0.0.1',
    description='Prediction of monoisotopic mass in mass spectra',
    # long_description=(here / 'README.md').read_text(encoding='utf-8'),
    # long_description_content_type='text/markdown',
    url='https://github.com/PiotrRadzinski/envemind',
    author='<NAME>, <NAME>',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    # NOTE(review): "monisotopic" looks like a typo for "monoisotopic" —
    # left unchanged here because it is runtime metadata, not a comment.
    keywords = 'Mass spectrometry monisotopic mass',
    packages=find_packages(),
    python_requires='>=3.6',
    install_requires='numpy scipy IsoSpecPy pyteomics'.split(),
    # entry_points={},
    # scripts=[''],
)
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((1178, 1193), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1191, 1193), False, 'from setuptools import setup, find_packages\n'), ((150, 172), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (162, 172), False, 'import pathlib\n')] |
import argparse
import os
import subprocess
import time
from Cython.Build import cythonize
import generate_bindings
from meson_scripts import copy_tools, download_python, generate_init_files, \
locations, platform_check, generate_godot, \
download_godot
generate_bindings.build()
def cythonize_files():
    """Cythonize every .pyx source in the py4godot package tree.

    The glob patterns are processed in a fixed order; the resulting module
    list is not used further (cythonize is called for its side effects of
    generating the C sources).
    """
    pyx_globs = (
        'py4godot/core/*/*.pyx',
        "py4godot/classes/*.pyx",
        "py4godot/utils/*.pyx",
        "py4godot/pluginscript_api/*.pyx",
        "py4godot/pluginscript_api/*/*.pyx",
        "py4godot/pluginscript_api/*/*/*.pyx",
        "py4godot/pluginscript_api/*/*/*/*.pyx",
        "py4godot/gdnative_api/*.pyx",
        "py4godot/enums/*.pyx",
        "py4godot/events/*.pyx",
    )
    modules = []
    for pattern in pyx_globs:
        modules += cythonize(pattern, language_level=3)
def compile_python_ver_file(platform):
    """Render the cross-file template for the matching python/godot dirs.

    Reads platforms/binary_dirs/python_ver_temp.cross, substitutes the
    {python_ver} and {godot} placeholders for the given platform, and writes
    the result to platforms/binary_dirs/python_ver_compile.cross.
    """
    substitutions = {
        "{python_ver}": locations.get_python_dir(platform),
        "{godot}": locations.get_godot_dir(platform),
    }
    with open("platforms/binary_dirs/python_ver_temp.cross", "r") as template:
        rendered = template.read()
    # Fill the template placeholders one by one.
    for placeholder, value in substitutions.items():
        rendered = rendered.replace(placeholder, value)
    with open("platforms/binary_dirs/python_ver_compile.cross", "w") as out_file:
        out_file.write(rendered)
def get_compiler():
    """Probe the shell for an available compiler and return its name.

    Tries MSVC first (via vcvarsall), then gcc; raises if neither probe
    command exits successfully. Probe output is discarded.
    """
    probes = (("msvc", "vcvarsall"), ("gcc", "gcc --version"))
    for compiler_name, probe_cmd in probes:
        outcome = subprocess.run(probe_cmd, shell=True, stdout=subprocess.DEVNULL,
                                 stderr=subprocess.STDOUT)
        if outcome.returncode == 0:
            return compiler_name
    raise Exception("No compiler found")
# --- Platform / CLI setup -------------------------------------------------
current_platform = platform_check.get_platform()
# Shell command separator: "&" on Windows-style shells, ";" on Linux.
command_separator = "&"
if "linux" in current_platform:
    command_separator = ";"
my_parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
my_parser.add_argument('--compiler',
                       help='specify the compiler, you want to use to compile')
my_parser.add_argument('--target_platform',
                       help='specify the platform, you want to go build for')
my_parser.add_argument("-run_tests", help="should tests be run", default="False")
my_parser.add_argument("-download_godot", help="should tests be run", default="False")
# Execute parse_args()
args = my_parser.parse_args()
# Determining if tests should be run (string flag, case-insensitive)
should_run_tests = args.run_tests.lower() == "true"
# Determining if godot binary should be downloaded
should_download_godot = args.download_godot.lower() == "true"
build_dir = f"build_meson/{args.target_platform}"
start = time.time()
# Auto-detect the compiler when not given on the command line.
if args.compiler is None:
    print("Checking for compilers")
    args.compiler = get_compiler()
    print(f"Got compiler:{args.compiler}")
cythonize_files()
# loading the needed python files for the target platform
download_python.download_file(args.target_platform, allow_copy=True)
# download needed python files for the current platform
download_python.download_file(current_platform, allow_copy=False)
compile_python_ver_file(current_platform)
# initializing for msvc if wanted as compiler (todo:should be improved sometime)
msvc_init = f"vcvarsall.bat {'x86_amd64'} {command_separator} cl {command_separator} " if "msvc" in args.compiler else ""
# Configure with meson (wiping an existing build dir) and build with ninja,
# all in one shell invocation so the msvc environment carries over.
res = subprocess.Popen(msvc_init +
                       f"meson {build_dir} --cross-file platforms/{args.target_platform}.cross "
                       f"--cross-file platforms/compilers/{args.compiler}_compiler.native "
                       f"--cross-file platforms/binary_dirs/python_ver_compile.cross "
                       f"--buildtype=release {'--wipe' if os.path.isdir(build_dir) else ''}"
                       f"{command_separator} ninja -C build_meson/{args.target_platform}",
                       shell=True)
res.wait()
# --- Post-build packaging steps ------------------------------------------
copy_tools.run(args.target_platform)
generate_init_files.create_init_file(args.target_platform)
copy_tools.copy_main(args.target_platform)
generate_godot.generate_lib(args.target_platform)
generate_godot.generate_gdignore()
print("=================================Build finished==================================")
print("Build took:", time.time() - start, "seconds")
if should_download_godot:
    print("=================================Start download==================================")
    download_godot.run(current_platform)
    print("=================================Fnish download==================================")
# running tests (via the meson/ninja "test" target); a non-zero return
# code from the test run aborts the script.
if should_run_tests:
    print("=================================Start tests==================================")
    start = time.time()
    copy_tools.copy_tests(args.target_platform)
    res = subprocess.Popen(
        f"ninja -C build_meson/{args.target_platform} test", shell=True)
    res.wait()
    streamdata = res.communicate()[0]
    rc = res.returncode
    print("=================================Build finished==================================")
    print("Running tests took:", time.time() - start, "seconds")
    if rc != 0:
        raise Exception("Tests failed")
| [
"meson_scripts.copy_tools.copy_main",
"meson_scripts.generate_godot.generate_lib",
"argparse.ArgumentParser",
"Cython.Build.cythonize",
"subprocess.Popen",
"subprocess.run",
"meson_scripts.copy_tools.copy_tests",
"meson_scripts.locations.get_godot_dir",
"meson_scripts.generate_godot.generate_gdignor... | [((264, 289), 'generate_bindings.build', 'generate_bindings.build', ([], {}), '()\n', (287, 289), False, 'import generate_bindings\n'), ((2157, 2186), 'meson_scripts.platform_check.get_platform', 'platform_check.get_platform', ([], {}), '()\n', (2184, 2186), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((2285, 2335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'fromfile_prefix_chars': '"""@"""'}), "(fromfile_prefix_chars='@')\n", (2308, 2335), False, 'import argparse\n'), ((3060, 3071), 'time.time', 'time.time', ([], {}), '()\n', (3069, 3071), False, 'import time\n'), ((3290, 3358), 'meson_scripts.download_python.download_file', 'download_python.download_file', (['args.target_platform'], {'allow_copy': '(True)'}), '(args.target_platform, allow_copy=True)\n', (3319, 3358), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((3416, 3481), 'meson_scripts.download_python.download_file', 'download_python.download_file', (['current_platform'], {'allow_copy': '(False)'}), '(current_platform, allow_copy=False)\n', (3445, 3481), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((4272, 4308), 'meson_scripts.copy_tools.run', 'copy_tools.run', (['args.target_platform'], {}), '(args.target_platform)\n', (4286, 4308), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((4309, 4367), 'meson_scripts.generate_init_files.create_init_file', 'generate_init_files.create_init_file', (['args.target_platform'], {}), '(args.target_platform)\n', (4345, 4367), False, 'from meson_scripts import copy_tools, download_python, 
generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((4368, 4410), 'meson_scripts.copy_tools.copy_main', 'copy_tools.copy_main', (['args.target_platform'], {}), '(args.target_platform)\n', (4388, 4410), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((4411, 4460), 'meson_scripts.generate_godot.generate_lib', 'generate_godot.generate_lib', (['args.target_platform'], {}), '(args.target_platform)\n', (4438, 4460), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((4461, 4495), 'meson_scripts.generate_godot.generate_gdignore', 'generate_godot.generate_gdignore', ([], {}), '()\n', (4493, 4495), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((328, 380), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/core/*/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/core/*/*.pyx', language_level=3)\n", (337, 380), False, 'from Cython.Build import cythonize\n'), ((395, 448), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/classes/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/classes/*.pyx', language_level=3)\n", (404, 448), False, 'from Cython.Build import cythonize\n'), ((463, 514), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/utils/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/utils/*.pyx', language_level=3)\n", (472, 514), False, 'from Cython.Build import cythonize\n'), ((529, 591), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/pluginscript_api/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/pluginscript_api/*.pyx', language_level=3)\n", (538, 591), False, 'from Cython.Build import cythonize\n'), ((606, 670), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/pluginscript_api/*/*.pyx"""'], 
{'language_level': '(3)'}), "('py4godot/pluginscript_api/*/*.pyx', language_level=3)\n", (615, 670), False, 'from Cython.Build import cythonize\n'), ((685, 751), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/pluginscript_api/*/*/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/pluginscript_api/*/*/*.pyx', language_level=3)\n", (694, 751), False, 'from Cython.Build import cythonize\n'), ((766, 834), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/pluginscript_api/*/*/*/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/pluginscript_api/*/*/*/*.pyx', language_level=3)\n", (775, 834), False, 'from Cython.Build import cythonize\n'), ((849, 907), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/gdnative_api/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/gdnative_api/*.pyx', language_level=3)\n", (858, 907), False, 'from Cython.Build import cythonize\n'), ((922, 973), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/enums/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/enums/*.pyx', language_level=3)\n", (931, 973), False, 'from Cython.Build import cythonize\n'), ((988, 1040), 'Cython.Build.cythonize', 'cythonize', (['"""py4godot/events/*.pyx"""'], {'language_level': '(3)'}), "('py4godot/events/*.pyx', language_level=3)\n", (997, 1040), False, 'from Cython.Build import cythonize\n'), ((1166, 1200), 'meson_scripts.locations.get_python_dir', 'locations.get_python_dir', (['platform'], {}), '(platform)\n', (1190, 1200), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((1217, 1250), 'meson_scripts.locations.get_godot_dir', 'locations.get_godot_dir', (['platform'], {}), '(platform)\n', (1240, 1250), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((1733, 1830), 'subprocess.run', 'subprocess.run', (['"""vcvarsall"""'], {'shell': '(True)', 
'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.STDOUT'}), "('vcvarsall', shell=True, stdout=subprocess.DEVNULL, stderr=\n subprocess.STDOUT)\n", (1747, 1830), False, 'import subprocess\n'), ((1939, 2039), 'subprocess.run', 'subprocess.run', (['"""gcc --version"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.STDOUT'}), "('gcc --version', shell=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT)\n", (1953, 2039), False, 'import subprocess\n'), ((4767, 4803), 'meson_scripts.download_godot.run', 'download_godot.run', (['current_platform'], {}), '(current_platform)\n', (4785, 4803), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((5041, 5052), 'time.time', 'time.time', ([], {}), '()\n', (5050, 5052), False, 'import time\n'), ((5057, 5100), 'meson_scripts.copy_tools.copy_tests', 'copy_tools.copy_tests', (['args.target_platform'], {}), '(args.target_platform)\n', (5078, 5100), False, 'from meson_scripts import copy_tools, download_python, generate_init_files, locations, platform_check, generate_godot, download_godot\n'), ((5111, 5197), 'subprocess.Popen', 'subprocess.Popen', (['f"""ninja -C build_meson/{args.target_platform} test"""'], {'shell': '(True)'}), "(f'ninja -C build_meson/{args.target_platform} test', shell\n =True)\n", (5127, 5197), False, 'import subprocess\n'), ((4609, 4620), 'time.time', 'time.time', ([], {}), '()\n', (4618, 4620), False, 'import time\n'), ((5407, 5418), 'time.time', 'time.time', ([], {}), '()\n', (5416, 5418), False, 'import time\n'), ((4099, 4123), 'os.path.isdir', 'os.path.isdir', (['build_dir'], {}), '(build_dir)\n', (4112, 4123), False, 'import os\n')] |
import os
import django
from django.conf import settings
def pytest_configure(config):
    """Configure Django for the test session.

    Points Django at the local ``settings`` module (unless the environment
    already names one), applies the settings, and initialises the app
    registry. The ``config`` pytest object is unused. Order matters:
    ``django.setup()`` must run after settings are configured.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
    settings.configure()
    django.setup()
| [
"os.environ.setdefault",
"django.setup",
"django.conf.settings.configure"
] | [((123, 182), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'settings')\n", (144, 182), False, 'import os\n'), ((187, 207), 'django.conf.settings.configure', 'settings.configure', ([], {}), '()\n', (205, 207), False, 'from django.conf import settings\n'), ((212, 226), 'django.setup', 'django.setup', ([], {}), '()\n', (224, 226), False, 'import django\n')] |
import owlready2
import yaml
import urllib.request
import os
import gzip
import json
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../cellxgene_schema"))
import env
from typing import List
import os
def _download_owls(
    owl_info_yml: str = env.OWL_INFO_YAML, output_dir: str = env.ONTOLOGY_DIR
):
    """
    Downloads the ontology owl files specified in 'owl_info_yml' into 'output_dir'
    :param str owl_info_yml: path to yaml file with OWL information
    :param str output_dir: path to writable directory where owl files will be downloaded to
    :rtype None
    """
    with open(owl_info_yml, "r") as owl_info_handle:
        owl_info = yaml.safe_load(owl_info_handle)
    for ontology, info in owl_info.items():
        print(f"Downloading {ontology}")
        # Get owl info: "latest" names the version key inside "urls"
        latest_version = owl_info[ontology]["latest"]
        url = owl_info[ontology]["urls"][latest_version]
        # Format of owl (handles cases where they are compressed) —
        # inferred from the URL's final extension
        download_format = url.split(".")[-1]
        output_file = os.path.join(output_dir, ontology + ".owl")
        # Gzipped owls are fetched to a temporary .gz next to the target,
        # decompressed, then the temporary artifact is removed.
        if download_format == "gz":
            urllib.request.urlretrieve(url, output_file + ".gz")
            _decompress(output_file + ".gz", output_file)
            os.remove(output_file + ".gz")
        else:
            urllib.request.urlretrieve(url, output_file)
def _decompress(infile: str, tofile: str):
"""
Decompresses a gziped file
:param str infile: path gziped file
:param str tofile: path to output decompressed file
:rtype None
"""
with open(infile, "rb") as inf, open(tofile, "w", encoding="utf8") as tof:
decom_str = gzip.decompress(inf.read()).decode("utf-8")
tof.write(decom_str)
def _parse_owls(
    working_dir: str = env.ONTOLOGY_DIR,
    owl_info_yml: str = env.OWL_INFO_YAML,
    output_json_file: str = env.PARSED_ONTOLOGIES_FILE,
):
    """
    Parses all owl files in working_dir. Extracts information from all classes in the owl file.
    The extracted information is written into a gzipped a json file with the following structure:
    {
        "ontology_name":
            {
            "term_id": {
                "label": "..."
                "deprecated": True
                "ancestors": [
                    "ancestor1_term_id_1",
                    "ancestor2_term_id_2"
                    ]
                }
            }

            "term_id2": {
                ...
            }
            ...
    }
    }
    :param str working_dir: path to folder with owl files
    :param str owl_info_yml: path to writable directory where owl files will be downloaded to
    :param str owl_info_yml: path to yaml file with owl information
    :param str output_json_file: path to output json file
    :rtype None
    """
    with open(owl_info_yml, "r") as owl_info_handle:
        owl_info = yaml.safe_load(owl_info_handle)
    # Collect every *.owl file under working_dir for parsing.
    owl_files = []
    for owl_file in os.listdir(working_dir):
        if owl_file.endswith(".owl"):
            owl_files.append(os.path.join(working_dir, owl_file))
    # Parse owl files
    onto_dict = {}
    for owl_file in owl_files:
        # Each owl file is loaded in its own owlready2 World so the
        # ontologies do not contaminate each other.
        world = owlready2.World()
        onto = world.get_ontology(owl_file)
        onto.load()
        onto_dict[onto.name] = {}
        print(f"Processing {onto.name}")
        for onto_class in onto.classes():
            # owlready2 class names use "_" (e.g. CL_0000001); term ids use ":"
            term_id = onto_class.name.replace("_", ":")
            # Skip terms that are not direct children from this ontology
            if not onto.name == term_id.split(":")[0]:
                continue
            # If there are specified target terms then only work with them
            if onto.name in owl_info:
                if "only" in owl_info[onto.name]:
                    if term_id not in owl_info[onto.name]["only"]:
                        continue
            # Gets label (empty string when the class has no label)
            onto_dict[onto.name][term_id] = dict()
            try:
                onto_dict[onto.name][term_id]["label"] = onto_class.label[0]
            except IndexError:
                onto_dict[onto.name][term_id]["label"] = ""
            # Add the "deprecated" status
            onto_dict[onto.name][term_id]["deprecated"] = False
            if onto_class.deprecated:
                if onto_class.deprecated.first():
                    onto_dict[onto.name][term_id]["deprecated"] = True
            # Gets ancestors (restricted to the same ontology prefix)
            ancestors = _get_ancestors(onto_class, onto.name)
            # If "children_of" specified in owl info then skip the current term if it is
            # not a children of those indicated.
            if onto.name in owl_info:
                if "children_of" in owl_info[onto.name]:
                    if not list(set(ancestors) &
                                set(owl_info[onto.name]["children_of"])):
                        onto_dict[onto.name].pop(term_id)
                        continue
            # only add the ancestors if it's not NCBITaxon, as this saves a lot of disk space
            if onto.name == "NCBITaxon":
                onto_dict[onto.name][term_id]["ancestors"] = []
            else:
                onto_dict[onto.name][term_id]["ancestors"] = ancestors
    # Write the whole result as a gzipped, indented JSON document.
    with gzip.open(output_json_file, "wt") as output_json:
        json.dump(onto_dict, output_json, indent=2)
def _get_ancestors(onto_class: owlready2.entity.ThingClass, ontololgy_name: str) -> List[str]:
"""
Returns a list of ancestors ids of the given onto class, only returns those belonging to ontology_name,
it will format the id from the form CL_xxxx to CL:xxxx
:param owlready2.entity.ThingClass onto_class: the class for which ancestors will be retrieved
:param str ontololgy_name: only ancestors from this ontology will be kept
:rtype List[str]
:return list of ancestors (term ids), it could be empty
"""
ancestors = []
for ancestor in onto_class.ancestors():
if onto_class.name == ancestor.name:
continue
if ancestor.name.split("_")[0] == ontololgy_name:
ancestors.append(ancestor.name.replace("_", ":"))
return ancestors
# Download and parse the OWL files when this module is executed as a script.
if __name__ == "__main__":
    _download_owls()
    _parse_owls()
| [
"os.listdir",
"owlready2.World",
"gzip.open",
"os.path.join",
"os.path.realpath",
"yaml.safe_load",
"json.dump",
"os.remove"
] | [((2994, 3017), 'os.listdir', 'os.listdir', (['working_dir'], {}), '(working_dir)\n', (3004, 3017), False, 'import os\n'), ((692, 723), 'yaml.safe_load', 'yaml.safe_load', (['owl_info_handle'], {}), '(owl_info_handle)\n', (706, 723), False, 'import yaml\n'), ((1081, 1124), 'os.path.join', 'os.path.join', (['output_dir', "(ontology + '.owl')"], {}), "(output_dir, ontology + '.owl')\n", (1093, 1124), False, 'import os\n'), ((2922, 2953), 'yaml.safe_load', 'yaml.safe_load', (['owl_info_handle'], {}), '(owl_info_handle)\n', (2936, 2953), False, 'import yaml\n'), ((3213, 3230), 'owlready2.World', 'owlready2.World', ([], {}), '()\n', (3228, 3230), False, 'import owlready2\n'), ((5260, 5293), 'gzip.open', 'gzip.open', (['output_json_file', '"""wt"""'], {}), "(output_json_file, 'wt')\n", (5269, 5293), False, 'import gzip\n'), ((5318, 5361), 'json.dump', 'json.dump', (['onto_dict', 'output_json'], {'indent': '(2)'}), '(onto_dict, output_json, indent=2)\n', (5327, 5361), False, 'import json\n'), ((141, 167), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (157, 167), False, 'import os\n'), ((1296, 1326), 'os.remove', 'os.remove', (["(output_file + '.gz')"], {}), "(output_file + '.gz')\n", (1305, 1326), False, 'import os\n'), ((3086, 3121), 'os.path.join', 'os.path.join', (['working_dir', 'owl_file'], {}), '(working_dir, owl_file)\n', (3098, 3121), False, 'import os\n')] |
# importing modules and packages
# system tools
import os
import sys
import argparse
sys.path.append(os.path.join("..", ".."))
from contextlib import redirect_stdout
# pandas, numpy, gensim
import pandas as pd
import numpy as np
import gensim.downloader
# import my classifier utility functions - see the Github repo!
import utils.classifier_utils as clf
# Machine learning stuff
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
# matplotlib
import matplotlib.pyplot as plt
class lr_classifier():
    """Bag-of-words logistic regression over Game of Thrones script lines.

    Balances the number of sentences per season, vectorizes the sentences
    with a CountVectorizer and fits a LogisticRegression that predicts
    which season a sentence comes from.

    ``args`` is a dict with keys:
      * "filename": path to the Game_of_Thrones_Script.csv data file
      * "outpath": directory where the classification report CSV is written
    """
    def __init__(self, args):
        self.args = args
        # Full script data; expected to contain 'Sentence' and 'Season' columns.
        self.data = pd.read_csv(self.args["filename"])

    def preprocessing(self):
        '''
        The preprocessing function performs various transformations to the data
        1. Data is balanced to have an equal amount of label classes
        2. Data is split into x and y
        3. Data is further split into train and test values
        4. X features are vectorized
        '''
        print("[INFO] Preprocessing Game of Thrones data...")
        # Seasons contain different numbers of sentences, which would skew
        # the classifier; the smallest per-season count is used as n when
        # balancing so every class ends up equally represented.
        n_sentences = []
        for val in set(self.data['Season']):
            length = len(self.data['Sentence'].loc[self.data['Season'] == val])
            n_sentences.append(length)
        # Balancing data to not bias classifier
        balanced_data = clf.balance(self.data, label="Season", n=min(n_sentences))
        # Splitting up to x features and y from the balanced data
        x = balanced_data['Sentence'].values
        # BUGFIX: use a raw string for the regex — '(\d+)' in a plain string
        # relies on the invalid escape sequence "\d" (DeprecationWarning,
        # a SyntaxWarning/error in newer Pythons). Extract only the season
        # number for cleaner labels; ravel() turns the column into a row.
        y = np.array(balanced_data['Season'].str.extract(r'(\d+)')).ravel()
        self.y = [int(numeric_string) for numeric_string in y]  # labels as ints
        # Stratified split keeps the (equal) label distribution produced by
        # balancing in both the train and test sets.
        X_train, X_test, self.y_train, self.y_test = train_test_split(
            x,
            self.y,
            test_size=0.25,
            random_state=42,
            stratify=self.y)
        # Vectorization: learn the vocabulary on the training split only,
        # then reuse it to transform the test split.
        print("[INFO] Vectorizing text...")
        vectorizer = CountVectorizer()
        self.X_train_feats = vectorizer.fit_transform(X_train)
        self.X_test_feats = vectorizer.transform(X_test)
        # Feature names of the train vocabulary (kept for inspection).
        feature_names = vectorizer.get_feature_names()
        # Vectorize the full dataset as well (e.g. for cross-validation).
        # NOTE(review): this re-fits the vectorizer on all sentences, so its
        # vocabulary no longer matches X_train_feats/X_test_feats — confirm
        # X_vect is only used independently of those.
        self.X_vect = vectorizer.fit_transform(x)

    def model(self):
        '''
        Fits a logistic regression to the count-vectorized training features
        and stores test-set predictions in self.y_pred.
        '''
        print("[INFO] Defining logistic regression model...")
        # Basic logistic regression with a fixed seed for reproducibility.
        classifier = LogisticRegression(random_state=42).fit(self.X_train_feats, self.y_train)
        self.y_pred = classifier.predict(self.X_test_feats)

    def evaluation(self):
        '''
        Writes a per-season precision/recall/F1 classification report as CSV
        into the configured output directory.
        '''
        print("[INFO] Evaluating logistic regression model...")
        # output_dict=True yields a nested dict that DataFrame renders as a table.
        classifier_metrics = pd.DataFrame(metrics.classification_report(self.y_test,
                                                                   self.y_pred,
                                                                   output_dict=True))
        print(classifier_metrics)
        classifier_metrics.to_csv(os.path.join(self.args['outpath'], "lr_classification_report.csv"))
def main():
    """Parse CLI options, then run preprocessing, training and evaluation."""
    # Argparse: both options are optional and default to the repo's data/out
    # directories two levels up.
    ap = argparse.ArgumentParser(description="[INFO] LR classifier arguments")
    ap.add_argument("-f",
                    "--filename",
                    required=False,
                    type=str,
                    default= os.path.join("..","..", "data", "4", "Game_of_Thrones_Script.csv"),
                    help="str, file name and location")
    ap.add_argument("-o",
                    "--outpath",
                    required=False,
                    type=str,
                    default= os.path.join("..","..", "out","4"),
                    help="str, output location")
    args = vars(ap.parse_args())
    # Define class and run the full pipeline in order.
    lr_classifier_got = lr_classifier(args)
    lr_classifier_got.preprocessing()
    lr_classifier_got.model()
    lr_classifier_got.evaluation()

if __name__=="__main__":
    main()
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.metrics.classification_report",
"os.path.join",
"sklearn.linear_model.LogisticRegression"
] | [((101, 125), 'os.path.join', 'os.path.join', (['""".."""', '""".."""'], {}), "('..', '..')\n", (113, 125), False, 'import os\n'), ((4799, 4868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""[INFO] LR classifier arguments"""'}), "(description='[INFO] LR classifier arguments')\n", (4822, 4868), False, 'import argparse\n'), ((787, 821), 'pandas.read_csv', 'pd.read_csv', (["self.args['filename']"], {}), "(self.args['filename'])\n", (798, 821), True, 'import pandas as pd\n'), ((2505, 2582), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'self.y'], {'test_size': '(0.25)', 'random_state': '(42)', 'stratify': 'self.y'}), '(x, self.y, test_size=0.25, random_state=42, stratify=self.y)\n', (2521, 2582), False, 'from sklearn.model_selection import train_test_split\n'), ((3110, 3127), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (3125, 3127), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((4374, 4447), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['self.y_test', 'self.y_pred'], {'output_dict': '(True)'}), '(self.y_test, self.y_pred, output_dict=True)\n', (4403, 4447), False, 'from sklearn import metrics\n'), ((4677, 4743), 'os.path.join', 'os.path.join', (["self.args['outpath']", '"""lr_classification_report.csv"""'], {}), "(self.args['outpath'], 'lr_classification_report.csv')\n", (4689, 4743), False, 'import os\n'), ((5018, 5085), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data"""', '"""4"""', '"""Game_of_Thrones_Script.csv"""'], {}), "('..', '..', 'data', '4', 'Game_of_Thrones_Script.csv')\n", (5030, 5085), False, 'import os\n'), ((5290, 5326), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""out"""', '"""4"""'], {}), "('..', '..', 'out', '4')\n", (5302, 5326), False, 'import os\n'), ((3859, 3894), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], 
{'random_state': '(42)'}), '(random_state=42)\n', (3877, 3894), False, 'from sklearn.linear_model import LogisticRegression\n')] |
#!/usr/bin/env python
'''
Central execution points for non-python services
'''
import logging
from neo4j.v1 import GraphDatabase, basic_auth
import neo4j.bolt.connection
import elasticsearch.exceptions
from isoprene_pumpjack.constants.environment import environment
from isoprene_pumpjack.utils.neo_to_d3 import neo_to_d3
from isoprene_pumpjack.exceptions import IsopumpException
logger = logging.getLogger(__name__)
def execute_cypher(cypher_statement):
    '''Configure and safely execute a cypher statement.

    Builds a Bolt driver from the environment-provided URL/credentials on
    every call, runs the statement in a fresh session, and maps an
    unreachable graph server to the service's own 503 exception.
    '''
    logger.debug("Executing cypher statement")
    try:
        bolt_driver = GraphDatabase.driver(
            environment["ISOPRENE_PUMPJACK_BOLT_URL"],
            auth=basic_auth(
                environment["ISOPRENE_PUMPJACK_BOLT_USER"],
                environment["ISOPRENE_PUMPJACK_BOLT_PASSWORD"]
            )
        )
        with bolt_driver.session() as session:
            result = session.run(cypher_statement)
    except neo4j.bolt.connection.ServiceUnavailable as e:
        # Surface connectivity failures as a 503 with the driver's message
        # attached, instead of leaking the neo4j exception to callers.
        logger.error(e)
        raise IsopumpException("Could not reach graph server", status_code=503, payload={
            "message_original": e.message
        })
    return result
def execute_cypher_get_d3(cypher_statement, nodeLabels=None, linkLabels=None):
    '''In addition to safe execution, return the cypher query in d3 dict format.

    :param cypher_statement: cypher query string to run
    :param nodeLabels: optional list of node labels forwarded to neo_to_d3
    :param linkLabels: optional list of link labels forwarded to neo_to_d3
    '''
    # BUGFIX: the defaults were mutable lists ([]), which Python shares
    # across all calls — any mutation inside neo_to_d3 would leak into
    # subsequent calls. Use None sentinels and build fresh lists per call.
    if nodeLabels is None:
        nodeLabels = []
    if linkLabels is None:
        linkLabels = []
    logger.debug("Executing cypher and returning as d3 dict")
    result = execute_cypher(cypher_statement)
    data = neo_to_d3(result, nodeLabels, linkLabels)
    return data
def execute_search(elasticsearch_dsl_search_object):
    '''Execute an elasticsearch-dsl object safely.

    Maps an unreachable document server to the service's own 503 exception
    instead of leaking the elasticsearch client error to callers.
    '''
    try:
        response = elasticsearch_dsl_search_object.execute()
    except elasticsearch.exceptions.ConnectionError as e:
        logger.error(e)
        raise IsopumpException("Could not reach document server", status_code=503)
    return response
| [
"logging.getLogger",
"isoprene_pumpjack.utils.neo_to_d3.neo_to_d3",
"neo4j.v1.basic_auth",
"isoprene_pumpjack.exceptions.IsopumpException"
] | [((394, 421), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (411, 421), False, 'import logging\n'), ((1470, 1511), 'isoprene_pumpjack.utils.neo_to_d3.neo_to_d3', 'neo_to_d3', (['result', 'nodeLabels', 'linkLabels'], {}), '(result, nodeLabels, linkLabels)\n', (1479, 1511), False, 'from isoprene_pumpjack.utils.neo_to_d3 import neo_to_d3\n'), ((1045, 1156), 'isoprene_pumpjack.exceptions.IsopumpException', 'IsopumpException', (['"""Could not reach graph server"""'], {'status_code': '(503)', 'payload': "{'message_original': e.message}"}), "('Could not reach graph server', status_code=503, payload={\n 'message_original': e.message})\n", (1061, 1156), False, 'from isoprene_pumpjack.exceptions import IsopumpException\n'), ((1801, 1869), 'isoprene_pumpjack.exceptions.IsopumpException', 'IsopumpException', (['"""Could not reach document server"""'], {'status_code': '(503)'}), "('Could not reach document server', status_code=503)\n", (1817, 1869), False, 'from isoprene_pumpjack.exceptions import IsopumpException\n'), ((692, 799), 'neo4j.v1.basic_auth', 'basic_auth', (["environment['ISOPRENE_PUMPJACK_BOLT_USER']", "environment['ISOPRENE_PUMPJACK_BOLT_PASSWORD']"], {}), "(environment['ISOPRENE_PUMPJACK_BOLT_USER'], environment[\n 'ISOPRENE_PUMPJACK_BOLT_PASSWORD'])\n", (702, 799), False, 'from neo4j.v1 import GraphDatabase, basic_auth\n')] |
# Generated by Django 2.2.1 on 2019-07-28 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a required 'token' CharField to the
    # login app's user model (verbose_name is Chinese for "token
    # verification"). The temporary default of 1 satisfies existing rows;
    # preserve_default=False drops it afterwards.

    dependencies = [
        ('login', '0002_auto_20190720_1846'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='token',
            field=models.CharField(default=1, max_length=100, verbose_name='token验证'),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.CharField"
] | [((329, 396), 'django.db.models.CharField', 'models.CharField', ([], {'default': '(1)', 'max_length': '(100)', 'verbose_name': '"""token验证"""'}), "(default=1, max_length=100, verbose_name='token验证')\n", (345, 396), False, 'from django.db import migrations, models\n')] |
from allauth.account.utils import setup_user_email, send_email_confirmation
from rest_framework.response import Response
from usersystem.serializers import UserSerializer, UserRegisterSerializer
from rest_framework.views import APIView
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_404_NOT_FOUND
from rest_framework.permissions import AllowAny
from django.contrib.auth.models import User
from usersystem.settings import PASSWORD_MAX_LENGTH, PASSWORD_MIN_LENGTH, LOCAL_OAUTH2_KEY
import requests as makerequest
from usersystem.secrets import SOCIAL_AUTH_GOOGLE_OAUTH2_KEY, SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET
from social.apps.django_app.default.models import UserSocialAuth
# Create your views here.
class AccountView(APIView):
    """
    An API endpoint for managing the current user.
    GET returns basic information about the current user.
    POST expects at least one of 'email', 'first_name' or 'last_name' fields.
    DELETE deletes the current user.
    """

    def get(self, request):
        # Serialize and return the authenticated user's profile.
        serialized = UserSerializer(request.user, context={'request': request})
        return Response(serialized.data)

    def post(self, request):
        # Reject an empty payload outright.
        if not request.data:
            return Response(status=HTTP_400_BAD_REQUEST)
        serializer = UserSerializer(data=request.data, partial=True)
        # Raises (and returns) a 400 response when the data is invalid.
        serializer.is_valid(raise_exception=True)
        validated = serializer.validated_data
        user = request.user
        user.email = validated.get('email', user.email)
        user.first_name = validated.get('first_name', user.first_name)
        user.last_name = validated.get('last_name', user.last_name)
        user.save()
        return Response(status=HTTP_200_OK)

    def delete(self, request):
        # If this is a Google social account, revoke its Google tokens first.
        social_auths = iter(UserSocialAuth.get_social_auth_for_user(request.user))
        soc_auth = next(social_auths, None)
        if soc_auth is not None and soc_auth.provider == 'google-oauth2':
            token = soc_auth.extra_data.get(
                'refresh_token', soc_auth.extra_data['access_token'])
            makerequest.post(
                'https://accounts.google.com/o/oauth2/revoke?token=' + token)
        request.user.delete()
        return Response(status=HTTP_200_OK)
class AccountUsernameView(APIView):
    """
    A simple API endpoint for getting an username with a given email.
    POST must contain 'email' field. Server returns 400 if email is already used or 200 otherwise.
    """
    permission_classes = (AllowAny,)

    def post(self, request):
        email = request.data.get('email', None)
        if email is None:
            return Response({"message": "'email' field is missing"}, status=HTTP_400_BAD_REQUEST)
        # EAFP: look the user up and treat a miss as "does not exist".
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            return Response({"userExist": False}, status=HTTP_200_OK)
        return Response(
            {"userExist": True, "username": user.username}, status=HTTP_200_OK)
class AccountPasswordView(APIView):
    """
    An API endpoint for password management (for the current user)
    GET returns 200 if user has a password or 404 otherwise
    POST must contain 'newPassword' field ( and 'oldPassword' if user already has a password )
    """

    def post(self, request):
        user = request.user
        new_password = request.data.get('newPassword', None)
        if new_password is None:
            return Response({"message": "Missing 'newPassword' field"}, status=HTTP_400_BAD_REQUEST)
        # Enforce the configured password length bounds.
        if not (PASSWORD_MIN_LENGTH <= len(new_password) <= PASSWORD_MAX_LENGTH):
            return Response({"message": "New password doesn't match length requirements"}, status=HTTP_400_BAD_REQUEST)
        # Users who already have a password must prove they know it.
        if user.has_usable_password():
            old_password = request.data.get('oldPassword', None)
            if old_password is None:
                return Response({"message": "Missing 'oldPassword' field"}, status=HTTP_400_BAD_REQUEST)
            if not user.check_password(old_password):
                return Response({"message": "'oldPassword' is invalid"}, status=HTTP_400_BAD_REQUEST)
            if old_password == new_password:
                return Response({"message": "oldPassword and newPassword are identical"}, status=HTTP_400_BAD_REQUEST)
        user.set_password(new_password)
        user.save()
        return Response(status=HTTP_200_OK)

    def get(self, request):
        # 200 when a usable password exists, 404 otherwise.
        status = HTTP_200_OK if request.user.has_usable_password() else HTTP_404_NOT_FOUND
        return Response(status=status)
class AccountSocialView(APIView):
    """
    A simple API endpoint for checking if user has connected social account
    GET returns 200 and the name of the social auth provider if user has connected social account or 404 otherwise.
    """

    def get(self, request):
        # Take the first linked social account, if any.
        linked = iter(UserSocialAuth.get_social_auth_for_user(request.user))
        soc_auth = next(linked, None)
        if soc_auth is None:
            return Response(status=HTTP_404_NOT_FOUND)
        return Response({"social_provider": soc_auth.provider}, status=HTTP_200_OK)
class RegisterView(APIView):
    """
    An API endpoint for user registration.
    POST must contain 'username', 'email', 'first_name', 'last_name' and 'password' fields.
    """
    permission_classes = (AllowAny,)

    def post(self, request):
        serializer = UserRegisterSerializer(
            data=request.data, context={'request': request})
        # A 400 response is raised automatically when the payload is invalid.
        serializer.is_valid(raise_exception=True)
        fields = serializer.validated_data
        new_user = User.objects.create(
            username=fields['username'],
            email=fields['email'],
            first_name=fields['first_name'],
            last_name=fields['last_name']
        )
        # Password is hashed via set_password, never stored in create().
        new_user.set_password(fields['password'])
        new_user.save()
        setup_user_email(request, new_user, [])
        # send_email_confirmation(request, new_user, signup=True)
        return Response(status=HTTP_201_CREATED)
class RegisterCheckEmailView(APIView):
    """
    A simple API endpoint for checking if an user with a given email exists.
    POST must contain 'email' field. Server returns 400 if email is already used or 200 otherwise.
    """
    permission_classes = (AllowAny,)

    def post(self, request):
        email = request.data.get('email', None)
        if email is None:
            return Response({"message": "'email' field is missing"}, status=HTTP_400_BAD_REQUEST)
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # whole queryset just to test its truthiness.
        if User.objects.filter(email=email).exists():
            return Response({"emailExist": True}, status=HTTP_400_BAD_REQUEST)
        return Response(status=HTTP_200_OK)
class RegisterCheckUsernameView(APIView):
    """
    An API endpoint for checking if an username is taken.
    POST must contain 'username' field. Server returns 400 if username is already used or 200 is it is available
    """
    permission_classes = (AllowAny,)

    def post(self, request):
        username = request.data.get('username', None)
        if username is None:
            return Response({"message": "'username' field is missing"}, status=HTTP_400_BAD_REQUEST)
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # whole queryset just to test its truthiness.
        if User.objects.filter(username=username).exists():
            return Response(status=HTTP_400_BAD_REQUEST)
        return Response(status=HTTP_200_OK)
class GoogleAuthCodeView(APIView):
    """
    An API endpoint which expects a google auth code, which is then used for social login.
    POST must contain a 'code' field with the authorization code. This code is
    exchanged for google's access and refresh tokens, which are stored on server.
    Afterwards local access and refresh tokens are generated and returned, which are
    then used to communicate with our API.
    Go to https://developers.google.com/identity/sign-in/web/server-side-flow
    for more information on the google server-side auth flow implemented here.
    """
    permission_classes = (AllowAny,)

    def post(self, request):
        code = request.data.get('code', None)
        if not code:
            return Response({"message": "Authorization code missing"}, status=HTTP_400_BAD_REQUEST)
        # Exchange auth code for Google's tokens.
        googleurl = 'https://accounts.google.com/o/oauth2/token'
        exchangeCodeRequest = makerequest.post(
            googleurl,
            data={
                'code': code,
                'redirect_uri': 'postmessage',
                'client_id': SOCIAL_AUTH_GOOGLE_OAUTH2_KEY,
                'client_secret': SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET,
                'grant_type': 'authorization_code'
            })
        # We can now exchange the external token for a token linked to *OUR*
        # OAuth2 provider.
        exchangeExternalTokenUrl = 'http://' + \
            request.META['HTTP_HOST'] + '/social-auth/convert-token'
        externalToken = exchangeCodeRequest.json().get('access_token', None)
        if externalToken is None:
            return Response({"message": "Server could not retrieve external tokens"}, status=HTTP_400_BAD_REQUEST)
        exchangeExternalTokenRequest = makerequest.post(exchangeExternalTokenUrl, data={
            'grant_type': 'convert_token',
            'client_id': LOCAL_OAUTH2_KEY,
            'backend': 'google-oauth2',
            'token': externalToken}
        )
        # BUG FIX: status codes are integers and must be compared with !=,
        # not 'is not' (identity), which only works by accident for small
        # CPython-cached ints.
        if exchangeExternalTokenRequest.status_code != makerequest.codes.ok:
            # If the social account's email is already used in another account,
            # throw an error
            return Response({"message": "User with that email already exists!"}, status=HTTP_400_BAD_REQUEST)
        getUserUrl = 'http://' + request.META['HTTP_HOST'] + '/account/'
        getUserRequest = makerequest.get(getUserUrl, data={}, headers={
            'Authorization': 'Bearer ' + exchangeExternalTokenRequest.json()['access_token']})
        # Get user and add exchangeCodeRequest's (Google's) refresh token to
        # UserSocialAuth extra_data so it can be revoked on account deletion.
        # This is a bit hacky, @TODO use python-social-auth's pipeline
        # mechanism instead
        refreshToken = exchangeCodeRequest.json().get('refresh_token', None)
        if refreshToken is not None:
            # .filter(...) directly — the intermediate .all() was redundant.
            user = User.objects.filter(
                username=getUserRequest.json()['username'])[0]
            userSocial = user.social_auth.get(provider='google-oauth2')
            userSocial.extra_data['refresh_token'] = refreshToken
            userSocial.save()
        return Response(exchangeExternalTokenRequest.json())
| [
"requests.post",
"usersystem.serializers.UserSerializer",
"social.apps.django_app.default.models.UserSocialAuth.get_social_auth_for_user",
"usersystem.serializers.UserRegisterSerializer",
"django.contrib.auth.models.User.objects.all",
"django.contrib.auth.models.User.objects.filter",
"allauth.account.ut... | [((1065, 1123), 'usersystem.serializers.UserSerializer', 'UserSerializer', (['request.user'], {'context': "{'request': request}"}), "(request.user, context={'request': request})\n", (1079, 1123), False, 'from usersystem.serializers import UserSerializer, UserRegisterSerializer\n'), ((1139, 1164), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1147, 1164), False, 'from rest_framework.response import Response\n'), ((1302, 1349), 'usersystem.serializers.UserSerializer', 'UserSerializer', ([], {'data': 'request.data', 'partial': '(True)'}), '(data=request.data, partial=True)\n', (1316, 1349), False, 'from usersystem.serializers import UserSerializer, UserRegisterSerializer\n'), ((1832, 1860), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (1840, 1860), False, 'from rest_framework.response import Response\n'), ((2410, 2438), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (2418, 2438), False, 'from rest_framework.response import Response\n'), ((3153, 3203), 'rest_framework.response.Response', 'Response', (["{'userExist': False}"], {'status': 'HTTP_200_OK'}), "({'userExist': False}, status=HTTP_200_OK)\n", (3161, 3203), False, 'from rest_framework.response import Response\n'), ((4549, 4577), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (4557, 4577), False, 'from rest_framework.response import Response\n'), ((4717, 4752), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_404_NOT_FOUND'}), '(status=HTTP_404_NOT_FOUND)\n', (4725, 4752), False, 'from rest_framework.response import Response\n'), ((5581, 5652), 'usersystem.serializers.UserRegisterSerializer', 'UserRegisterSerializer', ([], {'data': 'request.data', 'context': "{'request': request}"}), "(data=request.data, context={'request': request})\n", 
(5603, 5652), False, 'from usersystem.serializers import UserSerializer, UserRegisterSerializer\n'), ((5841, 6017), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': "validated_data['username']", 'email': "validated_data['email']", 'first_name': "validated_data['first_name']", 'last_name': "validated_data['last_name']"}), "(username=validated_data['username'], email=\n validated_data['email'], first_name=validated_data['first_name'],\n last_name=validated_data['last_name'])\n", (5860, 6017), False, 'from django.contrib.auth.models import User\n'), ((6149, 6184), 'allauth.account.utils.setup_user_email', 'setup_user_email', (['request', 'user', '[]'], {}), '(request, user, [])\n', (6165, 6184), False, 'from allauth.account.utils import setup_user_email, send_email_confirmation\n'), ((6262, 6295), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_201_CREATED'}), '(status=HTTP_201_CREATED)\n', (6270, 6295), False, 'from rest_framework.response import Response\n'), ((6781, 6813), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'email': 'email'}), '(email=email)\n', (6800, 6813), False, 'from django.contrib.auth.models import User\n'), ((6910, 6938), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (6918, 6938), False, 'from rest_framework.response import Response\n'), ((7434, 7472), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username': 'username'}), '(username=username)\n', (7453, 7472), False, 'from django.contrib.auth.models import User\n'), ((7547, 7575), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (7555, 7575), False, 'from rest_framework.response import Response\n'), ((8538, 8760), 'requests.post', 'makerequest.post', (['googleurl'], {'data': "{'code': code, 'redirect_uri': 'postmessage', 'client_id':\n 
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY, 'client_secret':\n SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET, 'grant_type': 'authorization_code'}"}), "(googleurl, data={'code': code, 'redirect_uri':\n 'postmessage', 'client_id': SOCIAL_AUTH_GOOGLE_OAUTH2_KEY,\n 'client_secret': SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET, 'grant_type':\n 'authorization_code'})\n", (8554, 8760), True, 'import requests as makerequest\n'), ((9357, 9528), 'requests.post', 'makerequest.post', (['exchangeExternalTokenUrl'], {'data': "{'grant_type': 'convert_token', 'client_id': LOCAL_OAUTH2_KEY, 'backend':\n 'google-oauth2', 'token': externalToken}"}), "(exchangeExternalTokenUrl, data={'grant_type':\n 'convert_token', 'client_id': LOCAL_OAUTH2_KEY, 'backend':\n 'google-oauth2', 'token': externalToken})\n", (9373, 9528), True, 'import requests as makerequest\n'), ((1243, 1280), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_400_BAD_REQUEST'}), '(status=HTTP_400_BAD_REQUEST)\n', (1251, 1280), False, 'from rest_framework.response import Response\n'), ((2260, 2350), 'requests.post', 'makerequest.post', (["('https://accounts.google.com/o/oauth2/revoke?token=' + refresh_token)"], {}), "('https://accounts.google.com/o/oauth2/revoke?token=' +\n refresh_token)\n", (2276, 2350), True, 'import requests as makerequest\n'), ((2823, 2901), 'rest_framework.response.Response', 'Response', (['{\'message\': "\'email\' field is missing"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "\'email\' field is missing"}, status=HTTP_400_BAD_REQUEST)\n', (2831, 2901), False, 'from rest_framework.response import Response\n'), ((2935, 2964), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'email': 'email'}), '(email=email)\n', (2951, 2964), False, 'from django.contrib.auth.models import User\n'), ((3060, 3136), 'rest_framework.response.Response', 'Response', (["{'userExist': True, 'username': data.username}"], {'status': 'HTTP_200_OK'}), "({'userExist': True, 'username': data.username}, 
status=HTTP_200_OK)\n", (3068, 3136), False, 'from rest_framework.response import Response\n'), ((3614, 3700), 'rest_framework.response.Response', 'Response', (['{\'message\': "Missing \'newPassword\' field"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "Missing \'newPassword\' field"}, status=\n HTTP_400_BAD_REQUEST)\n', (3622, 3700), False, 'from rest_framework.response import Response\n'), ((3801, 3905), 'rest_framework.response.Response', 'Response', (['{\'message\': "New password doesn\'t match length requirements"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "New password doesn\'t match length requirements"},\n status=HTTP_400_BAD_REQUEST)\n', (3809, 3905), False, 'from rest_framework.response import Response\n'), ((4673, 4701), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_200_OK'}), '(status=HTTP_200_OK)\n', (4681, 4701), False, 'from rest_framework.response import Response\n'), ((5173, 5208), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_404_NOT_FOUND'}), '(status=HTTP_404_NOT_FOUND)\n', (5181, 5208), False, 'from rest_framework.response import Response\n'), ((5242, 5309), 'rest_framework.response.Response', 'Response', (["{'social_provider': socAuth.provider}"], {'status': 'HTTP_200_OK'}), "({'social_provider': socAuth.provider}, status=HTTP_200_OK)\n", (5250, 5309), False, 'from rest_framework.response import Response\n'), ((6690, 6768), 'rest_framework.response.Response', 'Response', (['{\'message\': "\'email\' field is missing"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "\'email\' field is missing"}, status=HTTP_400_BAD_REQUEST)\n', (6698, 6768), False, 'from rest_framework.response import Response\n'), ((6834, 6893), 'rest_framework.response.Response', 'Response', (["{'emailExist': True}"], {'status': 'HTTP_400_BAD_REQUEST'}), "({'emailExist': True}, status=HTTP_400_BAD_REQUEST)\n", (6842, 6893), False, 'from rest_framework.response import Response\n'), ((7340, 7426), 
'rest_framework.response.Response', 'Response', (['{\'message\': "\'username\' field is missing"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "\'username\' field is missing"}, status=\n HTTP_400_BAD_REQUEST)\n', (7348, 7426), False, 'from rest_framework.response import Response\n'), ((7493, 7530), 'rest_framework.response.Response', 'Response', ([], {'status': 'HTTP_400_BAD_REQUEST'}), '(status=HTTP_400_BAD_REQUEST)\n', (7501, 7530), False, 'from rest_framework.response import Response\n'), ((8321, 8406), 'rest_framework.response.Response', 'Response', (["{'message': 'Authorization code missing'}"], {'status': 'HTTP_400_BAD_REQUEST'}), "({'message': 'Authorization code missing'}, status=HTTP_400_BAD_REQUEST\n )\n", (8329, 8406), False, 'from rest_framework.response import Response\n'), ((9221, 9321), 'rest_framework.response.Response', 'Response', (["{'message': 'Server could not retrieve external tokens'}"], {'status': 'HTTP_400_BAD_REQUEST'}), "({'message': 'Server could not retrieve external tokens'}, status=\n HTTP_400_BAD_REQUEST)\n", (9229, 9321), False, 'from rest_framework.response import Response\n'), ((9991, 10086), 'rest_framework.response.Response', 'Response', (["{'message': 'User with that email already exists!'}"], {'status': 'HTTP_400_BAD_REQUEST'}), "({'message': 'User with that email already exists!'}, status=\n HTTP_400_BAD_REQUEST)\n", (9999, 10086), False, 'from rest_framework.response import Response\n'), ((2005, 2058), 'social.apps.django_app.default.models.UserSocialAuth.get_social_auth_for_user', 'UserSocialAuth.get_social_auth_for_user', (['request.user'], {}), '(request.user)\n', (2044, 2058), False, 'from social.apps.django_app.default.models import UserSocialAuth\n'), ((4065, 4151), 'rest_framework.response.Response', 'Response', (['{\'message\': "Missing \'oldPassword\' field"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "Missing \'oldPassword\' field"}, status=\n HTTP_400_BAD_REQUEST)\n', (4073, 4151), False, 
'from rest_framework.response import Response\n'), ((4228, 4306), 'rest_framework.response.Response', 'Response', (['{\'message\': "\'oldPassword\' is invalid"}'], {'status': 'HTTP_400_BAD_REQUEST'}), '({\'message\': "\'oldPassword\' is invalid"}, status=HTTP_400_BAD_REQUEST)\n', (4236, 4306), False, 'from rest_framework.response import Response\n'), ((4366, 4466), 'rest_framework.response.Response', 'Response', (["{'message': 'oldPassword and newPassword are identical'}"], {'status': 'HTTP_400_BAD_REQUEST'}), "({'message': 'oldPassword and newPassword are identical'}, status=\n HTTP_400_BAD_REQUEST)\n", (4374, 4466), False, 'from rest_framework.response import Response\n'), ((5068, 5121), 'social.apps.django_app.default.models.UserSocialAuth.get_social_auth_for_user', 'UserSocialAuth.get_social_auth_for_user', (['request.user'], {}), '(request.user)\n', (5107, 5121), False, 'from social.apps.django_app.default.models import UserSocialAuth\n'), ((10457, 10475), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (10473, 10475), False, 'from django.contrib.auth.models import User\n')] |
#!/usr/bin/env python
#
# ======================================================================
#
# <NAME>, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file geocoords/tests/libtests/data/ConvertDataApp.py
## @brief Python application to generate data for coordinate conversion tests.
from pyre.applications.Script import Script
# ConvertDataApp class
class ConvertDataApp(Script):
    """Python application to generate data for coordinate conversion tests."""

    def main(self, *args, **kwds):
        """Run application."""
        # The 'data' facility computes the converted coordinates; the
        # 'dumper' facility writes them out for the C++ test suite.
        data = self.inventory.data
        data.calculate()
        data.dump(self.inventory.dumper)
        return

    def __init__(self):
        """Constructor."""
        Script.__init__(self, 'convertdataapp')
        return

    class Inventory(Script.Inventory):
        ## @class Inventory
        ## Python object for managing ConvertDataApp facilities and properties.
        ##
        ## \b Properties
        ## @li None
        ##
        ## \b Facilities
        ## @li \b data Data generator for coordinate transformation test
        ## @li \b dumper Dump data to file

        import pyre.inventory

        from spatialdata.utils.CppData import CppData
        from ConvertData import ConvertData
        # Facility that produces the coordinate-conversion test data.
        data = pyre.inventory.facility('data', factory=ConvertData)
        # Facility that serializes the data to a file.
        dumper = pyre.inventory.facility('dumper', factory=CppData)
# main
if __name__ == '__main__':
    # Instantiate and run the pyre application.
    app = ConvertDataApp()
    app.run()
# End of file
| [
"pyre.applications.Script.Script.__init__"
] | [((949, 988), 'pyre.applications.Script.Script.__init__', 'Script.__init__', (['self', '"""convertdataapp"""'], {}), "(self, 'convertdataapp')\n", (964, 988), False, 'from pyre.applications.Script import Script\n')] |
import subprocess
import json
import csv
from csv import DictWriter
import datetime
import pandas as pd
import Cmd
import Data
from Report import Report
import File
def main():
    """Collect validator earnings/token info and dump it to a timestamped CSV."""
    Data.val_earnings_w_sum_columns()
    token_info = Data.get_val_token_info()
    output_path = File._generate_file_name("fxcored_status")
    token_info.to_csv(output_path, index=False)
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"File._generate_file_name",
"Data.val_earnings_w_sum_columns",
"Data.get_val_token_info"
] | [((187, 220), 'Data.val_earnings_w_sum_columns', 'Data.val_earnings_w_sum_columns', ([], {}), '()\n', (218, 220), False, 'import Data\n'), ((238, 263), 'Data.get_val_token_info', 'Data.get_val_token_info', ([], {}), '()\n', (261, 263), False, 'import Data\n'), ((285, 327), 'File._generate_file_name', 'File._generate_file_name', (['"""fxcored_status"""'], {}), "('fxcored_status')\n", (309, 327), False, 'import File\n')] |
# Solution of;
# Project Euler Problem 67: Maximum path sum II
# https://projecteuler.net/problem=67
#
# By starting at the top of the triangle below and moving to adjacent numbers
# on the row below, the maximum total from top to bottom is 23. 37 42 4 68 5 9
# 3That is, 3 + 7 + 4 + 9 = 23. Find the maximum total from top to bottom in
# triangle. txt (right click and 'Save Link/Target As. . . '), a 15K text file
# containing a triangle with one-hundred rows. NOTE: This is a much more
# difficult version of Problem 18. It is not possible to try every route to
# solve this problem, as there are 299 altogether! If you could check one
# trillion (1012) routes every second it would take over twenty billion years
# to check them all. There is an efficient algorithm to solve it. ;o)
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver handed to the timing harness; performs no work."""
    return None
if __name__ == '__main__':
    # n: problem size, i: timing iterations, prob_id: Project Euler problem id.
    n = 1000
    i = 10000
    prob_id = 67
    timed.caller(dummy, n, i, prob_id)
| [
"timed.caller"
] | [((965, 999), 'timed.caller', 'timed.caller', (['dummy', 'n', 'i', 'prob_id'], {}), '(dummy, n, i, prob_id)\n', (977, 999), False, 'import timed\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def unemp_dur(path):
  """Unemployment Duration

  Journal of Business Economics and Statistics web site :
  http://amstat.tandfonline.com/loi/ubes20

  3343 observations of unemployment-spell data (spell length, censoring
  indicators, age, UI claim, replacement/disregard rates, log wage, tenure).

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `unemp_dur.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 3343 rows and 11 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'unemp_dur.csv'
  filepath = os.path.join(path, filename)
  # Download the raw CSV on first use only.
  if not os.path.exists(filepath):
    url = 'http://dustintran.com/data/r/Ecdat/UnempDur.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='unemp_dur.csv',
                               resume=False)

  data = pd.read_csv(filepath, index_col=0, parse_dates=True)
  return data.values, {'columns': data.columns}
| [
"observations.util.maybe_download_and_extract",
"os.path.join",
"os.path.expanduser"
] | [((1438, 1462), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (1456, 1462), False, 'import os\n'), ((1611, 1698), 'observations.util.maybe_download_and_extract', 'maybe_download_and_extract', (['path', 'url'], {'save_file_name': '"""unemp_dur.csv"""', 'resume': '(False)'}), "(path, url, save_file_name='unemp_dur.csv',\n resume=False)\n", (1637, 1698), False, 'from observations.util import maybe_download_and_extract\n'), ((1779, 1807), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1791, 1807), False, 'import os\n'), ((1516, 1544), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1528, 1544), False, 'import os\n')] |
import numpy as np
import GPy
from .GP_interface import GPInterface, convert_lengthscale, convert_2D_format
class GPyWrapper(GPInterface):
    """GPInterface implementation backed by GPy regression models.

    Targets are shifted by ``self.center`` before fitting; every prediction
    and posterior sample adds the offset back.
    """

    def __init__(self):
        # GPy settings
        GPy.plotting.change_plotting_library("matplotlib") # use matpoltlib for drawing
        super().__init__()
        # Constant subtracted from y at fit time and re-added on prediction.
        self.center = 0.0

    def create_kernel(self, ndim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
        """Build a stationary kernel ('Matern52' or 'RBF'), optionally plus a bias term.

        Raises:
            ValueError: for an unsupported kernel name.
        """
        if kernel_name == 'Matern52':
            l = convert_lengthscale(ndim, lengthscale)
            kernel = GPy.kern.Matern52(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
        elif kernel_name == 'RBF':
            l = convert_lengthscale(ndim, lengthscale)
            kernel = GPy.kern.RBF(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
        else:
            raise ValueError('Unsupported kernel: '+ kernel_name)

        self.ndim = ndim
        self.kernel = kernel
        if const_kernel:
            # stat_kernel always refers to the stationary part, so priors and
            # fixes below target it even when a bias kernel is added.
            self.kernel += GPy.kern.Bias(1.0)
            self.stat_kernel = self.kernel.basic
        else:
            self.stat_kernel = self.kernel

    def set_kernel_length_prior(self, prior_mean, prior_var):
        """Place independent Gamma priors (by mean/variance) on each lengthscale."""
        if self.ndim != len(prior_mean) or self.ndim != len(prior_var):
            raise ValueError('Incorrect kernel prior parameters.')
        if self.kernel is None:
            raise ValueError('Kernel should be defined first.')
        for i in range(self.ndim):
            self.stat_kernel.lengthscale[[i]].set_prior(GPy.priors.Gamma.from_EV(prior_mean[i],prior_var[i])) # don't know why, but [i] does not work

    def set_kernel_var_prior(self, prior_mean, prior_var):
        """Place a Gamma prior (by mean/variance) on the kernel variance."""
        self.stat_kernel.variance.set_prior(GPy.priors.Gamma.from_EV(prior_mean,prior_var))

    def fix_kernel_lengthscale(self):
        """Freeze the lengthscales during optimization."""
        self.stat_kernel.lengthscale.fix()

    def fix_kernel_var(self):
        """Freeze the kernel variance during optimization."""
        self.stat_kernel.variance.fix()

    def create_model(self, x, y, noise_var, noise_prior='fixed'):
        """Create a (hetero- or homoscedastic) GP regression model.

        noise_var: scalar -> GPRegression; array of y's shape ->
        GPHeteroscedasticRegression with per-point noise.
        """
        x = convert_2D_format(x)
        y = convert_2D_format(y) - self.center
        self.outdim = y.shape[1]
        noise_var = np.array(noise_var)
        if noise_var.ndim == 0:
            self.model = GPy.models.GPRegression(x, y, self.kernel, noise_var=noise_var)
            noise = self.model.Gaussian_noise
        else:
            assert noise_var.shape == y.shape
            self.model = GPy.models.GPHeteroscedasticRegression(x, y, self.kernel)
            self.model['.*het_Gauss.variance'] = noise_var
            noise = self.model.het_Gauss.variance
        if noise_prior == 'fixed':
            noise.fix()
        else:
            raise ValueError('Not Implemented yet.')

    def predict_f(self, x, full_cov=False):
        '''
        Returns:
            posterior mean, posterior variance
        '''
        x = convert_2D_format(x)
        post_mean, post_var = self.model.predict_noiseless(x, full_cov=full_cov)
        if self.outdim > 1:
            # GPy returns one variance column; replicate it per output.
            post_var = np.concatenate([post_var]*self.outdim, axis=-1)
        return post_mean + self.center, post_var

    def predict_withGradients(self, x):
        '''
        Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
        Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
        '''
        x = convert_2D_format(x)
        m, v = self.model.predict(x)
        v = np.clip(v, 1e-10, np.inf)  # guard against tiny negative variances
        dmdx, dvdx = self.model.predictive_gradients(x)
        dmdx = dmdx[:,:,0]
        dsdx = dvdx / (2*np.sqrt(v))  # chain rule: d(sqrt v)/dx
        return m + self.center, np.sqrt(v), dmdx, dsdx

    def posterior_sample_f(self, x, size = 10):
        '''
        Parameters
            x: (Nnew x input_dim)
        Returns
            (Nnew x output_dim x samples)
        '''
        return self.model.posterior_samples_f(x, size) + self.center

    def optimize(self, num_restarts=30, opt_messages=False, print_result=True, parallel=False):
        """Optimize hyperparameters with random restarts.

        BUG FIX: 'parallel' was previously hard-coded to False in the
        optimize_restarts call, silently ignoring the caller's argument.
        """
        self.model.optimize_restarts(num_restarts=num_restarts, robust=True,
                                     parallel=parallel, messages=opt_messages)
        if print_result:
            print(self.kernel)
            print(self.stat_kernel.lengthscale)
            print(self.stat_kernel.variance)
class GPyWrapper_Classifier(GPyWrapper):
    """GP classification wrapper; reuses kernel construction from GPyWrapper."""

    def create_model(self, x, y):
        # Classification does not use the output offset.
        assert self.center == 0.0
        inputs = convert_2D_format(x)
        labels = convert_2D_format(y)
        self.outdim = labels.shape[1]
        self.model = GPy.models.GPClassification(inputs, labels, self.kernel)

    def predict_prob(self, x):
        # First element of GPy's predict() tuple is the class probability.
        inputs = convert_2D_format(x)
        prob, _ = self.model.predict(inputs, full_cov=False)
        return prob

    def optimize(self, maxiter=1000, opt_messages=False, print_result=True):
        # Run the optimizer in five shorter stages.
        stage_iters = int(maxiter/5)
        for _ in range(5):
            self.model.optimize(max_iters=stage_iters, messages=opt_messages)
        if print_result:
            print(self.kernel)
            print(self.stat_kernel.lengthscale)
class GPyWrapper_MultiSeparate(object):
    """A collection of independent GPyWrapper models, one per output dimension."""

    def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
        """Create one kernel per output; scalar arguments are broadcast to all outputs."""
        if isinstance(kernel_name, str):
            kernel_name = [kernel_name]*outdim
        if np.isscalar(var_f):
            var_f = np.ones(outdim) * var_f
        if np.isscalar(lengthscale):
            # BUG FIX: this branch previously overwrote var_f instead of
            # broadcasting the scalar lengthscale.
            lengthscale = np.ones(outdim) * lengthscale
        if isinstance(const_kernel, bool):
            const_kernel = [const_kernel]*outdim

        self.gp_list = list()
        for i in range(outdim):
            gp = GPyWrapper()
            gp.create_kernel(ndim, kernel_name[i], var_f[i], lengthscale[i], const_kernel[i])
            self.gp_list.append(gp)
        self.outdim = outdim

    def set_kernel_length_prior(self, prior_mean, prior_var):
        # Apply same prior for all outputs
        for i in range(self.outdim):
            self.gp_list[i].set_kernel_length_prior(prior_mean, prior_var)

    def set_kernel_var_prior(self, prior_mean, prior_var):
        # Apply same prior for all outputs
        for i in range(self.outdim):
            self.gp_list[i].set_kernel_var_prior(prior_mean, prior_var)

    def fix_kernel_lengthscale(self):
        for i in range(self.outdim):
            self.gp_list[i].fix_kernel_lengthscale()

    def fix_kernel_var(self):
        for i in range(self.outdim):
            self.gp_list[i].fix_kernel_var()

    def create_model(self, x, y, noise_var, noise_prior='fixed'):
        """Fit one GP per column of y; per-output noise columns are supported."""
        if not (y.ndim == 2 and y.shape[1] == self.outdim):
            raise ValueError('Incorrect data shape.')
        noise_var = np.array(noise_var)
        for i in range(self.outdim):
            if noise_var.ndim == 2 and noise_var.shape[1] == self.outdim:
                noise_var_i = noise_var[:, i:i+1]
            else:
                noise_var_i = noise_var
            gp = self.gp_list[i]
            gp.create_model(x, y[:,i:i+1], noise_var_i, noise_prior)

    def predict_f(self, x, full_cov=False):
        """Return (mean, variance), each concatenated over outputs along the last axis."""
        post_mean_all = list()
        post_var_all = list()
        for i in range(self.outdim):
            post_mean, post_var = self.gp_list[i].predict_f(x, full_cov)
            post_mean_all.append(post_mean)
            post_var_all.append(post_var)
        return np.concatenate(post_mean_all,axis=-1), np.concatenate(post_var_all,axis=-1)

    def posterior_sample_f(self, x, size = 10):
        """Draw posterior samples from every output GP and stack them on axis 1."""
        post_samples_all = list()
        for i in range(self.outdim):
            # BUG FIX: previously called predict_f(x, full_cov) with an
            # undefined 'full_cov' name; draw posterior samples instead.
            post_samples = self.gp_list[i].posterior_sample_f(x, size)
            post_samples_all.append(post_samples)
        return np.concatenate(post_samples_all,axis=1)

    def optimize(self, num_restarts=30, opt_messages=False, print_result=False):
        for i in range(self.outdim):
            self.gp_list[i].optimize(num_restarts, opt_messages, print_result)

    def predict_withGradients(self, x):
        '''
        Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
        Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
        m_all: (num_x, outdim)
        std_all: (num_x, outdim)
        dmdx_all: (num_x, outdim, n_dim)
        dsdx_all: (num_x, outdim, n_dim)
        '''
        m_all, std_all, dmdx_all, dsdx_all = [], [], [], []
        for i in range(self.outdim):
            m, std, dmdx, dsdx = self.gp_list[i].predict_withGradients(x)
            m_all.append(m)
            std_all.append(std)
            dmdx_all.append(dmdx)
            dsdx_all.append(dsdx)
        return np.concatenate(m_all,axis=-1), np.concatenate(std_all,axis=-1), np.stack(dmdx_all,axis=1), np.stack(dsdx_all,axis=1)
class GPyWrapper_MultiIndep(GPyWrapper):
    """GP wrapper whose kernel is an independent-outputs pair of base kernels."""

    def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
        super().create_kernel(ndim, kernel_name, var_f, lengthscale, const_kernel)
        k_multi = GPy.kern.IndependentOutputs([self.kernel, self.kernel.copy()])
        #icm = GPy.util.multioutput.ICM(input_dim=ndim, num_outputs=outdim, kernel=self.kernel)
        #icm.B.W.constrain_fixed(0) # fix W matrix to 0
        # Keep a handle on the stationary sub-kernel for later inspection.
        if const_kernel:
            self.stat_kernel = k_multi.sum.basic
        else:
            self.stat_kernel = k_multi.basic
        self.kernel = k_multi
        print(self.kernel)

    def create_model(self, x, y, noise_var, noise_prior='fixed'):
        x = convert_2D_format(x)
        y = convert_2D_format(y) - self.center
        numdata = x.shape[0]
        outdim = y.shape[1]
        indim = x.shape[1]
        # Build the stacked multi-output representation (inputs repeated per
        # output, with an output-index column appended).
        yy = y.transpose().ravel()
        ind = np.concatenate([o * np.ones(numdata) for o in range(outdim)])
        xx = np.concatenate([x] * outdim)
        xx = np.concatenate((xx, ind[:, np.newaxis]), axis=1)
        print(xx.shape, yy.shape)
        # NOTE(review): xx/yy are constructed and printed above but the model
        # below is fit on the original x/y — confirm whether xx/yy were meant
        # to be passed to GPRegression instead.
        self.model = GPy.models.GPRegression(x, y, self.kernel, noise_var=noise_var)
        if noise_prior == 'fixed':
            self.model.Gaussian_noise.fix()
        else:
            raise ValueError('Not Implemented yet.')
def create_GP(num_active_gates, outdim, k_name='Matern52', var_f=1.0, lengthscale=1.0, center=0.0):
    """
    Build a GPyWrapper over `num_active_gates` input dimensions.

    - outdim: kept for interface compatibility (used by the commented-out
      multi-output variant below).
    - k_name: kernel name (e.g. 'Matern52' or 'RBF').
    - var_f / lengthscale: kernel variance and lengthscale; a scalar
      lengthscale is broadcast across all input dimensions.
    - center: constant mean offset stored on the wrapper.
    """
    if np.isscalar(lengthscale):
        # BUG FIX: the original replaced a scalar lengthscale with np.ones(...)
        # outright, discarding the requested value. Broadcast it instead
        # (same idiom as GPyWrapper_MultiSeparate.create_kernel).
        lengthscale = np.ones(num_active_gates) * lengthscale
    gp = GPyWrapper() # initialize GP environment
    #gp = GPyWrapper_MultiIndep() # initialize GP environment
    gp.center = center
    # GP kernels
    gp.create_kernel(num_active_gates, k_name, var_f, lengthscale)
    #gp.create_kernel(num_active_gates, outdim, k_name, var_f, lengthscale)
    return gp
def main():
    """Fit a 1-D GP regression demo problem and print the posterior mean."""
    X = np.arange(1, 6).reshape((5, 1))
    f = lambda x: np.square(x - 4.0)
    #Y = np.concatenate([f(X), -f(X)], axis=1)
    Y = np.concatenate([f(X)], axis=1)
    # Heteroscedastic noise growing with X.
    #noise_var = 0.01**2
    #noise_var = np.concatenate([np.square(X / 10.)]*2, axis=1)
    noise_var = np.square(X / 10.)
    print(X.shape, Y.shape)
    gp = create_GP(1, 2, 'Matern52', 2.0, 1.0, 0.0)
    gp.create_model(X, Y, noise_var, noise_prior='fixed')
    gp.optimize()
    X_pred = np.linspace(1., 5., 10).reshape((-1, 1))
    mean, cov = gp.predict_f(X_pred)
    print(mean)
    #print(cov)
    '''
    ###
    # GP Classification test
    ###
    X = np.arange(1,6).reshape((5,1))
    Y = np.array([1.0, 1.0, 1.0, 0.0, 0.0]).reshape((5,1))
    gpc = GPyWrapper_Classifier()
    gpc.create_kernel(1, 'RBF', 1.0, 1.0)
    gpc.create_model(X, Y)
    X_pred = np.linspace(1.,5.,10).reshape((-1,1))
    print(gpc.predict_prob(X_pred))
    print(gpc.model)
    gpc.optimize()
    print(gpc.predict_prob(X_pred))
    print(gpc.model)
    '''

if __name__ == '__main__':
    main()
| [
"numpy.clip",
"numpy.sqrt",
"numpy.isscalar",
"numpy.ones",
"GPy.kern.RBF",
"GPy.plotting.change_plotting_library",
"numpy.square",
"numpy.array",
"GPy.models.GPClassification",
"GPy.kern.Matern52",
"GPy.kern.Bias",
"numpy.stack",
"numpy.concatenate",
"GPy.priors.Gamma.from_EV",
"numpy.l... | [((10112, 10136), 'numpy.isscalar', 'np.isscalar', (['lengthscale'], {}), '(lengthscale)\n', (10123, 10136), True, 'import numpy as np\n'), ((10773, 10792), 'numpy.square', 'np.square', (['(X / 10.0)'], {}), '(X / 10.0)\n', (10782, 10792), True, 'import numpy as np\n'), ((197, 247), 'GPy.plotting.change_plotting_library', 'GPy.plotting.change_plotting_library', (['"""matplotlib"""'], {}), "('matplotlib')\n", (233, 247), False, 'import GPy\n'), ((2126, 2145), 'numpy.array', 'np.array', (['noise_var'], {}), '(noise_var)\n', (2134, 2145), True, 'import numpy as np\n'), ((3429, 3454), 'numpy.clip', 'np.clip', (['v', '(1e-10)', 'np.inf'], {}), '(v, 1e-10, np.inf)\n', (3436, 3454), True, 'import numpy as np\n'), ((4468, 4514), 'GPy.models.GPClassification', 'GPy.models.GPClassification', (['x', 'y', 'self.kernel'], {}), '(x, y, self.kernel)\n', (4495, 4514), False, 'import GPy\n'), ((5190, 5208), 'numpy.isscalar', 'np.isscalar', (['var_f'], {}), '(var_f)\n', (5201, 5208), True, 'import numpy as np\n'), ((5265, 5289), 'numpy.isscalar', 'np.isscalar', (['lengthscale'], {}), '(lengthscale)\n', (5276, 5289), True, 'import numpy as np\n'), ((6559, 6578), 'numpy.array', 'np.array', (['noise_var'], {}), '(noise_var)\n', (6567, 6578), True, 'import numpy as np\n'), ((7545, 7585), 'numpy.concatenate', 'np.concatenate', (['post_samples_all'], {'axis': '(1)'}), '(post_samples_all, axis=1)\n', (7559, 7585), True, 'import numpy as np\n'), ((9650, 9678), 'numpy.concatenate', 'np.concatenate', (['([x] * outdim)'], {}), '([x] * outdim)\n', (9664, 9678), True, 'import numpy as np\n'), ((9690, 9738), 'numpy.concatenate', 'np.concatenate', (['(xx, ind[:, np.newaxis])'], {'axis': '(1)'}), '((xx, ind[:, np.newaxis]), axis=1)\n', (9704, 9738), True, 'import numpy as np\n'), ((9794, 9857), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['x', 'y', 'self.kernel'], {'noise_var': 'noise_var'}), '(x, y, self.kernel, noise_var=noise_var)\n', (9817, 9857), False, 'import 
GPy\n'), ((10160, 10185), 'numpy.ones', 'np.ones', (['num_active_gates'], {}), '(num_active_gates)\n', (10167, 10185), True, 'import numpy as np\n'), ((10565, 10583), 'numpy.square', 'np.square', (['(x - 4.0)'], {}), '(x - 4.0)\n', (10574, 10583), True, 'import numpy as np\n'), ((541, 633), 'GPy.kern.Matern52', 'GPy.kern.Matern52', ([], {'input_dim': 'ndim', 'ARD': '(True)', 'variance': 'var_f', 'lengthscale': 'l', 'name': '"""basic"""'}), "(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l,\n name='basic')\n", (558, 633), False, 'import GPy\n'), ((1013, 1031), 'GPy.kern.Bias', 'GPy.kern.Bias', (['(1.0)'], {}), '(1.0)\n', (1026, 1031), False, 'import GPy\n'), ((1726, 1773), 'GPy.priors.Gamma.from_EV', 'GPy.priors.Gamma.from_EV', (['prior_mean', 'prior_var'], {}), '(prior_mean, prior_var)\n', (1750, 1773), False, 'import GPy\n'), ((2203, 2266), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['x', 'y', 'self.kernel'], {'noise_var': 'noise_var'}), '(x, y, self.kernel, noise_var=noise_var)\n', (2226, 2266), False, 'import GPy\n'), ((2398, 2455), 'GPy.models.GPHeteroscedasticRegression', 'GPy.models.GPHeteroscedasticRegression', (['x', 'y', 'self.kernel'], {}), '(x, y, self.kernel)\n', (2436, 2455), False, 'import GPy\n'), ((2990, 3039), 'numpy.concatenate', 'np.concatenate', (['([post_var] * self.outdim)'], {'axis': '(-1)'}), '([post_var] * self.outdim, axis=-1)\n', (3004, 3039), True, 'import numpy as np\n'), ((3607, 3617), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (3614, 3617), True, 'import numpy as np\n'), ((7218, 7256), 'numpy.concatenate', 'np.concatenate', (['post_mean_all'], {'axis': '(-1)'}), '(post_mean_all, axis=-1)\n', (7232, 7256), True, 'import numpy as np\n'), ((7257, 7294), 'numpy.concatenate', 'np.concatenate', (['post_var_all'], {'axis': '(-1)'}), '(post_var_all, axis=-1)\n', (7271, 7294), True, 'import numpy as np\n'), ((8523, 8553), 'numpy.concatenate', 'np.concatenate', (['m_all'], {'axis': '(-1)'}), '(m_all, axis=-1)\n', (8537, 
8553), True, 'import numpy as np\n'), ((8554, 8586), 'numpy.concatenate', 'np.concatenate', (['std_all'], {'axis': '(-1)'}), '(std_all, axis=-1)\n', (8568, 8586), True, 'import numpy as np\n'), ((8587, 8613), 'numpy.stack', 'np.stack', (['dmdx_all'], {'axis': '(1)'}), '(dmdx_all, axis=1)\n', (8595, 8613), True, 'import numpy as np\n'), ((8614, 8640), 'numpy.stack', 'np.stack', (['dsdx_all'], {'axis': '(1)'}), '(dsdx_all, axis=1)\n', (8622, 8640), True, 'import numpy as np\n'), ((10516, 10531), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (10525, 10531), True, 'import numpy as np\n'), ((10963, 10988), 'numpy.linspace', 'np.linspace', (['(1.0)', '(5.0)', '(10)'], {}), '(1.0, 5.0, 10)\n', (10974, 10988), True, 'import numpy as np\n'), ((741, 829), 'GPy.kern.RBF', 'GPy.kern.RBF', ([], {'input_dim': 'ndim', 'ARD': '(True)', 'variance': 'var_f', 'lengthscale': 'l', 'name': '"""basic"""'}), "(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name=\n 'basic')\n", (753, 829), False, 'import GPy\n'), ((1528, 1581), 'GPy.priors.Gamma.from_EV', 'GPy.priors.Gamma.from_EV', (['prior_mean[i]', 'prior_var[i]'], {}), '(prior_mean[i], prior_var[i])\n', (1552, 1581), False, 'import GPy\n'), ((3563, 3573), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (3570, 3573), True, 'import numpy as np\n'), ((5230, 5245), 'numpy.ones', 'np.ones', (['outdim'], {}), '(outdim)\n', (5237, 5245), True, 'import numpy as np\n'), ((5311, 5326), 'numpy.ones', 'np.ones', (['outdim'], {}), '(outdim)\n', (5318, 5326), True, 'import numpy as np\n'), ((9595, 9611), 'numpy.ones', 'np.ones', (['numdata'], {}), '(numdata)\n', (9602, 9611), True, 'import numpy as np\n')] |
import os
from pathlib import Path
from unittest import TestCase
import validator.validator as validator
from .test_utils import schema, build_map
class TestStrangeNames(TestCase):
    """Validator integration test run inside the 'strange-names' workspace."""

    # Remember where the suite was started so tearDownClass can restore it.
    old_cwd = os.getcwd()

    @classmethod
    def setUpClass(cls):
        os.chdir('./tests/workspaces/strange-names')

    @classmethod
    def tearDownClass(cls):
        os.chdir(cls.old_cwd)

    def test_findsAll(self):
        status, successful, failed, ignored = validator.validate_cwd('', schema, build_map)
        self.assertEqual(status, 1)
        self.assertSetEqual(successful, {Path('CAPS.VERSION')})
        self.assertSetEqual(failed, {Path('camelCaseVersionMissing.Version')})
        # Make sure 'not-detected.version.json' has not been detected.
        self.assertSetEqual(ignored, set())
| [
"os.chdir",
"validator.validator.validate_cwd",
"pathlib.Path",
"os.getcwd"
] | [((198, 209), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (207, 209), False, 'import os\n'), ((261, 305), 'os.chdir', 'os.chdir', (['"""./tests/workspaces/strange-names"""'], {}), "('./tests/workspaces/strange-names')\n", (269, 305), False, 'import os\n'), ((360, 381), 'os.chdir', 'os.chdir', (['cls.old_cwd'], {}), '(cls.old_cwd)\n', (368, 381), False, 'import os\n'), ((460, 505), 'validator.validator.validate_cwd', 'validator.validate_cwd', (['""""""', 'schema', 'build_map'], {}), "('', schema, build_map)\n", (482, 505), True, 'import validator.validator as validator\n'), ((583, 603), 'pathlib.Path', 'Path', (['"""CAPS.VERSION"""'], {}), "('CAPS.VERSION')\n", (587, 603), False, 'from pathlib import Path\n'), ((643, 682), 'pathlib.Path', 'Path', (['"""camelCaseVersionMissing.Version"""'], {}), "('camelCaseVersionMissing.Version')\n", (647, 682), False, 'from pathlib import Path\n')] |
'''Tokenizer Class'''
# -*- encoding: utf-8 -*-
import re
from .token_with_pos import TokenWithPos
from .patterns import Patterns
class Tokenizer():
    '''
    A basic Tokenizer class to tokenize strings and patterns.

    Parameters:
    - regexp: regexp used to tokenize the string (defaults to alternating
      runs of non-space / space characters)
    '''

    def __init__(self, regexp=None):
        if regexp is not None:
            self.regexp = regexp
        else:
            self.regexp = re.compile(r'[^\s]+|\s+')
        self.space_regexp = re.compile(r'\s')

    def _tokenize(self, text):
        '''Yield TokenWithPos objects for every non-space phrase in `text`.'''
        for match in self.regexp.finditer(text):
            phrase = match.group()
            if self.space_regexp.search(phrase):
                continue
            if self._phrase_full_match(phrase) is not None:
                # The whole phrase is already a recognized token type.
                yield from self._adjust_on_punc(
                    TokenWithPos(phrase, match.start(), match.end()))
            else:
                for token in self._top_down_tokenize(phrase, match.start()):
                    yield from self._adjust_on_punc(token)

    def _adjust_on_punc(self, token):
        '''Split serial or trailing punctuation off a token, keeping positions.'''
        if Patterns.PUNCT_SEQ_RE.fullmatch(token.text) and \
                Patterns.PARA_SEP_RE.fullmatch(token.text) is None:
            # a string of punc, very likely .. or ...
            for shift, single_char in enumerate(token.text):
                start_pos = token.start + shift
                yield TokenWithPos(single_char,
                                   start_pos,
                                   start_pos + 1)
        elif self._has_end_of_phrase_punc(token.text) and \
                self._phrase_full_match(token.text) in [None, 'url/email']:
            # Detach the final end-of-phrase punctuation character.
            end_pos = token.end - 1
            yield TokenWithPos(token.text[:-1], token.start, end_pos)
            yield TokenWithPos(token.text[-1], end_pos, token.end)
        else:
            yield token

    def _split_and_descend(self, regexp, phrase, offset, next_level):
        '''
        Shared body of the level 1-3 passes (previously triplicated): split
        `phrase` on `regexp` (a capturing regex, so the separators are kept by
        re.split), emit sub-phrases that fully match a known token type, and
        hand everything else down to `next_level`.
        '''
        for sub_phrase in re.split(regexp, phrase):
            if sub_phrase == '':
                continue
            length_sub_phrase = len(sub_phrase)
            if self._phrase_full_match(sub_phrase) is not None:
                yield TokenWithPos(sub_phrase,
                                   offset,
                                   offset + length_sub_phrase)
            else:
                yield from next_level(sub_phrase, offset)
            offset += length_sub_phrase

    def _top_down_tokenize(self, phrase, offset=0):
        # first get the web url and emails out
        yield from self._top_down_level_1(phrase, offset)

    def _top_down_level_1(self, phrase, offset=0):
        '''
        level 1: split on url, emails
        '''
        yield from self._split_and_descend(Patterns.ALL_WEB_CAPTURED_RE,
                                           phrase, offset,
                                           self._top_down_level_2)

    def _top_down_level_2(self, phrase, offset=0):
        '''
        level 2: split on number phrases
        '''
        yield from self._split_and_descend(Patterns.DIGITS_CAPTURED_RE,
                                           phrase, offset,
                                           self._top_down_level_3)

    def _top_down_level_3(self, phrase, offset=0):
        '''
        level 3: split on normal word boundaries
        '''
        yield from self._split_and_descend(Patterns.WORD_BF_CAPTURED_RE,
                                           phrase, offset,
                                           self._top_down_level_4)

    def _top_down_level_4(self, phrase, offset):
        '''
        level 4: here we handle special cases
        '''
        splitted = False
        parts = []
        # - split on hyphen #
        if Patterns.HYPHEN_RE.search(phrase):
            splitted = True
            parts = [
                part
                for part in Patterns.HYPHEN_CAPTURED_RE.split(phrase)
                if part != ''
            ]
            if len(parts) == 3:
                if parts[0].lower() in Patterns.COMMON_HYPHEN_START:
                    splitted = False
                elif len(parts[0]) < 4 and len(parts[2]) < 4 \
                        and len(parts[0]) + len(parts[2]) < 6:
                    # mx-doc, tcp-ip, e-mail, hp-ux etc. #
                    splitted = False
        if splitted:
            for part in parts:
                new_offset = offset + len(part)
                yield TokenWithPos(part, offset, new_offset)
                offset = new_offset
        else:
            # pick up what ever left as a token #
            yield TokenWithPos(phrase, offset, offset + len(phrase))

    def _has_end_of_phrase_punc(self, phrase):
        '''True when `phrase` ends in sentence punctuation and is not an abbreviation.'''
        end_char_is_punc = False
        if phrase[-1] in Patterns.PUNCT_END_PHRASE:
            end_char_is_punc = True
        if Patterns.ABBREV_RE.fullmatch(phrase):
            end_char_is_punc = False
        return end_char_is_punc

    def _phrase_full_match(self, phrase):
        '''Return the token type name when `phrase` is a complete token, else None.'''
        matched_type = None
        if len(phrase) == 1:
            matched_type = 'single_char'
        elif phrase.isalpha():
            matched_type = 'word'
        elif phrase in Patterns.si_units:
            matched_type = 'unit'
        elif Patterns.DIGITS_RE.fullmatch(phrase):
            matched_type = 'digit'
        elif Patterns.PARA_SEP_RE.fullmatch(phrase):
            matched_type = 'punctuation_seq'
        elif Patterns.abbreviation(phrase):
            matched_type = 'abbreviation'
        elif Patterns.ALL_WEB_RE.fullmatch(phrase):
            matched_type = 'url/email'
        return matched_type

    def tokenize(self, text):
        '''
        tokenize
        params:
        - text: string
        output: a list of token strings
        '''
        return [token.text for token in self._tokenize(text)]

    def tokenize_with_pos_info(self, text):
        '''
        tokenize
        params:
        - text: string
        output:
        - a list of Token objects (with position info)
        '''
        return list(self._tokenize(text))
| [
"re.split",
"re.compile"
] | [((493, 510), 're.compile', 're.compile', (['"""\\\\s"""'], {}), "('\\\\s')\n", (503, 510), False, 'import re\n'), ((2574, 2620), 're.split', 're.split', (['Patterns.ALL_WEB_CAPTURED_RE', 'phrase'], {}), '(Patterns.ALL_WEB_CAPTURED_RE, phrase)\n', (2582, 2620), False, 'import re\n'), ((3253, 3298), 're.split', 're.split', (['Patterns.DIGITS_CAPTURED_RE', 'phrase'], {}), '(Patterns.DIGITS_CAPTURED_RE, phrase)\n', (3261, 3298), False, 'import re\n'), ((3938, 3984), 're.split', 're.split', (['Patterns.WORD_BF_CAPTURED_RE', 'phrase'], {}), '(Patterns.WORD_BF_CAPTURED_RE, phrase)\n', (3946, 3984), False, 'import re\n'), ((439, 465), 're.compile', 're.compile', (['"""[^\\\\s]+|\\\\s+"""'], {}), "('[^\\\\s]+|\\\\s+')\n", (449, 465), False, 'import re\n')] |
from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, \
random, prod, asarray, set_printoptions, unravel_index
# Small random-array helpers (thin wrappers over numpy's global RNG).
def zero(*shape):
    """Return a float array of zeros with the given shape."""
    return zeros(shape)

def randnorm(*shape):
    """Return a standard-normal random array with the given shape."""
    return random.normal(size=shape)

def randuni(*shape):
    """Return a uniform [0, 1) random array with the given shape."""
    return random.random(size=shape)

def randint(*shape, min=-3, max=9):
    """Random integers (as floats) in [min, max] that are never zero."""
    values = asarray(random.randint(min + 1, max + 1, size=shape), dtype=float)
    # Shift the non-positive half down by one so 0 never appears.
    values[values <= 0] -= 1
    return values
# Build a model given four integers:
#   di - dimension of input
#   ds - dimension for each internal state
#   ns - number of internal states
#   do - dimension of output
def build_model(di, ds, ns, do):
    """
    Create randomly initialized parameters for a dense network.

    Returns (input_params, internal_params, output_params, internal_values);
    the last row of each weight block holds the bias term.
    """
    # Random normal initialization for every layer.
    input_params = randnorm(di + 1, ds)
    internal_params = randnorm(ds + 1, ds, ns - 1)
    output_params = randnorm(ds + 1, do)
    # Normalize each column (including the bias row, which is reset below)
    # of the input and internal weights to unit Euclidean length.
    input_params[:, :] /= ((input_params[:, :]**2).sum(axis=0))**(1 / 2)
    internal_params[:, :, :] /= ((internal_params[:, :, :]**2).sum(axis=0))**(1 / 2)
    # Bias values: 1 after the input layer, 0 between internal layers.
    input_params[-1, :] = 1
    internal_params[-1, :, :] = 0
    # Scratch space for storing the activations of every internal layer.
    internal_values = zero(ds, ns)
    return input_params, internal_params, output_params, internal_values
# Get the shape of a model (when provided the arrays).
def get_shape(*model):
    """Recover (di, ds, ns, do) from the model's parameter arrays."""
    n_in_plus_bias, state_dim = model[0].shape
    n_states = model[1].shape[-1] + 1
    n_out = model[2].shape[-1]
    # The extra input row is the bias term.
    return n_in_plus_bias - 1, state_dim, n_states, n_out
# Function for pushing values forward through a dense MLP.
def forward(inputs, input_params, internal_params, output_params, internal_values, display=False):
    """
    Run the network forward, storing every internal activation in
    `internal_values` (modified in place). Returns the output vector.
    """
    di, ds, ns, do = get_shape(input_params, internal_params, output_params)
    # Input layer: affine map followed by a rectification (clip at 0).
    internal_values[:,0] = clip(dot(inputs, input_params[:di,:]) +
                                input_params[di,:], 0.0, float('inf'))
    if display:
        print("^"*70)
        print("input: ",inputs)
        print()
        for n in range(ds):
            print(f"0.{n} ", input_params[:di,n], '+', input_params[di,n], '=', internal_values[n,0])
        print(" 0 out ", internal_values[:,0])
    # Remaining internal layers, each with a rectified activation.
    for i in range(ns-1):
        internal_values[:,i+1] = internal_params[ds,:,i] + \
            dot(internal_values[:,i],
                internal_params[:ds,:,i])
        if display:
            print()
            for n in range(ds):
                print(f"{i+1}.{n} ", internal_params[:ds,n,i], '+', internal_params[ds:ds+1,n,i], '=', internal_values[n,i+1])
        internal_values[:,i+1] = clip(internal_values[:,i+1], 0.0, float('inf'))
        if display: print(f" {i+1} out ", internal_values[:,i+1])
    # Output layer: affine, no activation.
    output = dot(internal_values[:,ns-1], output_params[:ds]) + output_params[ds]
    if display:
        print()
        for n in range(do):
            print(f"{ns}.{n} ", output_params[:ds,n],'+', output_params[ds,n], '=', output[n])
        print(f" {ns} out ", output[:])
        print()
        print("output:", output)
        print("_"*70)
    return output
# Compute the gradient of the model parameters via backpropagation.
def gradient(grad, inputs, *model, display=False):
    """
    Backpropagate `grad` (d loss / d output) through a model whose internal
    activations were produced by a preceding `forward` call.

    NOTE: `model[-1]` (the internal values) is overwritten in place with
    intermediate backprop quantities.
    Returns (input_grad, internal_grad, output_grad).
    """
    di, ds, ns, do = get_shape(*model)
    # Storage for the gradients of each parameter block.
    input_grad = zeros(model[0].shape)
    internal_grad = zeros(model[1].shape)
    output_grad = ones(model[2].shape)
    internal_params = model[1]
    output_params = model[2]
    # Internal activations of the model (filled in by the forward pass).
    internal_values = model[-1]
    # Output layer: bias row takes grad directly, weight columns take
    # activation * grad.
    nonzero = internal_values[:,-1].nonzero()
    output_grad[ds,:] = grad[:]
    for i in range(do):
        output_grad[:ds,i] = internal_values[:,-1] * grad[i]
    # Push the gradient back onto the last internal layer; only nonzero
    # (un-clipped) activations pass gradient.
    internal_values[nonzero,-1] = dot(output_params[:ds,:][nonzero], grad)
    if display:
        print("^"*70)
        print("Output grad:")
        print("",output_grad.T)
        print("",nonzero, internal_values[:,-1])
    # Walk the internal layers backwards.
    for i in range(ns-2,-1,-1):
        # Bias-row gradient of layer i -> i+1.
        internal_grad[ds,:,i] = internal_values[:,i+1]
        # Gradient for each column of connections (to one output node of the
        # next layer).
        nonzero = internal_values[:,i].nonzero()
        for j in range(ds):
            if (internal_values[j,i+1] == 0): continue
            internal_grad[:ds,j,i][nonzero] = internal_values[nonzero,i] * internal_values[j,i+1]
            if display:
                print(f"layer {i} -> {i+1}, output node {j}")
                print(" ",internal_grad[:,j,i])
        # Gradient flowing into the preceding layer of internal values.
        internal_values[nonzero,i] = dot(internal_params[:ds,:,i][nonzero], internal_values[:,i+1])
        if display:
            print("Grads for next layer:")
            print("",nonzero, internal_values[:,i])
    # Gradient for the input parameters.
    input_grad[di,:] = internal_values[:,0]
    for i in range(ds):
        input_grad[:di,i] = inputs[:] * internal_values[i,0]
    if display:
        print("Input grad:")
        print(input_grad.T)
        print("_"*70)
    # Return the gradients.
    return input_grad, internal_grad, output_grad
# Compute the gradient with respect to all parameters using finite differences.
def finite_difference(inputs, *model, diff=0.0001, display=False):
    """
    Central finite-difference estimate of the parameter gradients, summed
    over all outputs. Steps that flip a rectifier on/off (changing the set
    of nonzero activations) are skipped as numerically unstable.
    Returns (input_grad, internal_grad, output_grad).
    """
    # Shift matrices (used for computing finite differences).
    input_shift = zeros(model[0].shape)
    internal_shift = zeros(model[1].shape)
    output_shift = zeros(model[2].shape)
    # Produce the model with the current shifts applied.
    shifted_model = lambda: (model[0]+input_shift, model[1]+internal_shift, model[2]+output_shift, model[3])
    # Gradient matrices.
    input_grad = zeros(model[0].shape)
    internal_grad = zeros(model[1].shape)
    output_grad = zeros(model[2].shape)
    # Total number of outputs.
    output_shape = forward(inputs, *model).shape
    num_outputs = prod(output_shape)
    # Compute the expected set of nonzero internal activations.
    forward(inputs, *model)
    expected_nonzero = tuple(model[-1].nonzero()[0])
    # Measure the effect of shifting each entry of `layer` on the current
    # output index, accumulating into `grad`.
    def measure_layer(layer, grad, shift, name):
        for j in range(layer.size):
            curr_idx = unravel_index(j, layer.shape)
            shift[curr_idx] = diff/2
            out_high = forward(inputs, *shifted_model())[out_index]
            nonzero_high = tuple(model[3].nonzero()[0])
            shift[curr_idx] = -diff/2
            out_low = forward(inputs, *shifted_model())[out_index]
            nonzero_low = tuple(model[3].nonzero()[0])
            shift[curr_idx] = 0
            # If a zero became nonzero (or vice versa), then the finite
            # difference approximation is unstable -- skip it.
            if ((len(nonzero_high) <= len(expected_nonzero)) and
                (len(nonzero_low) <= len(expected_nonzero))):
                # Compute the gradient.
                grad[curr_idx] += sum(out_high - out_low) / diff
                if display:
                    print(f"{name:14s}{str(curr_idx):10s} {grad[curr_idx]: .3f}")
                    print(f" {float(out_high)}")
                    print(f" {float(out_low)}")
                    print(f" {float(diff)}")
    # Display information.
    if display:
        print("^"*70)
        print("shifted_model: ",[v.shape for v in shifted_model()])
        print("output shape, size: ", output_shape, num_outputs)
    # Cycle over each output.
    for i in range(num_outputs):
        out_index = unravel_index(i, output_shape)
        if display: print("out_index: ",out_index)
        # Cycle over all model parameters, testing effect on output.
        measure_layer(model[0], input_grad, input_shift, "input idx:")
        measure_layer(model[1], internal_grad, internal_shift, "internal idx:")
        measure_layer(model[2], output_grad, output_shift, "output idx:")
    if display: print("_"*70)
    # Done computing finite difference gradient!
    return input_grad, internal_grad, output_grad
def test():
    """Exhaustively compare analytic gradients to finite differences."""
    print("Testing..")
    di_vals = (1,2,3)
    ds_vals = (1,2,3)
    ns_vals = (1,2,3)
    do_vals = (1,2,3)
    seeds = list(range(5))
    # Cycle all combination of tests.
    from itertools import product
    for (di, ds, ns, do, seed) in product(di_vals, ds_vals, ns_vals, do_vals, seeds):
        # di - dimension of input; ds - dimension for each internal state;
        # ns - number of internal states; do - dimension of output.
        random.seed(seed)
        # Create the model and a random input.
        model = build_model(di, ds, ns, do)
        inputs = randuni(di)
        # Run the model forward to compute internal values.
        output = forward(inputs, *model, display=False)
        # Compute the gradients with a finite difference.
        approx_model_grad = finite_difference(inputs, *model, display=False)
        # Run the model again (fresh) to get the "internal values".
        output = forward(inputs, *model, display=False)
        # Directly computed model gradients.
        model_grad = gradient(ones(do), inputs, *model)
        # Check the correctness of the gradient function.
        for i,(app, true) in enumerate(zip(approx_model_grad, model_grad)):
            diff = (abs(app - true) / (abs(true) + 1)).T
            # Skip "internal params" if that is empty.
            if (len(diff) == 0): continue
            # Check for the difference.
            if (max(diff) > .01):
                set_printoptions(precision=3, sign=" ")
                print()
                print("ERROR ON TEST")
                print(" seed =",seed)
                print()
                print("di, ds, ns, do: ",di, ds, ns, do)
                print("input_params: ",model[0].shape)
                print("internal_params: ",model[1].shape)
                print("output_params: ",model[2].shape)
                print("internal_values: ",model[3].shape)
                print()
                # forward(inputs, *model, display=True)
                finite_difference(inputs, *model, display=True)
                print()
                print("model[0]:")
                print(model[0].T)
                print()
                print("model[1]:")
                print(model[1].T)
                print()
                print("model[2]:")
                print(model[2].T)
                print()
                print("internals:")
                print(model[-1].T)
                print()
                print()
                print("approx_model_grad[0]:")
                print(approx_model_grad[0].T)
                print()
                print("approx_model_grad[1]:")
                print(approx_model_grad[1].T)
                print()
                print("approx_model_grad[2]:")
                print(approx_model_grad[2].T)
                print()
                print()
                print("model_grad[0]:")
                print(model_grad[0].T)
                print()
                print("model_grad[1]:")
                print(model_grad[1].T)
                print()
                print("model_grad[2]:")
                print(model_grad[2].T)
                print()
                print()
                print("Phase",i,"(0 = input, 1 = internal, 2 = output)")
                print("",max(diff))
                print("",unravel_index(argmax(diff), diff.shape))
                print()
                print("Finite differene gradient:")
                print(app.T)
                print()
                print("Directly computed gradient:")
                print(true.T)
                print()
                print("Difference")
                print(diff)
                print()
                print("ERROR ON TEST")
                exit()
    print(" all passed!")

if __name__ == "__main__":
    test()
class NN:
    """Small fully connected rectified network trained by gradient descent."""

    def __init__(self, di, do, ds=16, ns=4):
        # di/do: input/output dimension; ds: width of each internal state;
        # ns: number of internal states.
        self.di = di
        self.ds = ds
        self.ns = ns
        self.do = do
        self.model = list(build_model(di, ds, ns, do))

    def fit(self, x, y, steps=1000, step_factor=0.01, display=False,
            show=False, **kwargs):
        """Fit the model to (x, y) with `steps` full-batch gradient steps."""
        # Make sure that the given data is the right shape.
        assert (self.di == x.shape[-1])
        assert (self.do == y.shape[-1])
        # NOTE(review): the plotting path only defines `p`, `show_interval`
        # and `loss_values` when di == do == 1; `show=True` with other shapes
        # would raise a NameError later -- confirm intended usage.
        if (show and (self.do == 1) and (self.di == 1)):
            show_interval = max([1, steps // 100])
            from util.plot import Plot
            p = Plot()
            p.add("Data", *(x.T), y.flatten(), group='d', frame=-1)
            p.add_func("Model", self, [x.min(), x.max()], group='m', frame=-1)
            loss_values = []
        # For the number of training steps..
        for s in range(steps):
            if (not s%10): print(s, end="\r")
            if (show): loss_values.append( ((y - self(x))**2).sum()**(1/2) )
            grads = [zeros(l.shape) for l in self.model]
            # Running mean of the gradient over all data points.
            for i, (d_in, d_out) in enumerate(zip(x,y)):
                m_out = forward(d_in, *self.model, display=False)
                loss_grad = m_out - d_out
                grad_step = gradient(loss_grad, d_in, *self.model, display=False)
                for j in range(len(grad_step)):
                    grads[j] += (grad_step[j] - grads[j]) / (i+1)
            if display:
                yhat = self(x).reshape(y.shape)
                loss = ((y - yhat)**2).sum(axis=-1).mean()
            # Take a step in the gradient direction.
            for j in range(len(grads)):
                self.model[j] -= grads[j] * step_factor
            # Display progress.
            if display:
                print()
                print("Step:", s)
                print("loss:", loss)
                print("model:")
                for l in self.model[:-1]:
                    print("",l.T)
                print("grads: ")
                for l in grads[:-1]:
                    print("",-l.T)
                print()
            # Update the model plot, if appropriate.
            if (show and (s%show_interval == 0)):
                p.add("Data", *(x.T), y.flatten(), group='d', frame=s)
                p.add_func("Model", self, [x.min(), x.max()], group='m', frame=s)
        # Add the last frame, if it wasn't already added.
        if (show):
            print(" showing plot..")
            # Show the plot of the model.
            p.show(show=False)
            p = Plot("","Step","Loss value")
            p.add("Loss", list(range(len(loss_values))), loss_values,
                  mode="markers+lines", color=1)
            p.show(append=True, show_legend=False)

    def predict(self, x):
        """Return predictions for new data (row-wise for 2-D input)."""
        if (len(x.shape) == 2):
            return asarray([forward(row, *self.model)[0] for row in x])
        else:
            return forward(x, *self.model)

    def __call__(self, *args):
        # Wrapper for predict().
        return self.predict(*args)
| [
"numpy.random.normal",
"numpy.prod",
"numpy.abs",
"numpy.ones",
"numpy.random.random",
"itertools.product",
"numpy.asarray",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.random.randint",
"numpy.unravel_index",
"numpy.random.seed",
"util.plot.Plot",
"nu... | [((214, 226), 'numpy.zeros', 'zeros', (['shape'], {}), '(shape)\n', (219, 226), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((256, 281), 'numpy.random.normal', 'random.normal', ([], {'size': 'shape'}), '(size=shape)\n', (269, 281), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((310, 335), 'numpy.random.random', 'random.random', ([], {'size': 'shape'}), '(size=shape)\n', (323, 335), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((3507, 3528), 'numpy.zeros', 'zeros', (['model[0].shape'], {}), '(model[0].shape)\n', (3512, 3528), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((3549, 3570), 'numpy.zeros', 'zeros', (['model[1].shape'], {}), '(model[1].shape)\n', (3554, 3570), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((3589, 3609), 'numpy.ones', 'ones', (['model[2].shape'], {}), '(model[2].shape)\n', (3593, 3609), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((4063, 4104), 'numpy.dot', 'dot', (['output_params[:ds, :][nonzero]', 'grad'], {}), '(output_params[:ds, :][nonzero], grad)\n', (4066, 4104), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5815, 5836), 'numpy.zeros', 'zeros', (['model[0].shape'], {}), '(model[0].shape)\n', (5820, 5836), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5858, 5879), 'numpy.zeros', 'zeros', (['model[1].shape'], {}), 
'(model[1].shape)\n', (5863, 5879), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5901, 5922), 'numpy.zeros', 'zeros', (['model[2].shape'], {}), '(model[2].shape)\n', (5906, 5922), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6126, 6147), 'numpy.zeros', 'zeros', (['model[0].shape'], {}), '(model[0].shape)\n', (6131, 6147), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6168, 6189), 'numpy.zeros', 'zeros', (['model[1].shape'], {}), '(model[1].shape)\n', (6173, 6189), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6210, 6231), 'numpy.zeros', 'zeros', (['model[2].shape'], {}), '(model[2].shape)\n', (6215, 6231), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6331, 6349), 'numpy.prod', 'prod', (['output_shape'], {}), '(output_shape)\n', (6335, 6349), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((8782, 8832), 'itertools.product', 'product', (['di_vals', 'ds_vals', 'ns_vals', 'do_vals', 'seeds'], {}), '(di_vals, ds_vals, ns_vals, do_vals, seeds)\n', (8789, 8832), False, 'from itertools import product\n'), ((391, 435), 'numpy.random.randint', 'random.randint', (['(min + 1)', '(max + 1)'], {'size': 'shape'}), '(min + 1, max + 1, size=shape)\n', (405, 435), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((2892, 2943), 'numpy.dot', 'dot', (['internal_values[:, ns - 1]', 'output_params[:ds]'], {}), '(internal_values[:, ns - 1], output_params[:ds])\n', (2895, 2943), 
False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5050, 5117), 'numpy.dot', 'dot', (['internal_params[:ds, :, i][nonzero]', 'internal_values[:, i + 1]'], {}), '(internal_params[:ds, :, i][nonzero], internal_values[:, i + 1])\n', (5053, 5117), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((7945, 7975), 'numpy.unravel_index', 'unravel_index', (['i', 'output_shape'], {}), '(i, output_shape)\n', (7958, 7975), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((9200, 9217), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (9211, 9217), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((1847, 1880), 'numpy.dot', 'dot', (['inputs', 'input_params[:di, :]'], {}), '(inputs, input_params[:di, :])\n', (1850, 1880), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((2414, 2468), 'numpy.dot', 'dot', (['internal_values[:, i]', 'internal_params[:ds, :, i]'], {}), '(internal_values[:, i], internal_params[:ds, :, i])\n', (2417, 2468), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6655, 6684), 'numpy.unravel_index', 'unravel_index', (['j', 'layer.shape'], {}), '(j, layer.shape)\n', (6668, 6684), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((9902, 9910), 'numpy.ones', 'ones', (['do'], {}), '(do)\n', (9906, 9910), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((13193, 13215), 'numpy.max', 'max', 
(['[1, steps // 100]'], {}), '([1, steps // 100])\n', (13196, 13215), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((13271, 13277), 'util.plot.Plot', 'Plot', ([], {}), '()\n', (13275, 13277), False, 'from util.plot import Plot\n'), ((15320, 15350), 'util.plot.Plot', 'Plot', (['""""""', '"""Step"""', '"""Loss value"""'], {}), "('', 'Step', 'Loss value')\n", (15324, 15350), False, 'from util.plot import Plot\n'), ((15753, 15769), 'numpy.asarray', 'asarray', (['outputs'], {}), '(outputs)\n', (15760, 15769), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10273, 10282), 'numpy.max', 'max', (['diff'], {}), '(diff)\n', (10276, 10282), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10307, 10346), 'numpy.set_printoptions', 'set_printoptions', ([], {'precision': '(3)', 'sign': '""" """'}), "(precision=3, sign=' ')\n", (10323, 10346), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((13674, 13688), 'numpy.zeros', 'zeros', (['l.shape'], {}), '(l.shape)\n', (13679, 13688), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((7362, 7385), 'numpy.sum', 'sum', (['(out_high - out_low)'], {}), '(out_high - out_low)\n', (7365, 7385), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10083, 10098), 'numpy.abs', 'abs', (['(app - true)'], {}), '(app - true)\n', (10086, 10098), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((12133, 12142), 'numpy.max', 'max', (['diff'], {}), '(diff)\n', (12136, 
12142), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10102, 10111), 'numpy.abs', 'abs', (['true'], {}), '(true)\n', (10105, 10111), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((12183, 12195), 'numpy.argmax', 'argmax', (['diff'], {}), '(diff)\n', (12189, 12195), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n')] |
'''
Algorithm for matching the model to image points.
Based on (Cootes et al. 2000, p.9) and (Blanz et al., p.4).
'''
import numpy as np
from utils.structure import Shape
from utils.align import Aligner
class Fitter(object):
    """Fits a point-distribution model to identified image points.

    Based on (Cootes et al. 2000, p.9) and (Blanz et al., p.4).
    """

    def __init__(self, pdmodel):
        # pdmodel: a point-distribution model exposing
        # `scaled_eigenvectors` and `eigenvalues` (project type).
        self.pdmodel = pdmodel
        self.aligner = Aligner()
        # Initial pose (Tx, Ty, s, theta). Must be set by the caller
        # before fit() is invoked, otherwise fit() raises ValueError.
        self.start_pose = ()

    def fit(self, prev_shape, new_shape, pyramid_level=0, n=None):
        '''
        Algorithm that finds the best shape parameters that match identified
        image points.

        In: PointDistributionModel instance pdm,
            array of new image points (x1, x2, ..., xN, y1, y2,..., yN)
        Out: the pose params (Tx, Ty, s, theta) and shape parameter (c) to
            fit the model to the image

        Note: `pyramid_level` is currently unused; it is kept for
        interface compatibility with multi-resolution callers.
        '''
        if not isinstance(new_shape, Shape):
            new_shape = Shape(new_shape)
        if not isinstance(prev_shape, Shape):
            prev_shape = Shape(prev_shape)
        if not self.start_pose:
            raise ValueError('No inital pose parameters found.')

        # find pose parameters to align with new image points
        Tx, Ty, s, theta = self.start_pose
        dx, dy, ds, dTheta = self.aligner.get_pose_parameters(prev_shape, new_shape)
        changed_pose = (Tx + dx, Ty + dy, s*(1+ds), theta+dTheta)

        # align image with model
        y = self.aligner.invert_transform(new_shape, changed_pose)

        # SVD on scaled eigenvectors of the model
        u, w, v = np.linalg.svd(self.pdmodel.scaled_eigenvectors, full_matrices=False)

        # Default damping weight n: square of the smallest eigenvalue.
        # (The original guarded on `last_eigenvalue**2 >= 0`, but a real
        # square is never negative, so that branch was dead and the dead
        # pre-initialisation `W = np.zeros_like(w)` has been removed too.)
        if n is None:
            n = self.pdmodel.eigenvalues[-1]**2

        # Damped (Tikhonov-style) projection: shape vector c.
        W = np.diag(w/((w**2) + n))
        c = (v.T).dot(W).dot(u.T).dot(y.vector)

        return changed_pose, c
| [
"utils.align.Aligner",
"numpy.diag",
"utils.structure.Shape",
"numpy.linalg.svd",
"numpy.zeros_like"
] | [((315, 324), 'utils.align.Aligner', 'Aligner', ([], {}), '()\n', (322, 324), False, 'from utils.align import Aligner\n'), ((1480, 1548), 'numpy.linalg.svd', 'np.linalg.svd', (['self.pdmodel.scaled_eigenvectors'], {'full_matrices': '(False)'}), '(self.pdmodel.scaled_eigenvectors, full_matrices=False)\n', (1493, 1548), True, 'import numpy as np\n'), ((1561, 1577), 'numpy.zeros_like', 'np.zeros_like', (['w'], {}), '(w)\n', (1574, 1577), True, 'import numpy as np\n'), ((1812, 1837), 'numpy.diag', 'np.diag', (['(w / (w ** 2 + n))'], {}), '(w / (w ** 2 + n))\n', (1819, 1837), True, 'import numpy as np\n'), ((850, 866), 'utils.structure.Shape', 'Shape', (['new_shape'], {}), '(new_shape)\n', (855, 866), False, 'from utils.structure import Shape\n'), ((938, 955), 'utils.structure.Shape', 'Shape', (['prev_shape'], {}), '(prev_shape)\n', (943, 955), False, 'from utils.structure import Shape\n')] |
from flask import Flask
from flask_cors import CORS # type: ignore
from .api import account_blueprint
from .event_handlers import register_event_handlers
from .infrastructure import event_store_db
from .composition_root import event_manager
def account_app_factory(db_string: str):
    """Build and configure the account-service Flask application.

    Args:
        db_string: SQLAlchemy connection URI for the event store.

    Returns:
        A Flask app with CORS enabled, the event-store DB initialised,
        the account API blueprint registered, and domain event handlers
        wired to the shared event manager.
    """
    flask_app = Flask(__name__)
    CORS(flask_app)

    flask_app.config.update(
        SQLALCHEMY_DATABASE_URI=db_string,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )

    event_store_db.init_app(flask_app)
    flask_app.register_blueprint(account_blueprint)
    register_event_handlers(event_manager)

    return flask_app
| [
"flask_cors.CORS",
"flask.Flask"
] | [((295, 310), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (300, 310), False, 'from flask import Flask\n'), ((315, 324), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (319, 324), False, 'from flask_cors import CORS\n')] |
# Package: Storage Manager
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
# Developers: <NAME>
from storage.avl import avlMode
from storage.b import BMode
from storage.bplus import BPlusMode
from storage.hash import HashMode
from storage.isam import ISAMMode
from storage.json_mode import jsonMode
from storage.dict import DictMode
from storage.b import Serializable
from DBList import DBList
import re
import codificar
from random import randint
# Storage modes supported by the manager; each maps to one engine module.
MODES = ['avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash']
# Symbol alphabet used by temp_name() to build random database names.
HEX_SYMBOLS = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"]
# Encodings accepted by createDatabase()/codificar().
VALID_ENCODING = ["utf8", "iso-8859-1", "ascii"]
# SQL-identifier rule for database names (letter first, then word chars/#@$_).
DB_NAME_PATTERN = "^[a-zA-Z][a-zA-Z0-9#@$_]*"

# Load the master database registry from its serialized binary snapshot.
# If the snapshot does not exist yet, start with an empty registry and
# persist it immediately so later commits have a file to overwrite.
try:
    databases = Serializable.rollback("lista_bases_de_datos")
except FileNotFoundError:
    databases = DBList()
    Serializable.commit(databases, "lista_bases_de_datos")
# Description:
#     Creates a new database.
# Parameters:
#     database:str - Name of the new database
#     mode:str - Storage mode to use for the database
#     encoding:str - Character encoding used by the database
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database already exists
#     3 - Invalid mode
#     4 - Invalid encoding
def createDatabase(database: str, mode: str, encoding: str) -> int:
    if encoding not in VALID_ENCODING:
        return 4
    if databases.search(database) != None:
        return 2
    # Dispatch creation to the storage engine for the requested mode.
    if mode == "avl":
        code = avlMode.createDatabase(database)
    elif mode == "b":
        code = BMode.createDatabase(database)
    elif mode == "bplus":
        code = BPlusMode.createDatabase(database)
    elif mode == "dict":
        code = DictMode.createDatabase(database)
    elif mode == "isam":
        code = ISAMMode.createDatabase(database)
    elif mode == "json":
        code = jsonMode.createDatabase(database)
    elif mode == "hash":
        code = HashMode.createDatabase(database)
    else:
        return 3
    if code == 0:
        databases.create(database, mode, encoding)
        # Persist the registry, retrying up to 5 times.
        # BUGFIX: in the original, the inner bare `except: continue`
        # swallowed every commit failure, so the outer rollback `except`
        # was unreachable, and a successful commit did not stop the loop
        # (the registry was committed up to five times).
        committed = False
        for _ in range(5):
            try:
                Serializable.commit(databases, "lista_bases_de_datos")
                committed = True
                break
            except:
                continue
        if not committed:
            # Best-effort rollback: remove the physical database and its
            # registry entry (this was the original's dead-code intent).
            for _ in range(5):
                if dropDatabase(database) == 0:
                    databases.delete(database)
                    break
            return 1
    return code
# Description:
#     Creates an alternate-mode copy of a database (internal helper used
#     by alterTableMode when migrating a single table to another mode).
# Parameters:
#     database:str - Name of the new database
#     mode:str - Storage mode to use for the database
#     encoding:str - Character encoding used by the database
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database already exists
#     3 - Invalid mode
#     4 - Invalid encoding
def __create_database_sp(database: str, mode: str, encoding: str) -> int:
    if encoding not in VALID_ENCODING:
        return 4
    dbs = databases.find_all(database)
    if dbs != None:
        for db in dbs:
            if db.name == database and db.mode == mode:
                # This alternate-mode database already exists.
                return 0
    if mode == "avl":
        code = avlMode.createDatabase(database)
    elif mode == "b":
        code = BMode.createDatabase(database)
    elif mode == "bplus":
        code = BPlusMode.createDatabase(database)
    elif mode == "dict":
        code = DictMode.createDatabase(database)
    elif mode == "isam":
        code = ISAMMode.createDatabase(database)
    elif mode == "json":
        code = jsonMode.createDatabase(database)
    elif mode == "hash":
        code = HashMode.createDatabase(database)
    else:
        return 3
    if code == 0:
        databases.create(database, mode, encoding)
        try:
            # NOTE(review): the inner bare `except: continue` swallows every
            # commit failure, so the outer `except` rollback below can never
            # trigger from a failed commit, and a successful commit does not
            # break out of the loop (it commits up to 5 times). Same issue
            # exists in createDatabase — confirm intended retry semantics.
            for i in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                except:
                    continue
        except:
            # Intended rollback path: drop the physical database and remove
            # its registry entry (currently unreachable, see note above).
            code_drop = dropDatabase(database)
            if code_drop == 0:
                databases.delete(database)
                return 1
            else:
                for i in range(4):
                    code_drop = dropDatabase(database)
                    if code_drop == 0:
                        databases.delete(database)
                        break
                return 1
    return code
# Description:
#     Returns a random name that does not collide with any existing database.
# Return values:
#     str - An unused database name
def temp_name():
    """Generate a random, unused database name.

    The first symbol is drawn from indices 10-15 of HEX_SYMBOLS (the
    letters A-F) so the name starts with a letter; four more random hex
    symbols follow, and extra symbols are appended until the name is not
    present in the registry.
    """
    last = len(HEX_SYMBOLS) - 1
    name = HEX_SYMBOLS[randint(10, last)]
    name += "".join(HEX_SYMBOLS[randint(0, last)] for _ in range(4))
    while databases.search(name) is not None:
        name += HEX_SYMBOLS[randint(0, last)]
    return name
# Description:
#     Changes the storage mode of a whole database by migrating every table
#     into a freshly created temporary database and then renaming it back.
# Parameters:
#     database:str - Name of the database to modify
#     mode:str - Target mode: 'avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash'
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     4 - Invalid mode
def alterDatabaseMode(database: str, mode: str) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    if mode not in MODES:
        return 4
    tables = showTables(database)
    # Build the target database under a throwaway unique name, keeping the
    # encoding of the first existing copy.
    temp_db_name = temp_name()
    createDatabase(temp_db_name, mode, dbs[0].encoding)
    for table in tables:
        aux_table = databases.find_table(database, table)
        # Recreate schema, primary key and data; on any failure the
        # temporary database is discarded and the original is untouched.
        if createTable(temp_db_name, table, aux_table.columns) != 0:
            dropDatabase(temp_db_name)
            return 1
        if aux_table.pk != []:
            if alterAddPK(temp_db_name, table, aux_table.pk) != 0:
                dropDatabase(temp_db_name)
                return 1
        registers = extractTable(database, table)
        for register in registers:
            if insert(temp_db_name, table, register) != 0:
                dropDatabase(temp_db_name)
                return 1
    # Swap: drop the original, then rename the temporary copy into place.
    if dropDatabase(database) != 0:
        dropDatabase(temp_db_name)
        return 1
    if alterDatabase(temp_db_name, database) != 0:
        return 1
    return 0
# Description:
#     Changes the storage mode of a single table of the given database by
#     moving it into an alternate-mode copy of the database (created on
#     demand) and dropping the original table.
# Parameters:
#     database:str - Name of the database to modify
#     table:str - Name of the table to modify
#     mode:str - Target mode: 'avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash'
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table does not exist
#     4 - Invalid mode
def alterTableMode(database: str, table: str, mode: str) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    if databases.find_table(database, table) == None:
        return 3
    if mode not in MODES:
        return 4
    for db in dbs:
        tb = db.tables.search(table)
        if tb != None:
            # Check whether the table is already stored in a database copy
            # with the requested mode — nothing to do in that case.
            if db.mode == mode:
                return 0
            # Check whether an alternate copy of this database with the
            # requested mode (and same encoding) already exists.
            alt_db_exists = False
            for aux in dbs:
                if aux.name == database and aux.mode == mode and aux.encoding == db.encoding:
                    alt_db_exists = True
            # Extract the table's records before migrating.
            registers = extractTable(database, table)
            if alt_db_exists:
                # Create the table in the existing alternate-mode copy.
                if __create_table_sp(database, table, tb.columns, mode) != 0:
                    return 1
                # Insert the records.
                for register in registers:
                    if __insert_sp(database, table, register, mode) != 0:
                        return 1
            else:
                # Create the alternate-mode database, then table, then rows.
                if __create_database_sp(database, mode, db.encoding) != 0:
                    return 1
                # Create the table in this database copy.
                if __create_table_sp(database, table, tb.columns, mode) != 0:
                    return 1
                # Insert the records.
                for register in registers:
                    if __insert_sp(database, table, register, mode) != 0:
                        return 1
            # Drop the original table; if its database copy is left empty,
            # drop that copy as well.
            if __drop_table_sp(database, table, db.mode) != 0:
                return 1
            if db.tables.first == None:
                if __drop_database_sp(db.name, db.mode) != 0:
                    return 1
            return 0
    return 1
# Description:
#     Returns a list with the names of the databases.
# Return values:
#     List of strings with the database names
#     Empty list on error or when there are no databases
def showDatabases() -> list:
    # Delegates to the registry; list_databases_diff() presumably collapses
    # alternate-mode copies of the same name into one entry — confirm in DBList.
    return databases.list_databases_diff()
# Description:
#     Renames database databaseOld to databaseNew across every storage mode
#     that holds a copy of it.
# Parameters:
#     databaseOld:str - Current database name (must follow SQL identifier rules)
#     databaseNew:str - New database name (must follow SQL identifier rules)
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - databaseOld does not exist
#     3 - databaseNew already exists
def alterDatabase(databaseOld: str, databaseNew: str) -> int:
    # Guard-clause form of the original identifier validation.
    if not (re.search(DB_NAME_PATTERN, databaseOld) and re.search(DB_NAME_PATTERN, databaseNew)):
        return 1
    if databases.search(databaseNew) != None:
        return 3
    dbs = databases.find_all(databaseOld)
    if dbs == []:
        return 2
    # BUGFIX: the original returned after renaming only the FIRST matching
    # copy, leaving alternate-mode copies under the old name, and fell off
    # the loop (returning None) when every engine rename failed. Rename all
    # copies, then commit the registry once.
    renamed = []
    for db in dbs:
        if db.mode == "avl":
            code = avlMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "b":
            code = BMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "bplus":
            code = BPlusMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "dict":
            code = DictMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "isam":
            code = ISAMMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "json":
            code = jsonMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "hash":
            code = HashMode.alterDatabase(databaseOld, databaseNew)
        else:
            code = 1
        if code != 0:
            # Revert registry names changed so far (physical renames already
            # performed by the engines cannot be undone here — best effort).
            for r in renamed:
                r.name = databaseOld
            return 1
        db.name = databaseNew
        renamed.append(db)
    # Persist the registry, retrying up to 5 times; revert on total failure.
    for _ in range(5):
        try:
            Serializable.commit(databases, "lista_bases_de_datos")
            return 0
        except:
            continue
    for r in renamed:
        r.name = databaseOld
    return 1
# Description:
#     Completely removes the database indicated by `database`, including
#     every alternate-mode copy of it.
# Parameters:
#     database:str - Name of the database to drop (SQL identifier rules)
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
def dropDatabase(database: str) -> int:
    if not re.search(DB_NAME_PATTERN, database):
        return 1
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    # BUGFIX: the original committed and returned inside the loop after the
    # FIRST copy, so alternate-mode copies (created by alterTableMode) were
    # never dropped. Drop every copy, then commit the registry once.
    failed = False
    for db in dbs:
        if db.mode == "avl":
            code = avlMode.dropDatabase(database)
        elif db.mode == "b":
            code = BMode.dropDatabase(database)
        elif db.mode == "bplus":
            code = BPlusMode.dropDatabase(database)
        elif db.mode == "dict":
            code = DictMode.dropDatabase(database)
        elif db.mode == "isam":
            code = ISAMMode.dropDatabase(database)
        elif db.mode == "json":
            code = jsonMode.dropDatabase(database)
        elif db.mode == "hash":
            code = HashMode.dropDatabase(database)
        else:
            failed = True
            continue
        if code == 0:
            # Remove exactly this (name, mode) registry entry.
            databases.delete_sp(db.name, db.mode)
        else:
            failed = True
    # Persist whatever was removed, retrying up to 5 times.
    committed = False
    for _ in range(5):
        try:
            Serializable.commit(databases, "lista_bases_de_datos")
            committed = True
            break
        except:
            continue
    if failed or not committed:
        return 1
    return 0
# Description:
#     Removes the database copy with the given name AND mode (internal
#     helper used when a mode migration empties a database copy).
# Parameters:
#     database:str - Name of the database to drop (SQL identifier rules)
#     mode:str - Storage mode of the copy to drop
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     4 - Invalid mode
def __drop_database_sp(database: str, mode: str) -> int:
    if re.search(DB_NAME_PATTERN, database):
        dbs = databases.find_all(database)
        if dbs == []:
            return 2
        if mode not in MODES:
            return 4
        for db in dbs:
            # Only the copy whose mode matches the requested one is dropped;
            # other copies of the same name are skipped via `continue`.
            if db.mode == mode == "avl":
                code = avlMode.dropDatabase(database)
            elif db.mode == mode == "b":
                code = BMode.dropDatabase(database)
            elif db.mode == mode == "bplus":
                code = BPlusMode.dropDatabase(database)
            elif db.mode == mode == "dict":
                code = DictMode.dropDatabase(database)
            elif db.mode == mode == "isam":
                code = ISAMMode.dropDatabase(database)
            elif db.mode == mode == "json":
                code = jsonMode.dropDatabase(database)
            elif db.mode == mode == "hash":
                code = HashMode.dropDatabase(database)
            else:
                continue
            if code == 0:
                # Remove exactly this (name, mode) entry and persist the
                # registry, retrying the commit up to 5 times.
                databases.delete_sp(db.name, db.mode)
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return 0
                    except:
                        continue
            return 1
    else:
        return 1
# Description:
#     Creates a new table in the indicated database.
# Parameters:
#     database:str - Name of the database the table is added to
#     table:str - Name of the new table
#     numberColumns:int - Number of columns the table will hold
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table already exists
def createTable(database: str, table: str, numberColumns: int) -> int:
    # NOTE(review): search() returns only the first copy of the database;
    # with alternate-mode copies present the table is created in that first
    # copy only — confirm this is intended.
    db = databases.search(database)
    if db == None:
        return 2
    if table in showTables(database):
        return 3
    if db.mode == "avl":
        result = avlMode.createTable(database, table, numberColumns)
    elif db.mode == "b":
        result = BMode.createTable(database, table, numberColumns)
    elif db.mode == "bplus":
        result = BPlusMode.createTable(database, table, numberColumns)
    elif db.mode == "dict":
        result = DictMode.createTable(database, table, numberColumns)
    elif db.mode == "isam":
        result = ISAMMode.createTable(database, table, numberColumns)
    elif db.mode == "json":
        result = jsonMode.createTable(database, table, numberColumns)
    elif db.mode == "hash":
        result = HashMode.createTable(database, table, numberColumns)
    if result == 0:
        # Mirror the table in the registry, then persist it with up to
        # 5 commit attempts; a commit that never succeeds reports error 1.
        if db.tables.create(table, numberColumns) == 0:
            for x in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                    return result
                except:
                    continue
            return 1
    return result
# Description:
#     Creates a new table in the database copy with the indicated mode
#     (internal helper used by alterTableMode).
# Parameters:
#     database:str - Name of the database the table is added to
#     table:str - Name of the new table
#     numberColumns:int - Number of columns the table will hold
#     mode:str - Storage mode of the database copy to target
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table already exists
#     4 - Invalid mode
def __create_table_sp(database: str, table: str, numberColumns: int, mode: str) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    # if table in showTables(database):
    #     return 3
    if mode not in MODES:
        return 4
    for db in dbs:
        # Only the copy whose mode matches is targeted; others are skipped.
        if db.mode == mode == "avl":
            result = avlMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "b":
            result = BMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "bplus":
            result = BPlusMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "dict":
            result = DictMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "isam":
            result = ISAMMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "json":
            result = jsonMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "hash":
            result = HashMode.createTable(database, table, numberColumns)
        else:
            continue
        if result == 0:
            # Mirror the table in the registry and persist, retrying the
            # commit up to 5 times.
            if db.tables.create(table, numberColumns) == 0:
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        continue
                return 1
        return result
# Description:
#     Returns a list with the names of all tables of the database.
# Parameters:
#     database:str - Name of the database whose tables are requested
# Return values:
#     List of table names when the database (and tables) exist
#     Empty list when the database exists but holds no tables
#     None when the database does not exist
def showTables(database: str) -> list:
    """Collect table names from every copy (mode) of the database."""
    dbs = databases.find_all(database)
    if dbs == []:
        return None
    # Dispatch table: one storage-engine module per mode.
    engines = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    tables = []
    for db in dbs:
        engine = engines.get(db.mode)
        if engine is not None:
            tables += engine.showTables(database)
    return tables
# Description:
#     Removes the current primary key from the table's metadata, keeping the
#     current tree-structure index until alterAddPK() is invoked again.
# Parameters:
#     database:str - Name of the database to use
#     table:str - Name of the table to use
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table does not exist
#     4 - Primary key does not exist
def alterDropPK(database: str, table: str) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterDropPK(database, table)
        elif db.mode == "b":
            result = BMode.alterDropPK(database, table)
        elif db.mode == "bplus":
            result = BPlusMode.alterDropPK(database, table)
        elif db.mode == "dict":
            result = DictMode.alterDropPK(database, table)
        elif db.mode == "isam":
            result = ISAMMode.alterDropPK(database, table)
        elif db.mode == "json":
            result = jsonMode.alterDropPK(database, table)
        elif db.mode == "hash":
            result = HashMode.alterDropPK(database, table)
        # result == 3 means "table not in this copy": keep looking in the
        # other copies; any other result ends the search.
        if result != 3:
            if result == 0:
                db.tables.search(table).pk = []
                # NOTE(review): `except: break` exits after the FIRST failed
                # commit, so despite range(5) only one attempt is ever made —
                # sibling functions use `continue` here; confirm intent.
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
# Description:
#     Appends a column to the end of every record of the specified table,
#     filled with `default` for existing records.
# Parameters:
#     database:str - Name of the database to use
#     table:str - Name of the table to use
#     default:any - Value stored in the new column for existing records
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table does not exist
def alterAddColumn(database: str, table: str, default: any) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterAddColumn(database, table, default)
        elif db.mode == "b":
            result = BMode.alterAddColumn(database, table, default)
        elif db.mode == "bplus":
            result = BPlusMode.alterAddColumn(database, table, default)
        elif db.mode == "dict":
            result = DictMode.alterAddColumn(database, table, default)
        elif db.mode == "isam":
            result = ISAMMode.alterAddColumn(database, table, default)
        elif db.mode == "json":
            result = jsonMode.alterAddColumn(database, table, default)
        elif db.mode == "hash":
            result = HashMode.alterAddColumn(database, table, default)
        # result == 3 means "table not in this copy": try the next copy.
        if result != 3:
            if result == 0:
                # BUGFIX: the registry table lives in db.tables, not db —
                # the original `db.search(table)` is inconsistent with
                # alterDropPK/createTable and would fail the lookup.
                db.tables.search(table).columns += 1
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        # BUGFIX: original `break` made the 5-attempt retry
                        # a single attempt; `continue` actually retries.
                        continue
                return 1
            break
    return result
# Description:
#     Removes the n-th column of every record of the table, unless it is
#     part of the primary key.
# Parameters:
#     database:str - Name of the database to use
#     table:str - Name of the table to use
#     columnNumber:int - Index of the column to remove
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table does not exist
#     4 - Key column cannot be removed / table would be left without columns
#     5 - Column index out of bounds
def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "b":
            result = BMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "bplus":
            result = BPlusMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "dict":
            result = DictMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "isam":
            result = ISAMMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "json":
            result = jsonMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "hash":
            result = HashMode.alterDropColumn(database, table, columnNumber)
        # result == 3 means "table not in this copy": try the next copy.
        if result != 3:
            if result == 0:
                # BUGFIX: the registry table lives in db.tables, not db —
                # the original `db.search(table)` is inconsistent with
                # alterDropPK/createTable and would fail the lookup.
                db.tables.search(table).columns -= 1
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        # BUGFIX: original `break` made the 5-attempt retry
                        # a single attempt; `continue` actually retries.
                        continue
                return 1
            break
    return result
# Description:
#     Converts a string to bytes using the specified encoding.
# Parameters:
#     text:str - The text to encode
#     encoding:str - The encoding name to use ("utf8", "iso-8859-1", "ascii")
# Return values:
#     bytes - The encoded text
#     None - Unsupported encoding or encoding failure
def codificar(text, encoding):
    """Encode `text` to bytes, or return None on any failure.

    NOTE(review): this function shadows the module imported as
    `import codificar` at the top of the file — confirm which object
    callers expect under that name.
    """
    # Map the manager's encoding names to Python codec names; this replaces
    # the original's three duplicated literal branches.
    codecs_by_name = {"utf8": "utf-8", "iso-8859-1": "iso-8859-1", "ascii": "ascii"}
    codec = codecs_by_name.get(encoding)
    if codec is None:
        return None
    try:
        return bytes(text, encoding=codec)
    except (TypeError, UnicodeEncodeError):
        # Non-string input, or characters not representable in the codec.
        return None
# Description:
#     Inserts a record into the data structure associated with the table
#     and database.
# Parameters:
#     database:str - Name of the database to use
#     table:str - Name of the table to use
#     register:list - List of elements representing one record
# Return values:
#     0 - Success
#     1 - Operation error
#     2 - Database does not exist
#     3 - Table does not exist
#     4 - Duplicate primary key
#     5 - Column count out of bounds
def insert(database: str, table: str, register: list):
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    # NOTE(review): if a db entry ever carried a mode outside the dispatch
    # below, `result` would be unbound on the first iteration — confirm that
    # registry entries always hold a valid mode.
    for db in dbs:
        # Disabled per-value encoding step (kept for reference):
        # for x in range(0, len(register)):
        #     aux = codificar(register[x], db.encoding)
        #     if aux == None:
        #         return 1
        #     register[x] = aux
        if db.mode == "avl":
            result = avlMode.insert(database, table, register)
        elif db.mode == "b":
            result = BMode.insert(database, table, register)
        elif db.mode == "bplus":
            result = BPlusMode.insert(database, table, register)
        elif db.mode == "dict":
            result = DictMode.insert(database, table, register)
        elif db.mode == "isam":
            result = ISAMMode.insert(database, table, register)
        elif db.mode == "json":
            result = jsonMode.insert(database, table, register)
        elif db.mode == "hash":
            result = HashMode.insert(database, table, register)
        # result == 3 means "table not in this copy": try the next copy.
        if result != 3:
            break
    return result
# Descripción:
# Inserta un registro en la estructura de datos asociada a la tabla y la base de datos con el modo indicado
# Parámetros:
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# register:list - Es una lista de elementos que represent un registro
# mode:str - El modo de la base de datos en la que se desea insertar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existente
# 3 - table no existente
# 4 - Llave primaria duplicada
# 5 - Columnas fuera de límites
# 6 - Modo incorrecto
def __insert_sp(database: str, table: str, register: list, mode: str):
    """Insert *register* only into the database instances of *database*
    whose storage mode equals *mode*.

    Parameters:
        database: name of the database to use.
        table: name of the table to use.
        register: list of values representing one row.
        mode: storage mode the target instance must have.

    Returns:
        0 success, 1 operation error, 2 database not found,
        3 table not found, 4 duplicate primary key,
        5 columns out of bounds, 6 wrong mode.
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    tb = databases.find_table(database, table)
    if tb is None:
        return 3
    if len(register) != tb.columns:
        return 5
    if mode not in MODES:
        return 6
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    # Initialize so the final "return result" cannot raise
    # UnboundLocalError when no instance runs the requested mode.
    result = 3
    for db in dbs:
        if db.mode != mode:
            continue  # same effect as the original "else: continue"
        backend = backends.get(mode)
        if backend is None:
            continue
        result = backend.insert(database, table, register)
        if result != 3:
            break
    return result
# Descripción:
# Carga un archivo CSV de una ruta especificada indicando la base de datos y tabla donde será almacenado
# Parámetros:
# file:str - Ruta del archivo CSV a utilizar
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# Valores de retorno:
# Lista con los valores enteros que devuelve el insert por cada fila del CSV
# Si ocurrió un error o el archivo CSV no tiene filas devuelve una lista vacía
def loadCSV(file: str, database: str, table: str) -> list:
    """Load a CSV file from path *file* into *database*.*table*,
    issuing one insert() per line.

    Returns:
        A list with the insert() status code of every row; an empty
        list when an error occurred or the file has no rows.
    """
    results = []
    try:
        # Name the handle "source" so it does not shadow the csv module.
        with open(file, "r") as source:
            for line in source:
                row = line.strip().split(",")
                results.append(insert(database, table, row))
    except Exception:
        # Contract: any failure while reading or inserting yields [].
        return []
    return results
def delete(database: str, table: str, columns: list):
    """Delete from *table* the register whose primary key matches
    *columns*.

    Returns:
        The backend's status code; 2 when the database does not exist,
        1 when its storage mode is not recognized.
    """
    db = databases.search(database)
    if db is None:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    backend = backends.get(db.mode)
    if backend is None:
        # Original chain had no else: "result" would be unbound here.
        return 1
    return backend.delete(database, table, columns)
def extractTable(database, table):
    """Return every register stored in *database*.*table*.

    Returns None when the database or the table does not exist;
    otherwise the extraction result of the last matching instance
    (an empty list when no instance holds the table).
    """
    found = databases.find_all(database)
    if found == []:
        return None
    if databases.find_table(database, table) == None:
        return None
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    result = []
    for db in found:
        # Skip instances that do not hold the table.
        if db.tables.search(table) == None:
            continue
        backend = backends.get(db.mode)
        if backend == None:
            continue  # unrecognized mode, same as the original "else"
        result = backend.extractTable(database, table)
    return result
def extractRangeTable(database: str, table: str, columnNumber: int, lower: any, upper: any) -> list:
    """Return the registers of *table* whose column *columnNumber*
    falls between *lower* and *upper*.

    Returns:
        The backend's result list; None when the database does not
        exist or its storage mode is not recognized.
    """
    db = databases.search(database)
    if db is None:
        return None
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    backend = backends.get(db.mode)
    if backend is None:
        # Original chain had no else: "result" would be unbound here.
        return None
    return backend.extractRangeTable(database, table, columnNumber, lower, upper)
def alterTable(database, tableOld, tableNew):
    """Rename *tableOld* to *tableNew* in every database instance
    named *database*.

    Returns:
        0 success, 1 persistence error, 2 database not found,
        3 table not found, or the backend's error code.
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    result = 3  # avoids UnboundLocalError when every instance is skipped
    for db in dbs:
        backend = backends.get(db.mode)
        if backend is None:
            continue  # unknown mode: skip instead of reusing a stale result
        result = backend.alterTable(database, tableOld, tableNew)
        if result == 3:
            continue  # this instance does not hold the table; try the next
        if result == 0:
            # Keep the in-memory metadata in sync with the backend.
            db.tables.search(tableOld).name = tableNew
            # Persist the catalog; genuinely retry up to 5 times (the
            # original "except: break" gave up after the first failure,
            # so the range(5) loop never actually retried).
            for _ in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                    return result
                except Exception:
                    continue
            return 1
        break  # hard error from the backend: stop and report it
    return result
def dropTable(database, table):
    """Drop *table* from every database instance named *database*.

    Returns:
        0 success, 1 persistence error, 2 database not found,
        3 table not found, or the backend's error code.
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    result = 3  # avoids UnboundLocalError when every instance is skipped
    for db in dbs:
        backend = backends.get(db.mode)
        if backend is None:
            continue  # unknown mode: skip instead of reusing a stale result
        result = backend.dropTable(database, table)
        if result == 3:
            continue  # this instance does not hold the table; try the next
        if result == 0:
            # Remove the table from the in-memory metadata as well.
            db.tables.delete(table)
            # Persist the catalog; genuinely retry up to 5 times (the
            # original "except: break" gave up after the first failure).
            for _ in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                    return result
                except Exception:
                    continue
            return 1
        break  # hard error from the backend: stop and report it
    return result
# Descripción:
# Elimina una tabla especificada en una base de datos con el nombre y modo especificados.
# Parámetros:
# database:str - El nombre de la base de datos que se va a utilizar
# table:str - El nombre de la tabla que se desea eliminar
# mode:str - El modo que debe tener la base de datos que se va a utilizar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error durante la operación
# 2 - database no existe
# 3 - table no existe
def __drop_table_sp(database, table, mode):
    """Drop *table* only from the database instances of *database*
    whose storage mode equals *mode*.

    Parameters:
        database: name of the database to use.
        table: name of the table to drop.
        mode: storage mode the target instance must have.

    Returns:
        0 success, 1 persistence error, 2 database not found,
        3 table not found.
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    if databases.find_table(database, table) is None:
        return 3
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    # Initialize so the final "return result" cannot raise
    # UnboundLocalError when no instance runs the requested mode.
    result = 3
    for db in dbs:
        if db.mode != mode:
            continue  # same effect as the original "else: continue"
        backend = backends.get(mode)
        if backend is None:
            continue
        result = backend.dropTable(database, table)
        if result == 3:
            continue
        if result == 0:
            # Remove the table from the in-memory metadata as well.
            db.tables.delete(table)
            # Persist the catalog; genuinely retry up to 5 times (the
            # original "except: break" gave up after the first failure).
            for _ in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                    return result
                except Exception:
                    continue
            return 1
        break  # hard error from the backend: stop and report it
    return result
def extractRow(database, table, columns):
    """Return the register of *table* identified by the primary-key
    values in *columns*.

    Returns:
        The backend's result; 2 (legacy integer code) when the
        database does not exist, [] when its storage mode is not
        recognized.
    """
    db = databases.search(database)
    if db is None:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    backend = backends.get(db.mode)
    if backend is None:
        # Original chain had no else: "result" would be unbound here.
        # Empty list matches the "row not found" convention of the
        # backends — TODO confirm against callers.
        return []
    return backend.extractRow(database, table, columns)
def update(database, table, register, columns):
    """Update in *table* the register matched by the primary-key values
    in *columns* with the contents of *register*.

    Returns:
        The backend's status code; 2 when the database does not exist,
        1 when its storage mode is not recognized.
    """
    db = databases.search(database)
    if db is None:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    backend = backends.get(db.mode)
    if backend is None:
        # Original chain had no else: "result" would be unbound here.
        return 1
    return backend.update(database, table, register, columns)
def truncate(database, table):
    """Remove every register from *table*, keeping its structure.

    Returns:
        The backend's status code; 2 when the database does not exist,
        1 when its storage mode is not recognized.
    """
    db = databases.search(database)
    if db is None:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    backend = backends.get(db.mode)
    if backend is None:
        # Original chain had no else: "result" would be unbound here.
        return 1
    return backend.truncate(database, table)
def alterAddPK(database, table, columns):
    """Add the column indexes in *columns* to the primary key of
    *table* in every database instance named *database*.

    Returns:
        0 success, 1 persistence error, 2 database not found,
        3 table not found, or the backend's error code.
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    backends = {
        "avl": avlMode,
        "b": BMode,
        "bplus": BPlusMode,
        "dict": DictMode,
        "isam": ISAMMode,
        "json": jsonMode,
        "hash": HashMode,
    }
    result = 3  # avoids UnboundLocalError when every instance is skipped
    for db in dbs:
        backend = backends.get(db.mode)
        if backend is None:
            continue  # unknown mode: skip instead of reusing a stale result
        result = backend.alterAddPK(database, table, columns)
        if result == 3:
            continue  # this instance does not hold the table; try the next
        if result == 0:
            # Mirror the new key columns in the in-memory metadata.
            db.tables.search(table).pk += columns
            # Persist the catalog; genuinely retry up to 5 times (the
            # original "except: break" gave up after the first failure).
            for _ in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                    return result
                except Exception:
                    continue
            return 1
        break  # hard error from the backend: stop and report it
    return result
"storage.bplus.BPlusMode.dropDatabase",
"storage.b.BMode.extractRangeTable",
"storage.bplus.BPlusMode.extractRangeTable",
"storage.b.BMode.update",
"storage.json_mode.jsonMode.delete",
"storage.json_mode.jsonMode.extractRow",
"storage.hash.HashMode.extractRangeTable",
"storage.hash.HashMode.alterDatab... | [((977, 1022), 'storage.b.Serializable.rollback', 'Serializable.rollback', (['"""lista_bases_de_datos"""'], {}), "('lista_bases_de_datos')\n", (998, 1022), False, 'from storage.b import Serializable\n'), ((12469, 12505), 're.search', 're.search', (['DB_NAME_PATTERN', 'database'], {}), '(DB_NAME_PATTERN, database)\n', (12478, 12505), False, 'import re\n'), ((14168, 14204), 're.search', 're.search', (['DB_NAME_PATTERN', 'database'], {}), '(DB_NAME_PATTERN, database)\n', (14177, 14204), False, 'import re\n'), ((1067, 1075), 'DBList.DBList', 'DBList', ([], {}), '()\n', (1073, 1075), False, 'from DBList import DBList\n'), ((1081, 1135), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (1100, 1135), False, 'from storage.b import Serializable\n'), ((1817, 1849), 'storage.avl.avlMode.createDatabase', 'avlMode.createDatabase', (['database'], {}), '(database)\n', (1839, 1849), False, 'from storage.avl import avlMode\n'), ((3898, 3930), 'storage.avl.avlMode.createDatabase', 'avlMode.createDatabase', (['database'], {}), '(database)\n', (3920, 3930), False, 'from storage.avl import avlMode\n'), ((10717, 10756), 're.search', 're.search', (['DB_NAME_PATTERN', 'databaseOld'], {}), '(DB_NAME_PATTERN, databaseOld)\n', (10726, 10756), False, 'import re\n'), ((10761, 10800), 're.search', 're.search', (['DB_NAME_PATTERN', 'databaseNew'], {}), '(DB_NAME_PATTERN, databaseNew)\n', (10770, 10800), False, 'import re\n'), ((16182, 16233), 'storage.avl.avlMode.createTable', 'avlMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16201, 16233), False, 'from storage.avl import avlMode\n'), ((31308, 31348), 'storage.avl.avlMode.delete', 'avlMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31322, 31348), False, 'from storage.avl import avlMode\n'), ((33179, 33249), 
'storage.avl.avlMode.extractRangeTable', 'avlMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33204, 33249), False, 'from storage.avl import avlMode\n'), ((38390, 38434), 'storage.avl.avlMode.extractRow', 'avlMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38408, 38434), False, 'from storage.avl import avlMode\n'), ((39178, 39228), 'storage.avl.avlMode.update', 'avlMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39192, 39228), False, 'from storage.avl import avlMode\n'), ((39991, 40024), 'storage.avl.avlMode.truncate', 'avlMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40007, 40024), False, 'from storage.avl import avlMode\n'), ((1889, 1919), 'storage.b.BMode.createDatabase', 'BMode.createDatabase', (['database'], {}), '(database)\n', (1909, 1919), False, 'from storage.b import BMode\n'), ((3970, 4000), 'storage.b.BMode.createDatabase', 'BMode.createDatabase', (['database'], {}), '(database)\n', (3990, 4000), False, 'from storage.b import BMode\n'), ((16278, 16327), 'storage.b.BMode.createTable', 'BMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16295, 16327), False, 'from storage.b import BMode\n'), ((18088, 18139), 'storage.avl.avlMode.createTable', 'avlMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (18107, 18139), False, 'from storage.avl import avlMode\n'), ((19897, 19925), 'storage.avl.avlMode.showTables', 'avlMode.showTables', (['database'], {}), '(database)\n', (19915, 19925), False, 'from storage.avl import avlMode\n'), ((21161, 21197), 'storage.avl.avlMode.alterDropPK', 'avlMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21180, 21197), False, 'from storage.avl import avlMode\n'), ((22871, 22919), 
'storage.avl.avlMode.alterAddColumn', 'avlMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', (22893, 22919), False, 'from storage.avl import avlMode\n'), ((24727, 24781), 'storage.avl.avlMode.alterDropColumn', 'avlMode.alterDropColumn', (['database', 'table', 'columnNumber'], {}), '(database, table, columnNumber)\n', (24750, 24781), False, 'from storage.avl import avlMode\n'), ((27569, 27610), 'storage.avl.avlMode.insert', 'avlMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (27583, 27610), False, 'from storage.avl import avlMode\n'), ((29532, 29573), 'storage.avl.avlMode.insert', 'avlMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (29546, 29573), False, 'from storage.avl import avlMode\n'), ((31393, 31431), 'storage.b.BMode.delete', 'BMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31405, 31431), False, 'from storage.b import BMode\n'), ((33291, 33359), 'storage.b.BMode.extractRangeTable', 'BMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33314, 33359), False, 'from storage.b import BMode\n'), ((34156, 34204), 'storage.avl.avlMode.alterTable', 'avlMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34174, 34204), False, 'from storage.avl import avlMode\n'), ((35419, 35453), 'storage.avl.avlMode.dropTable', 'avlMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35436, 35453), False, 'from storage.avl import avlMode\n'), ((37161, 37195), 'storage.avl.avlMode.dropTable', 'avlMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37178, 37195), False, 'from storage.avl import avlMode\n'), ((38479, 38521), 'storage.b.BMode.extractRow', 'BMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38495, 38521), 
False, 'from storage.b import BMode\n'), ((39273, 39321), 'storage.b.BMode.update', 'BMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39285, 39321), False, 'from storage.b import BMode\n'), ((40069, 40100), 'storage.b.BMode.truncate', 'BMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40083, 40100), False, 'from storage.b import BMode\n'), ((40726, 40770), 'storage.avl.avlMode.alterAddPK', 'avlMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (40744, 40770), False, 'from storage.avl import avlMode\n'), ((1963, 1997), 'storage.bplus.BPlusMode.createDatabase', 'BPlusMode.createDatabase', (['database'], {}), '(database)\n', (1987, 1997), False, 'from storage.bplus import BPlusMode\n'), ((4044, 4078), 'storage.bplus.BPlusMode.createDatabase', 'BPlusMode.createDatabase', (['database'], {}), '(database)\n', (4068, 4078), False, 'from storage.bplus import BPlusMode\n'), ((11049, 11096), 'storage.avl.avlMode.alterDatabase', 'avlMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11070, 11096), False, 'from storage.avl import avlMode\n'), ((12678, 12708), 'storage.avl.avlMode.dropDatabase', 'avlMode.dropDatabase', (['database'], {}), '(database)\n', (12698, 12708), False, 'from storage.avl import avlMode\n'), ((14438, 14468), 'storage.avl.avlMode.dropDatabase', 'avlMode.dropDatabase', (['database'], {}), '(database)\n', (14458, 14468), False, 'from storage.avl import avlMode\n'), ((16376, 16429), 'storage.bplus.BPlusMode.createTable', 'BPlusMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16397, 16429), False, 'from storage.bplus import BPlusMode\n'), ((18200, 18249), 'storage.b.BMode.createTable', 'BMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (18217, 18249), False, 'from storage.b import BMode\n'), 
((19979, 20005), 'storage.b.BMode.showTables', 'BMode.showTables', (['database'], {}), '(database)\n', (19995, 20005), False, 'from storage.b import BMode\n'), ((21250, 21284), 'storage.b.BMode.alterDropPK', 'BMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21267, 21284), False, 'from storage.b import BMode\n'), ((22972, 23018), 'storage.b.BMode.alterAddColumn', 'BMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', (22992, 23018), False, 'from storage.b import BMode\n'), ((24834, 24886), 'storage.b.BMode.alterDropColumn', 'BMode.alterDropColumn', (['database', 'table', 'columnNumber'], {}), '(database, table, columnNumber)\n', (24855, 24886), False, 'from storage.b import BMode\n'), ((27663, 27702), 'storage.b.BMode.insert', 'BMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (27675, 27702), False, 'from storage.b import BMode\n'), ((29634, 29673), 'storage.b.BMode.insert', 'BMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (29646, 29673), False, 'from storage.b import BMode\n'), ((31480, 31522), 'storage.bplus.BPlusMode.delete', 'BPlusMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31496, 31522), False, 'from storage.bplus import BPlusMode\n'), ((32250, 32287), 'storage.avl.avlMode.extractTable', 'avlMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32270, 32287), False, 'from storage.avl import avlMode\n'), ((33405, 33477), 'storage.bplus.BPlusMode.extractRangeTable', 'BPlusMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33432, 33477), False, 'from storage.bplus import BPlusMode\n'), ((34255, 34301), 'storage.b.BMode.alterTable', 'BMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34271, 34301), False, 'from storage.b 
import BMode\n'), ((35506, 35538), 'storage.b.BMode.dropTable', 'BMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35521, 35538), False, 'from storage.b import BMode\n'), ((37256, 37288), 'storage.b.BMode.dropTable', 'BMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37271, 37288), False, 'from storage.b import BMode\n'), ((38570, 38616), 'storage.bplus.BPlusMode.extractRow', 'BPlusMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38590, 38616), False, 'from storage.bplus import BPlusMode\n'), ((39370, 39422), 'storage.bplus.BPlusMode.update', 'BPlusMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39386, 39422), False, 'from storage.bplus import BPlusMode\n'), ((40149, 40184), 'storage.bplus.BPlusMode.truncate', 'BPlusMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40167, 40184), False, 'from storage.bplus import BPlusMode\n'), ((40823, 40865), 'storage.b.BMode.alterAddPK', 'BMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (40839, 40865), False, 'from storage.b import BMode\n'), ((2040, 2073), 'storage.dict.DictMode.createDatabase', 'DictMode.createDatabase', (['database'], {}), '(database)\n', (2063, 2073), False, 'from storage.dict import DictMode\n'), ((2491, 2545), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (2510, 2545), False, 'from storage.b import Serializable\n'), ((4121, 4154), 'storage.dict.DictMode.createDatabase', 'DictMode.createDatabase', (['database'], {}), '(database)\n', (4144, 4154), False, 'from storage.dict import DictMode\n'), ((4572, 4626), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (4591, 4626), False, 'from storage.b import 
Serializable\n'), ((11155, 11200), 'storage.b.BMode.alterDatabase', 'BMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11174, 11200), False, 'from storage.b import BMode\n'), ((11862, 11916), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (11881, 11916), False, 'from storage.b import Serializable\n'), ((12767, 12795), 'storage.b.BMode.dropDatabase', 'BMode.dropDatabase', (['database'], {}), '(database)\n', (12785, 12795), False, 'from storage.b import BMode\n'), ((14535, 14563), 'storage.b.BMode.dropDatabase', 'BMode.dropDatabase', (['database'], {}), '(database)\n', (14553, 14563), False, 'from storage.b import BMode\n'), ((16477, 16529), 'storage.dict.DictMode.createTable', 'DictMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16497, 16529), False, 'from storage.dict import DictMode\n'), ((16983, 17037), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (17002, 17037), False, 'from storage.b import Serializable\n'), ((18314, 18367), 'storage.bplus.BPlusMode.createTable', 'BPlusMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (18335, 18367), False, 'from storage.bplus import BPlusMode\n'), ((20063, 20093), 'storage.bplus.BPlusMode.showTables', 'BPlusMode.showTables', (['database'], {}), '(database)\n', (20083, 20093), False, 'from storage.bplus import BPlusMode\n'), ((21341, 21379), 'storage.bplus.BPlusMode.alterDropPK', 'BPlusMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21362, 21379), False, 'from storage.bplus import BPlusMode\n'), ((23075, 23125), 'storage.bplus.BPlusMode.alterAddColumn', 'BPlusMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', 
(23099, 23125), False, 'from storage.bplus import BPlusMode\n'), ((24943, 24999), 'storage.bplus.BPlusMode.alterDropColumn', 'BPlusMode.alterDropColumn', (['database', 'table', 'columnNumber'], {}), '(database, table, columnNumber)\n', (24968, 24999), False, 'from storage.bplus import BPlusMode\n'), ((27759, 27802), 'storage.bplus.BPlusMode.insert', 'BPlusMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (27775, 27802), False, 'from storage.bplus import BPlusMode\n'), ((29738, 29781), 'storage.bplus.BPlusMode.insert', 'BPlusMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (29754, 29781), False, 'from storage.bplus import BPlusMode\n'), ((31570, 31611), 'storage.dict.DictMode.delete', 'DictMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31585, 31611), False, 'from storage.dict import DictMode\n'), ((32347, 32382), 'storage.b.BMode.extractTable', 'BMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32365, 32382), False, 'from storage.b import BMode\n'), ((33522, 33593), 'storage.dict.DictMode.extractRangeTable', 'DictMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33548, 33593), False, 'from storage.dict import DictMode\n'), ((34356, 34406), 'storage.bplus.BPlusMode.alterTable', 'BPlusMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34376, 34406), False, 'from storage.bplus import BPlusMode\n'), ((35595, 35631), 'storage.bplus.BPlusMode.dropTable', 'BPlusMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35614, 35631), False, 'from storage.bplus import BPlusMode\n'), ((37353, 37389), 'storage.bplus.BPlusMode.dropTable', 'BPlusMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37372, 37389), False, 'from storage.bplus import BPlusMode\n'), ((38664, 38709), 
'storage.dict.DictMode.extractRow', 'DictMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38683, 38709), False, 'from storage.dict import DictMode\n'), ((39470, 39521), 'storage.dict.DictMode.update', 'DictMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39485, 39521), False, 'from storage.dict import DictMode\n'), ((40232, 40266), 'storage.dict.DictMode.truncate', 'DictMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40249, 40266), False, 'from storage.dict import DictMode\n'), ((40922, 40968), 'storage.bplus.BPlusMode.alterAddPK', 'BPlusMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (40942, 40968), False, 'from storage.bplus import BPlusMode\n'), ((2116, 2149), 'storage.isam.ISAMMode.createDatabase', 'ISAMMode.createDatabase', (['database'], {}), '(database)\n', (2139, 2149), False, 'from storage.isam import ISAMMode\n'), ((4197, 4230), 'storage.isam.ISAMMode.createDatabase', 'ISAMMode.createDatabase', (['database'], {}), '(database)\n', (4220, 4230), False, 'from storage.isam import ISAMMode\n'), ((11263, 11312), 'storage.bplus.BPlusMode.alterDatabase', 'BPlusMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11286, 11312), False, 'from storage.bplus import BPlusMode\n'), ((12858, 12890), 'storage.bplus.BPlusMode.dropDatabase', 'BPlusMode.dropDatabase', (['database'], {}), '(database)\n', (12880, 12890), False, 'from storage.bplus import BPlusMode\n'), ((13420, 13474), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (13439, 13474), False, 'from storage.b import Serializable\n'), ((14634, 14666), 'storage.bplus.BPlusMode.dropDatabase', 'BPlusMode.dropDatabase', (['database'], {}), '(database)\n', (14656, 14666), False, 'from storage.bplus import BPlusMode\n'), 
((15285, 15339), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (15304, 15339), False, 'from storage.b import Serializable\n'), ((16577, 16629), 'storage.isam.ISAMMode.createTable', 'ISAMMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16597, 16629), False, 'from storage.isam import ISAMMode\n'), ((18431, 18483), 'storage.dict.DictMode.createTable', 'DictMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (18451, 18483), False, 'from storage.dict import DictMode\n'), ((19042, 19096), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (19061, 19096), False, 'from storage.b import Serializable\n'), ((20150, 20179), 'storage.dict.DictMode.showTables', 'DictMode.showTables', (['database'], {}), '(database)\n', (20169, 20179), False, 'from storage.dict import DictMode\n'), ((21435, 21472), 'storage.dict.DictMode.alterDropPK', 'DictMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21455, 21472), False, 'from storage.dict import DictMode\n'), ((21942, 21996), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (21961, 21996), False, 'from storage.b import Serializable\n'), ((23181, 23230), 'storage.dict.DictMode.alterAddColumn', 'DictMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', (23204, 23230), False, 'from storage.dict import DictMode\n'), ((23734, 23788), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (23753, 23788), False, 'from storage.b import Serializable\n'), ((25055, 25110), 
'storage.dict.DictMode.alterDropColumn', 'DictMode.alterDropColumn', (['database', 'table', 'columnNumber'], {}), '(database, table, columnNumber)\n', (25079, 25110), False, 'from storage.dict import DictMode\n'), ((25632, 25686), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (25651, 25686), False, 'from storage.b import Serializable\n'), ((27858, 27900), 'storage.dict.DictMode.insert', 'DictMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (27873, 27900), False, 'from storage.dict import DictMode\n'), ((29845, 29887), 'storage.dict.DictMode.insert', 'DictMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (29860, 29887), False, 'from storage.dict import DictMode\n'), ((31659, 31700), 'storage.isam.ISAMMode.delete', 'ISAMMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31674, 31700), False, 'from storage.isam import ISAMMode\n'), ((32446, 32485), 'storage.bplus.BPlusMode.extractTable', 'BPlusMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32468, 32485), False, 'from storage.bplus import BPlusMode\n'), ((33638, 33709), 'storage.isam.ISAMMode.extractRangeTable', 'ISAMMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33664, 33709), False, 'from storage.isam import ISAMMode\n'), ((34460, 34509), 'storage.dict.DictMode.alterTable', 'DictMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34479, 34509), False, 'from storage.dict import DictMode\n'), ((35018, 35072), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (35037, 35072), False, 'from storage.b import Serializable\n'), ((35687, 35722), 
'storage.dict.DictMode.dropTable', 'DictMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35705, 35722), False, 'from storage.dict import DictMode\n'), ((36178, 36232), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (36197, 36232), False, 'from storage.b import Serializable\n'), ((37453, 37488), 'storage.dict.DictMode.dropTable', 'DictMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37471, 37488), False, 'from storage.dict import DictMode\n'), ((38005, 38059), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (38024, 38059), False, 'from storage.b import Serializable\n'), ((38757, 38802), 'storage.isam.ISAMMode.extractRow', 'ISAMMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38776, 38802), False, 'from storage.isam import ISAMMode\n'), ((39569, 39620), 'storage.isam.ISAMMode.update', 'ISAMMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39584, 39620), False, 'from storage.isam import ISAMMode\n'), ((40314, 40348), 'storage.isam.ISAMMode.truncate', 'ISAMMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40331, 40348), False, 'from storage.isam import ISAMMode\n'), ((41024, 41069), 'storage.dict.DictMode.alterAddPK', 'DictMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (41043, 41069), False, 'from storage.dict import DictMode\n'), ((41569, 41623), 'storage.b.Serializable.commit', 'Serializable.commit', (['databases', '"""lista_bases_de_datos"""'], {}), "(databases, 'lista_bases_de_datos')\n", (41588, 41623), False, 'from storage.b import Serializable\n'), ((2192, 2225), 'storage.json_mode.jsonMode.createDatabase', 'jsonMode.createDatabase', (['database'], {}), 
'(database)\n', (2215, 2225), False, 'from storage.json_mode import jsonMode\n'), ((4273, 4306), 'storage.json_mode.jsonMode.createDatabase', 'jsonMode.createDatabase', (['database'], {}), '(database)\n', (4296, 4306), False, 'from storage.json_mode import jsonMode\n'), ((11374, 11422), 'storage.dict.DictMode.alterDatabase', 'DictMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11396, 11422), False, 'from storage.dict import DictMode\n'), ((12952, 12983), 'storage.dict.DictMode.dropDatabase', 'DictMode.dropDatabase', (['database'], {}), '(database)\n', (12973, 12983), False, 'from storage.dict import DictMode\n'), ((14736, 14767), 'storage.dict.DictMode.dropDatabase', 'DictMode.dropDatabase', (['database'], {}), '(database)\n', (14757, 14767), False, 'from storage.dict import DictMode\n'), ((16677, 16729), 'storage.json_mode.jsonMode.createTable', 'jsonMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16697, 16729), False, 'from storage.json_mode import jsonMode\n'), ((18547, 18599), 'storage.isam.ISAMMode.createTable', 'ISAMMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (18567, 18599), False, 'from storage.isam import ISAMMode\n'), ((20236, 20265), 'storage.isam.ISAMMode.showTables', 'ISAMMode.showTables', (['database'], {}), '(database)\n', (20255, 20265), False, 'from storage.isam import ISAMMode\n'), ((21528, 21565), 'storage.isam.ISAMMode.alterDropPK', 'ISAMMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21548, 21565), False, 'from storage.isam import ISAMMode\n'), ((23286, 23335), 'storage.isam.ISAMMode.alterAddColumn', 'ISAMMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', (23309, 23335), False, 'from storage.isam import ISAMMode\n'), ((25166, 25221), 'storage.isam.ISAMMode.alterDropColumn', 'ISAMMode.alterDropColumn', (['database', 'table', 
'columnNumber'], {}), '(database, table, columnNumber)\n', (25190, 25221), False, 'from storage.isam import ISAMMode\n'), ((27956, 27998), 'storage.isam.ISAMMode.insert', 'ISAMMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (27971, 27998), False, 'from storage.isam import ISAMMode\n'), ((29951, 29993), 'storage.isam.ISAMMode.insert', 'ISAMMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (29966, 29993), False, 'from storage.isam import ISAMMode\n'), ((31748, 31789), 'storage.json_mode.jsonMode.delete', 'jsonMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31763, 31789), False, 'from storage.json_mode import jsonMode\n'), ((32548, 32586), 'storage.dict.DictMode.extractTable', 'DictMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32569, 32586), False, 'from storage.dict import DictMode\n'), ((33754, 33825), 'storage.json_mode.jsonMode.extractRangeTable', 'jsonMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33780, 33825), False, 'from storage.json_mode import jsonMode\n'), ((34563, 34612), 'storage.isam.ISAMMode.alterTable', 'ISAMMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34582, 34612), False, 'from storage.isam import ISAMMode\n'), ((35778, 35813), 'storage.isam.ISAMMode.dropTable', 'ISAMMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35796, 35813), False, 'from storage.isam import ISAMMode\n'), ((37552, 37587), 'storage.isam.ISAMMode.dropTable', 'ISAMMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37570, 37587), False, 'from storage.isam import ISAMMode\n'), ((38850, 38895), 'storage.json_mode.jsonMode.extractRow', 'jsonMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38869, 38895), False, 'from 
storage.json_mode import jsonMode\n'), ((39668, 39719), 'storage.json_mode.jsonMode.update', 'jsonMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39683, 39719), False, 'from storage.json_mode import jsonMode\n'), ((40396, 40430), 'storage.json_mode.jsonMode.truncate', 'jsonMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40413, 40430), False, 'from storage.json_mode import jsonMode\n'), ((41125, 41170), 'storage.isam.ISAMMode.alterAddPK', 'ISAMMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (41144, 41170), False, 'from storage.isam import ISAMMode\n'), ((2268, 2301), 'storage.hash.HashMode.createDatabase', 'HashMode.createDatabase', (['database'], {}), '(database)\n', (2291, 2301), False, 'from storage.hash import HashMode\n'), ((4349, 4382), 'storage.hash.HashMode.createDatabase', 'HashMode.createDatabase', (['database'], {}), '(database)\n', (4372, 4382), False, 'from storage.hash import HashMode\n'), ((11484, 11532), 'storage.isam.ISAMMode.alterDatabase', 'ISAMMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11506, 11532), False, 'from storage.isam import ISAMMode\n'), ((13045, 13076), 'storage.isam.ISAMMode.dropDatabase', 'ISAMMode.dropDatabase', (['database'], {}), '(database)\n', (13066, 13076), False, 'from storage.isam import ISAMMode\n'), ((14837, 14868), 'storage.isam.ISAMMode.dropDatabase', 'ISAMMode.dropDatabase', (['database'], {}), '(database)\n', (14858, 14868), False, 'from storage.isam import ISAMMode\n'), ((16777, 16829), 'storage.hash.HashMode.createTable', 'HashMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (16797, 16829), False, 'from storage.hash import HashMode\n'), ((18663, 18715), 'storage.json_mode.jsonMode.createTable', 'jsonMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, 
numberColumns)\n', (18683, 18715), False, 'from storage.json_mode import jsonMode\n'), ((20322, 20351), 'storage.json_mode.jsonMode.showTables', 'jsonMode.showTables', (['database'], {}), '(database)\n', (20341, 20351), False, 'from storage.json_mode import jsonMode\n'), ((21621, 21658), 'storage.json_mode.jsonMode.alterDropPK', 'jsonMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21641, 21658), False, 'from storage.json_mode import jsonMode\n'), ((23391, 23440), 'storage.json_mode.jsonMode.alterAddColumn', 'jsonMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', (23414, 23440), False, 'from storage.json_mode import jsonMode\n'), ((25277, 25332), 'storage.json_mode.jsonMode.alterDropColumn', 'jsonMode.alterDropColumn', (['database', 'table', 'columnNumber'], {}), '(database, table, columnNumber)\n', (25301, 25332), False, 'from storage.json_mode import jsonMode\n'), ((28054, 28096), 'storage.json_mode.jsonMode.insert', 'jsonMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (28069, 28096), False, 'from storage.json_mode import jsonMode\n'), ((30057, 30099), 'storage.json_mode.jsonMode.insert', 'jsonMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (30072, 30099), False, 'from storage.json_mode import jsonMode\n'), ((31837, 31878), 'storage.hash.HashMode.delete', 'HashMode.delete', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (31852, 31878), False, 'from storage.hash import HashMode\n'), ((32649, 32687), 'storage.isam.ISAMMode.extractTable', 'ISAMMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32670, 32687), False, 'from storage.isam import ISAMMode\n'), ((33870, 33941), 'storage.hash.HashMode.extractRangeTable', 'HashMode.extractRangeTable', (['database', 'table', 'columnNumber', 'lower', 'upper'], {}), '(database, table, columnNumber, lower, upper)\n', (33896, 33941), False, 
'from storage.hash import HashMode\n'), ((34666, 34715), 'storage.json_mode.jsonMode.alterTable', 'jsonMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34685, 34715), False, 'from storage.json_mode import jsonMode\n'), ((35869, 35904), 'storage.json_mode.jsonMode.dropTable', 'jsonMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35887, 35904), False, 'from storage.json_mode import jsonMode\n'), ((37651, 37686), 'storage.json_mode.jsonMode.dropTable', 'jsonMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37669, 37686), False, 'from storage.json_mode import jsonMode\n'), ((38943, 38988), 'storage.hash.HashMode.extractRow', 'HashMode.extractRow', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (38962, 38988), False, 'from storage.hash import HashMode\n'), ((39767, 39818), 'storage.hash.HashMode.update', 'HashMode.update', (['database', 'table', 'register', 'columns'], {}), '(database, table, register, columns)\n', (39782, 39818), False, 'from storage.hash import HashMode\n'), ((40478, 40512), 'storage.hash.HashMode.truncate', 'HashMode.truncate', (['database', 'table'], {}), '(database, table)\n', (40495, 40512), False, 'from storage.hash import HashMode\n'), ((41226, 41271), 'storage.json_mode.jsonMode.alterAddPK', 'jsonMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (41245, 41271), False, 'from storage.json_mode import jsonMode\n'), ((11594, 11642), 'storage.json_mode.jsonMode.alterDatabase', 'jsonMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11616, 11642), False, 'from storage.json_mode import jsonMode\n'), ((13138, 13169), 'storage.json_mode.jsonMode.dropDatabase', 'jsonMode.dropDatabase', (['database'], {}), '(database)\n', (13159, 13169), False, 'from storage.json_mode import jsonMode\n'), ((14938, 14969), 'storage.json_mode.jsonMode.dropDatabase', 
'jsonMode.dropDatabase', (['database'], {}), '(database)\n', (14959, 14969), False, 'from storage.json_mode import jsonMode\n'), ((18779, 18831), 'storage.hash.HashMode.createTable', 'HashMode.createTable', (['database', 'table', 'numberColumns'], {}), '(database, table, numberColumns)\n', (18799, 18831), False, 'from storage.hash import HashMode\n'), ((20408, 20437), 'storage.hash.HashMode.showTables', 'HashMode.showTables', (['database'], {}), '(database)\n', (20427, 20437), False, 'from storage.hash import HashMode\n'), ((21714, 21751), 'storage.hash.HashMode.alterDropPK', 'HashMode.alterDropPK', (['database', 'table'], {}), '(database, table)\n', (21734, 21751), False, 'from storage.hash import HashMode\n'), ((23496, 23545), 'storage.hash.HashMode.alterAddColumn', 'HashMode.alterAddColumn', (['database', 'table', 'default'], {}), '(database, table, default)\n', (23519, 23545), False, 'from storage.hash import HashMode\n'), ((25388, 25443), 'storage.hash.HashMode.alterDropColumn', 'HashMode.alterDropColumn', (['database', 'table', 'columnNumber'], {}), '(database, table, columnNumber)\n', (25412, 25443), False, 'from storage.hash import HashMode\n'), ((28152, 28194), 'storage.hash.HashMode.insert', 'HashMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (28167, 28194), False, 'from storage.hash import HashMode\n'), ((30163, 30205), 'storage.hash.HashMode.insert', 'HashMode.insert', (['database', 'table', 'register'], {}), '(database, table, register)\n', (30178, 30205), False, 'from storage.hash import HashMode\n'), ((32750, 32788), 'storage.json_mode.jsonMode.extractTable', 'jsonMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32771, 32788), False, 'from storage.json_mode import jsonMode\n'), ((34769, 34818), 'storage.hash.HashMode.alterTable', 'HashMode.alterTable', (['database', 'tableOld', 'tableNew'], {}), '(database, tableOld, tableNew)\n', (34788, 34818), False, 'from storage.hash import 
HashMode\n'), ((35960, 35995), 'storage.hash.HashMode.dropTable', 'HashMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (35978, 35995), False, 'from storage.hash import HashMode\n'), ((37750, 37785), 'storage.hash.HashMode.dropTable', 'HashMode.dropTable', (['database', 'table'], {}), '(database, table)\n', (37768, 37785), False, 'from storage.hash import HashMode\n'), ((41327, 41372), 'storage.hash.HashMode.alterAddPK', 'HashMode.alterAddPK', (['database', 'table', 'columns'], {}), '(database, table, columns)\n', (41346, 41372), False, 'from storage.hash import HashMode\n'), ((11704, 11752), 'storage.hash.HashMode.alterDatabase', 'HashMode.alterDatabase', (['databaseOld', 'databaseNew'], {}), '(databaseOld, databaseNew)\n', (11726, 11752), False, 'from storage.hash import HashMode\n'), ((13231, 13262), 'storage.hash.HashMode.dropDatabase', 'HashMode.dropDatabase', (['database'], {}), '(database)\n', (13252, 13262), False, 'from storage.hash import HashMode\n'), ((15039, 15070), 'storage.hash.HashMode.dropDatabase', 'HashMode.dropDatabase', (['database'], {}), '(database)\n', (15060, 15070), False, 'from storage.hash import HashMode\n'), ((32851, 32889), 'storage.hash.HashMode.extractTable', 'HashMode.extractTable', (['database', 'table'], {}), '(database, table)\n', (32872, 32889), False, 'from storage.hash import HashMode\n')] |
from RPA.Browser.Selenium import Selenium
from RPA.FileSystem import FileSystem
import datetime
import os
class PDFDownloader:
def __init__(self, page_urls, names):
self.browser = Selenium()
self.files = FileSystem()
self._dir = f'{os.getcwd()}/output'
self._urls = page_urls
self._names = names
def pdf_download(self):
dir = f'{os.getcwd()}/output'
self.browser.set_download_directory(dir, True)
for num, url in enumerate(self._urls):
self.browser.open_available_browser(url)
self.browser.wait_until_element_is_visible(
locator="css:div#business-case-pdf>a",
timeout=datetime.timedelta(minutes=1))
self.browser.click_element(locator="css:div#business-case-pdf>a")
self.files.wait_until_created('{}/{}.pdf'.format(
dir, self._names[num]),
timeout=60.0 * 5)
self.browser.close_browser()
| [
"datetime.timedelta",
"RPA.FileSystem.FileSystem",
"RPA.Browser.Selenium.Selenium",
"os.getcwd"
] | [((196, 206), 'RPA.Browser.Selenium.Selenium', 'Selenium', ([], {}), '()\n', (204, 206), False, 'from RPA.Browser.Selenium import Selenium\n'), ((228, 240), 'RPA.FileSystem.FileSystem', 'FileSystem', ([], {}), '()\n', (238, 240), False, 'from RPA.FileSystem import FileSystem\n'), ((264, 275), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (273, 275), False, 'import os\n'), ((390, 401), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (399, 401), False, 'import os\n'), ((703, 732), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (721, 732), False, 'import datetime\n')] |
"""Constants for the Ridwell integration."""
import logging
DOMAIN = "ridwell"
LOGGER = logging.getLogger(__package__)
DATA_ACCOUNT = "account"
DATA_COORDINATOR = "coordinator"
SENSOR_TYPE_NEXT_PICKUP = "next_pickup"
| [
"logging.getLogger"
] | [((90, 120), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (107, 120), False, 'import logging\n')] |
import subprocess
import fnmatch
from pathlib import Path
import os
import re
def all_files(src):
regex_include = re.compile("|".join((fnmatch.translate(e) for e in src.included_files)))
regex_exclude = re.compile("|".join((fnmatch.translate(e) for e in src.excluded_files)))
for root, dirs, files in os.walk(src.root):
dirs[:] = [d for d in dirs if d not in src.excluded_paths]
path = os.path.relpath(root, src.root)
for file in files:
if regex_include.match(file) and not (src.excluded_files and regex_exclude.match(file)):
yield os.path.join(path, file)
def _run_rg(args):
result = subprocess.run(args, stdout=subprocess.PIPE)
out = result.stdout.decode("utf-8")
if out:
print(out, end="", flush=True)
def _chunk_by_len(arr, max_size):
chunk = []
size = 0
for e in arr:
if len(e) + size + len(chunk) > max_size:
yield chunk
chunk.clear()
size = 0
size += len(e)
chunk.append(e)
if chunk:
yield chunk
def grep(config, args):
os.chdir(config.src.root)
cmd = [config.unix.rg, "--color", "never", "-j1"] + args
max = 4096 - sum(len(e) for e in cmd)
for chunk in _chunk_by_len(all_files(config.src), max):
_run_rg(cmd + chunk)
| [
"fnmatch.translate",
"subprocess.run",
"os.path.join",
"os.chdir",
"os.walk",
"os.path.relpath"
] | [((316, 333), 'os.walk', 'os.walk', (['src.root'], {}), '(src.root)\n', (323, 333), False, 'import os\n'), ((658, 702), 'subprocess.run', 'subprocess.run', (['args'], {'stdout': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE)\n', (672, 702), False, 'import subprocess\n'), ((1109, 1134), 'os.chdir', 'os.chdir', (['config.src.root'], {}), '(config.src.root)\n', (1117, 1134), False, 'import os\n'), ((417, 448), 'os.path.relpath', 'os.path.relpath', (['root', 'src.root'], {}), '(root, src.root)\n', (432, 448), False, 'import os\n'), ((141, 161), 'fnmatch.translate', 'fnmatch.translate', (['e'], {}), '(e)\n', (158, 161), False, 'import fnmatch\n'), ((234, 254), 'fnmatch.translate', 'fnmatch.translate', (['e'], {}), '(e)\n', (251, 254), False, 'import fnmatch\n'), ((599, 623), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (611, 623), False, 'import os\n')] |
import click
class command:
def __init__(self, name=None, cls=click.Command, **attrs):
self.name = name
self.cls = cls
self.attrs = attrs
def __call__(self, method):
def __command__(this):
def wrapper(*args, **kwargs):
return method(this, *args, **kwargs)
if hasattr(method, "__options__"):
options = method.__options__
return self.cls(self.name, callback=wrapper, params=options, **self.attrs)
method.__command__ = __command__
return method
class option:
def __init__(self, *param_decls, **attrs):
self.param_decls = param_decls
self.attrs = attrs
def __call__(self, method):
if not hasattr(method, "__options__"):
method.__options__ = []
method.__options__.append(
click.Option(param_decls=self.param_decls, **self.attrs)
)
return method
class Cli:
def __new__(cls, *args, **kwargs):
self = super(Cli, cls).__new__(cls, *args, **kwargs)
self._cli = click.Group()
# Wrap instance options
self.__option_callbacks__ = set()
for attr_name in dir(cls):
attr = getattr(cls, attr_name)
if hasattr(attr, "__options__") and not hasattr(attr, "__command__"):
self._cli.params.extend(attr.__options__)
self.__option_callbacks__.add(attr)
# Wrap commands
for attr_name in dir(cls):
attr = getattr(cls, attr_name)
if hasattr(attr, "__command__"):
command = attr.__command__(self)
# command.params.extend(_options)
self._cli.add_command(command)
return self
def run(self):
"""Run the CLI application."""
self()
def __call__(self):
"""Run the CLI application."""
self._cli() | [
"click.Group",
"click.Option"
] | [((1086, 1099), 'click.Group', 'click.Group', ([], {}), '()\n', (1097, 1099), False, 'import click\n'), ((864, 920), 'click.Option', 'click.Option', ([], {'param_decls': 'self.param_decls'}), '(param_decls=self.param_decls, **self.attrs)\n', (876, 920), False, 'import click\n')] |
# Leechy Prototype Spectrum Analyzer.
# Important: MAKE SURE KEYBOARD IS ON ENGLISH AND CAPSLOCK IS NOT ON!
import cv2,pickle,xlsxwriter,time,datetime,os, os.path
from imutils import rotate_bound
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from PIL import Image, ImageTk
print('\033[1m' + '\033[91m' + '\nLeechy Labs | Spectrum Analyser\n' + '\033[0m')
drunkdatanum = 0
not_drunkdatanum = 0
DRUNK_DIR = 'data/graph/drunk'
drunkdatanum = len([name for name in os.listdir(DRUNK_DIR) if os.path.isfile(os.path.join(DRUNK_DIR, name))])
NOT_DRUNK_DIR = 'data/graph/not_drunk'
not_drunkdatanum = len([name for name in os.listdir(DRUNK_DIR) if os.path.isfile(os.path.join(NOT_DRUNK_DIR, name))])
row = 1
sensetivity = 615
x1 = 480
y1 = 1000
x2 = 578
y2 = 1040
angle = 52.5
flip = False
plot_graph = False
cmp_graph = [500]
nor_color = (0, 0, 1)
mouse_rectangle = False
clear = True
live = False
auto = False
plt.ion()
def rotate_clockwise(matrix, degree=90):
try: return matrix if not degree else rotate_clockwise(zip(*matrix[::-1]), degree-90)
except TypeError: return [[0]]
def mouse_event(event,x,y,flags,param):
global x1, x2, y1, y2, mouse_rectangle
if event == cv2.EVENT_LBUTTONDOWN:
mouse_rectangle = True
x1,y1 = x,y
elif event == cv2.EVENT_MOUSEMOVE:
if mouse_rectangle == True:
x2,y2 = x, y
elif event == cv2.EVENT_LBUTTONUP:
x2,y2 = x, y
if x2 < x1: x1,x2 = x2,x1
if y2 < y1: y1,y2 = y2,y1
mouse_rectangle = False
cv2.namedWindow("Spectrum Analyser", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Spectrum Analyser", mouse_event)
vc = cv2.VideoCapture(0)
SHEET_DIR = "data/sheet"
sheet_name = "data"
if len([name for name in os.listdir(SHEET_DIR) if os.path.isfile(os.path.join(SHEET_DIR, name))])-1 > 0:
sheet_name = sheet_name + str(len([name for name in os.listdir(SHEET_DIR) if os.path.isfile(os.path.join(SHEET_DIR, name))])-1)
workbook = xlsxwriter.Workbook('data/sheet/' + sheet_name + '.xlsx')
worksheet = workbook.add_worksheet()
worksheet.set_column("A1:J5", 52)
worksheet.set_default_row(220)
worksheet.write(0,1, "Graph")
worksheet.write(0,2, "Status")
worksheet.write(0,3, "Image")
worksheet.write(0,4, "Settings")
worksheet.write(0,5, "Date")
if row < 0 :
print("row can't be less then 0!")
exit()
def AddToWorksheet(row,status):
worksheet.insert_image(row,1, 'data/graph/' + str(status) + '/' + (str(not_drunkdatanum) if status=="not_drunk" else str(drunkdatanum)) + '.png', {'x_scale': 0.47,'y_scale': 0.5,'x_offset': 2,'y_offset': 2,'positioning': 1})
worksheet.insert_image(row,3, 'data/frames/' + str(status) + '/' + (str(not_drunkdatanum) if status=="not_drunk" else str(drunkdatanum)) + '.png', {'x_scale': 0.47,'y_scale': 0.5,'x_offset': 2,'y_offset': 2,'positioning': 1})
worksheet.write(row, 0, row)
worksheet.write(row, 2, status)
worksheet.write(row, 4, str(settingsJson))
worksheet.write(row, 5, str(datetime.datetime.now()))
print("\n----------------\nSaved!" + "\nRow:" + str(row) + "\nNot Drunk Data Num:" + str(not_drunkdatanum) + "\nDrunk Data Num:" + str(drunkdatanum))
def AutoSpectrumFinder(frame):
frame = (rotate_bound(frame, angle))
height, width = frame.shape[:2]
start_row, start_col = 0, 0
end_row, end_col = height, width // 2
frame = frame[start_row:end_row , start_col:end_col]
grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret,th = cv2.threshold(grayscale,127,255, 0)
contours, hierarchy = cv2.findContours(th, 2, 1)
cnt = contours
big_contour = []
max = 0
for i in cnt:
area = cv2.contourArea(i)
if(area > max):
max = area
big_contour = i
final = cv2.drawContours(frame, big_contour, -1, (0,255,0), 3)
point = big_contour[int(len(big_contour)/2)][0]
x = point[0]
y = point[1]
return x - 50, y - 20, x + 50, y + 10
if vc.isOpened(): rval, frame = vc.read()
else: rval = False
while rval:
frame = (rotate_bound(frame, angle))
prntframe = frame.copy()
cv2.rectangle(prntframe, (x1, y1), (x2, y2), (0,100,0), 2)
prntframe = prntframe
try: cutframe = cv2.cvtColor(frame[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY)
except cv2.error: cutframe = [[0]]
cv2.imshow("Spectrum Analyser", prntframe)
key = cv2.waitKey(500)
rval, frame = vc.read()
graph = [500]
if plot_graph:
if not mouse_rectangle:
if live == False: plot_graph = False
else: plt.clf()
for line in list(rotate_clockwise(cutframe)):
bright = 0
for point in line: bright += point
graph.append(bright)
graph.append(500)
plt.ylim(top=max(graph))
plt.ylim(bottom=min(graph))
if not cmp_graph == [500]:
plt.plot(cmp_graph[::-1] if flip else cmp_graph, color=(0, 0, 0))
plt.ylim(top=max([max(cmp_graph), max(graph)]))
plt.ylim(bottom=min([min(cmp_graph), min(graph)]))
nor_color = (0, 1, 0)
for i, x in enumerate(graph):
if abs(cmp_graph[i] - x) < sensetivity: nor_color = (1, 0, 0)
plt.plot(graph[::-1] if flip else graph, color=nor_color)
plt.draw()
plt.pause(0.001)
else: plt.draw()
if key == ord("w"): y1 -= 2
elif key == ord("s"): y2 += 2
elif key == ord("l"):
if live: live = False
else: live = True
print("Live = " + str(live))
elif key == ord("/"):
print("Searching...")
try:
x1, y1, x2, y2 = AutoSpectrumFinder(frame)
print("Found", (x1, y1, x2, y2))
except:
print("Could'nt find, please select manually")
elif key == ord("\\"): cv2.destroyWindow("mask")
elif key == ord("a"): x1 -= 2
elif key == ord("d"): x2 += 2
elif key == ord("z"): y1 += 2
elif key == ord("x"): y2 -= 2
elif key == ord("q"): x1 += 2
elif key == ord("e"): x2 -= 2
elif key == ord("r"):
angle -= 0.5
if angle <= 0: angle = 360
print("Angle = " + str(angle))
elif key == ord("t"):
angle += 0.5
if angle >= 360: angle = 0
print("Angle = " + str(angle))
elif key == ord("y"):
angle += 180
sleep(0.1)
if angle <= 0: angle = 360
if angle >= 360: angle = 0
print("Angle = " + str(angle))
elif key == ord("n"):
sensetivity -= 10
print("sensitivity = " + str(sensetivity))
elif key == ord("m"):
sensetivity += 10
print("sensitivity = " + str(sensetivity))
elif key == ord("o"):
plot_graph = False
plt.clf()
plt.close()
print("\nGraph: Closed")
elif key == ord("p"):
plot_graph = True
plt.clf()
print("\nGraph: Plotting...")
elif key == ord("v"):
flip = not flip
sleep(0.1)
print("Flip = " + str(flip))
elif key == ord("c"):
print("System: Closed")
save = ""
while save != 'y' or save != 'n':
save = input("Do you want to save the sheet? (y/n)")
if save == "n":
workbook.close()
os.remove("data/sheet/" + sheet_name + ".xlsx")
print("Deleted!")
break
else:
workbook.close()
break
break
elif key == ord("k"):
cmp_graph = graph
nor_color = (0,1,0)
elif key == ord("j"):
cmp_graph = [500]
nor_color = (1, 0, 0)
elif key == ord("h"):
pickle.dump({"sensitivity": sensetivity, "x1": x1, "y1": y1, "x2": x2, "y2": y2, "angle": angle, "flip": flip}, open("settings.p", "wb"))
elif key == ord("g"):
try:
print("Paused graph, press CTRL+C while focused on the terminal to resume it.")
plt.pause(10000000)
except KeyboardInterrupt:
print("Resumed program")
elif key == ord("]") and not live:
print("Recorded Drunk!")
plt.savefig('data/graph/drunk/' + str(drunkdatanum) + '.png')
cv2.imwrite("data/frames/drunk/" + str(drunkdatanum) + '.png', cutframe)
AddToWorksheet(row,"drunk")
drunkdatanum += 1
row += 1
elif key == ord("[") and not live:
print("Recorded Not Drunk!")
plt.savefig('data/graph/not_drunk/' + str(not_drunkdatanum) + '.png')
cv2.imwrite("data/frames/not_drunk/" + str(drunkdatanum) + '.png', cutframe)
AddToWorksheet(row,"not_drunk")
not_drunkdatanum += 1
row += 1
cv2.destroyWindow("Infrared spectrometer")
workbook.close() | [
"cv2.rectangle",
"cv2.imshow",
"os.remove",
"cv2.setMouseCallback",
"os.listdir",
"cv2.threshold",
"matplotlib.pyplot.plot",
"cv2.contourArea",
"matplotlib.pyplot.close",
"cv2.waitKey",
"xlsxwriter.Workbook",
"cv2.drawContours",
"cv2.cvtColor",
"matplotlib.pyplot.ion",
"matplotlib.pyplot... | [((995, 1004), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1615), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Spectrum Analyser"""', 'cv2.WINDOW_NORMAL'], {}), "('Spectrum Analyser', cv2.WINDOW_NORMAL)\n", (1575, 1615), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((1617, 1671), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Spectrum Analyser"""', 'mouse_event'], {}), "('Spectrum Analyser', mouse_event)\n", (1637, 1671), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((1680, 1699), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1696, 1699), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((2001, 2058), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (["('data/sheet/' + sheet_name + '.xlsx')"], {}), "('data/sheet/' + sheet_name + '.xlsx')\n", (2020, 2058), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((8014, 8056), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Infrared spectrometer"""'], {}), "('Infrared spectrometer')\n", (8031, 8056), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((3243, 3269), 'imutils.rotate_bound', 'rotate_bound', (['frame', 'angle'], {}), '(frame, angle)\n', (3255, 3269), False, 'from imutils import rotate_bound\n'), ((3450, 3489), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3462, 3489), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((3501, 3538), 'cv2.threshold', 'cv2.threshold', (['grayscale', '(127)', '(255)', '(0)'], {}), '(grayscale, 127, 255, 0)\n', (3514, 3538), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((3564, 3590), 'cv2.findContours', 'cv2.findContours', (['th', '(2)', '(1)'], {}), '(th, 2, 1)\n', (3580, 3590), False, 'import cv2, pickle, xlsxwriter, time, 
datetime, os, os.path\n'), ((3748, 3804), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'big_contour', '(-1)', '(0, 255, 0)', '(3)'], {}), '(frame, big_contour, -1, (0, 255, 0), 3)\n', (3764, 3804), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((4018, 4044), 'imutils.rotate_bound', 'rotate_bound', (['frame', 'angle'], {}), '(frame, angle)\n', (4030, 4044), False, 'from imutils import rotate_bound\n'), ((4075, 4135), 'cv2.rectangle', 'cv2.rectangle', (['prntframe', '(x1, y1)', '(x2, y2)', '(0, 100, 0)', '(2)'], {}), '(prntframe, (x1, y1), (x2, y2), (0, 100, 0), 2)\n', (4088, 4135), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((4275, 4317), 'cv2.imshow', 'cv2.imshow', (['"""Spectrum Analyser"""', 'prntframe'], {}), "('Spectrum Analyser', prntframe)\n", (4285, 4317), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((4326, 4342), 'cv2.waitKey', 'cv2.waitKey', (['(500)'], {}), '(500)\n', (4337, 4342), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((3663, 3681), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (3678, 3681), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((4179, 4232), 'cv2.cvtColor', 'cv2.cvtColor', (['frame[y1:y2, x1:x2]', 'cv2.COLOR_BGR2GRAY'], {}), '(frame[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY)\n', (4191, 4232), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((525, 546), 'os.listdir', 'os.listdir', (['DRUNK_DIR'], {}), '(DRUNK_DIR)\n', (535, 546), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((682, 703), 'os.listdir', 'os.listdir', (['DRUNK_DIR'], {}), '(DRUNK_DIR)\n', (692, 703), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((3014, 3037), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3035, 3037), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((5061, 5118), 
'matplotlib.pyplot.plot', 'plt.plot', (['(graph[::-1] if flip else graph)'], {'color': 'nor_color'}), '(graph[::-1] if flip else graph, color=nor_color)\n', (5069, 5118), True, 'import matplotlib.pyplot as plt\n'), ((5123, 5133), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5131, 5133), True, 'import matplotlib.pyplot as plt\n'), ((5138, 5154), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (5147, 5154), True, 'import matplotlib.pyplot as plt\n'), ((5164, 5174), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5172, 5174), True, 'import matplotlib.pyplot as plt\n'), ((565, 594), 'os.path.join', 'os.path.join', (['DRUNK_DIR', 'name'], {}), '(DRUNK_DIR, name)\n', (577, 594), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((722, 755), 'os.path.join', 'os.path.join', (['NOT_DRUNK_DIR', 'name'], {}), '(NOT_DRUNK_DIR, name)\n', (734, 755), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((4484, 4493), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4491, 4493), True, 'import matplotlib.pyplot as plt\n'), ((4749, 4814), 'matplotlib.pyplot.plot', 'plt.plot', (['(cmp_graph[::-1] if flip else cmp_graph)'], {'color': '(0, 0, 0)'}), '(cmp_graph[::-1] if flip else cmp_graph, color=(0, 0, 0))\n', (4757, 4814), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1798), 'os.listdir', 'os.listdir', (['SHEET_DIR'], {}), '(SHEET_DIR)\n', (1787, 1798), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((1817, 1846), 'os.path.join', 'os.path.join', (['SHEET_DIR', 'name'], {}), '(SHEET_DIR, name)\n', (1829, 1846), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((5571, 5596), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""mask"""'], {}), "('mask')\n", (5588, 5596), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((1911, 1932), 'os.listdir', 'os.listdir', (['SHEET_DIR'], {}), '(SHEET_DIR)\n', (1921, 1932), False, 
'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((1951, 1980), 'os.path.join', 'os.path.join', (['SHEET_DIR', 'name'], {}), '(SHEET_DIR, name)\n', (1963, 1980), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((6376, 6385), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6383, 6385), True, 'import matplotlib.pyplot as plt\n'), ((6389, 6400), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6398, 6400), True, 'import matplotlib.pyplot as plt\n'), ((6477, 6486), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6484, 6486), True, 'import matplotlib.pyplot as plt\n'), ((6814, 6861), 'os.remove', 'os.remove', (["('data/sheet/' + sheet_name + '.xlsx')"], {}), "('data/sheet/' + sheet_name + '.xlsx')\n", (6823, 6861), False, 'import cv2, pickle, xlsxwriter, time, datetime, os, os.path\n'), ((7371, 7390), 'matplotlib.pyplot.pause', 'plt.pause', (['(10000000)'], {}), '(10000000)\n', (7380, 7390), True, 'import matplotlib.pyplot as plt\n')] |
"""
Spacer components to add horizontal or vertical space to a layout.
"""
import param
from bokeh.models import Div as BkDiv, Spacer as BkSpacer
from ..reactive import Reactive
class Spacer(Reactive):
"""
The `Spacer` layout is a very versatile component which makes it easy to
put fixed or responsive spacing between objects.
Like all other components spacers support both absolute and responsive
sizing modes.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Row(
... 1, pn.Spacer(width=200),
... 2, pn.Spacer(width=100),
... 3
... )
"""
_bokeh_model = BkSpacer
def _get_model(self, doc, root=None, parent=None, comm=None):
properties = self._process_param_change(self._init_params())
model = self._bokeh_model(**properties)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
class VSpacer(Spacer):
"""
The `VSpacer` layout provides responsive vertical spacing.
Using this component we can space objects equidistantly in a layout and
allow the empty space to shrink when the browser is resized.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Column(
... pn.layout.VSpacer(), 'Item 1',
... pn.layout.VSpacer(), 'Item 2',
... pn.layout.VSpacer()
... )
"""
sizing_mode = param.Parameter(default='stretch_height', readonly=True)
class HSpacer(Spacer):
"""
The `HSpacer` layout provides responsive vertical spacing.
Using this component we can space objects equidistantly in a layout and
allow the empty space to shrink when the browser is resized.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Row(
... pn.layout.HSpacer(), 'Item 1',
... pn.layout.HSpacer(), 'Item 2',
... pn.layout.HSpacer()
... )
"""
sizing_mode = param.Parameter(default='stretch_width', readonly=True)
class Divider(Reactive):
"""
A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate
multiple components in a layout. It automatically spans the full width of
the container.
Reference: https://panel.holoviz.org/reference/layouts/Divider.html
:Example:
>>> pn.Column(
... '# Lorem Ipsum',
... pn.layout.Divider(),
... 'A very long text... '
>>> )
"""
width_policy = param.ObjectSelector(default="fit", readonly=True)
_bokeh_model = BkDiv
def _get_model(self, doc, root=None, parent=None, comm=None):
properties = self._process_param_change(self._init_params())
properties['style'] = {'width': '100%', 'height': '100%'}
model = self._bokeh_model(text='<hr style="margin: 0px">', **properties)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
| [
"param.Parameter",
"param.ObjectSelector"
] | [((1501, 1557), 'param.Parameter', 'param.Parameter', ([], {'default': '"""stretch_height"""', 'readonly': '(True)'}), "(default='stretch_height', readonly=True)\n", (1516, 1557), False, 'import param\n'), ((2063, 2118), 'param.Parameter', 'param.Parameter', ([], {'default': '"""stretch_width"""', 'readonly': '(True)'}), "(default='stretch_width', readonly=True)\n", (2078, 2118), False, 'import param\n'), ((2569, 2619), 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '"""fit"""', 'readonly': '(True)'}), "(default='fit', readonly=True)\n", (2589, 2619), False, 'import param\n')] |
#!/usr/bin/env python3
import member
m1 = member.SomeClass("Pavel")
print ("name =",m1.name)
m1.name = "Gunther"
print ("name =",m1.name)
m1.number = 7.3
print ("number =",m1.number)
| [
"member.SomeClass"
] | [((44, 69), 'member.SomeClass', 'member.SomeClass', (['"""Pavel"""'], {}), "('Pavel')\n", (60, 69), False, 'import member\n')] |
from distutils.core import setup
setup(name='Bluemix',
version='0.1',
description='A bluemix datasource to be used with cloudbase-init',
packages=['bluemix', 'bluemix.conf'])
| [
"distutils.core.setup"
] | [((34, 185), 'distutils.core.setup', 'setup', ([], {'name': '"""Bluemix"""', 'version': '"""0.1"""', 'description': '"""A bluemix datasource to be used with cloudbase-init"""', 'packages': "['bluemix', 'bluemix.conf']"}), "(name='Bluemix', version='0.1', description=\n 'A bluemix datasource to be used with cloudbase-init', packages=[\n 'bluemix', 'bluemix.conf'])\n", (39, 185), False, 'from distutils.core import setup\n')] |
import matplotlib
matplotlib.use('Agg') # this lets us do some headless stuff
import matplotlib.pylab as plt
import numpy as np
x = np.asarray([0,5,2])
y = np.asarray([0,1,3])
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(x,y)
#plt.show() # we have a headless display, can't do this!
f.savefig('basicplot.eps',format='eps',orientation='portrait',transparent=True,dpi=5e4)
| [
"matplotlib.use",
"numpy.asarray",
"matplotlib.pylab.figure"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((133, 154), 'numpy.asarray', 'np.asarray', (['[0, 5, 2]'], {}), '([0, 5, 2])\n', (143, 154), True, 'import numpy as np\n'), ((157, 178), 'numpy.asarray', 'np.asarray', (['[0, 1, 3]'], {}), '([0, 1, 3])\n', (167, 178), True, 'import numpy as np\n'), ((181, 193), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (191, 193), True, 'import matplotlib.pylab as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from itertools import product
from pathlib import Path
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.direction import (Direction, get_diagonal_directions,
get_cross_directions)
from annotation.piece import Piece
from ..ry import BlackRyMoveLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/14'
class TestBlackRyMove(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_ry_move(self):
"""
RYについて成り、成らずの判定のテスト
利きが通るかどうかは別のところで判定しているので、ここでは考えない
:return:
"""
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
# 移動距離ごとに用意
effect = {
direction: [np.empty(shape, dtype=np.bool) for _ in range(8)]
for direction in get_cross_directions()
}
effect.update({
direction: np.empty(shape, dtype=np.bool)
for direction in get_diagonal_directions()
})
board = np.empty(shape, dtype=np.int32)
ph_board = tf.placeholder(tf.int32, shape=shape)
ry_effect = {
direction: [
tf.placeholder(tf.bool, shape=shape) for _ in range(8)
] for direction in get_cross_directions()
}
ry_effect.update({
direction: tf.placeholder(tf.bool, shape=shape)
for direction in get_diagonal_directions()
})
non_promoting = BlackRyMoveLayer()(ph_board, ry_effect)
# アクセスしやすいように次元を下げる
non_promoting = {key: tf.squeeze(value)
for key, value in non_promoting.items()}
feed_dict = {}
for direction, ph_list in ry_effect.items():
if direction in get_cross_directions():
for ph, e in zip(ph_list, effect[direction]):
feed_dict[ph] = e
else:
feed_dict[ph_list] = effect[direction]
feed_dict[ph_board] = board
with self.test_session() as sess:
for i, j, piece in product(range(9), range(9), range(Piece.SIZE)):
for direction, effect_list in effect.items():
if direction in get_cross_directions():
for e in effect_list:
e[:] = False
if self.data_format == 'NCHW':
e[0, 0, i, j] = True
else:
e[0, i, j, 0] = True
else:
effect_list[:] = False
if self.data_format == 'NCHW':
effect_list[0, 0, i, j] = True
else:
effect_list[0, i, j, 0] = True
piece = Piece(piece)
board[:] = piece
n = sess.run(non_promoting, feed_dict=feed_dict)
b = np.squeeze(board)
for direction, distance in product(effect.keys(), range(8)):
if direction in get_diagonal_directions():
if distance > 0:
continue
if (direction == Direction.RIGHT_UP and
(i == 8 or j == 8)):
continue
elif (direction == Direction.RIGHT_DOWN and
(i == 8 or j == 0)):
continue
elif (direction == Direction.LEFT_UP and
(i == 8 or j == 8)):
continue
elif (direction == Direction.LEFT_DOWN and
(i == 0 or j == 0)):
continue
if direction == Direction.RIGHT:
if i + distance >= 8:
continue
elif direction == Direction.UP:
if j + distance >= 8:
continue
elif direction == Direction.DOWN:
if j - distance <= 0:
continue
elif direction == Direction.LEFT:
if i - distance <= 0:
continue
n_move = n[direction]
if direction in get_cross_directions():
with self.subTest(i=i, j=j, piece=piece,
direction=direction,
distance=distance):
self.assertTupleEqual((8, 9, 9), n_move.shape)
if b[i, j] < Piece.WHITE_FU:
# 自身の駒があって動けない
self.assertFalse(np.all(n_move[distance]))
else:
self.assertTrue(n_move[distance, i, j])
n_move[distance, i, j] = False
self.assertFalse(np.all(n_move[distance]))
else:
with self.subTest(i=i, j=j, piece=piece,
direction=direction):
self.assertTupleEqual((9, 9), n_move.shape)
if b[i, j] < Piece.WHITE_FU:
# 自身の駒があって動けない
self.assertFalse(np.all(n_move))
else:
self.assertTrue(n_move[i, j])
n_move[i, j] = False
self.assertFalse(np.all(n_move))
| [
"annotation.direction.get_cross_directions",
"pathlib.Path",
"tensorflow.placeholder",
"os.environ.get",
"numpy.squeeze",
"annotation.piece.Piece",
"numpy.empty",
"annotation.direction.get_diagonal_directions",
"numpy.all",
"tensorflow.squeeze"
] | [((639, 668), 'os.environ.get', 'os.environ.get', (['"""DATA_FORMAT"""'], {}), "('DATA_FORMAT')\n", (653, 668), False, 'import os\n'), ((1280, 1311), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.int32'}), '(shape, dtype=np.int32)\n', (1288, 1311), True, 'import numpy as np\n'), ((1332, 1369), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'shape'}), '(tf.int32, shape=shape)\n', (1346, 1369), True, 'import tensorflow as tf\n'), ((698, 725), 'os.environ.get', 'os.environ.get', (['"""USE_CUDNN"""'], {}), "('USE_CUDNN')\n", (712, 725), False, 'import os\n'), ((1827, 1844), 'tensorflow.squeeze', 'tf.squeeze', (['value'], {}), '(value)\n', (1837, 1844), True, 'import tensorflow as tf\n'), ((1007, 1037), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.bool'}), '(shape, dtype=np.bool)\n', (1015, 1037), True, 'import numpy as np\n'), ((1086, 1108), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (1106, 1108), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((1166, 1196), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.bool'}), '(shape, dtype=np.bool)\n', (1174, 1196), True, 'import numpy as np\n'), ((1433, 1469), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': 'shape'}), '(tf.bool, shape=shape)\n', (1447, 1469), True, 'import tensorflow as tf\n'), ((1519, 1541), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (1539, 1541), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((1602, 1638), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': 'shape'}), '(tf.bool, shape=shape)\n', (1616, 1638), True, 'import tensorflow as tf\n'), ((2016, 2038), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (2036, 2038), False, 'from annotation.direction import Direction, 
get_diagonal_directions, get_cross_directions\n'), ((3080, 3092), 'annotation.piece.Piece', 'Piece', (['piece'], {}), '(piece)\n', (3085, 3092), False, 'from annotation.piece import Piece\n'), ((3213, 3230), 'numpy.squeeze', 'np.squeeze', (['board'], {}), '(board)\n', (3223, 3230), True, 'import numpy as np\n'), ((539, 553), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (543, 553), False, 'from pathlib import Path\n'), ((1226, 1251), 'annotation.direction.get_diagonal_directions', 'get_diagonal_directions', ([], {}), '()\n', (1249, 1251), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((1668, 1693), 'annotation.direction.get_diagonal_directions', 'get_diagonal_directions', ([], {}), '()\n', (1691, 1693), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((2469, 2491), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (2489, 2491), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((3345, 3370), 'annotation.direction.get_diagonal_directions', 'get_diagonal_directions', ([], {}), '()\n', (3368, 3370), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((4701, 4723), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (4721, 4723), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((5144, 5168), 'numpy.all', 'np.all', (['n_move[distance]'], {}), '(n_move[distance])\n', (5150, 5168), True, 'import numpy as np\n'), ((5388, 5412), 'numpy.all', 'np.all', (['n_move[distance]'], {}), '(n_move[distance])\n', (5394, 5412), True, 'import numpy as np\n'), ((5795, 5809), 'numpy.all', 'np.all', (['n_move'], {}), '(n_move)\n', (5801, 5809), True, 'import numpy as np\n'), ((6009, 6023), 'numpy.all', 'np.all', (['n_move'], {}), 
'(n_move)\n', (6015, 6023), True, 'import numpy as np\n')] |
# Generated by Django 2.0.2 on 2018-03-22 21:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('palsbet', '0002_viptipsgames'),
]
operations = [
migrations.AlterField(
model_name='viptipsgames',
name='cathegory',
field=models.CharField(max_length=100),
),
]
| [
"django.db.models.CharField"
] | [((339, 371), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (355, 371), False, 'from django.db import migrations, models\n')] |
import gym
from gym import spaces
import numpy as np
import learning_data
from Simulation import Simulation
def get_value_or_delimiter(value, delimiter):
return min(delimiter[1], max(delimiter[0], value))
class Environment(gym.Env):
def __init__(self, simulation, training):
super(Environment, self).__init__()
self.simulation = simulation
self.training = training
self.action_space = spaces.Discrete(2)
observation_space_dictionary = dict()
observation_space_dictionary['lights_settings'] = spaces.Box(low=0, high=1, shape=(1,), dtype=np.uint8)
observation_space_dictionary['intersection_cars'] = spaces.Box(low=0, high=1000, shape=(2,), dtype=np.uint8)
observation_space_dictionary['input_cars'] = spaces.Box(low=0, high=1000, shape=(2,), dtype=np.uint8)
observation_space_dictionary['output_cars'] = spaces.Box(low=0, high=1000, shape=(2,), dtype=np.uint8)
self.observation_space = spaces.Dict(observation_space_dictionary)
def reset(self, reset_simulation=True):
print('\033[94m' + 'resetting environment...' + '\033[0;0m')
if reset_simulation:
self.simulation = Simulation(self.simulation.traffic_volume, self.simulation.rows, self.simulation.cols, self.simulation.road_length, self.simulation.simulation_time)
observation = self.simulation.get_observation()
learning_data.previous_observation = observation
return observation
def step(self, action):
if self.training:
previous_observation = learning_data.previous_observation
done = self.simulation.advance_step(action)
current_observation = self.simulation.get_observation()
learning_data.previous_observation = current_observation
reward = self.definitive2x2(previous_observation, action)
else:
done = self.simulation.advance_step(action)
current_observation = self.simulation.get_observation()
reward = 0
return current_observation, reward, done, {}
def render(self, mode='human', close=False):
pass
def reward_function_1(self, previous_observation, current_observation):
previous_horizontal_waiting_time = 0
previous_vertical_waiting_time = 0
current_horizontal_waiting_time = 0
current_vertical_waiting_time = 0
for intersection in range(self.simulation.rows * self.simulation.cols):
for car in range(self.simulation.road_length):
if previous_observation['horizontal_waiting_time'][intersection][car] != -1:
previous_horizontal_waiting_time += previous_observation['horizontal_waiting_time'][intersection][car]
if previous_observation['vertical_waiting_time'][intersection][car] != -1:
previous_vertical_waiting_time += previous_observation['vertical_waiting_time'][intersection][car]
if current_observation['horizontal_waiting_time'][intersection][car] != -1:
current_horizontal_waiting_time += current_observation['horizontal_waiting_time'][intersection][car]
if current_observation['vertical_waiting_time'][intersection][car] != -1:
current_vertical_waiting_time += current_observation['vertical_waiting_time'][intersection][car]
previous_waiting_time = previous_horizontal_waiting_time + previous_vertical_waiting_time
current_waiting_time = current_horizontal_waiting_time + current_vertical_waiting_time
return previous_waiting_time - current_waiting_time
def reward_function_2(self, previous_observation, action):
reward = 0
num_of_intersections = self.simulation.rows * self.simulation.cols
for intersection in range(num_of_intersections):
# If the vertical lights turn green
if previous_observation['lights_settings'][intersection] == 1 and action[intersection] >= 0.5:
reward = reward + previous_observation['vertical_num_of_cars_waiting'][intersection] - previous_observation['horizontal_num_of_cars_waiting'][intersection]
# If the horizontal lights turn green
elif previous_observation['lights_settings'][intersection] == 0 and action[intersection] >= 0.5:
reward = reward + previous_observation['horizontal_num_of_cars_waiting'][intersection] - previous_observation['vertical_num_of_cars_waiting'][intersection]
return reward
def reward_function_3(self, previous_observation, action):
reward = 0
num_of_intersections = self.simulation.rows * self.simulation.cols
for intersection in range(num_of_intersections):
# If the vertical lights turn green
if previous_observation['lights_settings'][intersection] >= 1 and action[intersection] >= 0.5:
for car in range(self.simulation.road_length):
if previous_observation['vertical_waiting_time'][intersection][car] != -1:
reward += previous_observation['vertical_waiting_time'][intersection][car]
if previous_observation['horizontal_waiting_time'][intersection][car] != -1:
reward -= previous_observation['horizontal_waiting_time'][intersection][car]
# If the horizontal lights turn green
if previous_observation['lights_settings'][intersection] == 0 and action[intersection] >= 0.5:
for car in range(self.simulation.road_length):
if previous_observation['horizontal_waiting_time'][intersection][car] != -1:
reward += previous_observation['horizontal_waiting_time'][intersection][car]
if previous_observation['vertical_waiting_time'][intersection][car] != -1:
reward -= previous_observation['vertical_waiting_time'][intersection][car]
return reward
def reward_function_4(self, previous_observation, action):
reward = 0
num_of_intersections = self.simulation.rows * self.simulation.cols
for intersection in range(num_of_intersections):
# If the vertical lights turn green
if previous_observation['lights_settings'][intersection] == 1 and action[intersection] == 1:
if previous_observation['vertical_waiting_time'][intersection][0] != -1:
reward += previous_observation['vertical_waiting_time'][intersection][0]
if previous_observation['horizontal_waiting_time'][intersection][0] != -1:
reward -= previous_observation['horizontal_waiting_time'][intersection][0]
# If the horizontal lights turn green
if previous_observation['lights_settings'][intersection] == 0 and action[intersection] == 1:
if previous_observation['horizontal_waiting_time'][intersection][0] != -1:
reward += previous_observation['horizontal_waiting_time'][intersection][0]
if previous_observation['vertical_waiting_time'][intersection][0] != -1:
reward -= previous_observation['vertical_waiting_time'][intersection][0]
return reward
def reward_function_6(self, current_observation):
return - current_observation['average_waiting_time'][0]
def reward_function_7(self, previous_observation, action):
reward = 0
if action < self.simulation.rows * self.simulation.cols:
if previous_observation['ready_to_switch'][action] == 0:
reward -= 1
return reward
def reward_function_8(self, previous_observation, action):
reward = 0
if action < self.simulation.rows * self.simulation.cols:
if previous_observation['lights_settings'][action] == 1:
reward += (previous_observation['vertical_num_of_cars_waiting'][action] - previous_observation['horizontal_num_of_cars_waiting'][action]) / self.simulation.road_length
else:
reward += (previous_observation['horizontal_num_of_cars_waiting'][action] - previous_observation['vertical_num_of_cars_waiting'][action]) / self.simulation.road_length
if previous_observation['ready_to_switch'][action] == 0:
reward -= 1
return reward
def reward_function_9(self, previous_observation, action):
reward = -4
if action < self.simulation.rows * self.simulation.cols:
if previous_observation['lights_settings'][action] == 1:
reward += previous_observation['vertical_num_of_cars_waiting'][action] - previous_observation['horizontal_num_of_cars_waiting'][action]
else:
reward += previous_observation['horizontal_num_of_cars_waiting'][action] - previous_observation['vertical_num_of_cars_waiting'][action]
return reward
def reward_function_10(self, previous_observation, action):
if action < self.simulation.rows * self.simulation.cols:
# print('vertical cars: ' + str(previous_observation['vertical_num_of_cars'][action]), 'horizontal cars: ' + str(previous_observation['horizontal_num_of_cars'][action]))
# reward = -10 if previous_observation['ready_to_switch'][action] == 0 else 0
reward = - self.simulation.road_length / 2
if previous_observation['lights_settings'][action] == 1:
reward += min(previous_observation['vertical_num_of_cars'][action] - previous_observation['horizontal_num_of_cars'][action], self.simulation.road_length)
else:
reward += min(previous_observation['horizontal_num_of_cars'][action] - previous_observation['vertical_num_of_cars'][action], self.simulation.road_length)
else:
reward = 0
not_action = action - self.simulation.rows * self.simulation.cols
if previous_observation['lights_settings'][not_action] == 1:
reward += max(previous_observation['horizontal_num_of_cars'][not_action] - previous_observation['vertical_num_of_cars'][not_action], - self.simulation.road_length)
else:
reward += max(previous_observation['vertical_num_of_cars'][not_action] - previous_observation['horizontal_num_of_cars'][not_action], - self.simulation.road_length)
# print('action: ' + str(action) + ', reward: ' + str(reward))
return reward
def reward_function_11(self, prev_obs, action):
reward = 0
if action == 1:
reward = - self.simulation.road_length
if prev_obs['lights_settings'][0] == 1:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][0][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][1][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][1] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
else:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][0][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][1][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][0] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
else:
if prev_obs['lights_settings'][0] == 0:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][0][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][1][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][0] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
else:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][0][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][1][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][1] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
# print('action: ' + str(action) + ', reward: ' + str(reward))
# print('vertical cars: ' + str(prev_obs['num_of_cars'][0][0]), 'horizontal cars: ' + str(prev_obs['num_of_cars'][0][1]) + '\n')
return reward
def reward_function_12(self, prev_obs, action):
reward = 0
if prev_obs['ready_to_switch'] == 0:
if action == 1:
reward = -10
else:
if action == 1:
if prev_obs['lights_settings'][0] == 1:
reward += 1 if prev_obs['num_of_cars'][0][0] > 8 and prev_obs['num_of_cars'][0][1] < 2 else 0
# reward += 1 if prev_obs['num_of_cars'][1][0] < 2 else 0
else:
reward += 1 if prev_obs['num_of_cars'][0][1] > 8 and prev_obs['num_of_cars'][0][0] < 2 else 0
# reward += 1 if prev_obs['num_of_cars'][2][1] < 2 else 0
else:
if prev_obs['lights_settings'][0] == 0:
reward += 1 if prev_obs['num_of_cars'][0][0] > 8 else 0
else:
reward += 1 if prev_obs['num_of_cars'][0][1] > 8 else 0
# if self.simulation.intersection_to_process == 2:
# print('action: ' + str(action) + ', reward: ' + str(reward))
# print('vertical cars: ' + str(prev_obs['num_of_cars'][0][0]), 'horizontal cars: ' + str(prev_obs['num_of_cars'][0][1]) + '\n')
return reward
def definitive2x2(self, prev_obs, action):
reward = 0
vertical_load = prev_obs['intersection_cars'][0] # + prev_obs['input_cars'][0]
horizontal_load = prev_obs['intersection_cars'][1] # + prev_obs['input_cars'][1]
if prev_obs['lights_settings'][0] == 0:
if horizontal_load > 2 * vertical_load:
if action == 1:
reward = 1
else:
reward = -1
elif vertical_load > 2 * horizontal_load:
if action == 1:
reward = -1
else:
reward = 1
elif action == 1:
reward = -1
if prev_obs['lights_settings'][0] == 1:
if horizontal_load > 2 * vertical_load:
if action == 1:
reward = -1
else:
reward = 1
elif vertical_load > 2 * horizontal_load:
if action == 1:
reward = 1
else:
reward = -1
elif action == 1:
reward = -1
if self.simulation.intersection_to_process == 20:
print('The vertical load is: ' + str(vertical_load))
print('The horizontal load is: ' + str(horizontal_load))
print('The green light is: ' + str('VERTICAL' if prev_obs['lights_settings'][0] == 0 else 'HORIZONTAL'))
print('The action is: ' + str('MAINTAIN' if action == 0 else 'CHANGE'))
print('The reward is: ' + str(reward))
print('\n')
return reward
    def definitive3x3(self, prev_obs, action):
        """Hand-crafted reward for the 3x3 grid.

        Rewards +1/-1 depending on whether the keep/switch decision matches a
        cascade of load heuristics: large surplus on the red direction favours
        switching; large surplus on the green direction favours keeping; when
        the intersection itself is balanced, the output-road loads break the
        tie; needless switching costs -1.  lights_settings 0 means the
        vertical direction is green (per the debug print below).  A debug
        trace is always printed.
        """
        reward = 0
        vertical_load = prev_obs['intersection_cars'][0]
        horizontal_load = prev_obs['intersection_cars'][1]
        vertical_output_load = prev_obs['output_cars'][0]
        horizontal_output_load = prev_obs['output_cars'][1]
        # Vertical direction currently green.
        if prev_obs['lights_settings'][0] == 0:
            if horizontal_load - vertical_load > 6:
                reward = 1 if action == 1 else -1
            elif vertical_load - horizontal_load > 4:
                reward = -1 if action == 1 else 1
            elif vertical_output_load - horizontal_output_load > 6:
                reward = 1 if action == 1 else -1
            elif horizontal_output_load - vertical_output_load > 4:
                reward = -1 if action == 1 else 1
            elif vertical_load > 5 and horizontal_load > 5 and vertical_output_load - horizontal_output_load > 3:
                reward = 1 if action == 1 else -1
            elif vertical_output_load == 10:
                # output road full (10 appears to be the capacity — TODO confirm)
                reward = 1 if action == 1 else -1
            elif action == 1:
                reward = -1
        # Horizontal direction currently green (mirror of the block above).
        if prev_obs['lights_settings'][0] == 1:
            if vertical_load - horizontal_load > 6:
                reward = 1 if action == 1 else -1
            elif horizontal_load - vertical_load > 4:
                reward = -1 if action == 1 else 1
            elif horizontal_output_load - vertical_output_load > 6:
                reward = 1 if action == 1 else -1
            elif vertical_output_load - horizontal_output_load > 4:
                reward = -1 if action == 1 else 1
            elif horizontal_load > 5 and vertical_load > 5 and horizontal_output_load - vertical_output_load > 3:
                reward = 1 if action == 1 else -1
            elif horizontal_output_load == 10:
                reward = 1 if action == 1 else -1
            elif action == 1:
                reward = -1
        print('The intersection is: ' + str(self.simulation.intersection_to_process))
        print('The vertical load is: ' + str(vertical_load))
        print('The horizontal load is: ' + str(horizontal_load))
        print('The vertical output load is: ' + str(vertical_output_load))
        print('The horizontal output load is: ' + str(horizontal_output_load))
        print('The green light is: ' + str('VERTICAL' if prev_obs['lights_settings'][0] == 0 else 'HORIZONTAL'))
        print('The action is: ' + str('MAINTAIN' if action == 0 else 'CHANGE'))
        print('The reward is: ' + str(reward))
        print('\n')
        return reward
| [
"gym.spaces.Dict",
"Simulation.Simulation",
"gym.spaces.Discrete",
"gym.spaces.Box"
] | [((431, 449), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (446, 449), False, 'from gym import spaces\n'), ((554, 607), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(1,)', 'dtype': 'np.uint8'}), '(low=0, high=1, shape=(1,), dtype=np.uint8)\n', (564, 607), False, 'from gym import spaces\n'), ((668, 724), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1000)', 'shape': '(2,)', 'dtype': 'np.uint8'}), '(low=0, high=1000, shape=(2,), dtype=np.uint8)\n', (678, 724), False, 'from gym import spaces\n'), ((778, 834), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1000)', 'shape': '(2,)', 'dtype': 'np.uint8'}), '(low=0, high=1000, shape=(2,), dtype=np.uint8)\n', (788, 834), False, 'from gym import spaces\n'), ((889, 945), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1000)', 'shape': '(2,)', 'dtype': 'np.uint8'}), '(low=0, high=1000, shape=(2,), dtype=np.uint8)\n', (899, 945), False, 'from gym import spaces\n'), ((979, 1020), 'gym.spaces.Dict', 'spaces.Dict', (['observation_space_dictionary'], {}), '(observation_space_dictionary)\n', (990, 1020), False, 'from gym import spaces\n'), ((1194, 1352), 'Simulation.Simulation', 'Simulation', (['self.simulation.traffic_volume', 'self.simulation.rows', 'self.simulation.cols', 'self.simulation.road_length', 'self.simulation.simulation_time'], {}), '(self.simulation.traffic_volume, self.simulation.rows, self.\n simulation.cols, self.simulation.road_length, self.simulation.\n simulation_time)\n', (1204, 1352), False, 'from Simulation import Simulation\n')] |
# Copyright <NAME> 2011-2017
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#-------------------------------------------------------------------------------
# CreateVersionFileCpp
#-------------------------------------------------------------------------------
import os
from os.path import splitext, relpath, sep
from SCons.Script import File
import cuppa.location
from cuppa.utility.attr_tools import try_attr_as_str
def offset_path( path, env ):
    """Return *path* relative to env['build_dir'], re-rooted under env['offset_dir']."""
    return env['offset_dir'] + sep + relpath( path, env['build_dir'] )
def hpp_from_cpp( cpp_file ):
    """Swap the file's extension for '.hpp'."""
    root, _ = splitext( cpp_file )
    return root + '.hpp'
def txt_from_cpp( cpp_file ):
    """Swap the file's extension for '.txt'."""
    root, _ = splitext( cpp_file )
    return root + '.txt'
class CreateVersionHeaderCpp:
    """SCons action that generates the build-identity header (.hpp) and a
    human-readable banner (.txt) alongside the version .cpp target.

    Fixes over the original: file handles are managed with `with` so they
    are closed even when a write raises, and directory creation uses
    `exist_ok=True` to avoid the check-then-create race.
    """

    def __init__( self, env, namespaces, version, location, build_id=None ):
        self.__env = env
        self.__namespace_guard = "_".join( namespaces )
        self.__namespaces = namespaces
        self.__version = version
        self.__location = location
        self.__build_id = build_id
        self.__variant = self.__env['variant'].name()
        self.__working_dir = os.path.join( env['base_path'], env['build_dir'] )
        # exist_ok avoids the race between the existence check and creation
        os.makedirs( self.__working_dir, exist_ok=True )

    def __call__( self, target, source, env ):
        """Write the .hpp and .txt companions, retarget to cpp_file and add
        the generated files as sources."""
        cpp_file = offset_path( target[0].path, env )
        hpp_file = hpp_from_cpp( cpp_file )
        txt_file = txt_from_cpp( cpp_file )
        output_dir = os.path.split( hpp_file )[0]
        if output_dir:
            output_dir = os.path.join( self.__working_dir, output_dir )
            os.makedirs( output_dir, exist_ok=True )
        # with-statements guarantee the files are closed on error paths
        with open( os.path.join( self.__working_dir, hpp_file ), "w" ) as version_hpp:
            version_hpp.write( get_build_identity_header( self.__namespace_guard, self.__namespaces ) )
        with open( os.path.join( self.__working_dir, txt_file ), "w" ) as version_txt:
            version_txt.write( get_build_identity_txt( self.__version, relpath( env['base_path'], self.__location ), self.__namespaces ) )
        target[0] = File( cpp_file )
        source.append( hpp_file )
        source.append( txt_file )
        return target, source
def get_build_identity_txt( version, location, namespaces ):
    """Return the contents of the generated version .txt banner file."""
    marker = '// v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v\n'
    body  = '// Version File for product version [ ' + version + ' ]\n'
    body += '// Location for dependency versions [ ' + location + ' ]\n'
    body += '// Namespace                        [ ' + "::".join( namespaces ) + ' ]\n'
    return marker + body + marker
def get_build_identity_header( namespace_guard, namespaces ):
    """Return the C++ source text of the generated build-identity header.

    The header declares a `build::identity` class (nested in *namespaces*)
    with static accessors for product/build metadata and a dependency map;
    the matching definitions are emitted by CreateVersionFileCpp.
    """
    lines = []
    # Include guard, standard includes and opening namespace blocks.
    lines += [ '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '#ifndef INCLUDED_' + namespace_guard.upper() + '_BUILD_GENERATED_VERSION_HPP\n'
               '#define INCLUDED_' + namespace_guard.upper() + '_BUILD_GENERATED_VERSION_HPP\n'
               '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '\n'
               '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
               '#include <string>\n'
               '#include <vector>\n'
               '#include <map>\n'
               '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
               '\n'
               '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n' ]
    for namespace in namespaces:
        lines += [ 'namespace ' + namespace + ' {' ]
    # Body of the identity class: typedefs and the dependency record.
    lines += [ 'namespace build {\n'
               '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
               '\n'
               'class identity\n'
               '{\n'
               'public:\n'
               '\n'
               '    typedef std::string              string_t;\n'
               '    typedef std::vector< string_t >  revisions_t;\n'
               '\n'
               'private:\n'
               '\n'
               '    struct dependency\n'
               '    {\n'
               '        dependency()\n'
               '        {\n'
               '        }\n'
               '\n'
               '        dependency( const string_t&    Name,\n'
               '                    const string_t&    Version,\n'
               '                    const string_t&    Repository,\n'
               '                    const string_t&    Branch,\n'
               '                    const revisions_t& Revisions )\n'
               '        : name       ( Name )\n'
               '        , version    ( Version )\n'
               '        , repository ( Repository )\n'
               '        , branch     ( Branch )\n'
               '        , revisions  ( Revisions )\n'
               '        {\n'
               '        }\n'
               '\n'
               '        string_t    name;\n'
               '        string_t    version;\n'
               '        string_t    repository;\n'
               '        string_t    branch;\n'
               '        revisions_t revisions;\n'
               '    };\n'
               '\n'
               'public:\n'
               '\n'
               '    typedef dependency                        dependency_t;\n'
               '    typedef std::map< string_t, dependency >  dependencies_t;\n'
               '\n'
               'public:\n' ]
    # Static accessor declarations for each piece of build metadata.
    lines += [ function_declaration_from_variable( 'product_version' ) ]
    lines += [ function_declaration_from_variable( 'product_repository' ) ]
    lines += [ function_declaration_from_variable( 'product_branch' ) ]
    lines += [ function_declaration_from_variable( 'product_revision' ) ]
    lines += [ function_declaration_from_variable( 'build_variant' ) ]
    lines += [ function_declaration_from_variable( 'build_time' ) ]
    lines += [ function_declaration_from_variable( 'build_user' ) ]
    lines += [ function_declaration_from_variable( 'build_host' ) ]
    lines += [ function_declaration_from_variable( 'build_id' ) ]
    lines += [ function_declaration_dependencies() ]
    lines += [ function_declaration_report() ]
    # Private statics, closing braces and include-guard end.
    lines += [ '\nprivate:\n'
               '    static const dependencies_t Dependencies_;\n'
               '    static const string_t       Report_;\n'
               '};\n'
               '\n'
               '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
               '} //end namespace build' ]
    for namespace in namespaces:
        lines += [ '} //end namespace ' + namespace ]
    lines += [ '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
               '\n'
               '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '#endif\n'
               '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '\n' ]
    return "\n".join( lines )
def function_declaration_from_variable( name ):
    """C++ declaration for a static accessor named *name* returning a C string."""
    return '    static const char* ' + name + '();'
def function_declaration_dependencies():
    """C++ declaration for the static dependencies() accessor."""
    return '    static const dependencies_t& dependencies();'
def function_declaration_report():
    """C++ declaration for the static report() accessor."""
    return '    static const char* report();'
class CreateVersionFileCpp:
    """SCons action that writes the build-identity .cpp implementation file
    matching the header emitted by CreateVersionHeaderCpp.

    Fix over the original: the output file handle in __call__ is managed
    with a `with` statement so it is closed even when a write raises.
    """

    def __init__( self, env, namespaces, version, location, build_id=None ):
        self.__env = env
        self.__namespace_guard = "_".join( namespaces )
        self.__namespaces = namespaces
        self.__version = version
        self.__location = location
        self.__build_id = build_id
        # Resolve repository/branch/revision information for the product itself.
        location = cuppa.location.Location( env, location )
        self.__repository = location.repository()
        self.__branch = location.branch()
        self.__revision = location.revisions()[0]
        self.__variant = self.__env['variant'].name()

    def __call__( self, target, source, env ):
        """Generate the version .cpp file for target[0]."""
        cpp_file = target[0].path
        hpp_file = hpp_from_cpp( cpp_file )
        # with-statement ensures the handle is closed even if a write fails
        with open( cpp_file, "w" ) as version_cpp:
            version_cpp.write( self.get_build_identity_source( env['BUILD_WITH'], hpp_file, self.__build_id ) )
        return None

    def function_definition_from_variable( self, name, variable ):
        """C++ definition of accessor *name* returning str(variable)."""
        lines = []
        lines += [ '\nconst char* identity::' + name + '()' ]
        lines += [ '{' ]
        lines += [ '    return "' + str( variable ) + '";' ]
        lines += [ '}\n' ]
        return "\n".join( lines )

    def function_definition_dependencies( self ):
        """C++ definition of the dependencies() accessor."""
        lines = []
        lines += [ '\nconst identity::dependencies_t& identity::dependencies()\n'
                   '{\n'
                   '    return Dependencies_;\n'
                   '}\n' ]
        return "\n".join( lines )

    def initialise_dependencies_definition( self, dependencies ):
        """C++ initialiser populating the static Dependencies_ map from the
        dependency factories registered in the build environment."""
        lines = []
        lines += [ '\nidentity::dependencies_t initialise_dependencies()\n'
                   '{\n'
                   '    typedef identity::dependencies_t dependencies_t;\n'
                   '    typedef identity::dependency_t   dependency_t;\n'
                   '    typedef identity::revisions_t    revisions_t;\n'
                   '    dependencies_t Dependencies;' ]
        for name in dependencies:
            if name in self.__env['dependencies']:
                dependency_factory = self.__env['dependencies'][name]
                dependency = dependency_factory( self.__env )
                lines += [ '    Dependencies[ "' + name + '" ] = dependency_t( "'
                                + try_attr_as_str( dependency, "name", "N/A" ) + '", "'
                                + try_attr_as_str( dependency, "version", "N/A" ) + '", "'
                                + try_attr_as_str( dependency, "repository", "N/A" ) + '", "'
                                + try_attr_as_str( dependency, "branch", "N/A" )
                                + '", revisions_t() );' ]
                # Revision info is optional on dependency objects.
                try:
                    if callable( getattr( dependency, 'revisions' ) ):
                        revisions = dependency.revisions()
                        if revisions:
                            for revision in revisions:
                                lines += [ '    Dependencies[ "' + name + '" ].revisions.push_back( "' + str(revision) + '" );' ]
                except AttributeError:
                    pass
        lines += [ '    return Dependencies;\n'
                   '}\n'
                   '\n'
                   'const identity::dependencies_t identity::Dependencies_ = initialise_dependencies();\n' ]
        return "\n".join( lines )

    def function_definition_report( self ):
        """C++ definition of the report() accessor."""
        lines = []
        lines += [ '\nconst char* identity::report()' ]
        lines += [ '{' ]
        lines += [ '    return Report_.c_str();' ]
        lines += [ '}\n' ]
        return "\n".join( lines )

    def initialise_report_definition( self ):
        """C++ initialiser building the human-readable Report_ string at
        static-initialisation time from the identity accessors."""
        lines = []
        lines += [ '\nidentity::string_t initialise_report()\n'
                   '{\n'
                   '    std::ostringstream Report;\n'
                   '\n'
                   '    Report\n'
                   '        << "Product:\\n"\n'
                   '           " |- Version    = " << identity::product_version() << "\\n"\n'
                   '           " |- Repository = " << identity::product_repository() << "\\n"\n'
                   '           " |- Branch     = " << identity::product_branch() << "\\n"\n'
                   '           " +- Revision   = " << identity::product_revision() << "\\n"\n'
                   '           "Build:\\n"\n'
                   '           " |- Variant    = " << identity::build_variant() << "\\n"\n'
                   '           " |- Time       = " << identity::build_time() << "\\n"\n'
                   '           " |- User       = " << identity::build_user() << "\\n"\n'
                   '           " |- Host       = " << identity::build_host() << "\\n"\n'
                   '           " +- ID         = " << identity::build_id() << "\\n";\n'
                   '\n'
                   '    if( !identity::dependencies().empty() )\n'
                   '    {\n'
                   '        Report << "Dependencies:\\n";\n'
                   '    }\n'
                   '\n'
                   '    identity::dependencies_t::const_iterator Dependency = identity::dependencies().begin();\n'
                   '    identity::dependencies_t::const_iterator End = identity::dependencies().end();\n'
                   '\n'
                   '    for( ; Dependency != End; ++Dependency )\n'
                   '    {\n'
                   '        Report\n'
                   '            << " " << Dependency->second.name << "\\n"\n'
                   '            << " |- Version    = " << Dependency->second.version << "\\n"\n'
                   '            << " |- Repository = " << Dependency->second.repository << "\\n"\n'
                   '            << " |- Branch     = " << Dependency->second.branch << "\\n";\n'
                   '\n'
                   '        identity::revisions_t::const_iterator Revision = Dependency->second.revisions.begin();\n'
                   '        identity::revisions_t::const_iterator End = Dependency->second.revisions.end();\n'
                   '\n'
                   '        for( ; Revision != End; )\n'
                   '        {\n'
                   '            identity::string_t Value( *Revision );\n'
                   '            if( ++Revision != End )\n'
                   '            {\n'
                   '                Report << " |";\n'
                   '            }\n'
                   '            else\n'
                   '            {\n'
                   '                Report << " +";\n'
                   '            }\n'
                   '            Report << "- Revision   = " << Value << "\\n";\n'
                   '        }\n'
                   '    }\n'
                   '\n'
                   '    return Report.str();\n'
                   '}\n'
                   '\n'
                   'const identity::string_t identity::Report_ = initialise_report();' ]
        return "\n".join( lines )

    def get_build_identity_source( self, dependencies, header_file, build_id ):
        """Assemble the full .cpp source text (includes, namespaces and all
        accessor/initialiser definitions)."""
        from datetime import datetime
        from getpass import getuser
        from socket import gethostname
        build_time = datetime.utcnow()
        build_user = getuser()
        build_host = gethostname()
        lines = []
        lines += [ '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
                   '// Self Include' ]
        lines += [ '#include "' + header_file + '"' ]
        lines += [ ''
                   '// C++ Standard Includes\n'
                   '#include <sstream>\n'
                   '\n'
                   '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
                   '\n'
                   '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n' ]
        for namespace in self.__namespaces:
            lines += [ 'namespace ' + namespace + ' {' ]
        lines += [ 'namespace build {\n'
                   '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n' ]
        lines += [ self.function_definition_from_variable( 'product_version', self.__version ) ]
        lines += [ self.function_definition_from_variable( 'product_repository', self.__repository ) ]
        lines += [ self.function_definition_from_variable( 'product_branch', self.__branch ) ]
        lines += [ self.function_definition_from_variable( 'product_revision', self.__revision ) ]
        lines += [ self.function_definition_from_variable( 'build_variant', self.__variant ) ]
        lines += [ self.function_definition_from_variable( 'build_time', build_time ) ]
        lines += [ self.function_definition_from_variable( 'build_user', build_user ) ]
        lines += [ self.function_definition_from_variable( 'build_host', build_host ) ]
        lines += [ self.function_definition_from_variable( 'build_id', build_id ) ]
        lines += [ self.initialise_dependencies_definition( dependencies ) ]
        lines += [ self.function_definition_dependencies() ]
        lines += [ self.initialise_report_definition() ]
        lines += [ self.function_definition_report() ]
        lines += [ '\n// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
                   '} //end namespace build' ]
        for namespace in self.__namespaces:
            lines += [ '} //end namespace ' + namespace ]
        lines += [ '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
                   '\n' ]
        return "\n".join( lines )
| [
"os.path.exists",
"SCons.Script.File",
"os.makedirs",
"datetime.datetime.utcnow",
"cuppa.utility.attr_tools.try_attr_as_str",
"os.path.join",
"os.path.splitext",
"os.path.split",
"getpass.getuser",
"socket.gethostname",
"os.path.relpath"
] | [((686, 710), 'os.path.relpath', 'relpath', (['path', 'build_dir'], {}), '(path, build_dir)\n', (693, 710), False, 'from os.path import splitext, relpath, sep\n'), ((1282, 1330), 'os.path.join', 'os.path.join', (["env['base_path']", "env['build_dir']"], {}), "(env['base_path'], env['build_dir'])\n", (1294, 1330), False, 'import os\n'), ((2335, 2349), 'SCons.Script.File', 'File', (['cpp_file'], {}), '(cpp_file)\n', (2339, 2349), False, 'from SCons.Script import File\n'), ((15302, 15319), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (15317, 15319), False, 'from datetime import datetime\n'), ((15341, 15350), 'getpass.getuser', 'getuser', ([], {}), '()\n', (15348, 15350), False, 'from getpass import getuser\n'), ((15372, 15385), 'socket.gethostname', 'gethostname', ([], {}), '()\n', (15383, 15385), False, 'from socket import gethostname\n'), ((755, 773), 'os.path.splitext', 'splitext', (['cpp_file'], {}), '(cpp_file)\n', (763, 773), False, 'from os.path import splitext, relpath, sep\n'), ((831, 849), 'os.path.splitext', 'splitext', (['cpp_file'], {}), '(cpp_file)\n', (839, 849), False, 'from os.path import splitext, relpath, sep\n'), ((1348, 1382), 'os.path.exists', 'os.path.exists', (['self.__working_dir'], {}), '(self.__working_dir)\n', (1362, 1382), False, 'import os\n'), ((1398, 1429), 'os.makedirs', 'os.makedirs', (['self.__working_dir'], {}), '(self.__working_dir)\n', (1409, 1429), False, 'import os\n'), ((1646, 1669), 'os.path.split', 'os.path.split', (['hpp_file'], {}), '(hpp_file)\n', (1659, 1669), False, 'import os\n'), ((1724, 1768), 'os.path.join', 'os.path.join', (['self.__working_dir', 'output_dir'], {}), '(self.__working_dir, output_dir)\n', (1736, 1768), False, 'import os\n'), ((1891, 1933), 'os.path.join', 'os.path.join', (['self.__working_dir', 'hpp_file'], {}), '(self.__working_dir, hpp_file)\n', (1903, 1933), False, 'import os\n'), ((2100, 2142), 'os.path.join', 'os.path.join', (['self.__working_dir', 'txt_file'], {}), 
'(self.__working_dir, txt_file)\n', (2112, 2142), False, 'import os\n'), ((1790, 1816), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1804, 1816), False, 'import os\n'), ((1836, 1859), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1847, 1859), False, 'import os\n'), ((2219, 2261), 'os.path.relpath', 'relpath', (["env['base_path']", 'self.__location'], {}), "(env['base_path'], self.__location)\n", (2226, 2261), False, 'from os.path import splitext, relpath, sep\n'), ((10603, 10647), 'cuppa.utility.attr_tools.try_attr_as_str', 'try_attr_as_str', (['dependency', '"""branch"""', '"""N/A"""'], {}), "(dependency, 'branch', 'N/A')\n", (10618, 10647), False, 'from cuppa.utility.attr_tools import try_attr_as_str\n'), ((10510, 10558), 'cuppa.utility.attr_tools.try_attr_as_str', 'try_attr_as_str', (['dependency', '"""repository"""', '"""N/A"""'], {}), "(dependency, 'repository', 'N/A')\n", (10525, 10558), False, 'from cuppa.utility.attr_tools import try_attr_as_str\n'), ((10420, 10465), 'cuppa.utility.attr_tools.try_attr_as_str', 'try_attr_as_str', (['dependency', '"""version"""', '"""N/A"""'], {}), "(dependency, 'version', 'N/A')\n", (10435, 10465), False, 'from cuppa.utility.attr_tools import try_attr_as_str\n'), ((10333, 10375), 'cuppa.utility.attr_tools.try_attr_as_str', 'try_attr_as_str', (['dependency', '"""name"""', '"""N/A"""'], {}), "(dependency, 'name', 'N/A')\n", (10348, 10375), False, 'from cuppa.utility.attr_tools import try_attr_as_str\n')] |
import pandas as pd
from scipy.stats import ttest_rel
"""
output
"""
# Note: some output is shortened to save space.
# This file discusses statistical analysis (Part II).
# ------------------------------------------------------------------------------
# Data stored in form of xlsx with contents:
"""
group data
0 1 34
1 1 37
2 1 28
3 1 36
4 1 30
5 2 43
6 2 45
7 2 47
8 2 49
9 2 39
"""
# Assume these data are paired sample.
# ------------------------------------------------------------------------------
# Load the example data and split the 'data' column by the 'group' label.
IS_t_test = pd.read_excel('E:\\IS_t_test.xlsx')
Group1 = IS_t_test[IS_t_test['group']==1]['data']  # measurements of group 1
Group2 = IS_t_test[IS_t_test['group']==2]['data']  # measurements of group 2
# ttest_rel performs a paired-samples t-test and returns (t-statistic, p-value).
print (ttest_rel(Group1,Group2))
"""
(-5.6873679190073361, 0.00471961872448184)
"""
# The first element from output is the value of t
# The second element from output is p-value
| [
"scipy.stats.ttest_rel",
"pandas.read_excel"
] | [((610, 645), 'pandas.read_excel', 'pd.read_excel', (['"""E:\\\\IS_t_test.xlsx"""'], {}), "('E:\\\\IS_t_test.xlsx')\n", (623, 645), True, 'import pandas as pd\n'), ((756, 781), 'scipy.stats.ttest_rel', 'ttest_rel', (['Group1', 'Group2'], {}), '(Group1, Group2)\n', (765, 781), False, 'from scipy.stats import ttest_rel\n')] |
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import *
# Candidate operation names for the searchable cell.  Naming: MBI = inverted
# residual block, kN = kernel size N, eN = expansion ratio N, _se = variant
# that passes a non-zero third argument (presumably squeeze-and-excitation
# channels — TODO confirm against MBInvertedResBlock).
PRIMITIVES = [
    'MBI_k3_e3',
    'MBI_k3_e6',
    'MBI_k5_e3',
    'MBI_k5_e6',
    'MBI_k3_e3_se',
    'MBI_k3_e6_se',
    'MBI_k5_e3_se',
    'MBI_k5_e6_se',
    # 'skip',
]

# Factory table mapping each primitive name to a constructor taking
# (in_channels, mid_channels, out_channels, stride, affine, act_func).
# The "_se" variants pass ic or ic*2 as the se-channel argument; the plain
# variants pass 0.
OPS = {
    'MBI_k3_e3'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 3, s, affine=aff, act_func=act),
    'MBI_k3_e6'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 3, s, affine=aff, act_func=act),
    'MBI_k5_e3'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 5, s, affine=aff, act_func=act),
    'MBI_k5_e6'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 5, s, affine=aff, act_func=act),
    'MBI_k3_e3_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic  , oc, 3, s, affine=aff, act_func=act),
    'MBI_k3_e6_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic*2, oc, 3, s, affine=aff, act_func=act),
    'MBI_k5_e3_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic  , oc, 5, s, affine=aff, act_func=act),
    'MBI_k5_e6_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic*2, oc, 5, s, affine=aff, act_func=act),
    # 'skip'         : lambda ic, mc, oc, s, aff, act: IdentityLayer(ic, oc),
}
class MixedOP(nn.Module):
    """Weighted mixture of candidate operations for one searchable block.

    Holds one instance per PRIMITIVES entry plus learnable architecture
    logits (``log_alphas``). ``forward`` either samples a single candidate
    op (search phase, ``sampling=True``) or returns the softmax-weighted
    sum of all ops plus the expected latency.
    """
    def __init__(self, in_channels, out_channels, stride, affine, act_func, num_ops, mc_num_dict, lat_lookup):
        super(MixedOP, self).__init__()
        self.num_ops = num_ops
        self.lat_lookup = lat_lookup
        self.mc_num_dict = mc_num_dict  # per-candidate mid-channel counts, indexed like PRIMITIVES
        self.m_ops = nn.ModuleList()

        # Build one op per primitive; mid-channel width comes from mc_num_dict.
        for i in range(num_ops):
            primitive = PRIMITIVES[i]
            mid_channels = self.mc_num_dict[i]
            op = OPS[primitive](in_channels, mid_channels, out_channels, stride, affine, act_func)
            self.m_ops.append(op)

        self._initialize_log_alphas()
        self.reset_switches()

    def fink_ori_idx(self, idx):
        # NOTE(review): name is presumably a typo for "find_ori_idx"; kept to
        # avoid breaking any external callers.
        # Map `idx`, a position among the currently-active switches, back to
        # its index in the full op list.
        count = 0
        for ori_idx in range(len(self.switches)):
            if self.switches[ori_idx]:
                count += 1
                if count == (idx + 1):
                    break
        return ori_idx

    def forward(self, x, sampling, mode):
        """Run the mixed op.

        With ``sampling=True`` a single candidate is chosen according to
        ``mode`` and ``(op(x), 0)`` is returned:
          - 'gumbel': gumbel-softmax sample over *active* ops; the chosen op's
            switch is disabled so subsequent calls sample from the rest.
          - 'gumbel_2': gumbel-softmax sample, then switches are reset.
          - 'min_alphas' / 'max_alphas': arg-min/max of the active logits.
          - 'random': uniform choice among active ops.
        With ``sampling=False`` returns the gumbel-softmax-weighted sum of all
        op outputs and the correspondingly weighted lookup latency.

        NOTE: requires ``set_temperature`` to have been called (sets self.T).
        """
        if sampling:
            # Boolean-list indexing selects only the still-active candidates.
            weights = self.log_alphas[self.switches]
            if mode == 'gumbel':
                weights = F.gumbel_softmax(F.log_softmax(weights, dim=-1), self.T, hard=False)
                idx = torch.argmax(weights).item()
                self.switches[idx] = False
            elif mode == 'gumbel_2':
                weights = F.gumbel_softmax(F.log_softmax(weights, dim=-1), self.T, hard=False)
                idx = torch.argmax(weights).item()
                idx = self.fink_ori_idx(idx)
                self.reset_switches()
            elif mode == 'min_alphas':
                idx = torch.argmin(weights).item()
                idx = self.fink_ori_idx(idx)
                self.reset_switches()
            elif mode == 'max_alphas':
                idx = torch.argmax(weights).item()
                idx = self.fink_ori_idx(idx)
                self.reset_switches()
            elif mode == 'random':
                idx = random.choice(range(len(weights)))
                idx = self.fink_ori_idx(idx)
                self.reset_switches()
            else:
                raise ValueError('invalid sampling mode...')
            op = self.m_ops[idx]
            return op(x), 0
        else:
            weights = F.gumbel_softmax(self.log_alphas, self.T, hard=False)
            lats = self.get_lookup_latency(x.size(-1))
            out = sum(w*op(x) for w, op in zip(weights, self.m_ops))
            out_lat = sum(w*lat for w, lat in zip(weights, lats))
            return out, out_lat

    def get_lookup_latency(self, size):
        """Return per-op latencies for spatial input `size` from the lookup table.

        lat_lookup is keyed by a string describing the op configuration, then
        by mid-channel count. IdentityLayer (skip) contributes zero latency.
        """
        lats = []
        for idx, op in enumerate(self.m_ops):
            if isinstance(op, IdentityLayer):
                lats.append(0)
            else:
                key = '{}_{}_{}_{}_{}_k{}_s{}_{}'.format(
                                op.name,
                                size,
                                op.in_channels,
                                op.se_channels,
                                op.out_channels,
                                op.kernel_size,
                                op.stride,
                                op.act_func)
                mid_channels = op.mid_channels
                lats.append(self.lat_lookup[key][mid_channels])
        return lats

    def _initialize_log_alphas(self):
        # Uniform initialization: log-softmax of zeros gives equal logits.
        alphas = torch.zeros((self.num_ops,))
        log_alphas = F.log_softmax(alphas, dim=-1)
        self.register_parameter('log_alphas', nn.Parameter(log_alphas))

    def reset_switches(self):
        # Re-enable every candidate op for sampling.
        self.switches = [True] * self.num_ops

    def set_temperature(self, T):
        # Gumbel-softmax temperature used in forward().
        self.T = T
class MixedStage(nn.Module):
    """A searchable stage of 1-4 sequential MixedOP blocks with learnable
    depth (residual) weights.

    ``forward`` returns the beta-softmax-weighted combination of the stage
    input and each block's output (i.e. a soft choice of stage depth), plus
    the matching weighted cumulative latency.

    Args:
        ics, ocs, ss, affs, acts: per-block in-channels, out-channels,
            strides, affine flags and activation names.
        mc_num_ddict: dict with keys 'block1'..'blockN' of per-candidate
            mid-channel counts, forwarded to each MixedOP.
        lat_lookup: latency lookup table, forwarded to each MixedOP.
        stage_type: 0 for stage6 (1 block), 1 for stage1 (2 blocks),
            2 for stage2 (3 blocks), 3 for stage3/4/5 (4 blocks).
    """

    # Number of MixedOP blocks per stage_type (replaces the hand-unrolled
    # if/elif construction of the original).
    _NUM_BLOCKS = {0: 1, 1: 2, 2: 3, 3: 4}

    def __init__(self, ics, ocs, ss, affs, acts, mc_num_ddict, lat_lookup, stage_type):
        super(MixedStage, self).__init__()
        self.lat_lookup = lat_lookup
        self.mc_num_ddict = mc_num_ddict
        self.stage_type = stage_type    # 0 for stage6 || 1 for stage1 || 2 for stage2 || 3 for stage3/4/5
        # The stage input itself is a depth candidate only when the first
        # block preserves both channel count and resolution.
        self.start_res = 0 if ((ics[0] == ocs[0]) and (ss[0] == 1)) else 1
        self.num_res = len(ics) - self.start_res + 1

        if stage_type not in self._NUM_BLOCKS:
            raise ValueError('invalid stage_type...')
        # Build block1..blockN via setattr so that parameter names in the
        # state_dict stay identical to the original hand-unrolled version.
        for i in range(self._NUM_BLOCKS[stage_type]):
            name = 'block{}'.format(i + 1)
            setattr(self, name, MixedOP(ics[i], ocs[i], ss[i], affs[i], acts[i],
                                        len(PRIMITIVES), mc_num_ddict[name], lat_lookup))

        self._initialize_betas()

    def forward(self, x, sampling, mode):
        # Collect the stage input and every block output, together with the
        # cumulative latency up to each depth.
        res_list = [x,]
        lat_list = [0.,]
        out, lat_acc = x, 0.
        for i in range(self._NUM_BLOCKS[self.stage_type]):
            block = getattr(self, 'block{}'.format(i + 1))
            out, lat = block(out, sampling, mode)
            lat_acc += lat
            res_list.append(out)
            lat_list.append(lat_acc)

        # Soft depth selection: betas weight the candidate outputs/latencies.
        weights = F.softmax(self.betas, dim=-1)
        out = sum(w*res for w, res in zip(weights, res_list[self.start_res:]))
        out_lat = sum(w*lat for w, lat in zip(weights, lat_list[self.start_res:]))
        return out, out_lat

    def _initialize_betas(self):
        # One beta per depth candidate, initialized to zero (uniform softmax).
        betas = torch.zeros((self.num_res))
        self.register_parameter('betas', nn.Parameter(betas))
class Network(nn.Module):
    """Full supernet: conv stem -> six searchable MixedStages -> classifier.

    The channel/stride layout follows an EfficientNet/MobileNet-style
    backbone. `mc_num_dddict` supplies per-stage/per-block candidate
    mid-channel counts; `lat_lookup` is the latency table (its 'base' entry
    is the fixed latency of the non-searchable stem/head parts).
    """
    def __init__(self, num_classes, mc_num_dddict, lat_lookup):
        super(Network, self).__init__()
        self.lat_lookup = lat_lookup
        self.mc_num_dddict = mc_num_dddict

        # Fixed (non-searchable) stem.
        self.first_stem = ConvLayer(3, 32, kernel_size=3, stride=2, affine=False, act_func='relu')
        self.second_stem = MBInvertedResBlock(32, 32, 8, 16, kernel_size=3, stride=1, affine=False, act_func='relu')
        # Searchable stages; ics/ocs/ss give per-block channels and strides.
        self.stage1 = MixedStage(
            ics = [16,24],
            ocs = [24,24],
            ss = [2,1],
            affs = [False, False],
            acts = ['relu', 'relu'],
            mc_num_ddict = mc_num_dddict['stage1'],
            lat_lookup = lat_lookup,
            stage_type = 1,)
        self.stage2 = MixedStage(
            ics = [24,40,40],
            ocs = [40,40,40],
            ss = [2,1,1],
            affs = [False, False, False],
            acts = ['swish', 'swish', 'swish'],
            mc_num_ddict = mc_num_dddict['stage2'],
            lat_lookup = lat_lookup,
            stage_type = 2,)
        self.stage3 = MixedStage(
            ics = [40,80,80,80],
            ocs = [80,80,80,80],
            ss = [2,1,1,1],
            affs = [False, False, False, False],
            acts = ['swish', 'swish', 'swish', 'swish'],
            mc_num_ddict = mc_num_dddict['stage3'],
            lat_lookup = lat_lookup,
            stage_type = 3,)
        self.stage4 = MixedStage(
            ics = [80,112,112,112],
            ocs = [112,112,112,112],
            ss = [1,1,1,1],
            affs = [False, False, False, False],
            acts = ['swish', 'swish', 'swish', 'swish'],
            mc_num_ddict = mc_num_dddict['stage4'],
            lat_lookup = lat_lookup,
            stage_type = 3,)
        self.stage5 = MixedStage(
            ics = [112,192,192,192],
            ocs = [192,192,192,192],
            ss = [2,1,1,1],
            affs = [False, False, False, False],
            acts = ['swish', 'swish', 'swish', 'swish'],
            mc_num_ddict = mc_num_dddict['stage5'],
            lat_lookup = lat_lookup,
            stage_type = 3,)
        self.stage6 = MixedStage(
            ics = [192,],
            ocs = [320,],
            ss = [1,],
            affs = [False,],
            acts = ['swish',],
            mc_num_ddict = mc_num_dddict['stage6'],
            lat_lookup = lat_lookup,
            stage_type = 0,)
        # Fixed head.
        self.feature_mix_layer = ConvLayer(320, 1280, kernel_size=1, stride=1, affine=False, act_func='swish')
        self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = LinearLayer(1280, num_classes)

        self._initialization()

    def forward(self, x, sampling, mode='max'):
        """Return (logits, latency).

        When not sampling, the latency starts from the fixed 'base' cost and
        accumulates each stage's expected latency; when sampling, latency is 0.
        """
        out_lat = self.lat_lookup['base'] if not sampling else 0.0
        x = self.first_stem(x)
        x = self.second_stem(x)

        x, lat = self.stage1(x, sampling, mode)
        out_lat += lat
        x, lat = self.stage2(x, sampling, mode)
        out_lat += lat
        x, lat = self.stage3(x, sampling, mode)
        out_lat += lat
        x, lat = self.stage4(x, sampling, mode)
        out_lat += lat
        x, lat = self.stage5(x, sampling, mode)
        out_lat += lat
        x, lat = self.stage6(x, sampling, mode)
        out_lat += lat

        x = self.feature_mix_layer(x)
        x = self.global_avg_pooling(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)

        return x, out_lat

    def set_temperature(self, T):
        # Broadcast the gumbel-softmax temperature to every MixedOP.
        for m in self.modules():
            if isinstance(m, MixedOP):
                m.set_temperature(T)

    def weight_parameters(self):
        # All parameters except the architecture ones (log_alphas / betas).
        _weight_parameters = []

        for k, v in self.named_parameters():
            if not (k.endswith('log_alphas') or k.endswith('betas')):
                _weight_parameters.append(v)

        return _weight_parameters

    def arch_parameters(self):
        # Architecture parameters only (op logits and depth betas).
        _arch_parameters = []

        for k, v in self.named_parameters():
            if k.endswith('log_alphas') or k.endswith('betas'):
                _arch_parameters.append(v)

        return _arch_parameters

    def log_alphas_parameters(self):
        _log_alphas_parameters = []

        for k, v in self.named_parameters():
            if k.endswith('log_alphas'):
                _log_alphas_parameters.append(v)

        return _log_alphas_parameters

    def betas_parameters(self):
        _betas_parameters = []

        for k, v in self.named_parameters():
            if k.endswith('betas'):
                _betas_parameters.append(v)

        return _betas_parameters

    def reset_switches(self):
        # Re-enable every candidate op in every MixedOP.
        for m in self.modules():
            if isinstance(m, MixedOP):
                m.reset_switches()

    def _initialization(self):
        # Only biases and BatchNorm affine weights are initialized here;
        # conv/linear weights keep their default initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
| [
"torch.nn.functional.gumbel_softmax",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Parameter",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.argmin",
"torch.nn.functional.softmax",
"torch.argmax"
] | [((1549, 1564), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1562, 1564), True, 'import torch.nn as nn\n'), ((3802, 3830), 'torch.zeros', 'torch.zeros', (['(self.num_ops,)'], {}), '((self.num_ops,))\n', (3813, 3830), False, 'import torch\n'), ((3846, 3875), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['alphas'], {'dim': '(-1)'}), '(alphas, dim=-1)\n', (3859, 3875), True, 'import torch.nn.functional as F\n'), ((7245, 7274), 'torch.nn.functional.softmax', 'F.softmax', (['self.betas'], {'dim': '(-1)'}), '(self.betas, dim=-1)\n', (7254, 7274), True, 'import torch.nn.functional as F\n'), ((7489, 7514), 'torch.zeros', 'torch.zeros', (['self.num_res'], {}), '(self.num_res)\n', (7500, 7514), False, 'import torch\n'), ((9839, 9862), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (9859, 9862), True, 'import torch.nn as nn\n'), ((3012, 3065), 'torch.nn.functional.gumbel_softmax', 'F.gumbel_softmax', (['self.log_alphas', 'self.T'], {'hard': '(False)'}), '(self.log_alphas, self.T, hard=False)\n', (3028, 3065), True, 'import torch.nn.functional as F\n'), ((3916, 3940), 'torch.nn.Parameter', 'nn.Parameter', (['log_alphas'], {}), '(log_alphas)\n', (3928, 3940), True, 'import torch.nn as nn\n'), ((7552, 7571), 'torch.nn.Parameter', 'nn.Parameter', (['betas'], {}), '(betas)\n', (7564, 7571), True, 'import torch.nn as nn\n'), ((2173, 2203), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['weights'], {'dim': '(-1)'}), '(weights, dim=-1)\n', (2186, 2203), True, 'import torch.nn.functional as F\n'), ((11754, 11782), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (11771, 11782), True, 'import torch.nn as nn\n'), ((2235, 2256), 'torch.argmax', 'torch.argmax', (['weights'], {}), '(weights)\n', (2247, 2256), False, 'import torch\n'), ((2354, 2384), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['weights'], {'dim': '(-1)'}), '(weights, dim=-1)\n', (2367, 2384), True, 'import 
torch.nn.functional as F\n'), ((11849, 11877), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (11866, 11877), True, 'import torch.nn as nn\n'), ((2416, 2437), 'torch.argmax', 'torch.argmax', (['weights'], {}), '(weights)\n', (2428, 2437), False, 'import torch\n'), ((11951, 11981), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (11968, 11981), True, 'import torch.nn as nn\n'), ((12014, 12042), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (12031, 12042), True, 'import torch.nn as nn\n'), ((2544, 2565), 'torch.argmin', 'torch.argmin', (['weights'], {}), '(weights)\n', (2556, 2565), False, 'import torch\n'), ((2672, 2693), 'torch.argmax', 'torch.argmax', (['weights'], {}), '(weights)\n', (2684, 2693), False, 'import torch\n')] |
import numpy as np
import random as random
def move_to_sample(Rover):
    """Steer/brake/throttle logic for homing in on a located rock sample.

    Mutates `Rover`'s control attributes in place and returns it.
    Priority order (later rules override earlier ones):
      1. steer toward the mean rock angle, braking gently if moving fast;
      2. creep forward if nearly stopped and not yet beside the sample;
      3. stuck-recovery overrides steering and throttle;
      4. when near the sample, apply full brake so pickup can trigger.

    Note: the original also computed an unused mean rock distance and
    unused delX/delY locals; that dead code has been removed.
    """
    if len(Rover.rock_angles) > 0:
        # Aim at the mean of the detected rock-pixel angles (rad -> deg),
        # clipped to the steering range of +/- 15 degrees.
        angle_to_rock = np.mean(Rover.rock_angles)
        Rover.steer = np.clip(angle_to_rock * 180/np.pi, -15, 15)
        if Rover.vel > 0.5:
            # Moving fast: bleed off speed gently while turning.
            Rover.brake = 0.1
        else:
            Rover.brake = 0
            Rover.throttle = 0
    if Rover.vel < 0.2 and Rover.near_sample == 0:
        # Nearly stopped but not at the sample yet: creep forward.
        Rover.throttle = 0.1
        Rover.brake = 0
    if Rover.Is_Stuck:
        # Stuck-recovery overrides normal control.
        Rover.brake = 0
        Rover.throttle = Rover.StuckThrottle
        Rover.steer = Rover.StuckSteering
    if Rover.near_sample:
        # Full stop so the pickup routine can engage.
        Rover.brake = Rover.brake_set
    return Rover
def is_terrain_navigable(Rover):
    """Return 1 when the rover sees enough navigable terrain to drive, else 0.

    Terrain counts as navigable when there is at least one navigable-pixel
    distance and the number of navigable angles meets the `stop_forward`
    threshold.
    """
    has_vision = len(Rover.nav_dists) > 0
    enough_terrain = has_vision and len(Rover.nav_angles) >= Rover.stop_forward
    return 1 if enough_terrain else 0
def is_rover_stuck(Rover):
    """Detect and manage the rover's stuck state; mutates `Rover` in place.

    The rover is declared stuck when (a) the mapped percentage has not grown
    by more than 0.25 and (b) the steering command has essentially stopped
    changing. On entering the stuck state, random recovery steering/throttle
    commands are drawn. The stuck state is cleared once the unstuck time
    window (2s after `map_time`) has elapsed.

    Fixes vs. original:
      - removed the dead local `is_rover_stuck` that shadowed the function name;
      - replaced `~rover_unstucking` with `not rover_unstucking`: bitwise NOT
        on a plain Python bool gives -2/-1 (always truthy) and only worked by
        accident because numpy comparisons return np.bool_.
    """
    steer_vel = np.mean(np.diff(Rover.SteerVel[0:6]))
    no_new_area_mapped = (np.abs(Rover.new_perc_mapped - Rover.old_perc_mapped) <= 0.25)
    rover_unstucking = (np.abs(Rover.total_time - Rover.map_time) <= 2)
    rover_steer_not_changing = (np.abs(steer_vel) <= 2)

    if no_new_area_mapped and rover_steer_not_changing and Rover.Is_Stuck == 0:
        # Rover was not stuck before, but is stuck now: pick random recovery
        # commands (randint's upper bound is exclusive: steer in [-15, 15],
        # throttle in [-1, 1]).
        Rover.Is_Stuck = 1
        Rover.StuckSteering = np.random.randint(-15, 16)
        Rover.StuckThrottle = np.random.randint(-1, 2)
    if Rover.Is_Stuck and not rover_unstucking:
        # Rover unstucking window is done
        Rover.Is_Stuck = 0
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover):
    """Top-level state machine mapping perception output to drive commands.

    Mutates `Rover` in place and returns it. Priority: sample approach
    overrides the forward/stop state machine; stuck handling overrides
    normal forward driving; pickup is triggered once stopped at a sample.
    The steering history buffer (SteerVel) is shifted every call for the
    stuck detector.
    """
    # Implement conditionals to decide what to do given perception data
    # Here you're all set up with some basic functionality but you'll need to
    # improve on this decision tree to do a good job of navigating autonomously!

    # Example:
    # Check if we have vision data to make decisions with
    # If there are, we'll step through the known sample positions
    # to confirm whether detections are real
    is_rover_stuck(Rover);
    if Rover.nav_angles is not None:
        # Check for Rover.mode status
        #if near a sample, navigate towards the sample and stop
        if (Rover.samples_located > Rover.samples_collected):
            Rover = move_to_sample(Rover);
        elif Rover.mode == 'forward':
            # Check the extent of navigable terrain
            if Rover.Is_Stuck:
                # Rover is stuck, unstuck it
                Rover.brake = 0;
                Rover.throttle = Rover.StuckThrottle;
                Rover.steer = Rover.StuckSteering;
            elif is_terrain_navigable(Rover):
                # If mode is forward, navigable terrain looks good
                # and velocity is below max, then throttle
                if Rover.vel < Rover.max_vel:
                    # Set throttle value to throttle setting
                    Rover.throttle = Rover.throttle_set
                else: # Else coast
                    Rover.throttle = 0
                Rover.brake = 0
                # Set steering to average angle clipped to the range +/- 15
                Weighted_Angles = np.mean(Rover.nav_angles);
                Rover.steer = np.clip(np.mean(Weighted_Angles * 180/np.pi), -15, 15)
            else:
                # Set mode to "stop" and hit the brakes!
                Rover.throttle = 0
                # Set brake to stored brake value
                Rover.brake = Rover.brake_set
                Rover.steer = -15;
                Rover.mode = 'stop';
        # If we're already in "stop" mode then make different decisions
        elif Rover.mode == 'stop':
            # If we're in stop mode but still moving keep braking
            if Rover.vel > 0.2:
                Rover.throttle = 0
                Rover.brake = Rover.brake_set
                Rover.steer = 0
            # If we're not moving (vel < 0.2) then do something else
            elif Rover.vel <= 0.2:
                # Now we're stopped and we have vision data to see if there's a path forward
                if len(Rover.nav_angles) < Rover.go_forward:
                    # Release the brake to allow turning
                    Rover.brake = 0;
                    # Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning
                    Rover.steer = -15 # Could be more clever here about which way to turn
                # If we're stopped but see sufficient navigable terrain in front then go!
                if is_terrain_navigable(Rover):
                    # Set throttle back to stored value
                    Rover.throttle = Rover.throttle_set
                    # Release the brake
                    Rover.brake = 0
                    # Set steer to mean angle
                    Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)
                    Rover.mode = 'forward'
    # Just to make the rover do something
    # even if no modifications have been made to the code
    else:
        Rover.throttle = Rover.throttle_set
        Rover.steer = 0
        Rover.brake = 0
    # If in a state where want to pickup a rock send pickup command
    if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
        Rover.send_pickup = True
        Rover.mode = 'stop';
    # Shift the steering history buffer (used by is_rover_stuck).
    Rover.SteerVel[0:9] = Rover.SteerVel[1:10];
    Rover.SteerVel[9] = Rover.steer;
    # Mapping is only trusted while the rover is actually moving.
    if Rover.vel == 0:
        Rover.Ok_To_Map = 0;
    else:
        Rover.Ok_To_Map = 1;
    return Rover
| [
"numpy.clip",
"numpy.mean",
"numpy.abs",
"numpy.diff",
"numpy.random.randint"
] | [((211, 237), 'numpy.mean', 'np.mean', (['Rover.rock_angles'], {}), '(Rover.rock_angles)\n', (218, 237), True, 'import numpy as np\n'), ((261, 306), 'numpy.clip', 'np.clip', (['(angle_to_rock * 180 / np.pi)', '(-15)', '(15)'], {}), '(angle_to_rock * 180 / np.pi, -15, 15)\n', (268, 306), True, 'import numpy as np\n'), ((1123, 1151), 'numpy.diff', 'np.diff', (['Rover.SteerVel[0:6]'], {}), '(Rover.SteerVel[0:6])\n', (1130, 1151), True, 'import numpy as np\n'), ((1180, 1233), 'numpy.abs', 'np.abs', (['(Rover.new_perc_mapped - Rover.old_perc_mapped)'], {}), '(Rover.new_perc_mapped - Rover.old_perc_mapped)\n', (1186, 1233), True, 'import numpy as np\n'), ((1269, 1310), 'numpy.abs', 'np.abs', (['(Rover.total_time - Rover.map_time)'], {}), '(Rover.total_time - Rover.map_time)\n', (1275, 1310), True, 'import numpy as np\n'), ((1350, 1366), 'numpy.abs', 'np.abs', (['SteerVel'], {}), '(SteerVel)\n', (1356, 1366), True, 'import numpy as np\n'), ((1663, 1689), 'numpy.random.randint', 'np.random.randint', (['(-15)', '(16)'], {}), '(-15, 16)\n', (1680, 1689), True, 'import numpy as np\n'), ((1721, 1745), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {}), '(-1, 2)\n', (1738, 1745), True, 'import numpy as np\n'), ((162, 185), 'numpy.abs', 'np.abs', (['Rover.rock_dist'], {}), '(Rover.rock_dist)\n', (168, 185), True, 'import numpy as np\n'), ((3616, 3641), 'numpy.mean', 'np.mean', (['Rover.nav_angles'], {}), '(Rover.nav_angles)\n', (3623, 3641), True, 'import numpy as np\n'), ((3681, 3719), 'numpy.mean', 'np.mean', (['(Weighted_Angles * 180 / np.pi)'], {}), '(Weighted_Angles * 180 / np.pi)\n', (3688, 3719), True, 'import numpy as np\n'), ((5289, 5328), 'numpy.mean', 'np.mean', (['(Rover.nav_angles * 180 / np.pi)'], {}), '(Rover.nav_angles * 180 / np.pi)\n', (5296, 5328), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from datetime import date, datetime
from odoo.tests.common import Form
from odoo.addons.hr_holidays.tests.common import TestHrHolidaysCommon
from odoo.exceptions import ValidationError
class TestAutomaticLeaveDates(TestHrHolidaysCommon):
    """Check that half-day leave requests compute their day/hour counts and
    datetime bounds from the employee's working calendar (including empty,
    multi-interval, cross-day and two-week calendars)."""

    def setUp(self):
        super(TestAutomaticLeaveDates, self).setUp()
        # Leave type requiring no allocation, counted in working time.
        self.leave_type = self.env['hr.leave.type'].create({
            'name': 'Automatic Test',
            'time_type': 'leave',
            'allocation_type': 'no',
            'validity_start': False,
        })

    def test_no_attendances(self):
        """A calendar with no attendances falls back to a default duration."""
        calendar = self.env['resource.calendar'].create({
            'name': 'No Attendances',
            'attendance_ids': [(5, 0, 0)],
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')

    def test_single_attendance_on_morning_and_afternoon(self):
        """One 4h morning block and one 4h afternoon block: each half day is 4 hours."""
        calendar = self.env['resource.calendar'].create({
            'name': 'simple morning + afternoon',
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'monday morning',
                                   'hour_from': 8,
                                   'hour_to': 12,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                               }),
                               (0, 0, {
                                   'name': 'monday afternoon',
                                   'hour_from': 13,
                                   'hour_to': 17,
                                   'day_period': 'afternoon',
                                   'dayofweek': '0',
                               })]
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, .5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')

            leave_form.request_date_from_period = 'pm'

            self.assertEqual(leave_form.number_of_days_display, .5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')

    def test_multiple_attendance_on_morning(self):
        """Two separate morning intervals (2h + 2h) still total 4 hours for the am half."""
        calendar = self.env['resource.calendar'].create({
            'name': 'multi morning',
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'monday morning 1',
                                   'hour_from': 8,
                                   'hour_to': 10,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                               }),
                               (0, 0, {
                                   'name': 'monday morning 2',
                                   'hour_from': 10.25,
                                   'hour_to': 12.25,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                               }),
                               (0, 0, {
                                   'name': 'monday afternoon',
                                   'hour_from': 13,
                                   'hour_to': 17,
                                   'day_period': 'afternoon',
                                   'dayofweek': '0',
                               })]
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, .5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')

            leave_form.request_date_from_period = 'pm'

            self.assertEqual(leave_form.number_of_days_display, .5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')

    def test_attendance_on_morning(self):
        """A single 8h interval marked 'morning': both halves count the full 8 hours."""
        calendar = self.env['resource.calendar'].create({
            'name': 'Morning only',
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'Monday All day',
                                   'hour_from': 8,
                                   'hour_to': 16,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                               })],
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            # Ask for morning
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '8 Hours')

            # Ask for afternoon
            leave_form.request_date_from_period = 'pm'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '8 Hours')

    def test_attendance_next_day(self):
        """No attendance on the requested day: hours come from the next working day."""
        self.env.user.tz = 'Europe/Brussels'
        calendar = self.env['resource.calendar'].create({
            'name': 'auto next day',
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'tuesday morning',
                                   'hour_from': 8,
                                   'hour_to': 12,
                                   'day_period': 'morning',
                                   'dayofweek': '1',
                               })]
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
            # Datetimes are stored in UTC: 8h Brussels (CEST) == 6h UTC.
            self.assertEqual(leave_form.date_from, datetime(2019, 9, 2, 6, 0, 0))
            self.assertEqual(leave_form.date_to, datetime(2019, 9, 2, 10, 0, 0))

    def test_attendance_previous_day(self):
        """No attendance on the requested day: hours come from the previous working day."""
        self.env.user.tz = 'Europe/Brussels'
        calendar = self.env['resource.calendar'].create({
            'name': 'auto next day',
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'monday morning',
                                   'hour_from': 8,
                                   'hour_to': 12,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                               })]
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            leave_form.request_date_from = date(2019, 9, 3)
            leave_form.request_date_to = date(2019, 9, 3)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
            self.assertEqual(leave_form.date_from, datetime(2019, 9, 3, 6, 0, 0))
            self.assertEqual(leave_form.date_to, datetime(2019, 9, 3, 10, 0, 0))

    def test_2weeks_calendar(self):
        """Two-week calendar: the same weekday yields different hours per week type."""
        self.env.user.tz = 'Europe/Brussels'
        calendar = self.env['resource.calendar'].create({
            'name': 'auto next day',
            'two_weeks_calendar': True,
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'monday morning odd week',
                                   'hour_from': 8,
                                   'hour_to': 12,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                                   'week_type': '0',
                               }),
                               (0, 0, {
                                   'name': 'monday morning even week',
                                   'hour_from': 10,
                                   'hour_to': 12,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                                   'week_type': '1',
                               })]
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            # even week, works 2 hours
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '2 Hours')
            self.assertEqual(leave_form.date_from, datetime(2019, 9, 2, 8, 0, 0))
            self.assertEqual(leave_form.date_to, datetime(2019, 9, 2, 10, 0, 0))

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            # odd week, works 4 hours
            leave_form.request_date_from = date(2019, 9, 9)
            leave_form.request_date_to = date(2019, 9, 9)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
            self.assertEqual(leave_form.date_from, datetime(2019, 9, 9, 6, 0, 0))
            self.assertEqual(leave_form.date_to, datetime(2019, 9, 9, 10, 0, 0))

    def test_2weeks_calendar_next_week(self):
        """Two-week calendar with no attendance in the requested week: fall through
        to the other week's schedule."""
        self.env.user.tz = 'Europe/Brussels'
        calendar = self.env['resource.calendar'].create({
            'name': 'auto next day',
            'two_weeks_calendar': True,
            'attendance_ids': [(5, 0, 0),
                               (0, 0, {
                                   'name': 'monday morning odd week',
                                   'hour_from': 8,
                                   'hour_to': 12,
                                   'day_period': 'morning',
                                   'dayofweek': '0',
                                   'week_type': '0',
                               })]
        })
        employee = self.employee_emp
        employee.resource_calendar_id = calendar

        with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
            leave_form.holiday_status_id = self.leave_type
            # even week, does not work
            leave_form.request_date_from = date(2019, 9, 2)
            leave_form.request_date_to = date(2019, 9, 2)
            leave_form.request_unit_half = True
            leave_form.request_date_from_period = 'am'

            self.assertEqual(leave_form.number_of_days_display, 0.5)
            self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
            self.assertEqual(leave_form.date_from, datetime(2019, 9, 2, 6, 0, 0))
            self.assertEqual(leave_form.date_to, datetime(2019, 9, 2, 10, 0, 0))
| [
"datetime.datetime",
"datetime.date"
] | [((1035, 1051), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (1039, 1051), False, 'from datetime import date, datetime\n'), ((1093, 1109), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (1097, 1109), False, 'from datetime import date, datetime\n'), ((2577, 2593), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (2581, 2593), False, 'from datetime import date, datetime\n'), ((2635, 2651), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (2639, 2651), False, 'from datetime import date, datetime\n'), ((4651, 4667), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (4655, 4667), False, 'from datetime import date, datetime\n'), ((4709, 4725), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (4713, 4725), False, 'from datetime import date, datetime\n'), ((5999, 6015), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (6003, 6015), False, 'from datetime import date, datetime\n'), ((6057, 6073), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (6061, 6073), False, 'from datetime import date, datetime\n'), ((7456, 7472), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (7460, 7472), False, 'from datetime import date, datetime\n'), ((7514, 7530), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (7518, 7530), False, 'from datetime import date, datetime\n'), ((8819, 8835), 'datetime.date', 'date', (['(2019)', '(9)', '(3)'], {}), '(2019, 9, 3)\n', (8823, 8835), False, 'from datetime import date, datetime\n'), ((8877, 8893), 'datetime.date', 'date', (['(2019)', '(9)', '(3)'], {}), '(2019, 9, 3)\n', (8881, 8893), False, 'from datetime import date, datetime\n'), ((10729, 10745), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (10733, 10745), False, 'from datetime import date, datetime\n'), 
((10787, 10803), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (10791, 10803), False, 'from datetime import date, datetime\n'), ((11455, 11471), 'datetime.date', 'date', (['(2019)', '(9)', '(9)'], {}), '(2019, 9, 9)\n', (11459, 11471), False, 'from datetime import date, datetime\n'), ((11513, 11529), 'datetime.date', 'date', (['(2019)', '(9)', '(9)'], {}), '(2019, 9, 9)\n', (11517, 11529), False, 'from datetime import date, datetime\n'), ((12960, 12976), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (12964, 12976), False, 'from datetime import date, datetime\n'), ((13018, 13034), 'datetime.date', 'date', (['(2019)', '(9)', '(2)'], {}), '(2019, 9, 2)\n', (13022, 13034), False, 'from datetime import date, datetime\n'), ((7829, 7858), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(2)', '(6)', '(0)', '(0)'], {}), '(2019, 9, 2, 6, 0, 0)\n', (7837, 7858), False, 'from datetime import date, datetime\n'), ((7909, 7939), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(2)', '(10)', '(0)', '(0)'], {}), '(2019, 9, 2, 10, 0, 0)\n', (7917, 7939), False, 'from datetime import date, datetime\n'), ((9192, 9221), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(3)', '(6)', '(0)', '(0)'], {}), '(2019, 9, 3, 6, 0, 0)\n', (9200, 9221), False, 'from datetime import date, datetime\n'), ((9272, 9302), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(3)', '(10)', '(0)', '(0)'], {}), '(2019, 9, 3, 10, 0, 0)\n', (9280, 9302), False, 'from datetime import date, datetime\n'), ((11101, 11130), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(2)', '(8)', '(0)', '(0)'], {}), '(2019, 9, 2, 8, 0, 0)\n', (11109, 11130), False, 'from datetime import date, datetime\n'), ((11181, 11211), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(2)', '(10)', '(0)', '(0)'], {}), '(2019, 9, 2, 10, 0, 0)\n', (11189, 11211), False, 'from datetime import date, datetime\n'), ((11827, 11856), 'datetime.datetime', 'datetime', 
(['(2019)', '(9)', '(9)', '(6)', '(0)', '(0)'], {}), '(2019, 9, 9, 6, 0, 0)\n', (11835, 11856), False, 'from datetime import date, datetime\n'), ((11907, 11937), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(9)', '(10)', '(0)', '(0)'], {}), '(2019, 9, 9, 10, 0, 0)\n', (11915, 11937), False, 'from datetime import date, datetime\n'), ((13332, 13361), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(2)', '(6)', '(0)', '(0)'], {}), '(2019, 9, 2, 6, 0, 0)\n', (13340, 13361), False, 'from datetime import date, datetime\n'), ((13412, 13442), 'datetime.datetime', 'datetime', (['(2019)', '(9)', '(2)', '(10)', '(0)', '(0)'], {}), '(2019, 9, 2, 10, 0, 0)\n', (13420, 13442), False, 'from datetime import date, datetime\n')] |
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import onnxruntime
class ONNXModel:
    """Thin wrapper around an ONNX Runtime inference session.

    Either a ready-made ``session`` is supplied, or one is created from
    ``model_file`` on construction.
    """

    def __init__(self, model_file=None, session=None, task_name=''):
        self.model_file = model_file
        self.task_name = task_name
        if session is not None:
            self.session = session
        else:
            # No session provided: a model file is then mandatory.
            assert model_file is not None
            self.session = onnxruntime.InferenceSession(model_file, None)
| [
"onnxruntime.InferenceSession"
] | [((393, 444), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['self.model_file', 'None'], {}), '(self.model_file, None)\n', (421, 444), False, 'import onnxruntime\n')] |
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.utils.encoding import smart_str
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.db import connections
from django.core.paginator import InvalidPage, EmptyPage, Paginator
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.contrib import messages
import subprocess
from django.utils import timezone
from servers.models import Server
from fabrun.models import Task
from fabrun.tasks import run_task
import datetime
KEYWORDS = ('[$AG:NeedGestion]', '[$AG:NeedKM]', '[$AG:NeedUser]', '[$AG:NeedKomUser]', '[$AG:NeedSudo]', '[$AG:NeedMysqlPassword]', '[$AG:NeedSrvIp]')
@login_required
@staff_member_required
def home(request):
    """Render the fabric task dashboard and, on POST, queue new tasks.

    A POST request carries a ``script`` (the fabric command to run) and one
    or more ``server`` primary keys; a Task row is created and queued via
    celery for each selected server.  In all cases the page is rendered with
    the list of available fabric commands, the manageable servers and the
    task history.

    Fixes: removed the dead ``liste = []`` assignment (immediately
    overwritten below) and a stale commented-out per-command description
    loop.
    """
    if request.method == 'POST':
        task = request.POST.get('script')
        if task:
            for spk in request.POST.getlist('server'):
                server = get_object_or_404(Server, pk=spk)
                # One Task per selected server, executed asynchronously.
                t = Task(creation_date=timezone.now(), server=server, command=task)
                t.save()
                run_task.delay(t.pk)
                messages.success(request, "Created task for server " + str(server))
    # 'fab --shortlist' prints one available command name per line.
    out, __ = subprocess.Popen(['fab', '--shortlist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
    liste = out.split('\n')
    servers = Server.objects.exclude(ssh_connection_string_from_gestion=None).order_by('name').all()
    tasks = Task.objects.order_by('-creation_date').all()
    return render_to_response('fabrun/home.html', {'liste': liste, 'tasks': tasks, 'servers': servers}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def show_run(request, pk):
    """Display the details/output of a single fabric task run."""
    return render_to_response(
        'fabrun/show_run.html',
        {'task': get_object_or_404(Task, pk=pk)},
        context_instance=RequestContext(request),
    )
@login_required
@staff_member_required
def clean_up(request):
    """Delete fabric task runs older than one day, then redirect home."""
    cutoff = timezone.now() - datetime.timedelta(days=1)
    Task.objects.filter(creation_date__lt=cutoff).delete()
    messages.success(request, "Old fabric runs have been deleted")
    return HttpResponseRedirect(reverse('fabrun.views.home'))
@login_required
@staff_member_required
def get_description(request):
    """Return the cleaned-up description of a fabric command as plain text.

    The command name is validated against ``fab --shortlist`` first; an
    unknown command yields a 404.
    """
    command = request.GET.get('task')
    listing, __ = subprocess.Popen(['fab', '--shortlist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
    if command not in listing.split('\n'):
        raise Http404
    details, __ = subprocess.Popen(['fab', '-d', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
    # The third line of 'fab -d <cmd>' output carries the description.
    description = details.split('\n')[2]
    # Strip the internal [$AG:...] marker keywords before returning.
    for keyword in KEYWORDS:
        description = description.replace(keyword, '')
    return HttpResponse(description.strip())
| [
"django.http.HttpResponse",
"subprocess.Popen",
"django.shortcuts.get_object_or_404",
"fabrun.models.Task.objects.order_by",
"django.template.RequestContext",
"django.core.urlresolvers.reverse",
"datetime.timedelta",
"servers.models.Server.objects.exclude",
"django.utils.timezone.now",
"django.con... | [((2762, 2792), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Task'], {'pk': 'pk'}), '(Task, pk=pk)\n', (2779, 2792), False, 'from django.shortcuts import get_object_or_404, render_to_response, redirect\n'), ((3072, 3134), 'django.contrib.messages.success', 'messages.success', (['request', '"""Old fabric runs have been deleted"""'], {}), "(request, 'Old fabric runs have been deleted')\n", (3088, 3134), False, 'from django.contrib import messages\n'), ((3168, 3196), 'django.core.urlresolvers.reverse', 'reverse', (['"""fabrun.views.home"""'], {}), "('fabrun.views.home')\n", (3175, 3196), False, 'from django.core.urlresolvers import reverse\n'), ((3836, 3861), 'django.http.HttpResponse', 'HttpResponse', (['description'], {}), '(description)\n', (3848, 3861), False, 'from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound\n'), ((1708, 1829), 'subprocess.Popen', 'subprocess.Popen', (["['fab', '--shortlist']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'settings.FABRIC_FOLDER'}), "(['fab', '--shortlist'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, cwd=settings.FABRIC_FOLDER)\n", (1724, 1829), False, 'import subprocess\n'), ((2457, 2496), 'fabrun.models.Task.objects.order_by', 'Task.objects.order_by', (['"""-creation_date"""'], {}), "('-creation_date')\n", (2478, 2496), False, 'from fabrun.models import Task\n'), ((2625, 2648), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (2639, 2648), False, 'from django.template import RequestContext\n'), ((2881, 2904), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (2895, 2904), False, 'from django.template import RequestContext\n'), ((3323, 3444), 'subprocess.Popen', 'subprocess.Popen', (["['fab', '--shortlist']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'settings.FABRIC_FOLDER'}), "(['fab', '--shortlist'], stdout=subprocess.PIPE, 
stderr=\n subprocess.PIPE, cwd=settings.FABRIC_FOLDER)\n", (3339, 3444), False, 'import subprocess\n'), ((1411, 1444), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Server'], {'pk': 'spk'}), '(Server, pk=spk)\n', (1428, 1444), False, 'from django.shortcuts import get_object_or_404, render_to_response, redirect\n'), ((1571, 1591), 'fabrun.tasks.run_task.delay', 'run_task.delay', (['t.pk'], {}), '(t.pk)\n', (1585, 1591), False, 'from fabrun.tasks import run_task\n'), ((3510, 3631), 'subprocess.Popen', 'subprocess.Popen', (["['fab', '-d', command]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'settings.FABRIC_FOLDER'}), "(['fab', '-d', command], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, cwd=settings.FABRIC_FOLDER)\n", (3526, 3631), False, 'import subprocess\n'), ((2357, 2420), 'servers.models.Server.objects.exclude', 'Server.objects.exclude', ([], {'ssh_connection_string_from_gestion': 'None'}), '(ssh_connection_string_from_gestion=None)\n', (2379, 2420), False, 'from servers.models import Server\n'), ((1485, 1499), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1497, 1499), False, 'from django.utils import timezone\n'), ((3013, 3027), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3025, 3027), False, 'from django.utils import timezone\n'), ((3030, 3056), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3048, 3056), False, 'import datetime\n')] |
########################################
# CS/CNS/EE 155 2018
# Problem Set 1
#
# Author: <NAME>
# Description: Set 1 Perceptron helper
########################################
import numpy as np
import matplotlib.pyplot as plt
def predict(x, w, b):
    """Predict the perceptron label for a single point.

    Inputs:
        x: A (D, ) shaped numpy array containing a single point.
        w: A (D, ) shaped numpy array containing the weight vector.
        b: A float containing the bias term.

    Output:
        The label (1 or -1) for the point x.
    """
    activation = np.dot(w, x) + b
    if activation >= 0:
        return 1
    return -1
def plot_data(X, Y, ax):
    """Scatter-plot a 2D dataset labeled with -1/1 on the given axes.

    Positive points are drawn as green '+' markers, negative points as
    red dots.
    """
    positives = X[Y == 1]
    negatives = X[Y == -1]
    ax.scatter(positives[:, 0], positives[:, 1], c='green', marker='+')
    ax.scatter(negatives[:, 0], negatives[:, 1], c='red')
def boundary(x_1, w, b):
    """Return the x_2 on the 2D perceptron decision boundary for a given x_1.

    Solves w[0]*x_1 + w[1]*x_2 + b = 0 for x_2.  A zero second weight is
    replaced by a tiny denominator to avoid division by zero.
    """
    denom = w[1] if w[1] != 0.0 else 1e-6
    return (-w[0] * x_1 - b) / denom
def plot_perceptron(w, b, ax):
    """Draw a 2D perceptron decision boundary and shade the -1 region.

    The line is drawn across the current x-limits of ``ax``; the half-plane
    classified as -1 is filled with translucent red.
    """
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    x_2s = [boundary(x_1, w, b) for x_1 in xlim]
    ax.plot(xlim, x_2s)
    # Probe the bottom-left corner to decide which side is the -1 class.
    if predict([xlim[0], ylim[0]], w, b) == -1:
        lower, upper = ylim[0], x_2s
    else:
        lower, upper = x_2s, ylim[-1]
    ax.fill_between(xlim, lower, upper, facecolor='red', alpha=0.5)
| [
"numpy.dot"
] | [((663, 675), 'numpy.dot', 'np.dot', (['w', 'x'], {}), '(w, x)\n', (669, 675), True, 'import numpy as np\n')] |
from models import StandardHMM, DenseHMM, HMMLoggingMonitor
from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences
from data import penntreebank_tag_sequences, protein_sequences, train_test_split
from datetime import datetime
import os
import copy
import numpy as np
""" Initializes a StandardHMM and a DenseHMM and fits given data to it
"""
def _standard_vs_dense(train_X, test_X, standard_params=None, dense_params=None, gt_AB=None):
    """Fit one or more StandardHMMs and one DenseHMM on the same data.

    standard_params may be None (defaults), a single kwargs dict, or a
    list/tuple of kwargs dicts (one StandardHMM per entry, e.g. normal plus
    a parameter-fair variant).  dense_params is a kwargs dict for DenseHMM;
    its 'opt_schemes' entry selects which dense optimizations run ('em'
    and/or 'cooc').  gt_AB optionally carries ground-truth (A, B) matrices
    that are forwarded to the co-occurrence fit.
    """
    t = Timer()
    # prepare_data yields (sequences, lengths, unique symbols) — TODO confirm
    # the third element's exact meaning against utils.prepare_data.
    train_X, train_lengths, train_unique = prepare_data(train_X)
    test_X, test_lengths, test_unique = prepare_data(test_X)
    standard_hmms = []
    if standard_params is None:
        standard_hmms.append(StandardHMM())
    elif type(standard_params) is list or type(standard_params) is tuple:
        # One standard HMM per parameter set.
        for params in standard_params:
            standard_hmms.append(StandardHMM(**params))
    else:
        standard_params = dict(standard_params)
        standard_hmms.append(StandardHMM(**standard_params))
    dense_params = {} if dense_params is None else dict(dense_params)
    dense_hmm = DenseHMM(**dense_params)
    opt_schemes = dict_get(dense_params, 'opt_schemes', default=('em', 'cooc'))
    if 'em' in opt_schemes:
        t.tic("Fitting dense HMM in mode 'em' ...")
        dense_hmm.fit(train_X, train_lengths, test_X, test_lengths)
        t.toc("Fitting finished.")
    if 'cooc' in opt_schemes:
        t.tic("Fitting dense HMM in mode 'cooc' ...")
        dense_hmm.fit_coocs(train_X, train_lengths, test_X, test_lengths, gt_AB)
        t.toc("Fitting finished.")
    # The standard baselines are fitted last, after the dense runs.
    for i, standard_hmm in enumerate(standard_hmms):
        t.tic("Fitting standard hmm %d/%d" % (i+1, len(standard_hmms)))
        standard_hmm.fit(train_X, train_lengths, test_X, test_lengths)
        t.toc("Fitting finished.")
t.toc("Fitting finished.")
def _dirichlet_random_numbers(alpha_size, sample_size, dirichlet_param, random_state):
return random_state.dirichlet(np.ones(alpha_size) * dirichlet_param,
size=(sample_size,))
""" Initializes the transition matrices of given hmm to dirichlet distributions.
Assumes that random_state is an instance of np.RandomState """
def _dirichlet_matrix_initializer(dirichlet_param, n_hidden_states, n_observables, random_state):
pi = 1. / n_hidden_states * np.ones(n_hidden_states)
A = _dirichlet_random_numbers(n_hidden_states, n_hidden_states, dirichlet_param, random_state)
B = _dirichlet_random_numbers(n_observables, n_hidden_states, dirichlet_param, random_state) # Note: This results in an n x m matrix
return pi, A, B
def _stationary_matrix_init(n, m, rng, matrix_init_func):
    """Wrap ``matrix_init_func`` so pi becomes the stationary distribution
    of the sampled transition matrix A (the sampled pi is discarded)."""
    _, A, B = matrix_init_func(n, m, rng)
    return compute_stationary(A), A, B
def _default_standard_hmm_init():
return dict(n_hidden_states=1, startprob_prior=1.0, transmat_prior=1.0,
random_state=None, em_iter=10, convergence_tol=1e-2, verbose=False)
def _default_dense_hmm_init():
return dict(n_hidden_states=1, n_observables=None, startprob_prior=1.0, transmat_prior=1.0,
random_state=None, em_iter=10, convergence_tol=1e-10, verbose=False,
params="ste", init_params="ste", logging_monitor=None, mstep_config=None)
def _compute_fair_standard_n(m, n_dense, l_dense):
pre = - (m - 1)/2
discriminant = pre**2 + l_dense*(3*n_dense + m + 1)
if discriminant < 0:
raise Exception("Complex solution")
n_plus = pre + np.sqrt(discriminant)
n_minus = pre - np.sqrt(discriminant)
n = np.max((n_plus, n_minus))
if n <= 0:
raise Exception("Only negative solutions")
return int(np.around(n))
def _parse_base_parameters(exp_params, path_dict):
    """Fill in the experiment parameters shared by all experiment types.

    Returns a shallow copy of ``exp_params`` with 'standard_params',
    'dense_params', 'dense_opt_schemes' and 'compare_to_fair_standard'
    guaranteed present (falling back to the module defaults).
    """
    # Copy so the caller's dicts are not mutated.
    path_dict = dict(path_dict)
    exp_params = dict(exp_params)
    exp_params['standard_params'] = dict_get(exp_params, 'standard_params', default=_default_standard_hmm_init(), cast=dict)
    exp_params['dense_params'] = dict_get(exp_params, 'dense_params', default=_default_dense_hmm_init(), cast=dict)
    exp_params['dense_opt_schemes'] = dict_get(exp_params, 'dense_opt_schemes', default=('em',))
    exp_params['compare_to_fair_standard'] = dict_get(exp_params, 'compare_to_fair_standard', default=False)
    return exp_params
def _parse_syntheticgt_parameters(exp_params, path_dict):
    """Fill in defaults for experiments driven by a synthetic ground-truth HMM.

    On top of the base parameters, adds 'gt_params' (ground-truth
    StandardHMM kwargs), the train/test sampling sizes and the
    'gt_stationary' flag.  When an experiment directory is present in
    ``path_dict``, also attaches an HMMLoggingMonitor for the ground-truth
    EM fit.
    """
    exp_params = _parse_base_parameters(exp_params, path_dict)
    exp_params['gt_params'] = dict_get(exp_params, 'gt_params', default=_default_standard_hmm_init(), cast=dict)
    exp_params['n_seqs_train'] = dict_get(exp_params, 'n_seqs_train', default=10, cast=int)
    exp_params['seqlen_train'] = dict_get(exp_params, 'seqlen_train', default=10, cast=int)
    exp_params['n_seqs_test'] = dict_get(exp_params, 'n_seqs_test', default=10, cast=int)
    exp_params['seqlen_test'] = dict_get(exp_params, 'seqlen_test', default=10, cast=int)
    exp_params['gt_stationary'] = dict_get(exp_params, 'gt_stationary', default=False)
    # The ground truth must emit over the experiment's full alphabet.
    exp_params['gt_params']['n_observables'] = exp_params['n_emissions']
    # Making sure the initializer returns stationary pi (if gt_stationary = true)...
    init_params = dict_get(exp_params['gt_params'], 'init_params', default=None)
    if init_params is not None and callable(init_params) and exp_params['gt_stationary']:
        init_params_ = lambda n, m, rng: _stationary_matrix_init(n, m, rng, init_params)
        init_params = init_params_
        exp_params['gt_params']['init_params'] = init_params
    if 'experiment_directory' in path_dict:
        exp_dir = str(path_dict['experiment_directory'])
        check_dir(exp_dir)
        # Log GT EM optimization by default
        gt_log_config = dict_get(exp_params, 'gt_log_config', default=dict(), cast=dict)
        gt_log_config['exp_folder'] = dict_get(gt_log_config, 'exp_folder', default=exp_dir)
        gt_log_config['log_folder'] = dict_get(gt_log_config, 'log_folder', default='/gt_logs/em_opt')
        gt_logmon = HMMLoggingMonitor(gt_log_config)
        exp_params['gt_params']['logging_monitor'] = gt_logmon
    return exp_params
def _parse_syntheticgt_dirichlet_parameters(exp_params, path_dict):
    """Fill in defaults for synthetic-GT experiments whose ground truth is
    initialized from symmetric Dirichlet draws.

    Adds 'dirichlet_param' and 'n_emissions' and installs a Dirichlet-based
    'init_params' callable for the ground-truth HMM (wrapped so pi becomes
    stationary when 'gt_stationary' is set).
    """
    exp_params = _parse_syntheticgt_parameters(exp_params, path_dict)
    exp_params['dirichlet_param'] = dict_get(exp_params, 'dirichlet_param', default=0.1, cast=float)
    exp_params['n_emissions'] = dict_get(exp_params, 'n_emissions', default=None)
    # Initialize ground truth hmm
    def _dirichlet_matrix_init(n, m, rng):
        return _dirichlet_matrix_initializer(exp_params['dirichlet_param'], n, m, rng)
    init_params = dict_get(exp_params['gt_params'], 'init_params', default=_dirichlet_matrix_init)
    if init_params is not None and callable(init_params) and exp_params['gt_stationary']:
        # Deep-copy before wrapping to avoid a self-referencing closure.
        init_params_ = copy.deepcopy(init_params)
        init_params = lambda n, m, rng: _stationary_matrix_init(n, m, rng, init_params_)
    exp_params['gt_params']['init_params'] = init_params
    return exp_params
def _parse_standard_and_dense(exp_params, path_dict, n_emissions):
    """Propagate the emission-alphabet size into the standard and dense HMM
    configurations, optionally derive a parameter-'fair' standard variant,
    and attach per-model HMMLoggingMonitors under the experiment directory.
    """
    exp_params['n_emissions'] = n_emissions
    # Number of emissions must be the same for all models
    exp_params['standard_params']['n_observables'] = n_emissions
    exp_params['dense_params']['n_observables'] = n_emissions
    # Set opt_schemes that are needed
    exp_params['dense_params']['opt_schemes'] = exp_params['dense_opt_schemes']
    # Setup fair standard hmm
    if exp_params['compare_to_fair_standard']:
        # TODO check l_uz = l_vw
        n_dense, l_dense = exp_params['dense_params']['n_hidden_states'], exp_params['dense_params']['mstep_config']['l_uz']
        n_fair = _compute_fair_standard_n(exp_params['n_emissions'], n_dense, l_dense)
        exp_params['fair_standard_params'] = copy.deepcopy(exp_params['standard_params'])
        exp_params['fair_standard_params']['n_hidden_states'] = n_fair
    if 'experiment_directory' in path_dict:
        exp_dir = str(path_dict['experiment_directory'])
        check_dir(exp_dir)
        # Each model family logs into its own sub-folder of the experiment.
        standard_log_config = dict_get(exp_params, 'standard_log_config', default=dict(), cast=dict)
        dense_log_config = dict_get(exp_params, 'dense_log_config', default=dict(), cast=dict)
        standard_log_config['exp_folder'] = dict_get(standard_log_config, 'exp_folder', default=exp_dir)
        standard_log_config['log_folder'] = dict_get(standard_log_config, 'log_folder', default='/standard_logs')
        dense_log_config['exp_folder'] = dict_get(dense_log_config, 'exp_folder', default=exp_dir)
        dense_log_config['log_folder'] = dict_get(dense_log_config, 'log_folder', default='/dense_logs')
        standard_logmon, dense_logmon = HMMLoggingMonitor(standard_log_config), HMMLoggingMonitor(dense_log_config)
        exp_params['standard_params']['logging_monitor'] = standard_logmon
        exp_params['dense_params']['logging_monitor'] = dense_logmon
        fair_standard_logmon = None
        if 'fair_standard_params' in exp_params:
            fair_standard_log_config = dict_get(exp_params, 'fair_standard_log_config', default=dict(), cast=dict)
            fair_standard_log_config['exp_folder'] = dict_get(fair_standard_log_config, 'exp_folder', default=exp_dir)
            fair_standard_log_config['log_folder'] = dict_get(fair_standard_log_config, 'log_folder', default='/fair_standard_logs')
            fair_standard_logmon = HMMLoggingMonitor(fair_standard_log_config)
            exp_params['fair_standard_params']['logging_monitor'] = fair_standard_logmon
    return exp_params
def _sample_sequences_from_gt_hmm(exp_params, path_dict, gt_hmm=None, sample_retries=100):
    """Sample train and test sequences from a ground-truth StandardHMM.

    Sampling is retried up to ``sample_retries`` times until all
    ``n_observables`` symbols occur in the sampled data (checked with
    ``is_multinomial``).  Ground-truth parameters are saved under
    ``path_dict['gt_dir']`` and the data under ``path_dict['data_dir']``.

    Fixes: the post-sampling validity check for the test set previously
    re-checked ``train_X`` (always true at that point), so a degenerate
    test sample went unnoticed; it now checks ``test_X``.  An unused
    ``gt_samples`` local was removed.

    Returns:
        (train_X, test_X, gt_AB) where gt_AB is (A, B) of the ground truth
        when 'gt_stationary' is set, else None.
    """
    t = Timer()
    n_emissions = exp_params['gt_params']['n_observables']
    if gt_hmm is None:
        gt_hmm = StandardHMM(**exp_params['gt_params'])
    # Sample train and test sequences, save them
    t.tic()
    cur_sample_try = 0
    train_X = None
    while cur_sample_try < sample_retries and not is_multinomial(train_X, min_symbols=n_emissions):
        train_X = gt_hmm.sample_sequences(exp_params['n_seqs_train'], exp_params['seqlen_train'])
        cur_sample_try += 1
    if not is_multinomial(train_X, min_symbols=n_emissions):
        raise Exception("Could not sample a multinomial distribution. Try to increase sequence length and number of sequences. Or change the dirichlet parameter")
    cur_sample_try = 0
    test_X = None
    while cur_sample_try < sample_retries and not is_multinomial(test_X, min_symbols=n_emissions):
        test_X = gt_hmm.sample_sequences(exp_params['n_seqs_test'], exp_params['seqlen_test'])
        cur_sample_try += 1
    t.toc("Generated train and test sequences")
    # BUGFIX: validate the freshly sampled test set (the original re-checked
    # train_X here, which was already verified above).
    if not is_multinomial(test_X, min_symbols=n_emissions):
        raise Exception("Could not sample a multinomial distribution. Try to increase sequence length and number of sequences. Or change the dirichlet parameter.")
    t.tic()
    if 'gt_dir' in path_dict:
        gt_dir = str(path_dict['gt_dir'])
        check_dir(gt_dir)
        np.save(gt_dir + '/transmat', gt_hmm.transmat_)
        np.save(gt_dir + '/emissionprob', gt_hmm.emissionprob_)
        np.save(gt_dir + '/startprob', gt_hmm.startprob_)
    t.toc("Ground truth parameters logged")
    gt_AB = None
    if exp_params['gt_stationary']:
        gt_AB = (gt_hmm.transmat_, gt_hmm.emissionprob_)
    _save_data(path_dict, train_X, test_X, gt_AB)
    return train_X, test_X, gt_AB
def _save_data(path_dict, train_X, test_X=None, gt_AB=None):
    """Persist train/test sequences and optional ground-truth (A, B) as
    .npy files under ``path_dict['data_dir']``.

    A no-op when 'data_dir' is absent from ``path_dict``.
    """
    if 'data_dir' not in path_dict:
        return
    data_dir = str(path_dict['data_dir'])
    check_dir(data_dir)
    np.save(data_dir + '/train_X', train_X)
    if test_X is not None:
        np.save(data_dir + '/test_X', test_X)
    if gt_AB is not None:
        np.save(data_dir + '/gt_A', gt_AB[0])
        np.save(data_dir + '/gt_B', gt_AB[1])
    timestamp_msg("Saved data in %s" % data_dir)
def _save_experiment_parameters(exp_params, path_dict):
    """Save a serializable snapshot of the experiment parameters.

    Callables and HMMLoggingMonitor instances inside the parameter dicts
    are replaced by their names / plain log-config dicts before the
    snapshot is written to ``<experiment_directory>/exp_params.npy``.

    Fixes: the dense logging monitor was previously looked up in
    ``standard_params`` (copy-paste bug) — it is now read from
    ``dense_params``; and when no experiment directory is configured the
    function returned an unbound name (NameError) — it now returns the
    (unserialized) parameters instead.

    Returns:
        The serializable copy of ``exp_params`` (or ``exp_params`` itself
        when no experiment directory is configured).
    """
    _exp_params = exp_params
    if 'experiment_directory' in path_dict:
        exp_dir = str(path_dict['experiment_directory'])
        check_dir(exp_dir)
        _exp_params = copy.deepcopy(exp_params)
        gt_params = dict_get(_exp_params, 'gt_params', default=None, cast=dict)
        if gt_params is not None:
            _exp_params['gt_params'] = gt_params
            init_params = dict_get(gt_params, 'init_params', default=None)
            if callable(init_params):
                # Store the function name, not the (unpicklable) callable.
                _exp_params['gt_params']['init_params'] = str(init_params.__name__)
            gt_logmon = dict_get(gt_params, 'logging_monitor', default=None)
            if gt_logmon is not None and isinstance(gt_logmon, HMMLoggingMonitor):
                _exp_params['gt_params']['logging_monitor'] = dict(gt_logmon.log_config)
        standard_params = dict_get(_exp_params, 'standard_params', default=None, cast=dict)
        standard_logmon = dict_get(standard_params, 'logging_monitor', default=None)
        if standard_logmon is not None and isinstance(standard_logmon, HMMLoggingMonitor):
            _exp_params['standard_params']['logging_monitor'] = dict(standard_logmon.log_config)
        dense_params = dict_get(_exp_params, 'dense_params', default=None, cast=dict)
        # BUGFIX: read the dense monitor from dense_params (was standard_params).
        dense_logmon = dict_get(dense_params, 'logging_monitor', default=None)
        if dense_logmon is not None and isinstance(dense_logmon, HMMLoggingMonitor):
            _exp_params['dense_params']['logging_monitor'] = dict(dense_logmon.log_config)
        fair_standard_params = dict_get(_exp_params, 'fair_standard_params', default=None, cast=dict)
        fair_standard_logmon = dict_get(fair_standard_params, 'logging_monitor', default=None)
        if fair_standard_logmon is not None and isinstance(fair_standard_logmon, HMMLoggingMonitor):
            _exp_params['fair_standard_params']['logging_monitor'] = dict(fair_standard_logmon.log_config)
        np.save(exp_dir + '/exp_params', _exp_params)
        timestamp_msg("Saved experiment parameters in %s" % exp_dir)
    return _exp_params
def synthetic_sequences_experiment(exp_params, path_dict, sample_retries=100, reuse_sequences=None):
    """Run the fully-synthetic experiment: sample data from a Dirichlet-
    initialized ground-truth HMM and fit standard vs. dense models on it.

    reuse_sequences may be a (train_X, test_X, gt_AB) triple to skip the
    sampling step (e.g. when repeating the experiment with other model
    configurations).
    """
    t_exp = Timer()
    start_time = t_exp.tic("Starting a 'synthetic sequences' experiment.")
    # Get parameters
    t = Timer()
    t.tic("Parsing parameters ...")
    exp_params = _parse_syntheticgt_dirichlet_parameters(exp_params, path_dict)
    exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
    _exp_params = _save_experiment_parameters(exp_params, path_dict)
    t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
    train_X, test_X, gt_AB = None, None, None
    if reuse_sequences is None or type(reuse_sequences) != tuple or len(reuse_sequences) != 3:
        train_X, test_X, gt_AB = _sample_sequences_from_gt_hmm(exp_params, path_dict, sample_retries=sample_retries)
    else:
        train_X, test_X, gt_AB = reuse_sequences
        timestamp_msg("Reusing sequences")
    # The 'fair' standard variant (if configured) is fitted alongside the
    # regular standard HMM.
    if 'fair_standard_params' in exp_params:
        _standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
                           exp_params['dense_params'], gt_AB)
    else:
        _standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'], gt_AB)
    fin_time, diff = t_exp.toc("Finished a 'synthetic sequences' experiment.")
SUPPORTED_DATASETS = frozenset(('penntree_tag', 'protein'))

def get_dataset_sequences(ident, ds_params=None, log_dir=None):
    """Load the symbol sequences of a named dataset.

    Args:
        ident: Dataset identifier; one of SUPPORTED_DATASETS.
        ds_params: Optional kwargs forwarded to the dataset loader.
            (Fixed: previously a mutable ``{}`` default argument.)
        log_dir: When given, the tag<->symbol mappings are saved there.

    Returns:
        (sequences, tag_to_symb, symb_to_tag) as produced by the loader.

    Raises:
        Exception: when ``ident`` is not a supported dataset.
    """
    if ident not in SUPPORTED_DATASETS:
        raise Exception("Given Dataset %s is not supported." % str(ident))
    ds_params = {} if ds_params is None else ds_params
    sequences, tag_to_symb, symb_to_tag = None, None, None
    if ident == 'penntree_tag':
        sequences, tag_to_symb, symb_to_tag = penntreebank_tag_sequences(**ds_params)
    elif ident == 'protein':
        sequences, tag_to_symb, symb_to_tag = protein_sequences(**ds_params)
    if log_dir is not None:
        np.save(log_dir + '/symb_to_tag.npy', symb_to_tag)
        np.save(log_dir + '/tag_to_symb.npy', tag_to_symb)
    return sequences, tag_to_symb, symb_to_tag
def dataset_synthetic_sequences_experiment(exp_params, path_dict, sample_retries=100):
    """Run the semi-synthetic experiment: fit a ground-truth HMM on a real
    dataset, then sample train/test data from it and compare standard vs.
    dense models on the sampled data.
    """
    t_exp = Timer()
    exp_params = dict(exp_params)
    ident = dict_get(exp_params, 'dataset_ident', default='', cast=str)
    start_time = t_exp.tic("Starting a 'dataset synthetic sequences' experiment. (%s)" % str(ident))
    gt_dir = dict_get(path_dict, 'gt_dir', default=None)
    check_dir(gt_dir)
    ds_params = dict_get(exp_params, 'dataset_params', default=dict(), cast=dict)
    gt_sequences, _, _ = get_dataset_sequences(ident, ds_params, gt_dir)
    # Get parameters
    t = Timer()
    t.tic("Parsing parameters ...")
    # Check gt_sequences
    sequences, lengths, n_emissions = check_sequences(gt_sequences)
    # The alphabet size comes from the dataset, not from the config.
    exp_params['n_emissions'] = n_emissions
    exp_params = _parse_syntheticgt_parameters(exp_params, path_dict)
    exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
    _exp_params = _save_experiment_parameters(exp_params, path_dict)
    t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
    t.tic("Fitting GT HMM...")
    gt_hmm = StandardHMM(**exp_params['gt_params'])
    gt_hmm.fit(sequences, lengths)
    t.toc("Fitting finished")
    train_X, test_X, gt_AB = _sample_sequences_from_gt_hmm(exp_params, path_dict, gt_hmm=gt_hmm, sample_retries=sample_retries)
    if 'fair_standard_params' in exp_params:
        _standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
                           exp_params['dense_params'], gt_AB)
    else:
        _standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'], gt_AB)
    fin_time, diff = t_exp.toc("Finished a 'dataset synthetic sequences' experiment.")
def dataset_sequences_experiment(exp_params, path_dict, reuse_sequences=None):
    """Run the real-data experiment: split a dataset into train/test and
    compare standard vs. dense models directly on the raw sequences.

    reuse_sequences may be a (train_X, test_X) pair to skip loading and
    splitting the dataset again.
    """
    t_exp = Timer()
    exp_params = dict(exp_params)
    ident = dict_get(exp_params, 'dataset_ident', default='', cast=str)
    start_time = t_exp.tic("Starting a 'dataset sequences' experiment. (%s)" % str(ident))
    # Get parameters
    t = Timer()
    t.tic("Parsing parameters ...")
    train_perc = dict_get(exp_params, 'train_perc', default=1., cast=float)
    gt_dir = dict_get(path_dict, 'gt_dir', default=None)
    check_dir(gt_dir)
    ds_params = dict_get(exp_params, 'dataset_params', default=dict(), cast=dict)
    if reuse_sequences is None or type(reuse_sequences) != tuple or len(reuse_sequences) != 2:
        gt_sequences, _, _ = get_dataset_sequences(ident, ds_params, gt_dir)
        train_X, test_X = train_test_split(gt_sequences, train_perc)
    else:
        train_X, test_X = reuse_sequences
        timestamp_msg("Reusing sequences ...")
    # Check gt_sequences
    _, _, n_train_emissions = check_sequences(train_X)
    n_test_emissions = None
    if test_X is not None and len(test_X) > 0:
        _, _, n_test_emissions = check_sequences(test_X)
    _save_data(path_dict, train_X, test_X)
    # Both splits must cover the same alphabet, otherwise model comparison
    # would be ill-defined.
    if n_test_emissions is not None and n_train_emissions != n_test_emissions:
        raise Exception("Number of emissions in train and test sequence differs")
    exp_params['n_emissions'] = n_train_emissions
    exp_params = _parse_base_parameters(exp_params, path_dict)
    exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
    _exp_params = _save_experiment_parameters(exp_params, path_dict)
    t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
    if 'fair_standard_params' in exp_params:
        _standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
                           exp_params['dense_params'])
    else:
        _standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'])
    fin_time, diff = t_exp.toc("Finished a 'dataset sequences' experiment.")
def run_experiment(exp_type, exp_name, exp_params, reuse_setup=None):
    """Set up (or reuse) an experiment directory and dispatch to the runner
    matching ``exp_type``.

    reuse_setup may be an (experiment_directory, path_dict) pair from a
    previous setup_experiment() call.

    Returns:
        The experiment directory path.

    Raises:
        Exception: when ``exp_type`` is not a supported experiment type.
    """
    if reuse_setup is not None and type(reuse_setup) == tuple and len(reuse_setup) == 2:
        experiment_directory, path_dict = reuse_setup
    else:
        experiment_directory, path_dict = setup_experiment(exp_name, exp_params)
    supported_exp_types = ('synthetic_sequences', 'dataset_synthetic_sequences', 'dataset_sequences')
    if exp_type == 'synthetic_sequences':
        synthetic_sequences_experiment(exp_params, path_dict)
    elif exp_type == 'dataset_synthetic_sequences':
        dataset_synthetic_sequences_experiment(exp_params, path_dict)
    elif exp_type == 'dataset_sequences':
        dataset_sequences_experiment(exp_params, path_dict)
    else:
        raise Exception('Given experiment type "%s" is not supported. \n'
                        'It has to be one of the following: %s' % (str(exp_type), str(supported_exp_types)))
    print(experiment_directory)
    return experiment_directory
def setup_experiment(exp_name, exp_params):
    """Build a timestamped experiment directory path under the current
    working directory, plus the standard sub-path dictionary.

    Note: ``exp_params`` is accepted for interface symmetry but unused here.
    """
    stamp = datetime.now().strftime('%Y%m%d_%H-%M-%S')
    experiment_directory = '%s/%s%s' % (os.getcwd(), exp_name, stamp)
    path_dict = {
        'experiment_directory': experiment_directory,
        'data_dir': experiment_directory + '/data',
        'gt_dir': experiment_directory + '/gt_logs',
    }
    return experiment_directory, path_dict
| [
"data.penntreebank_tag_sequences",
"numpy.sqrt",
"utils.is_multinomial",
"copy.deepcopy",
"models.HMMLoggingMonitor",
"numpy.save",
"utils.Timer",
"numpy.max",
"utils.compute_stationary",
"utils.check_sequences",
"data.protein_sequences",
"numpy.ones",
"data.train_test_split",
"numpy.aroun... | [((571, 578), 'utils.Timer', 'Timer', ([], {}), '()\n', (576, 578), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((627, 648), 'utils.prepare_data', 'prepare_data', (['train_X'], {}), '(train_X)\n', (639, 648), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((689, 709), 'utils.prepare_data', 'prepare_data', (['test_X'], {}), '(test_X)\n', (701, 709), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((1193, 1217), 'models.DenseHMM', 'DenseHMM', ([], {}), '(**dense_params)\n', (1201, 1217), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((1241, 1302), 'utils.dict_get', 'dict_get', (['dense_params', '"""opt_schemes"""'], {'default': "('em', 'cooc')"}), "(dense_params, 'opt_schemes', default=('em', 'cooc'))\n", (1249, 1302), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((2863, 2884), 'utils.compute_stationary', 'compute_stationary', (['A'], {}), '(A)\n', (2881, 2884), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((3704, 3729), 'numpy.max', 'np.max', (['(n_plus, n_minus)'], {}), '((n_plus, n_minus))\n', (3710, 3729), True, 'import numpy as np\n'), ((4237, 4295), 'utils.dict_get', 'dict_get', (['exp_params', '"""dense_opt_schemes"""'], {'default': "('em',)"}), "(exp_params, 'dense_opt_schemes', default=('em',))\n", (4245, 4295), False, 'from utils import prepare_data, 
check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4341, 4404), 'utils.dict_get', 'dict_get', (['exp_params', '"""compare_to_fair_standard"""'], {'default': '(False)'}), "(exp_params, 'compare_to_fair_standard', default=False)\n", (4349, 4404), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4705, 4763), 'utils.dict_get', 'dict_get', (['exp_params', '"""n_seqs_train"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'n_seqs_train', default=10, cast=int)\n", (4713, 4763), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4797, 4855), 'utils.dict_get', 'dict_get', (['exp_params', '"""seqlen_train"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'seqlen_train', default=10, cast=int)\n", (4805, 4855), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4888, 4945), 'utils.dict_get', 'dict_get', (['exp_params', '"""n_seqs_test"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'n_seqs_test', default=10, cast=int)\n", (4896, 4945), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4978, 5035), 'utils.dict_get', 'dict_get', (['exp_params', '"""seqlen_test"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'seqlen_test', default=10, cast=int)\n", (4986, 5035), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), 
((5070, 5122), 'utils.dict_get', 'dict_get', (['exp_params', '"""gt_stationary"""'], {'default': '(False)'}), "(exp_params, 'gt_stationary', default=False)\n", (5078, 5122), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((5304, 5366), 'utils.dict_get', 'dict_get', (["exp_params['gt_params']", '"""init_params"""'], {'default': 'None'}), "(exp_params['gt_params'], 'init_params', default=None)\n", (5312, 5366), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6436, 6500), 'utils.dict_get', 'dict_get', (['exp_params', '"""dirichlet_param"""'], {'default': '(0.1)', 'cast': 'float'}), "(exp_params, 'dirichlet_param', default=0.1, cast=float)\n", (6444, 6500), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6533, 6582), 'utils.dict_get', 'dict_get', (['exp_params', '"""n_emissions"""'], {'default': 'None'}), "(exp_params, 'n_emissions', default=None)\n", (6541, 6582), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6775, 6860), 'utils.dict_get', 'dict_get', (["exp_params['gt_params']", '"""init_params"""'], {'default': '_dirichlet_matrix_init'}), "(exp_params['gt_params'], 'init_params', default=_dirichlet_matrix_init\n )\n", (6783, 6860), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9884, 9891), 'utils.Timer', 'Timer', ([], {}), '()\n', (9889, 9891), False, 'from utils import prepare_data, check_random_state, 
create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((14526, 14533), 'utils.Timer', 'Timer', ([], {}), '()\n', (14531, 14533), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((14643, 14650), 'utils.Timer', 'Timer', ([], {}), '()\n', (14648, 14650), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16634, 16641), 'utils.Timer', 'Timer', ([], {}), '()\n', (16639, 16641), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16688, 16747), 'utils.dict_get', 'dict_get', (['exp_params', '"""dataset_ident"""'], {'default': '""""""', 'cast': 'str'}), "(exp_params, 'dataset_ident', default='', cast=str)\n", (16696, 16747), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16867, 16910), 'utils.dict_get', 'dict_get', (['path_dict', '"""gt_dir"""'], {'default': 'None'}), "(path_dict, 'gt_dir', default=None)\n", (16875, 16910), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16915, 16932), 'utils.check_dir', 'check_dir', (['gt_dir'], {}), '(gt_dir)\n', (16924, 16932), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((17122, 17129), 'utils.Timer', 'Timer', ([], {}), '()\n', (17127, 17129), False, 'from utils import prepare_data, 
check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((17234, 17263), 'utils.check_sequences', 'check_sequences', (['gt_sequences'], {}), '(gt_sequences)\n', (17249, 17263), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((17667, 17705), 'models.StandardHMM', 'StandardHMM', ([], {}), "(**exp_params['gt_params'])\n", (17678, 17705), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((18443, 18450), 'utils.Timer', 'Timer', ([], {}), '()\n', (18448, 18450), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18497, 18556), 'utils.dict_get', 'dict_get', (['exp_params', '"""dataset_ident"""'], {'default': '""""""', 'cast': 'str'}), "(exp_params, 'dataset_ident', default='', cast=str)\n", (18505, 18556), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18682, 18689), 'utils.Timer', 'Timer', ([], {}), '()\n', (18687, 18689), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18748, 18807), 'utils.dict_get', 'dict_get', (['exp_params', '"""train_perc"""'], {'default': '(1.0)', 'cast': 'float'}), "(exp_params, 'train_perc', default=1.0, cast=float)\n", (18756, 18807), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18820, 18863), 'utils.dict_get', 'dict_get', (['path_dict', '"""gt_dir"""'], {'default': 'None'}), 
"(path_dict, 'gt_dir', default=None)\n", (18828, 18863), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18868, 18885), 'utils.check_dir', 'check_dir', (['gt_dir'], {}), '(gt_dir)\n', (18877, 18885), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((19368, 19392), 'utils.check_sequences', 'check_sequences', (['train_X'], {}), '(train_X)\n', (19383, 19392), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((2460, 2484), 'numpy.ones', 'np.ones', (['n_hidden_states'], {}), '(n_hidden_states)\n', (2467, 2484), True, 'import numpy as np\n'), ((3632, 3653), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (3639, 3653), True, 'import numpy as np\n'), ((3674, 3695), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (3681, 3695), True, 'import numpy as np\n'), ((3816, 3828), 'numpy.around', 'np.around', (['n'], {}), '(n)\n', (3825, 3828), True, 'import numpy as np\n'), ((5752, 5770), 'utils.check_dir', 'check_dir', (['exp_dir'], {}), '(exp_dir)\n', (5761, 5770), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((5951, 6005), 'utils.dict_get', 'dict_get', (['gt_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(gt_log_config, 'exp_folder', default=exp_dir)\n", (5959, 6005), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6044, 6108), 'utils.dict_get', 'dict_get', 
(['gt_log_config', '"""log_folder"""'], {'default': '"""/gt_logs/em_opt"""'}), "(gt_log_config, 'log_folder', default='/gt_logs/em_opt')\n", (6052, 6108), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6129, 6161), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['gt_log_config'], {}), '(gt_log_config)\n', (6146, 6161), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((6969, 6995), 'copy.deepcopy', 'copy.deepcopy', (['init_params'], {}), '(init_params)\n', (6982, 6995), False, 'import copy\n'), ((7972, 8016), 'copy.deepcopy', 'copy.deepcopy', (["exp_params['standard_params']"], {}), "(exp_params['standard_params'])\n", (7985, 8016), False, 'import copy\n'), ((8202, 8220), 'utils.check_dir', 'check_dir', (['exp_dir'], {}), '(exp_dir)\n', (8211, 8220), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8470, 8530), 'utils.dict_get', 'dict_get', (['standard_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(standard_log_config, 'exp_folder', default=exp_dir)\n", (8478, 8530), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8575, 8644), 'utils.dict_get', 'dict_get', (['standard_log_config', '"""log_folder"""'], {'default': '"""/standard_logs"""'}), "(standard_log_config, 'log_folder', default='/standard_logs')\n", (8583, 8644), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8686, 8743), 'utils.dict_get', 'dict_get', (['dense_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(dense_log_config, 
'exp_folder', default=exp_dir)\n", (8694, 8743), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8785, 8848), 'utils.dict_get', 'dict_get', (['dense_log_config', '"""log_folder"""'], {'default': '"""/dense_logs"""'}), "(dense_log_config, 'log_folder', default='/dense_logs')\n", (8793, 8848), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9996, 10034), 'models.StandardHMM', 'StandardHMM', ([], {}), "(**exp_params['gt_params'])\n", (10007, 10034), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((10385, 10433), 'utils.is_multinomial', 'is_multinomial', (['train_X'], {'min_symbols': 'n_emissions'}), '(train_X, min_symbols=n_emissions)\n', (10399, 10433), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((10930, 10978), 'utils.is_multinomial', 'is_multinomial', (['train_X'], {'min_symbols': 'n_emissions'}), '(train_X, min_symbols=n_emissions)\n', (10944, 10978), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11241, 11258), 'utils.check_dir', 'check_dir', (['gt_dir'], {}), '(gt_dir)\n', (11250, 11258), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11267, 11314), 'numpy.save', 'np.save', (["(gt_dir + '/transmat')", 'gt_hmm.transmat_'], {}), "(gt_dir + '/transmat', gt_hmm.transmat_)\n", (11274, 11314), True, 'import numpy as np\n'), ((11323, 11378), 'numpy.save', 'np.save', (["(gt_dir + 
'/emissionprob')", 'gt_hmm.emissionprob_'], {}), "(gt_dir + '/emissionprob', gt_hmm.emissionprob_)\n", (11330, 11378), True, 'import numpy as np\n'), ((11387, 11436), 'numpy.save', 'np.save', (["(gt_dir + '/startprob')", 'gt_hmm.startprob_'], {}), "(gt_dir + '/startprob', gt_hmm.startprob_)\n", (11394, 11436), True, 'import numpy as np\n'), ((11467, 11527), 'utils.dict_get', 'dict_get', (['exp_params', '"""gt_samples"""'], {'default': 'None', 'cast': 'tuple'}), "(exp_params, 'gt_samples', default=None, cast=tuple)\n", (11475, 11527), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11937, 11956), 'utils.check_dir', 'check_dir', (['data_dir'], {}), '(data_dir)\n', (11946, 11956), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11965, 12004), 'numpy.save', 'np.save', (["(data_dir + '/train_X')", 'train_X'], {}), "(data_dir + '/train_X', train_X)\n", (11972, 12004), True, 'import numpy as np\n'), ((12224, 12268), 'utils.timestamp_msg', 'timestamp_msg', (["('Saved data in %s' % data_dir)"], {}), "('Saved data in %s' % data_dir)\n", (12237, 12268), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12445, 12463), 'utils.check_dir', 'check_dir', (['exp_dir'], {}), '(exp_dir)\n', (12454, 12463), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12491, 12516), 'copy.deepcopy', 'copy.deepcopy', (['exp_params'], {}), '(exp_params)\n', (12504, 12516), False, 'import copy\n'), ((12538, 12597), 'utils.dict_get', 'dict_get', (['_exp_params', 
'"""gt_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'gt_params', default=None, cast=dict)\n", (12546, 12597), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13154, 13219), 'utils.dict_get', 'dict_get', (['_exp_params', '"""standard_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'standard_params', default=None, cast=dict)\n", (13162, 13219), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13246, 13304), 'utils.dict_get', 'dict_get', (['standard_params', '"""logging_monitor"""'], {'default': 'None'}), "(standard_params, 'logging_monitor', default=None)\n", (13254, 13304), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13517, 13579), 'utils.dict_get', 'dict_get', (['_exp_params', '"""dense_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'dense_params', default=None, cast=dict)\n", (13525, 13579), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13603, 13661), 'utils.dict_get', 'dict_get', (['standard_params', '"""logging_monitor"""'], {'default': 'None'}), "(standard_params, 'logging_monitor', default=None)\n", (13611, 13661), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13870, 13940), 'utils.dict_get', 'dict_get', (['_exp_params', '"""fair_standard_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'fair_standard_params', 
default=None, cast=dict)\n", (13878, 13940), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13972, 14035), 'utils.dict_get', 'dict_get', (['fair_standard_params', '"""logging_monitor"""'], {'default': 'None'}), "(fair_standard_params, 'logging_monitor', default=None)\n", (13980, 14035), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((14265, 14310), 'numpy.save', 'np.save', (["(exp_dir + '/exp_params')", '_exp_params'], {}), "(exp_dir + '/exp_params', _exp_params)\n", (14272, 14310), True, 'import numpy as np\n'), ((14319, 14379), 'utils.timestamp_msg', 'timestamp_msg', (["('Saved experiment parameters in %s' % exp_dir)"], {}), "('Saved experiment parameters in %s' % exp_dir)\n", (14332, 14379), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((15331, 15365), 'utils.timestamp_msg', 'timestamp_msg', (['"""Reusing sequences"""'], {}), "('Reusing sequences')\n", (15344, 15365), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16175, 16214), 'data.penntreebank_tag_sequences', 'penntreebank_tag_sequences', ([], {}), '(**ds_params)\n', (16201, 16214), False, 'from data import penntreebank_tag_sequences, protein_sequences, train_test_split\n'), ((16362, 16412), 'numpy.save', 'np.save', (["(log_dir + '/symb_to_tag.npy')", 'symb_to_tag'], {}), "(log_dir + '/symb_to_tag.npy', symb_to_tag)\n", (16369, 16412), True, 'import numpy as np\n'), ((16421, 16471), 'numpy.save', 'np.save', (["(log_dir + '/tag_to_symb.npy')", 'tag_to_symb'], {}), "(log_dir + 
'/tag_to_symb.npy', tag_to_symb)\n", (16428, 16471), True, 'import numpy as np\n'), ((19166, 19208), 'data.train_test_split', 'train_test_split', (['gt_sequences', 'train_perc'], {}), '(gt_sequences, train_perc)\n', (19182, 19208), False, 'from data import penntreebank_tag_sequences, protein_sequences, train_test_split\n'), ((19269, 19307), 'utils.timestamp_msg', 'timestamp_msg', (['"""Reusing sequences ..."""'], {}), "('Reusing sequences ...')\n", (19282, 19307), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((19501, 19524), 'utils.check_sequences', 'check_sequences', (['test_X'], {}), '(test_X)\n', (19516, 19524), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((799, 812), 'models.StandardHMM', 'StandardHMM', ([], {}), '()\n', (810, 812), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((2056, 2075), 'numpy.ones', 'np.ones', (['alpha_size'], {}), '(alpha_size)\n', (2063, 2075), True, 'import numpy as np\n'), ((8898, 8936), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['standard_log_config'], {}), '(standard_log_config)\n', (8915, 8936), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((8938, 8973), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['dense_log_config'], {}), '(dense_log_config)\n', (8955, 8973), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((9380, 9445), 'utils.dict_get', 'dict_get', (['fair_standard_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(fair_standard_log_config, 'exp_folder', default=exp_dir)\n", (9388, 9445), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, 
check_sequences\n'), ((9499, 9578), 'utils.dict_get', 'dict_get', (['fair_standard_log_config', '"""log_folder"""'], {'default': '"""/fair_standard_logs"""'}), "(fair_standard_log_config, 'log_folder', default='/fair_standard_logs')\n", (9507, 9578), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9614, 9657), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['fair_standard_log_config'], {}), '(fair_standard_log_config)\n', (9631, 9657), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((10193, 10241), 'utils.is_multinomial', 'is_multinomial', (['train_X'], {'min_symbols': 'n_emissions'}), '(train_X, min_symbols=n_emissions)\n', (10207, 10241), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((10694, 10741), 'utils.is_multinomial', 'is_multinomial', (['test_X'], {'min_symbols': 'n_emissions'}), '(test_X, min_symbols=n_emissions)\n', (10708, 10741), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12048, 12085), 'numpy.save', 'np.save', (["(data_dir + '/test_X')", 'test_X'], {}), "(data_dir + '/test_X', test_X)\n", (12055, 12085), True, 'import numpy as np\n'), ((12128, 12165), 'numpy.save', 'np.save', (["(data_dir + '/gt_A')", 'gt_AB[0]'], {}), "(data_dir + '/gt_A', gt_AB[0])\n", (12135, 12165), True, 'import numpy as np\n'), ((12178, 12215), 'numpy.save', 'np.save', (["(data_dir + '/gt_B')", 'gt_AB[1]'], {}), "(data_dir + '/gt_B', gt_AB[1])\n", (12185, 12215), True, 'import numpy as np\n'), ((12707, 12755), 'utils.dict_get', 'dict_get', (['gt_params', '"""init_params"""'], {'default': 'None'}), "(gt_params, 'init_params', default=None)\n", (12715, 
12755), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12902, 12954), 'utils.dict_get', 'dict_get', (['gt_params', '"""logging_monitor"""'], {'default': 'None'}), "(gt_params, 'logging_monitor', default=None)\n", (12910, 12954), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16290, 16320), 'data.protein_sequences', 'protein_sequences', ([], {}), '(**ds_params)\n', (16307, 16320), False, 'from data import penntreebank_tag_sequences, protein_sequences, train_test_split\n'), ((1070, 1100), 'models.StandardHMM', 'StandardHMM', ([], {}), '(**standard_params)\n', (1081, 1100), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((21668, 21679), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21677, 21679), False, 'import os\n'), ((21699, 21713), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21711, 21713), False, 'from datetime import datetime\n'), ((960, 981), 'models.StandardHMM', 'StandardHMM', ([], {}), '(**params)\n', (971, 981), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n')] |
'''
TaxiMDPClass.py: Contains the TaxiMDP class.
From:
Dietterich, <NAME>. "Hierarchical reinforcement learning with the
MAXQ value function decomposition." J. Artif. Intell. Res.(JAIR) 13
(2000): 227-303.
Author: <NAME> (cs.brown.edu/~dabel/)
'''
# Python imports.
from __future__ import print_function
import random
import copy
# Other imports.
from simple_rl.mdp.oomdp.OOMDPClass import OOMDP
from simple_rl.mdp.oomdp.OOMDPObjectClass import OOMDPObject
from simple_rl.tasks.taxi.TaxiStateClass import TaxiState
from simple_rl.tasks.taxi import taxi_helpers
class TaxiOOMDP(OOMDP):
    ''' Class for a Taxi OO-MDP '''

    # Static constants.
    ACTIONS = ["up", "down", "left", "right", "pickup", "dropoff"]
    ATTRIBUTES = ["x", "y", "has_passenger", "in_taxi", "dest_x", "dest_y"]
    CLASSES = ["agent", "wall", "passenger"]

    def __init__(self, width, height, agent, walls, passengers, slip_prob=0, gamma=0.99):
        '''
        Args:
            width (int): Grid width; x coordinates run 1..width.
            height (int): Grid height; y coordinates run 1..height.
            agent (dict): Attribute dict for the taxi agent (x, y, has_passenger).
            walls (list of dict): Attribute dicts, one per wall object.
            passengers (list of dict): Attribute dicts, one per passenger
                (x, y, dest_x, dest_y, in_taxi).
            slip_prob (float): Probability a movement action is flipped to its
                opposite direction before being applied.
            gamma (float): Discount factor.
        '''
        self.height = height
        self.width = width
        agent_obj = OOMDPObject(attributes=agent, name="agent")
        wall_objs = self._make_oomdp_objs_from_list_of_dict(walls, "wall")
        pass_objs = self._make_oomdp_objs_from_list_of_dict(passengers, "passenger")
        init_state = self._create_state(agent_obj, wall_objs, pass_objs)
        OOMDP.__init__(self, TaxiOOMDP.ACTIONS, self._taxi_transition_func, self._taxi_reward_func, init_state=init_state, gamma=gamma)
        self.slip_prob = slip_prob

    def _create_state(self, agent_oo_obj, walls, passengers):
        '''
        Args:
            agent_oo_obj (OOMDPObject)
            walls (list of OOMDPObject)
            passengers (list of OOMDPObject)

        Returns:
            (TaxiState): The initial OO-MDP state holding all objects by class.

        TODO: Make this more general and put it in OOMDPClass.
        '''
        objects = {c: [] for c in TaxiOOMDP.CLASSES}

        objects["agent"].append(agent_oo_obj)

        # Make walls.
        for w in walls:
            objects["wall"].append(w)

        # Make passengers.
        for p in passengers:
            objects["passenger"].append(p)

        return TaxiState(objects)

    def _taxi_reward_func(self, state, action):
        '''
        Args:
            state (TaxiState)
            action (str)

        Returns
            (float): 1 - step_cost for a successful dropoff (the taxi carries a
            passenger and every passenger sits at its destination), otherwise
            0 - step_cost.

        NOTE(review): self.step_cost is assumed to be provided by the MDP base
        class -- it is not set in this file; confirm against OOMDP/MDPClass.
        '''
        _error_check(state, action)

        # Stacked if statements for efficiency.
        if action == "dropoff":
            # If agent is dropping off.
            agent = state.get_first_obj_of_class("agent")

            # Check to see if all passengers are at their destinations.
            if agent.get_attribute("has_passenger"):
                for p in state.get_objects_of_class("passenger"):
                    if p.get_attribute("x") != p.get_attribute("dest_x") or p.get_attribute("y") != p.get_attribute("dest_y"):
                        return 0 - self.step_cost
                return 1 - self.step_cost

        return 0 - self.step_cost

    def _taxi_transition_func(self, state, action):
        '''
        Args:
            state (TaxiState)
            action (str)

        Returns
            (TaxiState): The successor state. Movement actions that would leave
            the grid (or hit a wall, checked in move_agent) leave the state
            unchanged; with probability slip_prob a movement action is first
            flipped to its opposite direction.
        '''
        _error_check(state, action)

        if self.slip_prob > random.random():
            # Flip dir.
            if action == "up":
                action = "down"
            elif action == "down":
                action = "up"
            elif action == "left":
                action = "right"
            elif action == "right":
                action = "left"

        if action == "up" and state.get_agent_y() < self.height:
            next_state = self.move_agent(state, self.slip_prob, dy=1)
        elif action == "down" and state.get_agent_y() > 1:
            next_state = self.move_agent(state, self.slip_prob, dy=-1)
        elif action == "right" and state.get_agent_x() < self.width:
            next_state = self.move_agent(state, self.slip_prob, dx=1)
        elif action == "left" and state.get_agent_x() > 1:
            next_state = self.move_agent(state, self.slip_prob, dx=-1)
        elif action == "dropoff":
            next_state = self.agent_dropoff(state)
        elif action == "pickup":
            next_state = self.agent_pickup(state)
        else:
            next_state = state

        # Make terminal.
        if taxi_helpers.is_taxi_terminal_state(next_state):
            next_state.set_terminal(True)

        # All OOMDP states must be updated.
        next_state.update()

        return next_state

    def __str__(self):
        return "taxi_h-" + str(self.height) + "_w-" + str(self.width)

    def visualize_agent(self, agent):
        '''
        Args:
            agent: An agent to visualize acting in this MDP.

        Summary:
            Opens a visualization of the agent acting in this Taxi MDP, waits
            for user input, then exits the process.
        '''
        # Local imports: visualization (and its pygame dependency) is only
        # needed when this method is actually called.
        # Fix: 'import sys' was missing (NameError on exit), and the old
        # 'from taxi_visualizer import ...' was a Python-2 implicit relative
        # import that fails on Python 3.
        import sys
        from simple_rl.utils.mdp_visualizer import visualize_agent
        from simple_rl.tasks.taxi.taxi_visualizer import _draw_state
        visualize_agent(self, agent, _draw_state)
        _ = input("Press anything to quit ")
        sys.exit(1)

    def visualize_interaction(self):
        '''
        Summary:
            Opens a visualization in which the user manually steps through this
            Taxi MDP, waits for user input, then exits the process.
        '''
        # Fix: raw_input does not exist on Python 3 (renamed to input), and
        # 'import sys' was missing; taxi_visualizer now imported absolutely.
        import sys
        from simple_rl.utils.mdp_visualizer import visualize_interaction
        from simple_rl.tasks.taxi.taxi_visualizer import _draw_state
        visualize_interaction(self, _draw_state)
        input("Press anything to quit ")
        sys.exit(1)

    # ----------------------------
    # -- Action Implementations --
    # ----------------------------

    def move_agent(self, state, slip_prob=0, dx=0, dy=0):
        '''
        Args:
            state (TaxiState)
            slip_prob (float): Unused; kept for backward compatibility (slip
                is already applied in _taxi_transition_func before this call).
            dx (int) [optional]
            dy (int) [optional]

        Returns:
            (TaxiState): A deep copy with the agent (and any passenger riding
            in the taxi) shifted by (dx, dy), or the original state if a wall
            blocks the move.
        '''
        if taxi_helpers._is_wall_in_the_way(state, dx=dx, dy=dy):
            # There's a wall in the way.
            return state

        next_state = copy.deepcopy(state)

        # Move Agent.
        agent_att = next_state.get_first_obj_of_class("agent").get_attributes()
        agent_att["x"] += dx
        agent_att["y"] += dy

        # Move passenger.
        taxi_helpers._move_pass_in_taxi(next_state, dx=dx, dy=dy)

        return next_state

    def agent_pickup(self, state):
        '''
        Args:
            state (TaxiState)

        Returns:
            (TaxiState): A deep copy in which any passenger co-located with the
            (empty) taxi has been picked up.
        '''
        next_state = copy.deepcopy(state)

        agent = next_state.get_first_obj_of_class("agent")

        if agent.get_attribute("has_passenger") == 0:
            # If the agent does not have a passenger.
            for i, passenger in enumerate(next_state.get_objects_of_class("passenger")):
                if agent.get_attribute("x") == passenger.get_attribute("x") and agent.get_attribute("y") == passenger.get_attribute("y"):
                    # Pick up passenger at agent location.
                    agent.set_attribute("has_passenger", 1)
                    passenger.set_attribute("in_taxi", 1)

        return next_state

    def agent_dropoff(self, state):
        '''
        Args:
            state (TaxiState)

        Returns:
            (TaxiState): A deep copy in which every passenger riding in the
            taxi has been dropped off at the taxi's current location.
        '''
        next_state = copy.deepcopy(state)

        # Get Agent and Passengers.
        agent = next_state.get_first_obj_of_class("agent")
        passengers = next_state.get_objects_of_class("passenger")

        if agent.get_attribute("has_passenger") == 1:
            # Update if the agent has a passenger.
            for i, passenger in enumerate(passengers):
                if passenger.get_attribute("in_taxi") == 1:
                    # Drop off the passenger.
                    passengers[i].set_attribute("in_taxi", 0)
                    agent.set_attribute("has_passenger", 0)

        return next_state
def _error_check(state, action):
    '''
    Validate that *action* is a known Taxi action and *state* is a TaxiState.

    Args:
        state (State)
        action (str)

    Raises:
        ValueError: If the action is not in TaxiOOMDP.ACTIONS, or the state is
        not a TaxiState instance.
    '''
    if action not in TaxiOOMDP.ACTIONS:
        raise ValueError("Error: the action provided ({}) was invalid.".format(action))

    if not isinstance(state, TaxiState):
        raise ValueError("Error: the given state ({}) was not of the correct class.".format(state))
def main():
    '''Construct a small demo Taxi OO-MDP: 10x10 grid, no walls, one passenger.'''
    taxi_agent = {"x": 1, "y": 1, "has_passenger": 0}
    taxi_passengers = [{"x": 8, "y": 4, "dest_x": 2, "dest_y": 2, "in_taxi": 0}]
    TaxiOOMDP(10, 10, agent=taxi_agent, walls=[], passengers=taxi_passengers)

if __name__ == "__main__":
    main()
| [
"simple_rl.tasks.taxi.taxi_helpers._is_wall_in_the_way",
"simple_rl.mdp.oomdp.OOMDPClass.OOMDP.__init__",
"simple_rl.tasks.taxi.TaxiStateClass.TaxiState",
"simple_rl.utils.mdp_visualizer.visualize_interaction",
"random.random",
"simple_rl.tasks.taxi.taxi_helpers.is_taxi_terminal_state",
"copy.deepcopy",... | [((1020, 1063), 'simple_rl.mdp.oomdp.OOMDPObjectClass.OOMDPObject', 'OOMDPObject', ([], {'attributes': 'agent', 'name': '"""agent"""'}), "(attributes=agent, name='agent')\n", (1031, 1063), False, 'from simple_rl.mdp.oomdp.OOMDPObjectClass import OOMDPObject\n'), ((1306, 1438), 'simple_rl.mdp.oomdp.OOMDPClass.OOMDP.__init__', 'OOMDP.__init__', (['self', 'TaxiOOMDP.ACTIONS', 'self._taxi_transition_func', 'self._taxi_reward_func'], {'init_state': 'init_state', 'gamma': 'gamma'}), '(self, TaxiOOMDP.ACTIONS, self._taxi_transition_func, self.\n _taxi_reward_func, init_state=init_state, gamma=gamma)\n', (1320, 1438), False, 'from simple_rl.mdp.oomdp.OOMDPClass import OOMDP\n'), ((2106, 2124), 'simple_rl.tasks.taxi.TaxiStateClass.TaxiState', 'TaxiState', (['objects'], {}), '(objects)\n', (2115, 2124), False, 'from simple_rl.tasks.taxi.TaxiStateClass import TaxiState\n'), ((4289, 4336), 'simple_rl.tasks.taxi.taxi_helpers.is_taxi_terminal_state', 'taxi_helpers.is_taxi_terminal_state', (['next_state'], {}), '(next_state)\n', (4324, 4336), False, 'from simple_rl.tasks.taxi import taxi_helpers\n'), ((5003, 5043), 'simple_rl.utils.mdp_visualizer.visualize_interaction', 'visualize_interaction', (['self', '_draw_state'], {}), '(self, _draw_state)\n', (5024, 5043), False, 'from simple_rl.utils.mdp_visualizer import visualize_interaction\n'), ((5461, 5514), 'simple_rl.tasks.taxi.taxi_helpers._is_wall_in_the_way', 'taxi_helpers._is_wall_in_the_way', (['state'], {'dx': 'dx', 'dy': 'dy'}), '(state, dx=dx, dy=dy)\n', (5493, 5514), False, 'from simple_rl.tasks.taxi import taxi_helpers\n'), ((5604, 5624), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (5617, 5624), False, 'import copy\n'), ((5821, 5878), 'simple_rl.tasks.taxi.taxi_helpers._move_pass_in_taxi', 'taxi_helpers._move_pass_in_taxi', (['next_state'], {'dx': 'dx', 'dy': 'dy'}), '(next_state, dx=dx, dy=dy)\n', (5852, 5878), False, 'from simple_rl.tasks.taxi import taxi_helpers\n'), ((6032, 
6052), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (6045, 6052), False, 'import copy\n'), ((6847, 6867), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (6860, 6867), False, 'import copy\n'), ((3199, 3214), 'random.random', 'random.random', ([], {}), '()\n', (3212, 3214), False, 'import random\n')] |
#coding:utf-8
#
# id: bugs.core_2923
# title: Problem with dependencies between a procedure and a view using that procedure
# decription:
# tracker_id: CORE-2923
# min_versions: ['2.5.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set term ^;
create procedure sp_test returns (i smallint) as
begin
i = 32767;
suspend;
end
^
create view v0 as
select i
from sp_test
^
alter procedure sp_test returns (i int) as
begin
i = 32768;
suspend;
end
^
set term ;^
commit;
---
create table t1 (n1 smallint);
insert into t1(n1) values(32767);
commit;
create view v1 as
select *
from t1;
alter table t1 alter n1 type integer;
commit;
insert into t1(n1) values(32768);
commit;
---
create table t2 (n2 smallint);
insert into t2(n2) values(32767);
commit;
create domain d2 integer;
create view v2 as
select * from t2;
alter table t2 alter n2 type d2;
insert into t2(n2) values(32768);
commit;
---
set list on;
select '0' as test_no, v.* from v0 v
union all
select '1', v.* from v1 v
union all
select '2', v.* from v2 v
;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
TEST_NO 0
I 32768
TEST_NO 1
I 32767
TEST_NO 1
I 32768
TEST_NO 2
I 32767
TEST_NO 2
I 32768
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| [
"pytest.mark.version",
"firebird.qa.db_factory",
"firebird.qa.isql_act"
] | [((407, 452), 'firebird.qa.db_factory', 'db_factory', ([], {'sql_dialect': '(3)', 'init': 'init_script_1'}), '(sql_dialect=3, init=init_script_1)\n', (417, 452), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((1498, 1560), 'firebird.qa.isql_act', 'isql_act', (['"""db_1"""', 'test_script_1'], {'substitutions': 'substitutions_1'}), "('db_1', test_script_1, substitutions=substitutions_1)\n", (1506, 1560), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((1992, 2020), 'pytest.mark.version', 'pytest.mark.version', (['""">=3.0"""'], {}), "('>=3.0')\n", (2011, 2020), False, 'import pytest\n')] |
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Control(models.Model):
objects=models.Manager()
TYPE_CHOICES=(
('Primitive','Primitive'),
('Corpse','CORPSE'),
('Gaussian','Gaussian'),
('CinBB','CinBB'),
)
#pk i.e id --> Refered to as pk while we use it as a lookup variable
name = models.CharField(max_length=200)
type = models.CharField(max_length=200, choices=TYPE_CHOICES, default='Primitive')
maximum_rabi_rate = models.FloatField(validators = [MinValueValidator(0), MaxValueValidator(100)])
polar_angle = models.FloatField(validators = [MinValueValidator(0), MaxValueValidator(1)])
def __str__(self):
return self.name
| [
"django.core.validators.MinValueValidator",
"django.db.models.Manager",
"django.core.validators.MaxValueValidator",
"django.db.models.CharField"
] | [((143, 159), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (157, 159), False, 'from django.db import models\n'), ((394, 426), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (410, 426), False, 'from django.db import models\n'), ((438, 513), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'choices': 'TYPE_CHOICES', 'default': '"""Primitive"""'}), "(max_length=200, choices=TYPE_CHOICES, default='Primitive')\n", (454, 513), False, 'from django.db import models\n'), ((570, 590), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (587, 590), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((592, 614), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(100)'], {}), '(100)\n', (609, 614), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((667, 687), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(0)'], {}), '(0)\n', (684, 687), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((689, 709), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(1)'], {}), '(1)\n', (706, 709), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n')] |