id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3575155 | from functools import lru_cache
from .base import ErConnector
from .customfield import get_custom_field_by_key
from .candidate import delete_candidate_custom_field_rest
# (id, label) pairs accepted by the EEOC "Gender" field.
# NOTE(review): id 2 is absent from this list -- confirm against the API.
gender_choices = [(0, 'Male'), (1, 'Female'), (3, 'Other'), (4, 'Decline') ]
# not in api, convenience #
gender_pronoun_choices = ['he/him/his','she/her/hers','they/them/theirs','ze/zir/zirs', 'ze/hir/hirs']
# Names of the candidate custom fields used for pronouns / self-identification.
gender_pronoun_field = 'Pronouns'
not_self_identify_field = 'Self Identified?'
class EEOC(object):
    """EEOC (Equal Employment Opportunity) record for one candidate.

    Wraps the 2.0 API resource ``EEOC/Candidate/{candidate_id}`` together
    with two candidate custom fields: pronouns and the "Self Identified?"
    flag.  The ``*_list`` constructor arguments let callers inject
    pre-fetched (id, label) choice lists to avoid repeated API calls.
    """

    def __init__(self,
                 candidate_id,
                 data=None,
                 ethnicity_list=None,
                 veteran_status_list=None,
                 disability_status_list=None,
                 use_pronouns=False
                 ):
        """
        Args:
            candidate_id: remote id of the candidate this record belongs to.
            data: optional raw API payload; when given, no fetch is made.
            ethnicity_list / veteran_status_list / disability_status_list:
                optional pre-fetched (id, label) choice lists.
            use_pronouns: also read/write the pronoun custom field.
        """
        self.candidate_id = candidate_id
        self.gender_id = None
        self.ethnicity_id = None
        self.veteran_status_id = None
        self.disability_status_id = None
        self.gender = None
        self.use_pronouns = use_pronouns
        self.pronouns = None
        self.pronouns_current = None
        self.not_self_identify = None
        self.not_self_identify_current = None
        self.disability_status_list = disability_status_list
        self.veteran_status_list = veteran_status_list
        self.ethnicity_list = ethnicity_list
        if not data:
            # Fetch from remote
            self.refresh()
        else:
            # Allows it to be populated by list methods without an additional fetch
            self.data = data
            self.pronoun_field = None
            self.not_self_identify_field = None
            self.populate_from_data()

    def fetch_and_populate_pronoun_field(self):
        """Load the 'Pronouns' custom field; failures are logged and ignored."""
        try:
            self.pronoun_field = get_custom_field_by_key('Candidate', gender_pronoun_field, self.candidate_id)
            self.pronouns = self.pronouns_current = self.pronoun_field.value
        except Exception as e:
            print(e)

    def fetch_and_populate_not_self_identify_field(self):
        """Load the 'Self Identified?' custom field; failures are logged and ignored."""
        try:
            self.not_self_identify_field = get_custom_field_by_key('Candidate', not_self_identify_field, self.candidate_id)
            self.not_self_identify = self.not_self_identify_current = self.not_self_identify_field.value
        except Exception as e:
            print(e)

    def refresh(self):
        """Re-fetch the EEOC payload from the API and repopulate attributes."""
        self.data = get_candidate_eeoc_by_candidate_id(self.candidate_id).data
        self.populate_from_data()

    def populate_from_data(self):
        """Copy ids out of ``self.data`` and resolve the gender label."""
        self.gender_id = self.data.get('Gender', None)
        try:
            self.gender = [x[1] for x in list_choices_gender() if x[0] == self.gender_id][0]
        except IndexError:
            # Unknown/absent gender id: leave self.gender as None.
            pass
        self.ethnicity_id = self.data.get('EthnicityID', None)
        self.veteran_status_id = self.data.get('VeteranStatusID', None)
        self.disability_status_id = self.data.get('DisabilityStatusID', None)
        self.fetch_and_populate_not_self_identify_field()
        if self.use_pronouns:
            try:
                self.fetch_and_populate_pronoun_field()
            except Exception:
                pass

    def ethnicity(self):
        """Return the ethnicity label for ``self.ethnicity_id``, or None."""
        value_list = self.ethnicity_list if self.ethnicity_list else list_choices_ethnicity()
        return next((label for value, label in value_list if value == self.ethnicity_id), None)

    def disability_status(self):
        """Return the disability-status label for ``self.disability_status_id``, or None."""
        value_list = self.disability_status_list if self.disability_status_list else list_choices_disability_status()
        return next((label for value, label in value_list if value == self.disability_status_id), None)

    def veteran_status(self):
        """Return the veteran-status label for ``self.veteran_status_id``, or None."""
        value_list = self.veteran_status_list if self.veteran_status_list else list_choices_veteran_status()
        return next((label for value, label in value_list if value == self.veteran_status_id), None)

    def save(self, validate=True):
        """PUT the current ids back to the API, then refresh from remote.

        Raises:
            AttributeError: when ``validate`` is True and an id is not among
                the allowed choices.
        """
        connector = ErConnector()  # 2.0 API
        url = 'EEOC/Candidate/{candidate_id}'.format(
            candidate_id=self.candidate_id,
        )
        if validate:
            gender_values = [x[0] for x in list_choices_gender()]
            if self.gender_id and self.gender_id not in gender_values:
                # BUG FIX: the ids are ints, so they must be stringified before
                # joining -- ','.join over ints raised TypeError here.
                raise AttributeError(
                    'Error: Gender must be one of the following values:{values}'.format(
                        values=','.join([str(x) for x in gender_values])))
            ethnicity_values = [x[0] for x in (self.ethnicity_list if self.ethnicity_list else list_choices_ethnicity())]
            if self.ethnicity_id and self.ethnicity_id not in ethnicity_values:
                raise AttributeError(
                    'Error: EthnicityID must be one of the following values:{values}'.format(
                        values=','.join([str(x) for x in ethnicity_values])))
            veteran_status_values = [x[0] for x in (self.veteran_status_list if self.veteran_status_list else list_choices_veteran_status())]
            if self.veteran_status_id and self.veteran_status_id not in veteran_status_values:
                raise AttributeError(
                    'Error: VeteranStatusID must be one of the following values:{values}'.format(
                        values=','.join([str(x) for x in veteran_status_values])))
            disability_status_values = [x[0] for x in (
                self.disability_status_list if self.disability_status_list else list_choices_disability_status())]
            if self.disability_status_id and self.disability_status_id not in disability_status_values:
                raise AttributeError(
                    'Error: DisabilityStatusID must be one of the following values:{values}'.format(
                        values=','.join([str(x) for x in disability_status_values])))
        payload = self.data
        if self.use_pronouns:
            self.save_pronouns()
        self.save_not_identify()
        payload['Gender'] = self.gender_id
        payload['EthnicityID'] = self.ethnicity_id
        payload['VeteranStatusID'] = self.veteran_status_id
        payload['DisabilityStatusID'] = self.disability_status_id
        response = connector.send_request(
            path=url,
            verb='PUT',
            payload=payload
        )
        self.refresh()
        return self

    def save_pronouns(self):
        """Persist (or delete) the pronoun custom field value."""
        if not self.pronouns:
            # Deleting a value previously stored
            delete_candidate_custom_field_rest(self.candidate_id, gender_pronoun_field)
        else:
            # NOTE(review): assumes fetch_and_populate_pronoun_field succeeded,
            # i.e. self.pronoun_field is not None -- confirm for candidates
            # that never had the custom field.
            self.pronoun_field.value = self.pronouns
            self.pronoun_field.save(obj_id=self.candidate_id)

    def clear_pronouns(self):
        """Best-effort delete of the pronoun custom field."""
        try:
            delete_candidate_custom_field_rest(self.candidate_id, gender_pronoun_field)
        except Exception:
            pass

    def clear(self):
        """Reset every populated id to its 'decline' sentinel and drop pronouns.

        NOTE(review): 4/6/17/3 are presumed 'decline to answer' ids for
        gender/veteran/ethnicity/disability -- confirm against the API.
        """
        if self.gender_id:
            self.gender_id = 4
            self.gender = None
        if self.veteran_status_id:
            self.veteran_status_id = 6
        if self.ethnicity_id:
            self.ethnicity_id = 17
        if self.disability_status_id:
            self.disability_status_id = 3
        if self.pronouns:
            self.pronouns = None
            self.clear_pronouns()

    def save_not_identify(self):
        """Persist the 'Self Identified?' flag; clears all ids when True."""
        if self.not_self_identify is True:
            self.clear()
            self.not_self_identify_field.value = 'True'
            self.not_self_identify_field.save(obj_id=self.candidate_id)
        elif self.not_self_identify is False:
            self.not_self_identify_field.value = 'False'
            self.not_self_identify_field.save(obj_id=self.candidate_id)
def get_candidate_eeoc_by_candidate_id(candidate_id, use_pronouns=False):
    """Fetch a candidate's raw EEOC payload and wrap it in an EEOC instance."""
    connector = ErConnector()  # 2.0 API
    payload = connector.send_request(
        path='EEOC/Candidate/{candidate_id}'.format(candidate_id=candidate_id),
        verb='GET',
    )
    return EEOC(candidate_id, use_pronouns=use_pronouns, data=payload)
def list_choices_gender():
    """Return the static (id, label) gender choices (not served by the API)."""
    return gender_choices
def list_choices_ethnicity():
    """Fetch (ID, Name) ethnicity choices from the 2.0 API; [] on any failure."""
    connector = ErConnector()  # 2.0 API
    try:
        response = connector.send_request(path='EEOC/Ethnicity', verb='GET')
        if not response:
            return []
        return [(item['ID'], item['Name']) for item in response]
    except:
        return []
def list_choices_veteran_status():
    """Fetch (ID, Name) veteran-status choices from the 2.0 API; [] on any failure."""
    connector = ErConnector()  # 2.0 API
    try:
        response = connector.send_request(path='/EEOC/VeteranStatus', verb='GET')
        if not response:
            return []
        return [(item['ID'], item['Name']) for item in response]
    except:
        return []
def list_choices_disability_status():
    """Fetch (ID, Name) disability-status choices from the 2.0 API; [] on any failure."""
    connector = ErConnector()  # 2.0 API
    try:
        response = connector.send_request(path='/EEOC/DisabilityStatus', verb='GET')
        if not response:
            return []
        return [(item['ID'], item['Name']) for item in response]
    except:
        return []
9742322 | """"
This class will plot all waveforms that have been recorded. It reads the created pulses for each event
and plot the waveforms in a single plot
"""
import matplotlib as plt
import pax.plugins.plotting.Plotting
class ShowWaveforms(pax.PlotBase):
    """Plot every channel's waveform for an event in one shared-axis figure.

    NOTE(review): the base class is referenced as ``pax.PlotBase`` although
    the module only imports ``pax.plugins.plotting.Plotting`` -- confirm the
    attribute actually resolves in the pax package.
    """

    def PlotAllChannels(self, event):
        """Draw a 7x2 grid of shared-axis subplots (placeholder traces for now)."""
        # BUG FIX: the module does ``import matplotlib as plt``, and the bare
        # matplotlib package exposes no ``subplots``/``subplots_adjust``;
        # import pyplot locally so these calls resolve.
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(nrows=7, ncols=2, sharex=True, sharey=True, squeeze=False, figsize=(12, 12))
        plt.subplots_adjust(hspace=0, wspace=0.05)
        for xi in range(7):
            for yi in range(2):
                # Placeholder trace; plotting the real pulses of ``event`` is still TODO.
                ax[xi][yi].plot(range(10), range(10))
| StarcoderdataPython |
11391567 | <reponame>bkktimber/gluon-nlp<filename>tests/unittest/test_models.py
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import mxnet as mx
from mxnet import gluon
import gluonnlp as nlp
import pytest
def eprint(*values, **options):
    """Print *values* to standard error; accepts the same keywords as print()."""
    print(*values, file=sys.stderr, **options)
# disabled since it takes a long time to download the model
@pytest.mark.serial
def _test_pretrained_big_text_models():
    """Forward pass of pretrained big RNN LMs (underscore-prefixed so pytest skips it)."""
    text_models = ['big_rnn_lm_2048_512']
    pretrained_to_test = {'big_rnn_lm_2048_512': 'gbw'}
    for model_name in text_models:
        eprint('testing forward for %s' % model_name)
        pretrained_dataset = pretrained_to_test.get(model_name)
        model, _ = nlp.model.get_model(model_name, dataset_name=pretrained_dataset,
                                       pretrained=True, root='tests/data/model/')
        print(model)
        batch_size = 10
        hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros)
        # 33 time steps x 10 sequences of dummy token ids.
        output, state = model(mx.nd.arange(330).reshape((33, 10)), hidden)
        output.wait_to_read()
@pytest.mark.serial
def test_big_text_models(wikitext2_val_and_counter):
    """Forward pass of the big RNN LM built on a small test vocabulary (no pretrained weights)."""
    # use a small vocabulary for testing
    val, val_freq = wikitext2_val_and_counter
    vocab = nlp.Vocab(val_freq)
    text_models = ['big_rnn_lm_2048_512']
    for model_name in text_models:
        eprint('testing forward for %s' % model_name)
        model, _ = nlp.model.get_model(model_name, vocab=vocab, root='tests/data/model/')
        print(model)
        model.collect_params().initialize()
        batch_size = 10
        hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros)
        output, state = model(mx.nd.arange(330).reshape((33, 10)), hidden)
        output.wait_to_read()
@pytest.mark.serial
def test_text_models():
    """Forward pass of the standard/AWD LSTM LMs, with pretrained weights where mapped."""
    text_models = ['standard_lstm_lm_200', 'standard_lstm_lm_650', 'standard_lstm_lm_1500', 'awd_lstm_lm_1150', 'awd_lstm_lm_600']
    pretrained_to_test = {'standard_lstm_lm_1500': 'wikitext-2',
                          'standard_lstm_lm_650': 'wikitext-2',
                          'standard_lstm_lm_200': 'wikitext-2',
                          'awd_lstm_lm_1150': 'wikitext-2',
                          'awd_lstm_lm_600': 'wikitext-2'}
    for model_name in text_models:
        eprint('testing forward for %s' % model_name)
        pretrained_dataset = pretrained_to_test.get(model_name)
        model, _ = nlp.model.get_model(model_name, dataset_name=pretrained_dataset,
                                       pretrained=pretrained_dataset is not None,
                                       root='tests/data/model/')
        print(model)
        if not pretrained_dataset:
            model.collect_params().initialize()
        output, state = model(mx.nd.arange(330).reshape(33, 10))
        output.wait_to_read()
        # Free the model before the next iteration to bound memory use.
        del model
        mx.nd.waitall()
@pytest.mark.serial
def test_cache_models():
    """Forward pass of the neural-cache wrapper over each pretrained LSTM LM."""
    cache_language_models = ['awd_lstm_lm_1150', 'awd_lstm_lm_600', 'standard_lstm_lm_200',
                             'standard_lstm_lm_650', 'standard_lstm_lm_1500']
    datasets = ['wikitext-2']
    for name in cache_language_models:
        for dataset_name in datasets:
            cache_cell = nlp.model.train.get_cache_model(name, dataset_name, window=1, theta=0.6,
                                                         lambdas=0.2, root='tests/data/model/')
            outs, word_history, cache_history, hidden = \
                cache_cell(mx.nd.arange(10).reshape(10, 1), mx.nd.arange(10).reshape(10, 1), None, None)
            print(cache_cell)
            print("outs:")
            print(outs)
            print("word_history:")
            print(word_history)
            print("cache_history:")
            print(cache_history)
@pytest.mark.serial
def test_get_cache_model_noncache_models():
    """A cache cell built by get_cache_model matches one assembled manually from the plain LM."""
    language_models_params = {'awd_lstm_lm_1150': 'awd_lstm_lm_1150_wikitext-2-f9562ed0.params',
                              'awd_lstm_lm_600': 'awd_lstm_lm_600_wikitext-2-e952becc.params',
                              'standard_lstm_lm_200': 'standard_lstm_lm_200_wikitext-2-b233c700.params',
                              'standard_lstm_lm_650': 'standard_lstm_lm_650_wikitext-2-631f3904.params',
                              'standard_lstm_lm_1500': 'standard_lstm_lm_1500_wikitext-2-a4163513.params'}
    datasets = ['wikitext-2']
    for name in language_models_params.keys():
        for dataset_name in datasets:
            _, vocab = nlp.model.get_model(name=name, dataset_name=dataset_name, pretrained=True,
                                           root='tests/data/model')
            ntokens = len(vocab)
            # Path 1: convenience constructor.
            cache_cell_0 = nlp.model.train.get_cache_model(name, dataset_name, window=1, theta=0.6,
                                                           lambdas=0.2, root='tests/data/model/')
            print(cache_cell_0)
            # Path 2: wrap the plain pretrained LM by hand.
            model, _ = nlp.model.get_model(name=name, dataset_name=dataset_name, pretrained=True,
                                           root='tests/data/model/')
            cache_cell_1 = nlp.model.train.CacheCell(model, ntokens, window=1, theta=0.6, lambdas=0.2)
            cache_cell_1.load_parameters('tests/data/model/' + language_models_params.get(name))
            print(cache_cell_1)
            outs0, word_history0, cache_history0, hidden0 = \
                cache_cell_0(mx.nd.arange(10).reshape(10, 1), mx.nd.arange(10).reshape(10, 1), None, None)
            outs1, word_history1, cache_history1, hidden1 = \
                cache_cell_1(mx.nd.arange(10).reshape(10, 1), mx.nd.arange(10).reshape(10, 1), None, None)
            assert outs0.shape == outs1.shape, outs0.shape
            assert len(word_history0) == len(word_history1), len(word_history0)
            assert len(cache_history0) == len(cache_history1), len(cache_history0)
            assert len(hidden0) == len(hidden1), len(hidden0)
@pytest.mark.serial
def test_save_load_cache_models():
    """Round-trip save/load of cache-model parameters for every pretrained LSTM LM."""
    cache_language_models = ['awd_lstm_lm_1150', 'awd_lstm_lm_600', 'standard_lstm_lm_200',
                             'standard_lstm_lm_650', 'standard_lstm_lm_1500']
    datasets = ['wikitext-2']
    for name in cache_language_models:
        for dataset_name in datasets:
            cache_cell = nlp.model.train.get_cache_model(name, dataset_name, window=1, theta=0.6,
                                                         lambdas=0.2, root='tests/data/model/')
            print(cache_cell)
            cache_cell.save_parameters('tests/data/model/' + name + '-' + dataset_name + '.params')
            cache_cell.load_parameters('tests/data/model/' + name + '-' + dataset_name + '.params')
@pytest.mark.serial
def test_save_load_big_rnn_models():
    """Train-mode BigRNN forward/backward, then save params and load them into the eval model."""
    ctx = mx.cpu()
    seq_len = 1
    batch_size = 1
    num_sampled = 6
    # network
    eval_model = nlp.model.language_model.BigRNN(10, 2, 3, 4, 5, 0.1, prefix='bigrnn')
    model = nlp.model.language_model.train.BigRNN(10, 2, 3, 4, 5, num_sampled, 0.1,
                                                  prefix='bigrnn')
    loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
    # verify param names
    model_params = sorted(model.collect_params().keys())
    eval_model_params = sorted(eval_model.collect_params().keys())
    for p0, p1 in zip(model_params, eval_model_params):
        assert p0 == p1, (p0, p1)
    model.initialize(mx.init.Xavier(), ctx=ctx)
    trainer = mx.gluon.Trainer(model.collect_params(), 'sgd')
    # prepare data, label and samples
    x = mx.nd.ones((seq_len, batch_size))
    y = mx.nd.ones((seq_len, batch_size))
    sampled_cls = mx.nd.ones((num_sampled,))
    sampled_cls_cnt = mx.nd.ones((num_sampled,))
    true_cls_cnt = mx.nd.ones((seq_len,batch_size))
    samples = (sampled_cls, sampled_cls_cnt, true_cls_cnt)
    hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros, ctx=ctx)
    # test forward
    with mx.autograd.record():
        pred, hidden, new_y = model(x, y, hidden, samples)
        assert pred.shape == (seq_len, batch_size, 1+num_sampled)
        assert new_y.shape == (seq_len, batch_size)
        pred = pred.reshape((-3, -1))
        new_y = new_y.reshape((-1,))
        l = loss(pred, new_y)
    l.backward()
    mx.nd.waitall()
    path = 'tests/data/model/test_save_load_big_rnn_models.params'
    model.save_parameters(path)
    eval_model.load_parameters(path)
def test_big_rnn_model_share_params():
    """Train-mode BigRNN with dense weights shares its parameters with the eval model."""
    ctx = mx.cpu()
    seq_len = 2
    batch_size = 1
    num_sampled = 6
    vocab_size = 10
    shape = (seq_len, batch_size)
    model = nlp.model.language_model.train.BigRNN(vocab_size, 2, 3, 4, 5, num_sampled, 0.1,
                                                  prefix='bigrnn', sparse_weight=False,
                                                  sparse_grad=False)
    loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    model.initialize(mx.init.Xavier(), ctx=ctx)
    trainer = mx.gluon.Trainer(model.collect_params(), 'sgd')
    batch_size = 1
    x = mx.nd.ones(shape)
    y = mx.nd.ones(shape)
    sampled_cls = mx.nd.ones((num_sampled,))
    sampled_cls_cnt = mx.nd.ones((num_sampled,))
    true_cls_cnt = mx.nd.ones(shape)
    samples = (sampled_cls, sampled_cls_cnt, true_cls_cnt)
    hidden = model.begin_state(batch_size=batch_size, func=mx.nd.zeros, ctx=ctx)
    with mx.autograd.record():
        pred, hidden, new_y = model(x, y, hidden, samples)
        assert pred.shape == (seq_len, batch_size, 1+num_sampled)
        assert new_y.shape == (seq_len, batch_size)
        pred = pred.reshape((-3, -1))
        new_y = new_y.reshape((-1,))
        l = loss(pred, new_y)
    l.backward()
    # sparse_grad=False must yield a dense gradient storage type.
    assert model.decoder.weight._grad_stype == 'default'
    mx.nd.waitall()
    # Eval model reuses the train model's parameters.
    eval_model = nlp.model.language_model.BigRNN(vocab_size, 2, 3, 4, 5, 0.1, prefix='bigrnn',
                                                 params=model.collect_params())
    eval_model.hybridize()
    eval_model.initialize(mx.init.Xavier(), ctx=ctx)
    pred, hidden = eval_model(x, hidden)
    assert pred.shape == (seq_len, batch_size, vocab_size)
    mx.nd.waitall()
def test_weight_drop():
    """Parameter-sharing nets agree before weight-drop and diverge (in train mode) after it."""
    class RefBiLSTM(gluon.Block):
        # Reference bidirectional LSTM built from two unidirectional LSTMs.
        def __init__(self, size, **kwargs):
            super(RefBiLSTM, self).__init__(**kwargs)
            with self.name_scope():
                self._lstm_fwd = gluon.rnn.LSTM(size, bidirectional=False, prefix='l0')
                self._lstm_bwd = gluon.rnn.LSTM(size, bidirectional=False, prefix='r0')

        def forward(self, inpt):
            fwd = self._lstm_fwd(inpt)
            bwd_inpt = mx.nd.flip(inpt, 0)
            bwd = self._lstm_bwd(bwd_inpt)
            bwd = mx.nd.flip(bwd, 0)
            return mx.nd.concat(fwd, bwd, dim=2)
    net1 = RefBiLSTM(10)
    shared_net1 = RefBiLSTM(10, params=net1.collect_params())
    net2 = gluon.rnn.LSTM(10)
    shared_net2 = gluon.rnn.LSTM(10, params=net2.collect_params())
    net3 = gluon.nn.HybridSequential()
    net3.add(gluon.rnn.LSTM(10))
    shared_net3 = gluon.nn.HybridSequential(params=net3.collect_params())
    shared_net3.add(gluon.rnn.LSTM(10, params=net3[0].collect_params()))
    x = mx.nd.ones((3, 4, 5))
    nets = [(net1, shared_net1),
            (net2, shared_net2),
            (net3, shared_net3)]
    for net, shared_net in nets:
        net.initialize('ones')
        # Before weight-drop, both nets and their gradients must agree exactly.
        mx.test_utils.assert_almost_equal(net(x).asnumpy(),
                                          shared_net(x).asnumpy())
        with mx.autograd.train_mode():
            mx.test_utils.assert_almost_equal(net(x).asnumpy(),
                                              shared_net(x).asnumpy())
        grads = {}
        with mx.autograd.record():
            y = net(x)
        y.backward()
        for name, param in net.collect_params().items():
            grads[name] = param.grad().copy()
        with mx.autograd.record():
            y = shared_net(x)
        y.backward()
        for name, param in shared_net.collect_params().items():
            mx.test_utils.assert_almost_equal(grads[name].asnumpy(), param.grad().asnumpy())
        # Apply dropout to the hidden-to-hidden weights of `net` only.
        drop_rate = 0.5
        nlp.model.utils.apply_weight_drop(net, '.*h2h_weight', drop_rate)
        net.initialize('ones')
        # Inference mode is unaffected by weight-drop...
        mx.test_utils.assert_almost_equal(net(x).asnumpy(),
                                          shared_net(x).asnumpy())
        # ...but training mode must now differ between the two nets.
        with mx.autograd.train_mode():
            assert not mx.test_utils.almost_equal(net(x).asnumpy(),
                                                  shared_net(x).asnumpy())
        grads = {}
        with mx.autograd.record():
            y = net(x)
        y.backward()
        for name, param in net.collect_params().items():
            grads[name] = param.grad().copy()
        with mx.autograd.record():
            y = shared_net(x)
        y.backward()
        for name, param in shared_net.collect_params().items():
            assert not mx.test_utils.almost_equal(grads[name].asnumpy(), param.grad().asnumpy())
| StarcoderdataPython |
9755230 | <filename>chapter-8/code/pre-built-vision-ai.py
from google.cloud import vision
from google.cloud import translate_v2 as translate
# TODO: Change to your gcs bucket
GCS_BUCKET = "packt-data-eng-on-gcp-data-bucket"
GCS_URI = "gs://{}/chapter-8/chapter-8-example-text.jpg".format(GCS_BUCKET)
def detect_text(GCS_URI : str):
    """OCR the image at *GCS_URI* with the Vision API, then detect the text's language.

    Uses the module-level ``vision_client`` and ``translate_client``.
    Results are only printed, not returned.
    """
    print("Looking for text from image in GCS: {}".format(GCS_URI))
    image = vision.Image(
        source=vision.ImageSource(gcs_image_uri=GCS_URI)
    )
    text_detection_response = vision_client.text_detection(image=image)
    annotations = text_detection_response.text_annotations
    if len(annotations) > 0:
        # The first annotation's description is used as the extracted text.
        text = annotations[0].description
    else:
        text = ""
    print("Extracted text : \n{}".format(text))
    detect_language_response = translate_client.detect_language(text)
    src_lang = detect_language_response["language"]
    print("Detected language {}".format(src_lang))
# Shared API clients, created once at import time and used by detect_text().
vision_client = vision.ImageAnnotatorClient()
translate_client = translate.Client()
# Run the demo on the sample image configured above.
detect_text(GCS_URI)
| StarcoderdataPython |
11219253 | <reponame>sunbelbd/PaddleEBM
import os
import numpy as np
import scipy.io
from .builder import DATASETS
from .base_dataset import BaseDataset
@DATASETS.register()
class VoxelDataSet(BaseDataset):
    """Voxel dataset read from ModelNet ``*_voxel.mat`` files."""

    def __init__(self, dataroot, data_size=100000, resolution=64, mode="train", category="modelnet10"):
        """Initialize this dataset class.

        Args:
            dataroot (str): Directory containing the ``<category>_<mode>_voxel.mat`` files.
            data_size (int): Keep at most this many voxel grids.
            resolution (int): 64 keeps the native grids; 32 max-pools them down.
            mode (str): File-name suffix, e.g. "train" or "test".
            category (str): A single category name, or "modelnet10"/"modelnet40"
                for the corresponding category collections.

        Raises:
            ValueError: If ``resolution`` is neither 32 nor 64.
        """
        super(VoxelDataSet, self).__init__()
        self.dataset = self.load_data(dataroot, mode, category)
        self.dataset = self.dataset[:data_size].astype(np.float32)
        if resolution == 32:
            self.dataset = self._down_sampling(self.dataset)
        elif resolution != 64:
            # BUG FIX: ``raise "..."`` (raising a plain string) is itself a
            # TypeError under Python 3; raise a real exception instead.
            raise ValueError("Resolution should be 32 or 64")
        self.dataset = self._normalization(self.dataset)

    def load_data(self, dataroot, mode="train", category="modelnet10"):
        """Load and concatenate the voxel arrays for the requested category/mode."""
        train_data = []
        if category == "modelnet40":
            categories = ['cup', 'bookshelf', 'lamp', 'stool', 'desk', 'toilet', 'night_stand', 'bowl', 'door', 'flower_pot', 'plant', 'stairs', 'bottle', 'mantel', 'sofa', 'laptop', 'xbox', 'tent', 'piano', 'car', 'wardrobe', 'tv_stand', 'cone', 'range_hood', 'bathtub', 'curtain', 'sink', 'glass_box', 'bed', 'chair', 'person', 'radio', 'dresser', 'bench', 'airplane', 'guitar', 'keyboard', 'table', 'monitor', 'vase']
            for cat in categories:
                with open(os.path.join(dataroot, "%s_%s_voxel.mat" % (cat, mode)), "rb") as f:
                    d = scipy.io.loadmat(f)["voxel"]
                train_data.append(d)
            train_data = np.concatenate(train_data)
        elif category == "modelnet10":
            categories = ['desk', 'toilet', 'night_stand', 'sofa', 'bathtub', 'bed', 'chair', 'dresser', 'table', 'monitor']
            for cat in categories:
                with open(os.path.join(dataroot, "%s_%s_voxel.mat" % (cat, mode)), "rb") as f:
                    d = scipy.io.loadmat(f)["voxel"]
                train_data.append(d)
            train_data = np.concatenate(train_data)
        else:
            # Single explicit category.
            with open(os.path.join(dataroot, "%s_%s_voxel.mat" % (category, mode)), "rb") as f:
                train_data = scipy.io.loadmat(f)["voxel"]
        return train_data

    def _down_sampling(self, data):
        """Max-pool each 2x2x2 voxel block, halving the grid resolution (64 -> 32)."""
        import skimage.measure
        return skimage.measure.block_reduce(data, (1,2,2,2), np.max)

    def _normalization(self, data):
        """Centre the data by subtracting its global mean."""
        data_mean = data.mean()
        print("Perform normalization, mean = %.4f" % data_mean)
        return data - data_mean
| StarcoderdataPython |
3574967 | '''
Reading file of graph & displaying by networkx
Created in 1. Oct. 2018 by JuneTech
'''
import json
import networkx as nx
def read_json_file(filename):
    """Load a node-link formatted JSON file and return it as a networkx graph."""
    with open(filename) as handle:
        payload = json.load(handle)
    return nx.readwrite.json_graph.node_link_graph(payload)
def draw_graph_with_edgecost(G):
    """Draw *G* with a spring layout and annotate every edge with its "cost" attribute."""
    import matplotlib.pyplot as plt
    pos = nx.spring_layout(G)
    nx.draw_networkx(G, pos)
    labels = nx.get_edge_attributes(G, "cost")
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
    plt.show()
def solve(filename):
    """Load the graph from *filename*; currently only demonstrates edge-cost access."""
    # TA comment: start by reading json file into networkx graph
    nx_graph = read_json_file(filename)
    '''
    for the reference of accessing edge weights!
    you had better delete those two lines before submission
    '''
    for edge in nx_graph.edges():
        print("edge", edge, "have cost of", nx_graph[edge[0]][edge[1]]["cost"])
    '''just use it when you are interested
    import matplotlib.pyplot as plt
    draw_graph_with_edgecost(nx_graph)
    '''
def main():
    """Entry point: run solve() on the bundled sample graph file."""
    solve("nt_hw4_graph1.json")
main()
| StarcoderdataPython |
1631233 | <filename>tests/test_agent_genome.py
from ..nss.enviroments.Enviroment import Enviroment
from ..nss.agents.Agent import Agent
from ..nss.worlds.World import World
import itertools
import time
import copy
import numpy as np
def test_genome():
    """Smoke test: an agent's required energy is unchanged by reproduction and stepping."""
    world = World(10,10)
    # Fixed seed so agent initialisation and reproduction are deterministic.
    np.random.seed(0)
    env = Enviroment((100,100),60)
    agent = Agent(env, world, 10)
    nrg = copy.copy(agent.reqEnergy)
    initial_genome = agent.genome.copy()
    # Give the agent plenty of energy so reproduction is not energy-limited.
    agent.curEnergy = 1000000
    agent.reproduce(env,world, 1000, 10, 1)
    agent.determine_next(env, 1000, [agent])
    agent.eatFood(env)
    agent.curEnergy = 1000000
    reproduced = agent.reproduce(env, world, 1000, 10, 1)
    reproduced.append(agent)
    agent = reproduced[-1]
    print(agent.genome)
    print(agent.reqEnergy)
    assert nrg == agent.reqEnergy
| StarcoderdataPython |
1988452 | <gh_stars>0
#homework 11 main
import homework_11
from homework_11 import Student
from homework_11 import Course
from homework_11 import Enrollment
from homework_11 import Gradebook
# Interactive grade-entry loop over the shared gradebook.
student_record = homework_11.Gradebook()

VALID_GRADES = {"A", "B", "C", "D", "F", "I", "W"}

keep_going = 'y'
while keep_going == 'y':
    enrollment_id = int(input("please input an enrollemnt id"))
    Grade = input("please input a letter grade for this enrollment")
    # Re-prompt until a recognised letter grade is entered.
    while Grade.upper() not in VALID_GRADES:
        Grade = input("Please input a valid letter Grade A,B,C,D,F,I,W")
    enrollment = student_record.enrollments.get(enrollment_id)
    if enrollment is None:
        print("This enrollment is not in our records")
    else:
        enrollment.changeGrade(Grade)
    keep_going = input(" enter y to keep going or anything else to stop")

# Dump the full gradebook once the operator is done entering grades.
print("enrollment_id ", "course title ", "credit hours ", "student name ", "grade ")
for entry in student_record.enrollments:
    student_record.enrollments[entry].displayEnrollment()
| StarcoderdataPython |
11227518 | from FieldData import FieldData
import numpy as np
from math import sqrt
# fields = FieldData('U')
#
# U = fields.readFieldsData()['U']
#
# ccx, ccy, ccz, cc = fields.readCellCenterCoordinates()
#
# meshSize, cellSizeMin, ccx3D, ccy3D, ccz3D = fields.getMeshInfo(ccx, ccy, ccz)
#
# Uslice, ccSlice, sliceDim = fields.createSliceData(U, (1500, 1500, 0), normalVector = (0.5, -sqrt(3)/2., 0))
#
# UmagSlice = np.zeros((Uslice.shape[0], 1))
# for i, row in enumerate(Uslice):
# UmagSlice[i] = np.sqrt(row[0]**2 + row[1]**2 + row[2]**2)
#
# UmagSliceMesh = UmagSlice.reshape((sliceDim[2], sliceDim[0]))
# X = ccSlice[:, 0].reshape((sliceDim[2], sliceDim[0]))
# Z = ccSlice[:, 2].reshape((sliceDim[2], sliceDim[0]))
# Y = ccSlice[:, 1].reshape((sliceDim[2], sliceDim[0]))
# './ABL_N_H/Slices/20000.9038025/U_alongWind_Slice.raw'
# 'I:/SOWFA Data/ALM_N_H/Slices/20500.9078025/U_alongWind_Slice.raw'
# Load a raw slice export: columns are x, y, z, u, v, w (two header lines skipped).
data = np.genfromtxt('/media/yluan/Toshiba External Drive/ALM_N_H/Slices/22000.0558025/U_hubHeight_Slice.raw', skip_header = 2)
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
# Mesh size in x: the first position where x wraps around marks the row length.
valOld = x[0]
for i, val in enumerate(x[1:]):
    if val < valOld:
        print(val, valOld)
        meshSizeXY = i + 1
        break
    valOld = val
X, Y, Z = x.reshape((-1, meshSizeXY)), y.reshape((-1, meshSizeXY)), z.reshape((-1, meshSizeXY))
u, v, w = data[:, 3], data[:, 4], data[:, 5]
# Velocity magnitude at every point of the slice.
UmagSlice = np.zeros((data.shape[0], 1))
for i, row in enumerate(data):
    # BUG FIX: ``np.nan in row`` is always False because NaN never compares
    # equal to anything; use np.isnan so rows containing NaN are reported.
    if np.isnan(row).any():
        print(row)
    UmagSlice[i] = np.sqrt(row[3]**2 + row[4]**2 + row[5]**2)
UmagSliceMesh = UmagSlice.reshape((-1, meshSizeXY))
uMesh, vMesh, wMesh = u.reshape((-1, meshSizeXY)), v.reshape((-1, meshSizeXY)), w.reshape((-1, meshSizeXY))
# # FFt
# uvFft, wFft = np.fft.fft2(uvMagSliceMesh), np.fft.fft2(wMesh)
# # uFft = npfft.fft(uMesh)
# uFft = uvFft
#
# nX, nY = uFft.shape[1], uFft.shape[0]
# freqX, freqY = np.fft.fftfreq(nX, d = 10.), np.fft.fftfreq(nX, d = 10.)
#
# freqX, freqY = np.fft.fftshift(freqX), np.fft.fftshift(freqY)
# # now we can initialize some arrays to hold the wavenumber co-ordinates of each cell
# kx_array = np.zeros(uFft.shape)
# ky_array = np.zeros(uFft.shape)
#
# # before we can calculate the wavenumbers we need to know the total length of the spatial
# # domain data in x and y. This assumes that the spatial domain units are metres and
# # will result in wavenumber domain units of radians / metre.
# x_length = 3000.
# y_length = 3000.
#
# # now the loops to calculate the wavenumbers
# for row in range(uFft.shape[0]):
#
# for column in range(uFft.shape[1]):
#
# kx_array[row][column] = ( 2.0 * np.pi * freqs[column] ) / x_length
# ky_array[row][column] = ( 2.0 * np.pi * freqs[row] ) / y_length
# # Is that right?
# # Shift freqs all to non-negative
# kX, kY = 2*np.pi*(freqX - freqX.min()), 2*np.pi*(freqY - freqY.min())
#
# krOld = 0
# E, kr = np.zeros((uFft.shape[0], 1)), np.zeros((uFft.shape[0], 1))
# for i in range(uFft.shape[0]):
# kr[i] = np.sqrt(kX[i]**2 + kY[i]**2)
# dk = abs(krOld - kr[i])
# eii = float(uFft[i, i]*np.conj(uFft[i, i]))
# E[i] = eii/2.
#
# krOld = kr[i]
# Render the slice as a coloured 3-D surface using the project-local plotting helper.
from PlottingTool import PlotSurfaceSlices3D
myplot = PlotSurfaceSlices3D(X, Y, Z, UmagSliceMesh, name = 'surf', figDir = './', xLim = (0, 3000), yLim = (0, 3000), zLim = (0, 1000), viewAngles = (20, -100))
myplot.initializeFigure()
myplot.plotFigure()
myplot.finalizeFigure()
# import matplotlib as mpl
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from scipy.interpolate import griddata
# from scipy import interpolate
#
#
# # plt.loglog(kr, E)
#
#
#
#
#
# # Refinement
# # gridX, gridY, gridZ = np.mgrid[X.min():X.max():200j, Y.min():Y.max():200j, Z.min():Z.max():150j]
# # Uinterp = griddata(ccSlice, UmagSlice, (gridX, gridY, gridZ))
#
# # gridX, gridZ = np.mgrid[X.min():X.max():200j, Z.min():Z.max():150j]
# # tck = interpolate.bisplrep(X, Z, UmagSliceMesh, s=0)
# # Uinterp = interpolate.bisplev(gridX[:, 0], gridZ[0, :], tck)
#
# colorDim = UmagSliceMesh
# colorMin, colorMax = colorDim.min(), colorDim.max()
# norm = mpl.colors.Normalize(colorMin, colorMax)
# cmap = plt.cm.ScalarMappable(norm = norm, cmap = 'plasma')
# cmap.set_array([])
# fColors = cmap.to_rgba(colorDim)
#
#
# fig = plt.figure()
# ax = fig.gca(projection = '3d')
# ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([1.2, 1.2, 0.6, 1]))
# # cstride/rstride refers to use value of every n cols/rows for facecolors
# plot = ax.plot_surface(X, Y, Z, cstride = 1, rstride = 1, facecolors = fColors, vmin = colorMin, vmax = colorMax,
# shade = False)
# plt.colorbar(cmap, extend = 'both')
#
#
# ax.set_xlim(0, 3000)
# ax.set_ylim(0, 3000)
# ax.set_zlim(0, 1000)
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# ax.view_init(20, -100)
# plt.tight_layout()
# plt.show()
# #
# # # from PlottingTool import plotSlices3D
# # # plotSlices3D([UmagSliceMesh], X, Z, [0])
| StarcoderdataPython |
1808928 | # -*- coding: utf-8 -*-
import numpy as np
import csv
###########
### I/O ###
###########
def find_path(file_name, directory="data", file_type=".csv"):
    """Return ``<cwd>/<directory>/<file_name><file_type>``, creating
    *directory* first when it does not exist yet.

    Args:
        file_name: base name of the file, without extension.
        directory: sub-directory of the current working directory.
        file_type: extension, including the leading dot.
    """
    import os
    if not os.path.exists(directory):
        os.makedirs(directory)
    full_name = file_name + file_type
    return os.path.join(os.getcwd(), os.path.join(directory, full_name))
def create_file(file_name):
    """Create an empty file (if missing) at the standard data path; return that path."""
    import os
    path = find_path(file_name)
    if not os.path.isfile(path):
        with open(path, 'w'):
            pass
    return path
def file_exists(file_name):
    """Return True when the standard data path for *file_name* exists as a file."""
    import os
    return os.path.isfile(find_path(file_name))
def load_csv(file_name, delimiter=';', comment=None):
    # TODO: unimplemented stub -- resolves the path, then does nothing.
    # ``delimiter`` and ``comment`` are currently unused.
    d = find_path(file_name)
    pass
def write_columns_csv(lst, file_name, header=[], index=None, start_char=None, delimiter=';', open_as='wb'):
    """Write *lst* (a sequence of columns) to a CSV file as rows.

    Args:
        lst: list of columns; transposed with zip() before writing.
        file_name: base name resolved through find_path().
        header: optional header row.  (NOTE(review): mutable default -- it is
            only read here, so benign, but a tuple default would be safer.)
        index: optional list of index columns written before the data columns.
            NOTE: this list is mutated (extended with *lst*) as a side effect.
        start_char: optional single marker value written as the first row.
        delimiter: CSV field delimiter.
        open_as: file mode.  NOTE(review): the 'wb' default is Python-2 style;
            under Python 3 csv.writer needs text mode ('w') -- confirm the
            target runtime before relying on the default.
    """
    d = find_path(file_name)
    if index is not None:
        index.extend(lst)
        output_lst = zip(*index)
    else:
        output_lst = zip(*lst)
    with open(d, open_as) as f:
        writer = csv.writer(f, delimiter=delimiter)
        if start_char is not None:
            writer.writerow([start_char])
        if header:
            writer.writerow(header)
        for row in output_lst:
            writer.writerow(row)
def write_columns_to_existing(lst, file_name, header="", delimiter=';'):
    """Append *lst* as new column(s) to an existing CSV file, row by row.

    Reads the whole file, extends each row with the matching entry of *lst*
    (a flat list adds one column; a nested list/ndarray adds several), then
    rewrites the file with the extended header.

    NOTE(review): ``lst = zip(*lst)`` followed by ``len(lst)``/``lst[i]``
    only works on Python 2 where zip returns a list; under Python 3 this
    raises TypeError -- confirm the intended runtime.
    """
    d = find_path(file_name)
    with open(d, 'r') as finput:
        reader = csv.reader(finput, delimiter=delimiter)
        all_lst = []
        row = next(reader)
        # Nested input means one new column per inner sequence.
        nested_list = isinstance(lst[0], list) or isinstance(lst[0], np.ndarray)
        if nested_list:
            lst = zip(*lst)
            row.extend(header)
        else:
            row.append(header)
        all_lst.append(row)
        n = len(lst)
        i = 0
        for row in reader:
            if nested_list:
                row.extend(lst[i])
            else:
                row.append(lst[i])
            all_lst.append(row)
            i += 1
    with open(d, 'w') as foutput:
        writer = csv.writer(foutput, delimiter=delimiter)
        writer.writerows(all_lst)
def append_to_existing(lst, file_name, header="", index=None, delimiter=';', start_char=None):
    """Append columns to a CSV file: thin wrapper opening in append mode."""
    write_columns_csv(lst, file_name, header, index,
                      start_char=start_char, delimiter=delimiter, open_as='a')
def import_csv(file_name, delimiter=';', header=True, indices=None, start_at=0, break_at='\n', ignore=""):
    """Read a CSV file from the data directory into a float64 numpy array.

    Despite its name, *indices* is an integer column count, not a list: each
    row is split at that position into an index part (row[:indices]) and a
    data part (row[indices:]).

    Returns, depending on flags:
      - header True,  indices falsy: (header_row, data_array)
      - header True,  indices truthy: (header_row[indices:], indices_lst, data_array)
      - otherwise: data_array only (any collected indices_lst is discarded).

    Reading stops at the first row whose first cell equals *break_at*; rows
    whose first cell equals *ignore* are skipped.
    NOTE(review): assumes all remaining cells are numeric -- np.array(...,
    dtype="float64") raises otherwise.
    """
    d = find_path(file_name)
    input_lst = []
    indices_lst = []
    with open(d, 'r') as f:
        reader = csv.reader(f, delimiter=delimiter)
        # skip leading rows before any header handling
        for _ in range(0, start_at):
            next(reader)
        if header:
            header_row = next(reader)
        for row in reader:
            if row[0] == break_at:
                break
            if row[0] == ignore:
                continue
            if indices:
                input_lst.append(row[indices:])
                indices_lst.append(row[:indices])
            else:
                input_lst.append(row)
    if header and not indices :
        return header_row, np.array(input_lst, dtype="float64")
    elif header and indices:
        return header_row[indices:], indices_lst, np.array(input_lst, dtype="float64")
    return np.array(input_lst, dtype="float64")
##########
### MP ###
##########
def _pickle_method(method):
    """Reduce a bound method to (name, instance, class) so it can be pickled
    (e.g. for multiprocessing).

    NOTE(review): uses the Python 2 bound-method attributes
    im_func/im_self/im_class; on Python 3 these are __func__/__self__ --
    confirm the intended runtime before reuse.
    """
    func_name = method.im_func.__name__
    obj = method.im_self
    cls = method.im_class
    if func_name.startswith('__') and not func_name.endswith('__'): #deal with mangled names
        cls_name = cls.__name__.lstrip('_')
        func_name = '_' + cls_name + func_name
    return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.__mro__:
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
| StarcoderdataPython |
132 | <filename>examples/first_char_last_column.py
#!/usr/bin/env python3
"""
For the last column, print only the first character.
Usage:
$ printf "100,200\n0,\n" | python3 first_char_last_column.py
Should print "100,2\n0,"
"""
import csv
from sys import stdin, stdout
def main():
    """Copy CSV from stdin to stdout, truncating the last column to its first character."""
    writer = csv.writer(stdout)
    for record in csv.reader(stdin):
        # Empty rows and empty last fields are passed through unchanged
        # (the original swallowed the resulting IndexError).
        if record and record[-1]:
            record[-1] = record[-1][0]
        writer.writerow(record)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3345426 | import openpyxl
import pandas as pd
REQUIRED_COLUMNS = ['<NAME>', 'Name', 'M/F', 'Field of Study', 'Nationality']
teaming_columns = ['1st', '2nd', 'Partner']
# Source: https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
_colors = ['#e6194B', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4',
'#42d4f4', '#f032e6', '#bfef45', '#fabebe', '#469990', '#e6beff',
'#9A6324', '#fffac8', '#800000', '#000075']
_font_colors = ['white', 'white', 'black', 'white', 'white', 'white',
'black', 'white', 'black', 'black', 'white', 'black',
'white', 'black', 'white', 'white']
# Specified by Christian
colors = [
'#c27ba0', '#8e7ac5', '#d9d9d9', '#6b9cee', '#92c57a', '#ffda5c', '#f7b365',
'#ce4019', '#ead1dc', '#d9d0ea', '#f3f3f3', '#c7d8f9', '#d9e9d2', '#fff1ca',
'#fde5cb', '#f5ccca']
font_colors = [
'white', 'white', 'black', 'white', 'black', 'black', 'black',
'white', 'black', 'black', 'black', 'black', 'black', 'black',
'black', 'black']
disciplines = [
'Business', 'Creative Disciplines', 'Engineering', 'Humanities',
'Life Sciences', 'Media', 'Social Sciences' ]
discipline_colors = [
'#4783eb', '#ff9a00', '#68a94a', '#8e7ac5', '#d9d0ea', '#fde5cb', '#ffff00']
discipline_font_colors = [
'white', 'black', 'white', 'white', 'black', 'black', 'black']
def add_teaming_colors(teaming, workbook, worksheet):
    """Conditionally colour the last three (teaming) columns by team number."""
    # A=65; the first teaming column sits len(columns)-3 from the left
    # (equals list(teaming.columns).index('1st')).
    first_column = 65 + len(teaming.columns) - 3
    cell_range = f'{chr(first_column)}2:{chr(first_column + 2)}{len(teaming.index) + 1}'
    for team_number, (bg, fg) in enumerate(zip(colors, font_colors), start=1):
        fmt = workbook.add_format({'bg_color': bg, 'font_color': fg})
        worksheet.conditional_format(cell_range, {
            'type': 'cell',
            'criteria': 'equal to',
            'value': team_number,
            'format': fmt})
def add_discipline_colors(teaming, workbook, worksheet):
    """Colour the 'Field of Study' column according to the discipline palette."""
    column = chr(65 + list(teaming.columns).index('Field of Study'))
    cell_range = f'{column}2:{column}{len(teaming.index) + 1}'
    for name, bg, fg in zip(disciplines, discipline_colors, discipline_font_colors):
        fmt = workbook.add_format({'bg_color': bg, 'font_color': fg})
        worksheet.conditional_format(cell_range, {
            'type': 'cell',
            'criteria': 'equal to',
            # Excel requires strings to be double quoted
            'value': f'"{name}"',
            'format': fmt})
def add_centering_and_spacing(teaming, workbook, worksheet):
    """Size every column to fit its widest entry; centre-align the narrow ones."""
    centered = workbook.add_format()
    centered.set_align('center')
    for position, column in enumerate(teaming):
        longest_value = teaming[column].astype(str).str.len().max()
        width = max(longest_value, len(str(column))) + 1  # a little extra space
        worksheet.set_column(position, position, width,
                             centered if width < 5 else None)
def add_collisions(collisions, writer, workbook):
    """Write the collision pairs to a dedicated 'Collisions' sheet.

    NOTE(review): appears unfinished -- pd.ExcelWriter has no
    createWorkSheet() method, and the fetched 'Teamings' worksheet is never
    used. The only call site (in export) is commented out; verify before
    re-enabling.
    """
    writer.createWorkSheet('Collisions')
    collisions = pd.DataFrame(collisions, columns=['Student', 'Student', 'Teams'])
    collisions.to_excel(writer, sheet_name='Collisions', index=False)
    worksheet = writer.sheets['Teamings']
def export(teaming, filename, collisions=None):
    """Write the teaming DataFrame to '<filename>.xlsx' with colouring and
    column sizing applied."""
    # Create a Pandas Excel writer using XlsxWriter as the engine.
    # Bug fix: the output name previously ignored the `filename` argument.
    writer = pd.ExcelWriter(f'{filename}.xlsx', engine='xlsxwriter')
    teaming.to_excel(writer, sheet_name='Teamings', index=False)
    workbook = writer.book
    worksheet = writer.sheets['Teamings']
    add_teaming_colors(teaming, workbook, worksheet)
    add_discipline_colors(teaming, workbook, worksheet)
    add_centering_and_spacing(teaming, workbook, worksheet)
    # if collisions is not None:
    #     add_collisions(collisions, writer, workbook)
    writer.save()
def remove_hidden_columns(df, file):
    """Drop DataFrame columns that are hidden in the source workbook.

    Columns listed in REQUIRED_COLUMNS are always kept, hidden or not.
    Columns with no recorded dimensions are treated as visible (openpyxl only
    records dimensions for customised columns).
    """
    wb = openpyxl.load_workbook(file)
    ws = wb.worksheets[0]

    def _column_letter(n):
        # 1-based column index -> Excel column letter ('A', ..., 'Z', 'AA', ...).
        letters = ""
        while n:
            n, rem = divmod(n - 1, 26)
            letters = chr(65 + rem) + letters
        return letters

    visible_columns = []
    for i, name in enumerate(df.columns):
        # If a hidden column is required, we ignore the visibility.
        if name in REQUIRED_COLUMNS:
            visible_columns.append(i)
            continue  # bug fix: a visible required column was previously added twice
        # Bug fix: column_dimensions is keyed by column letter, not by the
        # integer index, so the original membership test never succeeded and
        # every non-required column was dropped.
        letter = _column_letter(i + 1)
        if letter in ws.column_dimensions and ws.column_dimensions[letter].hidden:
            continue
        visible_columns.append(i)
    return df[df.columns[visible_columns]]
| StarcoderdataPython |
6594608 | <reponame>TheaperDeng/anomalib<gh_stars>0
from pytorch_lightning import Trainer, seed_everything
from anomalib.config import get_configurable_parameters
from anomalib.core.callbacks import get_callbacks
from anomalib.data import get_datamodule
from anomalib.models import get_model
from tests.helpers.dataset import get_dataset_path
def run_train_test(config):
    """Build model/data/callbacks from *config*, fit, and return test metrics."""
    model = get_model(config)
    datamodule = get_datamodule(config)
    trainer = Trainer(**config.trainer, callbacks=get_callbacks(config))
    trainer.fit(model=model, datamodule=datamodule)
    return trainer.test(model=model, datamodule=datamodule)
def test_normalizer():
    """Train PADIM three times (no / CDF / min-max normalization) with the same
    seed and assert the image-level metrics are identical to 3 decimals."""
    config = get_configurable_parameters(model_config_path="anomalib/models/padim/config.yaml")
    config.dataset.path = get_dataset_path(config.dataset.path)
    config.model.threshold.adaptive = True
    # run without normalization
    config.model.normalization_method = "none"
    seed_everything(42)
    results_without_normalization = run_train_test(config)
    # run with cdf normalization
    config.model.normalization_method = "cdf"
    seed_everything(42)
    results_with_cdf_normalization = run_train_test(config)
    # run with min-max normalization
    config.model.normalization_method = "min_max"
    seed_everything(42)
    results_with_minmax_normalization = run_train_test(config)
    # performance should be the same
    for metric in ["image_AUROC", "image_F1"]:
        assert round(results_without_normalization[0][metric], 3) == round(results_with_cdf_normalization[0][metric], 3)
        assert round(results_without_normalization[0][metric], 3) == round(
            results_with_minmax_normalization[0][metric], 3
        )
| StarcoderdataPython |
349450 | import socket
def ServerOnPort(Number_Port, Protocol):
    """Print and return the well-known service name registered for a port.

    Parameters:
        Number_Port: port number, e.g. 80.
        Protocol: 'tcp' or 'udp'.

    Returns:
        The service name (also printed). Previously the lookup result was
        discarded, making the helper unusable programmatically.

    Raises:
        OSError: when the port/protocol pair is not registered.
    """
    ServiceName = socket.getservbyport(Number_Port, Protocol)
    print("[+] port number %d : %s"%(Number_Port, ServiceName))
    return ServiceName
4886256 | <filename>timing.py
'''
Post-processing script that implements pause / gap transcription in terms
of both beats and absolute timing.
Part of the Gailbot-3 development project.
Developed by:
<NAME>
Tufts University
Human Interaction Lab at Tufts
Initial development: 6/6/19
'''
import sys,os
from termcolor import colored # Text coloring library.
# Gailbot scripts
import rateAnalysis
# *** Global variables / invariants ***
# Describes how many beats 1 second is equal to.
SECStoBEATS = 4
latchMarker = u'\u2248'
# *** Main pause / gap transcription functions ***
# Function that adds pause markers to the combined speaker transcripts.
# Pauses added to combined list to prevent end of line pause transcriptions.
# Input: list of lists containing dictionaries.
#        CHATVals dictionary containing transcription thresholds
# Output : list of lists containing dictionaries.
def pauses(infoList,CHATVals):
    """Annotate same-speaker silences with latch / pause / micropause markers.

    Each entry of jsonListCombined appears to be [speaker, start, end, text]
    (speaker compared at [0], times at [1]/[2], marker text appended to [3])
    -- TODO confirm against the producer of jsonListCombined.
    Entries are mutated in place; very large silences become separate '*PAU'
    entries instead.
    """
    for item in infoList:
        # Getting appropriate transcription function
        pauseFunc,closure = transcriptionFunction(item,CHATVals)
        # Adding pause markers
        newList = []
        jsonListCombined = item[0]['jsonListCombined']
        for count,curr in enumerate(jsonListCombined[:-1]):
            nxt = jsonListCombined[count+1]
            # Only add pauses if current and next speaker is the same.
            if curr[0] != nxt[0]:newList.append(curr);continue
            # silence = gap between end of current turn and start of the next
            diff = round(nxt[1] - curr[2],2)
            # In this case, the latch marker is added.
            if diff >= CHATVals['lowerBoundLatch'] and diff <= CHATVals['upperBoundLatch']:
                curr[3] += ' ' + latchMarker + ' '
            # In this case, the normal pause markers are added.
            elif diff >= CHATVals['lowerBoundPause'] and diff <= CHATVals['upperBoundPause']:
                curr[3] += pauseFunc(diff,closure)
            # In this case, micropause markers are added.
            elif diff >= CHATVals['lowerBoundMicropause']and diff <= CHATVals['upperBoundMicropause']:
                curr[3] += pauseFunc(diff,closure)
            # In this case, very large pause markers are added
            # (as a standalone '*PAU' entry rather than appended text).
            elif diff > CHATVals['LargePause']:
                largePause = ['*PAU',curr[2],nxt[1],pauseFunc(diff,closure)]
                newList.extend([curr,largePause]) ; continue
            newList.append(curr)
        newList.append(jsonListCombined[-1])
        for dic in item: dic['jsonListCombined'] = newList
    return infoList
# Inserts inter-turn gap entries ('*GAP') into each combined transcript.
def gaps(infoList,CHATVals):
    """Add a '*GAP' entry wherever the silence between consecutive entries
    reaches CHATVals['gap'], formatted in beats or absolute seconds."""
    for item in infoList:
        if CHATVals['beatsMode']:
            gapFunc, closure = beatsTiming, item[0]['syllPerSec']
        else:
            gapFunc, closure = absoluteTiming, CHATVals['upperBoundMicropause']
        combined = item[0]['jsonListCombined']
        rebuilt = []
        for pos, entry in enumerate(combined[:-1]):
            following = combined[pos + 1]
            silence = round(following[1] - entry[2], 2)
            rebuilt.append(entry)
            if silence >= CHATVals['gap']:
                rebuilt.append(['*GAP', entry[2], following[1], gapFunc(silence, closure)])
        rebuilt.append(combined[-1])
        for dic in item:
            dic['jsonListCombined'] = rebuilt
    return infoList
# *** Functions involved in calculating pauses / gaps in beat timing ***
# Chooses between beat-based and absolute pause rendering.
def transcriptionFunction(item,CHATVals):
    """Return (pause function, closure argument) for this conversation and
    store the computed syllable rate (or None) on every dict of *item*."""
    if CHATVals['beatsMode']:
        rate = calcSyllPerSec(item[0]['jsonListCombined'])
        chosen, closure = beatsTiming, rate
    else:
        rate = None
        chosen, closure = absoluteTiming, CHATVals['upperBoundMicropause']
    for dic in item:
        dic['syllPerSec'] = rate
    return chosen, closure
# Median syllable rate for a combined conversation.
def calcSyllPerSec(jsonListCombined):
    """Return the median syllables-per-second rate via rateAnalysis."""
    syllables = rateAnalysis.findSyllables(jsonListCombined)
    return rateAnalysis.stats(syllables)['median']
# Renders a pause/gap duration measured in beats (printed as seconds).
def beatsTiming(diff,syllPerSec):
    """Convert *diff* seconds into beats at *syllPerSec*, then back to
    seconds via SECStoBEATS, and format as ' (x.y)'."""
    n_beats = float(diff * syllPerSec)
    seconds = float(n_beats / SECStoBEATS)
    return ' (' + str(round(seconds, 1)) + ')'
# Renders a pause/gap duration in absolute seconds.
def absoluteTiming(diff,upperBoundMicropause):
    """Format *diff* seconds as ' (x.y)'; micropauses render as ' (.) '."""
    if diff > upperBoundMicropause:
        return ' (' + str(round(diff, 1)) + ')'
    return ' (.) '
| StarcoderdataPython |
6494456 |
def read_file_to_list(filename):
    """Parse a password-policy file into a list of tuples.

    Each line has the form '<lo>-<hi> <char>: <password>'; the result is a
    list of (lo, hi, char, password) tuples with lo/hi as ints and the
    password stripped of surrounding whitespace.
    """
    entries = []  # renamed: the original shadowed the builtins `list` and `range`
    # 'with' guarantees the handle is closed even if a line fails to parse.
    with open(filename, "r") as file:
        for line in file:
            policy, password = line.split(':')
            counts, character = policy.split(' ')
            low, high = counts.split('-')
            entries.append((int(low), int(high), character, password.strip()))
    return entries
def count(password,character):
    """Return how many times *character* occurs in *password*.

    Delegates to str.count (identical result for single characters) instead
    of the original hand-rolled loop.
    """
    return password.count(character)
def check(password,index,character):
    """Return 1 when the 1-indexed position *index* of *password* holds
    *character*, else 0."""
    return 1 if password[index - 1] == character else 0
| StarcoderdataPython |
9721125 | <gh_stars>0
from builtins import object
import os.path
import threading
import time
class NightFilenameGen(object):
    """Generates per-night data directories and sequence-numbered filenames,
    persisting the next sequence number in a small text file."""
    def __init__(self, rootDir='.',
                 seqnoFile='nextSeqno',
                 namesFunc=None,
                 filePrefix='TEST', fileSuffix="fits",
                 filePattern="%(filePrefix)s%(seqno)08d.%(fileSuffix)s",
                 dayOffset=-3600*12):
        """ Set up a per-night filename generator.
        Under a given root, each night gets a subdirectory, and all the files under the root
        are named using a managed sequence number. For example:
           /data/PFS
           /data/PFS/2014-04-02/PFSA00000012.fits
           /data/PFS/2014-04-03/PFSA00000013.fits
        We do _not_ create any files, except for the directories and a seqno file.
        Parameters
        ----------
        rootDir - string
           The root directory that we manage. Will be created if
           it does not exist.
        seqnoFile - string, optional
           The name of the file where we save the next sequence number.
        namesFunc - callable, optional
           A function which takes (directoryName, sequenceNumber), and
           returns a list of complete paths.
        filePrefix - string, optional, default="TEST"
        filePattern - string, optional, default="%(filePrefix)s%(seqno)08d.%(fileSuffix)s"
        dayOffset - integer, optional, default=-3600*12
           The night's rollover time. By default, noon UT.
        """
        self.rootDir = rootDir
        self.filePrefix = filePrefix
        self.filePattern = filePattern
        self.fileSuffix = fileSuffix
        self.namesFunc = namesFunc if namesFunc is not None else self.defaultNamesFunc
        # When simRoot is set, filenames are read back from that directory
        # (simulation/replay mode) instead of being generated.
        self.simRoot = None
        self.simSeqno = None
        self.dayOffset = dayOffset
        # A bare seqno filename is placed under rootDir.
        head, tail = os.path.split(seqnoFile)
        if not head:
            seqnoFile = os.path.join(rootDir, tail)
        self.seqnoFile = seqnoFile
        # Guards the read-modify-write cycle on the seqno file.
        self.seqnoFileLock = threading.Lock()
        self.seqno = 0
        self.setup()
    def setup(self, rootDir=None, seqnoFile=None, seqno=1):
        """ If necessary, create directories and sequence files. """
        if not rootDir:
            rootDir = self.rootDir
        if not seqnoFile:
            seqnoFile = self.seqnoFile
        if not os.path.isdir(rootDir):
            os.makedirs(rootDir)
        if not os.access(seqnoFile, os.F_OK):
            # NOTE(review): this handle is written but never explicitly
            # closed/flushed -- relies on interpreter cleanup.
            seqFile = open(seqnoFile, "w")
            seqFile.write("%d\n" % (seqno))
    def defaultNamesFunc(self, rootDir, seqno):
        """ Returns a list of filenames. """
        d = dict(filePrefix=self.filePrefix, seqno=seqno, fileSuffix=self.fileSuffix)
        filename = os.path.join(rootDir, self.filePattern % d)
        return (filename,)
    def consumeNextSeqno(self, seqno=None):
        """ Return the next free sequence number. """
        with self.seqnoFileLock:
            try:
                sf = open(self.seqnoFile, "r")
                seq = sf.readline()
                seq = seq.strip()
                fileSeqno = int(seq)
            except Exception as e:
                raise RuntimeError("could not read sequence integer from %s: %s" %
                                   (self.seqnoFile, e))
            # If seqno is passed in, it is the seqno we want.
            # The file contains the _last_ seqno
            if seqno is None:
                seqno = fileSeqno
            else:
                seqno -= 1
            nextSeqno = seqno+1
            try:
                sf = open(self.seqnoFile, "w")
                sf.write("%d\n" % (nextSeqno))
                sf.truncate()
                sf.close()
            except Exception as e:
                raise RuntimeError("could not WRITE sequence integer to %s: %s" %
                                   (self.seqnoFile, e))
        self.seqno = nextSeqno
        return nextSeqno
    def dirname(self):
        """ Return the next directory to use. """
        # Shift 'now' by dayOffset so the night rolls over at the desired hour.
        dirnow = time.time() + self.dayOffset
        utday = time.strftime('%Y-%m-%d', time.gmtime(dirnow))
        dataDir = os.path.join(self.rootDir, utday)
        if not os.path.isdir(dataDir):
            # cmd.respond('text="creating new directory %s"' % (dataDir))
            os.mkdir(dataDir, 0o2775)
        return dataDir
    def genNextRealPath(self, seqno=None):
        """ Return the next filename to create. """
        dataDir = self.dirname()
        seqno = self.consumeNextSeqno(seqno=seqno)
        imgFiles = self.namesFunc(dataDir, seqno)
        return imgFiles
    def genNextSimPath(self):
        """ Return the next filename to read. """
        filenames = self.namesFunc(self.simRoot, self.simSeqno)
        self.simSeqno += 1
        # Returns None once the simulated sequence runs out of files.
        return filenames if os.path.isfile(filenames[0]) else None
    def getNextFileset(self, seqno=None):
        """Return the next fileset: simulated when simRoot is set, real otherwise."""
        if self.simRoot:
            return self.genNextSimPath()
        else:
            return self.genNextRealPath(seqno=seqno)
def test1():
    """Smoke test: build a generator rooted at /tmp with a 'testSeq' seqno file."""
    # Bug fix: the class is named NightFilenameGen; the old reference to
    # FilenameGen raised NameError.
    gen = NightFilenameGen('/tmp', 'testSeq')
    gen.setup()
| StarcoderdataPython |
187042 | # Algoritmos y Complejidad
# Profesor: <NAME>
# Alumno: <NAME>
import datetime as time
import numpy as np
from matplotlib import pyplot as plt
import AlgoritmosOrdenacion as sort
# Configuaracion
inicio = 0 # Tamano inicial del arreglo
aumento = 1 # Aumento del tamano del arreglo
tamMax = 1000001 # Tamano maximo del arreglo
#arr = [] # Arreglo generado aleatoriamente
bubbleT = [] # Tiempo del bubble sort
insertionT = [] # Tiempo del insertion sort
mergeT = [] # Tiempo del merge sort
tamX = [] # Valores de la grafica en X
# Times the three sorting algorithms on fresh random arrays of size n and
# returns their runtimes (microseconds) as [bubble, insertion, merge].
def ProbarOrdenacion(n):
    """Benchmark bubble, insertion and merge sort on random arrays of size n."""
    timings = []

    def _timed(run):
        # Fresh random data per algorithm, as in the original.
        data = np.random.randint(1, 1000, size=n)
        begin = time.datetime.now()
        run(data)
        end = time.datetime.now()
        timings.append(int((end - begin).total_seconds() * 1000000))

    _timed(sort.BubbleSort)
    _timed(sort.InsertionSort)
    _timed(lambda data: sort.MergeSort(data, 0, n - 1))
    return timings
# Draws the runtime-vs-array-size chart for the three algorithms.
def dibujar():
    """Plot the recorded timings stored in the module-level lists."""
    for series, colour in ((bubbleT, 'b'), (insertionT, 'r'), (mergeT, 'g')):
        plt.plot(tamX, series, colour)
    plt.title("Algoritmos de ordenacion")
    plt.xlabel("Tamano del arreglo")
    plt.ylabel("Tiempo")
    plt.legend(["bubble sort", "insertion sort", "merge sort"])
# Entry point: benchmark every size from `inicio` to `tamMax` and report.
def main():
    """Run the benchmark sweep, plot live, and print all collected timings."""
    size = inicio
    while size < tamMax:
        medido = ProbarOrdenacion(size)
        bubbleT.append(medido[0])
        insertionT.append(medido[1])
        mergeT.append(medido[2])
        tamX.append(size)
        size += aumento
        dibujar()
        plt.pause(0.05)
    print("----------------------------------")
    print("Tiempos:")
    print(tamX)
    print("Bubble Sort:")
    print(bubbleT)
    print("Insertion Sort:")
    print(insertionT)
    print("Merge Sort:")
    print(mergeT)
main()
dibujar()
plt.show()
| StarcoderdataPython |
6550114 | <gh_stars>1-10
import click
from neoload_cli_lib import user_data
@click.command()
def cli():
    """Log out remove configuration file"""
    # NOTE: click renders the docstring above as this command's --help text,
    # so it is deliberately left unchanged.
    user_data.do_logout()
    print("logout successfully")
| StarcoderdataPython |
5000704 | import typing
import torch
from .base_trainer import BaseTrainer
from fba import logger, utils
class Trainer(BaseTrainer):
    """GAN training loop: alternates one discriminator and one generator step
    per batch under AMP gradient scaling, maintaining an EMA copy of the
    generator and periodically logging losses."""
    def __init__(
            self,
            generator: torch.nn.Module,
            discriminator: torch.nn.Module,
            EMA_generator: torch.nn.Module,
            D_optimizer: torch.optim.Optimizer,
            G_optimizer: torch.optim.Optimizer,
            data_train: typing.Iterator,
            data_val: typing.Iterable,
            scaler: torch.cuda.amp.GradScaler,
            ims_per_log: int,
            loss_handler,
            *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.generator = generator
        self.discriminator = discriminator
        self.EMA_generator = EMA_generator
        self.D_optimizer = D_optimizer
        self.G_optimizer = G_optimizer
        self.data_train = data_train
        self.data_val = data_val
        self.scaler = scaler
        # Log losses roughly every `ims_per_log` images (tracked via global_step).
        self._ims_per_log = ims_per_log
        self._next_log_point = logger.global_step
        logger.log_model([self.generator, self.discriminator])
        logger.log_dictionary({
            "stats/discriminator_parameters": utils.num_parameters(self.discriminator),
            "stats/generator_parameters": utils.num_parameters(self.generator),
        }, commit=False)
        self.load_checkpoint()
        # Accumulates per-step loss tensors between log points.
        self.to_log = {}
        self.loss_handler = loss_handler
    def state_dict(self):
        """Collect D/G/EMA weights, both optimizers and the global step for checkpointing."""
        G_sd = self.generator.state_dict()
        D_sd = self.discriminator.state_dict()
        EMA_sd = self.EMA_generator.state_dict()
        # Unwrap (Distributed)DataParallel so checkpoints are wrapper-agnostic.
        if isinstance(self.generator, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
            G_sd = self.generator.module.state_dict()
            D_sd = self.discriminator.module.state_dict()
        state_dict = {
            "D": D_sd,
            "G": G_sd,
            "EMA_generator": EMA_sd,
            "D_optimizer": self.D_optimizer.state_dict(),
            "G_optimizer": self.G_optimizer.state_dict(),
            "global_step": logger.global_step,
        }
        state_dict.update(super().state_dict())
        return state_dict
    def load_state_dict(self, state_dict: dict):
        """Restore everything saved by state_dict(), honouring DataParallel wrapping."""
        logger.update_global_step(state_dict["global_step"])
        self.EMA_generator.load_state_dict(state_dict["EMA_generator"])
        if isinstance(self.generator, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
            self.discriminator.module.load_state_dict(state_dict["D"])
            self.generator.module.load_state_dict(state_dict["G"])
        else:
            self.discriminator.load_state_dict(state_dict["D"])
            self.generator.load_state_dict(state_dict["G"])
        self.D_optimizer.load_state_dict(state_dict["D_optimizer"])
        self.G_optimizer.load_state_dict(state_dict["G_optimizer"])
        super().load_state_dict(state_dict)
    def train_step(self):
        """One D step + one G step on the next batch, then EMA/logging bookkeeping."""
        with torch.autograd.profiler.record_function("data_fetch"):
            batch = next(self.data_train)
        self.to_log.update(self.step_D(batch))
        self.to_log.update(self.step_G(batch))
        self.EMA_generator.update(self.generator)
        # Flush accumulated losses once per logging interval.
        if logger.global_step >= self._next_log_point:
            log = {f"loss/{key}": item.item() for key, item in self.to_log.items()}
            logger.log_variable("amp/grad_scale", self.scaler.get_scale())
            logger.log_dictionary(log, commit=True)
            self._next_log_point += self._ims_per_log
            self.to_log = {}
    def step_D(self, batch):
        """Single discriminator update; returns the loss dict for logging."""
        # Only D receives gradients during its update.
        utils.set_requires_grad(self.discriminator, True)
        utils.set_requires_grad(self.generator, False)
        utils.zero_grad(self.discriminator)
        loss, to_log = self.loss_handler.D_loss(batch)
        with torch.autograd.profiler.record_function("D_step"):
            self.scaler.scale(loss).backward()
            self.scaler.step(self.D_optimizer)
            self.scaler.update()
        utils.set_requires_grad(self.discriminator, False)
        utils.set_requires_grad(self.generator, False)
        return to_log
    def step_G(self, batch):
        """Single generator update; returns the loss dict for logging."""
        # Only G receives gradients during its update.
        utils.set_requires_grad(self.discriminator, False)
        utils.set_requires_grad(self.generator, True)
        utils.zero_grad(self.generator)
        loss, to_log = self.loss_handler.G_loss(batch)
        with torch.autograd.profiler.record_function("G_step"):
            self.scaler.scale(loss).backward()
            self.scaler.step(self.G_optimizer)
            self.scaler.update()
        utils.set_requires_grad(self.discriminator, False)
        utils.set_requires_grad(self.generator, False)
        return to_log
    def before_step(self):
        """Advance the EMA decay schedule before each training step."""
        super().before_step()
        self.EMA_generator.update_beta()
| StarcoderdataPython |
6575016 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
# Auto-generated Django migration: do not hand-edit field definitions.
class Migration(migrations.Migration):
    """Initial migration: creates the Task and TodoList models and links
    Task.todolist to TodoList."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('is_completed', models.BooleanField(default=False)),
                ('completed_date', models.DateTimeField(null=True, editable=False, blank=True)),
                ('expire_date', models.DateTimeField(null=True, blank=True)),
                ('is_expired', models.BooleanField(default=False, editable=False)),
                ('is_active', models.BooleanField(default=True)),
                ('priority', models.PositiveIntegerField()),
                ('title', models.CharField(max_length=60)),
                ('title_slug', models.SlugField(max_length=60, editable=False)),
                ('content', models.TextField()),
                ('created_by', models.ForeignKey(related_name='luzfcb_todo_task_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='luzfcb_todo_task_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TodoList',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('is_completed', models.BooleanField(default=False)),
                ('completed_date', models.DateTimeField(null=True, editable=False, blank=True)),
                ('expire_date', models.DateTimeField(null=True, blank=True)),
                ('is_expired', models.BooleanField(default=False, editable=False)),
                ('is_active', models.BooleanField(default=True)),
                ('priority', models.PositiveIntegerField()),
                ('name', models.CharField(max_length=60)),
                ('name_slug', models.SlugField(max_length=60, editable=False)),
                ('created_by', models.ForeignKey(related_name='luzfcb_todo_todolist_created_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
                ('modified_by', models.ForeignKey(related_name='luzfcb_todo_todolist_modified_by', on_delete=django.db.models.deletion.SET_NULL, blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='task',
            name='todolist',
            field=models.ForeignKey(to='luzfcb_todo.TodoList'),
        ),
    ]
| StarcoderdataPython |
4866644 | <filename>app/database/test.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
con = None
try:
    con = psycopg2.connect("host='localhost' dbname='testdb' user='pythonspot' password='password'")
    cur = con.cursor()
    cur.execute("SELECT * FROM Products")
    while True:
        row = cur.fetchone()
        if row == None:
            break
        print("Product: " + row[1] + "\t\tPrice: " + str(row[2]))
# Python 3 fix: 'except E, e:' and the bare print statement were Python 2
# only syntax and made this file unparsable under Python 3.
except psycopg2.DatabaseError as e:
    if con:
        con.rollback()
    print('Error %s' % e)
    sys.exit(1)
finally:
    if con:
        con.close()
8193295 | import logging
import pandas as pd
from data.dataset import Metric
def match_jobs_to_node(jobs: pd.DataFrame, nodes: pd.DataFrame):
    """Match job information to node performance information and return a dataframe that contains the union of
    the previous columns.
    """
    host_col = Metric.HOST_NAME.value
    job_hosts = jobs[host_col].unique()
    env_hosts = nodes[host_col].unique()
    logging.debug("Number of hosts jobs were run on: {}".format(len(job_hosts)))
    logging.debug("Number of hosts in resource environment: {}".format(len(env_hosts)))
    unmatched = [host for host in job_hosts if host not in env_hosts]
    idle_nodes = [host for host in env_hosts if host not in job_hosts]
    matched_count = len([host for host in job_hosts if host in env_hosts])
    logging.debug("Found {} nodes with jobs.".format(matched_count))
    logging.debug("Found {} unmatched job nodes: {}".format(len(unmatched), unmatched))
    logging.debug("Number of jobs per unmatched job node:")
    # Retrieve number of jobs that cannot be matched
    logging.debug(", ".join(
        ["node: {}, jobs: {}".format(host, jobs[jobs[host_col] == host].shape[0]) for host in
         unmatched]))
    logging.debug("Found {} nodes without jobs.".format(len(idle_nodes)))
    # Merge jobs and nodes tables
    merged = pd.merge(jobs, nodes, how='left', on=host_col)
    logging.debug("Joined {} job rows to {} node rows, result with {} rows"
                  .format(jobs.shape[0], nodes.shape[0], merged.shape[0]))
    return merged
| StarcoderdataPython |
5117615 | <filename>Scrapers/wiredReviewsScraper.py
import requests, time, csv, sqlite3
from bs4 import BeautifulSoup
from sqlite3 import Error
# Review scraped live from wired.com at construction time.
class WiredReview:
    """Holds one phone review; fetching and parsing happen in __init__."""
    def __init__(self, phoneName, url):
        self.phoneName = phoneName
        self.url = url
        self.soup = createSoup(url)
        self.score = getWiredReviewScore(self.soup)
        self.good = getWiredGood(self.soup)
        self.bad = getWiredBad(self.soup)

    def printReviewSummary(self):
        """Return the review fields as a flat list (name, url, score, pros, cons)."""
        return [self.phoneName, self.url, self.score, self.good, self.bad]

    def getScore(self):
        return self.score

    def getGood(self):
        return self.good

    def getBad(self):
        return self.bad

    def getUrl(self):
        return self.url
# Review rebuilt from a previously saved CSV row (no network access).
class LoadReview:
    """Plain data holder mirroring WiredReview's fields, minus the soup."""
    def __init__(self, phoneName, url, score, good, bad):
        self.phoneName = phoneName
        self.url = url
        self.score = score
        self.good = good
        self.bad = bad
# Fetches a review page and parses it.
def createSoup(reviewPageUrl):
    """Download reviewPageUrl and return its BeautifulSoup tree."""
    response = requests.get(reviewPageUrl)
    return BeautifulSoup(response.content, "html.parser")
# returns float score out of ten from soup
def getWiredReviewScore(reviewPageSoup):
    """Extract the review score from the rating component.

    Returns "NOSCORE" when the rating element is absent.
    NOTE(review): when the element exists but its text lacks "Rate", the
    function falls off the end and implicitly returns None -- confirm whether
    callers (which compare against "NOSCORE") expect that.
    NOTE(review): text[4] reads a single character, so two-digit scores
    (e.g. 10) would be truncated -- verify against real page markup.
    """
    x = reviewPageSoup.find("li", class_="rating-review-component__rating")
    if x is None:
        return "NOSCORE"
    if "Rate" in x.text:
        scoreOutOfTen = float(x.text[4])
        return scoreOutOfTen
# Wired pages come in three markup generations; one extractor per format.
def getWiredGoodComponentFormA(reviewPageSoup):
    """Pros text from the modern 'wired-tired' component markup ('' if absent)."""
    item = reviewPageSoup.find("li", class_="wired-tired-component__list-item wired-tired-component__list-item--pro")
    if item is None or "Wired" not in item.text:
        return ""
    return item.find("span", class_="wired-tired-component__description").text.strip()
def getWiredGoodComponentFormB(reviewPageSoup):
    """Pros text from the older h5-header layout ('' when not found)."""
    headers = reviewPageSoup.find_all("h5", class_="brandon uppercase border-t")
    text = ""
    if headers is not None:
        for header in headers:
            if "Wired" in header.text:
                text = header.parent.find("p", class_="gray-5").text.strip()
    return text
def getWiredGoodComponentFormC(reviewPageSoup):
    """Pros text from plain '<strong>WIRED</strong>' paragraphs ('' when not found)."""
    text = ""
    for strong in reviewPageSoup.find_all("strong"):
        if strong is not None and "WIRED" in strong.text:
            text = strong.parent.text.replace("WIRED", "").strip()
    return text
# Pros text, whatever the page layout.
def getWiredGood(reviewPageSoup):
    """Try each known markup format in turn; return the first non-empty pros text."""
    text = ""
    for extractor in (getWiredGoodComponentFormA,
                      getWiredGoodComponentFormB,
                      getWiredGoodComponentFormC):
        text = extractor(reviewPageSoup)
        if text != "":
            break
    return text
# Cons extractors mirror the three pros extractors above.
def getWiredBadComponentFormA(reviewPageSoup):
    """Cons text from the modern 'wired-tired' component markup ('' if absent)."""
    item = reviewPageSoup.find("li", class_="wired-tired-component__list-item wired-tired-component__list-item--con")
    if item is None or "Tired" not in item.text:
        return ""
    return item.find("span", class_="wired-tired-component__description").text.strip()
def getWiredBadComponentFormB(reviewPageSoup):
    """Cons text from the older h5-header layout: the paragraph following the pros one."""
    headers = reviewPageSoup.find_all("h5", class_="brandon uppercase border-t")
    text = ""
    if headers is not None:
        for header in headers:
            if "Wired" in header.text:
                pros_paragraph = header.parent.find("p", class_="gray-5")
                text = pros_paragraph.find_next("p", class_="gray-5").text.strip()
    return text
def getWiredBadComponentFormC(reviewPageSoup):
    """Cons text from plain '<strong>TIRED</strong>' paragraphs ('' when not found)."""
    text = ""
    for strong in reviewPageSoup.find_all("strong"):
        if strong is not None and "TIRED" in strong.text:
            text = strong.parent.text.replace("TIRED", "").strip()
    return text
# Cons text, whatever the page layout.
def getWiredBad(reviewPageSoup):
    """Try each known markup format in turn; return the first non-empty cons text."""
    text = ""
    for extractor in (getWiredBadComponentFormA,
                      getWiredBadComponentFormB,
                      getWiredBadComponentFormC):
        text = extractor(reviewPageSoup)
        if text != "":
            break
    return text
# urlCsv: csv file of 'phoneName,url' rows; timeSleep: delay (s) between requests.
def scrapeReviewsToCsv(urlCsv, timeSleep):
    """Scrape every review listed in urlCsv and append each scored review as a
    row of WiredScrapedData.csv.

    Fixes: the source file handle was never closed (now a with-block), and
    reviews without a score no longer produce empty CSV rows.
    """
    with open(urlCsv, "r", encoding="utf8") as sourceFile:
        for row in sourceFile:
            parts = row.split(",")
            phoneName = parts[0].strip()
            url = parts[1].strip()
            print(phoneName)
            print(url)
            review = WiredReview(phoneName, url)
            if review.score != "NOSCORE":
                writeCsv(review.printReviewSummary(), "WiredScrapedData.csv")
            # politeness throttle between HTTP requests
            fancySleep(timeSleep)
    print("Reached end of reviews")
def scrapeReviewsToDictionary(urlCsv, timeSleep):
    """Scrape every Wired review listed in a CSV into a dict keyed by phone name.

    urlCsv: CSV file of "phoneName,url" rows.
    timeSleep: seconds to sleep between requests (rate limiting).
    Returns {phoneName: WiredReview} for every review that parsed a score.
    The source file is now closed via a context manager (it was leaked).
    """
    outputDict = {}
    with open(urlCsv, "r", encoding="utf8") as sourceFile:
        for row in sourceFile:
            x = row.split(",")
            phoneName = x[0].strip()
            url = x[1].strip()
            print(phoneName)
            print(url)
            review = WiredReview(phoneName, url)
            if review.score != "NOSCORE":
                outputDict[phoneName] = review
                fancySleep(timeSleep)
    print("Reached end of reviews")
    return outputDict
# creates a csv file containing scraped data
def writeCsv(outputList, csvFileName):
    """Append outputList as one '|'-delimited row to csvFileName.

    Every value is stringified before writing.  The file is opened per call
    and closed on exit so the row is flushed immediately (the old version
    never closed the handle).
    """
    with open(csvFileName, "a+", encoding="utf8") as dataOutput:
        writer = csv.writer(dataOutput, delimiter='|', lineterminator="\r")
        writer.writerow([str(value) for value in outputList])
# for sleeping fancy
def fancySleep(timeSleep):
    """Sleep for timeSleep seconds in four quarters, printing a dot after
    each of the first three quarters so the user sees progress."""
    # https://stackoverflow.com/questions/5598181/multiple-prints-on-the-same-line-in-python
    print("sleeping " + str(int(timeSleep)) + " seconds", end="", flush=True)
    quarter = timeSleep / 4
    for _ in range(2):
        time.sleep(quarter)
        print(" .", end="", flush=True)
    time.sleep(quarter)
    print(" .")
    time.sleep(quarter)
# creates connection
# https://www.sqlitetutorial.net/sqlite-python/insert/
def connect(dbFile):
    """Return a sqlite3 connection to dbFile, or None if opening fails.

    The failure message is printed rather than raised, matching the
    original best-effort behavior.  Catches sqlite3.Error explicitly: the
    bare name `Error` relied on a `from sqlite3 import Error` elsewhere and
    would raise NameError if that import was missing.
    """
    con = None
    try:
        con = sqlite3.connect(dbFile)
    except sqlite3.Error as e:
        print(e)
    return con
# inserts or updates data in all tables
def insertDataFromReview(connection, currentReview):
    """Upsert one review into every table; the phone row goes first so the
    rating/pros/cons lookups can resolve its id."""
    for upsert in (insertPhone, insertRating, insertGood, insertBad):
        upsert(connection, currentReview)
# reads csv from backup CSV file to write to database
def insertDataFromCsv():
    """Load reviews from the backup CSV (WiredDatatest.csv) into the database.

    Rows are "phoneName|url|score|good|bad".  The DB connection is now
    opened once (it used to be re-opened for every row) and the CSV handle
    is closed on exit.
    """
    connection = connect("../CSI2999/db.sqlite3")
    with open("WiredDatatest.csv", encoding="utf8") as phoneData:
        reader = csv.reader(phoneData, delimiter='|')
        for row in reader:
            phoneName, url, score, good, bad = row[0], row[1], row[2], row[3], row[4]
            currentReview = LoadReview(phoneName, url, score, good, bad)
            insertDataFromReview(connection, currentReview)
def wiredScrapeAndInsert(urlCsv, timeSleep, selectiveScrape=True, backupCsvWrite=False):
    """Scrape Wired reviews listed in urlCsv and upsert them into sqlite.

    urlCsv: file of "phoneName|url" rows.
    timeSleep: seconds to sleep between requests.
    selectiveScrape: when True, only scrape phones with no existing WiredUrl
        in their CellCheck_Phone entry; when False, scrape every row.
    backupCsvWrite: when True, also append each scraped review to a backup CSV.
    The source file is now closed via a context manager (it was leaked).
    """
    startTime = time.time()
    connection = connect("../CSI2999/db.sqlite3")
    backupCsvName = "WiredScrapedDataBackup.csv"
    counter = 0
    cur = connection.cursor()
    with open(urlCsv, "r", encoding="utf8") as sourceFile:
        for row in sourceFile:
            x = row.split("|")
            phoneName = x[0].strip().lower()
            url = x[1].strip()
            # Normalize the name to match the DB's naming conventions.
            phoneName = phoneName.replace("+", " plus").replace("(", "").replace(")", "")
            print(phoneName)
            try:
                # Bare names like "6 plus ..." are iPhone models; prefix them.
                if int(phoneName[0]) < 10 and int(phoneName[0]) > 3 and phoneName[1:6] == " plus":
                    phoneName = "apple iphone " + phoneName
            except ValueError:
                pass
            print(url)
            scrape = False
            if selectiveScrape:
                cur.execute("SELECT WiredUrl FROM CellCheck_Phone WHERE PhoneName=?", (phoneName,))
                existingEntry = cur.fetchone()
                if existingEntry is None:
                    scrape = True
                elif existingEntry[0] == "":
                    print("Existing entry: " + str(existingEntry[0]))
                    scrape = True
                else:
                    scrape = False
                    print("Existing entry: " + str(existingEntry[0]))
            else:
                scrape = True
            if scrape:
                review = WiredReview(phoneName, url)
                if review.score != "NOSCORE":
                    insertDataFromReview(connection, review)
                    if backupCsvWrite:
                        outputList = []
                        for field in review.printReviewSummary():
                            outputList.append(field)
                        writeCsv(outputList, backupCsvName)
                    counter += 1
                # Sleep after every request, scored or not.
                fancySleep(timeSleep)
    connection.commit()
    print("RUNTIME: " + str(time.time() - startTime) + " seconds.")
    print("PHONE REVIEWS SCRAPED: " + str(counter))
# con is connection
# review is of type WiredReview or LoadReview
def insertPhone(con, review):
    """Insert the phone row if missing, otherwise refresh its WiredURL.

    con: open sqlite3 connection; review exposes phoneName and url.
    Phone names are stored lowercased and stripped.
    """
    cursor = con.cursor()
    phone_key = review.phoneName.lower().strip()
    cursor.execute("SELECT * FROM CellCheck_Phone WHERE PhoneName=?", (phone_key,))
    if cursor.fetchone() is not None:
        cursor.execute(
            "UPDATE CellCheck_Phone SET WiredURL=? WHERE phoneName=?",
            (review.url.strip(), phone_key),
        )
        action = "updated"
    else:
        cursor.execute(
            "INSERT INTO CellCheck_Phone (PhoneName,CnetURL,WiredURL,PCMagURL,VergeURL,"
            "PhoneImageUrl,Manufacturer,ReleaseDate) VALUES(?,?,?,?,?,?,?,?)",
            (phone_key, "", review.url, "", "", "", "", ""),
        )
        action = "added"
    cursor.execute("SELECT id FROM CellCheck_Phone WHERE PhoneName=?", (phone_key,))
    phoneId = cursor.fetchone()[0]
    print("Phone " + str(phoneId) + " " + phone_key + " " + action)
    con.commit()
# con is connection
# review is of type WiredReview or LoadReview
def insertRating(con, review, update=True):
    """Insert or update the Wired rating row for review's phone.

    con: open sqlite3 connection; review exposes phoneName and score.
    update=False leaves an existing rating untouched.
    """
    cursor = con.cursor()
    phone_key = review.phoneName.lower().strip()
    cursor.execute("SELECT id FROM CellCheck_Site WHERE siteName=?", ("Wired",))
    site_id = cursor.fetchone()[0]
    cursor.execute("SELECT id FROM CellCheck_Phone WHERE phoneName=?", (phone_key,))
    phone_id = cursor.fetchone()[0]
    cursor.execute(
        "SELECT * FROM CellCheck_Rating WHERE Site_id=? AND Phone_id=?",
        (site_id, phone_id),
    )
    if cursor.fetchone() is None:
        print(review.score)
        cursor.execute(
            "INSERT INTO CellCheck_Rating (Rating,Phone_id,Site_id) VALUES(?,?,?)",
            (review.score, phone_id, site_id),
        )
        print("Rating added")
    elif update:
        cursor.execute(
            "UPDATE CellCheck_Rating SET Rating=? WHERE Site_id=? AND Phone_id=?",
            (review.score, site_id, phone_id),
        )
        print("Rating updated")
    else:
        print("Rating already exists")
    con.commit()
# con is connection
# review is of type WiredReview or LoadReview
def insertGood(con, review, update=True):
    """Insert or update the Wired pros ("good") row for review's phone.

    con: open sqlite3 connection; review exposes phoneName and good.
    update=False leaves an existing pros entry untouched.
    """
    cursor = con.cursor()
    phone_key = review.phoneName.lower().strip()
    cursor.execute("SELECT id FROM CellCheck_Site WHERE siteName=?", ("Wired",))
    site_id = cursor.fetchone()[0]
    cursor.execute("SELECT id FROM CellCheck_Phone WHERE phoneName=?", (phone_key,))
    phone_id = cursor.fetchone()[0]
    cursor.execute(
        "SELECT * FROM CellCheck_Prolist WHERE Site_id=? AND Phone_id=?",
        (site_id, phone_id),
    )
    if cursor.fetchone() is None:
        cursor.execute(
            "INSERT INTO CellCheck_Prolist (Phone_id,Site_id,Pros) VALUES(?,?,?) ",
            (phone_id, site_id, review.good),
        )
        print("Pros entry added")
    elif update:
        cursor.execute(
            "UPDATE CellCheck_Prolist SET Pros=? WHERE Site_id=? AND Phone_id=?",
            (review.good, site_id, phone_id),
        )
        print("Pros entry updated")
    else:
        print("Pros entry already exists")
    con.commit()
# con is connection
# review is of type WiredReview or LoadReview
def insertBad(con, review, update=True):
    """Insert or update the Wired cons ("bad") row for review's phone.

    con: open sqlite3 connection; review exposes phoneName and bad.
    update=False leaves an existing cons entry untouched.
    """
    cursor = con.cursor()
    phone_key = review.phoneName.lower().strip()
    cursor.execute("SELECT id FROM CellCheck_Site WHERE siteName=?", ("Wired",))
    site_id = cursor.fetchone()[0]
    cursor.execute("SELECT id FROM CellCheck_Phone WHERE phoneName=?", (phone_key,))
    phone_id = cursor.fetchone()[0]
    cursor.execute(
        "SELECT * FROM CellCheck_Conlist WHERE Site_id=? AND Phone_id=?",
        (site_id, phone_id),
    )
    if cursor.fetchone() is None:
        cursor.execute(
            "INSERT INTO CellCheck_Conlist (Phone_id,Site_id,Cons) VALUES(?,?,?) ",
            (phone_id, site_id, review.bad),
        )
        print("Cons entry added")
    elif update:
        cursor.execute(
            "UPDATE CellCheck_Conlist SET Cons=? WHERE Site_id=? AND Phone_id=?",
            (review.bad, site_id, phone_id),
        )
        print("Cons entry updated")
    else:
        print("Cons entry already exists")
    con.commit()
# Entry point: scrape every URL in WiredURLs.csv, pausing 10 s between
# requests.  NOTE(review): runs on import as well -- consider a __main__ guard.
wiredScrapeAndInsert("WiredURLs.csv", 10)
| StarcoderdataPython |
4990440 | from .passive_components import Filter
from .active_components import Amplifier
# Component type names the builder accepts, grouped by passive vs active.
# NOTE(review): only Filter and Amplifier are imported above; the remaining
# names will fail the class lookup in component_builder until their classes
# are imported into this module -- confirm whether they are planned stubs.
VALID_PASSIVE = [
    'Filter',
    'Attenuator',
    'Mixer',
    'Coupler',
    'Tap',
    'Splitter',
]
VALID_ACTIVE = [
    'Amplifier',
    'ActiveMixer',
    'Switch',
]
# Union used for validation in component_builder.
VALID_COMPONENTS = VALID_PASSIVE + VALID_ACTIVE
def component_builder(comp_dict):
    """
    Build an actual component object from a dictionary as parsed from the xml_parser.

    Args:
        comp_dict (dict): Component dictionary with 'uid', 'name', 'type' and
            'params' keys; 'params' maps names to keyword dicts passed to
            add_parameter.

    Returns:
        comp (Component): Component object of the correct type.

    Raises:
        ValueError: if the type is not in VALID_COMPONENTS, or is listed as
            valid but its class is not imported in this module (previously a
            confusing KeyError from globals()).
    """
    uid = comp_dict['uid']
    name = comp_dict['name']
    comp_type = comp_dict['type']
    if comp_type not in VALID_COMPONENTS:
        raise ValueError(
            "Invalid component type ({}). Valid components: {}".format(comp_type, VALID_COMPONENTS))
    classHandle = globals().get(comp_type)  # handle to the component class
    if classHandle is None:
        raise ValueError(
            "Component type {} is listed as valid but not implemented/imported".format(comp_type))
    compObj = classHandle(uid, name)
    # Add all parameters to the component object.
    for val in comp_dict['params'].values():
        compObj.add_parameter(**val)
    return compObj
| StarcoderdataPython |
11274040 |
class ProductCategoryMixin(object):
    """Empty placeholder mixin for product-category behavior.

    NOTE(review): no members yet -- presumably mixed into model/view classes
    elsewhere; confirm before removing.
    """
    pass
class ProductMixin(object):
    """Empty placeholder mixin for product behavior (no members yet)."""
    pass
class ProductDiscountMixin(object):
    """Empty placeholder mixin for product-discount behavior (no members yet)."""
    pass
class DiscountMixin(object):
    """Empty placeholder mixin for discount behavior (no members yet)."""
    pass
| StarcoderdataPython |
8082376 | import cv2
# Quick manual check: load a hard-coded screenshot and display it until any
# key is pressed.  NOTE(review): the absolute path only exists on the
# author's machine; cv2.imread returns None for a missing file, which would
# make imshow fail -- parameterize the path before reuse.
img = cv2.imread('/home/zhihaohe/Pictures/1.png')
cv2.imshow('a', img)
cv2.waitKey(0)
| StarcoderdataPython |
4854493 | #!/usr/bin/python
import re
from datetime import datetime
import ply.lex as lex
# ------------------------------------------------------------
# query_lexer.py
#
# tokenizer for log query expression
# ------------------------------------------------------------
class QueryLexer:
    """Tokenizer for log query expressions, built on PLY's lex module.

    Token rules follow PLY conventions: ``t_NAME`` strings and functions
    define the regexes; function rules are matched in definition order.
    The only change from the original is that every ``print`` statement is
    written in parenthesized form, which is valid in both Python 2 and 3.
    """

    # Words treated as keywords instead of generic identifiers.
    reserved = {
        'in': 'IN',
        # 'between': 'BETWEEN',
        'like': 'LIKE',
        'and': 'AND'
    }

    tokens = [
        'IP',
        'TIME',
        'DAY',
        'MONTH',
        'NUMBER',
        'STRING',
        'ID',
        'COMMA',
        'LPAREN',
        'RPAREN',
        'GT',
        'LT',
        'GE',
        'LE',
        'EQ',
        'NE'
    ] + list(reserved.values())

    # Simple (string) token rules.
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_COMMA = r','
    # t_BETWEEN = r'between'
    t_IN = r'in'
    t_LIKE = r'like'
    t_AND = r'and'
    # Dotted-quad IPv4 address with 0-255 octets.
    t_IP = r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b'
    t_MONTH = r'\b([0-9]{4})-(1[0-2]|0[1-9])\b'  # e.g. 2012-08
    t_NE = r'!='
    t_LE = r'<='
    t_GE = r'>='
    t_LT = r'<'
    t_GT = r'>'
    t_EQ = r'='

    # A string containing ignored characters (spaces and tabs).
    t_ignore = ' \t'

    def __init__(self):
        self.lexer = lex.lex(module=self, debug=0)

    def t_TIME(self, t):
        r'([0-9]{4})-(1[0-2]|0[1-9])-(3[0-1]|0[1-9]|[1-2][0-9])T(2[0-3]|[01]?[0-9]):([0-5]?[0-9]):([0-5]?[0-9])Z'
        # e.g. 2012-11-25T21:49:00Z -> datetime value
        t.value = datetime.strptime(t.value, "%Y-%m-%dT%H:%M:%SZ")
        return t

    def t_DAY(self, t):
        r'\b([0-9]{4})-(1[0-2]|0[1-9])-(3[0-1]|0[1-9]|[1-2][0-9])\b'
        # e.g. 2012-11-25 -> datetime value (midnight)
        t.value = datetime.strptime(t.value, "%Y-%m-%d")
        return t

    def t_NUMBER(self, t):
        r'\b\d+\s+\b|\b\d+$'
        t.value = int(t.value)
        return t

    def t_STRING(self, t):
        r'\'[^\']+\''
        # Strip the surrounding single quotes and whitespace.
        raw_ip = t.value.strip()
        t.value = re.sub("'", '', raw_ip).strip()
        return t

    def t_ID(self, t):
        r'[a-zA-Z_][a-zA-Z_0-9]*'
        # Check for reserved words; fall back to a generic identifier.
        t.type = self.reserved.get(t.value, 'ID')
        return t

    def t_newline(self, t):
        r'\n+'
        # Track line numbers for error reporting.
        t.lexer.lineno += len(t.value)

    def t_error(self, t):
        # Report an unrecognized character and skip past it.
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
def test():
    """Ad-hoc smoke test: tokenize a sample query and print each token.

    Prints use parenthesized form, valid in both Python 2 and 3.
    """
    data = 'create_time >=2012-11-25T21:49:00Z and create_time <= 2012-11-25T21:49:13Z and domain like cnncom'
    ql = QueryLexer()
    lexer = ql.lexer
    lexer.input(data)
    while True:
        token = lexer.token()
        if not token:
            break
        print(token)
if __name__ == "__main__":test() | StarcoderdataPython |
11327056 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add AuthorLink.exclude_from_contributions (BooleanField, default False)
    so individual links can be hidden from an author's contributions list."""

    dependencies = [
        ('ik_links', '0005_auto_20170511_1909'),
    ]
    operations = [
        migrations.AddField(
            model_name='authorlink',
            name='exclude_from_contributions',
            field=models.BooleanField(default=False, help_text=b"Exclude this content from the author's contributions."),
        ),
    ]
| StarcoderdataPython |
4989721 | import csv
import logging
import os
import pathlib
import re
import struct
import uuid
from contextlib import contextmanager
from operator import attrgetter
from os.path import commonprefix
from urllib.parse import unquote, urlsplit
from zipfile import ZipFile
import ijson
import jsonref
from django.conf import settings
from django.utils.translation import activate, get_language
from ocdsextensionregistry import ProfileBuilder
from spoonbill.common import CURRENT_SCHEMA_TAG, DEFAULT_SCHEMA_URL
from spoonbill.stats import DataPreprocessor
from spoonbill.utils import SchemaHeaderExtractor, nonschema_title_formatter
from core.column_headings import headings
from core.constants import OCDS_LITE_CONFIG
logger = logging.getLogger(__name__)
# DON'T CHANGE ORDER
# Canonical ordering of table sections; retrieve_tables iterates this tuple
# so exports always present tables in this sequence.
TABLES_ORDER = (
    "parties",
    "planning",
    "tenders",
    "awards",
    "contracts",
    "documents",
    "milestones",
    "amendments",
)
def instance_directory_path(instance, filename):
    """Return the upload path MEDIA_ROOT/<instance id>/<random hex>.json.

    The incoming filename is discarded in favour of a uuid4-based name.
    """
    return f"{instance.id}/{uuid.uuid4().hex}.json"
def export_directory_path(instance, filename):
    """Return the upload path MEDIA_ROOT/<datasource id>/<basename of filename>.

    The datasource is taken from the instance's first data selection, using
    its URL set when non-empty, otherwise its upload set.
    """
    selection = instance.dataselection_set.all()[0]
    datasources = selection.url_set.all() or selection.upload_set.all()
    datasource = datasources[0]
    basename = filename.split("/")[-1]
    return f"{datasource.id}/{basename}"
def retrieve_tables(analyzed_data):
    """Split analyzed tables into available/unavailable lists, in TABLES_ORDER.

    analyzed_data: spoonbill analysis result whose ``tables`` maps table name
        to a per-table stats object (total_rows, arrays, columns, ...).
    Returns (available_tables, unavailable_tables): the first is a list of
    summary dicts (name, row count, non-empty arrays, column availability);
    the second is a list of table names with zero rows.
    """
    tables = analyzed_data.tables
    available_tables = []
    unavailable_tables = []
    for key in TABLES_ORDER:
        # NOTE(review): the {} default would raise AttributeError on
        # .total_rows below -- presumably every TABLES_ORDER key is always
        # present in analyzed_data.tables; confirm.
        table = tables.get(key, {})
        if table.total_rows == 0:
            unavailable_tables.append(key)
            continue
        # Keep only array columns that actually occurred in the data.
        arrays = {k: v for k, v in table.arrays.items() if v > 0}
        available_table = {
            "name": table.name,
            "rows": table.total_rows,
            "arrays": arrays,
            "available_data": {
                "columns": {
                    "additional": list(table.additional_columns.keys()),
                    "total": len(table.columns.keys()),
                }
            },
        }
        # Count columns with data; record ids of those without any hits.
        available_cols = 0
        missing_columns_data = []
        for col in table.columns.values():
            if col.hits > 0:
                available_cols += 1
            else:
                missing_columns_data.append(col.id)
        available_table["available_data"]["columns"].update(
            {"available": available_cols, "missing_data": missing_columns_data}
        )
        available_tables.append(available_table)
    return available_tables, unavailable_tables
def store_preview_csv(columns_key, rows_key, table_data, preview_path):
    """Write a CSV preview of ``table_data`` to ``preview_path``.

    columns_key / rows_key name the attributes of table_data holding the
    column mapping and the row dicts.  Only columns with hits are written;
    a ``parentTable`` column is appended unless the columns are "combined".
    Note: additional_columns are merged into the column mapping in place,
    matching the original behavior.
    """
    columns = getattr(table_data, columns_key)
    columns.update(table_data.additional_columns)
    fieldnames = [name for name, column in columns.items() if column.hits > 0]
    if not columns_key.startswith("combined"):
        fieldnames.append("parentTable")
    with open(preview_path, "w", newline="\n") as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames, extrasaction="ignore")
        writer.writeheader()
        writer.writerows(getattr(table_data, rows_key))
def transform_to_r(value):
    """Lowercase *value* and replace spaces with underscores (R-friendly name)."""
    return value.lower().replace(" ", "_")
def get_column_headings(datasource, spec, table):
    """Build the {column id: heading} mapping for *table* per the datasource's
    headings_type.

    datasource.headings_type values handled:
      - "ocds": no custom headings (empty dict);
      - "<lang>_user_friendly": titles resolved against the OCDS schema;
      - "en_r_friendly"/"es_r_friendly": R-safe names via transform_to_r.
    spec: spoonbill DataPreprocessor (provides .tables, .schema).
    """
    tables = spec.tables
    heading_formatters = {
        "en_r_friendly": transform_to_r,
        "es_r_friendly": transform_to_r,
    }
    column_headings = {}
    if datasource.headings_type == "ocds":
        return column_headings
    # Split tables expose plain columns; unsplit ones the combined view.
    columns = tables[table.name].columns.keys() if table.split else tables[table.name].combined_columns.keys()
    if "user_friendly" in datasource.headings_type:
        # Infer package type from the schema title when spec lacks pkg_type.
        pkg_type = getattr(spec, "pkg_type", "releases" if "Release" in spec.schema["title"] else "records")
        # First two chars of headings_type encode the language ("en"/"es").
        ds_lang = datasource.headings_type[:2]
        schema = get_schema(ds_lang, pkg_type)
        schema_headers = SchemaHeaderExtractor(schema)
        for col in columns:
            column_headings[col] = tables[table.name].titles.get(col, col)
        # Titles can be lists (schema paths), empty lists, or plain strings.
        for k, v in column_headings.items():
            if v and isinstance(v, list):
                column_headings[k] = schema_headers.get_header(k, v)
            elif v == []:
                column_headings[k] = nonschema_title_formatter(k)
            else:
                column_headings[k] = nonschema_title_formatter(v)
        # Reuse the index-0 column's title for higher-indexed array columns.
        # NOTE(review): str.replace swaps every occurrence of the digit and
        # only handles single digits -- presumably array indexes here are
        # 0-9 and appear once per id; confirm.
        for col in column_headings.keys():
            for char in col:
                if char.isnumeric() and char != "0":
                    title_col = col.replace(char, "0")
                    column_headings[col] = column_headings[title_col]
        return column_headings
    # R-friendly headings: look up by the wildcarded (index-free) column id.
    for col in columns:
        non_index_based = re.sub(r"\d", "*", col)
        column_headings.update({col: heading_formatters[datasource.headings_type](headings.get(non_index_based, col))})
    return column_headings
def set_column_headings(selection, analyzed_file_path):
    """Compute and persist column headings for every table in *selection*.

    selection: data selection model instance (provides headings_type and
        tables); analyzed_file_path: path to the saved spoonbill analysis.
    Spanish heading types activate the "es" translation while headings are
    generated; the caller's active language is restored afterwards.
    """
    current_language_code = get_language()
    spec = DataPreprocessor.restore(analyzed_file_path)
    if selection.headings_type.startswith("es"):
        activate("es")
    for table in selection.tables.all():
        table.column_headings = get_column_headings(selection, spec, table)
        table.save(update_fields=["column_headings"])
        # Split tables also carry per-array child tables needing headings.
        if table.split:
            for a_table in table.array_tables.all():
                a_table.column_headings = get_column_headings(selection, spec, a_table)
                a_table.save(update_fields=["column_headings"])
    activate(current_language_code)
@contextmanager
def internationalization(lang_code="en"):
    """Temporarily activate *lang_code*, restoring the previously active
    language on exit (even when the body raises)."""
    previous_language = get_language()
    try:
        activate(lang_code)
        yield
    finally:
        activate(previous_language)
def zip_files(source_dir, zipfile, extension=None):
    """Zip the files under *source_dir* (flat, no directory prefix) into *zipfile*.

    extension: when given, only files whose names end with it are included;
    when None, every file is included.  (Previously a None extension produced
    an empty archive, which made the parameter's default useless.)
    """
    with ZipFile(zipfile, "w") as fzip:
        for folder, _, files in os.walk(source_dir):
            for file_ in files:
                if extension is None or file_.endswith(extension):
                    fzip.write(os.path.join(folder, file_), file_)
def get_only_columns(table, table_config, analyzed_data=None):
    """Resolve the config's wildcard "only" ids to concrete column ids.

    table_config["only"] holds ids with array indexes replaced by "*"; each
    real column id (digits wildcarded the same way) that matches is kept,
    in the table's column order.  Returns the config list itself when it is
    empty or missing.
    """
    wanted = table_config.get("only", [])
    if not wanted:
        return wanted
    source = analyzed_data.tables[table.name]
    column_ids = source.columns.keys() if table.split else source.combined_columns.keys()
    return [cid for cid in column_ids if re.sub(r"\d", "*", cid) in wanted]
def get_options_for_table(selections, exclude_tables_list, selection, tables, parent=None, analyzed_data=None):
    """Accumulate flatten options for *tables* into the given collections.

    selections: dict being filled with {table name: flatten options};
    exclude_tables_list: list being filled with names of excluded tables;
    selection: the data selection (provides kind); tables: queryset of table
    models; parent: set when recursing into a split table's array tables;
    analyzed_data: spoonbill analysis, needed for OCDS-lite "only" columns.
    Mutates selections/exclude_tables_list in place; returns None.
    """
    for table in tables.all():
        if not table.include:
            exclude_tables_list.append(table.name)
            continue
        else:
            selections[table.name] = {"split": table.split}
        if table.column_headings:
            selections[table.name]["headers"] = table.column_headings
        if table.heading:
            selections[table.name]["name"] = table.heading
        if selection.kind == selection.OCDS_LITE:
            selections[table.name]["pretty_headers"] = True
            # Child tables carry their config nested under the parent entry.
            lite_table_config = (
                OCDS_LITE_CONFIG["tables"].get(table.name, {})
                if not parent
                else OCDS_LITE_CONFIG["tables"].get(parent.name, {}).get("child_tables", {}).get(table.name, {})
            )
            only = get_only_columns(table, lite_table_config, analyzed_data=analyzed_data)
            if only:
                selections[table.name]["only"] = only
            if "repeat" in lite_table_config:
                selections[table.name]["repeat"] = lite_table_config["repeat"]
        # Recurse into array tables only for top-level, non-lite selections.
        if not table.parent and selection.kind != selection.OCDS_LITE:
            get_options_for_table(selections, exclude_tables_list, selection, table.array_tables, table, analyzed_data)
def get_flatten_options(selection):
    """Build the spoonbill flatten options dict for a data selection.

    For OCDS-lite selections the saved analysis is restored first so "only"
    column lists can be resolved.  Returns {"selection": {...}} plus an
    "exclude" list when any tables are excluded.
    """
    selections = {}
    exclude_tables_list = []
    spec = None
    if selection.kind == selection.OCDS_LITE:
        # A selection has either URL or upload datasources; use whichever exists.
        datasource = selection.url_set.all() or selection.upload_set.all()
        spec = DataPreprocessor.restore(datasource[0].analyzed_file.path)
    get_options_for_table(selections, exclude_tables_list, selection, selection.tables, analyzed_data=spec)
    options = {"selection": selections}
    if exclude_tables_list:
        options["exclude"] = exclude_tables_list
    return options
def get_protocol(url):
    """Return *url*'s scheme component (e.g. 'https'), or '' when absent."""
    parts = urlsplit(url)
    return parts.scheme
def dataregistry_path_formatter(path):
    """Map a ``file://`` URI onto a path under DATAREGISTRY_MEDIA_ROOT."""
    relative = pathlib.Path(path.replace("file://", ""))
    return settings.DATAREGISTRY_MEDIA_ROOT / relative
def dataregistry_path_resolver(path):
    """Return *path* as an absolute, symlink-resolved pathlib.Path."""
    return pathlib.Path(path).resolve()
def multiple_file_assigner(files, paths):
    """Assign paths[i] to files[i].file.name and save each file.

    Pairing is positional via zip, which fixes the O(n^2) lookups and the
    wrong pairing the old ``files.index(file)`` produced when the list held
    duplicate entries.  Returns the same list of files.
    """
    for file_obj, path in zip(files, paths):
        file_obj.file.name = path
        file_obj.save()
    return files
def gz_size(filename):
    """Return the uncompressed size recorded in a gzip file's trailer.

    RFC 1952 defines the trailing ISIZE field as a little-endian uint32, so
    the explicit "<I" format is used (the old native-order "I" was only
    correct on little-endian platforms).  Note the value is the size modulo
    2**32, so files over 4 GiB wrap around.
    """
    with open(filename, "rb") as f:
        f.seek(-4, 2)
        return struct.unpack("<I", f.read(4))[0]
def get_schema(language, pkg_type):
    """Fetch the OCDS release/record schema for *language* and *pkg_type*.

    language: schema language key into DEFAULT_SCHEMA_URL;
    pkg_type: "releases" or "records" (selects the package schema getter).
    When the fetched schema is a *package* schema, refs are resolved and the
    inner item schema (one release/record) is returned instead.
    """
    url = DEFAULT_SCHEMA_URL[pkg_type][language]
    getter = attrgetter("release_package_schema") if "releases" in pkg_type else attrgetter("record_package_schema")
    profile = ProfileBuilder(CURRENT_SCHEMA_TAG, {}, schema_base_url=url)
    schema = getter(profile)()
    title = schema.get("title", "").lower()
    if "package" in title:
        # Unwrap the package: resolve $refs, then take the per-item schema.
        schema = jsonref.JsonRef.replace_refs(schema)
        schema = schema["properties"][pkg_type]["items"]
    return schema
| StarcoderdataPython |
109954 | <gh_stars>0
import os
import numpy as np
path = os.path.dirname(os.path.realpath(__file__))
def bingo(numbers:list, boards:list, play2loose:bool=False):
    """Play bingo over *boards* with the draw order *numbers*.

    Marks drawn cells in place by overwriting them with -1 (so *boards* is
    mutated).  With play2loose=False, returns (board index, unmarked-sum *
    winning number) for the first board to win; with play2loose=True, for
    the last board to win.  Returns (None, None) if nobody wins.
    """
    def mark(drawn):
        # Overwrite every cell equal to the drawn number, on every board.
        for board in boards:
            for row in board:
                for col, cell in enumerate(row):
                    if cell == drawn:
                        row[col] = -1

    def score(board):
        # A fully-marked row/column sums to -5 (cells hold -1 once marked,
        # and original values are non-negative).  Return the unmarked sum
        # for a winning board, else None.
        row_sums = [sum(row) for row in board]
        col_sums = [sum(col) for col in zip(*board)]
        if -5 in row_sums or -5 in col_sums:
            return sum(cell for row in board for cell in row if cell >= 0)
        return None

    finished = []
    for drawn in numbers:
        mark(drawn)
        for index, board in enumerate(boards):
            total = score(board)
            if total is None:
                continue
            if not play2loose:
                return (index, total * drawn)
            if len(finished) < len(boards) - 1:
                # Still waiting for all-but-one boards to finish.
                if index not in finished:
                    finished.append(index)
            elif index not in finished:
                # The last remaining board just won.
                return (index, total * drawn)
    return (None, None)
# Worked example from the puzzle statement (Advent of Code 2021, day 4):
# the sample draw order and three sample boards, used by the asserts below.
test_numbers = [7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1]
test_boards = [
    [
        [22,13,17,11,0],
        [8,2,23,4,24],
        [21,9,14,16,7],
        [6,10,3,18,5],
        [1,12,20,15,19],
    ],
    [
        [3,15,0,2,22],
        [9,18,13,17,5],
        [19,8,7,25,23],
        [20,11,10,24,4],
        [14,21,16,12,6],
    ],
    [
        [14,21,17,24,4],
        [10,16,15,9,19],
        [18,8,23,26,20],
        [22,11,13,6,5],
        [2,0,12,3,7],
    ]
]
numbers = []
boards = []
# Parse the puzzle input: line 1 is the comma-separated draw order; each
# board is one separator line followed by five rows of five numbers.
with open(path+"/input.txt") as file:
    lines = file.readlines()
    numbers = [int(i) for i in lines.pop(0).strip().split(",")]
    while len(lines)>=5:
        lines.pop(0)
        board = []
        for i in range(5):
            row = [int(i) for i in lines.pop(0).strip().split()]
            board.append(row)
        boards.append(board)
# Sanity-check against the worked example before running on real input.
# NOTE(review): bingo() mutates its boards argument, so the second assert
# replays on already-marked test boards; it still passes because the first
# game's marks are a subset of the marks present when the last board wins.
assert bingo(test_numbers, test_boards) == (2, 4512), "Function is wrong"
print("Part A:", bingo(numbers, boards))
assert bingo(test_numbers, test_boards, play2loose=True) == (1, 1924), "Function is wrong"
print("Part B:", bingo(numbers, boards, play2loose=True))
| StarcoderdataPython |
1654418 | import fimfic
import pprint
import json
# Smoke-test script for the fimfic wrapper: dump session info, then load a
# single page of stories from each bookshelf URL and print a JSON summary.
session = fimfic.Session()
session.enable_mature()
session.infodump()
print("-------------")
# Bookshelves to exercise; the commented URL below was for testing pagination.
URLs = [
    "http://www.fimfiction.net/bookshelf/1364962/xeno",
    "https://www.fimfiction.net/bookshelf/683004/favourites?view_mode=1",
]
#"https://www.fimfiction.net/bookshelf/683004/favourites?page=7&&view_mode=1",
for url in URLs:
    print(f"URL: {url}")
    b = fimfic.Bookshelf(session=session,url=url)
    # single_page=True keeps the smoke test fast (no pagination crawl).
    s = b.load_stories(single_page=True)
    b.infodump()
    print("NUM STORIES FOUND: " + str(len(s)))
    print()
    print(json.dumps(json.loads(b.to_json()), indent=4))
# vim: ts=4 sw=4 et tw=100 :
| StarcoderdataPython |
3329412 | <filename>python/get_results_v2.py
'''
Fire this sweet script in the directory of the measurement you want to have a look at
and get a bunch of nice videos back.
python get_results.py reference_frame [startframe endframe]
'''
__author__ = 'jhaux'
import cv2
import numpy as np
import os # getting the files
import jimlib as jim # my own set of functions
import sys # commandline arguments and output
import time # timer
def listdir_nohidden(path):
    """Return the entries of *path* whose names do not start with a dot.

    Order follows os.listdir (arbitrary).  The local was renamed from
    ``list``, which shadowed the builtin.
    """
    entries = [name for name in os.listdir(path) if not name.startswith(".")]
    return entries
def image_Directories_and_Types(working_directory):
    '''Get the image directories and Types in an easy way. Like this you can have several types
    of images without the need of declaring everything by hand
    returns e.g. ['cwd/base/images/630_nm', 'cwd/base/images/dark', 'cwd/base/images/no_filter'], ['630_nm', 'dark', 'no_filter']'''
    basename = working_directory.split('/')[-1]
    images_root = working_directory + '/' + basename + '/images/'
    Types = listdir_nohidden(images_root)
    # One directory per type; redundant str() wrappers removed and the loop
    # variable renamed so it no longer shadows the builtin `type`.
    Directories = [images_root + type_name for type_name in Types]
    return Types, Directories
# def define_reference_path(reference, Directories):
# ''' return a list of reference images (arrays!) as defined by the integer "reference"'''
# References = np.zeros(len(Directories)).astype('string')
# for image in np.arange(len(Directories)):
# References[image] = Directories[image] + '/' + listdir_nohidden(Directories[image])[reference]
#
# return References
def make_std_video(directory, type,
                   ref=1,
                   fps = 25, fourcc = cv2.cv.CV_FOURCC(*'mp4v'),
                   b_h = 200, b_c=(255,255,255),
                   font = cv2.FONT_HERSHEY_SIMPLEX, font_color=(0,0,0)):
    ''' make two standarized videos:
    one with and one without overlayed colormap containing information about the CO2 concentracion

    directory: folder of timestamped frames; type: image type label (used in
    output names; "dark" frames are skipped); ref: index of the reference
    frame within the directory listing; b_h/b_c: color-bar height/color.
    Writes <type>_grayscale.mov and <type>_color-map.mov to the cwd.
    NOTE(review): `type` shadows a builtin, `font_color` is unused, and
    time.clock() was removed in Python 3.8 -- this function is Python 2 only.
    '''
    # Check if directory contains files. If not: abort!
    if not listdir_nohidden(directory):
        print "no images of type " + type
        pass
    elif type == 'dark':
        print "skipping dark frames"
        pass
    else:
        # adjust video hight to fit image and bar
        reference = cv2.imread(directory + '/' + listdir_nohidden(directory)[ref])
        hight, width = reference.shape[0:2]
        hight = hight + b_h
        shape = (width, hight)
        # Tell std out what we are doing
        infostring = "\n### VIDEO " + type + " ###"
        print infostring
        # Setup output files, where the video will be written in
        grayscale = cv2.VideoWriter(str(type +'_grayscale.mov'), fourcc, fps, shape, False)
        ovrld_cmp = cv2.VideoWriter(str(type +'_color-map.mov'), fourcc, fps, shape, False)
        # stats for the loading bar function
        current_image = 0
        amount_of_images = len(listdir_nohidden(directory)) - 1
        timer_start = time.clock()
        for image in listdir_nohidden(directory):
            # iterable is a list of image names!
            # get timestamp from filename
            timestamp = jim.get_timestamp(image)
            # Grayscale video: raw frame plus color bar and black timestamp.
            orig = cv2.imread( directory + '/' + image ).astype('uint8')
            frame_g = jim.addColorBar(orig, bar_h=b_h, color=b_c)
            cv2.putText(frame_g,timestamp,(10, hight - 40), font, 4,(0,0,0),2,cv2.CV_AA)
            grayscale.write(frame_g)
            # Colormap video: concentration vs reference frame, overlaid as
            # a HOT colormap, with a white timestamp.
            conc = jim.getConcentration(orig, reference)
            over = jim.overlay_colormap(conc, background=orig,
                                        lower_threshold=3, upper_threshold=50,
                                        cmap='HOT', blurKernel=(1,1))
            frame_c = jim.addColorBar(over, bar_h=b_h, color=b_c)
            cv2.putText(frame_c,timestamp,(10, hight - 40), font, 4,(255,255,255),2,cv2.CV_AA)
            ovrld_cmp.write(frame_c)
            # now generate a nice loading bar:
            jim.progress_bar( current_image, amount_of_images, starttime=timer_start )
            current_image += 1
        grayscale.release()
        ovrld_cmp.release()
        print "\nfinished!"
    return 0
def simple_main():
    """Entry point: build the standard videos for every image type under cwd.

    sys.argv[1] is the index of the reference frame within each image
    directory.  The loop variables were renamed so they no longer shadow
    the builtins ``dir`` and ``type``.
    """
    working_directory = os.getcwd()
    Types, Directories = image_Directories_and_Types(working_directory)
    ref = int(sys.argv[1])
    for directory, image_type in zip(Directories, Types):
        make_std_video(directory, image_type, ref)

if __name__ == '__main__':
    simple_main()
| StarcoderdataPython |
3430496 | <reponame>FrancojFerrante/NLP-Labo<filename>.venv/Lib/site-packages/tools/google.py
# coding: utf-8
"""
Google parser.
Generic search algorithm:
With some query:
For page in 1...9999:
Build url for given query and page
Request the url
If captcha found:
Solve captcha or change proxy or do something else
If last page found:
Stop parsing
Module contents:
* CaptchaFound
* ParsingError
* build_search_url
* parse_index_size
* is_last_page
* parse_search_results
"""
try:
from urllib import quote, unquote_plus
except ImportError:
from urllib.parse import quote, unquote_plus
import logging
import re
import base64
from grab.tools.html import decode_entities
from grab.tools.lxml_tools import get_node_text, drop_node, render_html
from grab.tools.http import urlencode
from grab.tools.encoding import smart_str
from grab.tools.text import find_number
class CaptchaFound(Exception):
    """
    Raised when Google interrupts the search with a captcha challenge.
    """
class CaptchaError(CaptchaFound):
    """
    Deprecated alias of CaptchaFound, kept for backward compatibility.
    Emits a DeprecationWarning when instantiated (implements the old TODO).
    """
    def __init__(self, *args):
        import warnings
        warnings.warn("CaptchaError is deprecated; use CaptchaFound",
                      DeprecationWarning, stacklevel=2)
        super(CaptchaError, self).__init__(*args)
class AccessDenied(Exception):
    """
    Raised when an HTTP 403 response is received from Google.
    """
class ParsingError(Exception):
    """
    Raised when the page HTML does not match any known Google layout.
    """
def build_search_url(query, page=None, per_page=None, lang=None,
                     filter=None, **kwargs):
    """
    Build a google search url with the given query and pagination options.

    :param page: 1-based page number (default 1)
    :param per_page: results per page: 10, 20, 30, 50, 100 (default 10)
    :param lang: interface language for the `hl` parameter (default 'en')
    :param filter: when falsy, pass filter=0 to disable result filtering

    Extra kwargs become query parameters, e.g. time restrictions:
        tbs=qdr:h / qdr:d / qdr:w / qdr:m / qdr:y
    """
    per_page = 10 if per_page is None else per_page
    page = 1 if page is None else page
    lang = 'en' if lang is None else lang
    filter = True if filter is None else filter
    # Fill in defaults without clobbering caller-supplied values, keeping
    # the caller's parameter order first in the query string.
    kwargs.setdefault('hl', lang)
    kwargs.setdefault('num', per_page)
    kwargs.setdefault('start', per_page * (page - 1))
    if 'filter' not in kwargs and not filter:
        kwargs['filter'] = '0'
    url = 'http://google.com/search?q=%s' % quote(smart_str(query))
    if kwargs:
        url += '&' + urlencode(kwargs)
    return url
def parse_index_size(grab):
    """
    Extract number of results from grab instance which
    has received google search results.

    Returns 0 when no results (or an unrecognized page format) are found.
    Handles both the "#resultStats" layout and the "#subform_ctrl" layout,
    and both "About N results" / "N of about M" phrasings.
    """
    text = None
    if grab.search(u'did not match any documents'):
        return 0
    if len(grab.css_list('#resultStats')):
        text = grab.css_text('#resultStats')
    if len(grab.xpath_list('//div[@id="subform_ctrl"]/div[2]')):
        text = grab.xpath_text('//div[@id="subform_ctrl"]/div[2]')
    if text is None:
        logging.error('Unknown google page format')
        return 0
    # Strip thousands separators before extracting the number.
    text = text.replace(',', '').replace('.', '')
    if 'about' in text:
        number = find_number(text.split('about')[1])
        return int(number)
    elif 'of' in text:
        number = find_number(text.split('of')[1])
        return int(number)
    else:
        number = find_number(text)
        return int(number)
#def search(query, grab=None, limit=None, per_page=None):
#if not grab:
#grab = Grab()
#stop = False
#count = 0
#grab.clear_cookies()
#if grab.proxylist:
#grab.change_proxy()
#for page in xrange(1, 9999):
#if stop:
#break
#url = build_search_url(query, page, per_page=per_page)
#index_size = None
#grab = google_request(url, grab=grab)
#count = 0
#for item in parse_search_results(grab):
#yield item # {url, title, index_size}
#count += 1
#if not count:
#stop = True
#if is_last_page(grab):
#logging.debug('Last page found')
#stop = True
#if limit is not None and count >= limit:
#logging.debug('Limit %d reached' % limit)
#stop = True
#grab.sleep(3, 5)
def is_last_page(grab):
    """
    Return True if the fetched page is the last page of search results.

    Presence of the "next" link (a#pnnext) means more pages exist; grab's
    xpath_one raises IndexError when the node is missing.  The unused
    binding of the node and the stale commented-out code were removed.
    """
    try:
        grab.xpath_one('//a[@id="pnnext"]')
    except IndexError:
        logging.debug('No results found')
        return True
    return False
def parse_search_results(grab, parse_index_size=False, strict_query=False):
    """
    Parse google search results page content.

    Args:
        grab: Grab instance holding the fetched response.
        parse_index_size: if truthy, also record the total result count
            reported by google in each item.
        strict_query: if True, treat a "no results found for" page (google
            silently modified the query) as an empty result set.

    Returns:
        A list of dicts with keys: url, title, snippet, filetype,
        index_size, extended.  Always a list (possibly empty), never None.

    Raises:
        AccessDenied: on HTTP 403.
        CaptchaFound: when google serves a captcha challenge.
        ParsingError: when the page layout is not recognised.
    """
    if grab.response.code == 403:
        raise AccessDenied('Access denied (HTTP 403)')
    elif grab.search(u'src="/sorry/image'):
        # Captcha!!!
        raise CaptchaFound('Captcha found')
    elif grab.css_exists('#ires'):
        # BUG FIX: the original condition was `strict_query and A or B`.
        # Because `and` binds tighter than `or`, it entered this branch
        # whenever the english "No results" message appeared, even when
        # strict_query was False.  Parenthesized to match the intent.
        if strict_query and (grab.search(u'Нет результатов для')
                             or grab.search(u'No results found for')):
            # Google modified the query: in strict mode report no results.
            logging.debug('Query modified')
            return []
        if len(grab.css_list('#ires h3')):
            # Something was found
            if parse_index_size:
                # NOTE(review): this boolean parameter shadows the
                # module-level parse_index_size() helper, so the call below
                # raises TypeError when reached -- confirm the intended
                # name; left untouched to keep the keyword interface.
                index_size = parse_index_size(grab)
            else:
                index_size = None
            results = []
            for elem in grab.xpath_list('//*[h3[@class="r"]/a]'):
                title_elem = elem.xpath('h3/a')[0]
                # url: google wraps real URLs in a /url?q=... redirect
                url = title_elem.get('href')
                if url.startswith('/url?'):
                    url = url.split('?q=')[1].split('&')[0]
                    url = unquote_plus(url)
                # title
                title = get_node_text(title_elem)
                # snippet
                # Google could offer two type of snippet format: simple and
                # extended.  It depends on user agent: for <IE8, Opera, <FF3
                # you probably get simple format.
                try:
                    snippet_node = elem.xpath('div[@class="s"]')[0]
                except IndexError:
                    # Probably it is video or some other result
                    # Such result type is not supported yet
                    continue
                try:
                    subnode = snippet_node.xpath('span[@class="st"]')[0]
                    snippet = get_node_text(subnode, smart=False)
                    extended_result = True
                except IndexError:
                    drop_node(snippet_node, 'div')
                    drop_node(snippet_node, 'span[@class="f"]')
                    snippet = get_node_text(snippet_node, smart=False)
                    extended_result = False
                # filetype, e.g. "[PDF]" badge next to the title
                try:
                    filetype = elem.xpath('.//span[contains(@class, "xsm")]'
                                          '/text()')[0].lower().strip('[]')
                except IndexError:
                    filetype = None
                if url:
                    results.append({
                        'url': url,
                        'title': title,
                        'snippet': snippet,
                        'filetype': filetype,
                        'index_size': index_size,
                        'extended': extended_result,
                    })
            return results
        # Results container present but no headings: nothing was found.
        return []
    elif grab.css_exists('#res'):
        # Page rendered without the usual results container; treat as empty
        # rather than falling through and returning None.
        return []
    else:
        raise ParsingError('Could not identify google page format')
| StarcoderdataPython |
3535350 | <reponame>Nasdaq/flask-data-pipes
import os
from urllib.parse import quote_plus as urlquote
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Base configuration shared by all environments.

    Class attributes are consumed by Flask's ``app.config``; environment
    subclasses override what differs.
    """
    # Flask debug mode off by default; development subclass enables it.
    DEBUG = False
    USE_SIGNALS = True
    # Absolute directory containing this config module.
    BASE_DIR = basedir
    # Data directory: $DATA_DIR if set, else ./appdata next to this file.
    DATA = os.getenv('DATA_DIR', os.path.join(BASE_DIR, 'appdata'))
    DATA_FORMAT = 'json_lines'
    DATA_COMPRESSION = False
    DATA_ENCODING = 'utf-8'
    # surrogateescape lets undecodable bytes round-trip through str.
    DATA_ENCODING_ERRORS = 'surrogateescape'
    SQLALCHEMY_DATABASE_URI = f'sqlite:///{DATA}/demo.db'
    CELERY_BROKER_URI = 'amqp://guest@localhost//'
    # Names of application modules; extended/deduplicated in init_app.
    MODULES = []
    @classmethod
    def init_app(cls, app):
        """Finalize ``app.config``: register core modules and DB binds."""
        # config modules
        app.config['MODULES'].extend(['auth', 'celery', 'utils'])
        # De-duplicate and keep a stable (sorted) order.
        app.config['MODULES'] = sorted(set(app.config['MODULES']))
        # config sqlalchemy bind
        app.config['SQLALCHEMY_BINDS'] = cls.config_sqlalchemy_binds(app)
    @staticmethod
    def config_sqlalchemy_binds(app):
        """Build the SQLALCHEMY_BINDS mapping from per-module settings.

        A module's config section may hold a single connection spec
        ({'dsn', 'user', 'pwd'}) or a mapping of named specs.  Credentials
        are URL-quoted into the DSN template; modules without database
        settings are silently skipped.
        """
        db_connections = {}
        for mod in app.config['MODULES']:
            try:
                db_connections.update(
                    {mod.lower(): app.config[mod.upper()]['dsn'].format(user=urlquote(app.config[mod.upper()]['user']),
                                                                        pwd=urlquote(app.config[mod.upper()]['pwd']))})
            except AttributeError:
                # 'dsn' was itself a mapping (dicts have no .format), so
                # treat it as a collection of named connection specs.
                for name, cnxn in app.config[mod.upper()]['dsn'].items():
                    try:
                        db_connections.update({name.lower(): cnxn['dsn'].format(user=urlquote(cnxn['user']),
                                                                                pwd=urlquote(cnxn['pwd']))})
                    except KeyError:
                        # Spec missing 'dsn'/'user'/'pwd'; skip this entry.
                        pass
            except KeyError:
                # Module defines no database settings at all; skip it.
                pass
        return db_connections
class DevelopmentConfig(Config):
    """Development environment: verbose SQL logging, ETL state kept."""
    ENV = 'DEV'
    DEBUG = True
    # Record and echo SQL queries for debugging.
    SQLALCHEMY_RECORD_QUERIES = True
    SQLALCHEMY_ECHO = True
    # Populated by Config.init_app from per-module settings.
    SQLALCHEMY_BINDS = {}
    ETL_SAVE_ON_TEARDOWN = True
    @classmethod
    def init_app(cls, app):
        """Load local config.json, then apply the base initialization."""
        app.config.from_json(os.path.join(basedir, 'config.json'))
        Config.init_app(app)
class ProductionConfig(Config):
    """Production environment: compression on, SQL logging off."""
    ENV = 'PROD'
    DEBUG = False
    # Compress persisted data files in production.
    DATA_COMPRESSION = True
    SQLALCHEMY_RECORD_QUERIES = False
    SQLALCHEMY_ECHO = False
    # Populated by Config.init_app from per-module settings.
    SQLALCHEMY_BINDS = {}
    @classmethod
    def init_app(cls, app):
        """Load local config.json, then apply the base initialization."""
        app.config.from_json(os.path.join(basedir, 'config.json'))
        Config.init_app(app)
# Registry mapping environment names to their config classes; "default"
# aliases the development configuration.
config = {
    'development': DevelopmentConfig,
    'production': ProductionConfig,
}
config['default'] = config['development']
| StarcoderdataPython |
310807 | #author: akshitac8
from typing import no_type_check_decorator
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import util
from sklearn.preprocessing import MinMaxScaler
import sys
import copy
import pdb
class CLASSIFIER:
    """Log-softmax classifier used for ZSL / GZSL evaluation.

    Training happens eagerly inside __init__: depending on ``generalized``
    the instance exposes either (acc_seen, acc_unseen, H, epoch) or
    (acc, best_model) as attributes after construction.

    NOTE(review): this code uses the pre-0.4 PyTorch API
    (Variable, volatile=True, loss.data[0]) throughout.
    """
    # train_Y is integer
    def __init__(self, _train_X, _train_Y, data_loader, _nclass, _cuda, _lr=0.001, _beta1=0.5, _nepoch=20, _batch_size=100, generalized=True, netDec=None, dec_size=4096, dec_hidden_size=4096):
        """Build, train and evaluate the classifier.

        Args:
            _train_X: training features, (n, feature_dim).
            _train_Y: integer training labels, (n,).
            data_loader: dataset object exposing the test splits and the
                seen/unseen class index lists.
            _nclass: number of classes in the softmax output.
            _cuda: move model and buffers to GPU when True.
            _lr: Adam learning rate.
            _beta1: Adam beta1 (beta2 is fixed at 0.999).
            _nepoch: number of training epochs.
            _batch_size: minibatch size.
            generalized: GZSL evaluation when True, plain ZSL otherwise.
            netDec: optional decoder whose outputs are concatenated onto
                every feature vector.
            dec_size: width of the decoder output.
            dec_hidden_size: width of the decoder hidden layer tapped via
                getLayersOutDet().
        """
        self.train_X = _train_X.clone()
        self.train_Y = _train_Y.clone()
        self.test_seen_feature = data_loader.test_seen_feature.clone()
        self.test_seen_label = data_loader.test_seen_label
        self.test_unseen_feature = data_loader.test_unseen_feature.clone()
        self.test_unseen_label = data_loader.test_unseen_label
        self.seenclasses = data_loader.seenclasses
        self.unseenclasses = data_loader.unseenclasses
        self.batch_size = _batch_size
        self.nepoch = _nepoch
        self.nclass = _nclass
        self.input_dim = _train_X.size(1)
        self.cuda = _cuda
        self.model = LINEAR_LOGSOFTMAX_CLASSIFIER(self.input_dim, self.nclass)
        self.netDec = netDec
        if self.netDec:
            # Augment every split with decoder features: each sample
            # becomes [raw | netDec(raw) | netDec hidden activations].
            self.netDec.eval()
            self.input_dim = self.input_dim + dec_size
            self.input_dim += dec_hidden_size
            # Rebuild the model for the enlarged input dimension.
            self.model = LINEAR_LOGSOFTMAX_CLASSIFIER(self.input_dim, self.nclass)
            self.train_X = self.compute_dec_out(self.train_X, self.input_dim)
            self.test_unseen_feature = self.compute_dec_out(self.test_unseen_feature, self.input_dim)
            self.test_seen_feature = self.compute_dec_out(self.test_seen_feature, self.input_dim)
        self.model.apply(util.weights_init)
        self.criterion = nn.NLLLoss()
        # Reusable buffers refilled batch-by-batch in the training loop.
        self.input = torch.FloatTensor(_batch_size, self.input_dim)
        self.label = torch.LongTensor(_batch_size)
        self.lr = _lr
        self.beta1 = _beta1
        self.optimizer = optim.Adam(self.model.parameters(), lr=_lr, betas=(_beta1, 0.999))
        if self.cuda:
            self.model.cuda()
            self.criterion.cuda()
            self.input = self.input.cuda()
            self.label = self.label.cuda()
        self.index_in_epoch = 0
        self.epochs_completed = 0
        self.ntrain = self.train_X.size()[0]
        # Training runs immediately; results are stored on the instance.
        if generalized:
            self.acc_seen, self.acc_unseen, self.H, self.epoch = self.fit()
            #print('Final: acc_seen=%.4f, acc_unseen=%.4f, h=%.4f' % (self.acc_seen, self.acc_unseen, self.H))
        else:
            self.acc, self.best_model = self.fit_zsl()
            #print('acc=%.4f' % (self.acc))
    def fit_zsl(self):
        """Train for ZSL; return (best unseen accuracy, best state_dict)."""
        best_acc = 0
        mean_loss = 0
        last_loss_epoch = 1e8
        best_model = copy.deepcopy(self.model.state_dict())
        for epoch in range(self.nepoch):
            for i in range(0, self.ntrain, self.batch_size):
                self.model.zero_grad()
                batch_input, batch_label = self.next_batch(self.batch_size)
                self.input.copy_(batch_input)
                self.label.copy_(batch_label)
                inputv = Variable(self.input)
                labelv = Variable(self.label)
                output = self.model(inputv)
                loss = self.criterion(output, labelv)
                # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom;
                # newer versions require loss.item().
                mean_loss += loss.data[0]
                loss.backward()
                self.optimizer.step()
                #print('Training classifier loss= ', loss.data[0])
            # Evaluate on unseen classes after every epoch; keep the best.
            acc = self.val(self.test_unseen_feature, self.test_unseen_label, self.unseenclasses)
            #print('acc %.4f' % (acc))
            if acc > best_acc:
                best_acc = acc
                best_model = copy.deepcopy(self.model.state_dict())
        return best_acc, best_model
    def fit(self):
        """Train for GZSL; return (best seen acc, best unseen acc, best H, last epoch)."""
        best_H = 0
        best_seen = 0
        best_unseen = 0
        out = []  # NOTE(review): never used.
        best_model = copy.deepcopy(self.model.state_dict())  # NOTE(review): captured but never updated or returned.
        # early_stopping = EarlyStopping(patience=20, verbose=True)
        for epoch in range(self.nepoch):
            for i in range(0, self.ntrain, self.batch_size):
                self.model.zero_grad()
                batch_input, batch_label = self.next_batch(self.batch_size)
                self.input.copy_(batch_input)
                self.label.copy_(batch_label)
                inputv = Variable(self.input)
                labelv = Variable(self.label)
                output = self.model(inputv)
                loss = self.criterion(output, labelv)
                loss.backward()
                self.optimizer.step()
            acc_seen = 0
            acc_unseen = 0
            # Per-epoch GZSL evaluation on both seen and unseen test splits.
            acc_seen = self.val_gzsl(self.test_seen_feature, self.test_seen_label, self.seenclasses)
            acc_unseen = self.val_gzsl(self.test_unseen_feature, self.test_unseen_label, self.unseenclasses)
            # Harmonic mean of seen/unseen accuracy: the standard GZSL metric.
            H = 2*acc_seen*acc_unseen / (acc_seen+acc_unseen)
            if H > best_H:
                best_seen = acc_seen
                best_unseen = acc_unseen
                best_H = H
        return best_seen, best_unseen, best_H, epoch
    def next_batch(self, batch_size):
        """Return the next (features, labels) minibatch, reshuffling and
        wrapping around at epoch boundaries."""
        start = self.index_in_epoch
        # shuffle the data at the first epoch
        if self.epochs_completed == 0 and start == 0:
            perm = torch.randperm(self.ntrain)
            self.train_X = self.train_X[perm]
            self.train_Y = self.train_Y[perm]
        # the last batch
        if start + batch_size > self.ntrain:
            self.epochs_completed += 1
            rest_num_examples = self.ntrain - start
            if rest_num_examples > 0:
                X_rest_part = self.train_X[start:self.ntrain]
                Y_rest_part = self.train_Y[start:self.ntrain]
            # shuffle the data
            perm = torch.randperm(self.ntrain)
            self.train_X = self.train_X[perm]
            self.train_Y = self.train_Y[perm]
            # start next epoch
            start = 0
            self.index_in_epoch = batch_size - rest_num_examples
            end = self.index_in_epoch
            X_new_part = self.train_X[start:end]
            Y_new_part = self.train_Y[start:end]
            #print(start, end)
            if rest_num_examples > 0:
                # Stitch the old epoch's tail onto the new epoch's head.
                return torch.cat((X_rest_part, X_new_part), 0), torch.cat((Y_rest_part, Y_new_part), 0)
            else:
                return X_new_part, Y_new_part
        else:
            self.index_in_epoch += batch_size
            end = self.index_in_epoch
            #print(start, end)
            # from index start to index end-1
            return self.train_X[start:end], self.train_Y[start:end]
    def val_gzsl(self, test_X, test_label, target_classes):
        """Per-class mean accuracy over target_classes (GZSL protocol)."""
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start+self.batch_size)
            if self.cuda:
                # NOTE(review): volatile=True is the pre-0.4 way of
                # disabling autograd for inference.
                inputX = Variable(test_X[start:end].cuda(), volatile=True)
            else:
                inputX = Variable(test_X[start:end], volatile=True)
            output = self.model(inputX)
            # argmax over class logits gives the predicted class index.
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end
        acc = self.compute_per_class_acc_gzsl(test_label, predicted_label, target_classes)
        return acc
    def compute_per_class_acc_gzsl(self, test_label, predicted_label, target_classes):
        """Average the per-class accuracies over target_classes."""
        acc_per_class = 0
        for i in target_classes:
            idx = (test_label == i)
            # NOTE(review): tensor division of counts; on newer PyTorch
            # this truncates unless cast to float first -- confirm the
            # intended torch version.
            acc_per_class += torch.sum(test_label[idx]==predicted_label[idx]) / torch.sum(idx)
        acc_per_class /= target_classes.size(0)
        return acc_per_class
    # test_label is integer
    def val(self, test_X, test_label, target_classes):
        """ZSL accuracy: labels are remapped into [0, len(target_classes))."""
        start = 0
        ntest = test_X.size()[0]
        predicted_label = torch.LongTensor(test_label.size())
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start+self.batch_size)
            if self.cuda:
                inputX = Variable(test_X[start:end].cuda(), volatile=True)
            else:
                inputX = Variable(test_X[start:end], volatile=True)
            output = self.model(inputX)
            _, predicted_label[start:end] = torch.max(output.data, 1)
            start = end
        acc = self.compute_per_class_acc(util.map_label(test_label, target_classes), predicted_label, target_classes.size(0))
        return acc
    def compute_per_class_acc(self, test_label, predicted_label, nclass):
        """Mean of the per-class accuracies for classes 0..nclass-1."""
        acc_per_class = torch.FloatTensor(nclass).fill_(0)
        for i in range(nclass):
            idx = (test_label == i)
            # NOTE(review): same count-division caveat as in the GZSL
            # variant above.
            acc_per_class[i] = torch.sum(test_label[idx]==predicted_label[idx]) / torch.sum(idx)
        return acc_per_class.mean()
    def compute_dec_out(self, test_X, new_size):
        """Return test_X with decoder features appended, assembled on CPU.

        Output rows are [raw | netDec(raw) | netDec hidden activations]
        and have width new_size.
        """
        start = 0
        ntest = test_X.size()[0]
        new_test_X = torch.zeros(ntest,new_size)
        for i in range(0, ntest, self.batch_size):
            end = min(ntest, start+self.batch_size)
            if self.cuda:
                inputX = Variable(test_X[start:end].cuda(), volatile=True)
            else:
                inputX = Variable(test_X[start:end], volatile=True)
            feat1 = self.netDec(inputX)
            # Hidden-layer activations captured by the forward call above.
            feat2 = self.netDec.getLayersOutDet()
            new_test_X[start:end] = torch.cat([inputX,feat1,feat2],dim=1).data.cpu()
            start = end
        return new_test_X
class LINEAR_LOGSOFTMAX_CLASSIFIER(nn.Module):
    """Single linear layer followed by a log-softmax over the classes."""

    def __init__(self, input_dim, nclass):
        super(LINEAR_LOGSOFTMAX_CLASSIFIER, self).__init__()
        self.fc = nn.Linear(input_dim, nclass)
        self.logic = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Map (batch, input_dim) features to (batch, nclass) log-probs."""
        return self.logic(self.fc(x))
def init_fn(mod):
    """Initializes linear layers "diagonally"
    (concerning [:in_features, :in_features]).
    Function to pass to .apply()."""
    if 'Linear' not in mod.__class__.__name__:
        return
    # Small random weights with ones on the leading square diagonal.
    weight = torch.randn(mod.weight.size()) / 10
    weight[range(mod.in_features), range(mod.in_features)] = 1
    mod.weight = nn.Parameter(weight, requires_grad=True)
    if mod.bias is not None:
        mod.bias = nn.Parameter(
            torch.zeros_like(mod.bias).data,
            requires_grad=True
        )
class MLP(nn.Module):
    """Simple multi-layer perceptron.

    Attributes:
        layers(nn.Sequential): Linear/activation/Dropout stack; every
            hidden Linear is followed by hidden_actf and Dropout, the
            final Linear by output_actf.
    """

    # NOTE(review): the default activation modules are created once at
    # class-definition time and shared across instances; harmless for
    # stateless activations, but worth knowing.
    def __init__(self, in_features, out_features, hidden_layers=None, dropout=0,
                 hidden_actf=nn.LeakyReLU(0.2), output_actf=nn.ReLU()):
        """Build the layer stack.

        Args:
            in_features(int): input dimension.
            out_features(int): final output dimension.
            hidden_layers(list of ints|int|None, optional): hidden layer
                sizes (an int means one hidden layer); default None
                means no hidden layers.
            dropout(float, optional): dropout probability, default 0.
            hidden_actf(activation, optional): hidden-layer activation,
                default nn.LeakyReLU(0.2).
            output_actf(activation, optional): output activation,
                default nn.ReLU().
        """
        super().__init__()
        if hidden_layers is None:
            hidden_layers = []
        elif isinstance(hidden_layers, int):
            hidden_layers = [hidden_layers]
        dims = [in_features] + hidden_layers + [out_features]
        last = len(dims) - 2
        modules = []
        for idx in range(last + 1):
            modules.append(nn.Linear(dims[idx], dims[idx + 1]))
            if idx == last:
                # ok to use relu, resnet feats are >= 0
                modules.append(output_actf)
            else:
                modules.append(hidden_actf)
                modules.append(nn.Dropout(dropout))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Map a (batch, in_features) tensor to (batch, out_features)."""
        return self.layers(x)

    def init_diagonal(self):
        """Sets weights of linear layers to approx I and biases to 0."""
        self.apply(init_fn)
class PrototypicalNet(nn.Module):
    """Classifies examples based on distance metric
    from class prototypes. FSL setting.
    Attributes:
        mapper(nn.Module): mapper from feature to
            embedding space.
        dist(function): distance function. Accepts
            2D torch.Tensor prototypes and 2D
            torch.Tensor queries and returns a 2D torch.Tensor
            whose [i, j] element is the distance between
            queries[i] and prototypes[j].
    """
    def __init__(self, in_features, out_features, hidden_layers=None,
                 dist='euclidean', init_diagonal=False, **extra_features):
        """Init.
        Args:
            in_features(int): input features dimension.
            out_features(int): final output dimension.
            extra_features(int): extra feature dimension,
                included for the model to be backwards
                compatible with pretrained models without
                extra features. Default is None, aka no extra
                features.
            hidden_layers(list|None): number of neurons
                in hidden layers, default is one hidden
                layer with units same as input.
            dist(function|str): distance metric. If str,
                predefined distance is used accordingly.
                If function is passed, it should accept
                2D torch.Tensor prototypes and 2D
                torch.Tensor queries and return a 2D torch.Tensor
                whose [i, j] element is the distance between
                queries[i] and prototypes[j].
            init_diagonal(bool): whether to init linear layers
                with diagonal weights and zero biases, default=`False`.
        """
        super().__init__()
        if hidden_layers is None:
            hidden_layers = [in_features]
        self.mapper = MLP(in_features, out_features, hidden_layers,
                          hidden_actf=nn.ReLU())
        if extra_features:
            extra_dim = extra_features['dim']
            # Layer widths of the auxiliary ("extra") stream; defaults to
            # one extra_dim-wide layer per hidden layer of the main mapper.
            extra_layers = [extra_dim] + extra_features.get(
                'hidden_layers', [extra_dim] * len(hidden_layers)
            ) + [extra_features.get('out_dim', extra_dim)]
            # Each layer projects the extra stream into main+extra units,
            # so map() can split the result between the two streams.
            self.mapper_from_extra = nn.ModuleList(
                [
                    nn.Linear(in_feats, out_feats + extra_out_feats)
                    for in_feats, (out_feats, extra_out_feats) in zip(
                        extra_layers[:-1], zip(
                            hidden_layers + [out_features], extra_layers[1:]
                        )
                    )
                ]
            )
            # Projects the main stream into the extra stream's widths.
            self.mapper_to_extra = nn.ModuleList(
                [
                    nn.Linear(in_feats, out_feats)
                    for in_feats, out_feats in zip(
                        [in_features] + hidden_layers, extra_layers[1:]
                    )
                ]
            )
        else:
            # NOTE(review): with no extra features these are nn.Identity,
            # which is not iterable -- map() appears to assume the
            # extra-features configuration; confirm before calling it
            # without extras.
            self.mapper_from_extra = nn.Identity()
            self.mapper_to_extra = nn.Identity()
        if init_diagonal:
            self.mapper.init_diagonal()
            if extra_features:
                self.mapper_from_extra.apply(init_fn)
        if isinstance(dist, str):
            # Resolve 'euclidean' / 'cosine' to the matching static method.
            self.dist = self.__getattribute__(dist)
        else:
            self.dist = dist
    @staticmethod
    def cosine(prototypes, queries):
        """Computes cosine distance between prototypes
        and set of queries.
        Args:
            prototypes(torch.Tensor): prototypes of size
                (way, embedding_dim).
            queries(torch.Tensor): queries of size
                (n_queries, embedding_dim).
        Returns:
            A torch.Tensor of size (n_queries, way) where
            element [i,j] contains distance between queries[i]
            and prototypes[j].
        """
        inner_prod = queries.matmul(prototypes.T)
        norm_i = queries.norm(dim=1, keepdim=True)
        norm_j = prototypes.norm(dim=1, keepdim=True).T
        # 1 - cosine similarity, computed via the normalized inner product.
        return 1 - inner_prod / norm_i / norm_j
    @staticmethod
    def euclidean(prototypes, queries):
        """Computes euclidean distance between prototypes
        and set of queries.
        Args:
            prototypes(torch.Tensor): prototypes of size
                (way, embedding_dim).
            queries(torch.Tensor): queries of size
                (n_queries, embedding_dim).
        Returns:
            A torch.Tensor of size (n_queries, way) where
            element [i,j] contains distance between queries[i]
            and prototypes[j].
        """
        way = prototypes.size(0)
        n_queries = queries.size(0)
        prototypes = prototypes.repeat(n_queries, 1)
        queries = util.tensor_interleave(queries, way)
        # after the repeats, prototypes have way classes after way classes after ...
        # and queries have way repeats of 1st query, way repeats of 2nd query, ...
        # so initial dist vector has distance of first query to all way classes
        # then the distance of the second query to all way class, etc
        return torch.norm(prototypes - queries, dim=1).view(n_queries, way)
    def map(self, main_tensor, extra_tensor1, extra_tensor2):
        """Embed main features while fusing in two auxiliary tensors.

        Runs the main stream through the mapper's Linear layers, mixing in
        projections of the concatenated extra tensors at every layer, and
        returns the concatenation of both streams.

        NOTE(review): relies on self.mapper.layers following the
        (Linear, activation, Dropout) * k layout built by MLP -- the
        3 * layer indexing below depends on it.
        """
        extra_tensor = torch.cat((extra_tensor1, extra_tensor2), dim=1)
        for layer, (m_from, m_to) in enumerate(
            zip(self.mapper_from_extra, self.mapper_to_extra)
        ):
            mapper = self.mapper.layers[3 * layer]
            actf = self.mapper.layers[3 * layer + 1]
            from_extra = m_from(extra_tensor)
            to_extra = m_to(main_tensor)
            main_z = mapper(main_tensor)
            # The leading to_extra.size(1) columns of from_extra update the
            # extra stream; the remainder updates the main stream.
            extra_tensor = actf(
                to_extra + from_extra[:, :to_extra.size(1)]
            )
            main_tensor = actf(
                main_z + from_extra[:, to_extra.size(1):]
            )
            try:
                dropout = self.mapper.layers[3 * layer + 2]
                main_tensor = dropout(main_tensor)
                extra_tensor = dropout(extra_tensor)
            except:
                # NOTE(review): bare except -- the final layer has no
                # Dropout, so the IndexError above is tolerated; narrow to
                # IndexError if this is ever refactored.
                pass
        return torch.cat((main_tensor, extra_tensor), dim=1)
    def forward(self, support, query, netDec):
        """Episodic forward propagation.
        Computes prototypes given the support set of an episode
        and then makes inference on the corresponding query set.
        Args:
            support(list of torch.Tensors): support set list
                whose every element is tensor of size
                (shot, feature_dim), i.e. shot image features
                belonging to the same class.
            query(list of torch.Tensors): query set list
                whose every element is tensor of size
                (n_queries, feature_dim), i.e. n_queries image
                features belonging to the same class (for consistency
                purposes with support).
            netDec(nn.Module): decoder whose output and hidden-layer
                activations are fused with the raw features via map().
        Returns:
            A list of torch.Tensor of size (n_queries, way) logits
            whose i-th element consists of logits of queries belonging
            to the i-th class.
        """
        prototypes = []
        for class_features in support:
            # class_features are (shot, feature_dim)
            feat1 = netDec(class_features)
            feat2 = netDec.getLayersOutDet()
            # Prototype = mean embedding of the class' support examples.
            prototypes.append(self.map(class_features, feat1, feat2).mean(dim=0))
        prototypes = torch.stack(prototypes)
        logits = []
        for class_features in query:
            # class_features are (n_queries, feature_dim)
            feat1 = netDec(class_features)
            feat2 = netDec.getLayersOutDet()
            # Negative distance to each prototype serves as the logit.
            logits.append(
                -self.dist(prototypes, self.map(class_features, feat1, feat2))
            )
        return logits
def eval_protonet(fsl_classifier, netDec, dataset, support, labels, cuda):
    """Return ZSL or GZSL metrics of Z2FSL.
    Args:
        fsl_classifier (`nn.Module`): trained `PrototypicalNet`-like
            classifier.
        netDec (`nn.Module`): decoder forwarded to the classifier.
        dataset (`MatDataset`): dataset used during training.
        support (`list` of `torch.Tensor`s): support set.
        labels (`iterable` of `int`s): corresponding labels, one
            for each element in `support`.
        cuda (`bool`): whether on CUDA.
    Returns:
        If ZSL:
            `float`: ZSL accuracy.
        Else:
            `list`: [harmonic mean, seen accuracy, unseen accuracy].
    """
    # Switch classifier and dataset to evaluation mode (the dataset object
    # toggles which split it serves).
    fsl_classifier.eval()
    dataset.eval()
    query, _, align_labels, n_seen = dataset()
    # Reorder the query tensors so they line up with `labels`/`support`.
    query = [Variable(query[align_labels.index(label)]) for label in labels]
    if cuda:
        query = [cls_query.cuda() for cls_query in query]
    logits = fsl_classifier(support, query, netDec)
    # Restore training mode before computing metrics.
    fsl_classifier.train()
    dataset.train()
    accs = []
    for i, class_logits in enumerate(logits):
        # logits[i] holds queries of class i, so a prediction of i is correct.
        preds = np.argmax(class_logits.data.cpu(), -1)
        correct = (preds == i).sum()
        accs.append(correct / preds.size(0))
    if n_seen > 0:
        # GZSL: the first n_seen entries are seen classes; report the
        # harmonic mean alongside the per-group accuracies.
        acc_s = sum(accs[:n_seen]) / n_seen
        acc_u = sum(accs[n_seen:]) / (len(accs) - n_seen)
        acc = 2 * acc_s * acc_u / (acc_s + acc_u)
        return [acc, acc_s, acc_u]
    else:
        return sum(accs) / len(accs)
| StarcoderdataPython |
3385472 | <filename>test_hstore_field/models.py
from django.contrib import admin
from django.contrib.gis.admin import OSMGeoAdmin
from django.contrib.gis.db import models
from hstore_field import fields
class Item (models.Model):
    """Test model with a plain hstore field."""
    # Human-readable item name.
    name = models.CharField(max_length=64)
    # Arbitrary key/value attributes stored in a PostgreSQL hstore column.
    data = fields.HStoreField()
admin.site.register(Item)
class Related (models.Model):
    """Test model referencing Item, to exercise hstore across relations."""
    item = models.ForeignKey(Item)
admin.site.register(Related)
class GeoItem (models.Model):
    """Test model combining GeoDjango geometry with an hstore field."""
    name = models.CharField(max_length=64)
    # Optional point geometry; null allowed for items without a location.
    point = models.PointField(null=True)
    data = fields.HStoreField()
    # GeoManager enables spatial lookups on the queryset.
    objects = models.GeoManager()
admin.site.register(GeoItem, OSMGeoAdmin)
| StarcoderdataPython |
1859467 | # -*- coding: utf-8 -*-
#! /usr/bin/python
"""
The imas-compatibility module of tofu
"""
import warnings
import traceback
import itertools as itt
# Import the sub-package implementation, preferring the installed
# absolute layout and falling back to the in-tree relative layout.
try:
    try:
        from tofu.imas2tofu._core import *
        from tofu.imas2tofu._mat2ids2calc import *
    except Exception:
        # Development / in-tree layout: use relative imports instead.
        from ._core import *
        from ._mat2ids2calc import *
except Exception as err:
    # Both layouts failed: re-raise with a message explaining that the
    # optional imas2tofu sub-package cannot be used.
    if str(err) == 'imas not available':
        msg = ""
        msg += "\n\nIMAS python API issue\n"
        msg += "imas could not be imported into tofu ('import imas' failed):\n"
        msg += "  - it may not be installed (optional dependency)\n"
        msg += "  - or you have loaded the wrong working environment\n\n"
        msg += "  => the optional sub-package tofu.imas2tofu is not usable\n"
    else:
        # Unexpected failure: include the full traceback for debugging.
        msg = str(traceback.format_exc())
        msg += "\n\n => the optional sub-package tofu.imas2tofu is not usable\n"
    raise Exception(msg)
# -----------------------------------------------
# Check IMAS version vs latest available in linux modules
# -----------------------------------------------
# Prefix identifying IMAS entries in `module` command output.
_KEYSTR = 'IMAS/'
# extract all IMAS versions from a str returned by modules
def extractIMAS(ss, keystr=_KEYSTR):
if keystr not in ss:
raise Exception
ls = ss[ss.index(keystr):].split('\n')
ls = itt.chain.from_iterable([s.split(' ') for s in ls])
ls = [s for s in ls if keystr in s]
ls = [s[len(keystr):s.index('(')] if '(' in s else s[len(keystr):]
for s in ls]
return sorted(ls)
# Compare current and latest available IMAS versions
def check_IMAS_version(verb=True, keystr=_KEYSTR):
    """Warn when the loaded IMAS module is not the newest available.

    Runs `module list` and `module av IMAS` in a subshell, extracts the
    IMAS versions from both, and emits a UserWarning when the loaded
    version is older than the latest available one.

    Args:
        verb: NOTE(review): currently unused -- the warning is emitted
            unconditionally; kept for interface stability.
        keystr: prefix identifying IMAS entries in module output.

    Returns:
        (current, available): the loaded version string and the sorted
        list of available version strings.

    Raises:
        Exception: when zero/multiple IMAS modules are loaded, when none
        are available, or when the loaded one is not among the available.
    """
    import subprocess
    # Get currently loaded IMAS
    cmd = "module list"
    proc = subprocess.run(cmd, check=True, shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    lcur = extractIMAS(proc.stdout.decode(), keystr=keystr)
    if len(lcur) != 1:
        msg = ("You seem to have no / several IMAS version loaded:\n"
               + "\t- module list: {}".format(lcur))
        raise Exception(msg)
    # Get all available IMAS
    cmd = "module av IMAS"
    proc = subprocess.run(cmd, check=True, shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    lav = extractIMAS(proc.stdout.decode(), keystr=keystr)
    if len(lav) == 0:
        msg = "There is not available IMAS version"
        raise Exception(msg)
    # Compare and warn
    if lcur[0] not in lav:
        msg = "The current IMAS version is not available!"
        raise Exception(msg)
    msg = None
    # Up to date when current is last in the sorted list, or when the only
    # newer entry is a '.bak' copy of the current version.
    c0 = (lav.index(lcur[0]) != len(lav)-1
          and lcur[0]+'.bak' != lav[-1])
    if c0:
        msg = ("\nYou do not seem to be using the latest IMAS version:\n"
               + "'module list' vs 'module av IMAS' suggests:\n"
               + "\t- Current version: {}\n".format(lcur[0])
               + "\t- Latest version : {}".format(lav[-1]))
        warnings.warn(msg)
    return lcur[0], lav
# Try comparing and warning (the version check runs once at import time).
try:
    _, _ = check_IMAS_version(verb=True)
except Exception as err:
    # This warning is an optional luxury, should not block anything
    pass
# Public, re-exported API of this sub-package.
__all__ = ['MultiIDSLoader', 'load_Config', 'load_Plasma2D',
           'load_Cam', 'load_Data']
# Drop module-level helpers from the public namespace (function defaults
# captured _KEYSTR at definition time, so deleting it here is safe).
del warnings, traceback, itt, _KEYSTR
| StarcoderdataPython |
3491449 | <filename>examples/benchmark_tfmodel_ort.py
# SPDX-License-Identifier: Apache-2.0
"""
The following code compares the speed of tensorflow against onnxruntime
with a model downloaded from Tensorflow Hub.
"""
import time
import numpy
from tqdm import tqdm
import tensorflow_hub as hub
import onnxruntime as ort
def generate_random_images(shape=(100, 100), n=10):
    """Create *n* random RGB image batches of spatial size *shape*.

    Each element has shape (1, h, w, 3), dtype float32, values in [0, 255].
    """
    batch_shape = (1,) + shape + (3,)
    images = []
    for _ in range(n):
        pixels = numpy.clip(numpy.abs(numpy.random.randn(*batch_shape)), 0, 1) * 255
        images.append(pixels.astype(numpy.float32))
    return images
def measure_time(fct, imgs):
    """Apply *fct* to every image, returning (results, per-call seconds)."""
    results = []
    times = []
    for img in tqdm(imgs):
        start = time.perf_counter()
        out = fct(img)
        elapsed = time.perf_counter() - start
        results.append(out)
        times.append(elapsed)
    return results, times
imgs = generate_random_images()

# Download model from https://tfhub.dev/captain-pool/esrgan-tf2/1
# python -m tf2onnx.convert --saved-model esrgan --output "esrgan-tf2.onnx" --opset 12
# BUG FIX: the session was previously bound to the name `ort`, shadowing
# the onnxruntime module alias imported above; use a distinct name.
sess = ort.InferenceSession('esrgan-tf2.onnx')
fct_ort = lambda img: sess.run(None, {'input_0': img})
results_ort, duration_ort = measure_time(fct_ort, imgs)
print(len(imgs), duration_ort)

model = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
results_tf, duration_tf = measure_time(model, imgs)
print(len(imgs), duration_tf)

# Ratio > 1 means onnxruntime was slower than TensorFlow overall.
print("ratio ORT / TF", sum(duration_ort) / sum(duration_tf))
| StarcoderdataPython |
3244086 | <reponame>ranjeethmahankali/galproject<gh_stars>1-10
import pygalfunc as pgf
import pygalview as pgv
# Render in 2D: all geometry below lies in the z=0 plane.
pgv.set2dMode(True)
# Axis-aligned box spanning (-1, -1, 0) to (1, 1, 0).
minpt = pgf.var_vec3((-1., -1., 0.))
maxpt = pgf.var_vec3((1., 1., 0.))
box = pgf.box3(minpt, maxpt)
# Interactive slider controlling how many random points are generated.
npts = pgv.slideri32("Point count", 5, 50, 25)
cloud = pgf.randomPointsInBox(box, npts)
# Smallest circle enclosing the cloud; extra outputs are discarded.
circ, *_ = pgf.boundingCircle(cloud)
pgv.show("cloud", pgf.pointCloud3d(cloud))
pgv.show("circ", circ)
pgv.print("Circle", circ)
| StarcoderdataPython |
8121392 | <reponame>jmshnds/eventstore_grpc
"""Reset projections."""
from eventstore_grpc.proto import projections_pb2, projections_pb2_grpc
def reset_projection(
    stub: projections_pb2_grpc.ProjectionsStub, name: str, write_checkpoint: bool = True, **kwargs
) -> projections_pb2.ResetResp:
    """Reset the named projection via the gRPC Projections service.

    Extra keyword arguments are forwarded to the gRPC call
    (e.g. metadata, timeout).
    """
    options = projections_pb2.ResetReq.Options()
    options.name = name
    options.write_checkpoint = write_checkpoint
    request = projections_pb2.ResetReq()
    request.options.CopyFrom(options)
    return stub.Reset(request, **kwargs)
| StarcoderdataPython |
11255718 | """Integration tests for client library"""
from hil.flaskapp import app
from hil.client.base import ClientBase, FailedAPICallException
from hil.errors import BadArgumentError
from hil.client.client import Client
from hil.test_common import config_testsuite, config_merge, \
fresh_database, fail_on_log_warnings, server_init, uuid_pattern, \
obmd_cfg, HybridHTTPClient, initial_db
from hil.model import db
from hil import config, deferred
import json
import pytest
import requests
from passlib.hash import sha512_crypt
# Endpoint and credentials shared by every test in this module.
ep = "http://127.0.0.1:8000"
username = "hil_user"
password = "<PASSWORD>"
http_client = HybridHTTPClient(endpoint=ep,
                               username=username,
                               password=password)
C = Client(ep, http_client)  # Initializing client library

# Re-export imported helpers as pytest fixtures for this module.
fail_on_log_warnings = pytest.fixture(fail_on_log_warnings)
fresh_database = pytest.fixture(fresh_database)
server_init = pytest.fixture(server_init)
obmd_cfg = pytest.fixture(obmd_cfg)
# BUG FIX: was misspelled `intial_db`; pytest registers the fixture under
# the function's __name__ ('initial_db') either way, so this rename is
# backward compatible and matches the other fixture bindings above.
initial_db = pytest.fixture(initial_db)
@pytest.fixture
def dummy_verify():
    """replace sha512_crypt.verify with something faster (albeit broken).
    This fixture is for testing User related client calls which use database
    authentication backend.
    This fixture works around a serious consequence of using the database
    backend: doing password hashing is **SLOW** (by design; the algorithms
    are intended to make brute-forcing hard), and we've got fixtures where
    we're going through the request handler tens of times for every test
    (before even hitting the test itself).
    So, this fixture monkey-patches sha512_crypt.verify (the function that
    does the work of checking the password), replacing it with a dummy
    implementation. At the time of writing, this shaves about half an hour
    off of our Travis CI runs.
    """
    # NOTE(review): wrapped in staticmethod because it is assigned onto the
    # sha512_crypt class attribute below -- confirm against passlib docs.
    @staticmethod
    def dummy(*args, **kwargs):
        """dummy replacement, which just accepts any password (returns True)."""
        return True
    old = sha512_crypt.verify
    sha512_crypt.verify = dummy  # override the verify() function
    yield  # Test runs here
    sha512_crypt.verify = old  # restore the old implementation.
@pytest.fixture
def configure():
    """Configure HIL for these tests.

    Loads the testsuite defaults, then overlays: no authentication, the
    mock switch driver, and the VLAN-pool network allocator.
    """
    config_testsuite()
    config_merge({
        'auth': {
            'require_authentication': 'False',
        },
        'extensions': {
            'hil.ext.switches.mock': '',
            'hil.ext.network_allocators.null': None,
            'hil.ext.network_allocators.vlan_pool': '',
        },
        'hil.ext.network_allocators.vlan_pool': {
            'vlans': '1001-1040',
        },
        'devel': {
            # Disable dry_run, so we can talk to obmd. Note: We register
            # several "real" switches in this module, but never actually
            # preform any "real" network operations on them, so a proper
            # switch setup is still not necessary.
            'dry_run': None,
        },
    })
    config.load_extensions()
@pytest.fixture
def database_authentication():
    """setup the config file for using database authentication.
    This fixture is only used by Test_user class"""
    config_testsuite()
    # Swap the null auth backend for the database-backed one.
    config_merge({
        'auth': {
            'require_authentication': 'False',
        },
        'extensions': {
            'hil.ext.auth.null': None,
            'hil.ext.auth.database': '',
        },
    })
    config.load_extensions()
@pytest.fixture()
def obmd_node(obmd_cfg):
    """register a node with both obmd & HIL
    ...so we can use it in tests that touch the obmd-related calls.

    Returns the node's name ('obmd-node').
    """
    obmd_uri = 'http://localhost' + obmd_cfg['ListenAddr'] + \
        '/node/obmd-node'
    # Register the node with obmd:
    resp = requests.put(
        obmd_uri,
        auth=('admin', obmd_cfg['AdminToken']),
        data=json.dumps({
            # Mock OBM driver; addr/NumWrites are its required parameters.
            'type': 'mock',
            'info': {
                "addr": "10.0.0.23",
                "NumWrites": 0,
            },
        }),
    )
    assert resp.ok, "Failed to register node with obmd."
    # ...and with HIL:
    assert C.node.register(
        "obmd-node",
        obmd_uri,
        obmd_cfg['AdminToken'],
    ) is None
    return 'obmd-node'
@pytest.fixture
def initial_admin():
    """Inserts an admin user into the database.
    This fixture is used by Test_user tests.

    Uses the module-level username/password so the Client instance `C`
    can authenticate against the database backend.
    """
    with app.app_context():
        # Imported lazily: the extension is only loaded for DB-auth tests.
        from hil.ext.auth.database import User
        db.session.add(User(username, password, is_admin=True))
        db.session.commit()
class Test_ClientBase:
    """Tests client initialization and object_url creation."""

    def test_object_url(self):
        """object_url joins the endpoint, API version and path segments."""
        client = ClientBase(ep, 'some_base64_string')
        url = client.object_url('abc', '123', 'xy23z')
        assert url == 'http://127.0.0.1:8000/v0/abc/123/xy23z'
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class Test_node:
    """Tests for node-related client calls.

    Covers listing, registration, OBM enable/disable, power management,
    NIC management, metadata, console access and network attach/detach,
    all driven through the client `C` against the fixture-provided server.
    """
    def test_list_nodes_free(self):
        """(successful) to list_nodes('free')"""
        assert C.node.list('free') == [
            u'free_node_0', u'free_node_1', u'no_nic_node'
        ]
    def test_list_nodes_all(self):
        """(successful) to list_nodes('all')"""
        assert C.node.list('all') == [
            u'free_node_0', u'free_node_1', u'manhattan_node_0',
            u'manhattan_node_1', u'no_nic_node', u'runway_node_0',
            u'runway_node_1'
        ]
    def test_node_register(self):
        """Test node_register"""
        assert C.node.register("dummy-node-01",
                               "http://obmd.example.com/node/dummy-node-01",
                               "secret",
                               ) is None
    def test_show_node(self):
        """(successful) to show_node"""
        assert C.node.show('free_node_0') == {
            u'metadata': {},
            u'project': None,
            u'nics': [
                {
                    u'macaddr': u'Unknown',
                    u'port': None,
                    u'switch': None,
                    u'networks': {}, u'label': u'boot-nic'
                },
                {
                    u'macaddr': u'Unknown',
                    u'port': u'free_node_0_port',
                    u'switch': u'stock_switch_0',
                    u'networks': {}, u'label': u'nic-with-port'
                }
            ],
            u'name': u'free_node_0'
        }
    def test_show_node_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.show('node-/%]07')
    # OBM and power-management calls below require an obmd-registered node
    # (supplied by the `obmd_node` fixture) and, except for the enable/disable
    # tests themselves, an enabled OBM.
    def test_enable_disable_obm(self, obmd_node):
        """Test enable_obm/disable_obm"""
        # The spec says that these calls should silently no-op if the
        # state doesn't need to change so we call them repeatedly in
        # different orders to verify.
        C.node.disable_obm(obmd_node)
        C.node.enable_obm(obmd_node)
        C.node.enable_obm(obmd_node)
        C.node.disable_obm(obmd_node)
        C.node.disable_obm(obmd_node)
        C.node.disable_obm(obmd_node)
        C.node.enable_obm(obmd_node)
    def test_power_cycle(self, obmd_node):
        """(successful) to node_power_cycle"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_cycle(obmd_node) is None
    def test_power_cycle_force(self, obmd_node):
        """(successful) to node_power_cycle(force=True)"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_cycle(obmd_node, True) is None
    def test_power_cycle_no_force(self, obmd_node):
        """(successful) to node_power_cycle(force=False)"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_cycle(obmd_node, False) is None
    def test_power_cycle_bad_arg(self, obmd_node):
        """error on call to power_cycle with bad argument."""
        C.node.enable_obm(obmd_node)
        with pytest.raises(FailedAPICallException):
            C.node.power_cycle(obmd_node, 'wrong')
    def test_power_cycle_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.power_cycle('node-/%]07', False)
    def test_power_off(self, obmd_node):
        """(successful) to node_power_off"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_off(obmd_node) is None
    def test_power_off_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.power_off('node-/%]07')
    def test_power_on(self, obmd_node):
        """(successful) to node_power_on"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_on(obmd_node) is None
    def test_set_bootdev(self, obmd_node):
        """ (successful) to node_set_bootdev """
        C.node.enable_obm(obmd_node)
        assert C.node.set_bootdev(obmd_node, "A") is None
    def test_power_status(self, obmd_node):
        """(successful) to node_power_status"""
        C.node.enable_obm(obmd_node)
        resp = C.node.power_status(obmd_node)
        # "Mock Status" is what the obmd mock driver reports.
        assert resp["power_status"] == "Mock Status"
    def test_node_add_nic(self):
        """Test removing and then adding a nic."""
        C.node.remove_nic('free_node_1', 'boot-nic')
        assert C.node.add_nic('free_node_1', 'boot-nic', 'aa:bb:cc:dd:ee:ff') \
            is None
    def test_node_add_duplicate_nic(self):
        """Adding a nic twice should fail"""
        C.node.remove_nic('free_node_1', 'boot-nic')
        C.node.add_nic('free_node_1', 'boot-nic', 'aa:bb:cc:dd:ee:ff')
        with pytest.raises(FailedAPICallException):
            C.node.add_nic('free_node_1', 'boot-nic', 'aa:bb:cc:dd:ee:ff')
    def test_nosuch_node_add_nic(self):
        """Adding a nic to a non-existent node should fail."""
        with pytest.raises(FailedAPICallException):
            C.node.add_nic('abcd', 'eth0', 'aa:bb:cc:dd:ee:ff')
    def test_add_nic_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.add_nic('node-/%]08', 'eth0', 'aa:bb:cc:dd:ee:ff')
    def test_remove_nic(self):
        """(successful) call to node_remove_nic"""
        assert C.node.remove_nic('free_node_1', 'boot-nic') is None
    def test_remove_duplicate_nic(self):
        """Removing a nic twice should fail"""
        C.node.remove_nic('free_node_1', 'boot-nic')
        with pytest.raises(FailedAPICallException):
            C.node.remove_nic('free_node_1', 'boot-nic')
    def test_remove_nic_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.remove_nic('node-/%]08', 'boot-nic')
    def test_metadata_set(self):
        """ test for registering metadata from a node """
        assert C.node.metadata_set("free_node_0", "EK", "pk") is None
    def test_metadata_delete(self):
        """ test for deleting metadata from a node """
        # "free_node" does not exist, so this delete must fail first.
        with pytest.raises(FailedAPICallException):
            C.node.metadata_delete("free_node", "EK")
        C.node.metadata_set("free_node_0", "EK", "pk")
        assert C.node.metadata_delete("free_node_0", "EK") is None
    def test_node_show_console(self, obmd_node):
        """various calls to node_show_console"""
        # show console without enabling the obm.
        with pytest.raises(FailedAPICallException):
            C.node.show_console(obmd_node)
        C.node.enable_obm(obmd_node)
        # Read in a prefix of the output from the console; the obmd mock driver
        # keeps counting forever.
        console_stream = C.node.show_console(obmd_node)
        expected = '\n'.join([str(i) for i in range(10)])
        actual = ''
        while len(actual) < len(expected):
            actual += console_stream.next()
        assert actual.startswith(expected)
        C.node.disable_obm(obmd_node)
        with pytest.raises(FailedAPICallException):
            C.node.show_console(obmd_node)
    def test_node_show_console_reserved_chars(self):
        """test for cataching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.show_console('node-/%]01')
    def test_node_connect_network(self):
        """(successful) call to node_connect_network"""
        response = C.node.connect_network(
            'manhattan_node_1', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        # check that the reponse contains a valid UUID.
        assert uuid_pattern.match(response['status_id'])
        deferred.apply_networking()
    def test_node_connect_network_error(self):
        """Duplicate call to node_connect_network should fail."""
        C.node.connect_network(
            'manhattan_node_1', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        deferred.apply_networking()
        with pytest.raises(FailedAPICallException):
            C.node.connect_network(
                'manhattan_node_1', 'nic-with-port', 'manhattan_pxe',
                'vlan/native')
        deferred.apply_networking()
    def test_node_connect_network_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.connect_network('node-/%]01', 'eth0', 'net-01',
                                   'vlan/native')
    def test_node_detach_network(self):
        """(successful) call to node_detach_network"""
        C.node.connect_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        deferred.apply_networking()
        response = C.node.detach_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe')
        assert uuid_pattern.match(response['status_id'])
        deferred.apply_networking()
    def test_node_detach_network_error(self):
        """Duplicate call to node_detach_network should fail."""
        C.node.connect_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        deferred.apply_networking()
        C.node.detach_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe')
        deferred.apply_networking()
        with pytest.raises(FailedAPICallException):
            C.node.detach_network(
                'manhattan_node_0', 'nic-with-port', 'manhattan_pxe')
    def test_node_detach_network_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.detach_network('node-/%]04', 'eth0', 'net-04')
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class Test_project:
    """Tests for project-related client calls.

    Exercises project create/delete, listing, and connecting/detaching
    nodes, plus the illegal-character validation in each call.
    """
    def test_list_projects(self):
        """ test for getting list of project """
        assert C.project.list() == [u'empty-project', u'manhattan', u'runway']
    def test_list_nodes_inproject(self):
        """ test for getting list of nodes connected to a project. """
        assert C.project.nodes_in('manhattan') == [
            u'manhattan_node_0', u'manhattan_node_1']
        assert C.project.nodes_in('runway') == [
            u'runway_node_0', u'runway_node_1']
    def test_list_nodes_inproject_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.project.nodes_in('pr/%[oj-01')
    def test_list_networks_inproject(self):
        """ test for getting list of networks connected to a project. """
        assert C.project.networks_in('runway') == [
            u'runway_provider', u'runway_pxe']
    def test_list_networks_inproject_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.project.networks_in('pr/%[oj-01')
    def test_project_create(self):
        """ test for creating project. """
        assert C.project.create('dummy-01') is None
    def test_duplicate_project_create(self):
        """ test for catching duplicate name while creating new project. """
        C.project.create('dummy-02')
        with pytest.raises(FailedAPICallException):
            C.project.create('dummy-02')
    def test_project_create_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.project.create('dummy/%[-02')
    def test_project_delete(self):
        """ test for deleting project. """
        C.project.create('dummy-03')
        assert C.project.delete('dummy-03') is None
    def test_error_project_delete(self):
        """ test to capture error condition in project delete. """
        with pytest.raises(FailedAPICallException):
            C.project.delete('dummy-03')
    def test_project_delete_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.project.delete('dummy/%[-03')
    def test_project_connect_detach_node(self):
        """ test for connecting/detaching node to project. """
        C.project.create('proj-04')
        assert C.project.connect('proj-04', 'free_node_0') is None
        # connecting it again should fail
        with pytest.raises(FailedAPICallException):
            C.project.connect('proj-04', 'free_node_0')
        assert C.project.detach('proj-04', 'free_node_0') is None
    def test_project_connect_node_nosuchobject(self):
        """ test for connecting no such node or project """
        C.project.create('proj-06')
        with pytest.raises(FailedAPICallException):
            C.project.connect('proj-06', 'no-such-node')
        with pytest.raises(FailedAPICallException):
            C.project.connect('no-such-project', 'free_node_1')
    def test_project_connect_node_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.project.connect('proj/%[-04', 'free_node_1')
    def test_project_detach_node_nosuchobject(self):
        """ Test for while detaching node from project."""
        C.project.create('proj-08')
        with pytest.raises(FailedAPICallException):
            C.project.detach('proj-08', 'no-such-node')
        with pytest.raises(FailedAPICallException):
            C.project.detach('no-such-project', 'free_node_1')
    def test_project_detach_node_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.project.detach('proj/%]-08', 'free_node_0')
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class Test_switch:
    """Tests for switch-related client calls.

    Covers listing, showing, deleting and registering switches (both a
    well-formed registration and one with an unknown keyword that the
    server must reject).
    """
    def test_list_switches(self):
        """(successful) call to list_switches"""
        assert C.switch.list() == [u'empty-switch', u'stock_switch_0']
    def test_show_switch(self):
        """(successful) call to show_switch"""
        assert C.switch.show('empty-switch') == {
            u'name': u'empty-switch', u'ports': [],
            u'capabilities': ['nativeless-trunk-mode']}
    def test_show_switch_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.switch.show('dell-/%]-01')
    def test_delete_switch(self):
        """(successful) call to switch_delete"""
        assert C.switch.delete('empty-switch') is None
    def test_delete_switch_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.switch.delete('nexus/%]-01')
    def test_switch_register(self):
        """test various cases of switch register"""
        switchinfo = {
            "type": "http://schema.massopencloud.org/haas/v0/switches/mock",
            "username": "name",
            "password": "<PASSWORD>",
            "hostname": "example.com"}
        subtype = "http://schema.massopencloud.org/haas/v0/switches/mock"
        assert C.switch.register('mytestswitch', subtype, switchinfo) is None
    def test_switch_register_fail(self):
        """test various cases of switch register"""
        # "unknown_keyword" is not a valid field for the mock switch type,
        # so registration must be rejected.
        switchinfo = {
            "type": "http://schema.massopencloud.org/haas/v0/switches/mock",
            "username": "name",
            "password": "<PASSWORD>",
            "unknown_keyword": "example.com"}
        subtype = "http://schema.massopencloud.org/haas/v0/switches/mock"
        with pytest.raises(FailedAPICallException):
            C.switch.register('mytestswitch', subtype, switchinfo)
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class Test_port:
    """Tests for port-related client calls.

    Covers port register/delete, connecting and detaching NICs, showing
    port details and reverting a port's networks.
    """
    def test_port_register(self):
        """(successful) call to port_register."""
        assert C.port.register('stock_switch_0', 'gi1/1/1') is None
    def test_port_dupregister(self):
        """Duplicate call to port_register should raise an error."""
        C.port.register('stock_switch_0', 'gi1/1/2')
        with pytest.raises(FailedAPICallException):
            C.port.register('stock_switch_0', 'gi1/1/2')
    def test_port_register_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.port.register('mock-/%[-01', 'gi1/1/1')
    def test_port_delete(self):
        """(successful) call to port_delete"""
        C.port.register('stock_switch_0', 'gi1/1/3')
        assert C.port.delete('stock_switch_0', 'gi1/1/3') is None
    def test_port_delete_error(self):
        """Deleting a port twice should fail with an error."""
        C.port.register('stock_switch_0', 'gi1/1/4')
        C.port.delete('stock_switch_0', 'gi1/1/4')
        with pytest.raises(FailedAPICallException):
            C.port.delete('stock_switch_0', 'gi1/1/4')
    def test_port_delete_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.port.delete('mock/%]-01', 'gi1/1/4')
    def test_port_connect_nic(self):
        """(successfully) Call port_connect_nic on an existent port"""
        C.port.register('stock_switch_0', 'gi1/1/5')
        assert C.port.connect_nic(
            'stock_switch_0', 'gi1/1/5', 'free_node_0', 'boot-nic'
        ) is None
        # port already connected
        with pytest.raises(FailedAPICallException):
            C.port.connect_nic('mock-01', 'gi1/1/5', 'free_node_1', 'boot-nic')
        # port AND free_node_0 already connected
        with pytest.raises(FailedAPICallException):
            C.port.connect_nic('mock-01', 'gi1/1/5', 'free_node_0', 'boot-nic')
    def test_port_connect_nic_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.port.connect_nic('mock/%]-01', 'gi1/1/6', 'node-09', 'eth0')
    def test_port_detach_nic(self):
        """(succesfully) call port_detach_nic."""
        C.port.register('stock_switch_0', 'gi1/1/7')
        C.port.connect_nic(
            'stock_switch_0', 'gi1/1/7', 'free_node_1', 'boot-nic')
        assert C.port.detach_nic('stock_switch_0', 'gi1/1/7') is None
    def test_port_detach_nic_error(self):
        """port_detach_nic on a port w/ no nic should error."""
        C.port.register('stock_switch_0', 'gi1/1/8')
        with pytest.raises(FailedAPICallException):
            C.port.detach_nic('stock_switch_0', 'gi1/1/8')
    def test_port_detach_nic_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.port.detach_nic('mock/%]-01', 'gi1/1/8')
    def test_show_port(self):
        """Test show_port"""
        # do show port on a port that's not registered yet
        with pytest.raises(FailedAPICallException):
            C.port.show('stock_switch_0', 'gi1/1/8')
        C.port.register('stock_switch_0', 'gi1/1/8')
        assert C.port.show('stock_switch_0', 'gi1/1/8') == {}
        C.port.connect_nic(
            'stock_switch_0', 'gi1/1/8', 'free_node_1', 'boot-nic')
        assert C.port.show('stock_switch_0', 'gi1/1/8') == {
            'node': 'free_node_1',
            'nic': 'boot-nic',
            'networks': {}}
        # do show port on a non-existing switch
        with pytest.raises(FailedAPICallException):
            C.port.show('unknown-switch', 'unknown-port')
    def test_show_port_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.port.show('mock/%]-01', 'gi1/1/8')
    def test_port_revert(self):
        """Revert port should run without error and remove all networks"""
        C.node.connect_network(
            'runway_node_0', 'nic-with-port', 'runway_pxe', 'vlan/native')
        deferred.apply_networking()
        assert C.port.show('stock_switch_0', 'runway_node_0_port') == {
            'node': 'runway_node_0',
            'nic': 'nic-with-port',
            'networks': {'vlan/native': 'runway_pxe'}}
        response = C.port.port_revert('stock_switch_0', 'runway_node_0_port')
        assert uuid_pattern.match(response['status_id'])
        deferred.apply_networking()
        assert C.port.show('stock_switch_0', 'runway_node_0_port') == {
            'node': 'runway_node_0',
            'nic': 'nic-with-port',
            'networks': {}}
    def test_port_revert_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.port.port_revert('mock/%]-01', 'gi1/0/1')
@pytest.mark.usefixtures('fail_on_log_warnings', 'database_authentication',
                         'fresh_database', 'server_init', 'initial_admin',
                         'dummy_verify')
class Test_user:
    """Tests for user-related client calls.

    Uses database authentication and the `initial_admin` fixture, which
    pre-creates the admin account the client authenticates as.
    """
    def test_list_users(self):
        """Test for getting list of user"""
        assert C.user.list() == {
            u'hil_user': {u'is_admin': True, u'projects': []}
        }
    def test_user_create(self):
        """ Test user creation. """
        assert C.user.create('billy', '<PASSWORD>', is_admin=True) is None
        assert C.user.create('bobby', '<PASSWORD>', is_admin=False) is None
    def test_user_create_duplicate(self):
        """ Test duplicate user creation. """
        C.user.create('bill', '<PASSWORD>', is_admin=False)
        with pytest.raises(FailedAPICallException):
            C.user.create('bill', '<PASSWORD>', is_admin=False)
    def test_user_create_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.user.create('b/%]ill', '<PASSWORD>', is_admin=True)
    def test_user_delete(self):
        """ Test user deletion. """
        C.user.create('jack', '<PASSWORD>', is_admin=True)
        assert C.user.delete('jack') is None
    def test_user_delete_error(self):
        """ Test error condition in user deletion. """
        C.user.create('Ata', '<PASSWORD>', is_admin=True)
        C.user.delete('Ata')
        with pytest.raises(FailedAPICallException):
            C.user.delete('Ata')
    def test_user_delete_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.user.delete('A/%]ta')
    def test_user_add(self):
        """ test adding a user to a project. """
        C.project.create('proj-sample')
        C.user.create('Sam', '<PASSWORD>', is_admin=False)
        assert C.user.add('Sam', 'proj-sample') is None
    def test_user_add_error(self):
        """Test error condition while granting user access to a project."""
        C.project.create('test-proj01')
        C.user.create('sam01', '<PASSWORD>', is_admin=False)
        C.user.add('sam01', 'test-proj01')
        with pytest.raises(FailedAPICallException):
            C.user.add('sam01', 'test-proj01')
    def test_user_add_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.user.add('sam/%]01', 'test-proj01')
    def test_user_remove(self):
        """Test revoking user's access to a project. """
        C.project.create('test-proj02')
        C.user.create('sam02', '<PASSWORD>', is_admin=False)
        C.user.add('sam02', 'test-proj02')
        assert C.user.remove('sam02', 'test-proj02') is None
    def test_user_remove_error(self):
        """Test error condition while revoking user access to a project. """
        C.project.create('test-proj03')
        C.user.create('sam03', '<PASSWORD>', is_admin=False)
        C.user.create('xxxx', '<PASSWORD>', is_admin=False)
        C.user.add('sam03', 'test-proj03')
        C.user.remove('sam03', 'test-proj03')
        with pytest.raises(FailedAPICallException):
            # NOTE(review): 'test_proj03' (underscore) is not the project
            # created above ('test-proj03'). The call fails either way, but
            # confirm whether the hyphenated spelling was intended here.
            C.user.remove('sam03', 'test_proj03')
    def test_user_remove_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.user.remove('sam/%]03', 'test-proj03')
    def test_user_set_admin(self):
        """Test changing a user's admin status """
        C.user.create('jimmy', '12345', is_admin=False)
        C.user.create('jimbo', '678910', is_admin=True)
        assert C.user.set_admin('jimmy', True) is None
        assert C.user.set_admin('jimbo', False) is None
    def test_user_set_admin_demote_error(self):
        """Tests error condition while editing a user who doesn't exist. """
        C.user.create('gary', '12345', is_admin=True)
        C.user.delete('gary')
        with pytest.raises(FailedAPICallException):
            C.user.set_admin('gary', False)
    def test_user_set_admin_promote_error(self):
        """Tests error condition while editing a user who doesn't exist. """
        C.user.create('hugo', '12345', is_admin=False)
        C.user.delete('hugo')
        with pytest.raises(FailedAPICallException):
            C.user.set_admin('hugo', True)
    def test_user_set_admin_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.user.set_admin('hugo/%]', True)
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class Test_network:
    """Tests for network-related client calls.

    Covers listing networks and their attachments, show/create/delete,
    and granting/revoking a project's access to a network.
    """
    def test_network_list(self):
        """ Test list of networks. """
        assert C.network.list() == {
            u'manhattan_provider':
            {
                u'network_id': u'manhattan_provider_chan',
                u'projects': [u'manhattan']
            },
            u'runway_provider':
            {
                u'network_id': u'runway_provider_chan',
                u'projects': [u'runway']
            },
            u'pub_default':
            {
                u'network_id': u'1002',
                u'projects': None
            },
            u'manhattan_pxe':
            {
                u'network_id': u'1004',
                u'projects': [u'manhattan']
            },
            u'stock_int_pub':
            {
                u'network_id': u'1001',
                u'projects': None
            },
            u'stock_ext_pub':
            {
                u'network_id': u'ext_pub_chan',
                u'projects': None
            },
            u'runway_pxe':
            {
                u'network_id': u'1003',
                u'projects': [u'runway']}
        }
    def test_list_network_attachments(self):
        """ Test list of network attachments """
        # Both networks start with no attachments.
        assert C.network.list_network_attachments(
            "manhattan_provider", "all") == {}
        assert C.network.list_network_attachments(
            "runway_provider", "runway") == {}
        C.node.connect_network('manhattan_node_0', 'nic-with-port',
                               'manhattan_provider', 'vlan/native')
        deferred.apply_networking()
        assert C.network.list_network_attachments(
            "manhattan_provider", "all") == {
            'manhattan_node_0': {'project': 'manhattan',
                                 'nic': 'nic-with-port',
                                 'channel': 'vlan/native'}
            }
    def test_network_show(self):
        """ Test show network. """
        assert C.network.show('runway_provider') == {
            u'access': [u'runway'],
            u'channels': [u'vlan/native', u'vlan/runway_provider_chan'],
            u'name': u'runway_provider',
            u'owner': u'admin',
            u'connected-nodes': {},
        }
    def test_network_show_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.network.show('net/%]-01')
    def test_network_create(self):
        """ Test create network. """
        assert C.network.create(
            'net-abcd', 'manhattan', 'manhattan', '') is None
    def test_network_create_duplicate(self):
        """ Test error condition in create network. """
        C.network.create('net-123', 'manhattan', 'manhattan', '')
        with pytest.raises(FailedAPICallException):
            C.network.create('net-123', 'manhattan', 'manhattan', '')
    def test_network_create_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.network.create('net/%]-123', 'manhattan', 'manhattan', '')
    def test_network_delete(self):
        """ Test network deletion """
        C.network.create('net-xyz', 'manhattan', 'manhattan', '')
        assert C.network.delete('net-xyz') is None
    def test_network_delete_duplicate(self):
        """ Test error condition in delete network. """
        C.network.create('net-xyz', 'manhattan', 'manhattan', '')
        C.network.delete('net-xyz')
        with pytest.raises(FailedAPICallException):
            C.network.delete('net-xyz')
    def test_network_delete_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.network.delete('net/%]-xyz')
    def test_network_grant_project_access(self):
        """ Test granting a project access to a network. """
        C.network.create('newnet01', 'admin', '', '')
        assert C.network.grant_access('runway', 'newnet01') is None
        assert C.network.grant_access('manhattan', 'newnet01') is None
    def test_network_grant_project_access_error(self):
        """ Test error while granting a project access to a network. """
        C.network.create('newnet04', 'admin', '', '')
        C.network.grant_access('runway', 'newnet04')
        with pytest.raises(FailedAPICallException):
            C.network.grant_access('runway', 'newnet04')
    def test_network_grant_project_access_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.network.grant_access('proj/%]-02', 'newnet04')
    def test_network_revoke_project_access(self):
        """ Test revoking a project's access to a network. """
        C.network.create('newnet02', 'admin', '', '')
        C.network.grant_access('runway', 'newnet02')
        assert C.network.revoke_access('runway', 'newnet02') is None
    def test_network_revoke_project_access_error(self):
        """
        Test error condition when revoking project's access to a network.
        """
        C.network.create('newnet03', 'admin', '', '')
        C.network.grant_access('runway', 'newnet03')
        C.network.revoke_access('runway', 'newnet03')
        with pytest.raises(FailedAPICallException):
            C.network.revoke_access('runway', 'newnet03')
    def test_network_revoke_project_access_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.network.revoke_access('proj/%]-02', 'newnet03')
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init')
class Test_extensions:
    """Tests for extension-related client calls.

    Only checks that the server reports the extensions loaded by the
    `configure` fixture.
    """
    def test_extension_list(self):
        """ Test listing active extensions. """
        assert C.extensions.list_active() == [
            "hil.ext.auth.null",
            "hil.ext.network_allocators.vlan_pool",
            "hil.ext.switches.mock",
        ]
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class TestShowNetworkingAction:
    """Tests for the show_networking_action client call.

    Verifies the PENDING -> DONE status transition around
    deferred.apply_networking(), and the failure case for an unknown id.
    """
    def test_show_networking_action(self):
        """(successful) call to show_networking_action"""
        response = C.node.connect_network(
            'manhattan_node_0', 'nic-with-port',
            'manhattan_provider', 'vlan/native')
        status_id = response['status_id']
        response = C.node.show_networking_action(status_id)
        assert response == {'status': 'PENDING',
                            'node': 'manhattan_node_0',
                            'nic': 'nic-with-port',
                            'type': 'modify_port',
                            'channel': 'vlan/native',
                            'new_network': 'manhattan_provider'}
        deferred.apply_networking()
        response = C.node.show_networking_action(status_id)
        assert response['status'] == 'DONE'
    def test_show_networking_action_fail(self):
        """(unsuccessful) call to show_networking_action"""
        with pytest.raises(FailedAPICallException):
            C.node.show_networking_action('non-existent-entry')
| StarcoderdataPython |
8135213 | #!/usr/bin/env python3.6
import random
from credential import Credential
from user import User
##credential
def create_credential(fname,lname,uname,pnumber,email,password):
    """Build and return a new Credential from the given account details."""
    return Credential(fname,lname,uname,pnumber,email,password)
def save_credential(credential):
    '''
    Persist the given credential via its save_credential() method.

    Bug fix: the original referenced the bound method without calling it
    (`credential.save_credential`, no parentheses), so nothing was ever
    saved; the call now actually happens.
    '''
    credential.save_credential()
def del_credential(credential):
    '''
    Delete the given credential via its delete_credential() method.

    Bug fix: the original referenced the bound method without calling it
    (`credential.delete_credential`, no parentheses), so nothing was ever
    deleted; the call now actually happens.
    '''
    credential.delete_credential()
def find_credential(number):
    """Look up and return the credential stored under *number*."""
    match = Credential.find_by_number(number)
    return match
def check_existing_credentials(number):
    """Return True if a credential with this number exists, else False."""
    exists = Credential.credential_exist(number)
    return exists
def display_credentials():
    """Return all saved credentials (delegates to Credential.display_contacts)."""
    contacts = Credential.display_contacts()
    return contacts
##user
def create_user(a_name,u_name,password):
    """Build and return a new User from the given details."""
    return User(a_name,u_name,password)
def save_newuser(user):
    '''
    Persist the given user via its save_newuser() method.

    Bug fix: the original called `User.new_user.save_newuser()`, which
    raises AttributeError (the User class has no `new_user` attribute) and
    ignored the `user` argument entirely; the passed-in user is now saved.
    '''
    user.save_newuser()
def test_deleteuser(user):
    '''
    Delete the given user via its deleteuser() method.

    Bug fix: the original called `User.new_user.deleteuser()`, which raises
    AttributeError (the User class has no `new_user` attribute) and ignored
    the `user` argument; the passed-in user is now deleted.
    NOTE(review): the `test_` name clashes with test-runner naming
    conventions -- confirm whether `delete_user` was intended (kept for
    backward compatibility).
    '''
    user.deleteuser()
def find_userbyappname(user):
    '''
    Find a stored password entry by application name.

    NOTE(review): despite the parameter name `user`, the argument is passed
    straight to User.find_credentialbyappname, which presumably expects an
    application-name string -- confirm against the User class.
    '''
    return User.find_credentialbyappname(user)
def user_exist(appname):
    """Return whether a user with the given app name exists."""
    found = User.user_exist(appname)
    return found
def display_alluser():
    """Return every saved user (delegates to User.display_allusers)."""
    users = User.display_allusers()
    return users
##main function
def main():
    """Interactive console loop for the password vault.

    NOTE(review): several branches reference names that are never defined at
    module scope (`a_name`, `display_allusers`, `display_allcredentials`,
    `credential_exist`, `find_credentialbyappname`, `save_newcredential`)
    and will raise NameError if reached -- confirm against the helpers
    defined above (`display_alluser`, `check_existing_credentials`, etc.).
    Also note `short_code` is read once before the loop, so an unmatched
    code spins on the same branch.
    """
    print("Hello Welcome to *PASSWORD_VAULT*. What is your name?")
    user= input()
    print(f"Hello {user}. what would you like to do?.\n To proceed,Use these short codes: \n ca - create an account using your own password \n ra - create an account using a randomly generated password \n ex - exit the application")
    print('\n')
    short_code = input().lower()
    while True:
        if short_code == 'ca':
            print("Create an account using your own password")
            print("*"*10)
            print("First Name ")
            fname = input()
            print("Last Name ")
            lname = input()
            print("User Name ")
            uname = input()
            print("Phone Number ")
            pnumber = input()
            print("Email ")
            email = input()
            print("Password ")
            password = input()
            ##create and save a new credential
            save_credential(create_credential(fname,lname,uname,pnumber,email,password))
            print('\n')
            print(f"New Account {uname} successfully created!")
            print('\n')
            print("To proceed use the short code: \n lg - login into account \n ex - to exit the application")
            short_codetwo = input().lower()
            if short_codetwo == 'lg':
                print("-"*10)
                print("LogIn")
                print("-"*10)
                print("To log in, input your username and password")
                print("UserName")
                user_namein = input()
                print("Password")
                pass_wordin = input()
                ###verifying the username and password
                if user_namein == uname and pass_wordin == password:
                    print("Access granted.\n To proceed use the following shortcodes: \n ca - create an account using your own password \n ra - create an account using a randomly generated password \n ex - exit the application")
                    short_codethree = input().lower()
                    if short_codethree == 'cc':
                        print("_"*10)
                        print("username")
                        u_name = input()
                        print("Password")
                        password = input()
                        ###create and save a new user
                        # NOTE(review): `a_name` is never assigned in this
                        # branch -- this line raises NameError if reached.
                        save_newuser(create_user(a_name,u_name,password))
                        print('\n')
                        print("-"*10)
                        print(f"New Credential for {a_name} created.")
                        print('\n')
                        print("-"*10)
                        continue
                    elif short_codethree == 'dc':
                        # NOTE(review): `display_allusers` / `display_allcredentials`
                        # are undefined; the helpers above are `display_alluser`
                        # and `display_credentials`.
                        if display_allusers():
                            print("Here is a list of all your contacts")
                            print('\n')
                            for user in display_allcredentials():
                                print(f"{user.a_name} {user.u_name} {user.password}")
                                print('\n')
                        else:
                            print('\n')
                            print("You do not seem to have any user saved yet.")
                            print('\n')
                    elif short_codethree == 'fc':
                        print("Enter the application name for the credential you want to search for.")
                        search_applicationname =input()
                        # NOTE(review): `credential_exist` / `find_credentialbyappname`
                        # are undefined at module scope -- see the helpers above.
                        if credential_exist(search_applicationname):
                            search_credential = find_credentialbyappname(search_applicationname)
                            print(f"{search_applicationname.a_name} {search_applicationname.u_name} {search_applicationname.password}")
                        else:
                            print("That credential doesnot exist.")
                else:
                    print("Wrong username or password.Please try again.")
            elif short_codetwo == 'ex':
                print("Bye Bye!")
                break
            else:
                print("I really didn't get that.Please use the short codes")
        elif short_code == 'ra':
            print("Create an account using a randomly generated password")
            print("-"*10)
            print("First Name ")
            fname = input()
            print("Last Name ")
            lname = input()
            print("User Name ")
            uname = input()
            print("Phone Number ")
            pnumber = input()
            print("Email ")
            email = input()
            # Generate an 8-character lowercase-alphanumeric password.
            chars = "abcdefghijklmnopqrstuvwxyz1234567890"
            password = "".join(random.choice(chars) for _ in range(8))
            print(f"\nYour password is: **{password}**")
            save_credential(create_credential(fname,lname,uname,pnumber,email,password))
            print('\n')
            print(f"New Account {uname} successfully created!")
            print('\n')
            print("To proceed use the short code: \n lg - login into account \n ex - to exit the application")
            short_codetwo = input().lower()
            if short_codetwo == 'lg':
                print("-"*10)
                print("LogIn")
                print("-"*10)
                print("To log in, input your username and password")
                print("UserName")
                user_namein = input()
                print("Password")
                pass_wordin = input()
                ###verifying the username and password
                if user_namein == uname and pass_wordin == password:
                    print("*ACCESS GRANTED*.\n To proceed use the following shortcodes: \n cc - create a new credential \n dc - display user \n fc - find a credential by inputing the appname \n rc - to delete a credential \n ex - exit the application")
                    short_codethree = input().lower()
                    if short_codethree == 'cc':
                        print("-"*10)
                        print("To create a new user,Input the following.")
                        print("-"*10)
                        print("Application Name")
                        a_name = input()
                        print("Account Name")
                        u_name = input()
                        print("Password")
                        password = input()
                        ###create and save a new credential
                        # NOTE(review): `save_newcredential` is undefined, and
                        # create_credential takes six arguments -- this line
                        # raises NameError/TypeError if reached (probably meant
                        # save_newuser(create_user(...))).
                        save_newcredential(create_credential(a_name,u_name,password))
                        print('\n')
                        print("-"*10)
                        print(f"New Credential for {u_name} created.")
                        print('\n')
                        print("-"*10)
                        continue
                    elif short_codethree == 'dc':
                        # NOTE(review): `display_allcredentials` is undefined;
                        # the helper above is `display_credentials`.
                        if display_allcredentials():
                            print("Here is a list of all your contacts")
                            print('\n')
                            for user in display_allcredentials():
                                print(f"{user.a_name} {user.u_name} {user.password}")
                                print('\n')
                        else:
                            print('\n')
                            print("You do not seem to have any user saved yet.")
                            print('\n')
                    elif short_codethree == 'fc':
                        print("Enter the application name for the credential you want to search for.")
                        search_applicationname =input()
                        # NOTE(review): `credential_exist` / `find_credentialbyappname`
                        # are undefined at module scope -- see the helpers above.
                        if credential_exist(search_applicationname):
                            search_credential = find_credentialbyappname(search_applicationname)
                            print(f"{search_applicationname.a_name} {search_applicationname.u_name} {search_applicationname.password}")
                        else:
                            print("That credential doesnot exist.")
                else:
                    print("Wrong username or password.Please try again.")
            elif short_codetwo == 'ex':
                print("Goodbye!")
                break
            else:
                print("I really didn't get that.Please use the short codes")
            break
        elif short_code == "ex":
            print("Goodbye!")
            break
        else:
            print("I really didn't get that.Please use the short codes")
            break
if __name__ == '__main__':
main() | StarcoderdataPython |
8023866 | from functools import reduce
def is_palindrome(n):
    """Return True if the decimal digits of *n* read the same in both directions."""
    digits = [int(ch) for ch in str(n)]
    rebuilt = 0
    # Rebuild the number from its digits in reverse order.
    for d in reversed(digits):
        rebuilt = rebuilt * 10 + d
    return n == rebuilt
# Self-test: list every palindromic integer below 1000, then verify the
# results for 1..199 against a known-good list.
palindromes = list(filter(is_palindrome, range(1, 1000)))
print('1~1000:', palindromes)
expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101,
            111, 121, 131, 141, 151, 161, 171, 181, 191]
if list(filter(is_palindrome, range(1, 200))) == expected:
    print('测试成功!')
else:
    print('测试失败!')
| StarcoderdataPython |
9799515 | #
# Copyright (C) 2012-2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""convert PRCS revisions to Mercurial changesets
"""
import sys
import re
import os
import hglib
from string import join
from prcslib import PrcsProject
class Converter(object):
    """Replays the revisions of a PRCS project as Mercurial commits.

    Revisions are converted in date order via python-hglib; renames,
    named branches (non-numeric PRCS major versions) and merge parents
    are preserved where the data allows.  Written for Python 2
    (``iteritems``, ``string.join``).
    """

    def __init__(self, name, verbose=False):
        """Construct a Converter object."""
        self.name = name
        self.verbose = verbose
        # PRCS revision id (str) -> Mercurial node committed for it.
        self.revisionmap = {}
        # Files already warned about, so each symlink warns only once.
        self.symlink_warned = {}
        self.prcs = PrcsProject(self.name)
        self.revisions = self.prcs.revisions()
        self.hgclient = hglib.open(".")

    def convert(self):
        """Convert all revisions in a project."""
        # Replay in chronological order so parents exist before children.
        list = sorted(self.revisions, key=lambda id:
                      self.revisions[id]["date"])
        for i in list:
            self.convertrevision(i)

    def convertrevision(self, version):
        """Convert one PRCS revision into a single Mercurial commit."""
        version = str(version)
        if self.revisions[version].get("deleted"):
            sys.stderr.write("Ignored deleted version {0}\n".format(version))
            return
        if self.verbose:
            sys.stderr.write("Converting version {0}\n".format(version))
        descriptor = self.prcs.descriptor(version)
        parent = descriptor.parentversion()
        if parent is None:
            # It is a root revision.
            self.hgclient.update("null")
            parent_filemap = {}
        else:
            # Walk back over deleted parents to the nearest surviving one.
            while self.revisions[str(parent)].get("deleted"):
                parent.minor -= 1
            parent = str(parent)
            if self.revisionmap.get(parent) is None:
                self.convertrevision(parent)
                # TODO: If the parent is not converted, do it here.
                # NOTE(review): as recovered, this exit follows the recursive
                # conversion unconditionally — verify the original control
                # flow before relying on this branch.
                sys.exit("Parent revision {0} not converted"
                         .format(parent))
            self.hgclient.update(self.revisionmap[parent])
            parent_filemap = self.revisions[parent].get("filemap")
            if parent_filemap is None:
                sys.exit("No parent filemap")
                # NOTE(review): the two lines below are unreachable after
                # sys.exit as recovered; they look like an alternative
                # rebuild path — confirm against the original source.
                parent_descriptor = self.prcs.descriptor(parent)
                parent_filemap = _makefilemap(parent_descriptor.files())
        # Handles merges.
        mergeparents = descriptor.mergeparents()
        if mergeparents:
            self._handlemerge(mergeparents)
        # Makes the working directory clean: remove untracked files and
        # revert everything else before checking out the PRCS content.
        for i in self.hgclient.status():
            if i[0] == "?":
                os.unlink(i[1])
        self.hgclient.revert([], "null", all=True)
        self.prcs.checkout(version)
        files = descriptor.files()
        filemap = _makefilemap(files)
        self.revisions[version]["filemap"] = filemap
        # Checks for files.
        addlist = []
        for name, file in files.iteritems():
            # We cannot include symbolic links in Mercurial repositories.
            if "symlink" in file:
                if not self.symlink_warned.get(name, False):
                    sys.stderr.write("{0}: warning: symbolic link\n"
                                     .format(name))
                    self.symlink_warned[name] = True
            else:
                file_id = file.get("id")
                if file_id is None:
                    sys.exit("{0}: Missing file identity".format(name))
                parent_name = parent_filemap.get(file_id)
                if parent_name is not None and parent_name != name:
                    # Same PRCS file id under a new name: record a rename.
                    if self.verbose:
                        sys.stderr.write("{0}: renamed from {1}\n"
                                         .format(name, parent_name))
                    self.hgclient.copy(parent_name, name, after=True)
                else:
                    addlist.append(name)
        if addlist:
            self.hgclient.add(addlist)
        # Sets the branch for the following commit: a non-numeric PRCS
        # major version becomes a named Mercurial branch.
        version = descriptor.version()
        branch = "default"
        if not re.match("[0-9]+$", version.major):
            branch = version.major
        self.hgclient.branch(branch, force=True)
        version = str(version)
        message = descriptor.message()
        if not message:
            message = "(empty commit message)"
        revision = self.hgclient.commit(message=message,
                                        date=self.revisions[version]["date"],
                                        user=self.revisions[version]["author"])
        self.revisionmap[version] = revision[1]
        # Keeps the revision identifier as a local tag for convenience.
        self.hgclient.tag([version], local=True, force=True)

    def _handlemerge(self, mergeparents):
        """Handle merges."""
        if len(mergeparents) > 1:
            # Only one merge parent can be recorded; keep the last on record.
            sys.stderr.write("warning: multiple merge parents: {0}\n"
                             .format(join(mergeparents, ", ")))
            sys.stderr.write("warning: picked {0} on record\n"
                             .format(mergeparents[-1]))
        self.hgclient.merge(self.revisionmap[mergeparents[-1]],
                            tool="internal:local",
                            cb=hglib.merge.handlers.noninteractive)
def _makefilemap(files):
filemap = {}
for name, file in files.iteritems():
id = file.get("id")
if id is not None:
if filemap.get(id) is not None:
sys.stderr.write(
"warning: Duplicate file identifier in a revision\n")
filemap[id] = name
return filemap
def convert(name, verbose=False):
    """Convert all revisions of PRCS project *name* into Mercurial commits."""
    Converter(name, verbose=verbose).convert()
| StarcoderdataPython |
3583278 | #!/usr/bin/env python
from __future__ import print_function
from keras.losses import binary_crossentropy, sparse_categorical_crossentropy
from keras.losses import categorical_crossentropy, mean_squared_error
from keras.optimizers import SGD, Adam, Adadelta, Adagrad
from keras.optimizers import Adamax, RMSprop, Nadam
from keras.activations import relu, sigmoid
from sklearn.model_selection import train_test_split as splt
from talos.scan.Scan import Scan
from talos.commands.reporting import Reporting
import talos as ta
# single values
def values_single_params():
    """Hyperparameter space with exactly one choice per parameter."""
    return dict(
        lr=[1],
        first_neuron=[4],
        hidden_layers=[2],
        batch_size=[100],
        epochs=[2],
        dropout=[0],
        shapes=['brick'],
        optimizer=[Adam],
        losses=[binary_crossentropy,
                sparse_categorical_crossentropy,
                categorical_crossentropy,
                mean_squared_error],
        activation=['relu'],
        last_activation=['softmax'])
# lists of values
def values_list_params():
    """Hyperparameter space with several alternatives per parameter."""
    return dict(
        lr=[1, 2],
        first_neuron=[4, 4],
        hidden_layers=[2, 2],
        batch_size=[100, 200],
        epochs=[1, 2],
        dropout=[0, 0.1],
        shapes=['brick', 'funnel', 'triangle', 0.2],
        optimizer=[Adam, Adagrad, Adamax, RMSprop, Adadelta, Nadam, SGD],
        losses=['binary_crossentropy',
                'sparse_categorical_crossentropy',
                'categorical_crossentropy',
                'mean_squared_error'],
        activation=['relu', 'elu'],
        last_activation=['softmax'])
# range of values
def values_range_params():
    """Hyperparameter space expressed as (min, max, steps) range tuples."""
    return dict(
        lr=(0.5, 5, 10),
        first_neuron=(4, 100, 5),
        hidden_layers=(0, 5, 5),
        batch_size=(200, 300, 10),
        epochs=(1, 5, 4),
        dropout=(0, 0.5, 5),
        shapes=['funnel'],
        optimizer=[Nadam],
        losses=[binary_crossentropy,
                sparse_categorical_crossentropy,
                categorical_crossentropy,
                mean_squared_error],
        activation=[relu],
        last_activation=[sigmoid])
"""
The tests below have to serve several purpose:
- test possible input methods to params dict
- test binary, multi class, multi label and continuous problems
- test all Scan arguments
Each problem type is presented as a Class, and contains three
experiments using single, list, or range inputs. There is an
effort to test as many scenarios as possible here, so be
inventive / experiment! Doing well with this part of the testing,
there is a healthy base for a more serious approach to ensuring
procedural integrity.
"""
def get_params(task):
    """Build the three params dictionaries with the loss narrowed to *task*.

    Helper that allows the tests to feed from the same params dictionaries.

    USE: values_single, values_list, values_range = get_params(0)
    0 = binary
    1 = 1d multi class
    2 = 2d multi label
    3 = continuous / regression
    """
    # first create the params dicts
    values_single = values_single_params()
    values_list = values_list_params()
    values_range = values_range_params()
    # then limit the losses according to the prediction task; the freshly
    # built dicts already hold the full loss lists, so there is no need to
    # call each constructor a second time just to read them back.
    values_single['losses'] = [values_single['losses'][task]]
    values_list['losses'] = [values_list['losses'][task]]
    values_range['losses'] = [values_range['losses'][task]]
    return values_single, values_list, values_range
class BinaryTest:
    """Exercises Scan on a binary-classification task (cervical cancer)."""

    def __init__(self):
        # read the params dictionary with the right loss (0 = binary)
        self.values_single, self.values_list, self.values_range = get_params(0)
        # prepare the data for the experiment; truncate for speed
        self.x, self.y = ta.templates.datasets.cervical_cancer()
        self.x = self.x[:300]
        self.y = self.y[:300]
        self.model = ta.templates.models.cervical_cancer
        # split validation data
        self.x_train, self.x_val, self.y_train, self.y_val = splt(self.x,
                                                                  self.y,
                                                                  test_size=0.2)

    def values_single_test(self):
        # Minimal Scan invocation: one value per hyperparameter.
        print("BinaryTest : Running values_single_test...")
        Scan(self.x,
             self.y,
             params=self.values_single,
             model=ta.templates.models.cervical_cancer)

    def values_list_test(self):
        # Scan with explicit validation data and most optional args set.
        print("BinaryTest : Running values_list_test...")
        Scan(self.x_train,
             self.y_train,
             x_val=self.x_val,
             y_val=self.y_val,
             params=self.values_list,
             round_limit=5,
             dataset_name='BinaryTest',
             experiment_no='000',
             model=ta.templates.models.cervical_cancer,
             random_method='crypto_uniform',
             seed=2423,
             search_method='linear',
             reduction_method='correlation',
             reduction_interval=2,
             reduction_window=2,
             reduction_threshold=0.2,
             reduction_metric='val_loss',
             reduce_loss=True,
             last_epoch_value=True,
             clear_tf_session=False,
             disable_progress_bar=True,
             debug=True)

    # comprehensive
    def values_range_test(self):
        # Scan over ranges with heavy grid downsampling and a permutation
        # filter capping total network size.
        print("BinaryTest : Running values_range_test...")
        Scan(self.x_train,
             self.y_train,
             params=self.values_range,
             model=ta.templates.models.cervical_cancer,
             grid_downsample=0.0001,
             permutation_filter=lambda p: p['first_neuron'] * p['hidden_layers'] < 220,
             random_method='sobol',
             reduction_method='correlation',
             reduction_interval=2,
             reduction_window=2,
             reduction_threshold=0.2,
             reduction_metric='val_acc',
             reduce_loss=False,
             debug=True)
class MultiLabelTest:
    """Exercises Scan on a multi-label task (iris)."""

    def __init__(self):
        # read the params dictionary with the right loss (2 = 2d multi label)
        self.values_single, self.values_list, self.values_range = get_params(2)
        self.x, self.y = ta.templates.datasets.iris()
        self.x_train, self.x_val, self.y_train, self.y_val = splt(self.x,
                                                                  self.y,
                                                                  test_size=0.2)

    def values_single_test(self):
        # Minimal Scan invocation: one value per hyperparameter.
        print("MultiLabelTest : Running values_single_test...")
        Scan(self.x,
             self.y,
             params=self.values_single,
             model=ta.templates.models.iris)

    def values_list_test(self):
        # Scan with explicit validation data and most optional args set.
        print("MultiLabelTest : Running values_list_test...")
        Scan(self.x,
             self.y,
             x_val=self.x_val,
             y_val=self.y_val,
             params=self.values_list,
             round_limit=5,
             dataset_name='MultiLabelTest',
             experiment_no='000',
             model=ta.templates.models.iris,
             random_method='crypto_uniform',
             seed=2423,
             search_method='linear',
             permutation_filter=lambda p: p['first_neuron'] * p['hidden_layers'] < 9,
             reduction_method='correlation',
             reduction_interval=2,
             reduction_window=2,
             reduction_threshold=0.2,
             reduction_metric='val_loss',
             reduce_loss=True,
             last_epoch_value=True,
             clear_tf_session=False,
             disable_progress_bar=True,
             debug=True)

    # comprehensive
    def values_range_test(self):
        # Scan over ranges with heavy grid downsampling.
        print("MultiLabelTest : Running values_range_test...")
        Scan(self.x,
             self.y,
             params=self.values_range,
             model=ta.templates.models.iris,
             grid_downsample=0.0001,
             random_method='sobol',
             reduction_method='correlation',
             reduction_interval=2,
             reduction_window=2,
             reduction_threshold=0.2,
             reduction_metric='val_acc',
             reduce_loss=False,
             debug=True)
class ReportingTest:
    """Smoke-tests the Reporting API against the CSVs the Scans wrote.

    Each call below only checks that the method runs without raising;
    the return values are discarded.
    """

    def __init__(self):
        print("ReportingTest : Running Binary test...")
        r = Reporting('BinaryTest_000.csv')
        x = r.data
        x = r.correlate()
        x = r.high()
        x = r.low()
        x = r.rounds()
        x = r.rounds2high()
        x = r.best_params()
        x = r.plot_corr()
        x = r.plot_hist()
        x = r.plot_line()
        print("ReportingTest : Running MultiLabel test...")
        r = Reporting('MultiLabelTest_000.csv')
        x = r.data
        x = r.correlate()
        x = r.high()
        x = r.low()
        x = r.rounds()
        x = r.rounds2high()
        x = r.best_params()
        x = r.plot_corr()
        x = r.plot_hist()
        x = r.plot_line()
        del x
class DatasetTest:
    """Smoke-tests the template dataset and params loaders (no assertions)."""

    def __init__(self):
        print("DatasetTest : Running tests...")
        x = ta.templates.datasets.icu_mortality()
        x = ta.templates.datasets.icu_mortality(100)
        x = ta.templates.datasets.titanic()
        x = ta.templates.datasets.iris()
        x = ta.templates.datasets.cervical_cancer()
        x = ta.templates.datasets.breast_cancer()
        x = ta.templates.params.iris()
        x = ta.templates.params.breast_cancer()
| StarcoderdataPython |
6654866 | import bpy
import sys
# Output resolution applied to every scene in the loaded .blend file.
width = 800
height = 600
# NOTE(review): `n` is never used below — possibly leftover; confirm before removing.
n = 0
seed = 0
argv = sys.argv
argv = argv[argv.index("--") + 1:]  # get all args after "--"
# Apply resolution/seed and set the output path to the first script argument.
for scene in bpy.data.scenes:
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.cycles.seed = seed
    scene.render.filepath = argv[0]
# NOTE(review): placement reconstructed — assumed a single render call after
# configuring all scenes; verify against the original script.
bpy.ops.render.render(write_still=True)
| StarcoderdataPython |
1939275 | <reponame>ankur-gupta/rain
from rain.module_three.submodule_one import function_three
from rain.module_three.submodule_two import function_four
| StarcoderdataPython |
3201841 | import pygame as pg
from highscore.highscore import *
from objects.chickenwindmil import *
from objects.ammo import *
from objects.predator import *
from objects.signpost import *
from objects.chickenforeground import *
from objects.trunk import *
from objects.pumpkin import *
from objects.plane import *
from objects.leaves import *
from objects.chickenhole import *
from objects.chicken import *
from patterns.camera import *
from patterns.observer import *
from settings.settings import *
from settings.background import *
from settings.sounds import *
from settings.fonts import *
from settings.menus import *
from loops.startloop import *
from loops.gameloop import *
from loops.endloop import *
from loops.bestlistloop import *
from loops.helploop import *
| StarcoderdataPython |
5024367 | """
This code is attributed to <NAME> (@kayzliu), <NAME> (@YingtongDou)
and UIC BDSC Lab
DGFraud-TF2 (A Deep Graph-based Toolbox for Fraud Detection in TensorFlow 2.X)
https://github.com/safe-graph/DGFraud-TF2
"""
import argparse
import numpy as np
import collections
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import tensorflow as tf
from algorithms.GraphSage.GraphSage import GraphSage
from utils.data_loader import load_data_yelp
from utils.utils import preprocess_feature
# init the common args, expect the model specific args
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=717, help='random seed')
parser.add_argument('--epochs', type=int, default=5,
                    help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=512, help='batch size')
parser.add_argument('--train_size', type=float, default=0.8,
                    help='training set percentage')
parser.add_argument('--lr', type=float, default=0.5, help='learning rate')
parser.add_argument('--nhid', type=int, default=128,
                    help='number of hidden units')
parser.add_argument('--sample_sizes', type=list, default=[5, 5],
                    help='number of samples for each layer')
args = parser.parse_args()

# set seed for both NumPy and TensorFlow so runs are reproducible
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
def GraphSage_main(neigh_dict, features, labels, masks, num_classes, args):
    """Train, validate and test a GraphSage model.

    :param neigh_dict: {node_id: array of neighbor ids} for the merged graph
    :param features: dense node-feature matrix
    :param labels: column vector of node labels
    :param masks: [train_nodes, val_nodes, test_nodes] index arrays
    :param num_classes: number of label classes
    :param args: parsed CLI args (lr, nhid, epochs, batch_size, sample_sizes)
    """
    train_nodes = masks[0]
    val_nodes = masks[1]
    test_nodes = masks[2]

    # training
    def generate_training_minibatch(nodes_for_training,
                                    all_labels, batch_size):
        # Yields (minibatch, labels) pairs over a shuffled copy of the
        # training nodes, one epoch's worth per generator.
        nodes_for_epoch = np.copy(nodes_for_training)
        ix = 0
        np.random.shuffle(nodes_for_epoch)
        while len(nodes_for_epoch) > ix + batch_size:
            mini_batch_nodes = nodes_for_epoch[ix:ix + batch_size]
            batch = build_batch(mini_batch_nodes,
                                neigh_dict, args.sample_sizes)
            labels = all_labels[mini_batch_nodes]
            ix += batch_size
            yield (batch, labels)
        # NOTE(review): [ix:-1] silently drops the last node of the final
        # partial batch; [ix:] looks intended — confirm before changing.
        mini_batch_nodes = nodes_for_epoch[ix:-1]
        batch = build_batch(mini_batch_nodes, neigh_dict, args.sample_sizes)
        labels = all_labels[mini_batch_nodes]
        yield (batch, labels)

    model = GraphSage(features.shape[-1], args.nhid,
                      len(args.sample_sizes), num_classes)
    optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
    for epoch in range(args.epochs):
        print(f"Epoch {epoch:d}: training...")
        minibatch_generator = generate_training_minibatch(
            train_nodes, labels, args.batch_size)
        for inputs, inputs_labels in tqdm(
                minibatch_generator, total=len(train_nodes) / args.batch_size):
            with tf.GradientTape() as tape:
                predicted = model(inputs, features)
                loss = loss_fn(tf.convert_to_tensor(inputs_labels), predicted)
                acc = accuracy_score(inputs_labels,
                                     predicted.numpy().argmax(axis=1))
            grads = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            print(f" loss: {loss.numpy():.4f}, acc: {acc:.4f}")

        # validation: evaluated once per epoch on the held-out nodes
        print("Validating...")
        val_results = model(build_batch(
            val_nodes, neigh_dict, args.sample_sizes), features)
        loss = loss_fn(tf.convert_to_tensor(labels[val_nodes]), val_results)
        val_acc = accuracy_score(labels[val_nodes],
                                 val_results.numpy().argmax(axis=1))
        print(f"Epoch: {epoch:d}, "
              f"loss: {loss.numpy():.4f}, "
              f"acc: {val_acc:.4f}")

    # testing: single evaluation after all epochs
    print("Testing...")
    results = model(build_batch(
        test_nodes, neigh_dict, args.sample_sizes), features)
    test_acc = accuracy_score(labels[test_nodes],
                              results.numpy().argmax(axis=1))
    print(f"Test acc: {test_acc:.4f}")
def build_batch(nodes, neigh_dict, sample_sizes):
    """Assemble a GraphSAGE minibatch for the given target nodes.

    :param nodes: node ids the batch must produce outputs for
    :param neigh_dict: BIDIRECTIONAL adjacency as {node: [node]}
    :param sample_sizes: neighbor sample size per layer (len == #layers)
    :return: MiniBatch namedtuple with
        "src_nodes": node ids to feed to the first layer,
        "dstsrc2srcs"/"dstsrc2dsts": per-layer index mappings
        (ordered last layer to first), and
        "dif_mats": per-layer diffusion matrices in the same order.
    """
    dst_nodes = [nodes]
    dstsrc2srcs = []
    dstsrc2dsts = []
    dif_mats = []
    max_node_id = max(neigh_dict)
    # Walk the layers from the output side inward, expanding the frontier.
    for size in reversed(sample_sizes):
        ds, d2s, d2d, dm = compute_diffusion_matrix(
            dst_nodes[-1], neigh_dict, size, max_node_id)
        dst_nodes.append(ds)
        dstsrc2srcs.append(d2s)
        dstsrc2dsts.append(d2d)
        dif_mats.append(dm)
    src_nodes = dst_nodes.pop()
    MiniBatch = collections.namedtuple(
        "MiniBatch", ["src_nodes", "dstsrc2srcs", "dstsrc2dsts", "dif_mats"])
    return MiniBatch(src_nodes, dstsrc2srcs, dstsrc2dsts, dif_mats)
def compute_diffusion_matrix(dst_nodes, neigh_dict, sample_size, max_node_id):
    """Sample neighbors of *dst_nodes* and build one GraphSAGE diffusion layer.

    Returns ``(dstsrc, dstsrc2src, dstsrc2dst, dif_mat)`` where *dstsrc* is
    the sorted union of destination and sampled source nodes, the two index
    arrays map into it, and *dif_mat* is the row-normalized sampled
    adjacency restricted to the columns that were actually hit.
    """
    def sample(ns):
        # Sample without replacement; keeps every neighbor when there are
        # fewer than sample_size of them.
        return np.random.choice(ns, min(len(ns), sample_size), replace=False)

    def vectorize(ns):
        # Indicator vector over all node ids (0..max_node_id).
        v = np.zeros(max_node_id + 1, dtype=np.float32)
        v[ns] = 1
        return v

    # sample neighbors
    adj_mat_full = np.stack([vectorize(
        sample(neigh_dict[n])) for n in dst_nodes])
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the documented, equivalent spelling.
    nonzero_cols_mask = np.any(adj_mat_full.astype(bool), axis=0)

    # compute diffusion matrix: row-normalize; nan_to_num maps the 0/0 of
    # all-zero rows (nodes whose samples hit no kept column) back to 0.
    adj_mat = adj_mat_full[:, nonzero_cols_mask]
    adj_mat_sum = np.sum(adj_mat, axis=1, keepdims=True)
    dif_mat = np.nan_to_num(adj_mat / adj_mat_sum)

    # compute dstsrc mappings
    src_nodes = np.arange(nonzero_cols_mask.size)[nonzero_cols_mask]
    # np.union1d automatically sorts the return,
    # which is required for np.searchsorted
    dstsrc = np.union1d(dst_nodes, src_nodes)
    dstsrc2src = np.searchsorted(dstsrc, src_nodes)
    dstsrc2dst = np.searchsorted(dstsrc, dst_nodes)
    return dstsrc, dstsrc2src, dstsrc2dst, dif_mat
if __name__ == "__main__":
# load the data
adj_list, features, split_ids, y = load_data_yelp(
meta=False, train_size=args.train_size)
idx_train, _, idx_val, _, idx_test, _ = split_ids
num_classes = len(set(y))
label = np.array([y]).T
features = preprocess_feature(features, to_tuple=False)
features = np.array(features.todense())
neigh_dict = collections.defaultdict(list)
for i in range(len(y)):
neigh_dict[i] = []
# merge all relations into single graph
for net in adj_list:
nodes1 = net.nonzero()[0]
nodes2 = net.nonzero()[1]
for node1, node2 in zip(nodes1, nodes2):
neigh_dict[node1].append(node2)
neigh_dict = {k: np.array(v, dtype=np.int64)
for k, v in neigh_dict.items()}
GraphSage_main(neigh_dict, features, label,
[idx_train, idx_val, idx_test], num_classes, args)
| StarcoderdataPython |
5131902 | from django.core.checks import Critical, Warning, run_checks
from django.test import SimpleTestCase, override_settings
class AdminURLCheck(SimpleTestCase):
    """System-check tests for the MYMONEY['ADMIN_BASE_URL'] setting."""

    @override_settings(MYMONEY={"ADMIN_BASE_URL": ''})
    def test_deploy_critical(self):
        # An empty admin URL is a Critical issue under deployment checks.
        errors = self.get_filtered_msgs(
            run_checks(include_deployment_checks=True)
        )
        self.assertEqual(len(errors), 1)
        self.assertIsInstance(errors.pop(), Critical)

    @override_settings(MYMONEY={"ADMIN_BASE_URL": 'admin'})
    def test_deploy_warning(self):
        # The default 'admin' URL only warns.
        errors = self.get_filtered_msgs(
            run_checks(include_deployment_checks=True)
        )
        self.assertEqual(len(errors), 1)
        self.assertIsInstance(errors.pop(), Warning)

    @override_settings(MYMONEY={"ADMIN_BASE_URL": 'foo'})
    def test_deploy_ok(self):
        # A custom URL passes cleanly.
        errors = self.get_filtered_msgs(
            run_checks(include_deployment_checks=True)
        )
        self.assertFalse(errors)

    @override_settings(MYMONEY={"ADMIN_BASE_URL": ''})
    def test_no_deploy(self):
        # Outside deployment checks the setting is not validated at all.
        errors = self.get_filtered_msgs(
            run_checks(include_deployment_checks=False)
        )
        self.assertFalse(errors)

    def get_filtered_msgs(self, msgs):
        # Keep only messages emitted by the mymoney admin-URL check.
        return [msg for msg in msgs if msg.id == 'mymoney_admin_base_url']
| StarcoderdataPython |
1704869 | <filename>python_lessons/MtMk_Test_Files/SublimeText_Test.py
print("Hallo neuer User.")
myString = input("Bitte gebe deinen Namen ein: ")
print("Hallo neuer User, dein Name ist " + myString)
print("-------------------------------------")
| StarcoderdataPython |
# Competitive-programming task: k times, sort the strengths and XOR every
# element at an even (0-based) index of the sorted order with x; finally
# report the maximum and minimum strength.  The state repeats quickly, so
# only min(k, 8 + (k & 3)) rounds need simulating.
n, k, x = map(int, input().split())
rangers = list(map(int, input().split()))
for _ in range(min(k, 8 + (k & 3))):
    rangers.sort()
    # XOR elements at even positions; odd positions are kept unchanged.
    # (Loop variable renamed so it no longer shadows the outer index;
    # a leftover debug print of the whole list was removed — it would
    # corrupt the judged output.)
    rangers = [rangers[j] if (j & 1) else rangers[j] ^ x for j in range(n)]
rangers.sort()
print(rangers[-1], rangers[0])
4831080 | from evaluate import get_env, get_state_action_size, evaluate
from policy import NeuroevoPolicy
from argparse import ArgumentParser
import logging
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-e', '--env', help='environment', default='small', type=str)
    # NOTE(review): argparse `type=bool` turns any non-empty string into
    # True ("--render False" still enables rendering) — consider
    # action='store_true'; left unchanged here.
    parser.add_argument('--render', help='display the environment', default=False, type=bool)
    parser.add_argument('--weights', help='filename to load policy weights', default='weights', type=str)
    args = parser.parse_args()

    # starting point: build the environment and a policy sized to it,
    # then load the previously trained weights.
    env, params = get_env(args.env)
    s, a = get_state_action_size(env)
    policy = NeuroevoPolicy(s, a)
    policy.load(args.weights)
    print('Weights: ', policy.get_params()[:5])

    # Average fitness over 15 evaluations with distinct seeds.
    mean_fit = 0
    for i in range(15):
        params["seed"] = i*2
        fit = evaluate(env, params, policy, render=args.render)
        mean_fit += fit
        print('Fitness: ', fit)
    mean_fit /= 15.0
    print('Mean fit: ', mean_fit)
| StarcoderdataPython |
125725 | <filename>Python/Algorithms/selection_sort.py
def selection_sort(arr):
    """Sort *arr* in place into ascending order using selection sort."""
    size = len(arr)
    for i in range(size):
        # Locate the smallest element in the unsorted tail.
        smallest = i
        for j in range(i, size):
            if arr[j] < arr[smallest]:
                smallest = j
        # Swap it into position i (a self-swap when already in place).
        arr[i], arr[smallest] = arr[smallest], arr[i]
# Demo: sort a small sample list and show it before and after.
numbers = [6, 3, 8, 5, 2, 7, 4, 1]
print(f'Unordered: {numbers}')
selection_sort(numbers)
print(f'Ordered: {numbers}')
1791363 | <gh_stars>10-100
import unittest
from pygsti.forwardsims.mapforwardsim import MapForwardSimulator
import pygsti
from pygsti.modelpacks import smq1Q_XY
from ..testutils import BaseTestCase
class LayoutTestCase(BaseTestCase):
    """Shared checks for pyGSTi circuit-outcome-probability array layouts."""

    def setUp(self):
        super(LayoutTestCase, self).setUp()
        # Five single-qubit circuits over the X/Y model pack.
        self.circuits = pygsti.circuits.to_circuits(["Gxpi2:0", "Gypi2:0", "Gxpi2:0Gxpi2:0",
                                                     "Gypi2:0Gypi2:0", "Gxpi2:0Gypi2:0"])
        self.model = smq1Q_XY.target_model()

    def _test_layout(self, layout):
        # Generic consistency checks any layout implementation must satisfy.
        self.assertEqual(layout.num_elements, len(self.circuits) * 2)  # 2 outcomes per circuit
        self.assertEqual(layout.num_elements, len(layout))
        self.assertEqual(layout.num_circuits, len(self.circuits))
        for i, c in enumerate(self.circuits):
            print("Circuit%d: " % i, c)
            indices = layout.indices(c)
            outcomes = layout.outcomes(c)
            self.assertEqual(pygsti.tools.slicetools.length(indices), 2)
            self.assertEqual(outcomes, (('0',), ('1',)))
            # Lookups by circuit and by index must agree, whether indices
            # are given as a slice or as an array.
            if isinstance(indices, slice):
                self.assertEqual(layout.indices_for_index(i), indices)
            else:  # indices is an array
                self.assertArraysEqual(layout.indices_for_index(i), indices)
            self.assertEqual(layout.outcomes_for_index(i), outcomes)
            self.assertEqual(layout.indices_and_outcomes(c), (indices, outcomes))
            self.assertEqual(layout.indices_and_outcomes_for_index(i), (indices, outcomes))
        # Iteration must visit each unique circuit once, consistently.
        circuits_seen = set()
        for indices, c, outcomes in layout.iter_unique_circuits():
            self.assertFalse(c in circuits_seen)
            circuits_seen.add(c)
            if isinstance(indices, slice):
                self.assertEqual(indices, layout.indices(c))
            else:
                self.assertArraysEqual(indices, layout.indices(c))
            self.assertEqual(outcomes, layout.outcomes(c))
        layout_copy = layout.copy()
        self.assertEqual(layout.circuits, layout_copy.circuits)

    def test_base_layout(self):
        self._test_layout(pygsti.layouts.copalayout.CircuitOutcomeProbabilityArrayLayout.create_from(self.circuits[:], self.model))

    def test_map_layout(self):
        self._test_layout(pygsti.layouts.maplayout.MapCOPALayout(self.circuits[:], self.model))
        # TODO: test split layouts

    def test_matrix_layout(self):
        self._test_layout(pygsti.layouts.matrixlayout.MatrixCOPALayout(self.circuits[:], self.model))
#SCRATCH
# # An additional specific test added from debugging mapevaltree splitting
# mgateset = pygsti.construction.create_explicit_model(
# [('Q0',)],['Gi','Gx','Gy'],
# [ "I(Q0)","X(pi/8,Q0)", "Y(pi/8,Q0)"])
# mgateset._calcClass = MapForwardSimulator
#
# gatestring1 = ('Gx','Gy')
# gatestring2 = ('Gx','Gy','Gy')
# gatestring3 = ('Gx',)
# gatestring4 = ('Gy','Gy')
# #mevt,mlookup,moutcome_lookup = mgateset.bulk_evaltree( [gatestring1,gatestring2] )
# #mevt,mlookup,moutcome_lookup = mgateset.bulk_evaltree( [gatestring1,gatestring4] )
# mevt,mlookup,moutcome_lookup = mgateset.bulk_evaltree( [gatestring1,gatestring2,gatestring3,gatestring4] )
# print("Tree = ",mevt)
# print("Cache size = ",mevt.cache_size())
# print("lookup = ",mlookup)
# print()
#
# self.assertEqual(mevt[:], [(0, ('Gy',), 1),
# (1, ('Gy',), None),
# (None, ('rho0', 'Gx',), 0),
# (None, ('rho0', 'Gy', 'Gy'), None)])
# self.assertEqual(mevt.cache_size(),2)
# self.assertEqual(mevt.evaluation_order(),[2, 0, 1, 3])
# self.assertEqual(mevt.num_final_circuits(),4)
#
# ## COPY
# mevt_copy = mevt.copy()
# print("Tree copy = ",mevt_copy)
# print("Cache size = ",mevt_copy.cache_size())
# print("Eval order = ",mevt_copy.evaluation_order())
# print("Num final = ",mevt_copy.num_final_circuits())
# print()
#
# self.assertEqual(mevt_copy[:], [(0, ('Gy',), 1),
# (1, ('Gy',), None),
# (None, ('rho0', 'Gx',), 0),
# (None, ('rho0', 'Gy', 'Gy'), None)])
# self.assertEqual(mevt_copy.cache_size(),2)
# self.assertEqual(mevt_copy.evaluation_order(),[2, 0, 1, 3])
# self.assertEqual(mevt_copy.num_final_circuits(),4)
#
# ## SQUEEZE
# maxCacheSize = 1
# mevt_squeeze = mevt.copy()
# mevt_squeeze.squeeze(maxCacheSize)
# print("Squeezed Tree = ",mevt_squeeze)
# print("Cache size = ",mevt_squeeze.cache_size())
# print("Eval order = ",mevt_squeeze.evaluation_order())
# print("Num final = ",mevt_squeeze.num_final_circuits())
# print()
#
# self.assertEqual(mevt_squeeze[:], [(0, ('Gy',), None),
# (0, ('Gy','Gy'), None),
# (None, ('rho0', 'Gx',), 0),
# (None, ('rho0', 'Gy', 'Gy'), None)])
#
# self.assertEqual(mevt_squeeze.cache_size(),maxCacheSize)
# self.assertEqual(mevt_squeeze.evaluation_order(),[2, 0, 1, 3])
# self.assertEqual(mevt_squeeze.num_final_circuits(),4)
#
# #SPLIT
# mevt_split = mevt.copy()
# mlookup_splt = mevt_split.split(mlookup,num_sub_trees=4)
# print("Split tree = ",mevt_split)
# print("new lookup = ",mlookup_splt)
# print()
#
# self.assertEqual(mevt_split[:], [(None, ('rho0', 'Gx',), 0),
# (0, ('Gy',), 1),
# (1, ('Gy',), None),
# (None, ('rho0', 'Gy', 'Gy'), None)])
# self.assertEqual(mevt_split.cache_size(),2)
# self.assertEqual(mevt_split.evaluation_order(),[0, 1, 2, 3])
# self.assertEqual(mevt_split.num_final_circuits(),4)
#
#
# subtrees = mevt_split.sub_trees()
# print("%d subtrees" % len(subtrees))
# self.assertEqual(len(subtrees),4)
# for i,subtree in enumerate(subtrees):
# print("Sub tree %d = " % i,subtree,
# " csize = ",subtree.cache_size(),
# " eval = ",subtree.evaluation_order(),
# " nfinal = ",subtree.num_final_circuits())
# self.assertEqual(subtree.cache_size(),0)
# self.assertEqual(subtree.evaluation_order(),[0])
# self.assertEqual(subtree.num_final_circuits(),1)
#
# probs = np.zeros( mevt.num_final_elements(), 'd')
# mgateset.bulk_fill_probs(probs, mevt)
# print("probs = ",probs)
# print("lookup = ",mlookup)
# self.assertArraysAlmostEqual(probs, np.array([ 0.9267767,0.0732233,0.82664074,
# 0.17335926,0.96193977,0.03806023,
# 0.85355339,0.14644661],'d'))
#
#
# squeezed_probs = np.zeros( mevt_squeeze.num_final_elements(), 'd')
# mgateset.bulk_fill_probs(squeezed_probs, mevt_squeeze)
# print("squeezed probs = ",squeezed_probs)
# print("lookup = ",mlookup)
# self.assertArraysAlmostEqual(probs, squeezed_probs)
#
# split_probs = np.zeros( mevt_split.num_final_elements(), 'd')
# mgateset.bulk_fill_probs(split_probs, mevt_split)
# print("split probs = ",split_probs)
# print("lookup = ",mlookup_splt)
# for i in range(4): #then number of original strings (num final strings)
# self.assertArraysAlmostEqual(probs[mlookup[i]], split_probs[mlookup_splt[i]])
if __name__ == '__main__':
    # Allow running this test module directly, with verbose output.
    unittest.main(verbosity=2)
| StarcoderdataPython |
155185 | <filename>tally_ho/libs/models/enums/clearance_resolution.py
from django_enumfield import enum
from django.utils.translation import ugettext_lazy as _
class ClearanceResolution(enum.Enum):
    """Resolution states a clearance case can be moved into."""
    EMPTY = 0
    PENDING_FIELD_INPUT = 1
    PASS_TO_ADMINISTRATOR = 2
    RESET_TO_PREINTAKE = 3

    # Translatable display labels, read by django_enumfield.
    labels = {
        EMPTY: _('Empty'),
        PENDING_FIELD_INPUT: _(u"Pending Field Input"),
        PASS_TO_ADMINISTRATOR: _(u"Pass To Administrator"),
        RESET_TO_PREINTAKE: _(u"Reset To Preintake")
    }
# (enum member, label) pairs for use as a Django form/model `choices` list;
# EMPTY renders as a blank '----' placeholder.
CLEARANCE_CHOICES = [
    (ClearanceResolution.EMPTY, '----'),
    (ClearanceResolution.PENDING_FIELD_INPUT, _(u"Pending Field Input")),
    (ClearanceResolution.PASS_TO_ADMINISTRATOR, _(u"Pass To Administrator")),
    (ClearanceResolution.RESET_TO_PREINTAKE, _(u"Reset To Preintake"))
]
| StarcoderdataPython |
6447559 | import os
# Non-Flask, SQLAlchemy, lib stuff, just for our use!
basedir = os.path.abspath(os.path.dirname(__file__))

# General settings
debug = True
host = '0.0.0.0'
port = 5000

# Path to stat files. Default value MUST be changed.
STATS_DIR = os.path.join(basedir, 'test-statfiles')
# This is where files get moved to once they're batch processed.
# They will never be used again and are kept around only for debugging the server code.
PROCESSED_DIR = os.path.join(STATS_DIR, 'processed')
# Files that could not be parsed.
UNPARSABLE_DIR = os.path.join(STATS_DIR, 'unparsable')

# Database
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
# NOTE(review): this second assignment overrides the False above — tracking
# is effectively enabled whenever debug is on; confirm which was intended.
SQLALCHEMY_TRACK_MODIFICATIONS = debug  # Track on debug

# No longer used due to Manager implementation
# Load from arguments
# if len(sys.argv) > 1:
#     if sys.argv[1] == 'debug':
#         debug = True
#         SQLALCHEMY_TRACK_MODIFICATIONS = True

# App settings
MATCHES_PER_PAGE = 24  # Works best as a multiple of 3
6695491 | #!/usr/bin/env python3
'''
$ wget https://github.com/PheWAS/PheWAS/blob/master/data/phemap.rda
$ wget https://github.com/PheWAS/PheWAS/blob/master/data/pheinfo.rda
$ r
> load('phemap.rda')
> load('pheinfo.rda')
> write.csv(phemap, 'phemap.csv', row.names=F)
> write.csv(pheinfo, 'pheinfo.csv', row.names=F)
# found this link at <https://www.cms.gov/medicare/coding/ICD9providerdiagnosticcodes/codes.html>
$ wget https://www.cms.gov/Medicare/Coding/ICD9ProviderDiagnosticCodes/Downloads/ICD-9-CM-v32-master-descriptions.zip
$ unzip ICD-9-CM-v32-master-descriptions.zip
$ wget https://medschool.vanderbilt.edu/cpm/files/cpm/public_files/perl_phewas.zip
$ unzip perl_phewas.zip
$ cat code_translation.txt | tr "\r" "\n" > icd9s.tsv
# got ICD9_CodeCounts_20160323_LF.txt from group. it differs from others in some ways.
# maybe I should have just downloaded the last 6 icd9 versions and unioned them?
'''
import csv
import json
import itertools
import string
# Build three independent ICD9 -> description lookup tables from different sources
# (they disagree in places; see the fallback chain used later).
# Source 1: Vanderbilt's code_translation.txt (tab-separated, CODE / STR_SHORT columns).
icd9s_1 = list(csv.DictReader(open('icd9s.tsv'), delimiter='\t'))
string_for_icd9_1 = {}
for x in icd9s_1:
    string_for_icd9_1[x['CODE']] = x['STR_SHORT'].strip()
# Source 2: CMS master description file, fixed "CODE DESC" lines, Latin-1 encoded.
icd9s_2 = list(open("ICD-9-CM-v32-master-descriptions/CMS32_DESC_LONG_DX.txt", encoding="ISO-8859-1"))
string_for_icd9_2 = {}
for x in icd9s_2:
    icd9, desc = x.strip().split(' ', 1)
    # CMS codes are undotted; insert the dot after the third character
    # to match the format used by the other sources.
    icd9 = icd9[:3] + '.' + icd9[3:]
    string_for_icd9_2[icd9] = desc.strip()
# Source 3: group-provided code counts file (already dotted, icd9 / icd9_string columns).
icd9s_3 = list(csv.DictReader(open("ICD9_CodeCounts_20160323_LF.txt"), delimiter='\t'))
string_for_icd9_3 = {}
for x in icd9s_3:
    string_for_icd9_3[x['icd9']] = x['icd9_string'].strip()
# phemap.csv maps phecodes to their member ICD9 codes (one row per pair).
phemap = list(csv.DictReader(open('phemap.csv')))
icd9s_for_phecode = {}
for x in phemap:
    icd9s_for_phecode.setdefault(x['phecode'], []).append(x['icd9'])
# pheinfo.csv carries display metadata (description, category group, plot color) per phecode.
pheinfo = list(csv.DictReader(open("pheinfo.csv")))
info_for_phecode = {}
for x in pheinfo:
    info_for_phecode[x['phecode']] = {
        'desc': x['description'].strip(),
        'category': x['group'].strip(),
        'color': x['color'].strip(),
    }
def cmp(*xs):
for n in range(1, 1+len(xs)):
for c in itertools.combinations(range(len(xs)), n):
print(''.join(string.ascii_letters[i] for i in c), end=':')
print(len(set.intersection(*[set(xs[i]) for i in c])), end=' ')
print('')
for n in range(1, 1+len(xs)):
for c in itertools.combinations(range(len(xs)), n):
print(''.join(string.ascii_letters[i] for i in c), end='')
comp = [i for i in range(len(xs)) if i not in c]
if comp:
print('-' + ''.join(string.ascii_letters[i] for i in comp), end='')
print(':', end='')
print(len(set.intersection(*[set(xs[i]) for i in c]).difference(*[xs[i] for i in comp])), end=' ')
print('')
# Diagnostic overlap reports: phecode coverage between the two phecode tables,
# and ICD9 coverage across the three description sources.
cmp(info_for_phecode, icd9s_for_phecode)
cmp(string_for_icd9_1, string_for_icd9_2, string_for_icd9_3)
# Attach the sorted ICD9 membership (with best-available description) to each phecode.
for phecode in info_for_phecode:
    ii = []
    for icd9 in icd9s_for_phecode[phecode]:
        # 3 is from group, 2 is from govt, 1 is from vb
        # (first source that has a description wins; '?' if none do)
        desc = string_for_icd9_3.get(icd9, False) or string_for_icd9_2.get(icd9, False) or string_for_icd9_1.get(icd9, False) or '?'
        ii.append({'icd9': icd9, 'desc': desc})
    ii = sorted(ii, key=lambda x: x['icd9'])
    info_for_phecode[phecode]['icd9s'] = ii
# Write the combined table as the script's final product.
with open('phecodes_icd9.json', 'w') as f:
    json.dump(info_for_phecode, f, sort_keys=True, indent=1)
| StarcoderdataPython |
8176791 | <filename>services/fuse/tests/test_token_expiry.py
import apiclient
import arvados
import arvados_fuse
import logging
import mock
import multiprocessing
import os
import re
import sys
import time
import unittest
from .integration_test import IntegrationTest
logger = logging.getLogger('arvados.arv-mount')
class TokenExpiryTest(IntegrationTest):
    """Integration test: the FUSE mount must re-fetch a collection manifest
    when the cached copy's tokens look old (simulated by jumping the clock)."""
    def setUp(self):
        super(TokenExpiryTest, self).setUp()
        self.test_start_time = time.time()
        # Simulated clock; advanced manually by fake_time/fake_open below.
        self.time_now = int(time.time())+1
    def fake_time(self):
        # Stand-in for time.time(): advances one second per call so time is
        # deterministic and strictly increasing.
        self.time_now += 1
        return self.time_now
    # Keep a reference to the real open() so fake_open can delegate to it.
    orig_open = arvados_fuse.Operations.open
    def fake_open(self, operations, *args, **kwargs):
        # Jump the simulated clock ~13 days per open() so any cached manifest
        # appears stale and must be refreshed.
        self.time_now += 86400*13
        logger.debug('opening file at time=%f', self.time_now)
        return self.orig_open(operations, *args, **kwargs)
    @mock.patch.object(arvados_fuse.Operations, 'open', autospec=True)
    @mock.patch('time.time')
    @mock.patch('arvados.keep.KeepClient.get')
    @IntegrationTest.mount(argv=['--mount-by-id', 'zzz'])
    def test_refresh_old_manifest(self, mocked_get, mocked_time, mocked_open):
        # This test (and associated behavior) is still not strong
        # enough. We should ensure old tokens are never used even if
        # blobSignatureTtl seconds elapse between open() and
        # read(). See https://dev.arvados.org/issues/10008
        mocked_get.return_value = 'fake data'
        mocked_time.side_effect = self.fake_time
        mocked_open.side_effect = self.fake_open
        # Wrap collections() and collections().get() so calls can be counted
        # while still hitting the real API client underneath.
        with mock.patch.object(self.mount.api, 'collections', wraps=self.mount.api.collections) as mocked_collections:
            mocked_collections.return_value = mocked_collections()
            with mock.patch.object(self.mount.api.collections(), 'get', wraps=self.mount.api.collections().get) as mocked_get:
                self.pool_test(os.path.join(self.mnt, 'zzz'))
        # open() several times here to make sure we don't reach our
        # quota of mocked_get.call_count dishonestly (e.g., the first
        # open causes 5 mocked_get, and the rest cause none).
        self.assertEqual(8, mocked_open.call_count)
        self.assertGreaterEqual(
            mocked_get.call_count, 8,
            'Not enough calls to collections().get(): expected 8, got {!r}'.format(
                mocked_get.mock_calls))
    @staticmethod
    def _test_refresh_old_manifest(self, zzz):
        # NOTE(review): declared @staticmethod yet takes 'self' — presumably
        # pool_test runs this in a worker and passes the instance explicitly;
        # confirm against IntegrationTest.pool_test.
        uuid = 'zzzzz-4zz18-op4e2lbej01tcvu'
        fnm = 'zzzzz-8i9sb-0vsrcqi7whchuil.log.txt'
        os.listdir(os.path.join(zzz, uuid))
        # Open the same file repeatedly; each fake_open jumps the clock, which
        # should force a manifest refresh on every open.
        for _ in range(8):
            with open(os.path.join(zzz, uuid, fnm)) as f:
                f.read()
| StarcoderdataPython |
6516268 | #!python
import string
# Hint: Use these string constants to ignore capitalization and/or punctuation
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
def is_palindrome(text):
    """Check whether *text* reads the same forwards and backwards, ignoring
    punctuation, whitespace, and letter casing."""
    assert isinstance(text, str), 'input is not a string: {}'.format(text)
    # Recursive strategy; is_palindrome_iterative offers the loop-based alternative.
    return is_palindrome_recursive(text)
def is_palindrome_iterative(text):
    """Two-pointer palindrome check, case-insensitive and letters-only."""
    # Normalize: lowercase and keep alphabetic characters only.
    letters = [ch for ch in text.lower() if 'a' <= ch <= 'z']
    lo, hi = 0, len(letters) - 1
    # Walk the pointers inward; any mismatch disproves the palindrome.
    while lo < hi:
        if letters[lo] != letters[hi]:
            return False
        lo += 1
        hi -= 1
    return True
def is_palindrome_recursive(text, left=None, right=None):
    """Recursive palindrome check, ignoring case and non-letter characters.

    left/right are internal recursion indices into the cleaned text; external
    callers should pass only `text`.
    """
    if left is None:
        # Clean exactly once on the initial call. The previous version
        # re-cleaned on every recursive step, turning an O(n) check into
        # O(n^2) total work (cleaning was idempotent, so results were the
        # same — just wasteful).
        text = ''.join(letter for letter in text.lower() if 'a' <= letter <= 'z')
        left = 0
        right = len(text) - 1
    if left >= right:
        return True
    if text[right] == text[left]:
        return is_palindrome_recursive(text, left + 1, right - 1)
    else:
        return False
def main():
    """CLI entry point: report PASS/FAIL for each command-line argument
    depending on whether it is a palindrome; print usage if no arguments."""
    import sys
    args = sys.argv[1:] # Ignore script file name
    if len(args) > 0:
        for arg in args:
            is_pal = is_palindrome(arg)
            result = 'PASS' if is_pal else 'FAIL'
            is_str = 'is' if is_pal else 'is not'
            print('{}: {} {} a palindrome'.format(result, repr(arg), is_str))
    else:
        print('Usage: {} string1 string2 ... stringN'.format(sys.argv[0]))
        print(' checks if each argument given is a palindrome')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4903054 | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3CodingRationale"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3CodingRationale:
    """
    v3 Code System CodingRationale
    Identifies how to interpret the instance of the code, codeSystem value
    in a set of translations. Since HL7 (or a government body) may mandate
    that codes from certain code systems be sent in conformant messages,
    other synonyms that are sent in the translation set need to be
    distinguished among the originally captured source, the HL7 specified
    code, or some future role. When this code is NULL, it indicates that
    the translation is an undefined type. When valued, this property must
    contain one of the following values: SRC - Source (or original) code HL7
    - HL7 Specified or Mandated SH - both HL7 mandated and the original code
    (precoordination) There may be additional values added to this value set
    as we work through the use of codes in messages and determine other Use
    Cases requiring special interpretation of the translations.
    Status: active - Version: 2018-08-12
    Copyright None
    http://terminology.hl7.org/CodeSystem/v3-CodingRationale
    """

    # Each class attribute below is one concept of the code system; the bare
    # triple-quoted string after each assignment is kept as in the generated
    # source and mirrors the concept's display and definition.
    o = CodeSystemConcept(
        {
            "code": "O",
            "definition": "Description: Originally produced code.",
            "display": "originally produced code",
        }
    )
    """
    originally produced code
    Description: Originally produced code.
    """

    # Trailing underscore because "or" is a reserved keyword in Python.
    or_ = CodeSystemConcept(
        {
            "code": "OR",
            "definition": "Originally produced code, required by the specification describing the use of the coded concept.",
            "display": "original and required",
        }
    )
    """
    original and required
    Originally produced code, required by the specification describing the use of the coded concept.
    """

    p = CodeSystemConcept(
        {
            "code": "P",
            "definition": "Description: Post-coded from free text source</description>",
            "display": "post-coded",
        }
    )
    """
    post-coded
    Description: Post-coded from free text source</description>
    """

    pr = CodeSystemConcept(
        {
            "code": "PR",
            "definition": "Post-coded from free text source, required by the specification describing the use of the coded concept.",
            "display": "post-coded and required",
        }
    )
    """
    post-coded and required
    Post-coded from free text source, required by the specification describing the use of the coded concept.
    """

    r = CodeSystemConcept(
        {
            "code": "R",
            "definition": "Description: Required standard code for HL7.",
            "display": "required",
        }
    )
    """
    required
    Description: Required standard code for HL7.
    """

    # The remaining concepts carry status=retired in the source code system.
    hl7 = CodeSystemConcept(
        {
            "code": "HL7",
            "definition": "HL7 Specified or Mandated",
            "display": "HL7 Specified or Mandated",
            "property": [{"code": "status", "valueCode": "retired"}],
        }
    )
    """
    HL7 Specified or Mandated
    HL7 Specified or Mandated
    """

    sh = CodeSystemConcept(
        {
            "code": "SH",
            "definition": "Both HL7 mandated and the original code (precoordination)",
            "display": "Both HL7 mandated and the original code",
            "property": [{"code": "status", "valueCode": "retired"}],
        }
    )
    """
    Both HL7 mandated and the original code
    Both HL7 mandated and the original code (precoordination)
    """

    src = CodeSystemConcept(
        {
            "code": "SRC",
            "definition": "Source (or original) code",
            "display": "Source (or original) code",
            "property": [{"code": "status", "valueCode": "retired"}],
        }
    )
    """
    Source (or original) code
    Source (or original) code
    """

    class Meta:
        # Bundles the parsed FHIR CodeSystem resource for framework access.
        resource = _resource
| StarcoderdataPython |
1747211 | <filename>twitter_api_v2/TwitterAPI.py
import json
import logging
from logging import Logger
from typing import Dict, List, Optional
import requests
from requests.models import Response
from twitter_api_v2 import Media, Poll, Tweet, User
logger: Logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TwitterAPI:
    """Minimal Twitter API v2 client using app-only (bearer token) auth.

    NOTE: the empty-list parameter defaults below are shared mutable objects;
    they are safe here because they are only read, never mutated.
    """

    def __init__(self, bearer_token: str) -> None:
        self.__BEARER_TOKEN: str = bearer_token
        self.__REQUEST_HEADERS: Dict = {
            "Authorization": f"Bearer {self.__BEARER_TOKEN}"
        }
        self.__API_URL: str = "https://api.twitter.com/2"

    def _request_json(self, url: str, params: Optional[Dict[str, str]]) -> Dict:
        """GET *url* with auth headers and return the decoded JSON body.

        Shared by all endpoint methods (previously duplicated in each).

        Raises:
            Exception: if the response status code is not 200.
        """
        response: Response = requests.get(
            url=url,
            params=params,
            headers=self.__REQUEST_HEADERS,
        )
        if response.status_code != 200:
            raise Exception(
                f"Request returned an error: {response.status_code} {response.text}"
            )
        res_json = json.loads(response.text)
        logger.debug(res_json)
        return res_json

    @staticmethod
    def _user_params(user_fields: List[User.Field]) -> Optional[Dict[str, str]]:
        """Build the query dict for user lookups; None (not {}) when no fields
        are requested, matching the original behavior."""
        if not user_fields:
            return None
        return {"user.fields": ",".join(list(map(str, user_fields)))}

    def get_tweet(
        self,
        id: str,
        expansions: List[Tweet.Expantion] = [],
        tweet_fields: List[Tweet.Field] = [],
        media_fields: List[Media.Field] = [],
        poll_fields: List[Poll.Field] = [],
    ) -> Tweet.Tweet:
        """Fetch a single tweet by id, optionally requesting expansions and
        extra tweet/media/poll fields."""
        params: Optional[Dict[str, str]] = self._make_params(
            expansions, tweet_fields, media_fields, poll_fields
        )
        logger.debug(params)
        res_json = self._request_json(f"{self.__API_URL}/tweets/{id}", params)
        # "includes" is only present when expansions were resolved.
        if "includes" in res_json.keys():
            return Tweet.Tweet(**res_json["data"], **res_json["includes"])
        else:
            return Tweet.Tweet(**res_json["data"])

    def get_user_by_id(self, id: str, user_fields: List[User.Field] = []) -> User.User:
        """Fetch a user by numeric id."""
        res_json = self._request_json(
            f"{self.__API_URL}/users/{id}", self._user_params(user_fields)
        )
        return User.User(**res_json["data"])

    def get_user_by_username(
        self, username: str, user_fields: List[User.Field] = []
    ) -> User.User:
        """Fetch a user by @handle (without the leading @)."""
        res_json = self._request_json(
            f"{self.__API_URL}/users/by/username/{username}",
            self._user_params(user_fields),
        )
        return User.User(**res_json["data"])

    def _make_params(
        self,
        expansions: List[Tweet.Expantion],
        tweet_fields: List[Tweet.Field],
        media_fields: List[Media.Field],
        poll_fields: List[Poll.Field],
    ) -> Optional[Dict[str, str]]:
        """Assemble tweet-endpoint query parameters; None when nothing is requested."""
        if (
            (not expansions)
            and (not tweet_fields)
            and (not media_fields)
            and (not poll_fields)
        ):
            return None
        params: Dict[str, str] = {}
        if expansions:
            params["expansions"] = ",".join(list(map(str, expansions)))
        if tweet_fields:
            params["tweet.fields"] = ",".join(list(map(str, tweet_fields)))
        if media_fields:
            params["media.fields"] = ",".join(list(map(str, media_fields)))
        if poll_fields:
            params["poll.fields"] = ",".join(list(map(str, poll_fields)))
        return params
| StarcoderdataPython |
144416 | import copy, os
import tensorflow as tf
import numpy as np
from lib.tf_ops import shape_list, spacial_shape_list, tf_tensor_stats, tf_norm2, tf_angle_between
from lib.util import load_numpy
from .renderer import Renderer
from .transform import GridTransform
from .vector import GridShape, Vector3
import logging
LOG = logging.getLogger("Structs")
# --- DATA Structs ---
def get_coord_field(shape, offset=[0,0,0], lod=0.0, concat=True):
    '''
    Build a per-cell coordinate field (cell index + offset) plus an optional
    constant level-of-detail channel.
    shape: z,y,x (spatial cell counts)
    offset: x,y,z (added to the respective coordinate channel)
    lod: constant value for the extra channel; None omits the channel entirely
    concat: if True return one tensor, else the list of per-channel tensors
    returns: 1,z,y,x,c with c=x,y,z,lod
    '''
    # Note: offset default is a shared mutable list but is only read here.
    coord_z, coord_y, coord_x = tf.meshgrid(tf.range(shape[0], dtype=tf.float32), tf.range(shape[1], dtype=tf.float32), tf.range(shape[2], dtype=tf.float32), indexing='ij') #z,y,x
    coord_data = [tf.reshape(coord_x + offset[0], [1]+shape+[1]),
        tf.reshape(coord_y + offset[1], [1]+shape+[1]),
        tf.reshape(coord_z + offset[2], [1]+shape+[1])] #3 x 1DHW1
    if lod is not None:
        lod_data = tf.constant(lod, shape=[1]+shape+[1], dtype=tf.float32) #tf.ones([1]+shape+[1])*lod
        coord_data.append(lod_data)#4 x 1DHW1
    if concat:
        coord_data = tf.concat(coord_data, axis=-1)
    return coord_data
class Zeroset:
    """Level-set grid (layout NDHW1); the zero crossing defines a boundary.

    Used to derive per-face "hull" weights that mask a staggered velocity grid
    at the boundary. outer_bounds ("OPEN" or "CLOSED") controls the padding
    value at the domain border: open borders pass flow (weight 1), closed
    borders block it (weight 0).
    """
    def __init__(self, initial_value, shape=None, as_var=True, outer_bounds="OPEN", device=None, var_name="zeroset", trainable=True):
        self.outer_bounds = outer_bounds
        self.is_var = as_var
        self._device = device
        self._name = var_name
        self._is_trainable = trainable
        with tf.device(self._device):
            if shape is not None:
                # Broadcast a scalar initial value to the requested grid shape.
                assert isinstance(shape, GridShape)
                initial_value = tf.constant(initial_value, shape=shape.value, dtype=tf.float32)
            if as_var:
                self._levelset = tf.Variable(initial_value=initial_value, name=var_name, trainable=trainable)
            else:
                self._levelset = tf.identity(initial_value)
    @property
    def grid_shape(self):
        return GridShape.from_tensor(self._levelset)
    def _hull_staggered_lerp_weight(self, a, b):
        # Face weight between adjacent cells a and b: where the sign changes,
        # the weight is the interpolated zero-crossing position; otherwise
        # 1 if a<=0 (inside), else 0.
        a_leq = tf.less_equal(a,0)
        return tf.where( tf.logical_xor(a_leq, tf.less_equal(b,0)), #sign change along interpolation
            tf.abs( tf.divide( tf.minimum(a,b), tf.subtract(a,b) ) ),
            tf.cast(a_leq, dtype=a.dtype)
        )
    def _hull_simple_staggered_component(self, axis):
        """Hull weights for the faces along one staggered axis of the NDHW1 grid."""
        assert axis in [1,2,3,-2,-3,-4]
        axis = axis%5
        pad = [(0,0),(0,0),(0,0),(0,0),(0,0)]
        pad[axis]=(1,1)
        shape = self.grid_shape.value
        shape[axis] -= 1
        offset = np.zeros((5,), dtype=np.int32)
        cells_prev = tf.slice(self._levelset, offset, shape) #self._levelset[:,:,:,:-1,:]
        offset[axis] += 1
        cells_next = tf.slice(self._levelset, offset, shape) #self._levelset[:,:,:, 1:,:]
        hull = self._hull_staggered_lerp_weight(cells_prev,cells_next)
        # Pad the outer border faces according to the boundary mode.
        hull = tf.pad(hull, pad, constant_values=1 if self.outer_bounds=="OPEN" else 0)
        return hull
    def to_hull_simple_staggered(self):
        """Return staggered hull weights as an (x, y, z) component tuple."""
        return self._hull_simple_staggered_component(-2), self._hull_simple_staggered_component(-3), self._hull_simple_staggered_component(-4)
    def to_hull_simple_centered(self):
        raise NotImplementedError()
    def to_denstiy_simple_centered(self):
        # NOTE: method-name misspelling ("denstiy") kept for API compatibility.
        return tf.where(tf.greater(self._levelset, 0), 250, 0)
    def resize(self, shape):
        assert shape_list(shape)==[3]
        new_shape = GridShape(shape)
        if new_shape==self.grid_shape:
            return
        raise NotImplementedError("Zeroset.resize() not implemented.")
    def assign(self, levelset):
        # FIX: 'self' was missing from the signature, so obj.assign(x) raised
        # a TypeError instead of the intended NotImplementedError.
        raise NotImplementedError()
class DensityGrid:
    """Scalar density volume on a centered grid (layout NDHW1), optionally a
    trainable tf.Variable.

    Supports an optional hull (binary/smooth mask restricting density), an
    optional inflow region (a smaller grid added into the density at a given
    offset), and scaling/warping via externally supplied renderers.
    """
    def __init__(self, shape, constant=0.1, as_var=True, d=None, scale_renderer=None, hull=None, inflow=None, inflow_offset=None, inflow_mask=None, device=None, var_name="denstiy", trainable=True, restrict_to_hull=True):
        # NOTE(review): default var_name "denstiy" is misspelled; kept as-is
        # since variable names may be referenced by existing checkpoints.
        self.shape = shape  # spatial shape [D,H,W] (z,y,x)
        if d is not None:
            d_shape = shape_list(d)
            if not len(d_shape)==5 or not d_shape[-1]==1 or not self.shape==spacial_shape_list(d):
                raise ValueError("Invalid shape of density on assignment: %s"%d_shape)
        self.is_var = as_var
        self._device = device
        self._name = var_name
        self._is_trainable = trainable
        if as_var:
            # Constant initializer used when no explicit density is given.
            rand_init = tf.constant_initializer(constant)
            with tf.device(self._device):
                # NOTE(review): tf.Variable trainable is hard-coded True here;
                # the 'trainable' argument only takes effect via the trainable
                # property and external variable selection — confirm intended.
                self._d = tf.Variable(initial_value=d if d is not None else rand_init(shape=[1]+self.shape+[1], dtype=tf.float32), name=var_name+'_dens', trainable=True)
        else:
            with tf.device(self._device):
                if d is not None:
                    self._d = tf.constant(d, dtype=tf.float32)
                else:
                    self._d = tf.constant(constant, shape=[1]+self.shape+[1], dtype=tf.float32)
        self.scale_renderer = scale_renderer
        with tf.device(self._device):
            self.hull = tf.constant(hull, dtype=tf.float32) if hull is not None else None
        self.restrict_to_hull = restrict_to_hull
        if inflow is not None:
            with tf.device(self._device):
                if isinstance(inflow, str) and inflow=='CONST':
                    # 'CONST' requests a constant-initialized inflow shaped like the mask.
                    assert isinstance(inflow_mask, (tf.Tensor, np.ndarray))
                    # NOTE(review): rand_init is only defined in the as_var branch
                    # above; inflow=='CONST' with as_var=False would raise
                    # NameError — confirm whether that combination is valid.
                    inflow = rand_init(shape=shape_list(inflow_mask), dtype=tf.float32)
                if as_var:
                    self._inflow = tf.Variable(initial_value=inflow, name=var_name+'_inflow', trainable=True)
                else:
                    self._inflow = tf.constant(inflow, dtype=tf.float32)
            self.inflow_mask = tf.constant(inflow_mask, dtype=tf.float32) if inflow_mask is not None else None
            inflow_shape = spacial_shape_list(self._inflow) #.get_shape().as_list()[-4:-1]
            # Per-axis padding that places the (smaller) inflow grid at
            # inflow_offset inside the full density grid.
            self._inflow_padding = [[0,0]]+[[inflow_offset[_],self.shape[_]-inflow_offset[_]-inflow_shape[_]] for _ in range(3)]+[[0,0]]
            self.inflow_offset = inflow_offset
        else:
            self._inflow = None
    @property
    def trainable(self):
        # Trainable only when requested AND backed by a variable.
        return self._is_trainable and self.is_var
    @property
    def d(self):
        # Density, restricted to the hull when configured.
        if self.restrict_to_hull:
            return self.with_hull()
        else:
            return tf.identity(self._d)
    def with_hull(self):
        """Raw density multiplied by the hull mask (identity if no hull is set)."""
        if self.hull is not None:
            return self._d * self.hull # hull is a (smooth) binary mask
        else:
            return tf.identity(self._d)
    @property
    def inflow(self):
        # Inflow grid padded up to the full density shape; zeros if unset.
        if self._inflow is None:
            return tf.zeros_like(self._d, dtype=tf.float32)
        elif self.inflow_mask is not None: #hasattr(self, 'inflow_mask') and
            return tf.pad(self._inflow*self.inflow_mask, self._inflow_padding)
        else:
            return tf.pad(self._inflow, self._inflow_padding)
    def with_inflow(self):
        """Density plus inflow, clamped to be non-negative."""
        density = self.d
        if self._inflow is not None:
            density = tf.maximum(density+self.inflow, 0)
        return density
    @classmethod
    def from_file(cls, path, as_var=True, scale_renderer=None, hull=None, inflow=None, inflow_offset=None, inflow_mask=None, device=None, var_name="denstiy", trainable=True, restrict_to_hull=True):
        """Load a DensityGrid saved by save(); returns None on failure.

        Explicit hull/inflow arguments take precedence over values stored
        in the file.
        """
        try:
            with np.load(path) as np_data:
                d = np_data['arr_0']
                shape =spacial_shape_list(d)
                if 'hull' in np_data and hull is None:
                    hull = np_data['hull']
                if 'inflow' in np_data and inflow is None:
                    inflow=np_data['inflow']
                if 'inflow_mask' in np_data and inflow_mask is None:
                    inflow_mask=np_data['inflow_mask']
                if 'inflow_offset' in np_data and inflow_offset is None:
                    inflow_offset=np_data['inflow_offset'].tolist()
            grid = cls(shape, d=d, as_var=as_var, scale_renderer=scale_renderer, hull=hull, inflow=inflow, inflow_offset=inflow_offset, inflow_mask=inflow_mask, \
                device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
        except:
            LOG.warning("Failed to load density from '%s':", path, exc_info=True)
            return None
        else:
            return grid
    @classmethod
    def from_scalarFlow_file(cls, path, as_var=True, shape=None, scale_renderer=None, hull=None, inflow=None, inflow_offset=None, inflow_mask=None, device=None, var_name="sF_denstiy", trainable=True, restrict_to_hull=True):
        """Load a density grid from a ScalarFlow dataset file.

        If shape is set the loaded grid is rescaled to it; otherwise the lower
        15 cells are split off and treated as the inflow region.
        """
        # if shape is set the loaded grid will be reshaped if necessary
        density = load_numpy(path).astype(np.float32)[::-1]  # flip z-axis to match this grid convention
        density = density.reshape([1] + list(density.shape)) # add batch dimension -> NDHWC
        density = tf.constant(density, dtype=tf.float32)
        d_shape = spacial_shape_list(density)
        if shape is not None and shape!=d_shape:
            if scale_renderer is None:
                raise ValueError("No renderer provided to scale density.")
            LOG.debug("scaling scalarFlow density from %s to %s", d_shape, shape)
            density = scale_renderer.resample_grid3D_aligned(density, shape)
            d_shape = shape
        else:
            # cut of SF inflow region and set as inflow. or is it already cut off in SF dataset? it is, but not in the synth dataset or my own sF runs.
            # lower 15 cells...
            inflow, density= tf.split(density, [15, d_shape[1]-15], axis=-3)
            inflow_mask = tf.ones_like(inflow, dtype=tf.float32)
            inflow_offset = [0,0,0]
            # zero out the region the inflow was cut from; inflow is re-added via padding.
            density = tf.concat([tf.zeros_like(inflow, dtype=tf.float32), density], axis=-3)
        return cls(d_shape, d=density, as_var=as_var, scale_renderer=scale_renderer, hull=hull, inflow=inflow, inflow_offset=inflow_offset, inflow_mask=inflow_mask, \
            device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
    def copy(self, as_var=None, device=None, var_name=None, trainable=None, restrict_to_hull=None):
        """Deep-copy this grid (including hull and inflow); None arguments inherit."""
        if as_var is None:
            as_var = self.is_var
        if as_var and var_name is None:
            var_name = self._name + '_cpy'
        if trainable is None:
            trainable = self._is_trainable
        if restrict_to_hull is None:
            restrict_to_hull = self.restrict_to_hull
        if self._inflow is not None:
            grid = DensityGrid(self.shape, d=tf.identity(self._d), as_var=as_var, scale_renderer=self.scale_renderer, hull=self.hull, \
                inflow=tf.identity(self._inflow), inflow_offset=self.inflow_offset, inflow_mask=self.inflow_mask, \
                device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
        else:
            grid = DensityGrid(self.shape, d=tf.identity(self._d), as_var=as_var, scale_renderer=self.scale_renderer, hull=self.hull, \
                device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
        return grid
    def scaled(self, new_shape, with_inflow=False):
        """Return the density tensor resampled to new_shape ([D,H,W])."""
        if not (isinstance(new_shape, list) and len(new_shape)==3):
            raise ValueError("Invalid shape")
        density = self.d if not with_inflow else self.with_inflow()
        if new_shape!=self.shape:
            LOG.debug("Scaling density from %s to %s", self.shape, new_shape)
            with self.scale_renderer.profiler.sample("scale density"):
                d_scaled = self.scale_renderer.resample_grid3D_aligned(density, new_shape)
        else:
            LOG.debug("No need to scale density to same shape %s", self.shape)
            d_scaled = tf.identity(density)
        return d_scaled
    def copy_scaled(self, new_shape, as_var=None, device=None, var_name=None, trainable=None, restrict_to_hull=None):
        '''Does not copy inflow and hull, TODO'''
        if as_var is None:
            as_var = self.is_var
        if as_var and var_name is None:
            var_name = self._name + '_scaled'
        if trainable is None:
            trainable = self._is_trainable
        if restrict_to_hull is None:
            restrict_to_hull = self.restrict_to_hull
        d_scaled = self.scaled(new_shape)
        grid = DensityGrid(new_shape, d=d_scaled, as_var=as_var, scale_renderer=self.scale_renderer, device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
        return grid
    def warped(self, vel_grid, order=1, dt=1.0, clamp="NONE"):
        """Return this density (with inflow applied) advected by vel_grid."""
        if not (isinstance(vel_grid, VelocityGrid)):
            raise ValueError("Invalid velocity grid")
        return vel_grid.warp(self.with_inflow(), order=order, dt=dt, clamp=clamp)
    def copy_warped(self, vel_grid, as_var=None, order=1, dt=1.0, device=None, var_name=None, clamp="NONE", trainable=None, restrict_to_hull=None):
        '''Does not copy inflow and hull, TODO'''
        if as_var is None:
            as_var = self.is_var
        if as_var and var_name is None:
            var_name = self._name + '_warped'
        if trainable is None:
            trainable = self._is_trainable
        if restrict_to_hull is None:
            restrict_to_hull = self.restrict_to_hull
        d_warped = self.warped(vel_grid, order=order, dt=dt, clamp=clamp)
        grid = DensityGrid(self.shape, d=d_warped, as_var=as_var, scale_renderer=self.scale_renderer, device=device, var_name=var_name, trainable=trainable, restrict_to_hull=restrict_to_hull)
        return grid
    def scale(self, scale):
        # Uniformly scale density magnitude (not resolution).
        self.assign(self._d*scale)
    def apply_clamp(self, vmin, vmax):
        """Clamp density (and, consistently, the inflow contribution) to [vmin, vmax]."""
        vmin = tf.maximum(vmin, 0)
        d = tf.clip_by_value(self._d, vmin, vmax)
        inflow = None
        if self._inflow is not None:
            # use already clamped density for consistency
            denstiy_shape = shape_list(d)
            # Crop the density region covered by the inflow grid so the inflow
            # can be clamped relative to the density it is added onto.
            density_cropped = d[self._inflow_padding[0][0] : denstiy_shape[0]-self._inflow_padding[0][1],
                self._inflow_padding[1][0] : denstiy_shape[1]-self._inflow_padding[1][1],
                self._inflow_padding[2][0] : denstiy_shape[2]-self._inflow_padding[2][1],
                self._inflow_padding[3][0] : denstiy_shape[3]-self._inflow_padding[3][1],
                self._inflow_padding[4][0] : denstiy_shape[4]-self._inflow_padding[4][1]]
            inflow = tf.clip_by_value(self._inflow, vmin - density_cropped, vmax - density_cropped)
        self.assign(d, inflow)
    def assign(self, d, inflow=None):
        """Set the density (and optionally inflow) tensor; shape NDHW1 with DHW==self.shape."""
        shape = shape_list(d)
        if not len(shape)==5 or not shape[-1]==1 or not shape[-4:-1]==self.shape:
            raise ValueError("Invalid or incompatible shape of density on assignment: is {}, required: NDHW1 with DHW={}".format(shape, self.shape))
        if self.is_var:
            self._d.assign(d)
            if self._inflow is not None and inflow is not None:
                self._inflow.assign(inflow)
        else:
            with tf.device(self._device):
                self._d = tf.identity(d)
                if self._inflow is not None and inflow is not None:
                    self._inflow = tf.identity(inflow)
    def var_list(self):
        """The backing tf.Variables (density, plus inflow if present)."""
        if self.is_var:
            if self._inflow is not None:
                return [self._d, self._inflow]
            return [self._d]
        else:
            raise TypeError("This DensityGrid is not a variable.")
    def get_variables(self):
        """The backing tf.Variables keyed by name."""
        if self.is_var:
            var_dict = {'density': self._d}
            if self._inflow is not None:
                var_dict['inflow'] = self._inflow
            return var_dict
        else:
            raise TypeError("This DensityGrid is not a variable.")
    def save(self, path):
        """Write density (and hull/inflow data if present) as a compressed .npz; see from_file()."""
        density = self._d
        if isinstance(density, (tf.Tensor, tf.Variable)):
            density = density.numpy()
        save = {}
        if self.hull is not None:
            hull = self.hull
            if isinstance(hull, (tf.Tensor, tf.Variable)):
                hull = hull.numpy()
            save['hull']=hull
        if self._inflow is not None:
            inflow = self._inflow
            if isinstance(inflow, (tf.Tensor, tf.Variable)):
                inflow = inflow.numpy()
            save['inflow']=inflow
            if self.inflow_mask is not None:
                inflow_mask = self.inflow_mask
                if isinstance(inflow_mask, (tf.Tensor, tf.Variable)):
                    inflow_mask = inflow_mask.numpy()
                save['inflow_mask']=inflow_mask
            save['inflow_offset']=np.asarray(self.inflow_offset)
        np.savez_compressed(path, density, **save)
    def mean(self):
        return tf.reduce_mean(self.d)
    def stats(self, mask=None, state=None, **warp_kwargs):
        '''
        Summary statistics of the density (and warp error vs. the previous state).
        mask: optional binary float mask, stats only consider cells>0.5
        state: optional frame state; if it has a previous frame with density and
            velocity, the squared advection error is included as "warp_SE".
        '''
        d = self.d
        if mask is not None:
            mask = mask if mask.dtype==tf.bool else tf.greater(mask, 0.5)
            d = tf.boolean_mask(d, mask)
        stats = {
            'density': tf_tensor_stats(d, as_dict=True),
            'shape':self.shape,
        }
        if state is not None and state.prev is not None and state.prev.density is not None and state.prev.velocity is not None:
            warp_SE = tf.squared_difference(state.prev.density_advected(**warp_kwargs), self.d)
            if mask is not None:
                warp_SE = tf.boolean_mask(warp_SE, mask)
            stats["warp_SE"] = tf_tensor_stats(warp_SE, as_dict=True)
        else:
            # No previous frame: report zero warp error so the key is always present.
            stats["warp_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
        return stats
class VelocityGrid:
@staticmethod
def component_shapes(centered_shape):
x_shape = copy.copy(centered_shape)
x_shape[2] +=1
y_shape = copy.copy(centered_shape)
y_shape[1] +=1
z_shape = copy.copy(centered_shape)
z_shape[0] +=1
return x_shape, y_shape, z_shape
def __init__(self, centered_shape, std=0.1, as_var=True, x=None, y=None, z=None, boundary=None, scale_renderer=None, warp_renderer=None, *, coords=None, lod=None, device=None, var_name="velocity", trainable=True):
self.centered_shape = centered_shape.tolist() if isinstance(centered_shape, np.ndarray) else centered_shape
self.x_shape, self.y_shape, self.z_shape = VelocityGrid.component_shapes(self.centered_shape)
self.set_boundary(boundary)
self.is_var = as_var
self._device = device
self._name = var_name
self._is_trainable = trainable
if as_var:
if x is not None:
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid shape of velocity x component on assignment")
if y is not None:
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid shape of velocity y component on assignment")
if z is not None:
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid shape of velocity z component on assignment")
# in a box
#rand_init = tf.random_normal_initializer(0.0, std)
std = tf.abs(std)
rand_init = tf.random_uniform_initializer(-std, std)
# maybe even uniformly in space and in a sphere?: http://6degreesoffreedom.co/circle-random-sampling/
with tf.device(self._device):
self._x = tf.Variable(initial_value=x if x is not None else rand_init(shape=[1]+self.x_shape+[1], dtype=tf.float32), name=var_name + '_x', trainable=True)
self._y = tf.Variable(initial_value=y if y is not None else rand_init(shape=[1]+self.y_shape+[1], dtype=tf.float32), name=var_name + '_y', trainable=True)
self._z = tf.Variable(initial_value=z if z is not None else rand_init(shape=[1]+self.z_shape+[1], dtype=tf.float32), name=var_name + '_z', trainable=True)
else:
if x is None:
x = tf.constant(tf.random.uniform([1]+self.x_shape+[1], -std, std, dtype=tf.float32))
if y is None:
y = tf.constant(tf.random.uniform([1]+self.y_shape+[1], -std, std, dtype=tf.float32))
if z is None:
z = tf.constant(tf.random.uniform([1]+self.z_shape+[1], -std, std, dtype=tf.float32))
self.assign(x,y,z)
if lod is None:
lod = tf.zeros([1]+self.centered_shape+[1])
with tf.device(self._device):
self.lod_pad = tf.identity(lod)
self.scale_renderer = scale_renderer
if self.scale_renderer is not None:
if (self.outer_bounds=='CLOSED' and self.scale_renderer.boundary_mode!='BORDER') \
or (self.outer_bounds=='OPEN' and self.scale_renderer.boundary_mode!='CLAMP'):
LOG.warning("Velocity outer boundary %s does not match scale renderer boundary mode %s", self.outer_bounds, self.scale_renderer.boundary_mode)
self.warp_renderer = warp_renderer
if self.warp_renderer is not None:
if (self.outer_bounds=='CLOSED' and self.warp_renderer.boundary_mode!='BORDER') \
or (self.outer_bounds=='OPEN' and self.warp_renderer.boundary_mode!='CLAMP'):
LOG.warning("Velocity outer boundary %s does not match scale renderer boundary mode %s", self.outer_bounds, self.warp_renderer.boundary_mode)
def set_boundary(self, boundary):
assert (boundary is None) or isinstance(boundary, Zeroset)
self.boundary = boundary
self.outer_bounds = self.boundary.outer_bounds if self.boundary is not None else "OPEN"
@property
def trainable(self):
return self._is_trainable and self.is_var
@property
def x(self):
v = self._x
if self.boundary is not None:
v*= self.boundary._hull_simple_staggered_component(-2)
return v
@property
def y(self):
v = self._y
if self.boundary is not None:
v*= self.boundary._hull_simple_staggered_component(-3)
return v
@property
def z(self):
v = self._z
if self.boundary is not None:
v*= self.boundary._hull_simple_staggered_component(-4)
return v
@classmethod
def from_centered(cls, centered_grid, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None, var_name="velocity", trainable=True):
centered_shape = shape_list(centered_grid)
assert len(centered_shape)==5
assert centered_shape[-1]==3
assert centered_shape[0]==1
centered_shape = centered_shape[-4:-1]
vel_grid = cls(centered_shape, as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name, trainable=trainable)
x,y,z = vel_grid._centered_to_staggered(centered_grid)
vel_grid.assign(x,y,z)
return vel_grid
@classmethod
def from_file(cls, path, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None, var_name="velocity", trainable=True):
try:
with np.load(path) as vel:
if 'centered_shape' not in vel:#legacy
shape = shape_list(vel["vel_x"])
LOG.debug("%s", shape)
shape[-2] -=1
shape = shape[1:-1]
else:
shape = vel['centered_shape'].tolist()
vel_grid = cls(shape, x=vel["vel_x"].astype(np.float32), y=vel["vel_y"].astype(np.float32), z=vel["vel_z"].astype(np.float32), \
as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name, trainable=trainable)
except:
LOG.warning("Failed to load velocity from '%s':", path, exc_info=True)
return None
else:
return vel_grid
@classmethod
def from_scalarFlow_file(cls, path, as_var=True, shape=None, boundary=None, scale_renderer=None, warp_renderer=None, device=None, var_name="sF_velocity", trainable=True):
# sF velocities are stored as combined staggered grid with upper cells missing, DHWC with C=3
velocity = load_numpy(path).astype(np.float32)[::-1]
v_shape = GridShape.from_tensor(velocity)
velocity = v_shape.normalize_tensor_shape(velocity) #.reshape([1] + list(velocity.shape)) # NDHWC
velocity = tf.constant(velocity, dtype=tf.float32)
v_shape = v_shape.zyx.value
v_x, v_y, v_z = tf.split(velocity, 3, axis=-1)
p0 = (0,0)
# extend missing upper cell
v_x = tf.pad(v_x, [p0,p0,p0,(0,1),p0], "SYMMETRIC")
v_y = tf.pad(v_y, [p0,p0,(0,1),p0,p0], "SYMMETRIC")
v_z = tf.pad(-v_z, [p0,(1,0),p0,p0,p0], "SYMMETRIC") #z value/direction reversed, pad lower value as axis is reversed (?)
#v_shape = spacial_shape_list(velocity)
if shape is not None and v_shape!=shape:
assert len(shape)==3
if scale_renderer is None:
raise ValueError("No renderer provided to scale velocity.")
# shape = GridShape(shape).zyx
# vel_scale = shape/v_shape #[o/i for i,o in zip(v_shape, shape)] #z,y,x
LOG.debug("scaling scalarFlow velocity from %s to %s with magnitude scale %s", v_shape, shape)
v_tmp = cls(v_shape, x=v_x, y=v_y, z=v_z, as_var=False, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name="sF_tmp", trainable=False)
v_x, v_y, v_z = v_tmp.scaled(shape, scale_magnitude=True)
# can only scale 1 and 4 channel grids
# v_x = scale_renderer.resample_grid3D_aligned(v_x, shape.value)*vel_scale.x#[2]
# v_y = scale_renderer.resample_grid3D_aligned(v_y, shape.value)*vel_scale.y#[1]
# v_z = scale_renderer.resample_grid3D_aligned(v_z, shape.value)*vel_scale.z#[0]
# velocity = tf.concat([v_x, v_y, v_z], axis=-1)
v_shape = shape
#return cls.from_centered(velocity,as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name)
return cls(v_shape, x=v_x, y=v_y, z=v_z,as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device, var_name=var_name, trainable=trainable)
def copy(self, as_var=None, device=None, var_name=None, trainable=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_cpy'
if trainable is None:
trainable = self._is_trainable
grid = VelocityGrid(self.centered_shape, x=tf.identity(self._x), y=tf.identity(self._y), z=tf.identity(self._z), as_var=as_var, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=device, var_name=var_name, trainable=trainable)
return grid
	def scaled(self, centered_shape, scale_magnitude=True):
		"""Resample the staggered components to a new centered resolution.

		centered_shape: target centered shape, a list of length 3 (z,y,x order
			to match self.centered_shape).
		scale_magnitude: if True, also scale the velocity values by the
			per-axis resolution ratio so the per-step cell displacement is kept.

		Returns (x, y, z) staggered tensors; identity copies if the shape is
		unchanged. Raises ValueError for an invalid shape argument.
		"""
		if not (isinstance(centered_shape, list) and len(centered_shape)==3):
			raise ValueError("Invalid shape")
		#resample velocity
		if centered_shape!=self.centered_shape:
			with self.scale_renderer.profiler.sample("scale velocity"):
				x_shape, y_shape, z_shape = VelocityGrid.component_shapes(centered_shape)
				LOG.debug("Scaling velocity from %s to %s", self.centered_shape, centered_shape)
				x_scaled = self.scale_renderer.resample_grid3D_aligned(self.x, x_shape, align_x='center')
				y_scaled = self.scale_renderer.resample_grid3D_aligned(self.y, y_shape, align_y='center')
				z_scaled = self.scale_renderer.resample_grid3D_aligned(self.z, z_shape, align_z='center')
				if scale_magnitude:
					vel_scale = [o/i for i,o in zip(self.centered_shape, centered_shape)] #z,y,x
					LOG.debug("Scaling velocity magnitude with %s", vel_scale)
					# vel_scale is z,y,x while components are indexed x,y,z
					x_scaled *= vel_scale[2]
					y_scaled *= vel_scale[1]
					z_scaled *= vel_scale[0]
		else:
			LOG.debug("No need to scale velocity to same shape %s", self.centered_shape)
			x_scaled = tf.identity(self.x)
			y_scaled = tf.identity(self.y)
			z_scaled = tf.identity(self.z)
		return x_scaled, y_scaled, z_scaled
def copy_scaled(self, centered_shape, scale_magnitude=True, as_var=None, device=None, var_name=None, trainable=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_scaled'
if trainable is None:
trainable = self._is_trainable
x_scaled, y_scaled, z_scaled = self.scaled(centered_shape, scale_magnitude)
grid = VelocityGrid(centered_shape, x=x_scaled, y=y_scaled, z=z_scaled, as_var=as_var, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=device, var_name=var_name, trainable=trainable)
return grid
def _lut_warp_vel(self, shape, dt=1.0):
# use to get lookup positions to warp velocity components
vel = self._sampled_to_shape(shape) #3 x 1DHW1
vel_lut = [- vel[i]*dt for i in range(len(vel))] #3 x 1DHW1
vel_lut = tf.concat(vel_lut, axis = -1) #1DHW3
return vel_lut
	def _warp_vel_component(self, data, lut, order=1, dt=1.0, clamp="NONE"):
		"""Warp one staggered velocity component with the given lookup offsets.

		data: one staggered component tensor.
		lut: relative lookup offsets for data's grid.
		order: 1 = semi-Lagrangian, 2 = MacCormack correction.
		clamp: "NONE", or with order 2: "MC" (hard min/max clamp, currently
			raises NotImplementedError) or "MC_SMOOTH" (revert to the
			first-order result where the correction leaves the local bounds).

		Raises ValueError for unsupported orders.
		"""
		if order<1 or order>2:
			raise ValueError("Unsupported warp order '{}'".format(order))
		warped = self.warp_renderer._sample_LuT(data, lut, True, relative=True)
		clamp = clamp.upper()
		if order==2: #MacCormack
			# warp the result backwards; the deviation from the input estimates the advection error
			warped_back = self.warp_renderer._sample_LuT(warped, -lut, True, relative=True)
			corrected = warped + 0.5*(data-warped_back)
			if clamp=="MC" or clamp=="MC_SMOOTH":
				#raise NotImplementedError("MacCormack clamping has not been implemented.")
				# sample local min/max of the input by temporarily switching
				# (and afterwards restoring) the renderer's filter mode
				fm = self.warp_renderer.filter_mode
				self.warp_renderer.filter_mode = "MIN"
				data_min = self.warp_renderer._sample_LuT(data, lut, True, relative=True)
				self.warp_renderer.filter_mode = "MAX"
				data_max = self.warp_renderer._sample_LuT(data, lut, True, relative=True)
				self.warp_renderer.filter_mode = fm
				if clamp=='MC':
					#LOG.warning("Experimental clamp for MacCormack velocity advection.")
					raise NotImplementedError("MIM and MAX warp sampling have wrong gradients.")
					corrected = tf.clip_by_value(corrected, data_min, data_max)
				if clamp=='MC_SMOOTH':
					#LOG.warning("Experimental 'revert' clamp for MacCormack velocity advection.")
					# fall back to the semi-Lagrangian result where the correction is out of the local bounds
					clamp_OOB = tf.logical_or(tf.less(corrected, data_min), tf.greater(corrected, data_max))
					corrected = tf.where(clamp_OOB, warped, corrected)
			warped = corrected
		return warped
def warped(self, vel_grid=None, order=1, dt=1.0, clamp="NONE"):
if vel_grid is None:
#vel_grid = self
pass
elif not isinstance(vel_grid, VelocityGrid):
raise TypeError("Invalid VelocityGrid")
with self.warp_renderer.profiler.sample("warp velocity"):
LOG.debug("Warping velocity grid")
#TODO will cause errors if grid shapes do not match, resample if necessary?
if vel_grid is None:
lut_x = tf.concat([-vel*dt for vel in self._sampled_to_component_shape('X', concat=False)], axis=-1)
else:
lut_x = vel_grid._lut_warp_vel(self.x_shape, dt)
x_warped = self._warp_vel_component(self.x, lut_x, order=order, dt=dt, clamp=clamp)
del lut_x
if vel_grid is None:
lut_y = tf.concat([-vel*dt for vel in self._sampled_to_component_shape('Y', concat=False)], axis=-1)
else:
lut_y = vel_grid._lut_warp_vel(self.y_shape, dt)
y_warped = self._warp_vel_component(self.y, lut_y, order=order, dt=dt, clamp=clamp)
del lut_y
if vel_grid is None:
lut_z = tf.concat([-vel*dt for vel in self._sampled_to_component_shape('Z', concat=False)], axis=-1)
else:
lut_z = vel_grid._lut_warp_vel(self.z_shape, dt)
z_warped = self._warp_vel_component(self.z, lut_z, order=order, dt=dt, clamp=clamp)
del lut_z
return x_warped, y_warped, z_warped
def copy_warped(self, vel_grid=None, as_var=None, order=1, dt=1.0, device=None, var_name=None, clamp="NONE", trainable=None):
if as_var is None:
as_var = self.is_var
if as_var and var_name is None:
var_name = self._name + '_warped'
if trainable is None:
trainable = self._is_trainable
x_warped, y_warped, z_warped = self.warped(vel_grid, order, dt, clamp=clamp)
grid = VelocityGrid(self.centered_shape, x=x_warped, y=y_warped, z=z_warped, as_var=as_var, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=device, var_name=var_name, trainable=trainable)
return grid
def divergence_free(self, residual=1e-5):
raise NotImplementedError
def var_list(self):
if self.is_var:
return [self._x, self._y, self._z]
else:
raise TypeError("This VelocityGrid is not a variable.")
def get_variables(self):
if self.is_var:
return {'velocity_x': self._x, 'velocity_y': self._y, 'velocity_z': self._z}
else:
raise TypeError("This VelocityGrid is not a variable.")
def save(self, path):
np.savez_compressed(path, centered_shape=self.centered_shape, vel_x=self.x.numpy(), vel_y=self.y.numpy(), vel_z=self.z.numpy())
def assign(self, x,y,z):
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid or incompatible shape of velocity x component on assignment: is {}, required: NDHW1 with DHW={}".format(x_shape, self.x_shape))
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid or incompatible shape of velocity y component on assignment: is {}, required: NDHW1 with DHW={}".format(y_shape, self.y_shape))
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid or incompatible shape of velocity z component on assignment: is {}, required: NDHW1 with DHW={}".format(z_shape, self.z_shape))
if self.is_var:
self._x.assign(x)
self._y.assign(y)
self._z.assign(z)
else:
with tf.device(self._device):
self._x = tf.identity(x)
self._y = tf.identity(y)
self._z = tf.identity(z)
def assign_add(self, x,y,z):
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid or incompatible shape of velocity x component on assignment: is {}, required: NDHW1 with DHW={}".format(x_shape, self.x_shape))
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid or incompatible shape of velocity y component on assignment: is {}, required: NDHW1 with DHW={}".format(y_shape, self.y_shape))
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid or incompatible shape of velocity z component on assignment: is {}, required: NDHW1 with DHW={}".format(z_shape, self.z_shape))
if self.is_var:
self._x.assign_add(x)
self._y.assign_add(y)
self._z.assign_add(z)
else:
with tf.device(self._device):
self._x = tf.identity(self._x+x)
self._y = tf.identity(self._y+y)
self._z = tf.identity(self._z+z)
def assign_sub(self, x,y,z):
x_shape = shape_list(x)
if not len(x_shape)==5 or not x_shape[-1]==1 or not x_shape[-4:-1]==self.x_shape:
raise ValueError("Invalid or incompatible shape of velocity x component on assignment: is {}, required: NDHW1 with DHW={}".format(x_shape, self.x_shape))
y_shape = shape_list(y)
if not len(y_shape)==5 or not y_shape[-1]==1 or not y_shape[-4:-1]==self.y_shape:
raise ValueError("Invalid or incompatible shape of velocity y component on assignment: is {}, required: NDHW1 with DHW={}".format(y_shape, self.y_shape))
z_shape = shape_list(z)
if not len(z_shape)==5 or not z_shape[-1]==1 or not z_shape[-4:-1]==self.z_shape:
raise ValueError("Invalid or incompatible shape of velocity z component on assignment: is {}, required: NDHW1 with DHW={}".format(z_shape, self.z_shape))
if self.is_var:
self._x.assign_sub(x)
self._y.assign_sub(y)
self._z.assign_sub(z)
else:
with tf.device(self._device):
self._x = tf.identity(self._x-x)
self._y = tf.identity(self._y-y)
self._z = tf.identity(self._z-z)
def scale_magnitude(self, scale):
if np.isscalar(scale):
scale = [scale]*3
assert len(scale)==3
self.assign(self.x*scale[0],self.y*scale[1], self.z*scale[2])
	def _centered_to_staggered(self, centered):
		"""Resample a centered NDHW3 grid (N=1, spatial dims equal to self.centered_shape) to the three staggered component grids.

		Returns (x, y, z) staggered component tensors.
		"""
		centered_shape = shape_list(centered)
		assert len(centered_shape)==5
		assert centered_shape[-1]==3
		assert centered_shape[0]==1
		assert self.centered_shape==centered_shape[-4:-1]
		with self.scale_renderer.profiler.sample("centered velocity to staggered"):
			x,y,z= tf.split(centered, 3, axis=-1)
			# input transforms scaled by 2/target-size per axis (x,y,z order via [::-1])
			centered_x_transform = GridTransform(self.centered_shape, scale=[2./_ for _ in self.x_shape[::-1]], center=True)
			centered_y_transform = GridTransform(self.centered_shape, scale=[2./_ for _ in self.y_shape[::-1]], center=True)
			centered_z_transform = GridTransform(self.centered_shape, scale=[2./_ for _ in self.z_shape[::-1]], center=True)
			# only shape important here
			staggered_x_transform = GridTransform(self.x_shape)#,translation=[0.5,0,0])
			staggered_y_transform = GridTransform(self.y_shape)#,translation=[0,0.5,0])
			staggered_z_transform = GridTransform(self.z_shape)#,translation=[0,0,0.5])
			x = tf.squeeze(self.scale_renderer._sample_transform(x, [centered_x_transform], [staggered_x_transform]),1)
			y = tf.squeeze(self.scale_renderer._sample_transform(y, [centered_y_transform], [staggered_y_transform]),1)
			z = tf.squeeze(self.scale_renderer._sample_transform(z, [centered_z_transform], [staggered_z_transform]),1)
		return x,y,z
def _staggeredTensor_to_components(self, tensor, reverse=False):
tensor_shape = GridShape.from_tensor(tensor)
# assert len(tensor_shape)==5
assert tensor_shape.c==3
assert tensor_shape.n==1
assert np.asarray(self.tensor_shape)+np.asarray([1,1,1])== tensor_shape.xyz.as_shape() #tensor_shape[-4:-1]
tensor = tensor_shape.normalize_tensor_shape(tensor)
components = tf.split(tensor, 3, axis=-1)
if reverse:
components = components[::-1]
x = components[0][:,:-1,:-1,:]
y = components[0][:,:-1,:,:-1]
z = components[0][:,:,:-1,:-1]
return x,y,z
def as_staggeredTensor(self, reverse=False):
z = (0,0)
p = (0,1)
components = [
tf.pad(self.x, [z,p,p,z,z]),
tf.pad(self.y, [z,p,z,p,z]),
tf.pad(self.z, [z,z,p,p,z]),
]
if reverse:
components = components[::-1]
return tf.concat(components, axis=-1)
	def _sampled_to_shape(self, shape):
		"""Resample all three staggered components to one common grid `shape`.

		Returns [x, y, z] component tensors; a component whose grid already
		matches `shape` is passed through as an identity copy.
		"""
		with self.scale_renderer.profiler.sample("velocity to shape"):
			# uniform scaling, centered grids
			#_sample_transform assumes the output grid to be in a centered [-1,1] cube, so scale input accordingly
			# scale with output shape to get the right 0.5 offset
			scale = [2./_ for _ in shape[::-1]]
			staggered_x_transform = GridTransform(self.x_shape, scale=scale, center=True)
			staggered_y_transform = GridTransform(self.y_shape, scale=scale, center=True)
			staggered_z_transform = GridTransform(self.z_shape, scale=scale, center=True)
			# only shape important here
			sample_transform = GridTransform(shape)
			#check if shape matches component shape to avoid sampling (e.g. for self warping)
			vel_sampled = [
				tf.squeeze(self.scale_renderer._sample_transform(self.x, [staggered_x_transform], [sample_transform]),1) \
				if not shape==self.x_shape else tf.identity(self.x), #1DHW1
				tf.squeeze(self.scale_renderer._sample_transform(self.y, [staggered_y_transform], [sample_transform]),1) \
				if not shape==self.y_shape else tf.identity(self.y),
				tf.squeeze(self.scale_renderer._sample_transform(self.z, [staggered_z_transform], [sample_transform]),1) \
				if not shape==self.z_shape else tf.identity(self.z),
			]
			return vel_sampled
def centered(self, pad_lod=False, concat=True):#, shape=None):
shape = self.centered_shape
with self.warp_renderer.profiler.sample("velocity to centered"):
#vel_centered = self._sampled_to_shape(shape)#3 x 1DHW1
h = tf.constant(0.5, dtype=tf.float32)
vel_centered = [
(self.x[:,:,:,1:] + self.x[:,:,:,:-1])*h,
(self.y[:,:,1:] + self.y[:,:,:-1])*h,
(self.z[:,1:] + self.z[:,:-1])*h,
]
if pad_lod:
vel_centered.append(self.lod_pad)#4 x 1DHW1
if concat:
vel_centered = tf.concat(vel_centered, axis=-1) #1DHW[3|4]
return vel_centered
	def _sampled_to_component_shape(self, component, pad_lod=False, concat=True):
		"""Sample all three velocity components onto the staggered grid of one component.

		component: 'X', 'Y' or 'Z' (case-insensitive); that component is passed
			through unchanged, the other two are resampled with constant +-0.5
			cell offsets.
		pad_lod: append self.lod_pad as an extra channel.
		concat: concatenate to one 1DHW[3|4] tensor instead of a list.
		"""
		# grids have the same spacing/resolution, so global/constant offset
		component = component.upper()
		offset_coord_from = 0.5
		offset_coord_to = -0.5
		with self.warp_renderer.profiler.sample("velocity to component shape"):
			vel_sampled = []
			# sample x
			vel_sampled.append(tf.identity(self.x) if component=='X' else \
				tf.squeeze(self.warp_renderer.resample_grid3D_offset(self.x, \
					offsets = [[offset_coord_from,offset_coord_to,0.0] if component=='Y' else [offset_coord_from,0.0,offset_coord_to],], \
					target_shape = self.y_shape if component=='Y' else self.z_shape), 1))
			# sample y
			vel_sampled.append(tf.identity(self.y) if component=='Y' else \
				tf.squeeze(self.warp_renderer.resample_grid3D_offset(self.y, \
					offsets = [[offset_coord_to,offset_coord_from,0.0] if component=='X' else [0.0,offset_coord_from,offset_coord_to],], \
					target_shape = self.x_shape if component=='X' else self.z_shape), 1))
			# sample z
			vel_sampled.append(tf.identity(self.z) if component=='Z' else \
				tf.squeeze(self.warp_renderer.resample_grid3D_offset(self.z, \
					offsets = [[offset_coord_to,0.0,offset_coord_from] if component=='X' else [0.0,offset_coord_to,offset_coord_from],], \
					target_shape = self.x_shape if component=='X' else self.y_shape), 1))
			if pad_lod:
				vel_sampled.append(self.lod_pad)#4 x 1DHW1
			if concat:
				vel_sampled = tf.concat(vel_sampled, axis=-1) #1DHW[3|4]
			return vel_sampled
def centered_lut_grid(self, dt=1.0):
vel_centered = self.centered()
#vel_lut = tf.concat([self.coords - vel_centered * dt, self.lod_pad], axis = -1)
vel_lut = vel_centered * (- dt)
return vel_lut
	def warp(self, data, order=1, dt=1.0, clamp="NONE"):
		"""Advect a centered scalar grid `data` (spatial shape must match centered_shape) with this velocity.

		order: 1 = semi-Lagrangian, 2 = MacCormack correction; other values raise.
		clamp: "NONE"; with order 2 also "MC" (hard min/max clamp, currently
			raises NotImplementedError) or "MC_SMOOTH" (revert to the
			first-order result where out of local bounds); "NEGATIVE" clips
			negative values after warping.
		"""
		with self.warp_renderer.profiler.sample("warp scalar"):
			v = self.centered_lut_grid(dt)
			data_shape = spacial_shape_list(data)
			if data_shape!=self.centered_shape:
				raise ValueError("Shape mismatch")
			LOG.debug("Warping density grid")
			data_warped = self.warp_renderer._sample_LuT(data, v, True, relative=True)
			clamp = clamp.upper()
			if order==2: #MacCormack
				# backward warp of the result gives an error estimate for the correction
				data_warped_back = self.warp_renderer._sample_LuT(data_warped, -v, True, relative=True)
				data_corr = data_warped + 0.5*(data-data_warped_back)
				if clamp=='MC' or clamp=='MC_SMOOTH': #smooth clamp
					# sample local min/max by temporarily switching (then restoring) the filter mode
					fm = self.warp_renderer.filter_mode
					self.warp_renderer.filter_mode = "MIN"
					data_min = self.warp_renderer._sample_LuT(data, v, True, relative=True)
					self.warp_renderer.filter_mode = "MAX"
					data_max = self.warp_renderer._sample_LuT(data, v, True, relative=True)
					self.warp_renderer.filter_mode = fm
					if clamp=='MC':
						#LOG.warning("Experimental clamp for MacCormack density advection.")
						raise NotImplementedError("MIM and MAX warp sampling have wrong gradients.")
						data_corr = tf.clip_by_value(data_corr, data_min, data_max)
					if clamp=='MC_SMOOTH':
						#LOG.warning("Experimental 'revert' clamp for MacCormack density advection.")
						clamp_OOB = tf.logical_or(tf.less(data_corr, data_min), tf.greater(data_corr, data_max))
						data_corr = tf.where(clamp_OOB, data_warped, data_corr)
				data_warped = data_corr
			elif order>2:
				raise ValueError("Unsupported warp order '{}'".format(order))
			if clamp=='NEGATIVE':
				data_warped = tf.maximum(data_warped, 0)
			return data_warped
def with_buoyancy(self, value, scale_grid):
# value: [x,y,z]
# scale_grid: density 1DHW1
if isinstance(scale_grid, DensityGrid):
scale_grid = scale_grid.with_inflow() #.d
assert len(shape_list(value))==1
if not isinstance(value, (tf.Tensor, tf.Variable)):
value = tf.constant(value, dtype=tf.float32)
value = tf.reshape(value, [1,1,1,1,shape_list(value)[0]])
buoyancy = value*scale_grid # 1DHW3
return self + buoyancy
"""
def apply_buoyancy(self, value, scale_grid):
# value: [x,y,z]
# scale_grid: density 1DHW1
assert len(shape_list(value))==1
value = tf.reshape(tf.constant(value, dtype=tf.float32), [1,1,1,1,shape_list(value)[0]])
buoyancy = value*scale_grid # 1DHW3
self += buoyancy
"""
#centered
def divergence(self, world_scale=[1,1,1]):
#out - in per cell, per axis
x_div = self.x[:,:,:,1:,:] - self.x[:,:,:,:-1,:]
y_div = self.y[:,:,1:,:,:] - self.y[:,:,:-1,:,:]
z_div = self.z[:,1:,:,:,:] - self.z[:,:-1,:,:,:]
# sum to get total divergence per cell
div = x_div*world_scale[0]+y_div*world_scale[1]+z_div*world_scale[2]
return div
#centered
def magnitude(self, world_scale=[1,1,1]):
with self.warp_renderer.profiler.sample("magnitude"):
v = self.centered(pad_lod=False)*tf.constant(world_scale, dtype=tf.float32)
return tf_norm2(v, axis=-1, keepdims=True)
	def stats(self, world_scale=[1,1,1], mask=None, state=None, **warp_kwargs):
		'''
		Collect scalar statistics of this velocity grid as a dict of stat-dicts.

		world_scale: per-axis scale applied to divergence and magnitude.
		mask: optional binary float mask, stats only consider cells>0.5
		state: if it has a previous state with velocity, warp-error statistics
			against the advected previous velocity are included; otherwise the
			same keys are filled with zero-tensor statistics.
		warp_kwargs: forwarded to the previous state's velocity_advected().
		'''
		x = self.x
		if mask is not None:
			# resample the centered mask to each staggered component grid;
			# mask_x/y/z are reused for the warp-SE statistics below
			mask_x = tf.greater(self.scale_renderer.resample_grid3D_aligned(mask, self.x_shape, align_x='stagger_output'), 0.5)
			x = tf.boolean_mask(x, mask_x)
		y = self.y
		if mask is not None:
			mask_y = tf.greater(self.scale_renderer.resample_grid3D_aligned(mask, self.y_shape, align_y='stagger_output'), 0.5)
			y = tf.boolean_mask(y, mask_y)
		z = self.z
		if mask is not None:
			mask_z = tf.greater(self.scale_renderer.resample_grid3D_aligned(mask, self.z_shape, align_z='stagger_output'), 0.5)
			z = tf.boolean_mask(z, mask_z)
		if mask is not None and mask.dtype!=tf.bool:
			# binarize the centered mask for the centered quantities below
			mask = tf.greater(mask, 0.5)
		divergence = self.divergence(world_scale)
		if mask is not None: divergence = tf.boolean_mask(divergence, mask)
		magnitude = self.magnitude(world_scale)
		if mask is not None: magnitude = tf.boolean_mask(magnitude, mask)
		stats = {
			'divergence': tf_tensor_stats(divergence, as_dict=True),
			'magnitude': tf_tensor_stats(magnitude, as_dict=True),
			'velocity_x': tf_tensor_stats(x, as_dict=True),
			'velocity_y': tf_tensor_stats(y, as_dict=True),
			'velocity_z': tf_tensor_stats(z, as_dict=True),
			'shape':self.centered_shape, 'bounds':self.outer_bounds,
		}
		if state is not None and state.prev is not None and state.prev.velocity is not None:
			# squared error between the advected previous velocity and this one, per staggered component
			prev_warped = state.prev.velocity_advected(**warp_kwargs)
			def vel_warp_SE_stats(prev, curr, mask):
				warp_SE = tf.squared_difference(prev, curr)
				if mask is not None:
					warp_SE = tf.boolean_mask(warp_SE, mask)
				return tf_tensor_stats(warp_SE, as_dict=True)
			stats["warp_x_SE"] = vel_warp_SE_stats(prev_warped.x, self.x, mask_x if mask is not None else None)
			stats["warp_y_SE"] = vel_warp_SE_stats(prev_warped.y, self.y, mask_y if mask is not None else None)
			stats["warp_z_SE"] = vel_warp_SE_stats(prev_warped.z, self.z, mask_z if mask is not None else None)
			warp_vdiff_mag = (prev_warped-self).magnitude()
			if mask is not None:
				warp_vdiff_mag = tf.boolean_mask(warp_vdiff_mag, mask)
			stats["warp_vdiff_mag"] = tf_tensor_stats(warp_vdiff_mag, as_dict=True)
			del warp_vdiff_mag
			# angle between previous and current centered velocity, only where both have non-negligible magnitude
			vel_CangleRad_mask = tf.greater(state.prev.velocity.magnitude() * self.magnitude(), 1e-8)
			if mask is not None:
				vel_CangleRad_mask = tf.logical_and(mask, vel_CangleRad_mask)
			warp_CangleRad = tf_angle_between(state.prev.velocity.centered(), self.centered(), axis=-1, keepdims=True)
			stats["warp_angleCM_rad"] = tf_tensor_stats(tf.boolean_mask(warp_CangleRad, vel_CangleRad_mask), as_dict=True)
			del warp_CangleRad
		else:
			# keep the key set stable so downstream logging does not need to branch
			stats["warp_x_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
			stats["warp_y_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
			stats["warp_z_SE"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
			stats["warp_vdiff_mag"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
			stats["warp_angleCM_rad"] = tf_tensor_stats(tf.zeros([1,1,1,1,1], dtype=tf.float32), as_dict=True)
		return stats
def __add__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
return VelocityGrid(self.centered_shape, x=self.x+other.x, y=self.y+other.y, z=self.z+other.z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
return VelocityGrid(self.centered_shape, x=self.x+x, y=self.y+y, z=self.z+z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
else:
return NotImplemented
def __iadd__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
self.assign_add(other.x, other.y, other.z)
return self
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
self.assign_add(x, y, z)
return self
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
return VelocityGrid(self.centered_shape, x=self.x-other.x, y=self.y-other.y, z=self.z-other.z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
return VelocityGrid(self.centered_shape, x=self.x-x, y=self.y-y, z=self.z-z, as_var=False, \
boundary=self.boundary, scale_renderer=self.scale_renderer, warp_renderer=self.warp_renderer, device=None)
else:
return NotImplemented
def __isub__(self, other):
if isinstance(other, VelocityGrid):
if self.centered_shape!=other.centered_shape:
raise ValueError("VelocityGrids of shape %s and %s are not compatible"%(self.centered_shape, other.centered_shape))
self.assign_sub(other.x, other.y, other.z)
return self
if isinstance(other, (np.ndarray, tf.Tensor, tf.Variable)):
other_shape = shape_list(other)
if self.centered_shape!=spacial_shape_list(other) or other_shape[0]!=1 or other_shape[-1]!=3:
raise ValueError("VelocityGrid of shape %s is not compatible with tensor of shape %s are not compatible"%(self.centered_shape, spacial_shape_list(other)))
x,y,z = self._centered_to_staggered(other)
self.assign_sub(x, y, z)
return self
else:
return NotImplemented
class State:
def __init__(self, density, velocity, frame, prev=None, next=None, transform=None, targets=None, targets_raw=None, bkgs=None):
self._density = None
if density is not None:
assert isinstance(density, DensityGrid)
self._density = density
self._velocity = None
if velocity is not None:
assert isinstance(velocity, VelocityGrid)
self._velocity = velocity
self.frame = frame
self.prev = prev
self.next = next
self.transform = transform
self.targets = targets
self.targets_raw = targets_raw
self.bkgs = bkgs
self.target_cameras = None
self.images = None
self.t = None
class StateIterator:
def __init__(self, state):
self.curr_state = state
def __next__(self):
if self.curr_state is not None:
state = self.curr_state
self.curr_state = state.next
return state
raise StopIteration
def __iter__(self):
return self.StateIterator(self)
@property
def density(self):
if self._density is not None:
return self._density
else:
raise AttributeError("State for frame {} does not contain density".format(self.frame))
@property
def velocity(self):
if self._velocity is not None:
return self._velocity
else:
raise AttributeError("State for frame {} does not contain velocity".format(self.frame))
@classmethod
def from_file(cls, path, frame, transform=None, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None, density_filename="density.npz", velocity_filename="velocity.npz"):
density = DensityGrid.from_file(os.path.join(path, density_filename), as_var=as_var, scale_renderer=scale_renderer, device=device)
velocity = VelocityGrid.from_file(os.path.join(path, velocity_filename), as_var=as_var, \
boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device)
state = cls(density, velocity, frame, transform=transform)
return state
@classmethod
def from_scalarFlow_file(cls, density_path, velocity_path, frame, transform=None, as_var=True, boundary=None, scale_renderer=None, warp_renderer=None, device=None):
density = DensityGrid.from_scalarFlow_file(density_path, as_var=as_var, scale_renderer=scale_renderer, device=device)
velocity = VelocityGrid.from_scalarFlow_file(velocity_path, as_var=as_var, \
boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device)
state = cls(density, velocity, frame, transform=transform)
return state
def copy(self, as_var=None, device=None):
s = State(self.density.copy(as_var=as_var, device=device), self.velocity.copy(as_var=as_var, device=device), self.frame)
m = copy.copy(self.__dict__)
del m["_velocity"]
del m["_density"]
del m["prev"]
del m["next"]
for k,v in m.items():
setattr(s,k,v)
return s
def copy_warped(self, order=1, dt=1.0, frame=None, as_var=None, targets=None, targets_raw=None, bkgs=None, device=None, clamp="NONE"):
d = self.density.copy_warped(order=order, dt=dt, as_var=as_var, device=device, clamp=clamp)
v = self.velocity.copy_warped(order=order, dt=dt, as_var=as_var, device=device, clamp=clamp)
return State(d, v, frame, transform=self.transform, targets=targets, targets_raw=targets_raw, bkgs=bkgs)
def get_density_transform(self):
if isinstance(self.transform, GridTransform):
return self.transform.copy_new_data(self.density.d)
else:
raise TypeError("state.transform is not a GridTransform")
def get_velocity_transform(self):
if isinstance(self.transform, GridTransform):
return self.transform.copy_new_data(self.velocity.lod_pad)
else:
raise TypeError("state.transform is not a GridTransform")
def render_density(self, render_ctx, custom_ops=None):
imgs = tf.concat(render_ctx.dens_renderer.render_density(self.get_density_transform(), light_list=render_ctx.lights, camera_list=self.target_cameras, cut_alpha=False, monochrome=render_ctx.monochrome, custom_ops=custom_ops), axis=0) #, background=bkg
imgs, d = tf.split(imgs, [3,1], axis=-1)
t = tf.exp(-d)
self.images = imgs
self.t = t
def density_advected(self, dt=1.0, order=1, clamp="NONE"):
return self.density.warped(self.velocity, order=order, dt=dt, clamp=clamp)#self.velocity.warp(self.density, scale_renderer)
def velocity_advected(self, dt=1.0, order=1, clamp="NONE"):
return self.velocity.copy_warped(order=order, dt=dt, as_var=False, clamp=clamp)
def rescale_density(self, shape, device=None):
self._density = self.density.copy_scaled(shape, device=device)
def rescale_velocity(self, shape, scale_magnitude=True, device=None):
self._velocity = self.velocity.copy_scaled(shape, scale_magnitude=scale_magnitude, device=device)
def rescale(self, dens_shape, vel_shape, device=None):
rescale_density(self, dens_shape, device=device)
rescale_velocity(self, vel_shape, device=device)
def var_list(self):
var_list = []
if self._density is not None:
var_list += self.density.var_list()
if self._velocity is not None:
var_list += self.velocity.var_list()
return var_list
def get_variables(self):
var_dict = {}
if self._density is not None:
var_dict.update(self.density.get_variables())
if self._velocity is not None:
var_dict.update(self.velocity.get_variables())
return var_dict
def stats(self, vel_scale=[1,1,1], mask=None, render_ctx=None, **warp_kwargs):
target_stats = None
if render_ctx is not None and getattr(self, "target_cameras", None) is not None:
target_stats = {}
self.render_density(render_ctx)
if getattr(self, "targets_raw") is not None and getattr(self, "bkgs") is not None:
target_stats["SE_raw"] = tf_tensor_stats(tf.math.squared_difference(self.images + self.bkgs*self.t, self.targets_raw), as_dict=True)
if getattr(self, "targets") is not None:
target_stats["SE"] = tf_tensor_stats(tf.math.squared_difference(self.images, self.targets), as_dict=True)
return self.density.stats(mask=mask, state=self, **warp_kwargs), self.velocity.stats(vel_scale, mask=mask, state=self, **warp_kwargs), target_stats
    def save(self, path, suffix=None):
        """Write density and velocity grids to `path` as .npz archives.

        With a suffix, files are named density_<suffix>.npz / velocity_<suffix>.npz.
        """
        self.density.save(os.path.join(path, 'density.npz' if suffix is None else 'density_'+suffix+'.npz'))
        self.velocity.save(os.path.join(path, 'velocity.npz' if suffix is None else 'velocity_'+suffix+'.npz'))
class Sequence:
    """Ordered collection of per-frame State objects, with helpers for loading,
    copying, forward advection and saving of whole sequences."""
    def __init__(self, states):
        self.sequence = [state for state in states]
    class SequenceIterator:
        """Simple forward iterator over a Sequence."""
        def __init__(self, sequence):
            self.seq = sequence
            self.idx = 0
        def __next__(self):
            if self.idx<len(self.seq):
                idx = self.idx
                self.idx +=1
                return self.seq[idx]
            raise StopIteration
    def __iter__(self):
        return self.SequenceIterator(self)
    def __getitem__(self, idx):
        return self.sequence[idx]
    def __len__(self):
        return len(self.sequence)
    @classmethod
    def from_file(cls, load_path, frames, transform=None, as_var=True, base_path=None, boundary=None, scale_renderer=None, warp_renderer=None, device=None, density_filename="density.npz", velocity_filename="velocity.npz", frame_callback=lambda idx, frame: None):
        """Load a Sequence from per-frame sub-directories 'frame_XXXXXX' of load_path,
        linking states via prev/next."""
        sequence = []
        prev = None
        for idx, frame in enumerate(frames):
            frame_callback(idx, frame)
            sub_dir = 'frame_{:06d}'.format(frame)
            data_path = os.path.join(load_path, sub_dir)
            state = State.from_file(data_path, frame, transform=transform, as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, \
                device=device, density_filename=density_filename, velocity_filename=velocity_filename)
            if base_path is not None:
                state.data_path = os.path.join(base_path, sub_dir)
                os.makedirs(state.data_path, exist_ok=True)
            state.prev = prev
            prev = state
            sequence.append(state)
        for i in range(len(sequence)-1):
            sequence[i].next = sequence[i+1]
        return cls(sequence)
    @classmethod
    def from_scalarFlow_file(cls, density_path_mask, velocity_path_mask, frames, transform=None, as_var=True, base_path=None, boundary=None, scale_renderer=None, warp_renderer=None, device=None, vel_frame_offset=1, frame_callback=lambda idx, frame: None):
        """Load a Sequence from ScalarFlow-style per-frame file masks; velocity
        frames are offset by vel_frame_offset relative to density frames."""
        sequence = []
        prev = None
        for idx, frame in enumerate(frames):
            frame_callback(idx, frame)
            sub_dir = 'frame_{:06d}'.format(frame)
            density_path = density_path_mask.format(frame=frame)
            velocity_path = velocity_path_mask.format(frame=frame+vel_frame_offset)
            state = State.from_scalarFlow_file(density_path, velocity_path, frame=frame, transform=transform, as_var=as_var, boundary=boundary, scale_renderer=scale_renderer, warp_renderer=warp_renderer, device=device)
            if base_path is not None:
                state.data_path = os.path.join(base_path, sub_dir)
                os.makedirs(state.data_path, exist_ok=True)
            state.prev = prev
            prev = state
            sequence.append(state)
        for i in range(len(sequence)-1):
            sequence[i].next = sequence[i+1]
        return cls(sequence)
    def copy(self, as_var=None, device=None):
        """Deep-copy all states and re-link their prev/next references."""
        s = [_.copy(as_var=as_var, device=device) for _ in self]
        for i in range(len(s)):
            if i>0:
                s[i].prev = s[i-1]
            if i<(len(s)-1):
                s[i].next = s[i+1]
        return Sequence(s)
    def insert_state(self, state, idx):
        """Insert `state` before position `idx`.

        Fix: the original passed the arguments to list.insert() in the wrong
        order (list.insert takes (index, object))."""
        self.sequence.insert(idx, state)
    def append_state(self, state):
        """Append `state` at the end of the sequence."""
        self.sequence.append(state)
    def start_iteration(self, iteration):
        """Notify every state that optimization iteration `iteration` begins."""
        for state in self:
            # Fix: the original referenced an undefined name `ctx` here.
            state.start_iteration(iteration)
    def stats(self, vel_scale=[1,1,1], mask=None, **warp_kwargs):
        """Per-state statistics for the whole sequence.

        Fix: the original also forwarded state=_ here, but State.stats() passes
        state=self down to the grid stats itself, so forwarding it caused a
        duplicate-keyword TypeError."""
        return [_.stats(vel_scale, mask=mask, **warp_kwargs) for _ in self]
    def save(self, path=None, suffix=None):
        """Save every state; with path=None each state must carry a data_path."""
        for state in self:
            if path is None and hasattr(state, 'data_path'):
                state.save(state.data_path, suffix)
            else:
                # NOTE(review): if path is None and the state has no data_path,
                # os.path.join(None, ...) will raise -- confirm callers always
                # provide one of the two.
                state.save(os.path.join(path, 'frame_{:06d}'.format(state.frame)), suffix)
    def densities_advect_fwd(self, dt=1.0, order=1, clamp='NONE'):
        """Overwrite each density with its predecessor advected forward.

        clamp='LOCAL' clamps to >=0 after each step, 'GLOBAL' once at the end."""
        if clamp is None or clamp.upper()not in ['LOCAL', 'GLOBAL']:
            for i in range(1, len(self)):
                self[i].density.assign(self[i-1].density_advected(order=order, dt=dt, clamp=clamp))
        elif clamp.upper()=='LOCAL': #clamp after each step, before the next warp
            for i in range(1, len(self)):
                self[i].density.assign(tf.maximum(self[i-1].density_advected(order=order, dt=dt), 0))
        elif clamp.upper()=='GLOBAL': #clamp after all warping
            for i in range(1, len(self)):
                self[i].density.assign(self[i-1].density_advected(order=order, dt=dt))
            for i in range(1, len(self)):
                self[i].density.assign(tf.maximum(self[i].density._d, 0))
    def velocities_advect_fwd(self, dt=1.0, order=1, clamp='NONE'):
        """Overwrite each velocity with its predecessor self-advected forward."""
        for i in range(1, len(self)):
            # NOTE(review): velocity.warped() presumably returns per-component
            # grids that assign() expects unpacked -- confirm against the
            # Velocity API.
            self[i].velocity.assign(*self[i-1].velocity.warped(order=order, dt=dt, clamp=clamp))
330492 | from . import config
from lxml import etree
from lxml.builder import E
class Request(object):
    """Base XML request.

    Builds the <request type="..."> tree pre-populated with the OS type and
    client version from the config module; `form` carries extra non-XML
    form fields for the HTTP layer.
    """
    def __init__(self, type):
        root = E.request(
            {'type': type},
            E.type_os(config.type_os),
            E.client_version(config.client_version)
        )
        self.tree = root
        self.form = {}
    def add_members(self, members):
        """Append one child element per (tag, text) item, preserving dict order."""
        for tag, text in members.items():
            child = etree.Element(tag)
            child.text = text
            self.tree.append(child)
class ProxyTestRequest(Request):
    """Request used to verify that the configured proxy is reachable."""
    def __init__(self):
        super().__init__('testproxy')
class TryLoginRequest(Request):
    """First-stage login attempt carrying the plain credentials."""
    def __init__(self, email, password):
        super().__init__('trylogin')
        self.add_members({
            'email': email,
            'password': password
        })
class LoginRequest(Request):
    """Full login with the password hash and the computer name."""
    def __init__(self, email, hash, computer):
        super().__init__('login')
        self.add_members({
            'email': email,
            'passwordhash': hash,
            'computer': computer
        })
class ClientLoginRequest(Request):
    """Client login identified by user id and computer id instead of email."""
    def __init__(self, userid, computerid, hash):
        super().__init__('client_login')
        self.add_members({
            'userid': userid,
            'computerid': computerid,
            'passwordhash': hash
        })
class PasswordRequest(Request):
    """Exchange the real password for its server-side hash."""
    def __init__(self, client_token, password):
        super().__init__('get_password_hash')
        self.add_members({
            'client_token': client_token,
            'real_password': password
        })
class ComputerIDRequest(Request):
    """Look up the server-assigned id for a user's computer name."""
    def __init__(self, userid, computer):
        super().__init__('get_computer_id')
        self.add_members({
            'userid': userid,
            'computer': computer
        })
class TokenResetRequest(Request):
    """Invalidate / reset the current client token."""
    def __init__(self, client_token):
        super().__init__('resettoken')
        self.add_members({
            'client_token': client_token
        })
class StatusRequest(Request):
    """Refresh the cached account information for this client token."""
    def __init__(self, client_token):
        super().__init__('refresh_account_info')
        self.add_members({
            'client_token': client_token
        })
class PremiumCheckRequest(Request):
    """Query whether the account associated with the token is premium."""
    def __init__(self, client_token):
        super().__init__('check_premium')
        self.add_members({
            'client_token': client_token
        })
class PulseRequest(Request):
    """Periodic heartbeat carrying the session tokens plus usage statistics."""
    def __init__(self, client_token, token, stats):
        super().__init__('pulse')
        self.add_members({
            'client_token': client_token,
            'token': token
        })
        # stats.dump() yields ready-made XML elements; append them directly.
        for elem in stats.dump():
            self.tree.append(elem)
class UploadComputerinfoRequest(Request):
    """Upload collected computer information.

    The payload travels in the HTTP form body (self.form) rather than in the
    XML tree.
    """
    def __init__(self, client_token, computer_info):
        super().__init__('upload_computerinfo')
        self.add_members({
            'client_token': client_token
        })
        self.form['computer_info'] = computer_info
__all__ = ["Request", "ProxyTestRequest", "TryLoginRequest", "LoginRequest",
"ClientLoginRequest", "PasswordRequest", "ComputerIDRequest",
"TokenResetRequest", "StatusRequest", "PremiumCheckRequest",
"PulseRequest", "UploadComputerinfoRequest"]
| StarcoderdataPython |
125701 | from threading import Thread
from time import sleep
def tf(arg):
    # Worker body: print a heartbeat once per second, `arg` times.
    # NOTE: Python 2 print-statement syntax -- this script targets Python 2.
    for i in range(arg):
        print "running"
        sleep(1)
if __name__ == "__main__":
thread = Thread(target = tf, args = (5, ))
thread.start()
# parallel
for i in range(6):
print "continuing"
sleep(1)
thread.join()
print "thread finished...exiting" | StarcoderdataPython |
8000455 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import panflute as pf
import subprocess
from subprocess import PIPE
import os.path as p
import os
import sys
import io
# Run from the repository root so relative test-filter paths resolve.
os.chdir(p.dirname(p.dirname(__file__)))
# Minimal markdown input and the output expected after the test filter ran.
in1 = '$1-1$'
out1 = '$1+1markdown$'
# Template for the verbose stderr line emitted by panflute.
out1err = 'panflute: data_dir={dd} sys_path={sp}'
# Same input, with the filter configured through a YAML metadata block.
in1a = """---
panflute-filters: test_filter
panflute-path: ./tests/test_panfl
...
{}
""".format(in1)
def test_all():
    """Exercise panflute's filter dispatch both in-process (pf.stdio) and,
    on Python >= 3.6, through real pandoc/panfl subprocess pipelines."""
    assert pf.get_filter_dir() == pf.get_filter_dir(hardcoded=False)
    def to_json(text):
        # Convert markdown to the pandoc JSON AST used as filter input.
        return pf.convert_text(text, 'markdown', 'json')
    def assert3(*extra_args, stdin):
        """
        filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False
        """
        sys.argv[1:] = []
        sys.argv.append('markdown')
        _stdout = io.StringIO()
        pf.stdio(*extra_args, input_stream=io.StringIO(stdin), output_stream=_stdout)
        _stdout = pf.convert_text(_stdout.getvalue(), 'json', 'markdown')
        assert _stdout == out1
    json1, json1a = to_json(in1), to_json(in1a)
    # Filter resolved from document metadata, with and without panfl mode.
    assert3(None, None, True, True, True, stdin=json1a)
    assert3(None, None, True, True, False, stdin=json1a)
    # Filter given explicitly: relative path, absolute path, module path, bare name.
    assert3(['test_filter/test_filter.py'], ['./tests/test_panfl'], False, True, True, stdin=json1)
    assert3([p.abspath('./tests/test_panfl/test_filter/test_filter.py')], [], False, True, True, stdin=json1)
    assert3(['test_filter.test_filter'], ['./tests/test_panfl'], False, True, True, stdin=json1)
    assert3(['test_filter'], ['./tests/test_panfl'], False, True, True, stdin=json1)
    # --------------------------------
    # The subprocess-based checks need subprocess.run(encoding=...), 3.6+.
    if sys.version_info[0:2] < (3, 6):
        return
    def run_proc(*args, stdin):
        # Run a command, feeding `stdin`; return stripped (stdout, stderr).
        proc = subprocess.run(args, stdout=PIPE, stderr=PIPE, input=stdin,
                              encoding='utf-8', cwd=os.getcwd())
        _stdout, _stderr = proc.stdout, proc.stderr
        return (_stdout if _stdout else '').strip(), (_stderr if _stderr else '').strip()
    def assert1(*extra_args):
        # pandoc -> panfl -> pandoc round trip through the test filter.
        _stdout = run_proc('pandoc', '-t', 'json', stdin=in1)[0]
        _stdout = run_proc('panfl', '-t', 'markdown', *extra_args, stdin=_stdout)[0]
        _stdout = run_proc('pandoc', '-f', 'json', '-t', 'markdown', stdin=_stdout)[0]
        assert _stdout == out1
    # assert1('-d', './tests/test_panfl', 'test_filter/test_filter.py')
    # assert1(p.abspath('./tests/test_panfl/test_filter/test_filter.py'))
    # assert1('-d', './tests/test_panfl', 'test_filter.test_filter')
    assert1('-d', './tests/test_panfl', 'test_filter')
    stdout = run_proc('pandoc', '--filter', 'panfl', '-t', 'markdown',
                      '--metadata', 'panflute-filters: test_filter',
                      '--metadata', 'panflute-path: ./tests/test_panfl',
                      stdin=in1)[0]
    assert stdout == out1
    stderr = run_proc('pandoc', '--filter', 'panfl', '-t', 'markdown',
                      '--metadata', 'panflute-verbose: True',
                      '--metadata', 'panflute-filters: "{}"'.format(
                          p.abspath('./tests/test_panfl/__filter__.py')),
                      '--metadata', 'panflute-path: --no-sys-path',
                      stdin=in1)[1]  # __filter__.py doesn't exist
    assert out1err.format(dd=False, sp=False) in stderr
    def assert2(*extra_args, dd, sp):
        # Verify the verbose data_dir/sys_path flags reported on stderr.
        _stdout = run_proc('pandoc', '-t', 'json',
                           '--metadata', 'panflute-verbose: True',
                           stdin=in1)[0]
        _stderr = run_proc('panfl', '-t', 'markdown', '-d', './tests/test_panfl',
                           p.abspath('./tests/test_panfl/__filter__.py'),
                           *extra_args, stdin=_stdout)[1]  # __filter__.py doesn't exist
        assert out1err.format(dd=dd, sp=sp) in _stderr
    assert2('--data-dir', '--no-sys-path', dd=True, sp=False)
    assert2('--no-sys-path', dd=False, sp=False)
# test_all()
# print(0, file=open(r'D:\log.txt', 'a', encoding='utf-8'))
# with io.StringIO() as f:
# pf.dump(doc, f)
# out = f.getvalue()
| StarcoderdataPython |
1722984 | <filename>salt/modules/django.py
import os
def _get_django_admin(bin_env):
if not bin_env:
da = 'django-admin.py'
else:
# try to get pip bin from env
if os.path.exists(os.path.join(bin_env, 'bin', 'django-admin.py')):
da = os.path.join(bin_env, 'bin', 'django-admin.py')
else:
da = bin_env
return da
def command(settings_module,
            command,
            bin_env=None,
            pythonpath=None,
            *args, **kwargs):
    """
    run arbitrary django management command

    Positional extras become bare ``--flag`` switches; keyword extras become
    ``--key=value`` options (keys starting with ``__`` are skipped, as salt
    injects such private kwargs).
    """
    da = _get_django_admin(bin_env)
    cmd = "{0} {1} --settings={2}".format(da, command, settings_module)
    if pythonpath:
        cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
    for arg in args:
        cmd = "{0} --{1}".format(cmd, arg)
    # Fix: dict.iteritems() is Python-2-only; .items() works on both 2 and 3.
    for key, value in kwargs.items():
        if not key.startswith("__"):
            cmd = '{0} --{1}={2}'.format(cmd, key, value)
    return __salt__['cmd.run'](cmd)
def syncdb(settings_module,
           bin_env=None,
           migrate=False,
           database=None,
           pythonpath=None):
    """
    run syncdb

    if you have south installed, you can pass in the optional
    ``migrate`` kwarg and run the migrations after the syncdb
    finishes.
    """
    da = _get_django_admin(bin_env)
    cmd = "{0} syncdb --settings={1}".format(da, settings_module)
    if migrate:
        cmd = "{0} --migrate".format(cmd)
    if database:
        cmd = "{0} --database={1}".format(cmd, database)
    if pythonpath:
        cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
    return __salt__['cmd.run'](cmd)
def createsuperuser(settings_module,
                    username,
                    email,
                    bin_env=None,
                    database=None,
                    pythonpath=None):
    """
    create a super user for the database.

    this defaults to use the ``--noinput`` flag which will
    not create a password for the superuser.
    """
    da = _get_django_admin(bin_env)
    cmd = "{0} createsuperuser --settings={1} --noinput --email='{2}' --username={3}".format(
        da, settings_module, email, username)
    if database:
        cmd = "{0} --database={1}".format(cmd, database)
    if pythonpath:
        cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
    return __salt__['cmd.run'](cmd)
def loaddata(settings_module,
             fixtures,
             bin_env=None,
             database=None,
             pythonpath=None):
    """
    load fixture data

    fixtures:
        comma separated list of fixtures to load
    """
    parts = [_get_django_admin(bin_env),
             "loaddata",
             "--settings={0}".format(settings_module),
             " ".join(fixtures.split(","))]
    if database:
        parts.append("--database={0}".format(database))
    if pythonpath:
        parts.append("--pythonpath={0}".format(pythonpath))
    return __salt__['cmd.run'](" ".join(parts))
def collectstatic(settings_module,
                  bin_env=None,
                  no_post_process=False,
                  ignore=None,
                  dry_run=False,
                  clear=False,
                  link=False,
                  no_default_ignore=False,
                  pythonpath=None):
    """
    Run Django's ``collectstatic`` management command (always with
    ``--noinput``), mapping each keyword argument onto its CLI flag.
    """
    da = _get_django_admin(bin_env)
    cmd = "{0} collectstatic --settings={1} --noinput".format(
        da, settings_module)
    if no_post_process:
        cmd = "{0} --no-post-process".format(cmd)
    if ignore:
        # Fix: the original format string had no placeholder, so the ignore
        # pattern was silently dropped ("--ignore=" with an empty value).
        cmd = "{0} --ignore={1}".format(cmd, ignore)
    if dry_run:
        cmd = "{0} --dry-run".format(cmd)
    if clear:
        cmd = "{0} --clear".format(cmd)
    if link:
        cmd = "{0} --link".format(cmd)
    if no_default_ignore:
        cmd = "{0} --no-default-ignore".format(cmd)
    if pythonpath:
        cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
    return __salt__['cmd.run'](cmd)
| StarcoderdataPython |
9749922 | from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
import time
import numpy as np
from pomdpy.util import console
from pomdpy.action_selection import ucb_action
from .belief_tree_solver import BeliefTreeSolver
module = "pomcp"
class POMCP(BeliefTreeSolver):
    """
    Monte-Carlo Tree Search implementation, from POMCP
    """
    # Dimensions for the fast-UCB table
    UCB_N = 10000
    UCB_n = 100
    def __init__(self, agent):
        """
        Initialize an instance of the POMCP solver and pre-compute the
        fast-UCB lookup table.
        :param agent:
        :return:
        """
        super(POMCP, self).__init__(agent)
        # Pre-calculate UCB values for a speed-up
        self.fast_UCB = [[None for _ in range(POMCP.UCB_n)] for _ in range(POMCP.UCB_N)]
        for N in range(POMCP.UCB_N):
            for n in range(POMCP.UCB_n):
                # Fix: 'n is 0' relied on CPython small-int interning and emits
                # a SyntaxWarning on Python >= 3.8; value equality is intended.
                if n == 0:
                    self.fast_UCB[N][n] = np.inf
                else:
                    self.fast_UCB[N][n] = agent.model.ucb_coefficient * np.sqrt(old_div(np.log(N + 1), n))
    @staticmethod
    def reset(agent):
        """
        Generate a new POMCP solver
        :param agent:
        Implementation of abstract method
        """
        return POMCP(agent)
    def find_fast_ucb(self, total_visit_count, action_map_entry_visit_count, log_n):
        """
        Look up and return the value in the UCB table corresponding to the params;
        fall back to computing it when the counts exceed the table dimensions.
        :param total_visit_count:
        :param action_map_entry_visit_count:
        :param log_n:
        :return:
        """
        assert self.fast_UCB is not None
        if total_visit_count < POMCP.UCB_N and action_map_entry_visit_count < POMCP.UCB_n:
            return self.fast_UCB[int(total_visit_count)][int(action_map_entry_visit_count)]
        if action_map_entry_visit_count == 0:
            return np.inf
        else:
            return self.model.ucb_coefficient * np.sqrt(old_div(log_n, action_map_entry_visit_count))
    def select_eps_greedy_action(self, eps, start_time):
        """
        Starts off the Monte-Carlo Tree Search and returns the selected action. If the belief tree
        data structure is disabled, random rollout is used.
        """
        if self.disable_tree:
            print('You dont want this')
            self.rollout_search(self.belief_tree_index)
        else:
            self.monte_carlo_approx(eps, start_time)
        return ucb_action(self, self.belief_tree_index, True)
    def simulate(self, belief_node, eps, start_time):
        """
        Run one MCTS simulation from belief_node (depth 0).
        :param belief_node:
        :return:
        """
        return self.traverse(belief_node, 0, start_time)
    def traverse(self, belief_node, tree_depth, start_time):
        """Recursive tree traversal with UCB action selection and an
        off-policy Q-learning backup on the way out."""
        delayed_reward = 0
        state = belief_node.sample_particle()
        # Time expired
        if time.time() - start_time > self.model.action_selection_timeout:
            console(4, module, "action selection timeout")
            return 0
        action = ucb_action(self, belief_node, False)
        # Search horizon reached
        if tree_depth >= self.model.max_depth:
            console(4, module, "Search horizon reached")
            return 0
        step_result, is_legal = self.model.generate_step(state, action)
        child_belief_node = belief_node.child(action, step_result.observation)
        if child_belief_node is None and not step_result.is_terminal and belief_node.action_map.total_visit_count > 0:
            child_belief_node, added = belief_node.create_or_get_child(action, step_result.observation)
        if not step_result.is_terminal or not is_legal:
            tree_depth += 1
            if child_belief_node is not None:
                # Add S' to the new belief node
                # Add a state particle with the new state
                if child_belief_node.state_particles.__len__() < self.model.max_particle_count:
                    child_belief_node.state_particles.append(step_result.next_state)
                delayed_reward = self.traverse(child_belief_node, tree_depth, start_time)
            else:
                delayed_reward = self.rollout(belief_node)
            tree_depth -= 1
        else:
            console(4, module, "Reached terminal state.")
        # delayed_reward is "Q maximal"
        # current_q_value is the Q value of the current belief-action pair
        action_mapping_entry = belief_node.action_map.get_entry(action.bin_number)
        q_value = action_mapping_entry.mean_q_value
        # off-policy Q learning update rule
        q_value += (step_result.reward + (self.model.discount * delayed_reward) - q_value)
        # print(q_value)
        action_mapping_entry.update_visit_count(1)
        action_mapping_entry.update_q_value(q_value)
        # Add RAVE ?
        return q_value
| StarcoderdataPython |
def is_palindromic(s):
    """Return True if string s reads the same forwards and backwards.

    Fix: stripped the dataset-id residue ("4802682 | ") that was fused onto
    the def line and made it invalid Python.
    """
    return s == s[::-1]
# Sum all numbers below 10**8 that are palindromic and expressible as a sum
# of at least two consecutive squares (this matches Project Euler problem 125
# with a 10**8 limit -- presumably; confirm against the expected answer below).
inf = 100000000
# Largest possible starting square root (start**2 alone must stay below inf).
upper = 100004
ans = 0
for start in range(1, upper):
    cur = 0
    # `yes` becomes 1 only once at least two squares have been accumulated.
    yes = 0
    for i in range(start, upper):
        if cur > 0:
            yes = 1
        cur += i * i
        if cur >= inf:
            break
        if yes and is_palindromic(str(cur)):
            # NOTE(review): a value representable by several distinct runs of
            # consecutive squares is added once per representation here.
            ans += cur
print(ans)
# expected answer: 2906969179
4942060 | import logging
from pgevents import data_access, event_stream, constants
from pgevents.utils import timestamps
LOGGER = logging.getLogger(__name__)
def always_continue(app):
    """Default predicate for App.run: never ask the loop to stop."""
    return True
class App:
    """LISTEN/NOTIFY-driven worker.

    Processes the event stream whenever a Postgres notification arrives on
    `channel`, or after `interval` seconds have passed without one.
    """
    def __init__(self, dsn, channel, interval=5, migration_locations=None):
        self.dsn = dsn
        self.channel = channel
        self.interval = interval
        # Core migrations always come first, then any caller-supplied locations.
        self.migration_locations = [constants.CORE_MIGRATIONS_LOCATION,] + (
            migration_locations or []
        )
        self.last_processed = timestamps.EPOCH
        self.connection = None
        self.event_stream = None
        self.handlers = {}
    def run(self, should_continue=always_continue):
        """Main loop; `should_continue(self)` is polled before every tick."""
        self.connect()
        self.setup_event_stream()
        self.start_listening()
        try:
            while should_continue(self):
                self.tick()
        finally:
            self.stop_listening()
    def tick(self):
        if self.should_process_events():
            self.process_events()
    def should_process_events(self):
        # Either a NOTIFY arrived or we have waited longer than `interval`.
        return self.has_received_notification() or self.has_exceeded_interval()
    def has_received_notification(self):
        self.connection.poll()
        if not self.connection.notifies:
            return False
        LOGGER.debug("Received notification")
        # Drain all queued notifications; one processing pass covers them all.
        while self.connection.notifies:
            self.connection.notifies.pop()
        return True
    def has_exceeded_interval(self):
        if self.calculate_seconds_since_last_processed() > self.interval:
            LOGGER.debug(f"Exceeded interval of {self.interval} seconds")
            return True
        return False
    def calculate_seconds_since_last_processed(self):
        return (timestamps.now() - self.last_processed).total_seconds()
    def process_events(self):
        LOGGER.debug("Processing events")
        self.last_processed = timestamps.now()
        self.event_stream.process()
    def connect(self):
        self.connection = data_access.connect(self.dsn)
    def setup_event_stream(self):
        self.event_stream = event_stream.EventStream(self.connection, self.handlers)
    def start_listening(self):
        LOGGER.debug("Starting to listen on channel: %s", self.channel)
        with data_access.cursor(self.connection) as cursor:
            data_access.listen(cursor, self.channel)
    def stop_listening(self):
        LOGGER.debug("Stopping listening on channel: %s", self.channel)
        with data_access.cursor(self.connection) as cursor:
            data_access.unlisten(cursor, self.channel)
    def register(self, topic):
        """Decorator: bind a handler function to `topic` (one handler per topic)."""
        def decorator(func):
            LOGGER.debug(
                "Registering handler '%s' to topic: '%s'", func.__name__, topic
            )
            self.handlers[topic] = func
            return func
        return decorator
    def unregister(self, topic, func):
        """Remove the handler for `topic`; missing registrations are ignored."""
        LOGGER.debug(
            "Unregistering handler '%s' from topic: '%s'", func.__name__, topic
        )
        try:
            del self.handlers[topic]
        except KeyError:
            pass
| StarcoderdataPython |
5095930 | from django.urls import path
from .views import ProfileViewSet
# Map HTTP verbs onto ProfileViewSet actions for a single profile.
profile = ProfileViewSet.as_view({
    'get': 'retrieve',
    'patch': 'update'
})
# Read-only listing endpoint.
profile_list = ProfileViewSet.as_view({
    'get':'list'
})
urlpatterns = [
    path('profile/<int:pk>/', profile, name="profile"),
    path('profile/', profile_list, name="profile_list"),
]
| StarcoderdataPython |
1927166 | <filename>infobip_channels/email/models/body/update_tracking_events.py
from typing import Optional
from pydantic import StrictBool
from infobip_channels.core.models import MessageBodyBase
class UpdateTrackingEventsMessageBody(MessageBodyBase):
    """Body for the email "update tracking events" API call.

    Each flag toggles tracking of the corresponding event type; a None value
    presumably omits the field from the request -- confirm against the
    Infobip API contract.
    """
    # Track e-mail opens.
    open: Optional[StrictBool] = None
    # Track link clicks.
    clicks: Optional[StrictBool] = None
    # Track unsubscribe events.
    unsubscribe: Optional[StrictBool] = None
| StarcoderdataPython |
1791144 | # -*- coding:utf-8 -*-
"""
-------------------------------------------------------------------------------
Project Name : ESEP
File Name : base.py
Start Date : 2022-03-25 07:45
Contributor : D.CW
Email : <EMAIL>
-------------------------------------------------------------------------------
Introduction:
-------------------------------------------------------------------------------
"""
import numpy as np
def speed(u, v):
    """Euclidean magnitude sqrt(u**2 + v**2) of a 2-component vector/field."""
    squared_sum = u ** 2 + v ** 2
    return np.sqrt(squared_sum)
def day_night_split(solzen: np.ndarray) -> tuple:
    """Split pixels into day and night by solar zenith angle.

    Daytime is solzen < 85 degrees; night is solzen >= 85 degrees.

    :param solzen: solar zenith angle array (degrees, 0->180)
    :return: (day_indices, night_indices), each an np.where-style index tuple

    Reference
    ------
    .. [#] AIRS/AMSU/HSB Version 5 Level 1B Product User Guide(P10)
    """
    day_mask = solzen < 85
    night_mask = solzen >= 85
    return np.where(day_mask), np.where(night_mask)
def dpres1d(pressure: np.ndarray or list, bot_p: float, top_p: float) -> np.ndarray:
    """
    Compute the pressure thickness of each layer in a constant-pressure-level system.

    :param pressure: sequence of pressure levels (monotonic; either direction)
    :param bot_p: bottom pressure bounding the thickness computation
    :param top_p: top pressure bounding the thickness computation
    :return: per-layer pressure thickness, same length as the input levels
        (levels outside [top_p, bot_p] remain NaN)
    """
    dp = np.full(np.shape(pressure), np.nan)
    len_p = len(pressure)
    lev_start_idx = 0
    lev_last_idx = len_p - 1
    # Work on an ascending-pressure copy; descending input is reversed.
    # NOTE(review): dp is filled in tmp_p (ascending) order but returned
    # without re-reversing for descending input -- confirm this is intended.
    if pressure[1] > pressure[0]:
        tmp_p = pressure
    else:
        tmp_p = pressure[::-1]
    if top_p <= tmp_p[0] and bot_p >= tmp_p[-1]:
        # Whole profile lies inside [top_p, bot_p]: half-level differences,
        # with the end layers bounded by top_p / bot_p.
        dp[0] = (tmp_p[0] + tmp_p[1]) * 0.5 - top_p
        for lev_idx in range(1, len_p - 1):
            dp[lev_idx] = (tmp_p[lev_idx + 1] - tmp_p[lev_idx - 1]) * 0.5
        dp[len_p - 1] = bot_p - (tmp_p[len_p - 1] + tmp_p[len_p - 2]) * 0.5
    else:
        # Locate the first/last levels whose bounding half-levels fall inside
        # [top_p, bot_p].
        for lev_start_idx in range(len_p - 1, 0, -1):
            if (tmp_p[lev_start_idx - 1] + tmp_p[lev_start_idx]) / 2 < top_p:
                break
        for lev_last_idx in range(len_p - 1):
            if (tmp_p[lev_last_idx + 1] + tmp_p[lev_last_idx]) / 2 > bot_p:
                break
        if lev_start_idx == lev_last_idx:
            dp[lev_start_idx] = bot_p - top_p
        elif lev_start_idx < lev_last_idx:
            dp[lev_start_idx] = (tmp_p[lev_start_idx] + tmp_p[
                lev_start_idx + 1]) * 0.5 - top_p
            # NOTE(review): this range stops at lev_last_idx - 1 (exclusive),
            # which appears to skip the level just below the last one, and the
            # last-layer formula below reuses lev_start_idx where the symmetric
            # case above uses the last index -- both look like copy-paste
            # slips; verify against reference output before changing.
            for lev_idx in range(lev_start_idx + 1, lev_last_idx - 1):
                dp[lev_idx] = (tmp_p[lev_idx + 1] - tmp_p[
                    lev_idx - 1]) * 0.5
            dp[lev_last_idx] = bot_p - (
                    tmp_p[lev_start_idx] + tmp_p[lev_start_idx + 1]) * 0.5
    return dp
def dbe1(dep, curt_mag, dis, delta):
    """One-dimensional Dynamic Balance Equation.

    :param dep: The depth of water (two station rows)
    :param curt_mag: Tidal current (two station rows)
    :param dis: The distance between the two stations (two entries)
    :param delta: Time Step
    :return: (pressure_gradient, local_acceleration, advection_acceleration,
        bottom_friction) terms of the dynamic balance equation
    :rtype: tuple
    """
    n_times = np.size(curt_mag[0])
    # Pressure gradient between the two stations.
    p_grad = 9.80665 * (dep[0][:] - dep[1][:]) / dis[0]
    # Local acceleration: centred time difference (end points stay zero).
    local_acc = np.zeros(n_times)
    for t in np.arange(1, n_times - 1):
        local_acc[t] = (curt_mag[1][t + 1] - curt_mag[1][t - 1]) / (delta * 2)
    # Advective acceleration from the along-channel velocity difference.
    adv_acc = np.zeros(n_times)
    for t in np.arange(n_times):
        adv_acc[t] = curt_mag[1][t] * (curt_mag[0][t] - curt_mag[1][t]) / dis[1]
    # Bottom friction closes the balance.
    bf = local_acc + adv_acc + p_grad
    return p_grad, local_acc, adv_acc, bf
| StarcoderdataPython |
3248880 | <reponame>JumpingYang001/tornadis
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, Application, url
import tornado.gen
import tornadis
import logging
logging.basicConfig(level=logging.WARNING)
POOL = tornadis.ClientPool(max_size=15)
class HelloHandler(RequestHandler):
    """Handler that PINGs redis through the shared connection pool."""
    @tornado.gen.coroutine
    def get(self):
        # Borrow a connected client from the pool for the duration of the block.
        with (yield POOL.connected_client()) as client:
            reply = yield client.call("PING")
            # A TornadisException reply means the call failed; respond empty then.
            if not isinstance(reply, tornadis.TornadisException):
                self.write("Hello, %s" % reply)
            self.finish()
def make_app():
    """Build the Tornado application with a single root route."""
    return Application([
        url(r"/", HelloHandler),
    ])
def main():
    """Start the HTTP server on port 8888 and run the IO loop forever."""
    app = make_app()
    app.listen(8888)
    IOLoop.current().start()
main()
| StarcoderdataPython |
3253859 | from .base import FileOutputTemplate, FileOutput
from .collection import FileOutputCollectionTemplate, FileOutputCollection
from .copy import CopyFileOutputTemplate, CopyFileOutput
from .general import FileOutputType, load_output_template
from .tag import TagFileOutputTemplate, TagFileOutput
| StarcoderdataPython |
9704315 | #!/usr/bin/env python
#coding:utf-8
# Demo of Python slicing (Python 2 print-statement syntax throughout).
L=['Michael', 'Sarah', 'Tracy']
r=[]
n=3
# Copy the first n items by explicit indexing...
for i in range(n):
    r.append(L[i])
print r
# ...which is equivalent to a slice.
print L[0:3]
print L[-1:]
print L[-2:-1]
print L[-2:]
# Slices also work on ranges, tuples and strings.
L=range(100)
print L
print L[-10:]
T=(0,1,2,3,4,5)
print T[-3:]
print 'ABCDEFG'[:3]
| StarcoderdataPython |
8118317 | <gh_stars>1000+
import json
from django.core import mail
from django.test.utils import override_settings
from hc.api.models import Channel, Check
from hc.test import BaseTestCase
class EditEmailTestCase(BaseTestCase):
    """End-to-end tests for editing an existing "email" integration channel."""
    def setUp(self):
        super().setUp()
        # One check and one verified email channel per test.
        self.check = Check.objects.create(project=self.project)
        self.channel = Channel(project=self.project, kind="email")
        self.channel.value = json.dumps(
            {"value": "<EMAIL>", "up": True, "down": True}
        )
        self.channel.email_verified = True
        self.channel.save()
        self.url = f"/integrations/{self.channel.code}/edit/"
    def test_it_shows_form(self):
        self.client.login(username="<EMAIL>", password="password")
        r = self.client.get(self.url)
        self.assertContains(r, "Get an email message when check goes up or down.")
        self.assertContains(r, "<EMAIL>")
        self.assertContains(r, "Email Settings")
    def test_it_saves_changes(self):
        form = {"value": "<EMAIL>", "down": "true", "up": "false"}
        self.client.login(username="<EMAIL>", password="password")
        r = self.client.post(self.url, form)
        self.assertRedirects(r, self.channels_url)
        self.channel.refresh_from_db()
        self.assertEqual(self.channel.email_value, "<EMAIL>")
        self.assertTrue(self.channel.email_notify_down)
        self.assertFalse(self.channel.email_notify_up)
        # It should send a verification link
        email = mail.outbox[0]
        self.assertTrue(email.subject.startswith("Verify email address on"))
        self.assertEqual(email.to[0], "<EMAIL>")
        # Make sure it does not call assign_all_checks
        self.assertFalse(self.channel.checks.exists())
    def test_it_skips_verification_if_email_unchanged(self):
        form = {"value": "<EMAIL>", "down": "false", "up": "true"}
        self.client.login(username="<EMAIL>", password="password")
        self.client.post(self.url, form)
        self.channel.refresh_from_db()
        self.assertEqual(self.channel.email_value, "<EMAIL>")
        self.assertFalse(self.channel.email_notify_down)
        self.assertTrue(self.channel.email_notify_up)
        self.assertTrue(self.channel.email_verified)
        # The email address did not change, so we should skip verification
        self.assertEqual(len(mail.outbox), 0)
    def test_team_access_works(self):
        form = {"value": "<EMAIL>", "down": "true", "up": "true"}
        self.client.login(username="<EMAIL>", password="password")
        self.client.post(self.url, form)
        self.channel.refresh_from_db()
        self.assertEqual(self.channel.email_value, "<EMAIL>")
    @override_settings(EMAIL_USE_VERIFICATION=False)
    def test_it_hides_confirmation_needed_notice(self):
        self.client.login(username="<EMAIL>", password="password")
        r = self.client.get(self.url)
        self.assertNotContains(r, "Requires confirmation")
    @override_settings(EMAIL_USE_VERIFICATION=False)
    def test_it_auto_verifies_email(self):
        form = {"value": "<EMAIL>", "down": "true", "up": "true"}
        self.client.login(username="<EMAIL>", password="password")
        r = self.client.post(self.url, form)
        self.assertRedirects(r, self.channels_url)
        self.channel.refresh_from_db()
        self.assertEqual(self.channel.email_value, "<EMAIL>")
        # Email should *not* have been sent
        self.assertEqual(len(mail.outbox), 0)
    def test_it_auto_verifies_own_email(self):
        form = {"value": "<EMAIL>", "down": "true", "up": "true"}
        self.client.login(username="<EMAIL>", password="password")
        r = self.client.post(self.url, form)
        self.assertRedirects(r, self.channels_url)
        self.channel.refresh_from_db()
        self.assertEqual(self.channel.email_value, "<EMAIL>")
        # Email should *not* have been sent
        self.assertEqual(len(mail.outbox), 0)
    def test_it_requires_rw_access(self):
        # Read-only team members must get a 403 on the edit page.
        self.bobs_membership.role = "r"
        self.bobs_membership.save()
        self.client.login(username="<EMAIL>", password="password")
        r = self.client.get(self.url)
        self.assertEqual(r.status_code, 403)
256431 | <reponame>smk4664/nautobot-plugin-ansible-runner<filename>ansible_runner/tests/__init__.py
"""Unit tests for ansible_runner plugin."""
| StarcoderdataPython |
248956 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from numpy import dot
from numpy.linalg import norm
from boto3 import client as boto3_client
import json
import logging
from botocore.client import Config
from dataclasses import dataclass
from scorer.pre_process import preprocess_text
from s3client.s3 import S3Manager
import os
import pickle
logger = logging.getLogger()
config = Config(connect_timeout=60, read_timeout=240, retries={"max_attempts": 0},)
lambda_client = boto3_client("lambda", config=config)
@dataclass
class TextSegment:
    """A single transcript segment with its speaker."""
    # Unique identifier of the segment.
    id: str
    # Raw transcript text.
    text: str
    # Speaker label for this segment.
    speaker: str
@dataclass
class Score(TextSegment):
    """A TextSegment extended with its relevance score."""
    # Relevance score computed for this segment.
    score: float
def upload_fv(fv_list, Request, context_id, instance_id):
    """Pickle and upload a feature-vector list to the context's S3 bucket.

    The key is <context_id>/feature_vectors/<instance_id>/<segment_id>.pkl.
    Returns True on success, False if anything in the upload raised.
    """
    try:
        bucket = "io.etherlabs." + os.getenv("ACTIVE_ENV", "staging2") + ".contexts"
        s3_path = (
            context_id + "/feature_vectors/" + instance_id + "/" + Request.id + ".pkl"
        )
        logger.info("The path used for s3.", extra={"S3": s3_path, "bucket": bucket})
        s3_obj = S3Manager(bucket_name=bucket)
        s3_obj.upload_object(pickle.dumps(fv_list), s3_path)
    except Exception as e:
        # Best-effort: failures are logged and reported via the return value.
        logger.info("Uploading failed ", extra={"exception:": e})
        return False
    return True
def get_score(
    mind_id: str,
    mind_dict,
    Request: TextSegment,
    context_id,
    instance_id,
    for_pims=False,
) -> Score:
    """Score one transcript segment against the mind identified by mind_id.

    mind_id -- suffix of the mind lambda to invoke ("mind-<mind_id>")
    mind_dict -- dict holding the mind's reference feature vectors
    Request -- the TextSegment to score
    for_pims -- when True, only compute and upload feature vectors and
                return True/False for success instead of a numeric score

    NOTE(review): annotated ``-> Score`` but the function actually returns a
    float (or a bool in the for_pims / empty-input paths) -- confirm intent.
    """
    score = []  # NOTE(review): vestigial initializer; rebound to a float below
    pre_processed_input = preprocess_text(Request.text)
    lambda_function = "mind-" + mind_id
    transcript_text = Request.text
    if len(pre_processed_input) != 0:
        # The mind lambda expects a JSON payload nested inside a JSON envelope.
        mind_input = json.dumps({"text": pre_processed_input})
        mind_input = json.dumps({"body": mind_input})
        logger.info("sending request to mind service")
        if for_pims is False:
            transcript_score = get_feature_vector(
                mind_input,
                lambda_function,
                mind_dict,
                Request,
                context_id,
                instance_id,
            )
        else:
            # PIM path: only store the feature vectors, report success/failure.
            response = get_feature_vector(
                mind_input,
                lambda_function,
                mind_dict,
                Request,
                context_id,
                instance_id,
                store_features=True,
            )
            if response is True:
                return True
            else:
                return False
    else:
        return True
    # NOTE(review): from here on the code unconditionally clobbers the
    # transcript_score computed above in the for_pims=False branch. This looks
    # like an else-branch whose indentation was lost -- confirm against the
    # upstream history before relying on the returned value.
    transcript_score = 0.00001
    logger.warn("processing transcript: {}".format(transcript_text))
    logger.warn("transcript too small to process. Returning default score")
    # hack to penalize out of domain small transcripts coming as PIMs - word level
    if len(transcript_text.split(" ")) < 40:
        transcript_score = 0.1 * transcript_score
    # The returned score is the inverse of the similarity value.
    score = 1 / transcript_score
    return score
def getClusterScore(mind_vec, sent_vec):
    """Cosine similarity between each sentence vector and each mind vector.

    mind_vec -- array of shape (num_minds, dim, 1)
    sent_vec -- array of shape (num_sents, dim)

    Returns an array of shape (num_sents, num_minds).
    """
    # Raw dot products: dot of (S, D) with (M, D, 1) gives (S, M, 1).
    raw = dot(sent_vec, mind_vec).squeeze(2)
    # Norms shaped so they broadcast over the (sentences, minds) grid.
    mind_norms = norm(mind_vec, axis=1).reshape(1, -1)
    sent_norms = norm(sent_vec, axis=1).reshape(-1, 1)
    return raw / (sent_norms * mind_norms)
def get_feature_vector(
    mind_input,
    lambda_function,
    mind_dict,
    Request,
    context_id,
    instance_id,
    store_features=False,
):
    """Invoke the mind lambda and either store or score its feature vectors.

    With store_features=True, the sentence feature vectors returned by the
    lambda are uploaded via upload_fv and True is returned. Otherwise the
    vectors are scored against the mind's vectors and the mean of the
    per-sentence maximum cosine similarities is returned (0.00001 when the
    service fails or returns no vectors).
    """
    invoke_response = lambda_client.invoke(
        FunctionName=lambda_function,
        InvocationType="RequestResponse",
        Payload=mind_input,
    )
    # The payload is JSON-in-JSON; single quotes are normalized first.
    out_json = invoke_response["Payload"].read().decode("utf8").replace("'", '"')
    data = json.loads(json.loads(out_json)["body"])
    response = json.loads(out_json)["statusCode"]
    if store_features is True:
        # Persist-only mode: upload the vectors and skip scoring entirely.
        vector_list = data["sent_feats"][0]
        upload_fv(vector_list, Request, context_id, instance_id)
        return True
    feats = list(mind_dict["feature_vector"].values())
    mind_vector = np.array(feats).reshape(len(feats), -1)
    transcript_score = 0.00001  # default when the service fails or output is empty
    transcript_mind_list = []
    transcript_score_list = []
    if response == 200:
        logger.info("got {} from mind server".format(response))
        feature_vector = np.array(data["sent_feats"][0])
        if len(feature_vector) > 0:
            # For paragraphs, uncomment below LOC
            # feature_vector = np.mean(np.array(feature_vector),0).reshape(1,-1)
            # Score sentences in batches of up to 10 against all mind vectors.
            batch_size = min(10, feature_vector.shape[0])
            for i in range(0, feature_vector.shape[0], batch_size):
                mind_vec = np.expand_dims(np.array(mind_vector), 2)
                sent_vec = feature_vector[i : i + batch_size]
                cluster_scores = getClusterScore(mind_vec, sent_vec)
                # Per sentence: best similarity and the index of the best mind.
                batch_scores = cluster_scores.max(1)
                transcript_score_list.extend(batch_scores)
                minds_selected = cluster_scores.argmax(1)
                transcript_mind_list.extend(minds_selected)
            transcript_score = np.mean(transcript_score_list)
            logger.info(
                "Mind Selected is {}".format(
                    {
                        ele: transcript_mind_list.count(ele)
                        for ele in set(transcript_mind_list)
                    }
                )
            )
    else:
        logger.debug(
            "Invalid response from mind service for input: {}".format(mind_input)
        )
        logger.debug("Returning default score")
    return transcript_score
| StarcoderdataPython |
1845411 | #!/bin/env python
"""
The MIT License
Copyright (c) 2010 The Chicago Tribune & Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import ants
from optparse import OptionParser, OptionGroup
def parse_options():
    """
    Handle the command line arguments for spinning up bees.

    Parses sys.argv, then dispatches the requested command (up / order /
    down / report) to the ants module. Exits via parser.error on a missing
    or unknown command.
    """
    parser = OptionParser(usage="""
bees COMMAND [options]
Hivemind
A fork of Bees With Machine Guns to make it useful for more arbitrary tasks
commands:
  up Start a batch of load testing servers.
  order Begin the attack on a specific url.
  down Shutdown and deactivate the load testing servers.
  report Report the status of the load testing servers.
    """)
    up_group = OptionGroup(parser, "up",
                           """In order to spin up new servers you will need to specify at least the -k command, which is the name of the EC2 keypair to use for creating and connecting to the new servers. The ants will expect to find a .pem file with this name in ~/.ssh/. Alternatively, ants can use SSH Agent for the key.""")
    # Required
    up_group.add_option('-k', '--key', metavar="KEY", nargs=1,
                        action='store', dest='key', type='string',
                        help="The ssh key pair name to use to connect to the new servers.")
    up_group.add_option('-s', '--servers', metavar="SERVERS", nargs=1,
                        action='store', dest='servers', type='int', default=5,
                        help="The number of servers to start (default: 5).")
    up_group.add_option('-g', '--group', metavar="GROUP", nargs=1,
                        action='store', dest='group', type='string', default='default',
                        help="The security group(s) to run the instances under (default: default).")
    up_group.add_option('-z', '--zone', metavar="ZONE", nargs=1,
                        action='store', dest='zone', type='string', default='us-east-1d',
                        help="The availability zone to start the instances in (default: us-east-1d).")
    up_group.add_option('-i', '--instance', metavar="INSTANCE", nargs=1,
                        action='store', dest='instance', type='string', default='ami-ff17fb96',
                        help="The instance-id to use for each server from (default: ami-ff17fb96).")
    up_group.add_option('-t', '--type', metavar="TYPE", nargs=1,
                        action='store', dest='type', type='string', default='t1.micro',
                        help="The instance-type to use for each server (default: t1.micro).")
    up_group.add_option('-l', '--login', metavar="LOGIN", nargs=1,
                        action='store', dest='login', type='string', default='newsapps',
                        help="The ssh username name to use to connect to the new servers (default: newsapps).")
    up_group.add_option('-v', '--subnet', metavar="SUBNET", nargs=1,
                        action='store', dest='subnet', type='string', default=None,
                        help="The vpc subnet id in which the instances should be launched. (default: None).")
    up_group.add_option('-b', '--bid', metavar="BID", nargs=1,
                        action='store', dest='bid', type='float', default=None,
                        help="The maximum bid price per spot instance (default: None).")
    parser.add_option_group(up_group)
    order_group = OptionGroup(parser, "order",
                              """Orders will be executed before order files. Orders and order files are executed in the order entered.""")
    # Required
    order_group.add_option('-o', '--order', metavar="ORDER", nargs=1,
                           action='append', dest='orders', type='string',
                           help="Order")
    order_group.add_option('-f', '--file', metavar="FILE", nargs=1,
                           action='append', dest='files', type='string',
                           help="File with orders")
    parser.add_option_group(order_group)
    (options, args) = parser.parse_args()
    if len(args) <= 0:
        parser.error('Please enter a command.')
    command = args[0]
    if command == 'up':
        if not options.key:
            parser.error('To spin up new instances you need to specify a key-pair name with -k')
        if options.group == 'default':
            print('New ants will use the "default" EC2 security group. Please note that port 22 (SSH) is not normally open on this group. You will need to use to the EC2 tools to open it before you will be able to attack.')
        ants.up(options.servers, options.group, options.zone, options.instance, options.type, options.login, options.key, options.subnet, options.bid)
    elif command == 'order':
        if not options.orders and not options.files:
            parser.error('Need orders')
        ants.order(options.orders, options.files)
    elif command == 'down':
        ants.down()
    elif command == 'report':
        ants.report()
    else:
        # Previously an unrecognized command fell through silently; fail loudly.
        parser.error('Unknown command: %s' % command)
def main():
    """Console entry point: parse CLI arguments and dispatch the command."""
    parse_options()
| StarcoderdataPython |
4811034 | <gh_stars>0
from rest_framework import serializers
from web.datasets.models import CityCouncilAgenda
class CityCouncilAgendaSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of CityCouncilAgenda."""

    class Meta:
        model = CityCouncilAgenda
        fields = "__all__"
| StarcoderdataPython |
5047978 | from setuptools import setup
import os
# Distribution metadata and dependencies for the pygraphblas package.
setup(
    name='pygraphblas',
    version='5.1.5.1',
    description='GraphBLAS Python bindings.',
    author='<NAME>',
    packages=['pygraphblas'],
    setup_requires=["pytest-runner"],
    install_requires=["suitesparse-graphblas", "numba", "scipy", "contextvars"],
)
| StarcoderdataPython |
399666 | <reponame>leonardogian/CANA<filename>cana/datasets/bools.py
# -*- coding: utf-8 -*-
"""
Boolean Nodes
=================================
Commonly used boolean node functions.
"""
# Copyright (C) 2017 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
from .. boolean_network import BooleanNode
def AND():
    """AND boolean node.

    Truth table (inputs in ascending binary order):
    00 -> 0, 01 -> 0, 10 -> 0, 11 -> 1
    """
    truth_table = [0, 0, 0, 1]
    return BooleanNode.from_output_list(outputs=truth_table, name="AND")
def OR():
    """OR boolean node.

    Truth table (inputs in ascending binary order):
    00 -> 0, 01 -> 1, 10 -> 1, 11 -> 1
    """
    truth_table = [0, 1, 1, 1]
    return BooleanNode.from_output_list(outputs=truth_table, name="OR")
def XOR():
    """XOR boolean node.

    Truth table (inputs in ascending binary order):
    00 -> 0, 01 -> 1, 10 -> 1, 11 -> 0
    """
    truth_table = [0, 1, 1, 0]
    return BooleanNode.from_output_list(outputs=truth_table, name="XOR")
def COPYx1():
    """COPY :math:`x_1` boolean node (output mirrors the first input).

    Truth table (inputs in ascending binary order):
    00 -> 0, 01 -> 0, 10 -> 1, 11 -> 1
    """
    truth_table = [0, 0, 1, 1]
    return BooleanNode.from_output_list(outputs=truth_table, name="COPY x_1")
def CONTRADICTION():
    """Contradiction boolean node (constant 0 for every input).

    Truth table (inputs in ascending binary order):
    00 -> 0, 01 -> 0, 10 -> 0, 11 -> 0
    """
    truth_table = [0, 0, 0, 0]
    return BooleanNode.from_output_list(outputs=truth_table, name="CONTRADICTION")
def RULE90():
    """RULE 90 cellular automaton node.

    Truth table (3-bit inputs in ascending binary order):
    000 -> 0, 001 -> 1, 010 -> 0, 011 -> 1,
    100 -> 1, 101 -> 0, 110 -> 1, 111 -> 0
    """
    truth_table = [0, 1, 0, 1, 1, 0, 1, 0]
    return BooleanNode.from_output_list(outputs=truth_table, name="RULE 90")
def RULE110():
    """RULE 110 cellular automaton node.

    Truth table (3-bit inputs in ascending binary order):
    000 -> 0, 001 -> 1, 010 -> 1, 011 -> 1,
    100 -> 0, 101 -> 1, 110 -> 1, 111 -> 0
    """
    truth_table = [0, 1, 1, 1, 0, 1, 1, 0]
    return BooleanNode.from_output_list(outputs=truth_table, name="RULE 110")
| StarcoderdataPython |
1689669 | import iyzipay
# Request options: point the SDK at the configured iyzipay base URL.
options = {
    'base_url': iyzipay.base_url
}
# Call the API test endpoint to verify connectivity and print the raw body.
api_test = iyzipay.ApiTest().retrieve(options)
print(api_test.body)
| StarcoderdataPython |
329864 | <gh_stars>0
"""
Edge Examples
"""
import sys, os
thisPath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(thisPath,"..")))
from ExampleBuilders.ExampleBuilder import ExampleBuilder
from Core.IdSet import IdSet
import Core.ExampleUtils as ExampleUtils
from FeatureBuilders.MultiEdgeFeatureBuilder import MultiEdgeFeatureBuilder
from FeatureBuilders.TokenFeatureBuilder import TokenFeatureBuilder
from FeatureBuilders.BioInferOntologyFeatureBuilder import BioInferOntologyFeatureBuilder
from FeatureBuilders.NodalidaFeatureBuilder import NodalidaFeatureBuilder
from FeatureBuilders.BacteriaRenamingFeatureBuilder import BacteriaRenamingFeatureBuilder
from FeatureBuilders.RELFeatureBuilder import RELFeatureBuilder
from FeatureBuilders.DrugFeatureBuilder import DrugFeatureBuilder
from FeatureBuilders.EVEXFeatureBuilder import EVEXFeatureBuilder
from FeatureBuilders.GiulianoFeatureBuilder import GiulianoFeatureBuilder
#import Graph.networkx_v10rc1 as NX10
from Core.SimpleGraph import Graph
from FeatureBuilders.TriggerFeatureBuilder import TriggerFeatureBuilder
import Utils.Range as Range
from multiprocessing import Process
# For gold mapping
import Evaluators.EvaluateInteractionXML as EvaluateInteractionXML
class EdgeExampleBuilder(ExampleBuilder):
"""
This example builder makes edge examples, i.e. examples describing
the event arguments.
"""
def __init__(self, style=None, types=[], featureSet=None, classSet=None):
if featureSet == None:
featureSet = IdSet()
if classSet == None:
classSet = IdSet(1)
else:
classSet = classSet
ExampleBuilder.__init__(self, classSet=classSet, featureSet=featureSet)
assert( classSet.getId("neg") == 1 or (len(classSet.Ids)== 2 and classSet.getId("neg") == -1) )
# Basic style = trigger_features:typed:directed:no_linear:entities:auto_limits:noMasking:maxFeatures
self._setDefaultParameters([
"directed", "undirected", "headsOnly", "graph_kernel", "noAnnType", "mask_nodes", "limit_features",
"no_auto_limits", "co_features", "genia_features", "bi_features", #"genia_limits", "epi_limits", "id_limits", "rel_limits", "bb_limits", "bi_limits", "co_limits",
"genia_task1", "ontology", "nodalida", "bacteria_renaming", "no_trigger_features", "rel_features",
"drugbank_features", "ddi_mtmx", "evex", "giuliano", "random", "themeOnly", "causeOnly", "no_path", "token_nodes",
"skip_extra_triggers", "headsOnly", "graph_kernel", "no_task", "no_dependency",
"disable_entity_features", "disable_terminus_features", "disable_single_element_features",
"disable_ngram_features", "disable_path_edge_features", "linear_features", "subset", "binary", "pos_only",
"entity_type", "filter_shortest_path", "maskTypeAsProtein", "keep_neg", "metamap"])
self.styles = self.getParameters(style)
#if style == None: # no parameters given
# style["typed"] = style["directed"] = style["headsOnly"] = True
self.multiEdgeFeatureBuilder = MultiEdgeFeatureBuilder(self.featureSet, self.styles)
# NOTE Temporarily re-enabling predicted range
#self.multiEdgeFeatureBuilder.definePredictedValueRange([], None)
if self.styles["graph_kernel"]:
from FeatureBuilders.GraphKernelFeatureBuilder import GraphKernelFeatureBuilder
self.graphKernelFeatureBuilder = GraphKernelFeatureBuilder(self.featureSet)
if self.styles["noAnnType"]:
self.multiEdgeFeatureBuilder.noAnnType = True
if self.styles["mask_nodes"]:
self.multiEdgeFeatureBuilder.maskNamedEntities = True
else:
self.multiEdgeFeatureBuilder.maskNamedEntities = False
if not self.styles["limit_features"]:
self.multiEdgeFeatureBuilder.maximum = True
if self.styles["genia_task1"]:
self.multiEdgeFeatureBuilder.filterAnnTypes.add("Entity")
self.tokenFeatureBuilder = TokenFeatureBuilder(self.featureSet)
if self.styles["ontology"]:
self.multiEdgeFeatureBuilder.ontologyFeatureBuilder = BioInferOntologyFeatureBuilder(self.featureSet)
if self.styles["nodalida"]:
self.nodalidaFeatureBuilder = NodalidaFeatureBuilder(self.featureSet)
if self.styles["bacteria_renaming"]:
self.bacteriaRenamingFeatureBuilder = BacteriaRenamingFeatureBuilder(self.featureSet)
if not self.styles["no_trigger_features"]:
self.triggerFeatureBuilder = TriggerFeatureBuilder(self.featureSet, self.styles)
self.triggerFeatureBuilder.useNonNameEntities = True
if self.styles["genia_task1"]:
self.triggerFeatureBuilder.filterAnnTypes.add("Entity")
#self.bioinferOntologies = OntologyUtils.loadOntologies(OntologyUtils.g_bioInferFileName)
if self.styles["rel_features"]:
self.relFeatureBuilder = RELFeatureBuilder(featureSet)
if self.styles["drugbank_features"]:
self.drugFeatureBuilder = DrugFeatureBuilder(featureSet)
if self.styles["evex"]:
self.evexFeatureBuilder = EVEXFeatureBuilder(featureSet)
if self.styles["giuliano"]:
self.giulianoFeatureBuilder = GiulianoFeatureBuilder(featureSet)
self.types = types
if self.styles["random"]:
from FeatureBuilders.RandomFeatureBuilder import RandomFeatureBuilder
self.randomFeatureBuilder = RandomFeatureBuilder(self.featureSet)
    def definePredictedValueRange(self, sentences, elementName):
        # Delegate to the multi-edge feature builder, which derives its
        # predicted value range from elementName elements in the sentences.
        self.multiEdgeFeatureBuilder.definePredictedValueRange(sentences, elementName)
    def getPredictedValueRange(self):
        # Expose the range computed by definePredictedValueRange().
        return self.multiEdgeFeatureBuilder.predictedRange
def filterEdgesByType(self, edges, typesToInclude):
if len(typesToInclude) == 0:
return edges
edgesToKeep = []
for edge in edges:
if edge.get("type") in typesToInclude:
edgesToKeep.append(edge)
return edgesToKeep
def getCategoryNameFromTokens(self, sentenceGraph, t1, t2, directed=True):
"""
Example class. Multiple overlapping edges create a merged type.
"""
types = set()
intEdges = sentenceGraph.interactionGraph.getEdges(t1, t2)
if not directed:
intEdges = intEdges + sentenceGraph.interactionGraph.getEdges(t2, t1)
for intEdge in intEdges:
types.add(intEdge[2].get("type"))
types = list(types)
types.sort()
categoryName = ""
for name in types:
if categoryName != "":
categoryName += "---"
categoryName += name
if categoryName != "":
return categoryName
else:
return "neg"
def getCategoryName(self, sentenceGraph, e1, e2, directed=True):
"""
Example class. Multiple overlapping edges create a merged type.
"""
interactions = sentenceGraph.getInteractions(e1, e2, True)
if not directed:
interactions = interactions + sentenceGraph.getInteractions(e2, e1, True)
types = set()
for interaction in interactions:
types.add(interaction[2].get("type"))
types = list(types)
types.sort()
categoryName = ""
for name in types:
if self.styles["causeOnly"] and name != "Cause":
continue
if self.styles["themeOnly"] and name != "Theme":
continue
if categoryName != "":
categoryName += "---"
categoryName += name
if categoryName != "":
return categoryName
else:
return "neg"
def getBISuperType(self, eType):
if eType in ["GeneProduct", "Protein", "ProteinFamily", "PolymeraseComplex"]:
return "ProteinEntity"
elif eType in ["Gene", "GeneFamily", "GeneComplex", "Regulon", "Site", "Promoter"]:
return "GeneEntity"
else:
return None
    def isValidInteraction(self, e1, e2, structureAnalyzer,forceUndirected=False):
        # True if the structure analyzer allows at least one edge type between
        # the two entities' types (optionally ignoring edge direction).
        return len(structureAnalyzer.getValidEdgeTypes(e1.get("type"), e2.get("type"), forceUndirected=forceUndirected)) > 0
def getGoldCategoryName(self, goldGraph, entityToGold, e1, e2, directed=True):
if len(entityToGold[e1]) > 0 and len(entityToGold[e2]) > 0:
return self.getCategoryName(goldGraph, entityToGold[e1][0], entityToGold[e2][0], directed=directed)
else:
return "neg"
def filterEdge(self, edge, edgeTypes):
import types
assert edgeTypes != None
if type(edgeTypes) not in [types.ListType, types.TupleType]:
edgeTypes = [edgeTypes]
if edge[2].get("type") in edgeTypes:
return True
else:
return False
    def keepExample(self, e1, e2, categoryName, isDirected, structureAnalyzer):
        """Decide whether an example should be generated for this entity pair.

        Applies the automatic type limits, the genia_task1 Entity filter and
        the pos_only filter; each rejection is recorded in exampleStats.
        """
        makeExample = True
        # Skip pairs whose entity types can never form a valid edge.
        if (not self.styles["no_auto_limits"]) and not self.isValidInteraction(e1, e2, structureAnalyzer, forceUndirected=not isDirected):
            makeExample = False
            self.exampleStats.filter("auto_limits")
        # GENIA task 1 excludes "Entity" site arguments entirely.
        if self.styles["genia_task1"] and (e1.get("type") == "Entity" or e2.get("type") == "Entity"):
            makeExample = False
            self.exampleStats.filter("genia_task1")
        # Optionally keep only positive examples.
        if self.styles["pos_only"] and categoryName == "neg":
            makeExample = False
            self.exampleStats.filter("pos_only")
        return makeExample
def getExampleCategoryName(self, e1=None, e2=None, t1=None, t2=None, sentenceGraph=None, goldGraph=None, entityToGold=None, isDirected=True):
if self.styles["token_nodes"]:
categoryName = self.getCategoryNameFromTokens(sentenceGraph, t1, t2, isDirected)
else:
categoryName = self.getCategoryName(sentenceGraph, e1, e2, isDirected)
if goldGraph != None:
categoryName = self.getGoldCategoryName(goldGraph, entityToGold, e1, e2, isDirected)
return categoryName
    def buildExamplesFromGraph(self, sentenceGraph, outfile, goldGraph = None, structureAnalyzer=None):
        """
        Build edge examples for a single sentence.

        Examples are appended to outfile via ExampleUtils; the return value
        is the number of examples written (not a list).
        See Core/ExampleUtils for example format.
        """
        #examples = []
        exampleIndex = 0
        # example directionality
        if self.styles["directed"] == None and self.styles["undirected"] == None: # determine directedness from corpus
            examplesAreDirected = structureAnalyzer.hasDirectedTargets()
        elif self.styles["directed"]:
            assert self.styles["undirected"] in [None, False]
            examplesAreDirected = True
        elif self.styles["undirected"]:
            assert self.styles["directed"] in [None, False]
            examplesAreDirected = False
        # Per-sentence initialization of the optional feature builders.
        if not self.styles["no_trigger_features"]:
            self.triggerFeatureBuilder.initSentence(sentenceGraph)
        if self.styles["evex"]:
            self.evexFeatureBuilder.initSentence(sentenceGraph)
        # Filter entities, if needed
        sentenceGraph.mergeInteractionGraph(True)
        entities = sentenceGraph.mergedEntities
        entityToDuplicates = sentenceGraph.mergedEntityToDuplicates
        self.exampleStats.addValue("Duplicate entities skipped", len(sentenceGraph.entities) - len(entities))
        # Connect to optional gold graph
        entityToGold = None
        if goldGraph != None:
            entityToGold = EvaluateInteractionXML.mapEntities(entities, goldGraph.entities)
        # Shortest paths over the (undirected) dependency graph feed the path features.
        paths = None
        if not self.styles["no_path"]:
            undirected = sentenceGraph.dependencyGraph.toUndirected()
            paths = undirected
            if self.styles["filter_shortest_path"] != None: # For DDI use filter_shortest_path=conj_and
                paths.resetAnalyses() # just in case
                paths.FloydWarshall(self.filterEdge, {"edgeTypes":self.styles["filter_shortest_path"]})
        # Generate examples based on interactions between entities or interactions between tokens
        if self.styles["token_nodes"]:
            loopRange = len(sentenceGraph.tokens)
        else:
            loopRange = len(entities)
        # All unordered pairs (i < j); direction handling happens downstream.
        for i in range(loopRange-1):
            for j in range(i+1,loopRange):
                eI = None
                eJ = None
                if self.styles["token_nodes"]:
                    tI = sentenceGraph.tokens[i]
                    tJ = sentenceGraph.tokens[j]
                else:
                    eI = entities[i]
                    eJ = entities[j]
                    tI = sentenceGraph.entityHeadTokenByEntity[eI]
                    tJ = sentenceGraph.entityHeadTokenByEntity[eJ]
                    if eI.get("type") == "neg" or eJ.get("type") == "neg":
                        continue
                    if self.styles["skip_extra_triggers"]:
                        if eI.get("source") != None or eJ.get("source") != None:
                            continue
                # only consider paths between entities (NOTE! entities, not only named entities)
                if self.styles["headsOnly"]:
                    if (len(sentenceGraph.tokenIsEntityHead[tI]) == 0) or (len(sentenceGraph.tokenIsEntityHead[tJ]) == 0):
                        continue
                examples = self.buildExamplesForPair(tI, tJ, paths, sentenceGraph, goldGraph, entityToGold, eI, eJ, structureAnalyzer, examplesAreDirected)
                for categoryName, features, extra in examples:
                    # make example
                    if self.styles["binary"]:
                        # Binary mode: collapse all positive classes to +1.
                        if categoryName != "neg":
                            category = 1
                        else:
                            category = -1
                        extra["categoryName"] = "i"
                    else:
                        category = self.classSet.getId(categoryName)
                    example = [sentenceGraph.getSentenceId()+".x"+str(exampleIndex), category, features, extra]
                    ExampleUtils.appendExamples([example], outfile)
                    exampleIndex += 1
        return exampleIndex
    def buildExamplesForPair(self, token1, token2, paths, sentenceGraph, goldGraph, entityToGold, entity1=None, entity2=None, structureAnalyzer=None, isDirected=True):
        """Build the example(s) for one candidate pair.

        Directed mode produces separate forward and reverse examples (each
        with its own label); undirected mode builds one example whose
        feature vector merges both directions. Returns a list of 0-2
        examples (filtered ones are dropped).
        """
        # define forward
        categoryName = self.getExampleCategoryName(entity1, entity2, token1, token2, sentenceGraph, goldGraph, entityToGold, isDirected)
        # make forward
        forwardExample = None
        self.exampleStats.beginExample(categoryName)
        if self.keepExample(entity1, entity2, categoryName, isDirected, structureAnalyzer):
            forwardExample = self.buildExample(token1, token2, paths, sentenceGraph, categoryName, entity1, entity2, structureAnalyzer, isDirected)
        if isDirected: # build a separate reverse example (if that is valid)
            self.exampleStats.endExample() # end forward example
            # define reverse
            categoryName = self.getExampleCategoryName(entity2, entity1, token2, token1, sentenceGraph, goldGraph, entityToGold, True)
            # make reverse
            self.exampleStats.beginExample(categoryName)
            reverseExample = None
            if self.keepExample(entity2, entity1, categoryName, True, structureAnalyzer):
                reverseExample = self.buildExample(token2, token1, paths, sentenceGraph, categoryName, entity2, entity1, structureAnalyzer, isDirected)
            self.exampleStats.endExample()
            return filter(None, [forwardExample, reverseExample])
        elif forwardExample != None: # merge features from the reverse example to the forward one
            reverseExample = self.buildExample(token2, token1, paths, sentenceGraph, categoryName, entity2, entity1, structureAnalyzer, isDirected)
            forwardExample[1].update(reverseExample[1])
            self.exampleStats.endExample() # end merged example
            return [forwardExample]
        else: # undirected example that was filtered
            self.exampleStats.endExample() # end merged example
            return []
def buildExample(self, token1, token2, paths, sentenceGraph, categoryName, entity1=None, entity2=None, structureAnalyzer=None, isDirected=True):
"""
Build a single directed example for the potential edge between token1 and token2
"""
# define features
features = {}
if not self.styles["no_path"]:
path = paths.getPaths(token1, token2)
if len(path) > 0:
path = path[0]
pathExists = True
else:
path = [token1, token2]
pathExists = False
else:
path = [token1, token2]
pathExists = False
if not self.styles["no_trigger_features"]: # F 85.52 -> 85.55
self.triggerFeatureBuilder.setFeatureVector(features)
self.triggerFeatureBuilder.tag = "trg1_"
self.triggerFeatureBuilder.buildFeatures(token1)
self.triggerFeatureBuilder.tag = "trg2_"
self.triggerFeatureBuilder.buildFeatures(token2)
self.triggerFeatureBuilder.setFeatureVector(None)
# REL features
if self.styles["rel_features"] and not self.styles["no_task"]:
self.relFeatureBuilder.setFeatureVector(features)
self.relFeatureBuilder.tag = "rel1_"
self.relFeatureBuilder.buildAllFeatures(sentenceGraph.tokens, sentenceGraph.tokens.index(token1))
self.relFeatureBuilder.tag = "rel2_"
self.relFeatureBuilder.buildAllFeatures(sentenceGraph.tokens, sentenceGraph.tokens.index(token2))
self.relFeatureBuilder.setFeatureVector(None)
if self.styles["bacteria_renaming"] and not self.styles["no_task"]:
self.bacteriaRenamingFeatureBuilder.setFeatureVector(features)
self.bacteriaRenamingFeatureBuilder.buildPairFeatures(entity1, entity2)
#self.bacteriaRenamingFeatureBuilder.buildSubstringFeatures(entity1, entity2) # decreases perf. 74.76 -> 72.41
self.bacteriaRenamingFeatureBuilder.setFeatureVector(None)
if self.styles["co_features"] and not self.styles["no_task"]:
e1Offset = Range.charOffsetToSingleTuple(entity1.get("charOffset"))
e2Offset = Range.charOffsetToSingleTuple(entity2.get("charOffset"))
if Range.contains(e1Offset, e2Offset):
features[self.featureSet.getId("e1_contains_e2")] = 1
if entity2.get("given") == "True":
features[self.featureSet.getId("e1_contains_e2name")] = 1
if Range.contains(e2Offset, e1Offset):
features[self.featureSet.getId("e2_contains_e1")] = 1
if entity1.get("given") == "True":
features[self.featureSet.getId("e2_contains_e1name")] = 1
if self.styles["drugbank_features"]:
self.drugFeatureBuilder.setFeatureVector(features)
self.drugFeatureBuilder.tag = "ddi_"
self.drugFeatureBuilder.buildPairFeatures(entity1, entity2)
if self.styles["ddi_mtmx"]:
self.drugFeatureBuilder.buildMTMXFeatures(entity1, entity2)
self.drugFeatureBuilder.setFeatureVector(None)
if self.styles["graph_kernel"]:
self.graphKernelFeatureBuilder.setFeatureVector(features, entity1, entity2)
self.graphKernelFeatureBuilder.buildGraphKernelFeatures(sentenceGraph, path)
self.graphKernelFeatureBuilder.setFeatureVector(None)
if self.styles["entity_type"]:
e1Type = self.multiEdgeFeatureBuilder.getEntityType(entity1)
e2Type = self.multiEdgeFeatureBuilder.getEntityType(entity2)
features[self.featureSet.getId("e1_"+e1Type)] = 1
features[self.featureSet.getId("e2_"+e2Type)] = 1
features[self.featureSet.getId("distance_"+str(len(path)))] = 1
if not self.styles["no_dependency"]:
#print "Dep features"
self.multiEdgeFeatureBuilder.setFeatureVector(features, entity1, entity2)
#self.multiEdgeFeatureBuilder.buildStructureFeatures(sentenceGraph, paths) # remove for fast
if not self.styles["disable_entity_features"]:
self.multiEdgeFeatureBuilder.buildEntityFeatures(sentenceGraph)
self.multiEdgeFeatureBuilder.buildPathLengthFeatures(path)
if not self.styles["disable_terminus_features"]:
self.multiEdgeFeatureBuilder.buildTerminusTokenFeatures(path, sentenceGraph) # remove for fast
if not self.styles["disable_single_element_features"]:
self.multiEdgeFeatureBuilder.buildSingleElementFeatures(path, sentenceGraph)
if not self.styles["disable_ngram_features"]:
#print "NGrams"
self.multiEdgeFeatureBuilder.buildPathGrams(2, path, sentenceGraph) # remove for fast
self.multiEdgeFeatureBuilder.buildPathGrams(3, path, sentenceGraph) # remove for fast
self.multiEdgeFeatureBuilder.buildPathGrams(4, path, sentenceGraph) # remove for fast
#self.buildEdgeCombinations(path, edges, sentenceGraph, features) # remove for fast
#if edges != None:
# self.multiEdgeFeatureBuilder.buildTerminusFeatures(path[0], edges[0][1]+edges[1][0], "t1", sentenceGraph) # remove for fast
# self.multiEdgeFeatureBuilder.buildTerminusFeatures(path[-1], edges[len(path)-1][len(path)-2]+edges[len(path)-2][len(path)-1], "t2", sentenceGraph) # remove for fast
if not self.styles["disable_path_edge_features"]:
self.multiEdgeFeatureBuilder.buildPathEdgeFeatures(path, sentenceGraph)
self.multiEdgeFeatureBuilder.buildSentenceFeatures(sentenceGraph)
self.multiEdgeFeatureBuilder.setFeatureVector(None)
if self.styles["nodalida"]:
self.nodalidaFeatureBuilder.setFeatureVector(features, entity1, entity2)
shortestPaths = self.nodalidaFeatureBuilder.buildShortestPaths(sentenceGraph.dependencyGraph, path)
print shortestPaths
if len(shortestPaths) > 0:
self.nodalidaFeatureBuilder.buildNGrams(shortestPaths, sentenceGraph)
self.nodalidaFeatureBuilder.setFeatureVector(None)
if self.styles["linear_features"]:
self.tokenFeatureBuilder.setFeatureVector(features)
for i in range(len(sentenceGraph.tokens)):
if sentenceGraph.tokens[i] == token1:
token1Index = i
if sentenceGraph.tokens[i] == token2:
token2Index = i
linearPreTag = "linfw_"
if token1Index > token2Index:
token1Index, token2Index = token2Index, token1Index
linearPreTag = "linrv_"
self.tokenFeatureBuilder.buildLinearOrderFeatures(token1Index, sentenceGraph, 2, 2, preTag="linTok1")
self.tokenFeatureBuilder.buildLinearOrderFeatures(token2Index, sentenceGraph, 2, 2, preTag="linTok2")
# Before, middle, after
# self.tokenFeatureBuilder.buildTokenGrams(0, token1Index-1, sentenceGraph, "bf")
# self.tokenFeatureBuilder.buildTokenGrams(token1Index+1, token2Index-1, sentenceGraph, "bw")
# self.tokenFeatureBuilder.buildTokenGrams(token2Index+1, len(sentenceGraph.tokens)-1, sentenceGraph, "af")
# before-middle, middle, middle-after
# self.tokenFeatureBuilder.buildTokenGrams(0, token2Index-1, sentenceGraph, linearPreTag+"bf", max=2)
# self.tokenFeatureBuilder.buildTokenGrams(token1Index+1, token2Index-1, sentenceGraph, linearPreTag+"bw", max=2)
# self.tokenFeatureBuilder.buildTokenGrams(token1Index+1, len(sentenceGraph.tokens)-1, sentenceGraph, linearPreTag+"af", max=2)
self.tokenFeatureBuilder.setFeatureVector(None)
if self.styles["random"]:
self.randomFeatureBuilder.setFeatureVector(features)
self.randomFeatureBuilder.buildRandomFeatures(100, 0.01)
self.randomFeatureBuilder.setFeatureVector(None)
if self.styles["genia_features"] and not self.styles["no_task"]:
e1Type = entity1.get("type")
e2Type = entity2.get("type")
assert(entity1.get("given") in (None, "False"))
if entity2.get("given") == "True":
features[self.featureSet.getId("GENIA_target_protein")] = 1
else:
features[self.featureSet.getId("GENIA_nested_event")] = 1
if e1Type.find("egulation") != -1: # leave r out to avoid problems with capitalization
if entity2.get("given") == "True":
features[self.featureSet.getId("GENIA_regulation_of_protein")] = 1
else:
features[self.featureSet.getId("GENIA_regulation_of_event")] = 1
if self.styles["bi_features"]:
# Make features based on entity types
e1Type = entity1.get("type")
e2Type = entity2.get("type")
e1SuperType = str(self.getBISuperType(e1Type))
e2SuperType = str(self.getBISuperType(e2Type))
features[self.featureSet.getId("BI_e1_"+e1Type)] = 1
features[self.featureSet.getId("BI_e2_"+e2Type)] = 1
features[self.featureSet.getId("BI_e1sup_"+e1SuperType)] = 1
features[self.featureSet.getId("BI_e2sup_"+e2SuperType)] = 1
features[self.featureSet.getId("BI_e1e2_"+e1Type+"_"+e2Type)] = 1
features[self.featureSet.getId("BI_e1e2sup_"+e1SuperType+"_"+e2SuperType)] = 1
if self.styles["evex"]:
self.evexFeatureBuilder.setFeatureVector(features, entity1, entity2)
self.evexFeatureBuilder.buildEdgeFeatures(entity1, entity2, token1, token2, path, sentenceGraph)
self.evexFeatureBuilder.setFeatureVector(None)
if self.styles["giuliano"]:
self.giulianoFeatureBuilder.setFeatureVector(features, entity1, entity2)
self.giulianoFeatureBuilder.buildEdgeFeatures(entity1, entity2, token1, token2, path, sentenceGraph)
self.giulianoFeatureBuilder.setFeatureVector(None)
# define extra attributes
if int(path[0].get("charOffset").split("-")[0]) < int(path[-1].get("charOffset").split("-")[0]):
extra = {"xtype":"edge","type":"i","t1":path[0].get("id"),"t2":path[-1].get("id")}
extra["deprev"] = False
else:
extra = {"xtype":"edge","type":"i","t1":path[-1].get("id"),"t2":path[0].get("id")}
extra["deprev"] = True
if entity1 != None:
extra["e1"] = entity1.get("id")
if sentenceGraph.mergedEntityToDuplicates != None:
extra["e1DuplicateIds"] = ",".join([x.get("id") for x in sentenceGraph.mergedEntityToDuplicates[entity1]])
if entity2 != None:
extra["e2"] = entity2.get("id")
if sentenceGraph.mergedEntityToDuplicates != None:
extra["e2DuplicateIds"] = ",".join([x.get("id") for x in sentenceGraph.mergedEntityToDuplicates[entity2]])
extra["categoryName"] = categoryName
if self.styles["bacteria_renaming"]:
if entity1.get("text") != None and entity1.get("text") != "":
extra["e1t"] = entity1.get("text").replace(" ", "---").replace(":","-COL-")
if entity2.get("text") != None and entity2.get("text") != "":
extra["e2t"] = entity2.get("text").replace(" ", "---").replace(":","-COL-")
sentenceOrigId = sentenceGraph.sentenceElement.get("origId")
if sentenceOrigId != None:
extra["SOID"] = sentenceOrigId
extra["directed"] = str(isDirected)
return (categoryName, features, extra)
| StarcoderdataPython |
4934030 | <gh_stars>0
# import torch
# import torch.nn as nn
# from torch.nn import functional as F
# import copy
# import math
##### Variables to set for RCERM #####
# Tunables for the relational-contrastive ERM setup; only the queue size is
# currently active, the rest of the file is kept as commented-out reference code.
queue_sz=7 # the memory module/ queue size
# tau = 0.05 # temperature parameter in the objective
# momentum = 0.999 # theta in momentum encoding step
# Presumably nested per-class, per-domain feature queues (the commented code
# below indexes train_queues[class][domain]) -- TODO confirm intended structure.
train_queues=[]
##### Variables to set for RCERM #####
# def not_ind_i(lst,ind_i): # returns all elements in list except pointed by the index ind_i
# if ind_i==0:
# return lst[ind_i+1:]
# elif ind_i==len(lst)-1:
# return lst[:-1]
# else:
# res=lst[:ind_i]
# res.extend(lst[ind_i+1:])
# return res
# def get_pos_neg_queues(id_c,id_d,train_queues):
# ind_class_other_domains=not_ind_i(train_queues[id_c],id_d) # indexed class, other domains; len = N_d-1
# positive_queue=None
# for positive_domain_queue in ind_class_other_domains:
# if positive_queue is None:
# positive_queue=positive_domain_queue
# else:
# positive_queue = torch.cat((positive_queue, positive_domain_queue), 0)
# #print('Positive Queue Generated for class ',id_c,' domain ',id_d,' with size ',positive_queue.size())
# other_classes=not_ind_i(train_queues,id_c) # remaining classes; len = N_c-1
# negative_queue=None
# for negative_class in other_classes:
# for negative_domain_queue in negative_class:
# if negative_queue is None:
# negative_queue=negative_domain_queue
# else:
# negative_queue = torch.cat((negative_queue, negative_domain_queue), 0)
# #print('Negative Queue Generated for class ',id_c,' domain ',id_d,' with size ',negative_queue.size())
# return positive_queue,negative_queue
# def get_pos_queues(id_c,id_d,train_queues):
# ind_class_other_domains=not_ind_i(train_queues[id_c],id_d) # indexed class, other domains; len = N_d-1
# positive_queue=None
# for positive_domain_queue in ind_class_other_domains:
# if positive_queue is None:
# positive_queue=positive_domain_queue
# else:
# positive_queue = torch.cat((positive_queue, positive_domain_queue), 0)
# #print('Positive Queue Generated for class ',id_c,' domain ',id_d,' with size ',positive_queue.size())
# return positive_queue
# def loss_function(q, k, queue):
# N = q.shape[0]
# C = q.shape[1]
# pos = torch.exp(torch.div(torch.bmm(q.view(N,1,C), k.view(N,C,1)).view(N, 1),tau))
# neg = torch.sum(torch.exp(torch.div(torch.mm(q.view(N,C), torch.t(queue)),tau)), dim=1)
# denominator = neg + pos
# return torch.mean(-torch.log(torch.div(pos,denominator)))
# def loss_function_NCL(q, k):
# N = q.shape[0]
# C = q.shape[1]
# pos = torch.exp(torch.div(torch.bmm(q.view(N,1,C), k.view(N,C,1)).view(N, 1),tau))
# # neg = torch.sum(torch.exp(torch.div(torch.mm(q.view(N,C), torch.t(queue)),tau)), dim=1)
# # denominator = neg + pos
# return torch.mean(-torch.log(pos))
# class AttenHead(nn.Module):
# def __init__(self, fdim, num_heads=1):
# super().__init__()
# self.num_heads = num_heads
# self.fatt = fdim//num_heads
# for i in range(num_heads):
# setattr(self, f'embd{i}', nn.Linear(fdim, self.fatt))
# for i in range(num_heads):
# setattr(self, f'fc{i}', nn.Linear(2*self.fatt, self.fatt))
# self.fc = nn.Linear(self.fatt*num_heads, fdim)
# self.dropout = nn.Dropout(0.1)
# def forward(self, fx_in, fp_in):
# fp_in = fp_in.squeeze(0)# Return tensor with all dimensions of input of size 1 removed.
# d = math.sqrt(self.fatt)
# Nx = len(fx_in)
# # print(fx_in.size(),fp_in.size())
# f = torch.cat([fx_in, fp_in])
# f = torch.stack([getattr(self, f'embd{i}')(f) for i in range(self.num_heads)]) # head x N x fatt
# # f: torch.Size([1, 5, 6]), ie., [Nheads,(Nfx+Nfp),#dims]
# # fx: torch.Size([1, 6]), i.e, [Nfx,#dims]
# # fp: torch.Size([1, 4, 6]), i.e., [Nheads,Nfp,#dims]
# fx, fp = f[:, :Nx], f[:, Nx:]
# w = self.dropout(F.softmax(torch.matmul(fx, torch.transpose(fp, 1, 2)) / d, dim=2)) # head x Nx x Np
# fa = torch.cat([torch.matmul(w, fp), fx], dim=2) # head x Nx x 2*fatt
# fa = torch.stack([F.relu(getattr(self, f'fc{i}')(fa[i])) for i in range(self.num_heads)]) # head x Nx x fatt
# fa = torch.transpose(fa, 0, 1).reshape(Nx, -1) # Nx x fdim
# fx = F.relu(fx_in + self.fc(fa)) # Nx x fdim
# w = torch.transpose(w, 0, 1) # Nx x head x Np
# return fx, w
| StarcoderdataPython |
3250216 | <gh_stars>0
import random
class Queue:
    """Simple FIFO queue backed by a Python list.

    The list is the single source of truth: the original kept a separate
    element counter that duplicated ``len()`` and could drift out of sync.
    """

    def __init__(self):
        self.__queue = []

    def enqueue(self, e):
        """Append *e* to the back of the queue."""
        self.__queue.append(e)

    def dequeue(self):
        """Remove the front element; silently a no-op when the queue is empty.

        Note: list.pop(0) is O(n); fine for the small queues used here.
        """
        if not self.empty():
            self.__queue.pop(0)

    def empty(self):
        """Return True when the queue holds no elements."""
        return not self.__queue

    def length(self):
        """Return the number of queued elements."""
        return len(self.__queue)

    def front(self):
        """Return the front element, or None when the queue is empty."""
        if not self.empty():
            return self.__queue[0]

    def show(self):
        """Print the queue contents."""
        print('Queue: {}'.format(self.__queue))
def main():
    """Demo driver: fill a queue with random two-digit ints and exercise it."""
    queue = Queue()
    for _ in range(10):
        queue.enqueue(random.randint(10, 99))

    def report():
        # Dump the queue state in the same format before and after dequeues.
        queue.show()
        print('Length : {}'.format(queue.length()))
        print('Front : {}'.format(queue.front()))

    report()
    queue.dequeue()
    queue.dequeue()
    report()
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11232148 | <filename>tagcloud/__init__.py
import os
import string
from tagcloud.lang.counter import get_tag_counts, sum_tag_counts
from tagcloud.font_size_mappers import linear_mapper, logarithmic_mapper
import codecs
def html_links_from_tags(tags, data_weight = 'dataWeight', top = 0):
    '''Create an HTML fragment of weighted links for the given tags.

    @tags        Iterable of (tag, frequency, ref) triples, e.g.
                 [('hola', 1, "http://hola.com")].
    @data_weight Name of the extra attribute added to each link from which
                 javascript will gather the weights for the tags.
    @top         Keep only the `top` most frequent tags; 0 keeps all of them.

    Returns the links (most frequent first) joined with '<br />' + newline.
    '''
    link_template = '<a href="%s" %s="%d">%s</a>'
    # The original list.sort(cmp, reverse=True) call is Python 2 only
    # (the cmp parameter was removed in Python 3); sort by frequency instead.
    ordered = sorted(tags, key=lambda t: t[1], reverse=True)
    links = [link_template % (ref, data_weight, frequency, tag)
             for tag, frequency, ref in ordered[0:(top or len(ordered))]]
    return '<br />\n'.join(links)
def render_template_by_dicts(tags, links = None, outfile_name = 'sample_cloud.html',
                             render_conf = None, top=50):
    """Render the tag-cloud template from dict inputs.

    @tags        Mapping of tag -> frequency.
    @links       Optional mapping of tag -> url; tags without a link get '#'.
    @render_conf Optional template substitutions, passed through.
    @top         Number of most-frequent tags to keep (now actually forwarded;
                 the original accepted it but silently dropped it).
    """
    # Avoid mutable default arguments: the original `render_conf={}` default
    # was shared across calls and mutated by render_template_by_tuples().
    if not links: links = {}
    if render_conf is None: render_conf = {}
    tags_tuples = [(tag, frequency, links.get(tag, '#'))
                   for tag, frequency in tags.items()]
    return render_template_by_tuples(tags_tuples, outfile_name = outfile_name,
                                     render_conf = render_conf, top = top)
def render_template_by_tuples(tags_tuples, outfile_name = 'sample_cloud.html', render_conf = None, top = 50):
    """Render the bundled HTML5 tag-cloud template to *outfile_name*.

    @tags_tuples (tag, frequency, ref) triples used to build the links.
    @render_conf Optional template substitutions; missing keys get defaults.
                 A caller-supplied dict is still filled in-place via setdefault,
                 matching the original behaviour.
    @top         Number of most-frequent tags to include in the cloud.
    """
    # Avoid a mutable default argument: the original `render_conf={}` dict was
    # mutated by the setdefault() calls below and leaked state across calls.
    if render_conf is None:
        render_conf = {}
    render_conf.setdefault('canvas_id', 'canvasId')
    render_conf.setdefault('link_container', 'linkContainer')
    render_conf.setdefault('size', '800')
    # Prepare the template
    render_conf['links'] = html_links_from_tags(tags_tuples, top = top)
    template_path = os.path.join(os.path.dirname(__file__), 'docs', 'html5template.html')
    with codecs.open(template_path, 'r', 'utf-8') as template_file:
        html_template = string.Template(template_file.read())
    # Write the output file
    with codecs.open(outfile_name, 'w', 'utf-8') as outfile:
        outfile.write(html_template.substitute(render_conf))
def sample_text():
    '''Return the bundled sample text file's contents as a string.'''
    path = os.path.join(os.path.dirname(__file__), 'docs', 'sample_text.txt')
    with codecs.open(path, 'r', 'utf-8') as sample_file:
        return sample_file.read()
| StarcoderdataPython |
252363 | <gh_stars>1-10
from wagtail.core import blocks
from wagtail.core.blocks import RichTextBlock, PageChooserBlock
from wagtail.core.rich_text import expand_db_html
from wagtail.images.blocks import ImageChooserBlock
from falmer.content.serializers import WagtailImageSerializer
from falmer.content.utils import get_public_path_for_page
class FalmerPageChooserBlock(PageChooserBlock):
    """Page chooser whose API output is a compact {title, path} dict (or None)."""
    def get_api_representation(self, value, context=None):
        # value is the chosen page, or None when nothing is selected.
        if value is None:
            return None
        return {
            'title': value.title,
            'path': get_public_path_for_page(value),
        }
class FalmerImageChooserBlock(ImageChooserBlock):
    """Image chooser serialised through WagtailImageSerializer in API output."""
    def get_api_representation(self, value, context=None):
        return WagtailImageSerializer(value).data
# Convenience alias.
ImageBlock = FalmerImageChooserBlock
class RichTextWithExpandedContent(RichTextBlock):
    """Rich text block whose API output is the stored source run through expand_db_html."""
    def get_api_representation(self, value, context=None):
        return expand_db_html(value.source)
class ContactBlock(blocks.StructBlock):
    """Contact card: free-text body plus the contact's name and email address."""
    body = blocks.TextBlock()
    name = blocks.CharBlock()
    email = blocks.EmailBlock()
    class Meta:
        icon = 'user'
class SectionBlock(blocks.StructBlock):
    """Titled page section: heading, optional heading image, and a stream of paragraphs."""
    heading = blocks.CharBlock(required=True)
    heading_image = FalmerImageChooserBlock(required=False)
    body = blocks.StreamBlock([
        ('paragraph', RichTextWithExpandedContent()),
    ])
    class Meta:
        icon = 'user'
class FigureBlock(blocks.StructBlock):
    """Image figure with title, subtitle, and an optional link."""
    title = blocks.CharBlock(required=True)
    subtitle = blocks.CharBlock(required=True)
    image = FalmerImageChooserBlock()
    link = blocks.CharBlock(required=False)
    class Meta:
        icon = 'user'
class PledgeBlock(blocks.StructBlock):
    """Pledge card: title, rich-text body, image, and a progress status choice."""
    title = blocks.CharBlock(required=True)
    body = RichTextWithExpandedContent(required=True)
    image = FalmerImageChooserBlock()
    # Progress of the pledge: in progress, completed, or intentionally blank.
    status = blocks.ChoiceBlock(choices=[
        ('in_progress', 'In Progress'),
        ('done', 'Done'),
        ('blank', 'Blank'),
    ]
    )
    class Meta:
        icon = 'text'
class HeroImageBlock(blocks.StructBlock):
    """Full-width hero image with an optional heading (falls back to the page title)."""
    heading = blocks.CharBlock(required=False, help_text='Leave empty to use the page title')
    image = FalmerImageChooserBlock()
    class Meta:
        icon = 'image'
| StarcoderdataPython |
247616 | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3ContainerSeparator"]
# Load the packaged CodeSystem definition shipped next to this module
# (same filename with a .json suffix).
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3ContainerSeparator:
    """
    v3 Code System ContainerSeparator
    A material in a blood collection container that facilitates the
    separation of blood cells from serum or plasma
    Status: active - Version: 2018-08-12
    Copyright None
    http://terminology.hl7.org/CodeSystem/v3-ContainerSeparator
    """
    gel = CodeSystemConcept(
        {
            "code": "GEL",
            "definition": "A gelatinous type of separator material.",
            "display": "Gel",
        }
    )
    """
    Gel
    A gelatinous type of separator material.
    """
    none = CodeSystemConcept(
        {
            "code": "NONE",
            "definition": "No separator material is present in the container.",
            "display": "None",
        }
    )
    """
    None
    No separator material is present in the container.
    """
    class Meta:
        # Parsed FHIR CodeSystem resource backing the concepts above.
        resource = _resource
| StarcoderdataPython |
11311438 | <gh_stars>10-100
from __future__ import division, absolute_import, print_function
import time
from integration_test import *
class ScanRecordTimeoutTestCase(IntegrationTest):
    """Verify that a badge's recording and scanning stop once their timeouts elapse."""
    def testCase(self, badge, logger):
        # Start both activities with a one-minute timeout each.
        badge.start_recording(timeout_minutes=1)
        badge.start_scanning(timeout_minutes=1)
        status = badge.get_status()
        self.assertTrue(status.collector_status)
        self.assertTrue(status.scanner_status)
        # Shortly before the timeout both should still be running.
        time.sleep(59)
        status = badge.get_status()
        self.assertTrue(status.collector_status)
        self.assertTrue(status.scanner_status)
        # Well past the timeout both should have stopped on their own.
        time.sleep(121)
        status = badge.get_status()
        self.assertFalse(status.collector_status)
        self.assertFalse(status.scanner_status)
if __name__ == "__main__":
    # Usage: python <script> <badge MAC address>
    if len(sys.argv) != 2:
        print("Please enter badge MAC address")
        exit(1)
    device_addr = sys.argv[1]
    testCase = ScanRecordTimeoutTestCase(device_addr)
    testCase.runTest()
11234959 | <gh_stars>1000+
# For django 1.x
class View:
    """Empty placeholder standing in for Django's class-based View (django 1.x shim, per the comment above)."""
    pass
| StarcoderdataPython |
148980 | """Provide XBlock urls"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from workbench import views
admin.autodiscover()
# Workbench URL map. Scenarios can be rendered with full chrome (the
# 'scenario/' routes) or as a bare block view (the 'view/' routes, which
# override the template with workbench/blockview.html).
urlpatterns = [
    url(r'^$', views.index, name='workbench_index'),
    url(
        r'^scenario/(?P<scenario_id>[^/]+)/(?P<view_name>[^/]+)/$',
        views.show_scenario,
        name='scenario'
    ),
    url(r'^userlist/$',
        views.user_list,
        name='userlist'),
    url(
        r'^scenario/(?P<scenario_id>[^/]+)/$',
        views.show_scenario,
        name='workbench_show_scenario'
    ),
    url(
        r'^view/(?P<scenario_id>[^/]+)/(?P<view_name>[^/]+)/$',
        views.show_scenario,
        {'template': 'workbench/blockview.html'}
    ),
    url(
        r'^view/(?P<scenario_id>[^/]+)/$',
        views.show_scenario,
        {'template': 'workbench/blockview.html'}
    ),
    # XBlock handler endpoints: authenticated, aside, and unauthenticated.
    url(
        r'^handler/(?P<usage_id>[^/]+)/(?P<handler_slug>[^/]*)(?:/(?P<suffix>.*))?$',
        views.handler, {'authenticated': True},
        name='handler'
    ),
    url(
        r'^aside_handler/(?P<aside_id>[^/]+)/(?P<handler_slug>[^/]*)(?:/(?P<suffix>.*))?$',
        views.aside_handler, {'authenticated': True},
        name='aside_handler'
    ),
    url(
        r'^unauth_handler/(?P<usage_id>[^/]+)/(?P<handler_slug>[^/]*)(?:/(?P<suffix>.*))?$',
        views.handler, {'authenticated': False},
        name='unauth_handler'
    ),
    # Static resources packaged with an XBlock.
    url(
        r'^resource/(?P<block_type>[^/]+)/(?P<resource>.*)$',
        views.package_resource,
        name='package_resource'
    ),
    url(
        r'^reset_state$',
        views.reset_state,
        name='reset_state'
    ),
    url(r'^admin/', admin.site.urls),
]
# Serve static assets via the staticfiles finders (development helper).
urlpatterns += staticfiles_urlpatterns()
| StarcoderdataPython |
1723333 | from typing import Optional
from .data import AttributesModel
class SshKeyModel(AttributesModel):
    """SSH key attributes: the key's name and its value (the key material)."""
    # Both attributes are optional strings.
    name: Optional[str]
    value: Optional[str]
| StarcoderdataPython |
4822991 | from zerver.context_processors import get_zulip_version_name
from zerver.lib.test_classes import ZulipTestCase
class TestContextProcessors(ZulipTestCase):
    def test_get_zulip_version_name(self) -> None:
        """The display name drops any '+git' build suffix and prefixes 'Zulip '."""
        self.assertEqual(get_zulip_version_name("4.0-dev+git"), "Zulip 4.0-dev")
        self.assertEqual(get_zulip_version_name("4.0"), "Zulip 4.0")
| StarcoderdataPython |
3507665 | """
Tree evaluation and rollback
"""
from smart_choice.decisiontree import DecisionTree
from smart_choice.examples import stguide, stbook, oil_tree_example
from tests.capsys import check_capsys
def test_stguide_fig_5_6a(capsys):
    """Fig. 5.6 (a) --- Evaluation of terminal nodes (display before rollback)."""
    nodes = stguide()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.display()
    check_capsys("./tests/files/stguide_fig_5_6a.txt", capsys)
def test_stguide_fig_5_6b(capsys):
    """Fig. 5.6 (b) --- Expected values after rollback."""
    nodes = stguide()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.rollback()
    tree.display()
    check_capsys("./tests/files/stguide_fig_5_6b.txt", capsys)
def test_stbook_fig_3_7_pag_54(capsys):
    """Fig. 3.7, pag. 54 --- Expected values after rollback.

    (The original docstring, "Example creation from Fig. 5.1", did not match
    this test's figure reference.)
    """
    nodes = stbook()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.rollback()
    tree.display()
    check_capsys("./tests/files/stbook_fig_3_7_pag_54.txt", capsys)
def test_stbook_fig_5_13_pag_114(capsys):
    """Fig. 5.13, pag. 114 --- Certainty-equivalent view under exponential utility."""
    nodes = stbook()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.rollback(utility_fn="exp", risk_tolerance=1000)
    tree.display(view="ce")
    check_capsys("./tests/files/stbook_fig_5_13_pag_114.txt", capsys)
def test_stbook_fig_5_11_pag_112(capsys):
    """Fig. 5.11, pag. 112 --- Expected-utility view under exponential utility.

    (The original docstring, "Dependent outcomes", did not match the body,
    which displays the "eu" view.)
    """
    nodes = stbook()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.rollback(utility_fn="exp", risk_tolerance=1000)
    tree.display(view="eu")
    check_capsys("./tests/files/stbook_fig_5_11_pag_112.txt", capsys)
def test_oilexample_pag_43(capsys):
    """Basic oil tree example, pag. 43 --- full display after rollback."""
    nodes = oil_tree_example()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.rollback()
    tree.display()
    check_capsys("./tests/files/oilexample_pag_43.txt", capsys)
def test_oilexample_pag_56(capsys):
    """Basic oil tree example, pag. 56 --- display truncated to max_deep=3."""
    nodes = oil_tree_example()
    tree = DecisionTree(nodes=nodes)
    tree.evaluate()
    tree.rollback()
    tree.display(max_deep=3)
    check_capsys("./tests/files/oilexample_pag_56.txt", capsys)
| StarcoderdataPython |
3538056 | # Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import functools
import numpy as np
import pytest
from braket.circuits.quantum_operator_helpers import (
get_pauli_eigenvalues,
is_cptp,
is_hermitian,
is_square_matrix,
is_unitary,
verify_quantum_operator_matrix_dimensions,
)
# Pauli X: square, unitary, and Hermitian -- the canonical "valid" operator.
valid_unitary_hermitian_matrix = np.array([[0, 1], [1, 0]])
# Two Kraus operators X/sqrt(2); sum of K^dagger K equals the identity,
# so together they form a valid CPTP map.
valid_CPTP_matrices = [
    np.array([[0, 1], [1, 0]]) / np.sqrt(2),
    np.array([[0, 1], [1, 0]]) / np.sqrt(2),
]
# Shapes the dimension check must reject: 1x1, vectors, and non-square matrices.
invalid_dimension_matrices = [
    (np.array([[1]])),
    (np.array([1])),
    (np.array([0, 1, 2])),
    (np.array([[0, 1], [1, 2], [3, 4]])),
    (np.array([[0, 1, 2], [2, 3]])),
    (np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])),
]
# Square but not unitary / not Hermitian, respectively.
invalid_unitary_matrices_false = [(np.array([[0, 1], [1, 1]])), (np.array([[1, 2], [3, 4]]))]
invalid_hermitian_matrices_false = [(np.array([[1, 0], [0, 1j]])), (np.array([[1, 2], [3, 4]]))]
# I and X together: sum of K^dagger K is 2I != I, so not trace-preserving.
invalid_CPTP_matrices_false = [np.array([[1, 0], [0, 1]]), np.array([[0, 1], [1, 0]])]
# Mixed-dtype matrix used to provoke exceptions inside the checkers.
invalid_matrix_type_error = np.array([[0, 1], ["a", 0]])
# Pauli Z; its diagonal gives the single-qubit Pauli eigenvalues (+1, -1).
z_matrix = np.array([[1, 0], [0, -1]])
def test_verify_quantum_operator_matrix_dimensions():
    """A well-formed 2x2 operator passes the dimension check (which returns None)."""
    assert verify_quantum_operator_matrix_dimensions(valid_unitary_hermitian_matrix) is None
def test_is_unitary_true():
    """Pauli X is unitary."""
    assert is_unitary(valid_unitary_hermitian_matrix)
def test_is_hermitian_true():
    """Pauli X is Hermitian."""
    assert is_hermitian(valid_unitary_hermitian_matrix)
def test_is_cptp_true():
    """A Kraus set whose K^dagger K sums to the identity is CPTP."""
    assert is_cptp(valid_CPTP_matrices)
def test_is_square_matrix():
    """A 2x2 matrix is recognised as square."""
    assert is_square_matrix(valid_unitary_hermitian_matrix)
@pytest.mark.xfail(raises=ValueError)
@pytest.mark.parametrize("matrix", invalid_dimension_matrices)
def test_verify_quantum_operator_matrix_dimensions_value_error(matrix):
    """Badly-shaped inputs must raise ValueError from the dimension check."""
    verify_quantum_operator_matrix_dimensions(matrix)
@pytest.mark.parametrize("matrix", invalid_unitary_matrices_false)
def test_is_unitary_false(matrix):
    """Non-unitary square matrices are rejected."""
    assert not is_unitary(matrix)
@pytest.mark.parametrize("matrix", invalid_hermitian_matrices_false)
def test_is_hermitian_false(matrix):
    """Non-Hermitian matrices are rejected."""
    assert not is_hermitian(matrix)
def test_is_cptp_false():
    """A Kraus set that is not trace-preserving is rejected."""
    assert not is_cptp(invalid_CPTP_matrices_false)
@pytest.mark.xfail(raises=Exception)
def test_is_hermitian_exception():
    """A mixed-dtype matrix makes the Hermitian check raise."""
    is_hermitian(invalid_matrix_type_error)
@pytest.mark.xfail(raises=Exception)
def test_is_unitary_exception():
    """A mixed-dtype matrix makes the unitarity check raise."""
    is_unitary(invalid_matrix_type_error)
@pytest.mark.xfail(raises=Exception)
def test_is_cptp_exception():
    """A mixed-dtype Kraus operator makes the CPTP check raise."""
    is_cptp([invalid_matrix_type_error])
def test_get_pauli_eigenvalues_correct_eigenvalues_one_qubit():
    """Test the get_pauli_eigenvalues function for one qubit (diag of Z: +1, -1)."""
    assert np.array_equal(get_pauli_eigenvalues(1), np.diag(z_matrix))
def test_get_pauli_eigenvalues_correct_eigenvalues_two_qubits():
    """Test the get_pauli_eigenvalues function for two qubits (diag of Z tensor Z)."""
    assert np.array_equal(get_pauli_eigenvalues(2), np.diag(np.kron(z_matrix, z_matrix)))
def test_get_pauli_eigenvalues_correct_eigenvalues_three_qubits():
    """Test the get_pauli_eigenvalues function for three qubits (diag of Z^tensor-3)."""
    assert np.array_equal(
        get_pauli_eigenvalues(3),
        np.diag(np.kron(z_matrix, np.kron(z_matrix, z_matrix))),
    )
@pytest.mark.parametrize("depth", list(range(1, 6)))
def test_get_pauli_eigenvalues_cache_usage(depth):
    """Test that the right number of cachings have been executed after clearing the cache."""
    get_pauli_eigenvalues.cache_clear()
    get_pauli_eigenvalues(depth)
    # NOTE(review): relies on the private functools._CacheInfo named tuple
    # (hits, misses, maxsize, currsize) -- may break across Python versions.
    assert functools._CacheInfo(depth - 1, depth, 128, depth) == get_pauli_eigenvalues.cache_info()
@pytest.mark.xfail(raises=ValueError)
@pytest.mark.parametrize("num_qubits", [1, 2])
def test_get_pauli_eigenvalues_immutable(num_qubits):
    """Writing into the cached eigenvalue array must fail (result is read-only)."""
    get_pauli_eigenvalues(num_qubits)[0] = 100
| StarcoderdataPython |
6422013 | <gh_stars>0
from django.contrib import admin
# Register your models here.
from unesco.models import Site, Category, State,Iso,Region
# Expose the UNESCO models in the Django admin with default ModelAdmins.
admin.site.register(Site)
admin.site.register(Category)
admin.site.register(State)
admin.site.register(Iso)
admin.site.register(Region)
| StarcoderdataPython |
111731 | <reponame>bkmrk/bkmrk<filename>bkmrk/__init__.py<gh_stars>1-10
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_mail import Mail
from flask_migrate import Migrate
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from .config import Config
from . import utils
# Flask extension singletons; each is bound to the app inside create_app().
bootstrap = Bootstrap()
db = SQLAlchemy()
login = LoginManager()
login.login_message = 'Please log in to access this page.'
# Endpoint that unauthenticated users are redirected to.
login.login_view = 'auth.login'
mail = Mail()
migrate = Migrate()
moment = Moment()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app.

    Binds the module-level extensions to the new app, registers the errors,
    auth ('/auth' prefix), and main blueprints, attaches a stream logger,
    and returns the app.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    bootstrap.init_app(app)
    db.init_app(app)
    login.init_app(app)
    mail.init_app(app)
    migrate.init_app(app, db)
    moment.init_app(app)
    # Blueprints are imported inside the factory, presumably to avoid
    # circular imports at module load time -- TODO confirm.
    from bkmrk.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from bkmrk.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')
    from bkmrk.main import bp as main_bp
    app.register_blueprint(main_bp)
    utils.add_stream_logger(app)
    return app
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.