id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1788416 | __author__ = '<EMAIL>'
from threading import Thread, Event, current_thread
import threading
class SyncThread(Thread):
    """A worker thread, always named 'SyncThread', with a cooperative stop flag.

    The thread's target is expected to poll ``stopped()`` (e.g. via the
    module-level ``thread_is_stoppped`` helper) and exit when it is set.
    """

    def __init__(self, target):
        super(SyncThread, self).__init__(target=target, name='SyncThread')
        # Internal shutdown signal; set() is the only way to request a stop.
        self._stop_event = Event()

    def stop(self):
        """Request a cooperative shutdown of the thread."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()
def thread_is_stoppped():
    """Return True when called from the SyncThread after stop() was requested.

    Any other thread (including the main thread) always gets False.
    """
    me = current_thread()
    return me.name == 'SyncThread' and me.stopped()
def get_sync_thread():
    """Return the currently running SyncThread, or None if there isn't one."""
    return next(
        (t for t in threading.enumerate() if t.name == 'SyncThread'), None)
def create_and_start_sync_thread(sync_routine):
    """Start a daemon SyncThread running *sync_routine*.

    No-op when a SyncThread is already alive, so at most one exists at a time.
    """
    if get_sync_thread() is not None:
        return
    worker = SyncThread(target=sync_routine)
    worker.daemon = True  # don't block interpreter exit
    worker.start()
| StarcoderdataPython |
116019 | <reponame>apple/ml-cvnets<gh_stars>100-1000
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from typing import Any
import numpy as np
def setup_size(size: Any, error_msg="Need a tuple of length 2"):
    """Normalize *size* into a pair of dimensions.

    An int ``n`` becomes ``(n, n)``; a 1-element list/tuple ``[v]`` becomes
    ``(v, v)``; a length-2 sequence is returned unchanged. Anything else with
    a length other than 2 raises ``ValueError(error_msg)``.
    """
    if isinstance(size, int):
        return size, size
    if isinstance(size, (list, tuple)) and len(size) == 1:
        only = size[0]
        return only, only
    if len(size) != 2:
        raise ValueError(error_msg)
    return size
def intersect(box_a, box_b):
    """Computes the intersection areas between boxes in *box_a* and *box_b*.

    box_a has shape [N, 4] (x1, y1, x2, y2); box_b is a single box [4].
    Returns an array of N intersection areas (0 where boxes don't overlap).
    """
    top_left = np.maximum(box_a[:, :2], box_b[:2])
    bottom_right = np.minimum(box_a[:, 2:], box_b[2:])
    # Negative extents mean no overlap; clamp them to zero before multiplying.
    wh = np.clip(bottom_right - top_left, a_min=0, a_max=np.inf)
    return wh[:, 0] * wh[:, 1]
def jaccard_numpy(box_a: np.ndarray, box_b: np.ndarray):
    """
    Computes the intersection over union (Jaccard overlap) of boxes.
    Args:
        box_a (np.ndarray): Boxes of shape [Num_boxes_A, 4]
        box_b (np.ndarray): A single box of shape [4]
    Returns:
        intersection over union scores. Shape is [box_a.shape[0]]
    """
    inter = intersect(box_a, box_b)
    # Per-box areas: (x2 - x1) * (y2 - y1).
    area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])  # [A]
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])  # scalar
    # Inclusion-exclusion: union = A + B - intersection.
    union = area_a + area_b - inter
    return inter / union  # [A]
| StarcoderdataPython |
26604 | <gh_stars>1-10
try:
from zipfile import ZipFile
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
import os
except Exception as e:
print(e)
## http://www.hongyusu.com/imt/technology/spark-via-python-basic-setup-count-lines-and-word-counts.html
def push_acc():
    """Read the accidents CSV data from the shared volume and write it to HDFS
    as parquet."""
    # Connect to the standalone Spark master; the app name shows up in the UI.
    spark = SparkSession.builder \
        .master('spark://master:7077') \
        .appName("Push Accidents data to HDFS") \
        .getOrCreate()
    sc = spark.sparkContext
    sc.setLogLevel('WARN')
    # unzip the file
    # with ZipFile("/volume/data/accidents_2012_2018.zip", 'r') as zipObj:
    # zipObj.extractall('/volume/data')
    # read the data from the volume
    acc_data = spark.read.csv("/volume/data/")
    # push the data on HDFS as parquet
    acc_data.write.parquet("hdfs://hadoop/acc_data_parquet")


if __name__ == "__main__":
    push_acc()
| StarcoderdataPython |
61786 | <reponame>OliverLPH/PaddleClas
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import cv2
import time
import sys
sys.path.insert(0, ".")
from ppcls.utils import logger
from tools.infer.utils import parse_args, get_image_list, create_paddle_predictor, preprocess, postprocess
class Predictor(object):
    """Wraps a paddle inference predictor for image classification.

    ``normal_predict`` classifies real images from ``args.image_file`` in
    batches and prints top-k results; ``benchmark_predict`` times the model on
    random inputs.

    Fix: the original methods read the module-level global ``args`` in several
    places while using ``self.args`` in others; all accesses now consistently
    go through ``self.args`` (same object, stored in ``__init__``).
    """

    def __init__(self, args):
        # HALF precision predict only works when using tensorrt
        if args.use_fp16 is True:
            assert args.use_tensorrt is True
        self.args = args
        self.paddle_predictor = create_paddle_predictor(args)
        # The model is assumed to expose exactly one input and one output.
        input_names = self.paddle_predictor.get_input_names()
        self.input_tensor = self.paddle_predictor.get_input_handle(input_names[
            0])
        output_names = self.paddle_predictor.get_output_names()
        self.output_tensor = self.paddle_predictor.get_output_handle(
            output_names[0])

    def predict(self, batch_input):
        """Run one forward pass; returns the raw output as a numpy array."""
        self.input_tensor.copy_from_cpu(batch_input)
        self.paddle_predictor.run()
        batch_output = self.output_tensor.copy_to_cpu()
        return batch_output

    def normal_predict(self):
        """Classify every readable image under ``self.args.image_file``."""
        image_list = get_image_list(self.args.image_file)
        batch_input_list = []
        img_name_list = []
        cnt = 0
        for idx, img_path in enumerate(image_list):
            img = cv2.imread(img_path)
            if img is None:
                logger.warning(
                    "Image file failed to read and has been skipped. The path: {}".
                    format(img_path))
                continue
            img = img[:, :, ::-1]  # BGR (OpenCV default) -> RGB
            img = preprocess(img, self.args)
            batch_input_list.append(img)
            img_name = img_path.split("/")[-1]
            img_name_list.append(img_name)
            cnt += 1
            # Flush when the batch is full, or at the end (possibly partial).
            if cnt % self.args.batch_size == 0 or (idx + 1) == len(image_list):
                batch_outputs = self.predict(np.array(batch_input_list))
                batch_result_list = postprocess(batch_outputs, self.args.top_k)
                for number, result_dict in enumerate(batch_result_list):
                    filename = img_name_list[number]
                    clas_ids = result_dict["clas_ids"]
                    scores_str = "[{}]".format(", ".join("{:.2f}".format(
                        r) for r in result_dict["scores"]))
                    print(
                        "File:{}, Top-{} result: class id(s): {}, score(s): {}".
                        format(filename, self.args.top_k, clas_ids,
                               scores_str))
                batch_input_list = []
                img_name_list = []

    def benchmark_predict(self):
        """Time 500 random-input batches (after 10 warm-up runs) and print stats."""
        test_num = 500
        test_time = 0.0
        for i in range(0, test_num + 10):
            inputs = np.random.rand(self.args.batch_size, 3, 224,
                                    224).astype(np.float32)
            start_time = time.time()
            batch_output = self.predict(inputs).flatten()
            if i >= 10:  # the first 10 iterations are warm-up
                test_time += time.time() - start_time
            time.sleep(0.01)  # sleep for T4 GPU
        fp_message = "FP16" if self.args.use_fp16 else "FP32"
        trt_msg = "using tensorrt" if self.args.use_tensorrt else "not using tensorrt"
        print("{0}\t{1}\t{2}\tbatch size: {3}\ttime(ms): {4}".format(
            self.args.model, trt_msg, fp_message, self.args.batch_size,
            1000 * test_time / test_num))
if __name__ == "__main__":
    args = parse_args()
    # Both the serialized model graph and its weights must exist before we
    # try to build a predictor from them.
    assert os.path.exists(
        args.model_file), "The path of 'model_file' does not exist: {}".format(
            args.model_file)
    assert os.path.exists(
        args.params_file
    ), "The path of 'params_file' does not exist: {}".format(args.params_file)
    predictor = Predictor(args)
    if not args.enable_benchmark:
        predictor.normal_predict()
    else:
        # Benchmark mode prints args.model in its report line, so it must be set.
        assert args.model is not None
        predictor.benchmark_predict()
| StarcoderdataPython |
3242632 | import pandas as pd
import numpy as np
import requests
import json
'''
Author: <NAME>
Purpose: Geocode addresses in downtown Detroit to specific x,y coordinates.
Input: an Excel table with address column(s)
ADDITIONAL OUTPUT FIELDS:
1. x, y: longitude and latitude of the geocoding result (from the Google API)
2. flag: indicates whether the geocoder matched the exact input address
System Requirements:
1. Requires the pandas library
'''
# MAIN PARAMETERS
# All runtime settings are read from config.json in the working directory.
with open('config.json') as json_data_file:
    data = json.load(json_data_file)
input_table = str(data['input_table'])        # path of the input Excel table
output_table = str(data['output_table'])      # path of the output Excel table
reference_path = str(data['reference_path'])  # path of the reference address table
googleApiKey = str(data['googleApiKey'])
county = str(data['county'])
state = str(data['state'])
viewbox = str(data['viewbox'])  # Nominatim result bounding box
bound = str(data['bound'])      # Google geocoder bounds
ref_data = pd.read_excel(reference_path)  # load reference data
def OSM_geocode(address):
    """Geocode *address* with OSM Nominatim, falling back to Google on failure.

    Results are restricted to the configured county/state/viewbox. Returns the
    display name (formatted address string) of the best match.
    """
    url = 'https://nominatim.openstreetmap.org/search'
    global county, state, viewbox
    params = {'q': address,
              'county': county,
              'state': state,
              'viewbox': viewbox,
              'bounded': 1,
              'format': 'json',
              'addressdetails': 0,
              'countrycodes': 'US'}
    try:
        R = requests.get(url, params=params)
        R.raise_for_status()
        response = R.json()
        display_name = response[0]['display_name']
    except Exception:
        # FIX: google_geocode() returns a (display_name, x, y) tuple; the
        # original fallback bound the whole tuple to display_name, silently
        # changing this function's return type. Keep only the address string.
        display_name, _x, _y = google_geocode(address)
    return display_name
def google_geocode(intersect):
    """Geocode an address or intersection with the Google Geocoding API.

    Returns a (formatted_address, lng, lat) tuple on success, or
    (False, False, False) when the response contains no usable result.
    Raises requests.HTTPError on a non-2xx response.
    """
    global bound, county, state, googleApiKey
    GoogleApiKey = googleApiKey
    params = {'address': '{},{}'.format(intersect, state),
              'bounds': bound,
              'key': GoogleApiKey}
    url = 'https://maps.googleapis.com/maps/api/geocode/json'
    R = requests.get(url, params=params)
    R.raise_for_status()
    response = R.json()
    try:
        display_name = response['results'][0]['formatted_address']
        x = response['results'][0]['geometry']['location']['lng']
        y = response['results'][0]['geometry']['location']['lat']
    except:
        # Empty 'results' or unexpected payload: signal failure with
        # False sentinels rather than raising.
        display_name = False
        x = False
        y = False
    return display_name, x, y
def match_ref(string, df):
    """Look up a geocoded address string in the reference DataFrame *df*.

    Parses the first two comma-separated tokens of *string* into an optional
    house-number range, a street name and an optional direction prefix, then
    filters *df* (expected columns: 'Address', 'ParsedRange', 'ParsedPreDir')
    accordingly and returns the matching rows.
    """
    ref_data = df
    prefix = ['East', 'South', 'West', 'North']
    first_word = string.strip().split(',')[0]
    second_word = string.strip().split(',')[1]
    # A first token that both starts and ends with a digit is treated as a
    # house-number range; the street name then comes from the second token.
    if list(first_word)[0].isdigit() and list(first_word)[-1].isdigit():
        parsed_range_r = first_word
        parsed_name_r = ' '.join(second_word.strip().split(' ')[:-1])
        reg_name = '^({}).*$'.format(parsed_name_r)
        # NOTE(review): this checks first_word for a direction prefix even
        # though first_word starts with a digit here -- presumably it should
        # inspect second_word; confirm intended behavior before changing.
        if first_word.strip().split(' ')[0] in prefix:
            parsed_dir_r = first_word.strip().split(' ')[0]
        else:
            parsed_dir_r = False
    else:
        parsed_range_r = False
        parsed_name_r = ' '.join(first_word.strip().split(' ')[:-1])
        reg_name = '^.*\s({}).*$'.format(parsed_name_r)
        if second_word.strip().split(' ')[0] in prefix:
            parsed_dir_r = second_word.strip().split(' ')[0]
        else:
            parsed_dir_r = False
        # Redundant: reg_name was already assigned the same value above.
        reg_name = '^.*\s({}).*$'.format(parsed_name_r)
    if parsed_range_r:
        matched_record = ref_data[(ref_data['ParsedRange'] == parsed_range_r)]
        matched_record = matched_record[matched_record['Address'].str.contains(
            reg_name)]
    else:
        matched_record = ref_data[ref_data['Address'].str.contains(reg_name)]
    if parsed_dir_r:
        # NOTE(review): indexing matched_record with a boolean Series built
        # from ref_data relies on pandas reindexing the key to the subset's
        # index (it emits a UserWarning); matched_record['ParsedPreDir'] would
        # be the cleaner equivalent.
        matched_record = matched_record[(
            ref_data['ParsedPreDir'] == parsed_dir_r)]
    else:
        pass
    return matched_record
def google_match_ref(string, x, y, df):
    """Match a Google-formatted address against the reference DataFrame *df*.

    Builds a regex from the street part of *string*, filters *df* on its
    'Address' column, and annotates the matches with the supplied x/y
    coordinates and a 'flag' column (None for an exact-style match, otherwise
    a note that only a fuzzy street-name match was possible).
    """
    ref_data = df
    flag = None
    first_word = string.strip().split(',')[0]
    first_word_1st_word = first_word.strip().split(' ')[0]
    second_word = string.strip().split(',')[1]
    # Leading token that starts and ends with a digit => a house number,
    # so the street name is the rest of that token.
    if list(first_word_1st_word)[0].isdigit() and list(first_word_1st_word)[-1].isdigit():
        parsed_address = ' '.join(first_word.strip().split(' ')[:-1])
        reg_name = '^({}).*$'.format(parsed_address)
    else:
        if list(second_word.strip().split(' ')[0])[0].isdigit() and list(second_word.strip().split(' ')[0])[
                -1].isdigit():
            parsed_address = ' '.join(second_word.strip().split(' ')[:-1])
            reg_name = '^({}).*$'.format(parsed_address)
        else:
            # No house number anywhere: fall back to a substring match and
            # record that the geocoder did not hit an exact address.
            flag = 'Do not match exact address.'
            parsed_address = ' '.join(second_word.strip().split(' ')[:-1])
            reg_name = '^.*({}).*$'.format(parsed_address)
    matched_record = ref_data[ref_data['Address'].str.contains(reg_name)]
    # NOTE(review): assigning columns on a filtered frame triggers pandas'
    # SettingWithCopyWarning; a .copy() before mutation would silence it.
    matched_record['flag'] = flag
    matched_record['x'] = x
    matched_record['y'] = y
    return matched_record
def geocode(address_input):
    """Geocode every address in *address_input* and return a result DataFrame.

    Each input is geocoded with Google, then matched against the reference
    table; failures append a placeholder row (copied from the first reference
    row) whose 'flag' column records the failure reason.

    NOTE(review): DataFrame.append was removed in pandas 2.0 -- this function
    requires pandas < 2 (pd.concat is the modern replacement).
    """
    global ref_data
    # Start from an empty frame that carries the reference table's schema.
    output_df = ref_data[ref_data['Address'] == False]
    for i, address in enumerate(address_input):
        print('Geocoding <{}>...'.format(address))
        google_output, x, y = google_geocode(address)
        if google_output:
            selected_record = google_match_ref(google_output, x, y, ref_data)
            if selected_record.shape[0] > 0:
                # Keep only the first match.
                selected_record = selected_record.iloc[0]
                output_df = output_df.append(selected_record)
                print(' Complete.')
            else:
                print(' No matching record found in the reference database.', )
                empty_output = ref_data.iloc[0].copy()
                empty_output['flag'] = 'No matching record found in the reference database.'
                empty_output['x'] = x
                empty_output['y'] = y
                output_df = output_df.append(empty_output)
        else:
            print(' Google GeoCoding Error: Address can\'t be found.', )
            empty_output = ref_data.iloc[0].copy()
            empty_output['flag'] = 'Google Address can\'t be found'
            empty_output['x'] = np.nan
            empty_output['y'] = np.nan
            output_df = output_df.append(empty_output)
    return output_df.reset_index()
def main():
    """Read the input Excel table, geocode every cell, write the output table."""
    # read input excel table
    global input_table, output_table
    # NOTE: 'input' shadows the builtin; all cells are flattened into one list.
    input = pd.read_excel(input_table)
    input_list = input.values.reshape((1, -1))[0]
    output = geocode(input_list)
    output['input_address'] = pd.Series(input_list)
    output.to_excel(output_table, sheet_name="geocoding_output")
    return


if __name__ == '__main__':
    main()
| StarcoderdataPython |
130898 | <reponame>maruel/swarming
#!/usr/bin/env vpython
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import subprocess
import sys
def run_tests(test_cmds, python3=False):
    """Run each test command sequentially and print a summary.

    Returns 0 when every executed test passed, 1 when any failed. Tests may
    be skipped (e.g. no python3 shebang) and are reported separately.
    """
    executed = 0
    skipped = []
    failed = []
    for command in test_cmds:
        exit_code, was_skipped = _run_test(command, python3=python3)
        if was_skipped:
            skipped.append(command)
            continue
        if exit_code:
            failed.append(command)
        executed += 1
    print('\n-------------------------------------------------------------------')
    print('Ran %d test files, Skipped %d test files' %
          (executed, len(skipped)))
    if skipped:
        print('\nSkipped tests:')
        for name in skipped:
            print(' - %s' % name)
    if failed:
        print('\nFailed tests:')
        for name in failed:
            print(' - %s' % name)
        print('\nFAILED')
        return 1
    print('\nOK')
    return 0
def _run_test(cmd, python3=False):
    """Run one test command under vpython; return (exit_code, skipped)."""
    # In python3 mode, only files whose shebang mentions python3 are run.
    if python3 and not _has_py3_shebang(cmd[0]):
        print('Skipping test in python3: %s' % cmd)
        return 0, True
    interpreter = 'vpython3' if python3 else 'vpython'
    cmd = [interpreter] + cmd
    # Windows needs shell=True for the interpreter lookup to work.
    use_shell = sys.platform == 'win32'
    print('Running test script: %r' % cmd)
    return subprocess.call(cmd, shell=use_shell), False
def _has_py3_shebang(path):
with open(path, 'r') as f:
maybe_shebang = f.readline()
return maybe_shebang.startswith('#!') and 'python3' in maybe_shebang
| StarcoderdataPython |
3397414 | <gh_stars>10-100
#!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'  # silence TF C++ info/warning logs
import numpy as np
# NOTE(review): passing np.nan as threshold raises on NumPy >= 1.22
# (sys.maxsize is the modern idiom) -- confirm the pinned NumPy version.
np.set_printoptions(threshold=np.nan)
import tensorflow as tf
import time
# seeding for debug purposes --- dont forget to remove
SEED = 12345
np.random.seed(SEED)
tf.set_random_seed(SEED)  # TF1-style global graph-level seed
def convolve_inner_layers(x, W, b):
    """SAME-padded 2D convolution + bias, followed by ReLU."""
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(conv, b))
def convolve_ouput_layer(x, W, b):
    """SAME-padded 2D convolution + bias with no activation (linear output)."""
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.bias_add(conv, b)
def conv_net(x, W, b):
    """Stack three ReLU conv layers then a linear output conv layer."""
    hidden = x
    for idx in ('1', '2', '3'):
        hidden = convolve_inner_layers(hidden, W['weights' + idx], b['bias' + idx])
    return convolve_ouput_layer(hidden, W['weights_out'], b['bias_out'])
def run_training(image_dim_, initializer_scale_, learning_rate_):
    """Train the CNN on random image-difference data and report its loss/score.

    Builds random image pairs whose target is their absolute pixel difference,
    trains for 1000 epochs with Adam, and returns
    ((image_dim, initializer_scale, learning_rate), (final_train_loss, test_score)).
    Uses TF1 graph/session APIs.
    """
    # parameters
    filter_dim = 11
    number_images = 100
    image_dim = image_dim_
    input_layer = 2
    first_layer = 50
    second_layer = 25
    third_layer = 10
    output_layer = 1
    initializer_scale = initializer_scale_
    # train images
    rand_img_train_1 = np.random.random_sample((number_images,image_dim**2))
    rand_img_train_2 = np.random.random_sample((number_images,image_dim**2))
    difference_train = abs(rand_img_train_1 - rand_img_train_2)
    # test image
    rand_img_test_1 = np.random.random_sample((number_images,image_dim**2))
    rand_img_test_2 = np.random.random_sample((number_images,image_dim**2))
    difference_test = abs(rand_img_test_1 - rand_img_test_2)
    # stacking & reshaping images: the two images become the 2 input channels
    train_data = np.reshape(np.dstack((rand_img_train_1, rand_img_train_2)), [number_images,image_dim,image_dim,2])
    test_data = np.reshape(np.dstack((rand_img_test_1, rand_img_test_2)), [number_images,image_dim,image_dim,2])
    target_data_train = np.reshape(difference_train, [number_images,image_dim,image_dim,1])
    target_data_test = np.reshape(difference_test, [number_images,image_dim,image_dim,1])
    # initializing variables --- fan in (stddev scaled by fan-in of each layer)
    weights = {
        'weights1': tf.Variable(tf.random_normal([filter_dim,filter_dim,input_layer,first_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*input_layer)))),
        'weights2': tf.Variable(tf.random_normal([filter_dim,filter_dim,first_layer,second_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*first_layer)))),
        'weights3': tf.Variable(tf.random_normal([filter_dim,filter_dim,second_layer,third_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*second_layer)))),
        'weights_out': tf.Variable(tf.random_normal([filter_dim,filter_dim,third_layer,output_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*third_layer))))
    }
    biases = {
        'bias1': tf.Variable(tf.random_normal([first_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*input_layer)))),
        'bias2': tf.Variable(tf.random_normal([second_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*first_layer)))),
        'bias3': tf.Variable(tf.random_normal([third_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*second_layer)))),
        'bias_out': tf.Variable(tf.random_normal([output_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*third_layer))))
    }
    # tf Graph input
    x = tf.placeholder(tf.float32, [None, image_dim, image_dim, 2])
    y = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
    # paramaters
    learning_rate = learning_rate_
    epochs = 1000
    # model
    prediction = conv_net(x, weights, biases)
    # loss and optimization (MSE + Adam)
    cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    # session
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        epoch_count = 0
        start_time = time.time()  # NOTE: recorded but never reported
        print("starting training with paramaers: (im_dim={}, init_scale={}, lr={})".format(image_dim, initializer_scale, learning_rate))
        # Full-batch training: every epoch feeds the whole training set once.
        while epoch_count < epochs:
            x_data_train, y_data_train = train_data, target_data_train
            sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
            loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
            epoch_count+=1
        print(' optimization finished!')
        score = sess.run(cost, feed_dict={x: test_data, y: target_data_test})
        print(' score : {} '.format(score))
    return (image_dim, initializer_scale, learning_rate), (loss, score)
def main():
    """Grid-search over image size, initializer scale and learning rate.

    Trains one model per configuration via run_training() and writes one
    'setting,result' line per configuration to results.txt.
    """
    results = {}
    image_dims = [1, 2, 3, 4, 5]
    init_scales = [.01, .1, 1.0, 10.0]
    learning_rates = [.1, .01, .001]
    for dim in image_dims:
        for scale in init_scales:
            for learning_rate in learning_rates:
                setting, result = run_training(dim, scale, learning_rate)
                results[setting] = result
    # The with-statement closes the file; the original's explicit
    # write_file.close() inside the block was redundant and has been removed.
    with open('results.txt', mode='w') as write_file:
        for setting in results:
            write_file.write(str(setting) + ',' + str(results[setting]) + '\n')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1698693 | <gh_stars>0
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for MetricsAndPlotsEvaluator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.evaluators import metrics_validator
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.proto import validation_result_pb2
from google.protobuf import text_format
class MetricsValidatorTest(testutil.TensorflowModelAnalysisTest):
  """Tests for metrics_validator.validate_metrics.

  Each test builds an EvalConfig carrying a single metric threshold plus a
  matching sliced-metrics tuple for the overall (empty) slice, and asserts
  whether validation passes. The previously copy-pasted EvalConfig literals
  are factored into the private helpers below; test names and the configs
  they produce are unchanged.
  """

  def _value_threshold_config(self, **bounds):
    """EvalConfig with a WeightedExampleCount value threshold.

    Args:
      **bounds: 'lower_bound' and/or 'upper_bound' numeric values.
    """
    return config.EvalConfig(
        model_specs=[
            config.ModelSpec(),
        ],
        slicing_specs=[config.SlicingSpec()],
        metrics_specs=[
            config.MetricsSpec(
                metrics=[
                    config.MetricConfig(
                        class_name='WeightedExampleCount',
                        threshold=config.MetricThreshold(
                            value_threshold=config.GenericValueThreshold(
                                **{name: {'value': value}
                                   for name, value in bounds.items()}))),
                ],
                model_names=['']),
        ],
    )

  def _change_threshold_config(self, direction, absolute=None, relative=None):
    """EvalConfig (candidate + baseline) with a MeanPrediction change threshold."""
    kwargs = {'direction': direction}
    if absolute is not None:
      kwargs['absolute'] = {'value': absolute}
    if relative is not None:
      kwargs['relative'] = {'value': relative}
    return config.EvalConfig(
        model_specs=[
            config.ModelSpec(),
            config.ModelSpec(name='baseline', is_baseline=True)
        ],
        slicing_specs=[config.SlicingSpec()],
        metrics_specs=[
            config.MetricsSpec(
                metrics=[
                    config.MetricConfig(
                        class_name='MeanPrediction',
                        threshold=config.MetricThreshold(
                            change_threshold=config.GenericChangeThreshold(
                                **kwargs))),
                ],
                model_names=['']),
        ],
    )

  def _weighted_example_count_metrics(self, value):
    """Sliced metrics for the overall slice with one weighted_example_count."""
    return ((), {
        metric_types.MetricKey(name='weighted_example_count'): value,
    })

  def _mean_prediction_metrics(self, baseline, diff):
    """Sliced metrics with a baseline mean_prediction and its candidate diff."""
    return ((), {
        metric_types.MetricKey(name='mean_prediction', model_name='baseline'):
            baseline,
        metric_types.MetricKey(name='mean_prediction', is_diff=True):
            diff,
    })

  def testValidateMetricsMetricValueAndThreshold(self):
    eval_config = self._value_threshold_config(upper_bound=1)
    # 1.5 > upper bound 1, NOT OK.
    sliced_metrics = self._weighted_example_count_metrics(1.5)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertFalse(result.validation_ok)
    expected = text_format.Parse(
        """
        metric_validations_per_slice {
          slice_key {
          }
          failures {
            metric_key {
              name: "weighted_example_count"
            }
            metric_threshold {
              value_threshold {
                upper_bound {
                  value: 1.0
                }
              }
            }
            metric_value {
              double_value {
                value: 1.5
              }
            }
          }
        }""", validation_result_pb2.ValidationResult())
    self.assertEqual(result, expected)

  def testValidateMetricsValueThresholdUpperBoundFail(self):
    eval_config = self._value_threshold_config(upper_bound=1)
    # 1.5 > upper bound 1, NOT OK.
    sliced_metrics = self._weighted_example_count_metrics(1.5)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertFalse(result.validation_ok)

  def testValidateMetricsValueThresholdLowerBoundFail(self):
    eval_config = self._value_threshold_config(lower_bound=1)
    # 0 < lower bound 1, NOT OK.
    sliced_metrics = self._weighted_example_count_metrics(0)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertFalse(result.validation_ok)

  def testValidateMetricsValueThresholdUpperBoundPass(self):
    eval_config = self._value_threshold_config(upper_bound=1)
    # 0 < upper bound 1, OK.
    sliced_metrics = self._weighted_example_count_metrics(0)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertTrue(result.validation_ok)

  def testValidateMetricsValueThresholdLowerBoundPass(self):
    eval_config = self._value_threshold_config(lower_bound=1)
    # 2 > lower bound 1, OK.
    sliced_metrics = self._weighted_example_count_metrics(2)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertTrue(result.validation_ok)

  def testValidateMetricsChangeThresholdAbsoluteFail(self):
    eval_config = self._change_threshold_config(
        direction=config.MetricDirection.LOWER_IS_BETTER, absolute=-1)
    # Diff = -.333, not <= -1, NOT OK.
    sliced_metrics = self._mean_prediction_metrics(0.333, -0.333)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertFalse(result.validation_ok)

  def testValidateMetricsChangeThresholdRelativeFail(self):
    eval_config = self._change_threshold_config(
        direction=config.MetricDirection.LOWER_IS_BETTER, relative=-2)
    # Diff% = -.333/.333 = -100%, not <= -200%, NOT OK.
    sliced_metrics = self._mean_prediction_metrics(0.333, -0.333)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertFalse(result.validation_ok)

  def testValidateMetricsChangeThresholdAbsolutePass(self):
    eval_config = self._change_threshold_config(
        direction=config.MetricDirection.LOWER_IS_BETTER, absolute=0)
    # Diff = -.333 <= 0, OK.
    sliced_metrics = self._mean_prediction_metrics(0.333, -0.333)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertTrue(result.validation_ok)

  def testValidateMetricsChangeThresholdRelativePass(self):
    eval_config = self._change_threshold_config(
        direction=config.MetricDirection.LOWER_IS_BETTER, relative=0)
    # Diff% = -100% <= 0%, OK.
    sliced_metrics = self._mean_prediction_metrics(0.333, -0.333)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertTrue(result.validation_ok)

  def testValidateMetricsChangeThresholdHigherIsBetterPass(self):
    eval_config = self._change_threshold_config(
        direction=config.MetricDirection.HIGHER_IS_BETTER, absolute=-1)
    # Diff = -.333 >= -1, OK.
    sliced_metrics = self._mean_prediction_metrics(0.333, -0.333)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertTrue(result.validation_ok)

  def testValidateMetricsChangeThresholdHigherIsBetterFail(self):
    eval_config = self._change_threshold_config(
        direction=config.MetricDirection.HIGHER_IS_BETTER, absolute=0)
    # Diff = -.333, not >= 0, NOT OK.
    sliced_metrics = self._mean_prediction_metrics(0.333, -0.333)
    result = metrics_validator.validate_metrics(sliced_metrics, eval_config)
    self.assertFalse(result.validation_ok)
if __name__ == '__main__':
  tf.compat.v1.enable_v2_behavior()  # run the tests with TF2 semantics
  tf.test.main()
| StarcoderdataPython |
12280 | #! /usr/bin/env python
"""Toolbox for unbalanced dataset in machine learning."""
from setuptools import setup, find_packages
import os
import sys
import setuptools
from distutils.command.build_py import build_py
# Python 2/3 compatible access to the builtins module.
if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins

descr = """Toolbox for unbalanced dataset in machine learning."""

# Package metadata used by setup() below.
DISTNAME = 'unbalanced_dataset'
DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = '<NAME>, <NAME>'
MAINTAINER_EMAIL = '<EMAIL>, <EMAIL>'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'

# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by UnbalancedDataset to
# recursively build the compiled extensions in sub-packages is based on
# the Python import machinery.
builtins.__UNBALANCED_DATASET_SETUP__ = True

# Parse the version string out of the package __init__ without importing it.
with open('unbalanced_dataset/__init__.py') as fid:
    for line in fid:
        if line.startswith('__version__'):
            VERSION = line.strip().split()[-1][1:-1]
            break
with open('requirements.txt') as fid:
    INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for this package.

    NOTE(review): numpy.distutils was removed in NumPy >= 1.26 / Python 3.12;
    this setup only works with older toolchains.
    """
    # A stale MANIFEST can make distutils ship outdated file lists.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True)
    config.add_subpackage('unbalanced_dataset')
    return config
if __name__ == "__main__":
    # Prefer the numpy.distutils setup (needed for compiled sub-packages);
    # fall back to plain setuptools only for informational commands.
    try:
        from numpy.distutils.core import setup
        extra = {'configuration': configuration}
        # Do not try and upgrade larger dependencies
        for lib in ['scipy', 'numpy', 'matplotlib']:
            try:
                __import__(lib)
                INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
                                    if lib not in i]
            except ImportError:
                pass
    except ImportError:
        if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
                                   sys.argv[1] in ('--help-commands',
                                                   '--version',
                                                   'clean')):
            # For these actions, NumPy is not required.
            #
            # They are required to succeed without Numpy for example when
            # pip is used to install UnbalancedDataset when Numpy is not yet
            # present in the system.
            from setuptools import setup
            extra = {}
        else:
            print('To install UnbalancedDataset from source, you need numpy.' +
                  'Install numpy with pip:\n' +
                  'pip install numpy\n'
                  'Or use your operating system package manager.')
            sys.exit(1)
    setup(
        name=DISTNAME,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        url=URL,
        license=LICENSE,
        download_url=DOWNLOAD_URL,
        version=VERSION,
        classifiers=['Intended Audience :: Science/Research',
                     'Intended Audience :: Developers',
                     'License :: OSI Approved',
                     'Programming Language :: Python',
                     'Topic :: Software Development',
                     'Topic :: Scientific/Engineering',
                     'Operating System :: Microsoft :: Windows',
                     'Operating System :: POSIX',
                     'Operating System :: Unix',
                     'Operating System :: MacOS',
                     'Programming Language :: Python :: 2',
                     'Programming Language :: Python :: 2.6',
                     'Programming Language :: Python :: 2.7',
                     'Programming Language :: Python :: 3',
                     'Programming Language :: Python :: 3.3',
                     'Programming Language :: Python :: 3.4',
                     ],
        install_requires=INSTALL_REQUIRES,
        requires=REQUIRES,
        packages=setuptools.find_packages(exclude=['doc']),
        include_package_data=True,
        zip_safe=False,  # the package can run out of an .egg file
        cmdclass={'build_py': build_py},
        **extra
    )
| StarcoderdataPython |
181091 | <filename>src/icupy/number.py<gh_stars>0
"""
Module for icu::number namespace
"""
from .icu.number import * # noqa
| StarcoderdataPython |
1709295 | #ToDo : Write tests for application interface
import pytest
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from mp3wav.application import Mp3WavApp
from mp3wav.exceptions.fileexception import FileTypeException
from mp3wav.exceptions.libraryexception import LibraryException
from mp3wav.exceptions.filenotexistexception import FileNotExistException
from mp3wav.exceptions.samefileexception import SameFileException
from mp3wav.exceptions.overwriteexception import OverWriteException
def windowTest(qtbot):
    """Smoke test: the main window shows and closes cleanly.

    NOTE(review): pytest only collects functions named ``test_*``; as
    written this function is never run by the test runner.
    """
    testapp = Mp3WavApp()
    testapp.show()
    qtbot.addWidget(testapp)
    assert testapp.isVisible()
    assert testapp.close()
def fileTypeTest(qtbot, tmpdir):
    """Converting to an unsupported output extension raises FileTypeException.

    NOTE(review): not collected by pytest (name lacks the ``test_`` prefix).
    """
    testapp = Mp3WavApp()
    qtbot.addWidget(testapp)
    # Create a dummy mp3 file to serve as the conversion input.
    infile = tmpdir.mkdir("files").join("demo.mp3")
    infile.write("something")
    testapp.inputFileLine.setText(str(tmpdir.join("files", "demo.mp3")))
    testapp.outputFileLine.setText(str(tmpdir.join('files')))
    # 'demo.wave' is deliberately a bad extension (expected: .wav).
    testapp.outputFileLineName.setText('demo.wave')
    with pytest.raises(FileTypeException):
        qtbot.mouseClick(testapp.conversionButton, Qt.LeftButton)
| StarcoderdataPython |
3298780 | '''Implements the required methods for instruction counting using
ARM assembly.
'''
from typing import List
from asm_analyser import counter
from asm_analyser.blocks.code_block import CodeBlock
from asm_analyser.blocks.basic_block import BasicBlock
class ArmCounter(counter.Counter):
    '''Implements the Counter class for ARM assembly.
    '''

    @staticmethod
    def insert_counters(code_blocks: List[CodeBlock],
                        basic_blocks: List[BasicBlock]) -> List[CodeBlock]:
        '''Insert a ``ctr`` pseudo-instruction at the start of each basic block.

        The i-th basic block receives ``('ctr', [str(i)])`` so the generated
        code can bump ``counters[i]`` every time the block runs.  Returns the
        (mutated) list of code blocks.
        '''
        last_block_name = ''
        instr_index = 0
        code_index = -1
        for i, block in enumerate(basic_blocks):
            if block.parent_block != last_block_name:
                # Entering a new parent code block: restart the insertion
                # offset and locate that block in code_blocks.
                instr_index = 0
                last_block_name = block.parent_block
                code_index = next((j for j, item in enumerate(code_blocks)
                                   if item.name == last_block_name), -1)
            if code_index == -1:
                # Fixed bug: the original fell through with index -1 and
                # silently inserted counters into the *last* code block.
                continue
            code_blocks[code_index].instructions.insert(
                instr_index, (-1, 'ctr', [str(i)]))
            # Advance past the inserted counter plus this basic block's own
            # instructions so the next counter lands after them.
            instr_index += 1
            instr_index += len(block.instructions)
        return code_blocks

    @staticmethod
    def get_counter_defs(blocks: List[BasicBlock]) -> str:
        '''Return C declarations for the per-block counter arrays.'''
        if len(blocks) <= 0:
            return ''
        # array with an entry for each basic block
        result = f'int counters[{len(blocks)}];\n'
        # array with size of each basic block
        result += f'int block_sizes[{len(blocks)}];\n'
        return result

    @staticmethod
    def get_counter_init(blocks: List[BasicBlock]) -> str:
        '''Return C designated initializers for the counter arrays.'''
        if len(blocks) <= 0:
            return ''
        # initialize counter array with 0
        result = '.counters = {0}, '
        # initialize basic block sizes array with each block's length
        result += '.block_sizes = {'
        block_lengths = [str(len(block.instructions)) for block in blocks]
        result += ','.join(block_lengths)
        result += '},\n'
        return result

    @staticmethod
    def write_instr_counts(file_path: str, blocks: List[BasicBlock],
                           block_counts: List[int]) -> None:
        '''Rewrite the assembly file, prefixing each line with its run count.

        Lines belonging to basic block ``i`` are prefixed with
        ``block_counts[i]``; all other lines get ``0``.
        '''
        asm_lines = []
        with open(file_path, 'r') as f:
            asm_lines = f.readlines()
        line_index = 0
        with open(file_path, 'w') as f:
            for i, block in enumerate(blocks):
                for instr in block.instructions:
                    # Lines before this instruction belong to no counted block.
                    while line_index < instr[0]:
                        f.write(f'0 {asm_lines[line_index]}')
                        line_index += 1
                    if line_index >= len(asm_lines):
                        # Instruction index past end of file; nothing to write.
                        break
                    # Fixed bug: a bare try/except: pass here used to *drop*
                    # the line entirely when block_counts was shorter than
                    # blocks; now the line is kept with an explicit 0 count.
                    count = block_counts[i] if i < len(block_counts) else 0
                    f.write(f'{count} {asm_lines[line_index]}')
                    line_index += 1
            while line_index < len(asm_lines):
                f.write(f'0 {asm_lines[line_index]}')
                line_index += 1
| StarcoderdataPython |
1722251 | <filename>belter/tests/test_entities.py<gh_stars>0
from py2d.Math import Polygon
from colortuple import Color
from ..entities import create_asteroid, create_ship
def test_ship():
    """create_ship positions the ship and gives it a color and a 3-vertex shape."""
    ship = create_ship(1, 2)
    assert ship.x == 1
    assert ship.y == 2
    assert isinstance(ship.color, Color)
    assert isinstance(ship.shape, Polygon)
    assert len(ship.shape) == 3
def test_asteroid():
    """create_asteroid positions the asteroid and gives it a color and a 3-vertex shape."""
    asteroid = create_asteroid(1, 2)
    assert asteroid.x == 1
    assert asteroid.y == 2
    assert isinstance(asteroid.color, Color)
    assert isinstance(asteroid.shape, Polygon)
    assert len(asteroid.shape) == 3
| StarcoderdataPython |
1748366 | <filename>MLlib/models.py<gh_stars>0
from optimizers import GradientDescent
from utils.misc_utils import generate_weights
from utils.decision_tree_utils import partition, find_best_split
from utils.decision_tree_utils import Leaf, Decision_Node
from utils .knn_utils import get_neighbours
from utils.naive_bayes_utils import make_likelihood_table
import numpy as np
import pickle
from activations import sigmoid
from activations import sigmoid
from datetime import datetime
class LinearRegression():
    """Linear model ``y = X @ w`` trained with a pluggable gradient optimizer."""

    # Timestamp format for versioned model files.
    # NOTE(review): the original referenced an undefined module-level
    # DATE_FORMAT; it is defined here as a class attribute instead.
    DATE_FORMAT = '%Y-%m-%d_%H-%M-%S'

    def fit(self, X, Y, optimizer=GradientDescent, epochs=25, zeros=False, save_best=False):
        """Train the model.

        X, Y       -- training inputs / targets (numpy arrays).
        optimizer  -- object exposing ``iterate()`` and ``loss_func``.
        epochs     -- number of optimizer iterations.
        zeros      -- initialize weights to zeros instead of random values.
        save_best  -- also pickle the lowest-loss weights seen so far.
        """
        self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
        # Fixed bug: the original wrote {weights: None, loss: float('inf')}
        # with undefined bare names as keys (NameError) and later read a
        # nonexistent local `best_weights`; use string keys on self.
        self.best_weights = {'weights': None, 'loss': float('inf')}
        print("Starting training with loss:",
              optimizer.loss_func.loss(X, Y, self.weights))
        for epoch in range(1, epochs + 1):
            print("======================================")
            print("epoch:", epoch)
            self.weights = optimizer.iterate(X, Y, self.weights)
            epoch_loss = optimizer.loss_func.loss(X, Y, self.weights)
            if save_best and epoch_loss < self.best_weights['loss']:
                print("updating best weights (loss: {})".format(epoch_loss))
                self.best_weights['weights'] = self.weights
                self.best_weights['loss'] = epoch_loss
                version = "model_best_" + datetime.now().strftime(self.DATE_FORMAT)
                print("Saving best model version: ", version)
                self.save(version)
            print("Loss in this step: ", epoch_loss)
        version = "model_final_" + datetime.now().strftime(self.DATE_FORMAT)
        print("Saving final model version: ", version)
        self.save(version)
        print("======================================\n")
        print("Finished training with final loss:",
              optimizer.loss_func.loss(X, Y, self.weights))
        print("=====================================================\n")

    def predict(self, X):
        """Return predictions ``X @ weights``."""
        return np.dot(X, self.weights)

    def save(self, name):
        """Pickle this model to ``<name>.rob``.

        NOTE(review): the file is opened in append mode, so saving twice to
        the same name stacks multiple pickles in one file.
        """
        with open(name + '.rob', 'ab') as robfile:
            pickle.dump(self, robfile)
class LogisticRegression(LinearRegression):
    """Binary classifier: the linear model squashed through a sigmoid."""

    def predict(self, X):
        """Return P(y=1 | X) as a row vector of sigmoid activations."""
        return sigmoid(np.dot(X, self.weights).T)

    def classify(self, X):
        """Return hard 0/1 labels (shape (1, n)) by thresholding at 0.5."""
        probabilities = sigmoid(np.dot(X, self.weights).T)
        labels = np.zeros((1, X.shape[0]))
        # Strictly-greater threshold, matching the probability cutoff.
        labels[probabilities > 0.5] = 1
        return labels
class DecisionTreeClassifier():
    """CART-style decision tree built by recursively maximizing info gain.

    Fixed bugs: the original ``fit`` mixed returning nodes with assigning
    ``self.root`` on every recursion level (so the built tree was wrong),
    and ``print_tree``/``classify`` recursed into themselves with the wrong
    number of arguments (TypeError).  The recursion now lives in private
    helpers operating on nodes; the public interface is unchanged.
    """

    # Top of the fitted tree: a Decision_Node or a Leaf (None before fit).
    root = None

    def fit(self, rows):
        """Build the tree from training rows and store it on ``self.root``."""
        self.root = self._build(rows)

    def _build(self, rows):
        # Find the question that yields the highest information gain.
        gain, question = find_best_split(rows)
        # Base case: no further info gain -> predict from this partition.
        if gain == 0:
            return Leaf(rows)
        true_rows, false_rows = partition(rows, question)
        # Recursively build both branches under this question.
        return Decision_Node(question,
                             self._build(true_rows),
                             self._build(false_rows))

    def print_tree(self, spacing=""):
        """Pretty-print the fitted tree, indenting by ``spacing`` per level."""
        self._print(self.root, spacing)

    def _print(self, node, spacing):
        # Base case: we've reached a leaf.
        if isinstance(node, Leaf):
            print(spacing + "Predict", node.predictions)
            return
        # Print the question at this node, then both branches.
        print(spacing + str(node.question))
        print(spacing + '--> True:')
        self._print(node.true_branch, spacing + "  ")
        print(spacing + '--> False:')
        self._print(node.false_branch, spacing + "  ")

    def classify(self, row):
        """Return the prediction mapping for a single row."""
        return self._classify(self.root, row)

    def _classify(self, node, row):
        # Base case: we've reached a leaf.
        if isinstance(node, Leaf):
            return node.predictions
        # Follow the branch selected by this node's question.
        if node.question.match(row):
            return self._classify(node.true_branch, row)
        return self._classify(node.false_branch, row)
class KNN():
    """
    K-nearest-neighbours model usable as either a classifier or a regressor.
    """

    def predict(self, train, test_row, num_neighbours=7, classify=True):
        """Predict by majority vote (classify=True) or mean (classify=False)."""
        neighbours = get_neighbours(
            train, test_row, num_neighbours, distance_metrics="block")
        labels = [row[-1] for row in neighbours]
        if classify:
            # Most frequent label among the nearest rows.
            return max(set(labels), key=labels.count)
        # Average of the nearest rows' target values.
        return sum(labels) / len(labels)
class Naive_Bayes():
    """
    pyx: P(y/X) is proportional to p(x1/y)*p(x2/y)...*p(y)
    using log and adding as multiplying for smaller numbers can make them very small
    As denominator P(X)=P(x1)*P(x2).. is common we can ignore it
    """

    # NOTE(review): this method is broken as written: it is missing `self`,
    # references X, Y, x_label and y_class which are never defined in this
    # scope, and calls `math` although the module never imports it.  Calling
    # it will raise NameError.  Kept byte-identical pending a real fix.
    def predict():
        pyx = []
        likelihood = make_likelihood_table(X, Y, x_label, y_class)
        for j in range(len(Y)):
            Sum = 0
            # Accumulate log-likelihoods over features (zeros skipped to
            # avoid log(0)).
            for i in range(len(X)):
                if(likelihood[i][j] == 0):
                    continue
                Sum += math.log(likelihood[i][j])
            y_sum = (y_class == Y[j]).sum()
            if y_sum:
                # Add the log prior of this class value.
                Sum += math.log(y_sum / len(y_class))
            pyx.append([Sum, X[i], Y[j]])
        # Highest (log) posterior wins.
        prediction = max(pyx)
        return [prediction[1], prediction[2]]
| StarcoderdataPython |
1729326 | <gh_stars>0
import unittest
from user import User,Credentials
class TestUser(unittest.TestCase):
    """
    Test class that defines test cases for the User and Credentials classes.

    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    """

    def setUp(self):
        """
        Set up method to run before each test case
        """
        self.new_user = User("JulietKoech", "<PASSWORD>")
        self.new_credential = Credentials("Twitter", "Julz", "<PASSWORD>")

    def test__init__(self):
        """test_init test case to test if the object is initialized properly
        """
        self.assertEqual(self.new_user.user_name, "JulietKoech")
        self.assertEqual(self.new_user.password, "<PASSWORD>")
        self.assertEqual(self.new_credential.site_name, "Twitter")
        self.assertEqual(self.new_credential.site_username, "Julz")
        self.assertEqual(self.new_credential.site_password, "<PASSWORD>")

    def test_save_user(self):
        """
        test_save_user test case to test if the user object is saved into the user_list
        """
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)

    def tearDown(self):
        """
        tearDown method that does clean up after each test case has run
        """
        User.user_list = []
        Credentials.credentials_list = []

    def test_save_multiple_user(self):
        """
        test_save_multiple_user to check if we can save multiple user objects to our user_list
        """
        self.new_user.save_user()
        test_user = User("Test", "12338")
        test_user.save_user()
        self.assertEqual(len(User.user_list), 2)

    def test_find_user_by_user_name(self):
        """
        test to check if we can find a user by user_name and display information
        """
        self.new_user.save_user()
        test_user = User("Test", "12338")
        test_user.save_user()
        found_user = User.find_by_user_name("Test")
        self.assertEqual(found_user.user_name, test_user.user_name)

    def test_user_exists(self):
        """
        test to check if we can return a Boolean if we cannot find the user.
        """
        self.new_user.save_user()
        test_user = User("Test", "12338")
        test_user.save_user()
        user_exists = User.user_exist("Test", "12338")
        self.assertTrue(user_exists)

    # Tests for credentials

    def test_save_credential(self):
        """
        test_save_credential test case to test if the credential object is saved into the credential list
        """
        self.new_credential.save_credential()
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_save_multiple_credential(self):
        """
        test_save_multiple_credential object is saved into the credential list
        """
        self.new_credential.save_credential()
        test_credential = Credentials("Instagram", "Jul", "12338")
        test_credential.save_credential()
        self.assertEqual(len(Credentials.credentials_list), 2)

    def test_display_all_credentials(self):
        """
        method that returns a list of all credentials accounts
        """
        self.assertEqual(Credentials.display_credentials(),
                         Credentials.credentials_list)

    def test_delete_credential(self):
        """
        test_delete_credential to test if we can remove a credentials account from our credentials list
        """
        self.new_credential.save_credential()
        test_credential = Credentials("Twitter", "Julie", "1233")
        test_credential.save_credential()
        self.new_credential.delete_credential()
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_credential_exists(self):
        """
        test to check if we can return a boolean if we cannot find the credential account
        """
        self.new_credential.save_credential()
        # Fixed bug: Credentials takes (site_name, site_username,
        # site_password) — the original passed only two arguments, which
        # raised TypeError before the assertion ever ran.
        test_credential = Credentials("Test", "TestUser", "12338")
        test_credential.save_credential()
        credential_exists = Credentials.credential_exist("Test")
        self.assertTrue(credential_exists)
if __name__ =='__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
3321473 | import math
import numpy as np
import pybullet as p
def getCameraParametersPush():
    """Camera parameters for the pushing scene: top-down view 0.70 m above
    the point (0.6, 0, 0), rendered at 400x400.

    Returns a dict of image sizes, view/projection matrices and derived
    near/far clip planes.
    """
    # p.resetDebugVisualizerCamera(0.70, 180, -89, [0.60, 0.0, 0.0]) # 70cm away
    view_matrix = [-1.0, 0.0, 0.0, 0.0,
                   0.0, -1.0, 0.0, 0.0,
                   0.0, 0.0, 1.0, 0.0,
                   0.6, 0.0, -0.7, 1.0]  # -0.7 is same as height
    proj_matrix = [1.0, 0.0, 0.0, 0.0,
                   0.0, 1.0, 0.0, 0.0,
                   0.0, 0.0, -1.0000200271606445, -1.0,
                   0.0, 0.0, -0.02000020071864128, 0.0]
    params = {
        'imgW': 400,
        'imgH': 400,
        'imgW_orig': 1024,
        'imgH_orig': 768,
        'viewMatPanda': view_matrix,
        'projMatPanda': proj_matrix,
        'cameraUp': [0.0, 0.0, 1.0],
        'camForward': [0.0, -0.00017464162374380976, -1.0],
        'horizon': [-20000.0, -0.0, 0.0],
        'vertical': [0.0, -20000.0, 3.4928321838378906],
        'dist': 0.70,
        'camTarget': [0.6, 0.0, 0.0],
    }
    ###########################################################################
    # Recover the near/far clip planes from the projection matrix entries.
    m22 = proj_matrix[10]
    m32 = proj_matrix[14]
    params['near'] = 2 * m32 / (2 * m22 - 2)
    params['far'] = ((m22 - 1.0) * params['near']) / (m22 + 1.0)
    return params
def getCameraParametersGrasp():
    """Camera parameters for the grasping scene: top-down view 0.70 m above
    the point (0.5, 0, 0), rendered at 512x512.

    Returns a dict of image sizes, view/projection matrices and derived
    near/far clip planes.
    """
    # p.resetDebugVisualizerCamera(0.70, 180, -89, [0.50, 0.0, 0.0]) # 70cm away
    view_matrix = [-1.0, 0.0, 0.0, 0.0,
                   0.0, -1.0, 0.0, 0.0,
                   0.0, 0.0, 1.0, 0.0,
                   0.5, 0.0, -0.7, 1.0]  # -0.7 is same as height
    proj_matrix = [1.0, 0.0, 0.0, 0.0,
                   0.0, 1.0, 0.0, 0.0,
                   0.0, 0.0, -1.0000200271606445, -1.0,
                   0.0, 0.0, -0.02000020071864128, 0.0]
    params = {
        'imgW': 512,
        'imgH': 512,
        'imgW_orig': 1024,
        'imgH_orig': 768,
        'viewMatPanda': view_matrix,
        'projMatPanda': proj_matrix,
        'cameraUp': [0.0, 0.0, 1.0],
        'camForward': [0.0, -0.00017464162374380976, -1.0],
        'horizon': [-20000.0, -0.0, 0.0],
        'vertical': [0.0, -20000.0, 3.4928321838378906],
        'dist': 0.70,
        'camTarget': [0.5, 0.0, 0.0],
    }
    ###########################################################################
    # Recover the near/far clip planes from the projection matrix entries.
    m22 = proj_matrix[10]
    m32 = proj_matrix[14]
    params['near'] = 2 * m32 / (2 * m22 - 2)
    params['far'] = ((m22 - 1.0) * params['near']) / (m22 + 1.0)
    return params
| StarcoderdataPython |
l =[1, 1, 2, 2, 3, 3, 4, 'abc', "abc"]
# A set keeps one copy of each value; 'abc' and "abc" are the same string.
no_duplicate_set = set(l)
#print(no_duplicate_set)
no_duplicate_list = list(no_duplicate_set)
# NOTE: set iteration order is arbitrary, so the printed order can vary
# between runs.
print(no_duplicate_list)
#s = {'berry','blueberry'}
#s.add('blackberry')
#s.add(4)
#s.add('berry')
#print(s)
| StarcoderdataPython |
1780796 | <gh_stars>0
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_vdpau_interop'
def _f( function ):
    # Wrap a stub in a platform GL function with per-call error checking.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_vdpau_interop',error_checker=_errors._error_checker)
GL_SURFACE_MAPPED_NV=_C('GL_SURFACE_MAPPED_NV',0x8700)
GL_SURFACE_REGISTERED_NV=_C('GL_SURFACE_REGISTERED_NV',0x86FD)
GL_SURFACE_STATE_NV=_C('GL_SURFACE_STATE_NV',0x86EB)
GL_WRITE_DISCARD_NV=_C('GL_WRITE_DISCARD_NV',0x88BE)
# The stubs below only declare ctypes signatures via @_p.types; @_f replaces
# each with the real platform entry point at import time.
@_f
@_p.types(None,)
def glVDPAUFiniNV():pass
@_f
@_p.types(None,_cs.GLvdpauSurfaceNV,_cs.GLenum,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLintArray)
def glVDPAUGetSurfaceivNV(surface,pname,bufSize,length,values):pass
@_f
@_p.types(None,ctypes.c_void_p,ctypes.c_void_p)
def glVDPAUInitNV(vdpDevice,getProcAddress):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLvdpauSurfaceNV)
def glVDPAUIsSurfaceNV(surface):pass
@_f
@_p.types(None,_cs.GLsizei,ctypes.POINTER(_cs.GLvdpauSurfaceNV))
def glVDPAUMapSurfacesNV(numSurfaces,surfaces):pass
@_f
@_p.types(_cs.GLvdpauSurfaceNV,ctypes.c_void_p,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray)
def glVDPAURegisterOutputSurfaceNV(vdpSurface,target,numTextureNames,textureNames):pass
@_f
@_p.types(_cs.GLvdpauSurfaceNV,ctypes.c_void_p,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray)
def glVDPAURegisterVideoSurfaceNV(vdpSurface,target,numTextureNames,textureNames):pass
@_f
@_p.types(None,_cs.GLvdpauSurfaceNV,_cs.GLenum)
def glVDPAUSurfaceAccessNV(surface,access):pass
@_f
@_p.types(None,_cs.GLsizei,ctypes.POINTER(_cs.GLvdpauSurfaceNV))
def glVDPAUUnmapSurfacesNV(numSurface,surfaces):pass
@_f
@_p.types(None,_cs.GLvdpauSurfaceNV)
def glVDPAUUnregisterSurfaceNV(surface):pass
| StarcoderdataPython |
import tensorflow as tf
import tensorflow_hub as hub

# Universal Sentence Encoder v4 from TF Hub (downloaded on first use).
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"

# hub.Module is a TF1-style API, so run in graph (non-eager) mode.
tf.compat.v1.disable_eager_execution()
model = hub.Module(module_url)
embeddings = model([
    "i like green eggs and ham",
    "would you eat them in a box"
])

with tf.compat.v1.Session() as sess:
    sess.run([
        tf.compat.v1.global_variables_initializer(),
        tf.compat.v1.tables_initializer()
    ])
    embeddings_value = sess.run(embeddings)
    # One fixed-size embedding vector per input sentence.
    print(embeddings_value.shape)
| StarcoderdataPython |
1608445 | <reponame>kimjaed/simpeg<filename>tests/base/test_utils.py
from __future__ import print_function
import unittest
import numpy as np
import scipy.sparse as sp
import os
import shutil
from SimPEG.Utils import (
sdiag, sub2ind, ndgrid, mkvc, inv2X2BlockDiagonal,
inv3X3BlockDiagonal, invPropertyTensor, makePropertyTensor, indexCube,
ind2sub, asArray_N_x_Dim, TensorType, diagEst, count, timeIt, Counter,
download, surface2ind_topo
)
from SimPEG import Mesh
from discretize.Tests import checkDerivative
TOL = 1e-8
class TestCheckDerivative(unittest.TestCase):
    """checkDerivative should accept a correct analytic derivative and
    reject an incorrect one.

    NOTE(review): assertTrue(passed, True) passes True as the failure
    *message*; assertTrue(passed) would be the intended call.
    """

    def test_simplePass(self):
        # f(x) = sin(x) with the correct Jacobian diag(cos(x)).
        def simplePass(x):
            return np.sin(x), sdiag(np.cos(x))
        passed = checkDerivative(simplePass, np.random.randn(5), plotIt=False)
        self.assertTrue(passed, True)

    def test_simpleFunction(self):
        # Derivative supplied as a linear operator (function of a vector).
        def simpleFunction(x):
            return np.sin(x), lambda xi: sdiag(np.cos(x))*xi
        passed = checkDerivative(
            simpleFunction, np.random.randn(5), plotIt=False
        )
        self.assertTrue(passed, True)

    def test_simpleFail(self):
        # Wrong sign on the derivative: checkDerivative must return False.
        def simpleFail(x):
            return np.sin(x), -sdiag(np.cos(x))
        passed = checkDerivative(simpleFail, np.random.randn(5), plotIt=False)
        self.assertTrue(not passed, True)
class TestCounter(unittest.TestCase):
    """Smoke test for the count/timeIt decorators and Counter.summary()."""

    def test_simpleFail(self):
        # NOTE(review): despite the name, nothing here is expected to fail;
        # the test only exercises the counters and asserts True at the end.
        class MyClass(object):
            def __init__(self, url):
                self.counter = Counter()

            @count
            def MyMethod(self):
                pass

            @timeIt
            def MySecondMethod(self):
                pass

        c = MyClass('blah')
        for i in range(100):
            c.MyMethod()
        for i in range(300):
            c.MySecondMethod()
        c.counter.summary()
        self.assertTrue(True)
class TestSequenceFunctions(unittest.TestCase):
    """Unit tests for the array/mesh utility helpers (mkvc, ndgrid,
    sub2ind/ind2sub, indexCube, block-diagonal inverses, property tensors,
    asArray_N_x_Dim, surface2ind_topo)."""

    def setUp(self):
        self.a = np.array([1, 2, 3])
        self.b = np.array([1, 2])
        self.c = np.array([1, 2, 3, 4])

    def test_mkvc1(self):
        x = mkvc(self.a)
        # Fixed bug: assertTrue(x.shape, (3,)) treated the expected shape as
        # the failure message and always passed; assertEqual compares.
        self.assertEqual(x.shape, (3,))

    def test_mkvc2(self):
        x = mkvc(self.a, 2)
        self.assertEqual(x.shape, (3, 1))

    def test_mkvc3(self):
        x = mkvc(self.a, 3)
        self.assertEqual(x.shape, (3, 1, 1))

    def test_ndgrid_2D(self):
        XY = ndgrid([self.a, self.b])
        X1_test = np.array([1, 2, 3, 1, 2, 3])
        X2_test = np.array([1, 1, 1, 2, 2, 2])
        self.assertTrue(np.all(XY[:, 0] == X1_test))
        self.assertTrue(np.all(XY[:, 1] == X2_test))

    def test_ndgrid_3D(self):
        XYZ = ndgrid([self.a, self.b, self.c])
        X1_test = np.array([
            1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1,
            2, 3
        ])
        X2_test = np.array([
            1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2,
            2, 2
        ])
        X3_test = np.array([
            1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
            4, 4
        ])
        self.assertTrue(np.all(XYZ[:, 0] == X1_test))
        self.assertTrue(np.all(XYZ[:, 1] == X2_test))
        self.assertTrue(np.all(XYZ[:, 2] == X3_test))

    def test_sub2ind(self):
        x = np.ones((5, 2))
        self.assertTrue(np.all(sub2ind(x.shape, [0, 0]) == [0]))
        self.assertTrue(np.all(sub2ind(x.shape, [4, 0]) == [4]))
        self.assertTrue(np.all(sub2ind(x.shape, [0, 1]) == [5]))
        self.assertTrue(np.all(sub2ind(x.shape, [4, 1]) == [9]))
        self.assertTrue(np.all(sub2ind(x.shape, [[4, 1]]) == [9]))
        self.assertTrue(
            np.all(sub2ind(
                x.shape, [[0, 0], [4, 0], [0, 1], [4, 1]]) == [0, 4, 5, 9]
            )
        )

    def test_ind2sub(self):
        x = np.ones((5, 2))
        self.assertTrue(
            np.all(ind2sub(x.shape, [0, 4, 5, 9])[0] == [0, 4, 0, 4])
        )
        self.assertTrue(
            np.all(ind2sub(x.shape, [0, 4, 5, 9])[1] == [0, 0, 1, 1])
        )

    def test_indexCube_2D(self):
        nN = np.array([3, 3])
        self.assertTrue(np.all(indexCube('A', nN) == np.array([0, 1, 3, 4])))
        self.assertTrue(np.all(indexCube('B', nN) == np.array([3, 4, 6, 7])))
        self.assertTrue(np.all(indexCube('C', nN) == np.array([4, 5, 7, 8])))
        self.assertTrue(np.all(indexCube('D', nN) == np.array([1, 2, 4, 5])))

    def test_indexCube_3D(self):
        nN = np.array([3, 3, 3])
        self.assertTrue(np.all(
            indexCube('A', nN) == np.array([0, 1, 3, 4, 9, 10, 12, 13])
        ))
        self.assertTrue(np.all(
            indexCube('B', nN) == np.array([3, 4, 6, 7, 12, 13, 15, 16])
        ))
        self.assertTrue(np.all(
            indexCube('C', nN) == np.array([4, 5, 7, 8, 13, 14, 16, 17])
        ))
        self.assertTrue(np.all(
            indexCube('D', nN) == np.array([1, 2, 4, 5, 10, 11, 13, 14])
        ))
        self.assertTrue(np.all(
            indexCube('E', nN) == np.array([9, 10, 12, 13, 18, 19, 21, 22])
        ))
        self.assertTrue(np.all(
            indexCube('F', nN) == np.array([12, 13, 15, 16, 21, 22, 24, 25])
        ))
        self.assertTrue(np.all(
            indexCube('G', nN) == np.array([13, 14, 16, 17, 22, 23, 25, 26])
        ))
        self.assertTrue(np.all(
            indexCube('H', nN) == np.array([10, 11, 13, 14, 19, 20, 22, 23])
        ))

    def test_invXXXBlockDiagonal(self):
        # 2x2 block-diagonal inverse times the original must be identity.
        a = [np.random.rand(5, 1) for i in range(4)]
        B = inv2X2BlockDiagonal(*a)
        A = sp.vstack((sp.hstack((sdiag(a[0]), sdiag(a[1]))),
                       sp.hstack((sdiag(a[2]), sdiag(a[3])))))
        Z2 = B*A - sp.identity(10)
        self.assertTrue(np.linalg.norm(Z2.todense().ravel(), 2) < TOL)
        # Same check for the 3x3 block-diagonal inverse.
        a = [np.random.rand(5, 1) for i in range(9)]
        B = inv3X3BlockDiagonal(*a)
        A = sp.vstack((sp.hstack((sdiag(a[0]), sdiag(a[1]), sdiag(a[2]))),
                       sp.hstack((sdiag(a[3]), sdiag(a[4]), sdiag(a[5]))),
                       sp.hstack((sdiag(a[6]), sdiag(a[7]), sdiag(a[8])))))
        Z3 = B*A - sp.identity(15)
        self.assertTrue(np.linalg.norm(Z3.todense().ravel(), 2) < TOL)

    def test_invPropertyTensor2D(self):
        M = Mesh.TensorMesh([6, 6])
        a1 = np.random.rand(M.nC)
        a2 = np.random.rand(M.nC)
        a3 = np.random.rand(M.nC)
        prop1 = a1
        prop2 = np.c_[a1, a2]
        prop3 = np.c_[a1, a2, a3]
        for prop in [4, prop1, prop2, prop3]:
            b = invPropertyTensor(M, prop)
            A = makePropertyTensor(M, prop)
            B1 = makePropertyTensor(M, b)
            B2 = invPropertyTensor(M, prop, returnMatrix=True)
            Z = B1*A - sp.identity(M.nC*2)
            self.assertTrue(np.linalg.norm(Z.todense().ravel(), 2) < TOL)
            Z = B2*A - sp.identity(M.nC*2)
            self.assertTrue(np.linalg.norm(Z.todense().ravel(), 2) < TOL)

    def test_TensorType2D(self):
        M = Mesh.TensorMesh([6, 6])
        a1 = np.random.rand(M.nC)
        a2 = np.random.rand(M.nC)
        a3 = np.random.rand(M.nC)
        prop1 = a1
        prop2 = np.c_[a1, a2]
        prop3 = np.c_[a1, a2, a3]
        for ii, prop in enumerate([4, prop1, prop2, prop3]):
            self.assertTrue(TensorType(M, prop) == ii)
        self.assertRaises(Exception, TensorType, M, np.c_[a1, a2, a3, a3])
        self.assertTrue(TensorType(M, None) == -1)

    def test_TensorType3D(self):
        M = Mesh.TensorMesh([6, 6, 7])
        a1 = np.random.rand(M.nC)
        a2 = np.random.rand(M.nC)
        a3 = np.random.rand(M.nC)
        a4 = np.random.rand(M.nC)
        a5 = np.random.rand(M.nC)
        a6 = np.random.rand(M.nC)
        prop1 = a1
        prop2 = np.c_[a1, a2, a3]
        prop3 = np.c_[a1, a2, a3, a4, a5, a6]
        for ii, prop in enumerate([4, prop1, prop2, prop3]):
            self.assertTrue(TensorType(M, prop) == ii)
        self.assertRaises(Exception, TensorType, M, np.c_[a1, a2, a3, a3])
        self.assertTrue(TensorType(M, None) == -1)

    def test_invPropertyTensor3D(self):
        M = Mesh.TensorMesh([6, 6, 6])
        a1 = np.random.rand(M.nC)
        a2 = np.random.rand(M.nC)
        a3 = np.random.rand(M.nC)
        a4 = np.random.rand(M.nC)
        a5 = np.random.rand(M.nC)
        a6 = np.random.rand(M.nC)
        prop1 = a1
        prop2 = np.c_[a1, a2, a3]
        prop3 = np.c_[a1, a2, a3, a4, a5, a6]
        for prop in [4, prop1, prop2, prop3]:
            b = invPropertyTensor(M, prop)
            A = makePropertyTensor(M, prop)
            B1 = makePropertyTensor(M, b)
            B2 = invPropertyTensor(M, prop, returnMatrix=True)
            Z = B1*A - sp.identity(M.nC*3)
            self.assertTrue(np.linalg.norm(Z.todense().ravel(), 2) < TOL)
            Z = B2*A - sp.identity(M.nC*3)
            self.assertTrue(np.linalg.norm(Z.todense().ravel(), 2) < TOL)

    def test_asArray_N_x_Dim(self):
        true = np.array([[1, 2, 3]])
        listArray = asArray_N_x_Dim([1, 2, 3], 3)
        self.assertTrue(np.all(true == listArray))
        self.assertTrue(true.shape == listArray.shape)
        listArray = asArray_N_x_Dim(np.r_[1, 2, 3], 3)
        self.assertTrue(np.all(true == listArray))
        self.assertTrue(true.shape == listArray.shape)
        listArray = asArray_N_x_Dim(np.array([[1, 2, 3.]]), 3)
        self.assertTrue(np.all(true == listArray))
        self.assertTrue(true.shape == listArray.shape)
        true = np.array([[1, 2], [4, 5]])
        listArray = asArray_N_x_Dim([[1, 2], [4, 5]], 2)
        self.assertTrue(np.all(true == listArray))
        self.assertTrue(true.shape == listArray.shape)

    def test_surface2ind_topo(self):
        # NOTE: requires network access to fetch the topo file.
        file_url = "https://storage.googleapis.com/simpeg/tests/utils/vancouver_topo.xyz"
        file2load = download(file_url)
        vancouver_topo = np.loadtxt(file2load)
        mesh_topo = Mesh.TensorMesh(
            [[(500., 24)], [(500., 20)], [(10., 30)]], x0='CCC')
        indtopoCC = surface2ind_topo(
            mesh_topo, vancouver_topo, gridLoc='CC', method='nearest')
        indtopoN = surface2ind_topo(
            mesh_topo, vancouver_topo, gridLoc='N', method='nearest')
        assert len(np.where(indtopoCC)[0]) == 8729
        assert len(np.where(indtopoN)[0]) == 8212
class TestDiagEst(unittest.TestCase):
    """diagEst with probing should recover the diagonal of a dense matrix."""

    def setUp(self):
        self.n = 1000
        self.A = np.random.rand(self.n, self.n)
        self.Adiag = np.diagonal(self.A)

    def getTest(self, testType):
        # Squared error between the estimated and the true diagonal.
        Adiagtest = diagEst(self.A, self.n, self.n, testType)
        r = np.abs(Adiagtest-self.Adiag)
        err = r.dot(r)
        return err

    def testProbing(self):
        err = self.getTest('probing')
        print('Testing probing. {}'.format(err))
        self.assertTrue(err < TOL)
class TestDownload(unittest.TestCase):
    """download() should accept both a list of URLs and a single URL.

    NOTE: requires network access to the public simpeg storage bucket.
    """

    def test_downloads(self):
        url = "https://storage.googleapis.com/simpeg/Chile_GRAV_4_Miller/"
        cloudfiles = [
            'LdM_grav_obs.grv', 'LdM_mesh.mesh',
            'LdM_topo.topo', 'LdM_input_file.inp'
        ]
        url1 = url + cloudfiles[0]
        url2 = url + cloudfiles[1]
        # List form returns a list of local file names ...
        file_names = download(
            [url1, url2], folder='./test_urls', overwrite=True
        )
        # or
        # ... single-URL form returns one file name.
        file_name = download(url1, folder='./test_url', overwrite=True)
        # where
        assert isinstance(file_names, list)
        assert len(file_names) == 2
        assert isinstance(file_name, str)
        # clean up
        shutil.rmtree(os.path.expanduser('./test_urls'))
        shutil.rmtree(os.path.expanduser('./test_url'))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
3327121 | """ Script base for orlov astarte package. """
import logging
import pytest
# pylint: disable=E0401
from astarte.script.testcase import Astarte
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures('conftests_fixture', 'orlov_fixture', 'astarte_fixture')
# pylint: disable=E1101, C0302, R0914
class TestArena(Astarte):
    """ Test Case Base `browndust` package.
    """

    def test_001_arena(self):
        """ Test Arena Auto Play. """
        logger.info(' *** Start TestCase : %s *** ', __file__)
        logger.info(' *** Start Arena Battle. *** ')
        # Page object for the arena screen, reached from the home UI.
        arena = self.app.ui.home.arena
        assert arena.displayed()
        # battle_around() presumably starts an automated battle loop —
        # confirm against the arena page object.
        assert arena.battle_around()
        logger.info(' *** Wait Arena Battle Result. *** ')
        assert arena.battle_result()
        assert arena.return_home()
        assert self.app.ui.home.displayed()
| StarcoderdataPython |
4833996 | <reponame>elliotpeele/pyramid_oauth2_provider
#
# Copyright (c) <NAME> <<EMAIL>>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warrenty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
from sqlalchemy import engine_from_config
from pyramid.config import Configurator
from pyramid.exceptions import ConfigurationError
from pyramid.interfaces import IAuthenticationPolicy
from .models import initialize_sql
from .interfaces import IAuthCheck
from .authentication import OauthAuthenticationPolicy
# imported to make the test runnner happy
from . import tests
def includeme(config):
    """Pyramid include hook: wire up the OAuth2 provider.

    Configures SQLAlchemy from the 'sqlalchemy.' settings, installs the
    OAuth authentication policy (unless the host app already registered
    one), registers the app-supplied IAuthCheck implementation and adds the
    two provider routes.

    Raises ConfigurationError when 'oauth2_provider.auth_checker' is not set.
    """
    settings = config.registry.settings
    engine = engine_from_config(settings, 'sqlalchemy.')
    initialize_sql(engine, settings)
    # Respect an authentication policy configured by the host application.
    if not config.registry.queryUtility(IAuthenticationPolicy):
        config.set_authentication_policy(OauthAuthenticationPolicy())
    auth_check = settings.get('oauth2_provider.auth_checker')
    if not auth_check:
        raise ConfigurationError('You must provide an implementation of the '
            'authentication check interface that is included with '
            'pyramid_oauth2_provider for verifying usernames and passwords')
    # maybe_dotted resolves a dotted-path string to the actual object.
    policy = config.maybe_dotted(auth_check)
    config.registry.registerUtility(policy, IAuthCheck)
    config.add_route('oauth2_provider_authorize', '/oauth2/authorize')
    config.add_route('oauth2_provider_token', '/oauth2/token')
    config.scan()
def main(global_config, **settings):
    """Entry point: build and return the Pyramid WSGI application."""
    configurator = Configurator(settings=settings)
    includeme(configurator)
    return configurator.make_wsgi_app()
| StarcoderdataPython |
3226484 | <gh_stars>0
import streamlit as st
from PIL import Image
def app():
    """Render the Streamlit 'Vegetation Analysis' page.

    Shows NDVI classification maps and land-cover pie charts of Mosul for
    2016, 2018 and 2021.  Expects the referenced PNG files to exist in the
    working directory.
    """
    st.title("Vegetation Analysis")
    st.markdown(
        """
        The goal of this task is to discover the use of different vegetation indices to identify the level of
        desertification in northern Iraq. Indices of interest include NDVI, NDWI, NDBI, and MSAVI. In specific, we conducted the analysis using NDVI
        index, for the years 2016, 2018 and 2021. A summary of what has been done for this task is shown below:
        """
    )

    # Summary
    st.subheader("Summary")
    st.markdown(
        """
        1. **Dataset:** Sentinel2 images using Google Earth Engine
        2. **Region of Interest:** Mosul - Iraq
        3. **Periods of study:** 2016, 2018, 2021
        4. **Bands:** 5 Bands downloaded: R, G, B, NIR, SWIR
        5. **Processing method:** Used rasterio to process the images
        """
    )

    # NDVI analysis
    st.subheader("1. NDVI Analysis")
    # NDVI definition
    st.info("""
    The normalized difference vegetation index (NDVI) is a simple graphical indicator that can be used to analyze remote sensing measurements, often from a space platform,
    assessing whether or not the target being observed contains live green vegetation
    """)
    st.markdown(
        """
        The following shows NDVI values of Mosul for three different periods:
        **2016**, **2018** and **2021**, calculated using data from Sentinel2.
        """
    )

    # One NDVI classification map per study year.  Distinct variable names
    # replace the original's reused image1/image2/image3.
    st.markdown("""**NDVI: 2016**""")
    ndvi_map_2016 = Image.open('NDVI_classes_2016.png')
    st.image(ndvi_map_2016, use_column_width=True)
    st.markdown(""" ----- """)

    st.markdown("""**NDVI: 2018**""")
    ndvi_map_2018 = Image.open('NDVI_classes_2018.png')
    st.image(ndvi_map_2018, use_column_width=True)
    st.markdown(""" ----- """)

    st.markdown("""**NDVI: 2021**""")
    ndvi_map_2021 = Image.open('NDVI_classes_2021.png')
    st.image(ndvi_map_2021, use_column_width=True)

    # Pie chart analysis
    st.subheader("2. Pie chart Analysis")
    st.markdown(
        """
        The following shows pie chart analysis of Mosul over three periods: 2016, 2018 and 2021.
        The results clearly show that the arid area is reducing and the green area is increasing, which seems to be a good indication.
        """
    )

    st.markdown("""**Pie chart analysis of Mosul: 2016**""")
    pie_2016 = Image.open('NDVI_2016.png')
    st.image(pie_2016, use_column_width=True)
    st.markdown(""" ----- """)

    st.markdown("""**Pie chart analysis of Mosul: 2018**""")
    pie_2018 = Image.open('NDVI_2018.png')
    st.image(pie_2018, use_column_width=True)
    st.markdown(""" ----- """)

    st.markdown("""**Pie chart analysis of Mosul: 2021**""")
    pie_2021 = Image.open('NDVI_2021.png')
    st.image(pie_2021, use_column_width=True)
3260916 | <reponame>jacobhjkim/amundsendatabuilder
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import re
from typing import List, Optional
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
class Badge:
    """A single badge: a display ``name`` grouped under a ``category``."""

    def __init__(self, name: str, category: str) -> None:
        self.name = name
        self.category = category

    def __repr__(self) -> str:
        return f'Badge({self.name!r}, {self.category!r})'

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Badge):
            return NotImplemented
        return self.name == other.name and \
            self.category == other.category

    def __hash__(self) -> int:
        # Defining __eq__ alone implicitly sets __hash__ to None, making
        # badges unhashable; hash on the same fields used for equality so
        # they can be deduplicated in sets or used as dict keys.
        return hash((self.name, self.category))
class BadgeMetadata(GraphSerializable):
    """
    Badge model.

    Attaches one or more badges to a Table, Dashboard or Column node and
    yields the graph nodes / relationships needed to persist them.
    """
    BADGE_NODE_LABEL = 'Badge'
    BADGE_KEY_FORMAT = '{badge}'
    BADGE_CATEGORY = 'category'

    # Relation between entity and badge
    BADGE_RELATION_TYPE = 'HAS_BADGE'
    INVERSE_BADGE_RELATION_TYPE = 'BADGE_FOR'

    # Key layouts for the resources that may carry badges.  Compiled once at
    # class-definition time instead of on every instantiation.
    _TABLE_KEY_PATTERN = re.compile('[a-z]+://[a-zA-Z0-9_.-]+.[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+')
    _DASHBOARD_KEY_PATTERN = re.compile('[a-z]+_dashboard://[a-zA-Z0-9_.-]+.[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+')
    _COLUMN_KEY_PATTERN = re.compile('[a-z]+://[a-zA-Z0-9_.-]+.[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+')
    _LABEL_TO_KEY_PATTERN = {
        'Table': _TABLE_KEY_PATTERN,
        'Dashboard': _DASHBOARD_KEY_PATTERN,
        'Column': _COLUMN_KEY_PATTERN,
    }

    def __init__(self,
                 start_label: str,  # Table, Dashboard, Column
                 start_key: str,
                 badges: List[Badge],
                 ) -> None:
        self.badges = badges

        if start_label not in self._LABEL_TO_KEY_PATTERN:
            raise Exception(start_label + ' is not a valid start_label for a Badge relation')
        if not self._LABEL_TO_KEY_PATTERN[start_label].match(start_key):
            raise Exception(start_key + ' does not match the key pattern for a ' + start_label)
        self.start_label = start_label
        self.start_key = start_key

        self._node_iter = iter(self.create_nodes())
        self._relation_iter = iter(self.create_relation())

    def __repr__(self) -> str:
        return f'BadgeMetadata({self.start_label!r}, {self.start_key!r})'

    def create_next_node(self) -> Optional[GraphNode]:
        """Return the next badge node, or None when exhausted."""
        try:
            return next(self._node_iter)
        except StopIteration:
            return None

    def create_next_relation(self) -> Optional[GraphRelationship]:
        """Return the next entity<->badge relationship, or None when exhausted."""
        try:
            return next(self._relation_iter)
        except StopIteration:
            return None

    @staticmethod
    def get_badge_key(name: str) -> str:
        """Build the graph node key for a badge name; empty name yields ''."""
        if not name:
            return ''
        return BadgeMetadata.BADGE_KEY_FORMAT.format(badge=name)

    def get_metadata_model_key(self) -> str:
        return self.start_key

    def create_nodes(self) -> List[GraphNode]:
        """Create one :class:`GraphNode` per (truthy) badge."""
        return [
            GraphNode(
                key=self.get_badge_key(badge.name),
                label=self.BADGE_NODE_LABEL,
                attributes={
                    self.BADGE_CATEGORY: badge.category
                }
            )
            for badge in self.badges
            if badge
        ]

    def create_relation(self) -> List[GraphRelationship]:
        """Create one HAS_BADGE / BADGE_FOR relationship pair per badge."""
        return [
            GraphRelationship(
                start_label=self.start_label,
                end_label=self.BADGE_NODE_LABEL,
                start_key=self.start_key,
                end_key=self.get_badge_key(badge.name),
                type=self.BADGE_RELATION_TYPE,
                reverse_type=self.INVERSE_BADGE_RELATION_TYPE,
                attributes={}
            )
            for badge in self.badges
        ]
| StarcoderdataPython |
3357044 | # Generated by Django 3.1.7 on 2021-04-26 08:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Link ``Customer`` to the order app: a one-to-one ``cart`` plus a
    ``myorders`` many-to-many relation."""

    dependencies = [
        ('order', '0005_remove_order_customer'),
        ('customer', '0006_customerprofile_gender'),
    ]

    operations = [
        migrations.AddField(
            model_name='customer',
            name='cart',
            # Nullable so existing customers migrate without a cart.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='order.cart'),
        ),
        migrations.AddField(
            model_name='customer',
            name='myorders',
            # NOTE(review): null=True has no effect on ManyToManyField (Django
            # warns with fields.W340); harmless in a historical migration, but
            # the model definition should drop it.
            field=models.ManyToManyField(blank=True, null=True, to='order.Order'),
        ),
    ]
| StarcoderdataPython |
1675054 | """Module for Game class"""
import time
from hangman.dictionary import Dictionary
from hangman.cli.screen import Screen
from hangman.word import Word
from hangman.error import GuessError
class Game:
    """
    The primary object responsible for managing game details and flow.
    """

    def __init__(self):
        self._dictionary = Dictionary()

    def start(self):
        """Play one full game of hangman, then say goodbye."""
        self.game()
        self.quit()

    def game(self):
        """Run the guess loop for a single randomly chosen word."""
        word = Word(self._dictionary.get())
        while True:
            Screen.clear()
            Screen.gallows(len(word.incorrects))
            Screen.put(f"Word : {word.masked}")
            Screen.put(f"Incorrects: {' '.join(word.incorrects)}")
            finished = word.solved or not word.alive
            if finished:
                break
            attempt = Screen.get("What is your guess?")
            try:
                word.guess(attempt)
            except GuessError as problem:
                Screen.put(str(problem))
                time.sleep(2)
        if word.alive:
            Screen.put("Congrats, you won!!")
        else:
            Screen.put(f"I'm sorry. The word was {word.unmasked}.")

    @staticmethod
    def quit():
        """Quit the game"""
        Screen.goodbye()
| StarcoderdataPython |
3256723 | from OpenPNM.Utilities import misc
import scipy as _sp
import numpy as _np
import os as _os
import pickle as _pickle
from xml.etree import ElementTree as _ET
class VTK():
    r"""
    Class for writing a Vtp file to be read by ParaView
    """

    _TEMPLATE = '''
<?xml version="1.0" ?>
<VTKFile byte_order="LittleEndian" type="PolyData" version="0.1">
    <PolyData>
        <Piece NumberOfLines="0" NumberOfPoints="0">
            <Points>
            </Points>
            <Lines>
            </Lines>
            <PointData>
            </PointData>
            <CellData>
            </CellData>
        </Piece>
    </PolyData>
</VTKFile>
'''.strip()

    # Map numpy dtype names to the capitalized VTK names stored in the XML
    # ``type`` attribute, plus the inverse map for parsing files back in.
    _NP_TO_VTK = {
        'int8': 'Int8',
        'int16': 'Int16',
        'int32': 'Int32',
        'int64': 'Int64',
        'uint8': 'UInt8',
        'uint16': 'UInt16',
        'uint32': 'UInt32',
        'uint64': 'UInt64',
        'float32': 'Float32',
        'float64': 'Float64',
        'str': 'String',
    }
    _VTK_TO_NP = {v: k for k, v in _NP_TO_VTK.items()}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @staticmethod
    def save(network, filename='', phases=None):
        r"""
        Save network and phase data to a single vtp file for visualizing in
        Paraview

        Parameters
        ----------
        network : OpenPNM Network Object
            The Network containing the data to be written
        filename : string, optional
            Filename to write data.  If no name is given the file is named
            after the network
        phases : list, optional
            A list containing OpenPNM Phase object(s) with data to be written
        """
        if phases is None:
            phases = []
        if filename == '':
            filename = network.name
        filename = filename.split('.')[0] + '.vtp'

        root = _ET.fromstring(VTK._TEMPLATE)
        objs = []
        if type(phases) != list:
            phases = [phases]
        for phase in phases:
            objs.append(phase)
        objs.append(network)
        am = misc.amalgamate_data(objs=objs)
        key_list = list(sorted(am.keys()))
        points = network['pore.coords']
        pairs = network['throat.conns']

        num_points = len(points)
        num_throats = len(pairs)

        piece_node = root.find('PolyData').find('Piece')
        piece_node.set("NumberOfPoints", str(num_points))
        piece_node.set("NumberOfLines", str(num_throats))

        points_node = piece_node.find('Points')
        coords = VTK._array_to_element("coords", points.T.ravel('F'), n=3)
        points_node.append(coords)
        lines_node = piece_node.find('Lines')
        connectivity = VTK._array_to_element("connectivity", pairs)
        lines_node.append(connectivity)
        # Each line has two endpoints, so offsets are 2, 4, 6, ...
        offsets = VTK._array_to_element("offsets", 2*_np.arange(len(pairs))+2)
        lines_node.append(offsets)

        point_data_node = piece_node.find('PointData')
        for key in key_list:
            array = am[key]
            if array.dtype == bool:   # VTK has no bool type; store as int
                array = array.astype(int)
            if array.size != num_points:
                continue
            element = VTK._array_to_element(key, array)
            point_data_node.append(element)

        cell_data_node = piece_node.find('CellData')
        for key in key_list:
            array = am[key]
            if array.dtype == bool:
                array = array.astype(int)
            if array.size != num_throats:
                continue
            element = VTK._array_to_element(key, array)
            cell_data_node.append(element)

        tree = _ET.ElementTree(root)
        tree.write(filename)

        # Make pretty
        with open(filename, 'r+') as f:
            string = f.read()
            string = string.replace('</DataArray>', '</DataArray>\n\t\t\t')
            f.seek(0)
            # consider adding header: '<?xml version="1.0"?>\n'+
            f.write(string)

    @staticmethod
    def load(filename):
        r"""
        Read in pore and throat data from a saved VTK file.

        Notes
        -----
        This will NOT reproduce the original simulation, since all models and
        object relationships are lost.  Use IO.Save and IO.Load for that.
        """
        # Local import: only needed here, and the module itself only imports
        # OpenPNM.Utilities.misc (the bare name `OpenPNM` was unbound before).
        import OpenPNM
        network = OpenPNM.Network.GenericNetwork()
        tree = _ET.parse(filename)
        piece_node = tree.find('PolyData').find('Piece')

        # Extract connectivity
        conn_element = piece_node.find('Lines').find('DataArray')
        array = VTK._element_to_array(conn_element, 2)
        network['throat.conns'] = array.T

        for element in piece_node.find('PointData').iter('DataArray'):
            key = element.get('Name')
            array = VTK._element_to_array(element)
            netname = key.split('.')[0]
            # str.strip() removes *characters*, not a prefix, which mangles
            # keys whose tail shares letters with the network name; slice the
            # '<netname>.' prefix off instead.
            propname = key[len(netname) + 1:]
            network[propname] = array
        return network

    @staticmethod
    def _array_to_element(name, array, n=1):
        """Wrap a numpy array in a VTK <DataArray> element (tab-separated)."""
        element = _ET.Element('DataArray')
        element.set("Name", name)
        element.set("NumberOfComponents", str(n))
        element.set("type", VTK._NP_TO_VTK[str(array.dtype)])
        element.text = '\t'.join(map(str, array.ravel()))
        return element

    @staticmethod
    def _element_to_array(element, n=1):
        """Parse a <DataArray> element back into a numpy array.

        The file stores VTK-style names (e.g. "Int64"), which are not valid
        numpy dtype strings; translate them back before casting.  Values are
        parsed as float64 first because the text is plain decimal.
        """
        string = element.text
        dtype = VTK._VTK_TO_NP.get(element.get("type"), 'float64')
        array = _np.array(string.split('\t'), dtype=_np.float64)
        array = array.astype(dtype)
        if n != 1:   # `is not 1` compared identity with an int literal
            array = array.reshape(array.size // n, n)
        return array
class MAT():
    r"""
    Class for reading and writing OpenPNM data to a Matlab 'mat' file
    """

    @staticmethod
    def save(network, filename='', phases=None):
        r"""
        Write Network to a Mat file for exporting to Matlab.  This method will
        be enhanced in a future update, and its functionality may change!

        Parameters
        ----------
        network : OpenPNM Network Object
        filename : string
            Desired file name, defaults to network name if not given
        phases : list of phase objects (default: none)
            Phases that have properties we want to write to file
        """
        # scipy submodules are not guaranteed to be importable from the bare
        # `scipy` package object, so import scipy.io explicitly here.
        import scipy.io as _spio

        if phases is None:
            phases = []
        if filename == '':
            filename = network.name
        filename = filename.split('.')[0] + '.mat'

        # Matlab identifiers cannot contain '.', so flatten the keys.
        pnMatlab = {key.replace('.', '_'): network[key]
                    for key in list(network.keys())}

        if type(phases) != list:
            phases = [phases]
        for phase in phases:
            for key in list(phase.keys()):
                pnMatlab[phase.name + '_' + key.replace('.', '_')] = phase[key]

        _spio.savemat(file_name=filename, mdict=pnMatlab)

    @staticmethod
    def load():
        r"""
        This method is not implemented yet.
        """
        # `NotImplemented` is a *value* used by binary dunder methods, not an
        # exception class; raising it produced a confusing TypeError.
        raise NotImplementedError()
| StarcoderdataPython |
CSRF_ENABLED = True #Enables Cross-site Request Forgery. SECRET_KEY is needed when CSRF is enabled, and creates a cryptographic token used to validate a form.
# NOTE(review): a hard-coded SECRET_KEY must not ship to production — load it
# from the environment instead.
SECRET_KEY = 'you-will-never-guess'

import os
basedir = os.path.abspath(os.path.dirname(__file__))

# Database connection string; raises KeyError at import time when
# DATABASE_URL is unset (fail-fast by design).
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# folder where SQLAlchemy-migrate data files will be stored
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db2_repository')

# Constants for Pagination in /views
USERS_PER_PAGE = 25
TOURNAMENTS_PER_PAGE = 15
CHAR_USERS_PER_PAGE = 15
| StarcoderdataPython |
# File: test/lmp/util/model/test_load.py
r"""Test loading utilities for all language models.
Test target:
- :py:meth:`lmp.util.model.load`.
"""
import torch
import lmp.util.model
from lmp.model import GRUModel, LSTMModel, RNNModel
from lmp.tknzr import BaseTknzr
def test_load_rnn(exp_name: str, tknzr: BaseTknzr, clean_model):
    r"""Load back pre-trained RNN language model."""
    ckpt = 1
    # One shared hyperparameter dict instead of duplicating the full argument
    # list in both the `create` and the `load` call (they must agree exactly).
    model_kwargs = dict(
        d_emb=1,
        d_hid=2,
        n_hid_lyr=1,
        n_post_hid_lyr=2,
        n_pre_hid_lyr=3,
        p_emb=0.1,
        p_hid=0.2,
        tknzr=tknzr,
    )

    model = lmp.util.model.create(model_name=RNNModel.model_name, **model_kwargs)
    model.save(ckpt=ckpt, exp_name=exp_name)

    load_model = lmp.util.model.load(
        ckpt=ckpt,
        exp_name=exp_name,
        model_name=RNNModel.model_name,
        **model_kwargs,
    )

    # Test Case: Type check.
    assert isinstance(load_model, RNNModel)

    # Test Case: Parameters check.
    for (p_1, p_2) in zip(load_model.parameters(), model.parameters()):
        assert torch.equal(p_1, p_2)
def test_load_gru(exp_name: str, tknzr: BaseTknzr, clean_model):
    r"""Load back pre-trained GRU language model."""
    ckpt = 1
    # Shared hyperparameters for both the saved and the reloaded model.
    model_kwargs = dict(
        d_emb=1,
        d_hid=2,
        n_hid_lyr=1,
        n_post_hid_lyr=2,
        n_pre_hid_lyr=3,
        p_emb=0.1,
        p_hid=0.2,
        tknzr=tknzr,
    )

    model = lmp.util.model.create(model_name=GRUModel.model_name, **model_kwargs)
    model.save(ckpt=ckpt, exp_name=exp_name)

    load_model = lmp.util.model.load(
        ckpt=ckpt,
        exp_name=exp_name,
        model_name=GRUModel.model_name,
        **model_kwargs,
    )

    # Test Case: Type check.
    assert isinstance(load_model, GRUModel)

    # Test Case: Parameters check.
    for (p_1, p_2) in zip(load_model.parameters(), model.parameters()):
        assert torch.equal(p_1, p_2)
def test_load_lstm(exp_name: str, tknzr: BaseTknzr, clean_model):
    r"""Load back pre-trained LSTM language model."""
    ckpt = 1
    # Shared hyperparameters for both the saved and the reloaded model.
    model_kwargs = dict(
        d_emb=1,
        d_hid=2,
        n_hid_lyr=1,
        n_post_hid_lyr=2,
        n_pre_hid_lyr=3,
        p_emb=0.1,
        p_hid=0.2,
        tknzr=tknzr,
    )

    model = lmp.util.model.create(model_name=LSTMModel.model_name, **model_kwargs)
    model.save(ckpt=ckpt, exp_name=exp_name)

    load_model = lmp.util.model.load(
        ckpt=ckpt,
        exp_name=exp_name,
        model_name=LSTMModel.model_name,
        **model_kwargs,
    )

    # Test Case: Type check.
    assert isinstance(load_model, LSTMModel)

    # Test Case: Parameters check.
    for (p_1, p_2) in zip(load_model.parameters(), model.parameters()):
        assert torch.equal(p_1, p_2)
| StarcoderdataPython |
# File: examples/baseball.py
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Baseball Batting Average
=================================
Original example from Pyro:
https://github.com/pyro-ppl/pyro/blob/dev/examples/baseball.py
Example has been adapted from [1]. It demonstrates how to do Bayesian inference using
various MCMC kernels in Pyro (HMC, NUTS, SA), and use of some common inference utilities.
As in the Stan tutorial, this uses the small baseball dataset of Efron and Morris [2]
to estimate players' batting average which is the fraction of times a player got a
base hit out of the number of times they went up at bat.
The dataset separates the initial 45 at-bats statistics from the remaining season.
We use the hits data from the initial 45 at-bats to estimate the batting average
for each player. We then use the remaining season's data to validate the predictions
from our models.
Three models are evaluated:
- Complete pooling model: The success probability of scoring a hit is shared
amongst all players.
- No pooling model: Each individual player's success probability is distinct and
there is no data sharing amongst players.
- Partial pooling model: A hierarchical model with partial data sharing.
We recommend Radford Neal's tutorial on HMC ([3]) to users who would like to get a
more comprehensive understanding of HMC and its variants, and to [4] for details on
the No U-Turn Sampler, which provides an efficient and automated way (i.e. limited
hyper-parameters) of running HMC on different problems.
Note that the Sample Adaptive (SA) kernel, which is implemented based on [5],
requires large `num_warmup` and `num_samples` (e.g. 15,000 and 300,000). So
it is better to disable progress bar to avoid dispatching overhead.
**References:**
1. <NAME>. (2016), `"Hierarchical Partial Pooling for Repeated Binary Trials"
<http://mc-stan.org/users/documentation/case-studies/pool-binary-trials.html/>`_.
2. <NAME>., <NAME>. (1975), "Data analysis using Stein's estimator and its
generalizations", J. Amer. Statist. Assoc., 70, 311-319.
3. <NAME>. (2012), "MCMC using Hamiltonian Dynamics",
(https://arxiv.org/pdf/1206.1901.pdf)
4. <NAME>. and <NAME>. (2014), "The No-U-turn sampler: Adaptively setting
path lengths in Hamiltonian Monte Carlo", (https://arxiv.org/abs/1111.4246)
5. <NAME> (2019), "Sample Adaptive MCMC",
(https://papers.nips.cc/paper/9107-sample-adaptive-mcmc)
"""
import argparse
import os
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import logsumexp
import numpyro
import numpyro.distributions as dist
from numpyro.examples.datasets import BASEBALL, load_dataset
from numpyro.infer import HMC, MCMC, NUTS, SA, Predictive, log_likelihood
def fully_pooled(at_bats, hits=None):
    r"""
    Number of hits in $K$ at bats for each player has a Binomial
    distribution with a common probability of success, $\phi$.

    :param (jnp.DeviceArray) at_bats: Number of at bats for each player.
    :param (jnp.DeviceArray) hits: Number of hits for the given at bats.
    :return: Number of hits predicted by the model.
    """
    # A single success probability shared by every player (complete pooling).
    phi_prior = dist.Uniform(0, 1)
    phi = numpyro.sample("phi", phi_prior)
    num_players = at_bats.shape[0]
    # With hits=None the "obs" site is unobserved, so the same function also
    # serves as the predictive model.  Sample-site order is part of the model:
    # reordering sites changes the PRNG stream and hence the results.
    with numpyro.plate("num_players", num_players):
        return numpyro.sample("obs", dist.Binomial(at_bats, probs=phi), obs=hits)
def not_pooled(at_bats, hits=None):
    r"""
    Number of hits in $K$ at bats for each player has a Binomial
    distribution with independent probability of success, $\phi_i$.

    :param (jnp.DeviceArray) at_bats: Number of at bats for each player.
    :param (jnp.DeviceArray) hits: Number of hits for the given at bats.
    :return: Number of hits predicted by the model.
    """
    num_players = at_bats.shape[0]
    # One independent success probability per player (no data sharing).
    with numpyro.plate("num_players", num_players):
        phi_prior = dist.Uniform(0, 1)
        phi = numpyro.sample("phi", phi_prior)
        return numpyro.sample("obs", dist.Binomial(at_bats, probs=phi), obs=hits)
def partially_pooled(at_bats, hits=None):
    r"""
    Number of hits has a Binomial distribution with independent
    probability of success, $\phi_i$. Each $\phi_i$ follows a Beta
    distribution with concentration parameters $c_1$ and $c_2$, where
    $c_1 = m * kappa$, $c_2 = (1 - m) * kappa$, $m ~ Uniform(0, 1)$,
    and $kappa ~ Pareto(1, 1.5)$.

    :param (jnp.DeviceArray) at_bats: Number of at bats for each player.
    :param (jnp.DeviceArray) hits: Number of hits for the given at bats.
    :return: Number of hits predicted by the model.
    """
    # Population-level mean m and concentration kappa shared by all players.
    m = numpyro.sample("m", dist.Uniform(0, 1))
    kappa = numpyro.sample("kappa", dist.Pareto(1, 1.5))
    num_players = at_bats.shape[0]
    with numpyro.plate("num_players", num_players):
        # Beta(m*kappa, (1-m)*kappa) has mean m; kappa controls how strongly
        # individual players are pulled toward the population mean.
        phi_prior = dist.Beta(m * kappa, (1 - m) * kappa)
        phi = numpyro.sample("phi", phi_prior)
        return numpyro.sample("obs", dist.Binomial(at_bats, probs=phi), obs=hits)
def partially_pooled_with_logit(at_bats, hits=None):
    r"""
    Number of hits has a Binomial distribution with a logit link function.
    The logits $\alpha$ for each player is normally distributed with the
    mean and scale parameters sharing a common prior.

    :param (jnp.DeviceArray) at_bats: Number of at bats for each player.
    :param (jnp.DeviceArray) hits: Number of hits for the given at bats.
    :return: Number of hits predicted by the model.
    """
    # Shared hyper-prior for the per-player logits.
    loc = numpyro.sample("loc", dist.Normal(-1, 1))
    scale = numpyro.sample("scale", dist.HalfCauchy(1))
    num_players = at_bats.shape[0]
    with numpyro.plate("num_players", num_players):
        alpha = numpyro.sample("alpha", dist.Normal(loc, scale))
        # Binomial parameterized by logits rather than probabilities.
        return numpyro.sample("obs", dist.Binomial(at_bats, logits=alpha), obs=hits)
def run_inference(model, at_bats, hits, rng_key, args):
    """Run MCMC for ``model`` and return its posterior samples.

    Raises
    ------
    ValueError
        If ``args.algo`` is not one of "HMC", "NUTS" or "SA".  Previously an
        unknown algorithm fell through to an ``UnboundLocalError`` on
        ``kernel``.
    """
    if args.algo == "NUTS":
        kernel = NUTS(model)
    elif args.algo == "HMC":
        kernel = HMC(model)
    elif args.algo == "SA":
        kernel = SA(model)
    else:
        raise ValueError(
            "Unknown algorithm {!r}; expected 'HMC', 'NUTS' or 'SA'".format(args.algo)
        )
    # SA needs very long chains, so the progress bar can be disabled (and is
    # always disabled in Sphinx builds) to avoid dispatch overhead.
    mcmc = MCMC(
        kernel,
        args.num_warmup,
        args.num_samples,
        num_chains=args.num_chains,
        progress_bar=False
        if ("NUMPYRO_SPHINXBUILD" in os.environ or args.disable_progbar)
        else True,
    )
    mcmc.run(rng_key, at_bats, hits)
    return mcmc.get_samples()
def predict(model, at_bats, hits, z, rng_key, player_names, train=True):
    """Draw posterior-predictive hit counts and print a per-player summary.

    When *train* is False, additionally report the expected log pointwise
    predictive density of the held-out data.
    """
    suffix = " - TRAIN" if train else " - TEST"
    header = model.__name__ + suffix
    predictive = Predictive(model, posterior_samples=z)
    predictions = predictive(rng_key, at_bats)["obs"]
    print_results(
        "=" * 30 + header + "=" * 30, predictions, player_names, at_bats, hits
    )
    if train:
        return
    post_loglik = log_likelihood(model, z, at_bats, hits)["obs"]
    num_draws = jnp.shape(post_loglik)[0]
    # expected log predictive density at each data point
    exp_log_density = logsumexp(post_loglik, axis=0) - jnp.log(num_draws)
    # reports log predictive density of all test points
    print(
        "\nLog pointwise predictive density: {:.2f}\n".format(exp_log_density.sum())
    )
def print_results(header, preds, player_names, at_bats, hits):
    """Print a table of actual hits vs. predicted quartiles, one row per player."""
    col_titles = ["", "At-bats", "ActualHits", "Pred(p25)", "Pred(p50)", "Pred(p75)"]
    title_fmt = "{:>20} {:>10} {:>10} {:>10} {:>10} {:>10}"
    row_fmt = "{:>20} {:>10.0f} {:>10.0f} {:>10.2f} {:>10.2f} {:>10.2f}"
    quartiles = jnp.quantile(preds, jnp.array([0.25, 0.5, 0.75]), axis=0)
    print("\n", header, "\n")
    print(title_fmt.format(*col_titles))
    for idx, name in enumerate(player_names):
        print(row_fmt.format(name, at_bats[idx], hits[idx], *quartiles[:, idx]), "\n")
def main(args):
    """Fit all four models on the first-45-at-bats data, then validate each
    on the remainder of the season."""
    _, fetch_train = load_dataset(BASEBALL, split="train", shuffle=False)
    train, player_names = fetch_train()
    _, fetch_test = load_dataset(BASEBALL, split="test", shuffle=False)
    test, _ = fetch_test()
    # Column 0 holds the number of at-bats, column 1 the number of hits.
    at_bats, hits = train[:, 0], train[:, 1]
    season_at_bats, season_hits = test[:, 0], test[:, 1]
    for i, model in enumerate(
        (fully_pooled, not_pooled, partially_pooled, partially_pooled_with_logit)
    ):
        # Fresh PRNG key per model keeps runs independent but reproducible.
        rng_key, rng_key_predict = random.split(random.PRNGKey(i + 1))
        zs = run_inference(model, at_bats, hits, rng_key, args)
        predict(model, at_bats, hits, zs, rng_key_predict, player_names)
        # train=False also reports the held-out log predictive density.
        predict(
            model,
            season_at_bats,
            season_hits,
            zs,
            rng_key_predict,
            player_names,
            train=False,
        )
if __name__ == "__main__":
assert numpyro.__version__.startswith("0.6.0")
parser = argparse.ArgumentParser(description="Baseball batting average using MCMC")
parser.add_argument("-n", "--num-samples", nargs="?", default=3000, type=int)
parser.add_argument("--num-warmup", nargs="?", default=1500, type=int)
parser.add_argument("--num-chains", nargs="?", default=1, type=int)
parser.add_argument(
"--algo", default="NUTS", type=str, help='whether to run "HMC", "NUTS", or "SA"'
)
parser.add_argument(
"-dp",
"--disable-progbar",
action="store_true",
default=False,
help="whether to disable progress bar",
)
parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
| StarcoderdataPython |
# File: src/orders/forms.py
from django import forms
from django.contrib.auth import get_user_model # allows us to use User obj
from .models import UserAddress
User = get_user_model()
class GuestCheckoutForm(forms.Form):
    """Checkout form for guests: asks for an email address twice and rejects
    addresses that already belong to a registered user."""
    email = forms.EmailField()
    email2 = forms.EmailField(label='Confirm email')

    def clean(self):
        """Cross-field validation: emails must match and be unregistered."""
        cleaned_data = super(GuestCheckoutForm, self).clean()
        email = cleaned_data.get('email')
        email2 = cleaned_data.get('email2')
        if email != email2:
            raise forms.ValidationError("Please confirm emails are the same")
        # Guard against both fields missing after field-level validation:
        # None == None would otherwise slip through to a DB query on None.
        # `.exists()` avoids counting every matching row.
        if email and User.objects.filter(email=email).exists():
            raise forms.ValidationError("This user already exists. Please login instead.")
        return cleaned_data
class AddressForm(forms.Form):
    """
    Model Choice Field allows us to have choices based off of a queryset from a model
    By default there would be an empty line, we don't want that, so empty_label="--None--" so it would display what is
    inside the quotes. or =None (no quotes) to remove
    forms.RadioSelect widget turns it into a radio field
    """
    # NOTE(review): these querysets select *all* billing/shipping addresses,
    # not just the current user's — presumably the view re-filters them;
    # confirm before using this form directly.
    billing_address = forms.ModelChoiceField(queryset=UserAddress.objects.filter(type="billing").all(),
                                             empty_label=None,
                                             widget=forms.RadioSelect)
    shipping_address = forms.ModelChoiceField(queryset=UserAddress.objects.filter(type="shipping").all(),
                                              empty_label=None,
                                              widget=forms.RadioSelect)
class UserAddressForm(forms.ModelForm):
    """ModelForm for creating or editing a single ``UserAddress`` record."""
    class Meta:
        model = UserAddress
        # `type` distinguishes billing vs. shipping addresses (see AddressForm).
        fields = [
            'street',
            'city',
            'state',
            'zipcode',
            'type'
        ]
# File: EmailTemplate/apps.py
from django.apps import AppConfig
class EmailtemplateConfig(AppConfig):
    """Django application configuration for the ``EmailTemplate`` app."""
    name = 'EmailTemplate'
| StarcoderdataPython |
3334024 | from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from constants import PRICES
# Inline keyboard for the subscription offer: a "just browsing" escape hatch,
# a row of short subscription lengths, and a separate row for the yearly plan.
_browse_row = [InlineKeyboardButton(
    text='Спасибо, я пока тут осмотрюсь...',
    # url='https://card.tochka.com/cjgbrgtye-individualnaia_konsultatsiia',
    callback_data='Не покупаем подписку'
)]
_short_plans_row = [
    InlineKeyboardButton(f'1 месяц ({PRICES[1]} ₽)', callback_data=PRICES[1]),
    InlineKeyboardButton(f'3 месяца ({PRICES[3]} ₽)', callback_data=PRICES[3]),
    InlineKeyboardButton(f'Полгода ({PRICES[6]} ₽)', callback_data=PRICES[6]),
]
_year_row = [InlineKeyboardButton(f'Год ({PRICES[12]} ₽)', callback_data=PRICES[12])]

keyboard = [_browse_row, _short_plans_row, _year_row]
payment_keyboard = InlineKeyboardMarkup(keyboard)
| StarcoderdataPython |
1777032 | <gh_stars>1-10
#!/usr/bin/env python3
import random
import unittest
import numpy as np
from ml.rl.test.gridworld.gridworld_base import DISCOUNT
from ml.rl.test.gridworld.gridworld_continuous import GridworldContinuous
from ml.rl.test.gridworld.gridworld_continuous_enum import GridworldContinuousEnum
from ml.rl.test.gridworld.gridworld_evaluator import GridworldContinuousEvaluator
from ml.rl.thrift.core.ttypes import (
ContinuousActionModelParameters,
FactorizationParameters,
FeedForwardParameters,
KnnParameters,
RLParameters,
TrainingParameters,
)
from ml.rl.training.evaluator import Evaluator
from ml.rl.training.parametric_dqn_trainer import ParametricDQNTrainer
class TestGridworldContinuous(unittest.TestCase):
    """End-to-end checks for parametric DQN (SARSA) training on continuous
    gridworlds.

    The four trainer tests and two ground-truth tests previously duplicated
    the same train/evaluate pipeline six times; it now lives in the private
    helpers ``_train_and_score`` and ``_ground_truth_loss``.
    """

    def setUp(self):
        self.minibatch_size = 512
        # `super(self.__class__, self)` recurses infinitely under subclassing;
        # plain super() is equivalent here and safe.
        super().setUp()
        np.random.seed(0)
        random.seed(0)

    def get_sarsa_parameters(self):
        """Plain (non-factorized) DQN parameterization."""
        return ContinuousActionModelParameters(
            rl=RLParameters(
                gamma=DISCOUNT,
                target_update_rate=1.0,
                reward_burnin=100,
                maxq_learning=False,
            ),
            training=TrainingParameters(
                layers=[-1, 256, 128, -1],
                activations=["relu", "relu", "linear"],
                minibatch_size=self.minibatch_size,
                learning_rate=0.125,
                optimizer="ADAM",
            ),
            knn=KnnParameters(model_type="DQN"),
        )

    def get_sarsa_parameters_factorized(self):
        """Parameterization with separate state/action towers."""
        return ContinuousActionModelParameters(
            rl=RLParameters(
                gamma=DISCOUNT,
                target_update_rate=1.0,
                reward_burnin=100,
                maxq_learning=False,
            ),
            training=TrainingParameters(
                layers=[],
                activations=[],
                factorization_parameters=FactorizationParameters(
                    state=FeedForwardParameters(
                        layers=[-1, 128, 64, 32], activations=["relu", "relu", "linear"]
                    ),
                    action=FeedForwardParameters(
                        layers=[-1, 128, 64, 32], activations=["relu", "relu", "linear"]
                    ),
                ),
                minibatch_size=self.minibatch_size,
                learning_rate=0.125,
                optimizer="ADAM",
            ),
            knn=KnnParameters(model_type="DQN"),
        )

    def get_sarsa_trainer(self, environment, parameters=None):
        parameters = parameters or self.get_sarsa_parameters()
        return ParametricDQNTrainer(
            parameters, environment.normalization, environment.normalization_action
        )

    def _flatten_and_train(self, trainer, tdps, evaluator=None):
        """Flatten per-batch tensors and run one training pass over `tdps`."""
        for tdp in tdps:
            tdp.rewards = tdp.rewards.flatten()
            tdp.not_terminals = tdp.not_terminals.flatten()
            if evaluator is None:
                trainer.train(tdp)
            else:
                trainer.train(tdp, evaluator)

    def _train_and_score(self, environment, parameters=None):
        """Train on 150k transitions and return the MC evaluation score."""
        samples = environment.generate_samples(150000, 1.0)
        trainer = self.get_sarsa_trainer(environment, parameters)
        trainer.predictor()  # smoke-check predictor export before training
        evaluator = GridworldContinuousEvaluator(
            environment, False, DISCOUNT, False, samples
        )
        tdps = environment.preprocess_samples(samples, self.minibatch_size)
        self._flatten_and_train(trainer, tdps)
        predictor = trainer.predictor()
        evaluator.evaluate(predictor)
        return evaluator.evaluate(predictor)

    def _ground_truth_loss(self, parameters=None):
        """Train against ground-truth reward timelines; return final MC loss."""
        environment = GridworldContinuous()
        samples = environment.generate_samples(200000, 1.0)
        true_values = environment.true_values_for_sample(
            samples.states, samples.actions, False
        )
        # Hijack the reward timeline to insert the ground truth
        samples.reward_timelines = [{0: tv} for tv in true_values]
        trainer = self.get_sarsa_trainer(environment, parameters)
        evaluator = Evaluator(None, 10, DISCOUNT)
        tdps = environment.preprocess_samples(samples, self.minibatch_size)
        self._flatten_and_train(trainer, tdps, evaluator)
        return evaluator.mc_loss[-1]

    def test_trainer_sarsa(self):
        self.assertLess(self._train_and_score(GridworldContinuous()), 0.15)

    def test_trainer_sarsa_factorized(self):
        self.assertLess(
            self._train_and_score(
                GridworldContinuous(), self.get_sarsa_parameters_factorized()
            ),
            0.15,
        )

    def test_trainer_sarsa_enum(self):
        self.assertLess(self._train_and_score(GridworldContinuousEnum()), 0.15)

    def test_trainer_sarsa_enum_factorized(self):
        self.assertLess(
            self._train_and_score(
                GridworldContinuousEnum(), self.get_sarsa_parameters_factorized()
            ),
            0.15,
        )

    def test_evaluator_ground_truth(self):
        self.assertLess(self._ground_truth_loss(), 0.15)

    def test_evaluator_ground_truth_factorized(self):
        self.assertLess(
            self._ground_truth_loss(self.get_sarsa_parameters_factorized()), 0.15
        )
| StarcoderdataPython |
95628 | from socket import *
from msgstruct import *
#from fcntl import ioctl
#from termios import TIOCOUTQ
from zlib import compressobj, Z_SYNC_FLUSH
import struct
# 4-byte zero scratch buffer; was the ioctl(TIOCOUTQ) argument in the
# commented-out latency check inside send() below.
ZeroBuffer = struct.pack("i", 0)

class SocketMarshaller:
    """Compresses outgoing frames with zlib and hands them to the mixer.

    NOTE: this is Python 2 code (``except error, e`` syntax, print
    statements); kept byte-identical.
    """
    def __init__(self, tcpsock, mixer):
        self.tcpsock = tcpsock
        self.mixer = mixer
        # Cache bound methods: send() is called once per frame.
        self.mixer_can_mix = mixer.send_can_mix
        self.mixer_send = mixer.send_buffer
        self.tcpsock_fd = tcpsock.fileno()
        # try to reduce TCP latency
        try:
            tcpsock.setsockopt(SOL_IP, IP_TOS, 0x10) # IPTOS_LOWDELAY
        except error, e:
            print "Cannot set IPTOS_LOWDELAY for client:", str(e)
        try:
            tcpsock.setsockopt(SOL_TCP, TCP_NODELAY, 1)
        except error, e:
            print "Cannot set TCP_NODELAY for client:", str(e)
        # One persistent compression stream per connection; Z_SYNC_FLUSH in
        # send() keeps each frame independently decodable.
        compressor = compressobj(6)
        self.compress = compressor.compress
        self.compress_flush = compressor.flush

    def send(self, data):
        if self.mixer_can_mix():
            # discard all packets if there is still data waiting in tcpsock
            # --- mmmh, works much better without this check ---
            #try:
            #    if ioctl(self.tcpsock_fd, TIOCOUTQ, ZeroBuffer) != ZeroBuffer:
            #        return
            #except IOError, e:
            #    print "ioctl(TIOCOUTQ) failed, disconnecting client"
            #    self.mixer.disconnect(e)
            #else:
            data = self.compress(data) + self.compress_flush(Z_SYNC_FLUSH)
            self.mixer_send(message(MSG_INLINE_FRAME, data))
            # Returns the compressed length; 0 means the frame was dropped.
            return len(data)
        return 0
| StarcoderdataPython |
3217081 | from jax import numpy as np
from jax import random
from mcx.distributions import constraints
from mcx.distributions.distribution import Distribution
from mcx.distributions.shapes import broadcast_batch_shape
class DiscreteUniform(Distribution):
    """Random variable with a uniform distribution on a range of integers.

    The distribution is uniform over the *inclusive* integer interval
    ``[lower, upper]``, which matches both the support declared in
    ``__init__`` and the normalization constant used in ``logpdf``.
    """

    parameters = {"lower": constraints.integer, "upper": constraints.integer}

    def __init__(self, lower, upper):
        self.support = constraints.integer_interval(lower, upper)
        self.event_shape = ()
        self.batch_shape = broadcast_batch_shape(np.shape(lower), np.shape(upper))
        self.lower = np.floor(lower)
        self.upper = np.floor(upper)

    def sample(self, rng_key, sample_shape=()):
        shape = sample_shape + self.batch_shape + self.event_shape
        # jax.random.randint draws from the half-open interval
        # [minval, maxval); add 1 so that `upper` itself is reachable,
        # keeping sampling consistent with the inclusive support and the
        # logpdf normalization below. (The original omitted the +1 and
        # could never sample the upper bound.)
        return random.randint(rng_key, shape, self.lower, self.upper + 1)

    @constraints.limit_to_support
    def logpdf(self, x):
        # log(1 / N) where N is the number of integers in [lower, upper].
        return -np.log(self.upper - self.lower + 1)
| StarcoderdataPython |
22843 | from os import listdir
from os.path import join, isfile
import json
from random import randint
#########################################
## START of part that students may change
from code_completion_baseline import Code_Completion_Baseline
# Directory of token files used for training the model.
training_dir = "./../../programs_800/"
# Directory of token files held out for querying/accuracy measurement.
query_dir = "./../../programs_200/"
# Path where trained model weights are stored/loaded.
model_file = "./../../trained_model"
# When True, load weights from model_file instead of training from scratch.
use_stored_model = False
# Maximum number of consecutive tokens removed by create_hole().
max_hole_size = 2
# When True, token values are abstracted by simplify_token() after loading.
simplify_tokens = True
## END of part that students may change
#########################################
def simplify_token(token):
    """Replace a token's concrete value with a per-type placeholder.

    Mutates `token` in place. Identifiers, strings, regular expressions and
    numerics get canonical stand-in values; all other token types keep
    their original value.
    """
    placeholders = {
        "Identifier": "ID",
        "String": '"STR"',
        "RegularExpression": "/REGEXP/",
        "Numeric": "5",
    }
    kind = token["type"]
    if kind in placeholders:
        token["value"] = placeholders[kind]
# load sequences of tokens from files
def load_tokens(token_dir):
    """Load every "*_tokens.json" file in `token_dir` into a list of token
    lists, optionally abstracting token values via simplify_token().

    Fix over the original: files are opened with a context manager so the
    handles are closed (the original `json.load(open(f))` leaked them).
    """
    token_lists = []
    for name in listdir(token_dir):
        path = join(token_dir, name)
        if isfile(path) and name.endswith("_tokens.json"):
            with open(path) as fh:
                token_lists.append(json.load(fh))
    # Module-level flag: collapse concrete values to per-type placeholders.
    if simplify_tokens:
        for token_list in token_lists:
            for token in token_list:
                simplify_token(token)
    return token_lists
# removes up to max_hole_size tokens
def create_hole(tokens, max_size=None):
    """Remove a random contiguous chunk of 1..max_size tokens.

    Parameters
    ----------
    tokens: the full token sequence
    max_size: upper bound on the hole size; defaults to the module-level
        `max_hole_size` (kept as a lazy default for backward compatibility).

    Returns
    -------
    (prefix, expected, suffix) where `expected` is the removed chunk.
    Note the hole always starts at index >= 1, so the first token is never
    part of the hole.
    """
    if max_size is None:
        max_size = max_hole_size
    hole_size = randint(1, max_size)
    hole_start_idx = randint(1, len(tokens) - hole_size)
    prefix = tokens[0:hole_start_idx]
    expected = tokens[hole_start_idx:hole_start_idx + hole_size]
    suffix = tokens[hole_start_idx + hole_size:]
    return (prefix, expected, suffix)
# checks if two sequences of tokens are identical
def same_tokens(tokens1, tokens2):
    """Return True iff the two token sequences have the same length and
    every aligned pair agrees on both its "type" and its "value"."""
    return len(tokens1) == len(tokens2) and all(
        a["type"] == b["type"] and a["value"] == b["value"]
        for a, b in zip(tokens1, tokens2)
    )
#########################################
## START of part that students may change
code_completion = Code_Completion_Baseline()
## END of part that students may change
#########################################
# train the network
training_token_lists = load_tokens(training_dir)
if use_stored_model:
    # Reuse previously trained weights from model_file.
    code_completion.load(training_token_lists, model_file)
else:
    code_completion.train(training_token_lists, model_file)
# query the network and measure its accuracy
query_token_lists = load_tokens(query_dir)
correct = incorrect = 0
for tokens in query_token_lists:
    # Punch a random hole into the sequence and ask the model to fill it.
    (prefix, expected, suffix) = create_hole(tokens)
    completion = code_completion.query(prefix, suffix)
    # A prediction counts as correct only if it matches the removed chunk
    # exactly (same length, same types, same values).
    if same_tokens(completion, expected):
        correct += 1
    else:
        incorrect += 1
accuracy = correct / (correct + incorrect)
print("Accuracy: " + str(correct) + " correct vs. " + str(incorrect) + " incorrect = " + str(accuracy))
| StarcoderdataPython |
129417 | <gh_stars>1-10
# Copyright (c) 2018, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from cogrob.navigation.tour import tour_info
class TestAllTourPoints(unittest.TestCase):
    """Sanity checks over the static tour-point data for every location."""

    def testGetAllTourPoints(self):
        """Every tour point exposes well-formed id, nav_point, short_name,
        lcs_templates and intro_words fields."""
        for location in tour_info.GetAllLocations():
            all_tour_points = tour_info.GetAllTourPoints(location)
            for tour_point in all_tour_points:
                self.assertTrue(isinstance(tour_point, tour_info.TourPoint))
                self.assertTrue(isinstance(tour_point.id, str))
                self.assertTrue(bool(tour_point.id))
                self.assertTrue(isinstance(tour_point.nav_point, str))
                self.assertEqual(len(tour_point.nav_point), 3)
                self.assertTrue(isinstance(tour_point.short_name, str))
                # Fixed: the original re-checked len(nav_point) here, which
                # is already asserted to be exactly 3 above; the intent was
                # clearly to require a non-empty short_name.
                self.assertGreater(len(tour_point.short_name), 0)
                self.assertTrue(isinstance(tour_point.lcs_templates, list))
                for lcs_template in tour_point.lcs_templates:
                    self.assertTrue(isinstance(lcs_template, str))
                    self.assertTrue(bool(lcs_template))
                self.assertTrue(isinstance(tour_point.intro_words, str))
                self.assertGreater(len(tour_point.intro_words), 0)

    def testGetAllTourPointsMap(self):
        """The map view keys each TourPoint by that point's own id."""
        for location in tour_info.GetAllLocations():
            all_tour_points_map = tour_info.GetAllTourPointsMap(location)
            self.assertTrue(isinstance(all_tour_points_map, dict))
            for k, v in all_tour_points_map.items():
                self.assertTrue(isinstance(v, tour_info.TourPoint))
                self.assertEqual(k, v.id)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1617801 | <gh_stars>0
import unittest
from sensor_validate import validate_param_reading
from sensorActions import set_sensor_status
class SensorValidatorTest(unittest.TestCase):
    """Tests for validate_param_reading's jump detection, sensor-off
    handling and NaN handling for 'soc' and 'current' readings."""

    def test_reports_error_when_soc_jumps(self):
        """SoC readings that jump by more than the threshold are invalid."""
        soc_jump_threshold = 0.05
        # 0.01 -> 0.5 exceeds the 0.05 jump threshold.
        self.assertFalse(
            validate_param_reading([0.0, 0.01, 0.5, 0.51], 'soc', soc_jump_threshold)
        )
        self.assertTrue(
            validate_param_reading([0.1], 'soc', soc_jump_threshold)
        )
        set_sensor_status('soc', 'OFF')
        self.assertEqual(
            validate_param_reading([], 'soc', soc_jump_threshold), 'SENSOR_UP_AGAIN', 'Sensor was off, suitable actions are handled by controller'
        )
        # NOTE(review): this asserts the same call now returns None right
        # after the assertEqual above expected 'SENSOR_UP_AGAIN' —
        # presumably validate_param_reading is stateful and resets after
        # reporting SENSOR_UP_AGAIN; confirm against its implementation.
        self.assertIsNone(
            validate_param_reading([], 'soc', soc_jump_threshold), 'Sensor is not working'
        )
        # NaN anywhere in the reading is reported as NULL_VALUE.
        self.assertEqual(
            validate_param_reading([0.01, 0.02, float('nan')], 'soc', soc_jump_threshold) , 'NULL_VALUE'
        )

    def test_reports_error_when_current_jumps(self):
        """Current readings that jump by more than the threshold are invalid."""
        current_jump_threshold = 0.1
        self.assertFalse(
            validate_param_reading([0.03, 0.03, 0.03, 0.33], 'current', current_jump_threshold)
        )
        self.assertTrue(
            validate_param_reading([0.1], 'current', current_jump_threshold)
        )
        # Empty reading with the sensor never marked OFF -> None.
        self.assertIsNone(
            validate_param_reading([], 'current', current_jump_threshold), 'Sensor is not working'
        )
        self.assertEqual(
            validate_param_reading([0.01, 0.02, float('nan')], 'current', current_jump_threshold) , 'NULL_VALUE'
        )


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
37693 | <filename>scripts/collapse_subtypes.py<gh_stars>10-100
import sys
from collections import Counter
# Example input line (vw-style namespaces separated by '|'):
## 5 |strains A1:23146 C:377 B1:546 unclassified:211701 A3:133 A2:212 A4:2230 B2:1052 D2:551 D3:3685 D1:30293 |sketch sketchSize=1000 kmer=16
if __name__ == "__main__":
    # Python 2 script: collapses fine-grained strain subtypes (A1, A2, ...)
    # into their parent class (A, B, C, D, U) and re-emits each line with a
    # "|vir" namespace holding the per-class fractions.
    for line in sys.stdin:
        x_d = Counter()
        tokens = line.split("|")
        # tokens[1] is the "strains" namespace: space-separated name:count.
        features = tokens[1].split(" ")
        for i in features:
            if i.startswith("A"):
                x_d["A"] += int(i.strip().split(":")[1])
            elif i.startswith("B"):
                x_d["B"] += int(i.strip().split(":")[1])
            elif i.startswith("C"):
                x_d["C"] += int(i.strip().split(":")[1])
            elif i.startswith("D"):
                x_d["D"] += int(i.strip().split(":")[1])
            elif i.startswith("u"):
                # "unclassified" counts, kept under key "U".
                x_d["U"] = int(i.strip().split(":")[1])
        total = sum([x_d[x] for x in x_d])
        # Alternative normalizations kept for reference:
        #feat_l = [str(x + ":" + str( float(x_d[x]) / float(total) )) for x in x_d if x is not "U"]
        feat_l = [str(x + ":" + str( float(x_d[x]) / float(total) )) for x in x_d]
        #feat_l = [str(x + ":" + str((x_d[x]) )) for x in x_d]
        x_feat = "|vir " + " ".join(feat_l)
        #xtra_namespace = "|" + tokens[2]
        #print " ".join([ tokens[0].strip(), x_feat, xtra_namespace] ).strip()
        print " ".join([ tokens[0].strip(), x_feat] ).strip()
| StarcoderdataPython |
1710422 | <reponame>fadhifatah/Fataservice<filename>Fataservice/settings.py
"""
Django settings for Fataservice project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key value was redacted in this copy of the file; the
# placeholder below keeps the module importable. Restore the real key from
# a secret store (do not commit it).
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts behind the load balancer plus the individual proxy hosts and their
# internal IPs; localhost is kept for local development.
ALLOWED_HOSTS = [
    'my-load-balancer',
    'host21018.proxy.infralabs.cs.ui.ac.id',
    'host22018.proxy.infralabs.cs.ui.ac.id',
    'host23018.proxy.infralabs.cs.ui.ac.id',
    'host24018.proxy.infralabs.cs.ui.ac.id',
    '172.22.0.98',
    '172.22.0.129',
    '172.22.0.160',
    '172.22.0.191',
    '127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api_v1',
    'thumb',
    'cots_1',
    'algorithm',
    'upload',
    'cots_2',
    'latihan',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Fataservice.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]
WSGI_APPLICATION = 'Fataservice.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Previous Heroku Postgres configuration, kept for reference:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'd9ef928o48bsjr',
#         'USER': 'kpbnkfjmkeplil',
#         'PASSWORD': '<PASSWORD>',
#         'HOST': 'ec2-54-221-220-59.compute-1.amazonaws.com',
#         'PORT': '5432',
#     }
# }
# NOTE(review): the password value was redacted in this copy; restore it
# from a secret store rather than committing it.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'fataservice',
        'USER': 'postgres',
        'PASSWORD': '<PASSWORD>',
        'HOST': '172.22.0.67',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| StarcoderdataPython |
3290858 | import logging
import sys
from utils.hustlers_den_exceptions import HustlersDenValidationError
logger = logging.getLogger(__name__)
class BaseService:
    def __init__(self):
        """
        Superclass Base Service for Hustlers Den services.

        Tracks validation state across three attributes:
        service_response_data (payload set by subclasses), valid (tri-state:
        None = validations not yet run, then True/False) and errors (list of
        error message strings appended by subclass validations).
        """
        logger.debug(
            "Func %s begins with data %s", sys._getframe().f_code.co_name, locals()
        )
        self.service_response_data = None
        self.valid = None
        self.errors = []

    @property
    def error_message(self):
        """
        Property that joins all collected error messages into one
        comma-separated string.
        return :str:
        """
        logger.debug(
            "Func %s begins with data %s", sys._getframe().f_code.co_name, locals()
        )
        return ", ".join(self.errors)

    @property
    def is_valid(self):
        """
        Recompute and cache `self.valid` from the current error list:
        True when no errors have been recorded, False otherwise.
        return :bool:
        """
        logger.debug(
            "Func %s begins with data %s", sys._getframe().f_code.co_name, locals()
        )
        if not self.errors:
            self.valid = True
        else:
            self.valid = False
        return self.valid

    def validate(self, raise_errors=False):
        """
        Check whether the (subclass-populated) error list is empty.
        Optionally raise a HustlersDenValidationError that bubbles up to
        the caller with the stringified error messages.
        return :bool:
        return :HustlersDenValidationError: raised when raise_errors is True
            and validation failed
        """
        logger.debug(
            "Func %s begins with data %s", sys._getframe().f_code.co_name, locals()
        )
        if not self.is_valid and raise_errors:
            raise HustlersDenValidationError(message=self.error_message)
        return self.is_valid

    def execute(self, raise_errors=False):
        """
        Wrapper for running the execute method. Needs to be called at the
        start of the subclassed method.

        Implicitly runs the validations in case the validations haven't
        been run already (detected via the tri-state `self.valid`).
        Optionally raises validation errors and "explodes" the service.
        return :bool: whether the service is valid and execution may proceed
        """
        logger.debug(
            "Func %s begins with data %s", sys._getframe().f_code.co_name, locals()
        )
        if self.valid is None:
            self.validate(raise_errors)
        return self.valid
| StarcoderdataPython |
1730063 | # -*- coding: utf-8 -*-
'''
Grains for Onyx OS Switches Proxy minions
.. versionadded: Neon
For documentation on setting up the onyx proxy minion look in the documentation
for :mod:`salt.proxy.onyx<salt.proxy.onyx>`.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import logging
import salt.utils.platform
import salt.modules.onyx
log = logging.getLogger(__name__)
__proxyenabled__ = ['onyx']
__virtualname__ = 'onyx'
def __virtual__():
    '''
    Only load this grains module when running as an onyx proxy minion.
    '''
    try:
        is_onyx_proxy = (
            salt.utils.platform.is_proxy()
            and __opts__['proxy']['proxytype'] == 'onyx'
        )
    except KeyError:
        # Proxy configuration is absent or malformed.
        return False
    return __virtualname__ if is_onyx_proxy else False
def proxy_functions(proxy=None):
    '''
    Return the ``onyx`` grains collected through the proxy, or an empty
    dict when no proxy is available or it has not finished initializing.
    '''
    if proxy is None:
        return {}
    # Compare against False explicitly (not plain truthiness), matching the
    # original's `is False` semantics for the "not initialized" state.
    if proxy['onyx.initialized']() is False:
        return {}
    return {'onyx': proxy['onyx.grains']()}
| StarcoderdataPython |
48081 | <gh_stars>1-10
import numpy as np
from rdkit.Chem import AllChem, DataStructs
from rdkit import Chem
import csv
def load_csv(file):
    """Read an Excel-dialect CSV file and return its rows as a list of
    lists of strings."""
    with open(file, "r") as csv_file:
        return list(csv.reader(csv_file, dialect='excel'))
def get_feature_dict(file, delimiter=',', key_index=0, use_int=False):
    """Load a delimited text file into {row[key_index]: row[1:]}, skipping
    the header line.

    Parameters
    ----------
    file: path of the delimited text file
    delimiter: field separator (comma by default)
    key_index: column index used as the dictionary key
    use_int: when True, convert every value column (row[1:]) to int

    Returns
    -------
    Dict mapping the key column of each data row to its remaining columns.
    """
    with open(file, "r") as csv_file:
        reader = csv.reader(csv_file, dialect='excel', delimiter=delimiter)
        next(reader)  # skip the header row
        if use_int:
            # Comprehension rewrite; the original shadowed the builtin
            # `list` with a local accumulator.
            return {row[key_index]: [int(v) for v in row[1:]] for row in reader}
        return {row[key_index]: row[1:] for row in reader}
# Script body: compute 2048-bit Morgan fingerprints (radius 2) for every
# compound in the LINCS perturbation-info file and save them as CSV.
i = 0  # counts compounds that failed SMILES parsing / fingerprinting
finger_dimension = 2048
molecules = []
fps = []
# NOTE(review): `id` shadows the builtin of the same name for the rest of
# the script.
id = []
smiles = []
names = []
import os
path = os.path.dirname(os.path.abspath(__file__))
print(path)
drug_dict = get_feature_dict('GSE92742_Broad_LINCS_pert_info.txt', delimiter='\t', use_int=False) # uncomment for phase 1
# drug_dict = get_feature_dict('GSE70138_Broad_LINCS_pert_info.txt', delimiter='\t', use_int=False) # uncomment for phase 2
# rnaseq drugs # uncomment this and change filename below to get inhouse_morgan_2048.csv
# drug_dict = {}
# drug_dict['Enzalutamide'] = ['','','','','','CNC(=O)C1=C(F)C=C(C=C1)N1C(=S)N(C(=O)C1(C)C)C1=CC=C(C#N)C(=C1)C(F)(F)F']
# drug_dict['VPC14449'] = ['','','','','','Brc1n(-c2nc(N3CCOCC3)sc2)cc(Br)n1']
# drug_dict['VPC17005'] = ['','','','','','O=C(NC=1SCCN=1)c1c2c(sc1)cccc2']
count = 0
for key in drug_dict:
    count += 1
    try:
        # Column 5 of the pert_info row holds the canonical SMILES string.
        smiles = drug_dict[key][5]
        m = Chem.MolFromSmiles(smiles)
        molecules.append(m)
        fp = np.zeros((1,))
        DataStructs.ConvertToNumpyArray(AllChem.GetMorganFingerprintAsBitVect(m, 2, nBits=finger_dimension), fp)
        fps.append(fp)
        id.append(key)
    # NOTE(review): bare except silently covers all failure modes
    # (unparseable SMILES, missing column, ...); only logs and counts them.
    except:
        print(i, key, m)
        i += 1
# Build the CSV header: one "mol" column followed by fps0..fps2047.
header = ["mol"]
for i in range(finger_dimension):
    header.append("fps" + str(i))
fps = np.array(fps).reshape(len(fps),finger_dimension)
id = np.array(id)
id = id.reshape(len(fps), 1)
# Rows are [compound_id, bit0, ..., bit2047]; header row is prepended.
data = np.hstack((id, fps))
header = np.array(header).reshape(1, len(header))
data_header = np.vstack((header, data))
np.savetxt("phase1_compounds_morgan_2048.csv", data_header, delimiter=",", fmt="%s")
| StarcoderdataPython |
82429 | <filename>handlers/basic_commands.py
"""
The part that has basic commands like start, help, cancel, etc.
"""
import logging
from os import listdir, mkdir
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
from loader import dp, input_path, output_path
from states.all_states import *
from utils.clean_up import reset
# this dictionary contains some text and states for each operation:
# keys are either command names (merge, compress, ...) or button labels
# ("Word to PDF", ...); each entry holds the FSM state to enter and the
# prompt text sent to the user when the operation starts.
operations_dict = {
    "merge": {
        "state": MergingStates.waiting_for_files_to_merge,
        "text": "Alright, just send me the files that you want merged.",
    },
    "compress": {
        "state": CompressingStates.waiting_for_files_to_compress,
        "text": "Cool, send me the PDF that you want compressed and I'll "
        "start working on it right away.",
    },
    "encrypt": {
        "state": CryptingStates.waiting_for_files_to_encrypt,
        "text": "Okay, send me the PDF that you want to encrypt.",
    },
    "decrypt": {
        "state": CryptingStates.waiting_for_files_to_decrypt,
        "text": "Okay, send me the PDF that you want to decrypt",
    },
    "split": {
        "state": SplittingStates.waiting_for_files_to_split,
        "text": "Sure, first send me the PDF that you want to split.",
    },
    "Word to PDF": {
        "state": ConvertingStates.waiting_for_word_docs,
        "text": "Ok, send me the Word document(s) you'd like to convert to PDF",
    },
    "Image(s) to PDF": {
        "state": ConvertingStates.waiting_for_images,
        "text": "Ok, send me the images that you'd like to convert to a PDF",
    },
}
@dp.message_handler(commands="start", state="*")
async def welcome(message: types.Message):
    """
    This handler will be called when user sends '/start' command.

    Creates directories for new users where their files will be stored
    temporarily until an operation is complete, then sends the intro text.
    """
    # Idiom fix: the original used `if x in ...: pass / else: create`;
    # create the per-chat working directories only on first contact.
    if str(message.chat.id) not in listdir(input_path):
        mkdir(f"{input_path}/{message.chat.id}")
        mkdir(f"{output_path}/{message.chat.id}")
        logging.info("Directories for new user created")
    await message.reply(
        "Hello, I'm Vivy.\n"
        "My mission is to make people happy by helping them perform basic "
        "operations on their PDF files.\n\n"
        "<b>What I can do</b>\n"
        "<i>/merge</i> - Merge multiple PDF files into one PDF file.\n"
        "<i>/compress</i> - Compress a PDF file (can only compress one "
        "file at a time).\n"
        "<i>/encrypt</i> - Encrypt PDF file with PDF standard encryption "
        "handler.\n"
        "<i>/decrypt</i> - Decrypt PDF file if it was encrypted with the "
        "PDF standard encryption handler.\n"
        "<i>/split</i> - Split PDF (extract certain pages from your PDF, "
        "saving those pages into a separate file).\n"
        "<i>/convert</i> - Convert Word Documents/Images to PDF.\n\n"
        "Type /help for more information."
    )
@dp.message_handler(commands="help", state="*")
async def give_help(message: types.Message):
    """
    This handler will be called when user sends '/help' command.

    Provides some simple instructions on how to use the bot; the reply is a
    static HTML-formatted message listing all available commands.
    """
    await message.reply(
        "<b>Instructions:</b>\nGo to the special commands <b>☰ Menu</b> "
        "and choose the operation that you want me to perform.\n\n"
        "<b>Available commands:</b>\n"
        "<i>/start</i> - Brief info about me.\n"
        "<i>/help</i> - Instructions on how to interact with me.\n"
        "<i>/merge</i> - Merge multiple PDF files into one PDF file.\n"
        "<i>/compress</i> - Compress a PDF file (can only compress one "
        "file at a time).\n"
        "<i>/encrypt</i> - Encrypt PDF file with PDF standard encryption "
        "handler.\n"
        "<i>/decrypt</i> - Decrypt PDF file if it was encrypted with the "
        "PDF standard encryption handler.\n"
        "<i>/split</i> - Split PDF (extract certain pages from your PDF, "
        "saving those pages into a separate file).\n"
        "<i>/convert</i> - Convert Word Documents/Images to PDF.\n"
        "<i>/cancel</i> - Cancel the current operation.\n"
    )


@dp.message_handler(commands="cancel", state="*")
@dp.message_handler(Text(equals="cancel", ignore_case=True), state="*")
async def cancel(message: types.Message, state: FSMContext):
    """
    This handler will be called when user sends `/cancel` command (or the
    text "cancel" in any state, case-insensitively).

    Triggers the reset function, which cleans up user input/output files and
    resets the state, then removes any custom reply keyboard.
    """
    logging.info("Cancelling operation")
    await reset(message, state)
    await message.reply(
        "Operation cancelled",
        reply_markup=types.ReplyKeyboardRemove(),
    )
@dp.message_handler(
    commands=["merge", "compress", "encrypt", "decrypt", "split", "make"], state="*"
)
async def start_operation(message: types.Message, state: FSMContext):
    """
    Entry point for the single-step PDF operations.

    Resets any operation in progress, switches the FSM into the state
    registered for the chosen command and prompts the user to send files.
    """
    await reset(message, state)
    op_name = message.get_command()[1:]  # drop the leading "/"
    operation = operations_dict[op_name]
    await operation["state"].set()
    await message.reply(
        operation["text"],
        reply_markup=types.ReplyKeyboardRemove(),
    )
@dp.message_handler(commands="convert", state="*")
async def ask_which_convert(message: types.Message, state: FSMContext):
    """
    Entry point for the `convert` operation.

    Resets any operation in progress and shows a reply keyboard with the
    supported conversion types plus a Cancel button.
    """
    await reset(message, state)
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
    keyboard.add("Word to PDF", "Image(s) to PDF")
    keyboard.add("Cancel")
    await message.answer(
        "<b>Please choose one of the options for conversion.</b>\n\n"
        "Great, currently I can only do 2 conversions:\n"
        "<i>Word to PDF, Image(s) to PDF</i>",
        reply_markup=keyboard,
    )
@dp.message_handler(Text(equals=["Word to PDF", "Image(s) to PDF"]))
async def start_conversion(message: types.Message):
    """
    Called when the user picks a conversion type from the keyboard.

    The button label doubles as the operations_dict key; enter the matching
    FSM state and ask the user to send the corresponding file(s).
    """
    entry = operations_dict[message.text]
    await entry["state"].set()
    await message.answer(
        entry["text"],
        reply_markup=types.ReplyKeyboardRemove(),
    )
@dp.message_handler(
    is_media_group=True,
    content_types=types.message.ContentType.DOCUMENT,
    state=[
        MergingStates.waiting_for_specific_file,
        CompressingStates.waiting_for_files_to_compress,
        CryptingStates.waiting_for_files_to_encrypt,
        CryptingStates.waiting_for_files_to_decrypt,
        SplittingStates.waiting_for_files_to_split,
    ],
)
async def inform_limitations(message: types.Message):
    """
    Some file operations cannot handle multiple files at the same time.

    This fires when the user sends a media group (album of documents) while
    one of the single-file states above is active, and will let the user
    know that only one file is accepted.
    """
    await message.reply(
        "I cannot handle multiple files at the same time.\n"
        "Please send a single file."
    )


@dp.message_handler(regexp=("pdf"), state=None)
async def vivy_torreto(message: types.Message):
    """
    An easter egg, I guess.

    This is just a dead meme, but yeah whatever. Triggers on any idle-state
    message matching "pdf".
    """
    await message.reply("https://ibb.co/9yCkBc1")


@dp.message_handler(regexp=("sing"), state=None)
async def vivy_sing(message: types.Message):
    """
    Another easter egg.

    The anime opening song; triggers on any idle-state message matching
    "sing".
    """
    await message.reply("https://youtu.be/2p8ig-TrYPY")


@dp.message_handler(state=None, content_types=types.message.ContentType.ANY)
async def send_instructions(message: types.Message):
    """
    If a state is not specified, provide some help to the user in case they
    are not able to figure out what to do. Catch-all for any other content
    sent outside an operation.

    It's not much, but it's honest work.
    """
    await message.reply("Please choose a command or type /help for instructions.")
| StarcoderdataPython |
3377054 | <reponame>USC-CSCI527-Fall2021/autonomous_car_navigation
import numpy as np
import tensorflow as tf
from tensorflow import keras
from queue import deque
import gym
import gym_carla
import carla
import argparse
import tensorflow as tf
# Configuration for the gym-carla environment; 'address'/'port' are
# overwritten from the CLI arguments in the __main__ block below.
params = {
    'number_of_vehicles': 25,
    'number_of_walkers': 20,
    'display_size': 250,  # screen size of bird-eye render
    'display_height' : 512,
    'display_main': True,
    'weather': "WetSunset",
    'max_past_step': 1,  # the number of past steps to draw
    'dt': 0.1,  # time interval between two frames
    'discrete': False,  # whether to use discrete control space
    'discrete_acc': [1.0, 0.0, 1.0],  # discrete value of accelerations
    'discrete_steer': [-1, 0, 1],  # discrete value of steering angles
    'continuous_accel_range': [-3.0, 3.0],  # continuous acceleration range
    'continuous_steer_range': [-0.2, 0.2],  # continuous steering angle range
    'ego_vehicle_filter': 'vehicle.tesla.model3',  # filter for defining ego vehicle
    'address': "localhost",  #'localhost',
    'port': 2000,  #2000 # connection port
    'town': 'Town02',  # which town to simulate
    'task_mode': 'random',  # mode of the task, [random, roundabout (only for Town03)]
    'max_time_episode': 5000,  # maximum timesteps per episode
    'max_waypt': 12,  # maximum number of waypoints
    'obs_range': 32,  # observation range (meter)
    'lidar_bin': 0.125,  # bin size of lidar sensor (meter)
    'd_behind': 12,  # distance behind the ego vehicle (meter)
    'out_lane_thres': 5.0,  # threshold for out of lane
    'desired_speed': 8,  # desired speed (m/s)
    'max_ego_spawn_times': 200,  # maximum times to spawn ego vehicle
    'display_route': True,  # whether to render the desired route
    'pixor_size': 64,  # size of the pixor labels
    'pixor': False,  # whether to output PIXOR observation
}
# Pretrained acceleration/steering classifiers; both weight files must exist
# relative to the working directory or these loads raise at import time.
acc_model = keras.models.load_model("normal_weights/acc_model_final.h5")
steer_model = keras.models.load_model("normal_weights/steer_model_final_2.hdf5")
def read_transform(img):
    """Crop the fixed 128x128 window (rows/cols 76..203) out of the bird-eye
    image and rescale pixel values from [0, 255] to [0, 1]."""
    cropped = img[76:204, 76:204, :]
    return cropped / 255
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", default="ppo_version5", help="Model weights directory")
    parser.add_argument("-i", "--ip", default="localhost", help="Server ip address")
    parser.add_argument("-p", "--port", default="2000", help="Server Port")
    args = parser.parse_args()
    MODEL = args.model
    SERVER = args.ip
    PORT = args.port
    params["address"] = SERVER
    params["port"] = int(PORT)
    env = gym.make('carla-v0', params=params)
    observation = env.reset()
    # Warm-up: accelerate straight for 30 steps to get the car moving.
    for _ in range(30): observation, _, _, _ = env.step([2.5,0])
    done = False
    cumulative_reward = 0
    step = 1
    # Rolling window of recent throttle decisions; note maxlen=15, so only
    # the last 15 of the 20 seeded 1s are actually kept.
    dq = deque([1]*20, maxlen=15)
    # Maps mean recent throttle in [0,1] to an acceleration multiplier:
    # 1.25 when the car has been accelerating constantly, up to 3.0 when it
    # has mostly been braking.
    amplifier = lambda x: (1 + -1*x)*1.75 + 1.25
    input("Press any key to continue!")
    while not done:
        # Model input: normalized 128x128 crop of the bird-eye view, batched.
        obs = read_transform(observation['birdeye']).reshape( (1, 128,128,3) )
        action_ud = np.argmax(acc_model.predict(obs))   # 1 = forward, 0 = brake
        action_lr = np.argmax(steer_model.predict(obs))-1  # -1/0/1 = right/straight/left
        dq.append(action_ud)
        amplify = amplifier(np.mean(dq))
        # Throttle is amplified when accelerating; a fixed -3 is applied on brake.
        observation, reward, done, _ = env.step( [ amplify*action_ud if action_ud==1 else -3, action_lr] )
        cumulative_reward = reward+ 0.9*cumulative_reward
        act_lr = {-1:"Right", 1: "Left", 0:"Straight"}[action_lr]
        act_ud = {1:"Forward", 0: "Brake"}[action_ud]
        print(f"Step {step} Discounted Reward: {cumulative_reward:.2f}, ACTION: {act_ud} {act_lr}")
        step += 1
4842436 | """
KGX Source for Simple Standard for Sharing Ontology Mappings ("SSSOM")
"""
import gzip
import re
import pandas as pd
from typing import Optional, Generator, Any, Dict, Tuple
import yaml
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
from kgx.source import Source
from kgx.utils.kgx_utils import (
validate_node,
sanitize_import,
validate_edge,
generate_uuid,
generate_edge_key,
)
from kgx.utils.rdf_utils import process_predicate
log = get_logger()
# Maps SSSOM per-side column names to generic KGX node properties. Both the
# subject_* and object_* columns intentionally map onto the same 'id' /
# 'category' keys: the mapping is applied separately per side when building
# the subject and object node records in load_edge().
SSSOM_NODE_PROPERTY_MAPPING = {
    'subject_id': 'id',
    'subject_category': 'category',
    'object_id': 'id',
    'object_category': 'category',
}
class SssomSource(Source):
"""
SssomSource is responsible for reading data as records
from an SSSOM file.
"""
def __init__(self):
    """Initialize the source with an empty predicate mapping (used when
    resolving SSSOM predicate_id values via process_predicate)."""
    super().__init__()
    # predicate_id -> mapped predicate overrides; empty by default.
    self.predicate_mapping = {}
def set_prefix_map(self, m: Dict) -> None:
    """
    Add or override default prefix to IRI map, delegating to the
    source's PrefixManager.

    Parameters
    ----------
    m: Dict
        Prefix to IRI map

    """
    self.prefix_manager.set_prefix_map(m)

def set_reverse_prefix_map(self, m: Dict) -> None:
    """
    Add or override default IRI to prefix map, delegating to the
    source's PrefixManager.

    Parameters
    ----------
    m: Dict
        IRI to prefix map

    """
    self.prefix_manager.set_reverse_prefix_map(m)
def parse(
    self,
    filename: str,
    format: str,
    compression: Optional[str] = None,
    **kwargs: Any,
) -> Generator:
    """
    Parse a SSSOM TSV

    Parameters
    ----------
    filename: str
        File to read from
    format: str
        The input file format (``tsv``, by default)
    compression: Optional[str]
        The compression (``gz``)
    kwargs: Dict
        Any additional arguments

    Returns
    -------
    Generator
        A generator for node and edge records

    """
    if 'delimiter' not in kwargs:
        kwargs['delimiter'] = '\t'
    self.parse_header(filename, compression)
    # SSSOM 'mapping provider' may override the default 'knowledge_source' setting?
    if 'mapping_provider' in self.graph_metadata:
        kwargs['knowledge_source'] = self.graph_metadata['mapping_provider']
    self.set_provenance_map(kwargs)
    if compression:
        FH = gzip.open(filename, 'rb')
    else:
        FH = open(filename)
    # Fix over the original: close the file handle once the chunked read is
    # exhausted (or the generator is abandoned); it used to be leaked.
    try:
        file_iter = pd.read_csv(
            FH,
            comment='#',
            dtype=str,
            chunksize=10000,
            low_memory=False,
            keep_default_na=False,
            **kwargs,
        )
        for chunk in file_iter:
            yield from self.load_edges(chunk)
    finally:
        FH.close()
def parse_header(self, filename: str, compression: Optional[str] = None) -> None:
    """
    Parse metadata from SSSOM '#'-prefixed YAML headers into
    self.graph_metadata, applying any 'curie_map' to the prefix manager.

    Parameters
    ----------
    filename: str
        Filename to parse
    compression: Optional[str]
        Compression type

    """
    yamlstr = ""
    # Fix over the original: open compressed input in text mode ('rt').
    # With 'rb' the loop yielded bytes and line.startswith('#') raised
    # TypeError (str prefix on bytes) for any gzipped file.
    if compression:
        FH = gzip.open(filename, 'rt')
    else:
        FH = open(filename)
    try:
        for line in FH:
            if line.startswith('#'):
                yamlstr += re.sub('^#', '', line)
            else:
                # Header lines are contiguous at the top of the file.
                break
    finally:
        # Fix over the original: the handle used to be leaked.
        FH.close()
    if yamlstr:
        metadata = yaml.safe_load(yamlstr)
        log.info(f"Metadata: {metadata}")
        if 'curie_map' in metadata:
            self.prefix_manager.update_prefix_map(metadata['curie_map'])
        for k, v in metadata.items():
            self.graph_metadata[k] = v
def load_node(self, node: Dict) -> Optional[Tuple[str, Dict]]:
    """
    Load a node into an instance of BaseGraph

    Parameters
    ----------
    node: Dict
        A node

    Returns
    -------
    Optional[Tuple[str, Dict]]
        A tuple that contains node id and node data, or ``None`` when
        the node has no 'id' field
    """
    node = validate_node(node)
    node_data = sanitize_import(node.copy())
    if 'id' in node_data:
        n = node_data['id']
        self.set_node_provenance(node_data)
        # Track every property name seen, for downstream sinks.
        self.node_properties.update(list(node_data.keys()))
        return n, node_data
    # Nodes without an identifier cannot be added to the graph; the
    # annotation previously claimed a non-optional Tuple return while
    # falling off the end here — now explicit.
    log.info("Ignoring node with no 'id': {}".format(node))
    return None
def load_edges(self, df: pd.DataFrame) -> Generator:
    """
    Load edges from pandas.DataFrame into an instance of BaseGraph

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe containing records that represent edges

    Returns
    -------
    Generator
        A generator for edge records
    """
    # Each dataframe row becomes one edge record dict.
    records = df.to_dict('records')
    for record in records:
        for item in self.load_edge(record):
            yield item
def load_edge(self, edge: Dict) -> Generator:
    """
    Load an edge into an instance of BaseGraph

    Parameters
    ----------
    edge : Dict
        An edge

    Returns
    -------
    Generator
        A generator for node and edge records
    """
    # Resolve the SSSOM predicate_id into a Biolink edge predicate.
    (element_uri, canonical_uri, predicate, property_name) = process_predicate(
        self.prefix_manager, edge['predicate_id'], self.predicate_mapping
    )
    if element_uri:
        edge_predicate = element_uri
    elif predicate:
        edge_predicate = predicate
    else:
        edge_predicate = property_name
    # NOTE(review): when a canonical URI exists, element_uri is used even
    # if it was falsy above (potentially setting the predicate to
    # None/'') — confirm this override is intentional.
    if canonical_uri:
        edge_predicate = element_uri
    data = {
        'subject': edge['subject_id'],
        'predicate': edge_predicate,
        'object': edge['object_id'],
    }
    # predicate_id has been consumed; drop it before property routing.
    del edge['predicate_id']
    data = validate_edge(data)
    subject_node = {}
    object_node = {}
    # Route remaining SSSOM columns: subject_* / object_* columns become
    # node properties (via SSSOM_NODE_PROPERTY_MAPPING); everything else
    # becomes an edge property.
    for k, v in edge.items():
        if k in SSSOM_NODE_PROPERTY_MAPPING:
            if k.startswith('subject'):
                mapped_k = SSSOM_NODE_PROPERTY_MAPPING[k]
                # Non-CURIE category values are coerced to OntologyClass.
                if mapped_k == 'category' and not PrefixManager.is_curie(v):
                    v = f"biolink:OntologyClass"
                subject_node[mapped_k] = v
            elif k.startswith('object'):
                mapped_k = SSSOM_NODE_PROPERTY_MAPPING[k]
                if mapped_k == 'category' and not PrefixManager.is_curie(v):
                    v = f"biolink:OntologyClass"
                object_node[mapped_k] = v
            else:
                log.info(f"Ignoring {k} {v}")
        else:
            data[k] = v
    # Yield both endpoint nodes before the edge itself.
    objs = [self.load_node(subject_node), self.load_node(object_node)]
    # Graph-level metadata (except the prefix map) is copied onto each edge.
    for k, v in self.graph_metadata.items():
        if k not in {'curie_map'}:
            data[k] = v
    edge_data = sanitize_import(data.copy())
    if 'subject' in edge_data and 'object' in edge_data:
        if 'id' not in edge_data:
            # Edges without an explicit id get a generated UUID.
            edge_data['id'] = generate_uuid()
        s = edge_data['subject']
        o = edge_data['object']
        self.set_edge_provenance(edge_data)
        key = generate_edge_key(s, edge_data['predicate'], o)
        self.edge_properties.update(list(edge_data.keys()))
        objs.append((s, o, key, edge_data))
    else:
        log.info(
            "Ignoring edge with either a missing 'subject' or 'object': {}".format(edge_data)
        )
    for o in objs:
        yield o
| StarcoderdataPython |
1727967 |
#A web scraper to do job searches for Health Leads clients
#This code was adapted from https://github.com/Futvin/Web-Scraper-in-Python-with-BeautifulSoup/blob/master/runSimpleScrap.py
import re
import urllib.request
from bs4 import BeautifulSoup
from docx import Document
from docx.shared import Pt
# Build the output Word document once at import time; crawler_func and
# menu() below append to (and save) this module-level document.
document = Document()
# Set the default ('Normal') style font for all paragraphs.
style = document.styles['Normal']
font = style.font
font.name = 'Garamond'
font.size = Pt(12)
#new_heading_style = document.styles['Heading 1']
#font_heading = new_heading_style.font
#font_heading.name = 'Garamond'
#font_heading.size = Pt(24)
document.add_heading('Job Listings', 1)
def crawler_func(url, page, jobsubj):
    """Scrape one Indeed results page, append its non-sponsored listings to
    the module-level Word ``document``, then recurse through the 'Next'
    pagination link for subsequent pages.

    Parameters:
        url: the Indeed results page to fetch.
        page: running result offset (Indeed paginates in steps of 10);
            detail paragraphs are only written while page < 10.
        jobsubj: the search term with '+' separating words.
    """
    # Search term with '+' separators stripped; used only in the filename.
    job_subj = jobsubj.replace("+", "")
    with urllib.request.urlopen(url) as urldata:
        rawtext = urldata.read().decode('utf-8', 'ignore')
    soup = BeautifulSoup(rawtext, 'html.parser')
    jobListings = soup.select(".result")
    for job in jobListings:
        #Get job details
        jobTitle= job.select(".jobtitle")[0].text.lstrip().rstrip()
        companyName = job.select(".company")[0].text.lstrip().rstrip()
        jobLocation = job.select(".location")[0].text.lstrip().rstrip()
        # Figured out if this ad is sponsored.
        isSponsered = ""
        sponderedSpans = job.find_all("span")
        if len(sponderedSpans) > 0:
            for span in sponderedSpans:
                spanText = span.text.lstrip().rstrip()
                if re.match("Sponsored", spanText) is not None:
                    isSponsered = spanText
        #get job URL
        # NOTE(review): if no 'turnstileLink' anchor exists for this listing,
        # jobURL keeps the previous listing's value (or is unbound on the
        # first one, silently swallowed by the except below) — confirm every
        # non-sponsored result carries such a link.
        all_jobs = job.find_all("a")
        for link in all_jobs:
            try:
                if "turnstileLink" in link['class']:
                    jobURL = "https://www.indeed.com" + link['href']
            except:
                pass
        try:
            #currently only getting two pages of jobs for brevity
            if (page < 10):
                if(isSponsered != "Sponsored"):
                    p = document.add_paragraph()
                    p.add_run(jobTitle).bold = True
                    q = document.add_paragraph(companyName)
                    q.add_run(" " + jobLocation)
                    document.add_paragraph(jobURL)
                    document.add_paragraph()
        except:
            pass
    # Persist progress after each results page.
    document.save('job_listings_' + str(job_subj) + '.docx')
    if (page < 10):
        # Append Indeed's "related searches" links on the first page only.
        document.add_paragraph("If applicable, other jobs you might want to consider will show up below.")
        relatedJobs = soup.select(".relatedQuerySpacing")
        for related in relatedJobs:
            all_related = related.find_all("a")
            for link in all_related:
                try:
                    relatedURL = "https://www.indeed.com" + link['href']
                    document.add_paragraph(relatedURL)
                except:
                    pass
        document.save('job_listings_' + str(job_subj) + '.docx')
    try:
        # Follow pagination: recurse on the 'Next' link until none exists
        # (the IndexError/absence is swallowed by the bare except).
        next_link = soup.select(".pagination")[0].find_all('a')
        for link in next_link:
            if re.match("Next", link.text) is not None:
                #goes through each page of jobs until it reaches the end
                page += 10
                print(link['href'])
                crawler_func("https://www.indeed.com" + link['href'],page, jobsubj)
    except:
        pass
def menu():
    """Interactively collect search parameters from the user, build the
    Indeed search URL, and start the crawl."""
    print("Hey fellow Employment Specialist! Welcome to the job scraper!")
    zipcode = input("Please input the zipcode of the job you are looking for.\nIf you don't know what zipcode you're looking for, enter 'N/A' and the default location will be set to Baltimore\n")
    if (zipcode == "N/A"):
        # Default location: a Baltimore zipcode.
        zipcode = 21218
    jobsubj = input("Please input the type of job you're seeking.\nIf your input is more than one word, please separate spaces with + symbols, e.g. 'night+jobs'\n")
    radius = input("Please input how far you can travel.\n If no preference, input 25.\n")
    sortdate = input("Type 'd' if you'd like to sort by date or 'r' to be sorted by relevance.\n")
    # 'd' adds Indeed's sort-by-date query parameter; anything else keeps
    # the default relevance ordering.
    if (sortdate=='d'):
        url = "https://www.indeed.com/jobs?q=" + str(jobsubj) + "&l=" + str(zipcode) + "&radius=" + str(radius) + "&sort=date"
    else:
        url = "https://www.indeed.com/jobs?q=" + str(jobsubj) + "&l=" + str(zipcode) + "&radius=" + str(radius)
    #count = 1
    print("Thanks! Starting search now...")
    print("Crawling through" + " " + url)
    # Start at result offset 0; crawler_func recurses through later pages.
    page = 0
    crawler_func(url, page, jobsubj)
    print("Jobs written to Word document. Thanks for using this service!")
# Script entry point: runs the interactive prompt immediately when the
# module is executed (or imported).
menu()
| StarcoderdataPython |
3316529 | <reponame>TylerDodds/DiceFinding<gh_stars>0
# Copyright 2021 Dice Finding Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transfoorms/projects visible dice dots to camera or screen space."""
import numpy as np
import pyrender
import DiceConfig
import ProjectionUtil
from TransformUtil import transform_points_3d
from CoordsHelper import Pose
from typing import Tuple
def pose_inverse(pose):
    """Return the matrix inverse of a square pose matrix (typically a 4x4
    homogeneous transform)."""
    inverted = np.linalg.inv(pose)
    return inverted
def _get_dice_to_camera_transform(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene):
    """Return the 4x4 transform taking dice-local coordinates to camera (eye)
    coordinates: inverse(camera pose) composed with the dice pose."""
    camera_pose = scene.get_pose(camera_node)
    dice_pose = scene.get_pose(dice_node)
    world_to_camera = pose_inverse(camera_pose)
    return world_to_camera @ dice_pose
def get_eye_space_dot_point_normals(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene):
    """Gets eye-space points and normals of dots given a camera node and a dice node."""
    dice_to_camera_transform = _get_dice_to_camera_transform(camera_node, dice_node, scene)
    points = DiceConfig.get_local_dot_positions()
    normals = DiceConfig.get_local_dot_normals()
    # Positions transform as points (translation applied), normals as
    # directions "at infinity" (rotation only) — hence the final flag.
    points_transformed = transform_points_3d(points, dice_to_camera_transform, False)
    normals_transformed = transform_points_3d(normals, dice_to_camera_transform, True)
    return points_transformed, normals_transformed
def get_are_eye_space_point_normals_facing_camera(points : np.ndarray, normals : np.ndarray):
    """Boolean mask (per column) of which eye-space point/normal pairs face
    the camera: the point must be in front of the camera (negative z, since
    the camera looks down -z) and the normal must oppose the view ray, i.e.
    <point, normal> < 0 (the test is relative to the camera-to-point ray,
    not to the z-axis alone)."""
    in_front = points[2, :] < 0
    facing = np.einsum('ij,ij->j', points, normals) < 0
    return in_front & facing
def filter_eye_space_camera_facing_dot_point_normals(points : np.ndarray, normals : np.ndarray):
    """Keep only the columns of *points*/*normals* whose dots face the camera."""
    visible_mask = get_are_eye_space_point_normals_facing_camera(points, normals)
    return points[:, visible_mask], normals[:, visible_mask]
def get_eye_space_camera_facing_dot_point_normals(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene):
    """Eye-space dot points and normals of *dice_node* that face the camera."""
    pts, nrms = get_eye_space_dot_point_normals(camera_node, dice_node, scene)
    return filter_eye_space_camera_facing_dot_point_normals(pts, nrms)
def get_dots_mask_facing_camera_from_eye_space_pose(die_eye_space_pose : Pose):
    """Gets a true-false mask of which dots face the camera, given the die's eye-space pose."""
    dice_transform = die_eye_space_pose.as_transformation_matrix()
    dot_points = DiceConfig.get_local_dot_positions()
    dot_normals = DiceConfig.get_local_dot_normals()
    # Points take the full transform; normals are directions (no translation).
    dot_points_transformed = transform_points_3d(dot_points, dice_transform, at_infinity=False)
    dot_normals_transformed = transform_points_3d(dot_normals, dice_transform, at_infinity=True)
    dots_are_visible = get_are_eye_space_point_normals_facing_camera(dot_points_transformed, dot_normals_transformed)
    return dots_are_visible
def get_local_dots_facing_camera_from_eye_space_pose(die_eye_space_pose : Pose, transpose : bool = True):
    """Local (die-frame) positions of the dots that face the camera, given
    the die's eye-space pose.

    Returns an Nx3 matrix when *transpose* is True, 3xN otherwise."""
    visible_mask = get_dots_mask_facing_camera_from_eye_space_pose(die_eye_space_pose)
    local_dots = DiceConfig.get_local_dot_positions()[:, visible_mask]
    return local_dots.T if transpose else local_dots
def get_points_within_image_bounds(points_px : np.ndarray, image_shape : Tuple[int, int]):
    """Boolean mask (per column) of which 2xN pixel-space points (x, y) fall
    strictly inside an image of shape (height, width)."""
    # image_shape is (rows, cols); reverse to (width, height) to match (x, y).
    upper_bounds = np.asarray(image_shape)[::-1, np.newaxis]
    above_origin = (points_px > 0).all(axis=0)
    below_extent = (points_px < upper_bounds).all(axis=0)
    return above_origin & below_extent
def get_eye_px_space_dot_pt_normals(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene, image_shape : Tuple[int, int]):
    """Get camera-facing dot points/normals (eye space) together with their
    pixel coordinates, keeping only dots that project inside the image."""
    facing_points, facing_normals = get_eye_space_camera_facing_dot_point_normals(camera_node, dice_node, scene)
    points_px = ProjectionUtil.project_to_image_px_coords(camera_node.camera, facing_points, image_shape)
    points_within_image = get_points_within_image_bounds(points_px, image_shape)
    # Apply the same in-image mask to all three parallel arrays.
    facing_points = facing_points[:,points_within_image]
    facing_normals = facing_normals[:,points_within_image]
    points_px = points_px[:, points_within_image]
    return facing_points, facing_normals, points_px
def sample_buffer(points_px : np.ndarray, buffer : np.ndarray, get_interpolated : bool):
    """Sample *buffer* at the given 2xN pixel coordinates (row 0 = x/column,
    row 1 = y/row), with bilinear interpolation or nearest-neighbour lookup.

    Returns a 1D array of sampled values, one per input point."""
    if get_interpolated:
        # NOTE: scipy.interpolate.interp2d is deprecated in recent SciPy;
        # consider RegularGridInterpolator when upgrading.
        from scipy.interpolate import interp2d
        interpolator = interp2d(np.arange(buffer.shape[1]), np.arange(buffer.shape[0]), buffer)
        interpolated = np.hstack([interpolator(points_px[0,i], points_px[1,i]) for i in range(points_px.shape[1])])
        result = interpolated
    else:
        points_round = np.rint(points_px).astype(int)
        # Fix: x (column) indices must be clipped by the width (shape[1])
        # and y (row) indices by the height (shape[0]); the bounds were
        # previously swapped, reading the wrong cell on non-square buffers.
        points_round[0,:] = np.clip(points_round[0,:], 0, buffer.shape[1] - 1)
        points_round[1,:] = np.clip(points_round[1,:], 0, buffer.shape[0] - 1)
        nearest = np.array([buffer[pt[1], pt[0]] for pt in points_round.T])
        result = nearest
    return result
def get_eye_space_dot_point_normals_visible_at_mask(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene, mask : np.ndarray, dice_mask_index : int, also_return_points_px : bool):
    """Gets eye-space dot points and normals of die dots facing camera, and within image, given a camera and dice node, corresponding to a given die's mask index and mask image array."""
    facing_points, facing_normals, points_px = get_eye_px_space_dot_pt_normals(camera_node, dice_node , scene, mask.shape)
    # A dot counts as unoccluded when the mask pixel under it belongs to
    # this die (equals dice_mask_index).
    mask_values = sample_buffer(points_px, mask, get_interpolated = False)
    points_equal_mask = mask_values == dice_mask_index
    not_occluded_points = facing_points[:, points_equal_mask]
    not_occluded_normals = facing_normals[:, points_equal_mask]
    if also_return_points_px:
        # NOTE(review): points_px is returned WITHOUT the occlusion filter
        # applied (unlike the points/normals) — confirm callers expect the
        # unfiltered pixel coordinates.
        return not_occluded_points, not_occluded_normals, points_px
    else:
        return not_occluded_points, not_occluded_normals
def get_image_space_dot_points(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene, mask : np.ndarray, dice_mask_index : int):
    """Pixel-space coordinates of the camera-facing, in-image dots of
    *dice_node* (as returned alongside the mask-filtered eye-space data)."""
    _, _, points_px = get_eye_space_dot_point_normals_visible_at_mask(
        camera_node, dice_node, scene, mask, dice_mask_index, also_return_points_px=True)
    return points_px
def get_eye_space_corner_points(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene):
    """Gets eye-space corner points of a dice node's bounding box."""
    dice_to_camera_transform = _get_dice_to_camera_transform(camera_node, dice_node, scene)
    points = DiceConfig.get_local_bounding_box_corners()
    # Corners are positions, so the full transform (with translation) applies.
    points_transformed = transform_points_3d(points, dice_to_camera_transform, False)
    return points_transformed
def get_eye_space_mesh_points(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene):
    """Gets eye-space die 3D mesh vertex points, given a camera node and dice node."""
    dice_to_camera_transform = _get_dice_to_camera_transform(camera_node, dice_node, scene)
    # Concatenate the vertex positions of all mesh primitives into one
    # 3xN matrix (primitives store Nx3, hence the transpose).
    points = np.vstack([primitive.positions for primitive in dice_node.mesh.primitives]).T
    points_transformed = transform_points_3d(points, dice_to_camera_transform, False)
    return points_transformed
def get_image_space_bounding_box(points_px : np.ndarray):
    """Axis-aligned 2D bounding box of the given 2xN points, returned as
    [min_x, min_y, width, height]."""
    lower = points_px.min(axis=1)
    upper = points_px.max(axis=1)
    return np.hstack((lower, upper - lower))
def get_image_space_dot_bounds(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene, mask : np.ndarray, dice_mask_index : int, unsunken : bool):
    """Returns image-space bounding boxes for dots, given a dice and camera node, as well as mask image and index.
    :param bool unsunken: If dot locations should be considered on the plane of unsunken edges, rather than the sunken center.
    """
    pts, normals, pts_px = get_eye_space_dot_point_normals_visible_at_mask(camera_node, dice_node, scene, mask, dice_mask_index, also_return_points_px = True)
    # One circle of edge points per dot, in eye space.
    eye_space_dot_circles = DiceConfig.get_dot_edge_points(pts, normals, unsunken)
    eye_space_dot_circles_px = [ProjectionUtil.project_to_image_px_coords(camera_node.camera, points, mask.shape) for points in eye_space_dot_circles]
    def get_within_mask(pts_px):
        # Keep only circle points whose mask pixel belongs to this die.
        mask_values = sample_buffer(pts_px, mask, get_interpolated = False)
        points_equal_mask = mask_values == dice_mask_index
        pts_px_at_mask = pts_px[:, points_equal_mask]
        return pts_px_at_mask
    eye_space_dot_circles_px_in_mask = [get_within_mask(pts) for pts in eye_space_dot_circles_px]
    # One bounding box per dot, from its surviving circle points.
    bounding_boxes = [get_image_space_bounding_box(pts) for pts in eye_space_dot_circles_px_in_mask]
    return bounding_boxes
def get_image_space_full_bounding_box_from_eye_space_points(camera_node : pyrender.Node, image_shape : np.ndarray, eye_space_points : np.ndarray):
    """Gets the image space bounding box ([min_x, min_y, w, h]) containing the
    given eye-space points, given the camera node and image shape."""
    # Project: eye space -> normalized screen coords -> pixel coords.
    eye_points_screen = ProjectionUtil.project_to_screen_normalized_coords(camera_node.camera, eye_space_points)
    eye_points_px = ProjectionUtil.screen_normalized_to_pixel(eye_points_screen, image_shape)
    min_and_width = get_image_space_bounding_box(eye_points_px)
    return min_and_width
def get_image_space_full_bounding_box_from_mesh(camera_node : pyrender.Node, dice_node : pyrender.Node, scene: pyrender.Scene, image_shape : np.ndarray):
    """Image-space bounding box ([min_x, min_y, w, h]) containing all mesh
    vertices of the dice model, given the camera and dice nodes."""
    mesh_points = get_eye_space_mesh_points(camera_node, dice_node, scene)
    return get_image_space_full_bounding_box_from_eye_space_points(
        camera_node, image_shape, mesh_points)
def get_scene_space_up_face_index(dice_node : pyrender.Node, scene: pyrender.Scene):
    """Gets the face-up index of a given dice node.
    Indices start at 1, for face with 1 dot up"""
    face_normals = DiceConfig.get_local_face_normals()
    dice_pose = scene.get_pose(dice_node)
    scene_face_normals = transform_points_3d(face_normals, dice_pose, True)
    # The up face is the one whose scene-space normal has the largest
    # y-component (scene y-axis is treated as "up" here).
    scene_face_normals_y = scene_face_normals[1,:]
    index = np.argmax(scene_face_normals_y)
    # Face normals are ordered by dot count starting at 1, so shift the
    # zero-based argmax accordingly.
    index_starting_at_1 = index + 1
    return index_starting_at_1
def get_y_rotation_angle_relative_to_camera(dice_node : pyrender.Node, scene: pyrender.Scene, dice_top_face_index : int):
    """Gets the rotation of a dice node around its vertical aspect with respect to the camera, given the top face index.
    Note that for each upward-pointing face is defined another face as the forward-pointing face given by zero y-rotation.

    Returns the signed angle (radians, wrapped to [-pi, pi)) between the
    die's forward direction and the horizontal direction from the die to
    the camera."""
    dice_to_camera_transform = _get_dice_to_camera_transform(scene.main_camera_node, dice_node, scene)
    dice_pose = scene.get_pose(dice_node)
    local_forward_axis = DiceConfig.get_local_face_forward(dice_top_face_index)[:, np.newaxis]
    dice_scene_forward = transform_points_3d(local_forward_axis, dice_pose, True)
    # Project the forward direction onto the horizontal (xz) plane and normalize.
    dice_scene_forward[1] = 0
    dice_scene_forward /= np.linalg.norm(dice_scene_forward)
    dice_to_camera_translation = dice_to_camera_transform[0:3,3]
    # Direction from the die toward the camera, also flattened to the xz plane.
    direction_camera_from_dice = -dice_to_camera_translation[:, np.newaxis]
    direction_camera_from_dice[1] = 0
    direction_camera_from_dice /= np.linalg.norm(direction_camera_from_dice)
    #Note x axis takes place of 'y' in arctan while z axis takes place of 'x' since y-axis rotations rotates x into -z (equivalently, z into x)
    angle_forward = np.arctan2(dice_scene_forward[0], dice_scene_forward[2])
    angle_direction = np.arctan2(direction_camera_from_dice[0], direction_camera_from_dice[2])
    angle_difference = angle_forward - angle_direction
    # Wrap the difference into [-pi, pi).
    angle_difference_wrapped = (angle_difference + np.pi) % (2.0 * np.pi) - np.pi
    return angle_difference_wrapped
| StarcoderdataPython |
# Package version string (PEP 440 pre-release identifier).
__version__ = "1.1.0b5"
| StarcoderdataPython |
50219 | from darr.basedatadir import BaseDataDir, create_basedatadir
from darr.metadata import MetaData
from ._version import get_versions
#TODO: required keys for sndinfo
class DataDir(BaseDataDir):
    """Object for disk-persistent sound IO: a directory holding a required
    'sndinfo.json' plus optional JSON metadata."""

    _classid = 'DataDir'
    _classdescr = 'object for IO for disk-persistent sounds'
    _version = get_versions()['version']
    _suffix = '.snd'
    _sndinfopath = 'sndinfo.json' # here goes required sound information
    _metadatapath = 'metadata.json' # here goes extra information

    def __init__(self, path, accessmode='r'):
        # Initialize the underlying directory, then attach the optional
        # JSON metadata file ('r' = read-only access by default).
        BaseDataDir.__init__(self, path)
        self._metadata = MetaData(self.path / self._metadatapath,
                                  accessmode=accessmode)

    @property
    def metadata(self):
        """Dictionary-like access to disk based metadata.

        Metadata items can be anything that can be saved in JSON format. If
        there is no metadata, the metadata file does not exist, rather than
        being empty. This saves a block of disk space (potentially 4kb)."""
        return self._metadata

    def read_sndinfo(self):
        """Return the required sound info read from 'sndinfo.json' as a dict."""
        return self._read_jsondict(self._sndinfopath)

    def write_sndinfo(self, d, overwrite=False):
        """Write the sound info dict *d* to 'sndinfo.json'.

        Behavior for an existing file when overwrite=False is determined by
        the underlying _write_jsondict — presumably it refuses; confirm."""
        self._write_jsondict(self._sndinfopath, d=d,
                             overwrite=overwrite)
        # Read back immediately; the result is discarded — presumably a
        # sanity check that the written JSON parses. TODO confirm intent.
        self.read_sndinfo()
def create_datadir(path, overwrite=False):
    """Create a base data directory on disk at *path* and wrap it in a
    DataDir instance."""
    base = create_basedatadir(path=path, overwrite=overwrite)
    return DataDir(base.path)
| StarcoderdataPython |
3238985 | <filename>test/integration/ggrc/models/test_clonable.py
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration test for Clonable mixin"""
from ggrc import db
from ggrc import models
from ggrc.snapshotter.rules import Types
from integration.ggrc import generator
from integration.ggrc.models import factories
from integration.ggrc_basic_permissions.models \
import factories as rbac_factories
from integration.ggrc.snapshotter import SnapshotterBaseTestCase
from ggrc_basic_permissions.models import Role
from ggrc_basic_permissions.models import UserRole
class TestClonable(SnapshotterBaseTestCase):
"""Test case for Clonable mixin"""
# pylint: disable=invalid-name
def setUp(self):
# pylint: disable=super-on-old-class
# pylint seems to get confused, mro chain successfully resolves and returns
# <type 'object'> as last entry.
super(TestClonable, self).setUp()
self.client.get("/login")
self.generator = generator.Generator()
self.object_generator = generator.ObjectGenerator()
def clone_object(self, obj, mapped_objects=None):
"""Perform clone operation on an object"""
if not mapped_objects:
mapped_objects = []
return self.object_generator.generate_object(
models.Audit,
{
"program": self.object_generator.create_stub(obj.program),
"title": "Audit - copy 1",
"operation": "clone",
"status": "Planned",
"cloneOptions": {
"sourceObjectId": obj.id,
"mappedObjects": mapped_objects
}
})
def test_audit_clone(self):
"""Test that assessment templates get copied correctly"""
audit = factories.AuditFactory()
assessment_template_1 = factories.AssessmentTemplateFactory(
title="test_audit_clone assessment_template_1")
assessment_template_2 = factories.AssessmentTemplateFactory(
title="test_audit_clone assessment_template_2")
factories.RelationshipFactory(
source=audit,
destination=assessment_template_1)
factories.RelationshipFactory(
source=audit,
destination=assessment_template_2)
assessment_template_attributes_def = [
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test text field",
"attribute_type": "Text",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test RTF",
"attribute_type": "Rich Text",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test checkbox",
"attribute_type": "Checkbox",
"multi_choice_options": "test checkbox label"
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test date field",
"attribute_type": "Date",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test dropdown field",
"attribute_type": "Dropdown",
"multi_choice_options": "ddv1,ddv2,ddv3"
},
]
assessment_template_attributes = []
for attribute in assessment_template_attributes_def:
attr = factories.CustomAttributeDefinitionFactory(**attribute)
assessment_template_attributes += [attr]
self.clone_object(audit, [u"AssessmentTemplate"])
self.assertEqual(db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).count(), 1)
audit_copy = db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).first()
assessment_templates = audit_copy.related_objects({"AssessmentTemplate"})
self.assertEqual(len(assessment_templates), 2)
assessment_template_1 = db.session.query(models.AssessmentTemplate).filter(
models.AssessmentTemplate.title ==
"test_audit_clone assessment_template_1"
).first()
self.assertEqual(
db.session.query(models.CustomAttributeDefinition).filter(
models.CustomAttributeDefinition.definition_type ==
"assessment_template",
models.CustomAttributeDefinition.definition_id ==
assessment_template_1.id
).count(), len(assessment_template_attributes_def))
def test_audit_clone_invalid_values(self):
"""Test that audit gets copied successfully if invalid input"""
audit = factories.AuditFactory()
assessment_template_1 = factories.AssessmentTemplateFactory(
title="test_audit_clone assessment_template_1")
assessment_template_2 = factories.AssessmentTemplateFactory(
title="test_audit_clone assessment_template_2")
factories.RelationshipFactory(
source=audit,
destination=assessment_template_1)
factories.RelationshipFactory(
source=audit,
destination=assessment_template_2)
assessment_template_attributes_def = [
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test text field",
"attribute_type": "Text",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test RTF",
"attribute_type": "Rich Text",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test checkbox",
"attribute_type": "Checkbox",
"multi_choice_options": "test checkbox label"
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test date field",
"attribute_type": "Date",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test dropdown field",
"attribute_type": "Dropdown",
"multi_choice_options": "ddv1,ddv2,ddv3"
},
]
assessment_template_attributes = []
for attribute in assessment_template_attributes_def:
attr = factories.CustomAttributeDefinitionFactory(**attribute)
assessment_template_attributes += [attr]
self.clone_object(audit, [u"blaaaaaa", 123])
self.assertEqual(db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).count(), 1)
audit_copy = db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).first()
assessment_templates = audit_copy.related_objects({"AssessmentTemplate"})
self.assertEqual(len(assessment_templates), 0)
def test_audit_clone_template_not_selected(self):
"""Test that assessment templates don't get copied"""
audit = factories.AuditFactory()
assessment_template_1 = factories.AssessmentTemplateFactory(
title="test_audit_clone assessment_template_1")
assessment_template_2 = factories.AssessmentTemplateFactory(
title="test_audit_clone assessment_template_2")
factories.RelationshipFactory(
source=audit,
destination=assessment_template_1)
factories.RelationshipFactory(
source=audit,
destination=assessment_template_2)
assessment_template_attributes_def = [
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test text field",
"attribute_type": "Text",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test RTF",
"attribute_type": "Rich Text",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test checkbox",
"attribute_type": "Checkbox",
"multi_choice_options": "test checkbox label"
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test date field",
"attribute_type": "Date",
"multi_choice_options": ""
},
{
"definition_type": "assessment_template",
"definition_id": assessment_template_1.id,
"title": "test dropdown field",
"attribute_type": "Dropdown",
"multi_choice_options": "ddv1,ddv2,ddv3"
},
]
assessment_template_attributes = []
for attribute in assessment_template_attributes_def:
attr = factories.CustomAttributeDefinitionFactory(**attribute)
assessment_template_attributes += [attr]
self.clone_object(audit, [""])
self.assertEqual(db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).count(), 1)
audit_copy = db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).first()
assessment_templates = audit_copy.related_objects({"AssessmentTemplate"})
self.assertEqual(len(assessment_templates), 0)
def test_audit_clone_auditors(self):
"""Test that auditors get cloned correctly"""
auditor_role = Role.query.filter_by(name="Auditor").first()
audit = factories.AuditFactory()
audit_context = factories.ContextFactory()
audit.context = audit_context
users = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
]
auditors = []
for user in users:
person = factories.PersonFactory(email=user)
auditors += [person]
for auditor in auditors:
rbac_factories.UserRoleFactory(
context=audit_context,
role=auditor_role,
person=auditor)
self.assertEqual(
UserRole.query.filter_by(
role=auditor_role,
context=audit.context).count(), 3, "Auditors not present")
self.clone_object(audit)
audit_copy = db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).first()
self.assertEqual(
UserRole.query.filter_by(
role=auditor_role,
context=audit_copy.context
).count(), 3, "Auditors not present on copy")
# Verify that contexts are different for original and copy audit
another_user_4 = factories.PersonFactory(email="<EMAIL>")
rbac_factories.UserRoleFactory(
context=audit_context,
role=auditor_role,
person=another_user_4)
self.assertEqual(
UserRole.query.filter_by(
role=auditor_role,
context=audit.context
).count(), 4, "Auditors not present")
self.assertEqual(
UserRole.query.filter_by(
role=auditor_role,
context=audit_copy.context
).count(), 3, "Auditors not present on copy")
def test_audit_clone_custom_attributes(self):
"""Test if custom attributes were copied correctly"""
audit = factories.AuditFactory()
ca_def_text = factories.CustomAttributeDefinitionFactory(
title="test audit CA def 1",
definition_type="audit",
attribute_type="Text"
)
factories.CustomAttributeValueFactory(
custom_attribute=ca_def_text,
attributable=audit,
attribute_value="CA 1 value"
)
self.clone_object(audit)
audit_copy = db.session.query(models.Audit).filter(
models.Audit.title.like("%copy%")).first()
self.assertEqual(
models.CustomAttributeValue.query.filter_by(
attributable_type="Audit",
attributable_id=audit_copy.id
).count(), 1, "Custom Attribute weren't copied.")
  def test_audit_snapshot_scope_cloning(self):
    """Test that exact same copy of original audit scope is created.

    The clone must reference the same revisions as the original snapshots,
    and objects added to the program after the original audit was created
    must not be snapshotted at all.
    """
    self._import_file("snapshotter_create.csv")
    program = db.session.query(models.Program).filter(
        models.Program.slug == "Prog-13211"
    ).one()
    self.create_audit(program)
    audit = db.session.query(models.Audit).filter(
        models.Audit.title == "Snapshotable audit").one()
    snapshots = db.session.query(models.Snapshot).filter(
        models.Snapshot.parent_type == "Audit",
        models.Snapshot.parent_id == audit.id,
    )
    # The import creates 3 objects of every snapshottable type.
    self.assertEqual(snapshots.count(), len(Types.all) * 3)
    self._import_file("snapshotter_update.csv")
    # We create another copy of this object to test that it will not be
    # snapshotted
    new_control = self.create_object(models.Control, {
        "title": "Test New Control On Program"
    })
    self.objgen.generate_relationship(program, new_control)
    audit = db.session.query(models.Audit).filter(
        models.Audit.title == "Snapshotable audit").one()
    self.clone_object(audit)
    audit_copy = db.session.query(models.Audit).filter(
        models.Audit.title == "Snapshotable audit - copy 1").one()
    clones_snapshots = db.session.query(models.Snapshot).filter(
        models.Snapshot.parent_type == "Audit",
        models.Snapshot.parent_id == audit_copy.id,
    )
    # The clone should have the same snapshot count as the original.
    self.assertEqual(clones_snapshots.count(), len(Types.all) * 3)
    # Map each snapshotted child to the revision it points at, for both
    # the original audit and its clone.
    original_revisions = {
        (snapshot.child_type, snapshot.child_id): snapshot.revision_id
        for snapshot in snapshots
    }
    clone_revisions = {
        (snapshot.child_type, snapshot.child_id): snapshot.revision_id
        for snapshot in clones_snapshots
    }
    # Every cloned snapshot must point at the same revision as the original.
    for child, revision_id in original_revisions.items():
      self.assertEqual(revision_id, clone_revisions[child])
    # The control added after audit creation must not appear in any scope.
    self.assertEqual(
        db.session.query(models.Snapshot).filter(
            models.Snapshot.child_type == "Control",
            models.Snapshot.child_id == new_control.id
        ).count(),
        0, "No snapshots should exist for new control."
    )
| StarcoderdataPython |
3266215 | from importlib import import_module
from unittest import TestCase
class TestMigration0137(TestCase):
    """Unit tests for the data migration that converts legacy
    ImageText group stream-field values into InfoUnit group values.
    """
    @classmethod
    def setUpClass(cls):
        # The migration module name starts with a digit, so it cannot be
        # imported with a normal import statement; load it dynamically.
        super(TestMigration0137, cls).setUpClass()
        cls.migration = import_module(
            'v1.migrations.0137_migrate_imagetextgroups_to_infounitgroup'
        )
    def test_image_text_50_50_group_to_info_unit_group(self):
        """A 50/50 ImageText group maps to an InfoUnit group with
        format '50-50', h2/h3 heading levels, and link_image_and_heading
        coerced from None to False.
        """
        data = {
            'heading': 'Image & Text 50/50 Group',
            'image_texts': [
                {
                    'heading': 'image_text 1',
                    'body': '<p>Body content</p>',
                    'image': {
                        'upload': 56
                    },
                    'links': [
                        {
                            'text': 'link 1',
                            'url': '#'
                        }
                    ],
                    'is_widescreen': True,
                    'is_button': False
                }
            ],
            'sharing': True,
            'link_image_and_heading': None
        }
        migrated = self.migration.image_text_group_to_info_unit_group(
            data,
            '50-50'
        )
        self.assertEqual(migrated, {
            'heading': {
                'text': 'Image & Text 50/50 Group',
                'level': 'h2'
            },
            'info_units': [
                {
                    'heading': {
                        'text': 'image_text 1',
                        'level': 'h3'
                    },
                    'body': '<p>Body content</p>',
                    'image': {
                        'upload': 56
                    },
                    'links': [
                        {
                            'text': 'link 1',
                            'url': '#'
                        }
                    ]
                }
            ],
            'sharing': True,
            'link_image_and_heading': False,
            'format': '50-50'
        })
    def test_image_text_25_75_group_to_info_unit_group(self):
        """A 25/75 ImageText group maps to format '25-75'; a child's
        has_rule flag becomes lines_between_items, and missing links
        become an empty list.
        """
        data = {
            'heading': 'Image & Text 25/75 Group',
            'image_texts': [
                {
                    'heading': 'image_text 1',
                    'has_rule': True,
                    'body': '<p>Body content</p>',
                    'image': {
                        'upload': 58
                    },
                    'is_widescreen': True,
                    'is_button': False
                }
            ],
            'sharing': True,
            'link_image_and_heading': True
        }
        migrated = self.migration.image_text_group_to_info_unit_group(
            data,
            '25-75'
        )
        self.assertEqual(migrated, {
            'heading': {
                'text': 'Image & Text 25/75 Group',
                'level': 'h2'
            },
            'info_units': [
                {
                    'heading': {
                        'text': 'image_text 1',
                        'level': 'h3'
                    },
                    'body': '<p>Body content</p>',
                    'image': {
                        'upload': 58
                    },
                    'links': [],
                }
            ],
            'lines_between_items': True,
            'sharing': True,
            'link_image_and_heading': True,
            'format': '25-75'
        })
| StarcoderdataPython |
1618341 | <reponame>cunningham-lab/monotonic-flds<filename>ssm_spline/ssm/models.py
from ssm.core import _HMM, _LDS, _SwitchingLDS
from ssm.init_state_distns import InitialStateDistribution
from ssm.transitions import \
StationaryTransitions, \
StickyTransitions, \
InputDrivenTransitions, \
RecurrentTransitions, \
RecurrentOnlyTransitions, \
RBFRecurrentTransitions, \
NeuralNetworkRecurrentTransitions
from ssm.observations import \
GaussianObservations, \
BernoulliObservations, \
PoissonObservations, \
VonMisesObservations, \
CategoricalObservations, \
StudentsTObservations, \
AutoRegressiveObservations, \
IndependentAutoRegressiveObservations, \
RobustAutoRegressiveObservations, \
RecurrentAutoRegressiveObservations, \
RecurrentRobustAutoRegressiveObservations, \
IdentityAutoRegressiveObservations, \
IdentitySigmaAutoRegressiveObservations
from ssm.hierarchical import \
HierarchicalInitialStateDistribution, \
HierarchicalTransitions, \
HierarchicalObservations, \
HierarchicalEmissions
from ssm.emissions import \
GaussianEmissions, \
GaussianCompoundEmissions, \
GaussianNonOrthCompoundEmissions, \
GaussianIdentityEmissions, \
GaussianNeuralNetworkEmissions, \
GaussianSplineEmissions, \
GaussianLogisticEmissions, \
GaussianMonotonicNeuralNetworkEmissions, \
IsotropicGaussianSplineEmissions, \
IsotropicGaussianLogisticEmissions, \
IsotropicGaussianMonotonicNeuralNetworkEmissions, \
GaussianCompoundNeuralNetworkEmissions, \
StudentsTEmissions, \
StudentsTIdentityEmissions, \
StudentsTNeuralNetworkEmissions, \
BernoulliEmissions, \
BernoulliIdentityEmissions, \
BernoulliNeuralNetworkEmissions, \
PoissonEmissions, \
PoissonCompoundEmissions, \
PoissonIdentityEmissions, \
PoissonNeuralNetworkEmissions, \
PoissonCompoundNeuralNetworkEmissions, \
AutoRegressiveEmissions, \
AutoRegressiveIdentityEmissions, \
AutoRegressiveNeuralNetworkEmissions
def HMM(K, D, M=0,
        transitions="standard",
        transition_kwargs=None,
        hierarchical_transition_tags=None,
        observations="gaussian",
        observation_kwargs=None,
        hierarchical_observation_tags=None,
        **kwargs):
    """
    Construct an HMM object with the appropriate observations
    and dynamics.
    :param K: number of discrete latent states
    :param D: observation dimension
    :param M: input dimension
    :param transitions: name of the transition model (case-insensitive)
    :param transition_kwargs: extra kwargs forwarded to the transition class
    :param hierarchical_transition_tags: if given, wrap the transitions in a
        HierarchicalTransitions keyed by these tags
    :param observations: conditional distribution of the data (case-insensitive)
    :param observation_kwargs: extra kwargs forwarded to the observation class
    :param hierarchical_observation_tags: if given, wrap the observations in a
        HierarchicalObservations keyed by these tags
    """
    # Make the initial state distribution
    init_state_distn = InitialStateDistribution(K, D, M=M)
    # Make the transition model
    transition_classes = dict(
        standard=StationaryTransitions,
        stationary=StationaryTransitions,
        sticky=StickyTransitions,
        inputdriven=InputDrivenTransitions,
        recurrent=RecurrentTransitions,
        recurrent_only=RecurrentOnlyTransitions,
        rbf_recurrent=RBFRecurrentTransitions,
        nn_recurrent=NeuralNetworkRecurrentTransitions
    )
    # Normalize the name so the lookup is case-insensitive, consistent with
    # SLDS and LDS (previously "Standard" raised while SLDS accepted it).
    transitions = transitions.lower()
    if transitions not in transition_classes:
        raise Exception("Invalid transition model: {}. Must be one of {}".
                        format(transitions, list(transition_classes.keys())))
    transition_kwargs = transition_kwargs or {}
    transition_distn = \
        HierarchicalTransitions(transition_classes[transitions], K, D, M=M,
                                tags=hierarchical_transition_tags,
                                **transition_kwargs) \
        if hierarchical_transition_tags is not None \
        else transition_classes[transitions](K, D, M=M, **transition_kwargs)
    # This is the master list of observation classes.
    # When you create a new observation class, add it here.
    # Recurrent transition models need the recurrent AR observation variants.
    is_recurrent = transitions in ("recurrent", "recurrent_only", "nn_recurrent")
    observation_classes = dict(
        gaussian=GaussianObservations,
        studentst=StudentsTObservations,
        t=StudentsTObservations,
        bernoulli=BernoulliObservations,
        categorical=CategoricalObservations,
        poisson=PoissonObservations,
        vonmises=VonMisesObservations,
        ar=RecurrentAutoRegressiveObservations if is_recurrent else AutoRegressiveObservations,
        autoregressive=RecurrentAutoRegressiveObservations if is_recurrent else AutoRegressiveObservations,
        independent_ar=IndependentAutoRegressiveObservations,
        robust_ar=RecurrentRobustAutoRegressiveObservations if is_recurrent else RobustAutoRegressiveObservations,
        robust_autoregressive=RecurrentRobustAutoRegressiveObservations if is_recurrent else RobustAutoRegressiveObservations,
    )
    observations = observations.lower()
    if observations not in observation_classes:
        raise Exception("Invalid observation model: {}. Must be one of {}".
                        format(observations, list(observation_classes.keys())))
    observation_kwargs = observation_kwargs or {}
    observation_distn = \
        HierarchicalObservations(observation_classes[observations], K, D, M=M,
                                 tags=hierarchical_observation_tags,
                                 **observation_kwargs) \
        if hierarchical_observation_tags is not None \
        else observation_classes[observations](K, D, M=M, **observation_kwargs)
    # Make the HMM
    return _HMM(K, D, M, init_state_distn, transition_distn, observation_distn)
def SLDS(N, K, D, M=0,
         transitions="standard",
         transition_kwargs=None,
         hierarchical_transition_tags=None,
         dynamics="gaussian",
         dynamics_kwargs=None,
         hierarchical_dynamics_tags=None,
         emissions="gaussian",
         emission_kwargs=None,
         hierarchical_emission_tags=None,
         single_subspace=True,
         **kwargs):
    """
    Construct an SLDS object with the appropriate observations, latent states, and dynamics.
    :param N: observation dimension
    :param K: number of discrete latent states
    :param D: latent dimension
    :param M: input dimension
    :param transitions: name of the discrete transition model (case-insensitive)
    :param dynamics: name of the continuous dynamics model; "t"/"studentst"
        gives the continuous latent states Student's t noise
    :param emissions: name of the emission model mapping latents to data
    :param single_subspace: if true, all discrete states share the same mapping from
        continuous latent states to observations.
    """
    # Make the initial state distribution
    init_state_distn = InitialStateDistribution(K, D, M=M)
    # Make the transition model
    transition_classes = dict(
        standard=StationaryTransitions,
        stationary=StationaryTransitions,
        sticky=StickyTransitions,
        inputdriven=InputDrivenTransitions,
        recurrent=RecurrentTransitions,
        recurrent_only=RecurrentOnlyTransitions,
        rbf_recurrent=RBFRecurrentTransitions,
        nn_recurrent=NeuralNetworkRecurrentTransitions
    )
    transitions = transitions.lower()
    if transitions not in transition_classes:
        raise Exception("Invalid transition model: {}. Must be one of {}".
                        format(transitions, list(transition_classes.keys())))
    transition_kwargs = transition_kwargs or {}
    transition_distn = \
        HierarchicalTransitions(transition_classes[transitions], K, D, M,
                                tags=hierarchical_transition_tags,
                                **transition_kwargs) \
        if hierarchical_transition_tags is not None\
        else transition_classes[transitions](K, D, M=M, **transition_kwargs)
    # Make the dynamics distn
    # Recurrent transition models require the recurrent AR dynamics variants.
    is_recurrent = (transitions.lower() in ["recurrent", "recurrent_only", "nn_recurrent"])
    dynamics_classes = dict(
        none=GaussianObservations,
        gaussian=RecurrentAutoRegressiveObservations if is_recurrent else AutoRegressiveObservations,
        t=RecurrentRobustAutoRegressiveObservations if is_recurrent else RobustAutoRegressiveObservations,
        studentst=RecurrentRobustAutoRegressiveObservations if is_recurrent else RobustAutoRegressiveObservations,
        identity=IdentityAutoRegressiveObservations,
        identity_sigma=IdentitySigmaAutoRegressiveObservations,
    )
    dynamics = dynamics.lower()
    if dynamics not in dynamics_classes:
        raise Exception("Invalid dynamics model: {}. Must be one of {}".
                        format(dynamics, list(dynamics_classes.keys())))
    dynamics_kwargs = dynamics_kwargs or {}
    dynamics_distn = \
        HierarchicalObservations(dynamics_classes[dynamics], K, D, M,
                                 tags=hierarchical_dynamics_tags,
                                 **dynamics_kwargs) \
        if hierarchical_dynamics_tags is not None \
        else dynamics_classes[dynamics](K, D, M=M, **dynamics_kwargs)
    # Make the emission distn
    # Master list of emission models; add new emission classes here.
    emission_classes = dict(
        gaussian=GaussianEmissions,
        gaussian_compound=GaussianCompoundEmissions,
        gaussian_non_orth_compound=GaussianNonOrthCompoundEmissions,
        gaussian_nn_compound=GaussianCompoundNeuralNetworkEmissions,
        gaussian_id=GaussianIdentityEmissions,
        gaussian_nn=GaussianNeuralNetworkEmissions,
        gaussian_spline=GaussianSplineEmissions,
        gaussian_logistic=GaussianLogisticEmissions,
        gaussian_mnn=GaussianMonotonicNeuralNetworkEmissions,
        iso_gaussian_spline=IsotropicGaussianSplineEmissions,
        iso_gaussian_logistic=IsotropicGaussianLogisticEmissions,
        iso_gaussian_mnn=IsotropicGaussianMonotonicNeuralNetworkEmissions,
        studentst=StudentsTEmissions,
        studentst_id=StudentsTIdentityEmissions,
        studentst_nn=StudentsTNeuralNetworkEmissions,
        t=StudentsTEmissions,
        t_id=StudentsTIdentityEmissions,
        t_nn=StudentsTNeuralNetworkEmissions,
        poisson=PoissonEmissions,
        poisson_compound=PoissonCompoundEmissions,
        poisson_id=PoissonIdentityEmissions,
        poisson_nn=PoissonNeuralNetworkEmissions,
        poisson_nn_compound=PoissonCompoundNeuralNetworkEmissions,
        bernoulli=BernoulliEmissions,
        bernoulli_id=BernoulliIdentityEmissions,
        bernoulli_nn=BernoulliNeuralNetworkEmissions,
        ar=AutoRegressiveEmissions,
        ar_id=AutoRegressiveIdentityEmissions,
        ar_nn=AutoRegressiveNeuralNetworkEmissions,
        autoregressive=AutoRegressiveEmissions,
        autoregressive_id=AutoRegressiveIdentityEmissions,
        autoregressive_nn=AutoRegressiveNeuralNetworkEmissions
    )
    emissions = emissions.lower()
    if emissions not in emission_classes:
        raise Exception("Invalid emission model: {}. Must be one of {}".
                        format(emissions, list(emission_classes.keys())))
    emission_kwargs = emission_kwargs or {}
    emission_distn = \
        HierarchicalEmissions(emission_classes[emissions], N, K, D, M,
                              tags=hierarchical_emission_tags,
                              single_subspace=single_subspace,
                              **emission_kwargs) \
        if hierarchical_emission_tags is not None \
        else emission_classes[emissions](N, K, D, M=M, single_subspace=single_subspace, **emission_kwargs)
    # Make the HMM
    return _SwitchingLDS(N, K, D, M, init_state_distn, transition_distn, dynamics_distn, emission_distn)
def LDS(N, D, M=0,
        dynamics="gaussian",
        dynamics_kwargs=None,
        hierarchical_dynamics_tags=None,
        emissions="gaussian",
        emission_kwargs=None,
        hierarchical_emission_tags=None,
        **kwargs):
    """
    Construct an LDS object with the appropriate observations, latent states, and dynamics.
    Currently, this uses a lot of the same code path as the SLDS.
    :param N: observation dimension
    :param D: latent dimension
    :param M: input dimension
    :param dynamics: name of the continuous dynamics model; "t"/"studentst"
        gives the continuous latent states Student's t noise
    :param emissions: name of the emission model mapping latents to data
    """
    # Make the dynamics distn
    dynamics_classes = dict(
        gaussian=AutoRegressiveObservations,
        t=RobustAutoRegressiveObservations,
        studentst=RobustAutoRegressiveObservations,
    )
    dynamics = dynamics.lower()
    if dynamics not in dynamics_classes:
        # Bug fix: this error path referenced the undefined name
        # `dynamic_classes`, raising NameError instead of the intended message.
        raise Exception("Invalid dynamics model: {}. Must be one of {}".
                        format(dynamics, list(dynamics_classes.keys())))
    dynamics_kwargs = dynamics_kwargs or {}
    # Bug fix: `HierarchicalDynamics` is not defined or imported anywhere;
    # the hierarchical wrapper for dynamics is HierarchicalObservations,
    # exactly as used by SLDS above.
    dynamics_distn = \
        HierarchicalObservations(dynamics_classes[dynamics], 1, D, M,
                                 tags=hierarchical_dynamics_tags,
                                 **dynamics_kwargs) \
        if hierarchical_dynamics_tags is not None \
        else dynamics_classes[dynamics](1, D, M=M, **dynamics_kwargs)
    # Make the emission distn
    emission_classes = dict(
        gaussian=GaussianEmissions,
        gaussian_compound=GaussianCompoundEmissions,
        gaussian_non_orth_compound=GaussianNonOrthCompoundEmissions,
        gaussian_nn_compound=GaussianCompoundNeuralNetworkEmissions,
        gaussian_id=GaussianIdentityEmissions,
        gaussian_nn=GaussianNeuralNetworkEmissions,
        gaussian_spline=GaussianSplineEmissions,
        gaussian_logistic=GaussianLogisticEmissions,
        gaussian_mnn=GaussianMonotonicNeuralNetworkEmissions,
        iso_gaussian_spline=IsotropicGaussianSplineEmissions,
        iso_gaussian_logistic=IsotropicGaussianLogisticEmissions,
        iso_gaussian_mnn=IsotropicGaussianMonotonicNeuralNetworkEmissions,
        studentst=StudentsTEmissions,
        studentst_id=StudentsTIdentityEmissions,
        studentst_nn=StudentsTNeuralNetworkEmissions,
        t=StudentsTEmissions,
        t_id=StudentsTIdentityEmissions,
        t_nn=StudentsTNeuralNetworkEmissions,
        poisson=PoissonEmissions,
        poisson_compound=PoissonCompoundEmissions,
        poisson_id=PoissonIdentityEmissions,
        poisson_nn=PoissonNeuralNetworkEmissions,
        poisson_nn_compound=PoissonCompoundNeuralNetworkEmissions,
        bernoulli=BernoulliEmissions,
        bernoulli_id=BernoulliIdentityEmissions,
        bernoulli_nn=BernoulliNeuralNetworkEmissions,
        ar=AutoRegressiveEmissions,
        ar_id=AutoRegressiveIdentityEmissions,
        ar_nn=AutoRegressiveNeuralNetworkEmissions,
        autoregressive=AutoRegressiveEmissions,
        autoregressive_id=AutoRegressiveIdentityEmissions,
        autoregressive_nn=AutoRegressiveNeuralNetworkEmissions
    )
    emissions = emissions.lower()
    if emissions not in emission_classes:
        raise Exception("Invalid emission model: {}. Must be one of {}".
                        format(emissions, list(emission_classes.keys())))
    emission_kwargs = emission_kwargs or {}
    emission_distn = \
        HierarchicalEmissions(emission_classes[emissions], N, 1, D, M,
                              tags=hierarchical_emission_tags,
                              **emission_kwargs) \
        if hierarchical_emission_tags is not None \
        else emission_classes[emissions](N, 1, D, M=M, **emission_kwargs)
    # An LDS is a single-state SLDS, hence K=1 in the component distributions.
    return _LDS(N, D, M, dynamics_distn, emission_distn)
| StarcoderdataPython |
3373743 | <filename>msgs/apps.py
from django.apps import AppConfig
class MsgsConfig(AppConfig):
    """Django application configuration for the ``msgs`` app."""
    # Default primary-key field type for models that do not declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted module path Django uses to register this application.
    name = 'msgs'
| StarcoderdataPython |
3313052 | <filename>mafs/cactus_MAFGeneratorTest.py
#!/usr/bin/env python
#Copyright (C) 2009-2011 by <NAME> (<EMAIL>)
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
from cactus.shared.test import parseCactusSuiteTestOptions
from sonLib.bioio import TestStatus
from cactus.shared.test import getCactusInputs_random
from cactusTools.shared.test import runWorkflow_multipleExamples
class TestCase(unittest.TestCase):
    """MAF-generation tests driven by randomly generated cactus inputs."""
    def testCactus_RandomAlignmentsOnly(self):
        """Build mafs from cactusDisks containing no trees, faces or reference.
        """
        runWorkflow_multipleExamples(getCactusInputs_random, #Just for the alignments.
                                     testNumber=TestStatus.getTestSetup(),
                                     makeMAFs=True,buildAvgs=False, buildReference=False)
    def testCactus_Random(self):
        """Build mafs from cactusDisks containing trees, faces and a reference
        (the output will include the MAFs ordered by reference).
        """
        runWorkflow_multipleExamples(getCactusInputs_random,
                                     testNumber=TestStatus.getTestSetup(),
                                     makeMAFs=True)
def main():
    """Parse cactus test options, then run the unittest suite."""
    parseCactusSuiteTestOptions()
    # Strip cactus-specific CLI arguments so unittest does not try to
    # interpret them as test names.
    sys.argv = sys.argv[:1]
    unittest.main()
# Run the suite only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3233602 | <reponame>fossabot/drive-sdk-python<gh_stars>0
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from lusid_drive.api.application_metadata_api import ApplicationMetadataApi
| StarcoderdataPython |
91318 | import pyblish.api
import openpype.api
from openpype.pipeline import PublishXmlValidationError
class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
    """Validates that textures workfile has collected resources (optional).
        Collected resources means secondary workfiles (in most cases).
    """
    label = "Validate Texture Workfile Has Resources"
    hosts = ["standalonepublisher"]
    order = openpype.api.ValidateContentsOrder
    families = ["texture_batch_workfile"]
    optional = True
    # from presets
    # Extensions regarded as "main" workfiles; anything else is treated as a
    # secondary workfile and skipped by this validator.
    main_workfile_extensions = ['mra']
    def process(self, instance):
        """Fail publishing when a main workfile instance has no resources."""
        if instance.data["family"] == "workfile":
            ext = instance.data["representations"][0]["ext"]
            if ext not in self.main_workfile_extensions:
                # Only a secondary workfile is present; nothing to validate.
                self.log.warning("Only secondary workfile present!")
                return
            if not instance.data.get("resources"):
                msg = "No secondary workfile present for workfile '{}'". \
                    format(instance.data["name"])
                ext = self.main_workfile_extensions[0]
                # Data substituted into the XML validation-error template.
                formatting_data = {"file_name": instance.data["name"],
                                   "extension": ext}
                raise PublishXmlValidationError(self, msg,
                                                formatting_data=formatting_data
                                                )
| StarcoderdataPython |
1677032 | import crypto_tools
import json
def elliptic_little_doc():
    """Return the one-line description of the elliptic cipher."""
    summary = "encrypt/decrypt using elliptic algo"
    return summary
def elliptic_full_doc():
    """Return the extended description of the elliptic cipher."""
    description = """
    It's quite hard but I tried to write self documented so
    just try to read it and google some additional stuff
    """
    return description
def elliptic_encrypt(data, open_key, r_number, G):
    """ElGamal-style encryption on an elliptic curve.

    Each character is encoded as the point G * ord(char), then wrapped in a
    (G*r, point + open_key*r) ciphertext pair. Returns the pairs as a JSON
    string of [[x1, y1], [x2, y2]] coordinate lists.
    """
    encoded_points = []
    for char in data:
        point = G * ord(char)
        # Warn (but proceed) if the encoded point left the curve.
        if not crypto_tools.elliptic_point.belong_to_curve(point):
            crypto_tools.cterm(
                "output",
                f"Warning: {G} * {ord(char)} out of curve", "inf"
            )
        encoded_points.append(point)
    crypto_tools.cterm("output", f"Encoded data = {encoded_points}", "inf")
    cipher_pairs = []
    for point in encoded_points:
        c1 = G * r_number
        c2 = point + open_key * r_number
        cipher_pairs.append([[c1.x, c1.y], [c2.x, c2.y]])
    return json.dumps(cipher_pairs)
def elliptic_decrypt(data, secret_key):
    """Decrypt a JSON list of ciphertext point pairs from elliptic_encrypt.

    For each pair (c1, c2) the plaintext point is c2 - secret_key * c1.
    Returns the list of recovered elliptic points.
    """
    pairs = json.loads(data)
    plain_points = []
    for pair in pairs:
        c1 = crypto_tools.elliptic_point(pair[0][0], pair[0][1])
        c2 = crypto_tools.elliptic_point(pair[1][0], pair[1][1])
        plain_points.append(c2 - c1 * secret_key)
    return plain_points
def elliptic_processing(data, elliptic_curve, g_value,
                        secret_key, r_number, encrypt):
    """Configure the curve, derive the public key, and dispatch.

    Routes to elliptic_encrypt when *encrypt* == "encrypt", otherwise to
    elliptic_decrypt.
    """
    crypto_tools.elliptic_point.set_curve(*elliptic_curve)
    generator = crypto_tools.elliptic_point(*g_value)
    public_key = generator * secret_key
    if encrypt != "encrypt":
        return elliptic_decrypt(data, secret_key)
    return elliptic_encrypt(data, public_key, r_number, generator)
@crypto_tools.file_manipulation()
def elliptic(data):
    """Interactive CLI entry point for the elliptic cipher.

    Prompts for the curve parameters, generator point, secret key, random
    nonce and the action, then delegates to elliptic_processing.
    """
    data = crypto_tools.utf_decoder(data)
    # Curve y^2 = x^3 + a*x + b over GF(p), entered as "a:b:p".
    elliptic_curve = crypto_tools.cterm('input',
                                        'Enter curve coefficients(a:b:p): ',
                                        'ans')
    elliptic_curve = crypto_tools.decode_params(elliptic_curve, 3)
    # Generator point entered as "x:y".
    g_value = crypto_tools.cterm('input',
                                 'Enter generator point(x:y): ', 'ans')
    g_value = crypto_tools.decode_params(g_value, 2)
    secret_key = int(crypto_tools.cterm('input',
                                        'Enter secret key: ', 'ans'))
    r_number = int(crypto_tools.cterm('input',
                                      'Enter r number: ', 'ans'))
    encrypt = crypto_tools.cterm('input',
                                 'You want encrypt or decrypt: ', 'ans')
    if encrypt != "encrypt" and encrypt != "decrypt":
        raise ValueError(f"Incorrect action {encrypt}")
    return elliptic_processing(data, elliptic_curve, g_value,
                               secret_key, r_number, encrypt)
# Attach the help callbacks used by the crypto_tools CLI help system.
elliptic.little_doc = elliptic_little_doc
elliptic.full_doc = elliptic_full_doc
| StarcoderdataPython |
23761 | """ First class definition"""
class Cat:
    """Minimal example class with no attributes or methods."""
    pass
class RaceCar:
    """Second minimal example class, unused below."""
    pass
# Instantiate three independent Cat objects to demonstrate construction.
cat1 = Cat()
cat2 = Cat()
cat3 = Cat()
1792763 | <reponame>Pankti910/laari<filename>laari_wala/test_graph.py
#%%
# Plot stored token coordinates: the tokens come from token_string.json,
# their x/y positions from token_data_x.json / token_data_y.json.
import json
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from matplotlib import pyplot as plt

# Determiner-like tokens stripped from the corpus in addition to NLTK
# stopwords (NLTK's list is lowercase, so these capitalized forms survive it).
common_ts = ["The", "These", "Their", "Thought", "Though", "This", "That"]


def _load_json(path):
    """Read and parse one JSON file."""
    with open(path, "r") as handle:
        return json.load(handle)


def _tokenize(lines, extra_stopwords):
    """Tokenize *lines*, dropping NLTK stopwords and *extra_stopwords*.

    Returns the tokens de-duplicated while preserving first-occurrence order.
    Fixes of the original script: ``stopwords.words()`` is read once instead
    of per token, and removal/de-duplication no longer mutates the list while
    indexing into it (which skipped elements after each ``remove``).
    """
    stop_set = set(stopwords.words())
    extra = set(extra_stopwords)
    tokens = []
    for line in lines:
        for word in word_tokenize(line):
            if word not in stop_set and word not in extra:
                tokens.append(word)
    # dict preserves insertion order, so this de-duplicates in order.
    return list(dict.fromkeys(tokens))


# User-searched corpus (not used by the plot; loaded for parity with the
# original workflow, which kept it around for later annotation).
user_data = _load_json("/xampp/htdocs/laari/user_corpuses/Swapnil6.json")

# Database / keyword corpus whose tokens correspond to the stored coordinates.
corpus_data = _load_json("token_string.json")
tk1 = _tokenize(corpus_data, common_ts)

# Pre-computed coordinates for each token.
data_x = _load_json("token_data_x.json")
data_y = _load_json("token_data_y.json")

plt.scatter(data_x, data_y)
# %%
| StarcoderdataPython |
3249381 | <gh_stars>1-10
import os
from uuid import uuid4
from datetime import datetime
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
class result(Model):
    """Cassandra model storing one fraud-detection result row."""
    __keyspace__ = os.getenv("CASSANDRA_KEY_SPACE")
    __table_name__ = "result"
    # Client-generated primary key.
    id = columns.UUID(primary_key=True, default=uuid4)
    # Feature vector the prediction was made from.
    data = columns.List(value_type=columns.Float)
    # Prediction outcome.
    is_fraud = columns.Boolean(default=False)
    # Row creation timestamp; utcnow is a callable, evaluated per insert.
    time = columns.DateTime(default=datetime.utcnow)
    @classmethod
    def find_all(cls):
        """Return a queryset over every stored result."""
        return cls.objects.all()
    @classmethod
    def count_all(cls):
        """Return the total number of stored results."""
        return cls.objects.count()
| StarcoderdataPython |
69610 | <reponame>lifning/picotool
"""The map section of a PICO-8 cart.
The map region consists of 4096 bytes. The .p8 representation is 32
lines of 256 hexadecimal digits (128 bytes).
The map is 128 tiles wide by 64 tiles high. Each tile is one of the
256 tiles from the spritesheet. Map memory describes the top 32 rows
(128 * 32 = 4096). If the developer draws tiles in the bottom 32 rows,
this is stored in the bottom of the gfx memory region.
"""
__all__ = ['Map']
from .. import util
class Map(util.BaseSection):
    """The map region of a PICO-8 cart."""
    # Number of data bytes represented by one hex line in the .p8 format.
    HEX_LINE_LENGTH_BYTES = 128
    def __init__(self, *args, **kwargs):
        """The initializer.
        The Map initializer takes an optional gfx keyword argument
        whose value is a reference to the Gfx instance where lower map
        data is stored.
        """
        self._gfx = None
        if 'gfx' in kwargs:
            # Pop 'gfx' before delegating; the base class doesn't accept it.
            self._gfx = kwargs['gfx']
            del kwargs['gfx']
        super().__init__(*args, **kwargs)
    @classmethod
    def empty(cls, version=4, gfx=None):
        """Creates an empty instance.
        Args:
          version: The PICO-8 file version.
          gfx: The Gfx object where lower map data is written.
        Returns:
          A Map instance.
        """
        return cls(data=bytearray(b'\x00' * 4096), version=version, gfx=gfx)
    @classmethod
    def from_lines(cls, *args, **kwargs):
        """Like the base from_lines, but accepts a gfx keyword argument."""
        gfx = None
        if 'gfx' in kwargs:
            gfx = kwargs['gfx']
            del kwargs['gfx']
        result = super().from_lines(*args, **kwargs)
        result._gfx = gfx
        return result
    @classmethod
    def from_bytes(cls, *args, **kwargs):
        """Like the base from_bytes, but accepts a gfx keyword argument."""
        gfx = None
        if 'gfx' in kwargs:
            gfx = kwargs['gfx']
            del kwargs['gfx']
        result = super().from_bytes(*args, **kwargs)
        result._gfx = gfx
        return result
    def get_cell(self, x, y):
        """Gets the tile ID for a map cell.
        Args:
          x: The map cell x (column) coordinate. (0-127)
          y: The map cell y (row) coordinate. Map must have a Gfx if y > 31.
            (0-63)
        Returns:
          The tile ID for the cell.
        """
        assert 0 <= x <= 127
        assert (0 <= y <= 31) or ((0 <= y <= 63) and self._gfx is not None)
        if y <= 31:
            return self._data[y * 128 + x]
        # Rows 32-63 live in the second half of gfx memory (shared region).
        return self._gfx._data[4096 + (y - 32) * 128 + x]
    def set_cell(self, x, y, val):
        """Sets the tile ID for a map cell.
        Args:
          x: The map cell x (column) coordinate. (0-127)
          y: The map cell y (row) coordinate. (0-63) If y > 31, Map must have a
            Gfx, and this method updates the shared data region in the Gfx.
          val: The new tile ID for the cell. (0-255)
        """
        assert 0 <= x <= 127
        assert (0 <= y <= 31) or ((0 <= y <= 63) and self._gfx is not None)
        assert 0 <= val <= 255
        if y <= 31:
            self._data[y * 128 + x] = val
        else:
            # Rows 32-63 write into the shared gfx memory region.
            self._gfx._data[4096 + (y - 32) * 128 + x] = val
    def get_rect_tiles(self, x, y, width=1, height=1):
        """Gets a rectangle of map tiles.
        The map is a grid of 128x32 tiles, or 128x64 if using the
        gfx/map shared memory for map data. This method returns a
        rectangle of tile IDs on the map, as a list of bytearrays.
        If the requested rectangle size goes off the edge of the map,
        the off-edge tiles are returned as 0. The bottom edge is
        always assumed to be beyond the 64th row in the gfx/map shared
        memory region.
        Args:
          x: The map cell x (column) coordinate. (0-127)
          y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map
            must have a Gfx.
          width: The width of the rectangle, as a number of tiles.
          height: The height of the rectangle, as a number of tiles.
        Returns:
          The rectangle of tile IDs, as a list of bytearrays.
        """
        assert 0 <= x <= 127
        assert 1 <= width
        assert 1 <= height
        assert ((0 <= y + height <= 32) or
                ((0 <= y + height <= 64) and self._gfx is not None))
        result = []
        for tile_y in range(y, y + height):
            row = bytearray()
            for tile_x in range(x, x + width):
                # NOTE(review): given the assert above, tile_y can never
                # exceed 63, so the tile_y > 63 branch appears unreachable.
                if (tile_y > 63) or (tile_x > 127):
                    row.append(0)
                else:
                    row.append(self.get_cell(tile_x, tile_y))
            result.append(row)
        return result
    def set_rect_tiles(self, rect, x, y):
        """Writes a rectangle of tiles to the map.
        If writing the given rectangle at the given coordinates causes
        the rectangle to extend off the edge of the map, the remainder
        is discarded.
        Args:
          rect: A rectangle of tile IDs, as an iterable of iterables of IDs.
          x: The map tile x coordinate (column) of the upper left corner to
            start writing.
          y: The map tile y coordinate (row) of the upper left corner to
            start writing.
        """
        for tile_y, row in enumerate(rect):
            for tile_x, val in enumerate(row):
                # NOTE(review): the y bound here is 127, but set_cell asserts
                # y <= 63, so rows 64-127 raise AssertionError instead of
                # being silently discarded as the docstring promises. Confirm
                # intended behavior before changing either bound.
                if ((tile_y + y) > 127) or ((tile_x + x) > 127):
                    continue
                self.set_cell(tile_x + x, tile_y + y, val)
    def get_rect_pixels(self, x, y, width=1, height=1):
        """Gets a rectangle of map tiles as pixels.
        This is similar to get_rect_tiles() except the tiles are
        extracted from Gfx data and returned as a rectangle of pixels.
        Just like PICO-8, tile ID 0 is rendered as empty (all 0's),
        not the actual tile at ID 0.
        Args:
          x: The map cell x (column) coordinate. (0-127)
          y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map
            must have a Gfx.
          width: The width of the rectangle, as a number of tiles.
          height: The height of the rectangle, as a number of tiles.
        Returns:
          The rectangle of pixels, as a list of bytearrays of pixel colors.
        """
        assert self._gfx is not None
        assert 0 <= x <= 127
        assert 1 <= width
        assert 1 <= height
        assert 0 <= y + height <= 64
        tile_rect = self.get_rect_tiles(x, y, width, height)
        result = []
        for tile_row in tile_rect:
            # One bytearray per pixel row of the 8-pixel-tall tile strip.
            pixel_row = [bytearray(), bytearray(), bytearray(), bytearray(),
                         bytearray(), bytearray(), bytearray(), bytearray()]
            for id in tile_row:
                if id == 0:
                    # Tile 0 renders as transparent/empty, like PICO-8.
                    sprite = [bytearray(b'\x00' * 8)] * 8
                else:
                    sprite = self._gfx.get_sprite(id)
                for i in range(0, 8):
                    pixel_row[i].extend(sprite[i])
            for i in range(0, 8):
                result.append(pixel_row[i])
        return result
| StarcoderdataPython |
1692110 | <reponame>EdgarCastillo101/Python
# what's a list? lol
# NOTE(review): `copy` is imported but never used below; the shallow-copy
# section uses list.copy() instead. copy.deepcopy would fix the shared
# nested-list behavior demonstrated later.
import copy
ages = [20, 25, 20] # this is a list
names = ["Edgar, <NAME>"] # this is also a list
food = ['chicken', 'pollo', 1738] # did you know that this is also a list
print(ages)
print(names)
print(food)
# indexing (again)
print(ages[0]) # grabs the first element
# how to update a list element
ages[0] = 5
ages[1] = 10
ages[2] = 15
print(ages) # Updated!
# What about slicing?
print(ages[1:]) # It works! #grabs all elements except for the first element lol.
ages[1:] = [6, 7]
print(ages) # it worked?
# this method helps you with updating data from list in a quicker way.
# how to copy list?
names = ['Edgar', 'Steven', 'Jose']
names2 = names[:] # proper way instead of (names2 = names) way.
print(names2)
# here's another method to copy lists
names1 = ['Edgar', 'Steven', 'Jose']
names2 = names1.copy()
names2[0] = 'goat status'
print(names2)
# Nested list
my_favorite_things = ["music", 18, ["HBO Max", "Hulu"]]
# The third element is actually a list itself.
streaming = my_favorite_things[2]
print(streaming) # we get the second list
print(streaming[1]) # we grab the Hulu element from the second list
# a more useful way of grabbing an element from a nested list is like so:
print(my_favorite_things[2][1]) # goat method lol.
#first [] is outer list. second [] is inner list.
#This can be repeated even deeper.
nested_list = [[[[["deep dive"]]]]]
print(nested_list[0][0][0][0][0]) # Got it!
# shallow copy
# This means that any complex types will not be copied but rather available in both
# The easiest way to understand this is with an example.
my_favorite_things = ["music", 18, ["HBO Max", "Hulu"]]
my_favorite_things2 = my_favorite_things.copy()
# now, modify the original
my_favorite_things[2][0] = "Spotify"
print(my_favorite_things2) # now has Spotify instead of HBO Max
# combining lists
msg = "he" "llo" # '+' is optional in strings (adjacent literals concatenate)
print(msg)
good = ['chicken', 'pollo', 'pollo loco']
bad = ['chocolate', 'chocalate chicken', 'chocolate soup']
average_good = good + bad
print(average_good)
3304025 | # -*- coding: utf-8 -*-
"""Support to extract sentences from a text.
This module provides functions to extract sentences from a text. Any string of
characters that ends with a terminating punctuation mark such as a period,
exclamation or question mark followed by a whitespace character is
considered to be a sentence, regardless of its grammatical correctness.
Analyzed texts are assumed to be in English; all functions were written and
tested with that assumption. Where decisions had to be made to respect American
or British English conventions, the former was chosen.
The solutions to sentence extraction presented in this module rely heavily
on regular expressions but not on NLP or machine-learning algorithms. Perhaps
because of this, the function 'get_sentences' does not always return sentences
a reader would consider 'complete' when looking upon them. There are
several situations where this may occur:
1) In sentences containing dialogue
"This is ridiculous! What do you mean there's no pizza left?" Marcus asked.
Normally, readers would consider this to be a single, complete sentence while
acknowledging the sentences nested inside. The function 'get_sentences'
doesn't make any such distinction and returns the list
[ '"This is ridiculous!',
'What do you mean there's no pizza left?',
'" Marcus asked.'
]
2) In sentences that end with a terminating punctuation mark but with no
whitespace character afterward.
As mentioned above, a whitespace character after a terminating punctuation
mark is required for the module to detect a sentence. A citation symbol or
number immediately after a terminating punctuation mark will prevent this.
For example, in the invented paragraph below, 'get_sentences' should return
two sentences; it will only return one: the whole paragraph.
e.g. The moon is made of green cheese.[6] Scientists discovered this fact in
10 BCE.
Return value of 'get_sentences':
['The moon is made of green cheese.[6] Scientists discovered this fact in
10 BCE.']
3) In sentences that contain 'unfamiliar' abbreviations ending with a period.
As might be suspected, abbreviations such as 'Mr.' or 'U.S.S.R.' might cause
end of sentences to be detected when they shouldn't be. To avoid this,
the module uses a list of abbreviations in the file "abbreviations.txt": any
abbreviation in this file will not be treated as an end of sentence and
ignored as such.
For example, the honorific 'Dr.' is included in 'abbreviations.txt'.
Consequently, the sentence
Dr. Dunne does dissections diligently.
will be parsed as a single sentence,
['Dr. Dunne does dissections diligently.']
whereas the abbreviation 'Bx.', which is not in the abbreviations file, will
result in a sentence being detected prematurely; two 'sentences' are returned.
Bx. Barry borrows bananas.
will be parsed as
['Bx.', 'Barry borrows bananas.']
4) In sentences that end with a 'familiar' abbreviation.
Unfortunately, the fix for the abbreviation above creates a new problem with
sentences that end in an abbreviation. For example, if the abbreviation 'Fl.'
is in 'abbreviations.txt', then the following text
Welcome the Fl. It's the best.
won't be broken into two sentences, but will be parsed as one: the
period in 'Fl.' is not seen as the terminating punctuation mark, but is rather
skipped because the parsing algorithm tells it too!
The only exception to this is if the abbreviation is at the very end of
the text: it is then processed as expected.
5) Sentences containing grawlixes.
E.g. The text
This #$@&%*! module doesn't do what it's supposed to!
would be parsed by get_sentences as
['This #$@&%*!', 'module doesn't do what it's supposed to!']
Note:
The file 'abbreviations.txt' is the product of a separate web-scraping
script 'abbrevscrape.py'. Should you wish to generate the abbreviations file
yourself, its online repository can be found at
https://github.com/dunnesquared/abbrevscrape.
"""
import os # getcwd, path.join, path.dirname, path.realpath
import sys # getsizeof, exit, stderr
import re # sub
import textwrap # dedent
import string # punctuation
#==============================SETTING MAX SIZE================================
# Arbitrary text size set to avoid program from gobbling up too much memory
# Set to what you will...
MAX_TEXTSIZE = 100 * 1024 * 1024 # 100 MB
# Check global constant just in case the bizarre happens...
if MAX_TEXTSIZE < 0:
raise ValueError("MAX_TEXTSIZE set to value less than zero.")
#==============================REGEX GLOBALS===================================
# Unicode general punctuation encodes several punctuation marks that can
# appear at the end of a sentence. Module should handle '⁇' no differently
# than '??'
# Other possible end-of-sentence punctuation marks
DOT_1LEADER = '․'
DOT_2LEADER = '‥'
ELLIPSIS = '…'
DOUBLE_EXCLAM = '‼'
DOUBLE_Q = '⁇'
QEXCLAM = '⁈'
EXCLAMQ = '⁉'
# Double quotation marks
H_DQUOTE = '‟' # high double quote
LBIG_DQUOTE = '❝'
RBIG_DQUOTE = '❞'
FW_DQUOTE = '"' # full-width double-quote
LPRIME_DQUOTE = '〝'
RPRIME_DQUOTE = '〞'
# Single quotation marks
# Module uses the American convention that single quotes should only be
# used within double quotes. Moreover, there is currently no functionality
# to parse a sentence within a sentence. E.g. The quote
# "Don said, 'Go to work!' to Sheila." will be parsed as a single sentence.
# The general punctuation marks are left here, however, for possible future
# versions that could handle this as two separate sentences.
H_SQUOTE = '‛' # High single quote
L_SQUOTE = '‘'
R_SQUOTE = '’' # same as apostrophe
LH_SQUOTE = '❛'
RH_SQUOTE = '❜'
# Group them for concise regular expressions below
LEADERS = DOT_1LEADER + DOT_2LEADER + ELLIPSIS
QEX = DOUBLE_EXCLAM + DOUBLE_Q + QEXCLAM + EXCLAMQ
DQUOTES = (H_DQUOTE + LBIG_DQUOTE + RBIG_DQUOTE + FW_DQUOTE + LPRIME_DQUOTE
+ RPRIME_DQUOTE)
SQUOTES = H_SQUOTE + L_SQUOTE + R_SQUOTE + LH_SQUOTE + RH_SQUOTE
# End-of-sentence patterns to help determine the end of a sentence
REGEX_PERIOD = r'[\.' + LEADERS + r']\s'
REGEX_QEXMARK = r'[\?!' + QEX + r']\s'
REGEX_QUOTE = r'[\.\?!—' + LEADERS + QEX + r']"\s'
# These will be replaced by a simpler, straight single/double quotes: ' / "
REGEX_DQUOTE = r'[\“\”' + DQUOTES + ']'
# To be removed when counting words
REGEX_ALLSYMOBLS = (r'[' + string.punctuation + LEADERS + QEX + DQUOTES
+ SQUOTES +']')
#===================INITIALIZING ABBREVIATIONS SET=============================
# Path of abbreviations.txt file
_ABBREVIATION_DATA_FILEPATH = "./data/abbreviations.txt"
def _get_dir():
"""Returns absolute path of the directory where module exists.
Returns:
dir (str): the unique ('canonical') absolute path of the directory
containing the module (i.e. no symbolic links in path).
"""
# Get the current working directory in Terminal
# when you try to launch the module as a script
cwd = os.getcwd()
# Get the name of the directoy where the module exists
module_dir = os.path.dirname(__file__)
# Intelligently cocantenate the two
joinedpath = os.path.join(cwd, module_dir)
# Get rid of any possible symbolic links found along and return the
# absolute path
return os.path.realpath(joinedpath)
def _load_abbreviations():
    """Read 'abbreviations.txt' and return its entries as a list.

    Returns:
        list: Abbreviation strings, one per line of the data file.
    """
    try:
        # The data file lives relative to this module's own directory.
        path = os.path.join(_get_dir(), _ABBREVIATION_DATA_FILEPATH)
        with open(path, "r") as fin:
            entries = fin.read().split('\n')
        # The editor saves a trailing newline, which split() turns into an
        # extra empty entry at the end — drop it.
        entries.pop()
    except OSError as err:
        print("OSError: {0}".format(err), file=sys.stderr)
        print("Abbreviations not loaded. Exiting program.", file=sys.stderr)
        sys.exit()
    return entries
# Common abbreviations found in the English language, loaded from the
# bundled abbreviations.txt. Stored as a set so _is_abbreviation can run
# efficient isdisjoint() checks against sentence tokens.
ABBREVIATIONS = set(_load_abbreviations())
#==============================CLASSES=========================================
class NotInTextError(Exception):
    """Raised when a substring cannot be located in a text.

    Attributes:
        message (str): Human-readable description of the failed lookup.
    """

    def __init__(self, message):
        """Store *message* and initialize the base Exception with it."""
        self.message = message
        super().__init__(message)
#==============================PUBLIC FUNCTIONS================================
def get_sentences(text):
    """Extract the sentences of *text* and return them as a list.

    A sentence is any character run ending with a terminating punctuation
    mark (period, question/exclamation mark, their Unicode variants, or
    such a mark followed by a closing double quote) that is itself
    followed by whitespace. See the module docstring for the known
    limitations of this heuristic.

    Args:
        text (str): Text from which sentences are to be extracted.

    Returns:
        list: The sentences found, in order of appearance.
    """
    # Refuse absurdly large inputs before doing any work.
    _too_big(text)

    # Normalize quotes and append a trailing space so the last sentence
    # is detectable.
    text = _clean_text(text)

    sentences = []
    # Begin scanning at the first non-whitespace character.
    cursor = offset(text)

    while cursor < len(text) - 1:
        mark = _get_first_punctuation_mark(text, cursor)
        if mark == -1:
            # No terminating punctuation left — scanning is finished.
            break
        # A closing double quote directly after the mark belongs to the
        # sentence, so extend the end position to include it.
        end = mark + 1 if text[mark + 1] == '"' else mark
        sentences.append(text[cursor:end + 1])
        # The next sentence starts right after this one ends.
        cursor = end + 1

    return sentences
def get_words(sentence):
    """Return the words of *sentence*, stripped of punctuation.

    Punctuation marks such as . , ! ? : ; and double quotes are removed;
    hyphens and apostrophes are kept. En and em dashes act as word
    separators rather than joining their neighbors.

    Args:
        sentence (str): Sentence from which words are to be extracted.

    Returns:
        list: Sequence of words from the given sentence.
    """
    # Strip every symbol in the punctuation character class so that a
    # string like "? ^ & * -" contributes no words at all.
    stripped = re.sub(REGEX_ALLSYMOBLS, '', sentence)

    # The backslash survives the class substitution above, so remove it
    # separately.
    stripped = stripped.replace('\\', '')

    # En dashes denote ranges (1914–1918) and em dashes mark asides; both
    # glue two words together, so turn them into spaces.
    if '–' in stripped or '—' in stripped:
        stripped = stripped.replace('–', ' ').replace('—', ' ')

    # split() with no delimiter splits on runs of whitespace.
    return stripped.split()
def find_start_end(substring, text, start_search=0):
    """Return the (start, end) indices of *substring* within *text*.

    The text is cleaned the same way get_sentences cleans it (curly
    double quotes normalized to straight ones) so substrings taken from
    parsed sentences match again.

    Args:
        substring (str): Substring to search within another string.
        text (str): The string that will be searched for the substring.
        start_search (int): The index where the search in text should begin.

    Raises:
        ValueError: *substring* or *text* is empty/whitespace, or
            *start_search* is negative.
        NotInTextError: *substring* does not occur in *text* at or after
            *start_search*.

    Returns:
        (start_pos, end_pos): An integer tuple representing the start and
        end indices of the substring in the searched (cleaned) string.
    """
    # Don't bother to find empty substrings in possibly empty texts.
    if len(substring.strip()) == 0 or len(text.strip()) == 0:
        raise ValueError("ValueError in textanalysis.find_start_end:" +
                         "empty string(s) passed to parameters 'substring' " +
                         "or 'text'.")

    # Substrings came from cleaned text; clean again so matches line up.
    text = _clean_text(text)

    # Normalize curly quotes in the substring too.
    substring = re.sub(r'[\“\”]', '"', substring)

    # Make sure our start position is something sensible.
    if start_search < 0:
        raise ValueError("ValueError in textanalysis.find_start_end:" +
                         "argument for parameter 'start_search' less than" +
                         "zero.")

    start_pos = text.find(substring, start_search, len(text) + 1)

    # Bug fix: str.find returns -1 both when the substring is absent and
    # when it occurs only BEFORE start_search; the old code then returned
    # a bogus (-1, len(substring)-1) tuple. Treat both cases as not found.
    if start_pos == -1:
        raise NotInTextError(f"Substring '{substring}' not found in text.'")

    end_pos = start_pos + len(substring)
    return (start_pos, end_pos)
def offset(text):
    """Return the index of the first non-whitespace character in *text*.

    Args:
        text (str): String to be analyzed for leading whitespace.

    Returns:
        int: Index of the first non-whitespace character; -1 when the
        string is empty or entirely whitespace.
    """
    # \S matches any non-whitespace character. (The IGNORECASE flag the
    # original search carried was a no-op for this pattern and is gone.)
    match = re.search(r'\S', text)
    return match.start() if match else -1
# =============================PRIVATE FUNCTIONS===============================
def _clean_text(text):
    """Normalize *text* so the sentence parser can process it.

    Args:
        text (str): Unedited text to be parsed.

    Returns:
        str: Edited text ready for parsing.
    """
    # Leave empty strings untouched.
    if not text:
        return text

    # The parser only recognizes straight double quotes.
    normalized = re.sub(REGEX_DQUOTE, '"', text)

    # A trailing space guarantees the final sentence's terminating mark is
    # followed by whitespace, so the last sentence gets picked up too.
    return normalized + " "
def _get_first_punctuation_mark(text, start):
    """Return the index of the first terminating punctuation mark at or
    after *start*.

    Example: in "Hello, Mr. Darcy! Would you like a cup of tea?" a scan
    from index 0 must pick the exclamation mark — the period in "Mr."
    belongs to a known abbreviation and is skipped.

    Args:
        text (str): text being parsed.
        start (int): index where to start search in text.

    Returns:
        index (int): index of first terminating punctuation mark found; -1
                     if no punctuation mark found
    """
    end_of_text = len(text)

    # Find the first occurrence of each candidate terminator. The regex
    # positions are relative to text[start:], hence the `start +` offset.
    # Period (or Unicode leader/ellipsis) followed by whitespace:
    pos_period = 0
    match = re.search(REGEX_PERIOD, text[start:])
    pos_period = start + match.start() if match else -1

    # Exclamation or question mark (incl. Unicode doubles) + whitespace:
    pos_exqmark = 0
    match = re.search(REGEX_QEXMARK, text[start:])
    pos_exqmark = start + match.start() if match else -1

    # Terminator followed by a closing double quote and whitespace:
    pos_quote = 0
    match = re.search(REGEX_QUOTE, text[start:])
    pos_quote = start + match.start() if match else -1

    # Skip periods that belong to known abbreviations.
    # new_start holds the index right after the period of an abbreviation
    # that should be 'skipped'.
    new_start = start
    while True:
        # Is there any meaningful text left after this period?
        not_blank = bool(text[pos_period+1:].strip())
        # Abbreviations at the very end of the text are NOT skipped: they
        # are treated as the end of a sentence (see module docstring).
        if _is_abbreviation(text, new_start, pos_period) and not_blank:
            new_start = pos_period + 1
            # NOTE(review): this re-scan only looks for an ASCII '. ',
            # not the Unicode leaders that REGEX_PERIOD matches — confirm
            # whether leader-terminated abbreviations need handling too.
            pos_period = text.find('. ', new_start, end_of_text+1)
        else:
            break

    # If the first character after the closing quote is lower case, the
    # quotation sits mid-sentence; don't treat it as a sentence end.
    if pos_quote != -1:  # quote found
        pos_quote = _ignore_quote(pos_quote, text)

    # Candidate positions for the end of the current sentence.
    pos_list = [pos_period, pos_exqmark, pos_quote]

    # -1 ("not found") would always win min() below — drop those first.
    pos_list = list(filter(lambda x: x != -1, pos_list))

    # The earliest remaining candidate ends the current sentence, assuming
    # there is a candidate at all.
    index = min(pos_list) if pos_list else -1
    return index
def _is_abbreviation(text, start, end):
    """Tell whether text[start:end+1] contains a known abbreviation.

    Only abbreviations listed in the bundled "abbreviations.txt" (loaded
    into the module-level ABBREVIATIONS set) count as found.

    Args:
        text (str): String being scanned for an abbreviation.
        start (int): Index where the search in *text* begins.
        end (int): Index where the search in *text* ends (inclusive).

    Returns:
        bool: True if a known abbreviation occurs, False otherwise.
    """
    # Compare whole whitespace-separated tokens, not substrings: in
    # "Back in the U.S.S.R." the entry 'U.S.' must not count as a hit.
    tokens = set(text[start:end + 1].split())

    # A non-empty intersection with the known set means one was found.
    return not tokens.isdisjoint(ABBREVIATIONS)
def _ignore_quote(pos, text):
"""Check whether quote is truly end of a sentence.
The end of a quotation may not be the end of the sentence. This function
does a 'weak' test to find out: if the next non-whitespace character is
lower case, then you don't have a full-sentence. As such, the quote
does not mark the end of a sentence; set its position to -1.
Args:
pos (int): Relevant index near where quote detected.
text (str): Text being parsed.
Returns:
-1 (int): if quote is not the end of the sentence.
pos (int): if quote is the end of the sentence.
"""
# Don't want to look at something outside the bounds of text
if (pos + 3) < len(text):
# The 'weak' criterion...
if text[pos + 3].islower():
return -1
# Quote 'may' be end of sentence
return pos
def _too_big(text):
    """Check that *text* does not exceed the arbitrary size limit.

    Args:
        text (str): Input text to be parsed.

    Raises:
        MemoryError: *text*'s memory size is greater than MAX_TEXTSIZE.

    Returns:
        bool: False when *text* is within the limit.
    """
    size = sys.getsizeof(text)
    if size > MAX_TEXTSIZE:
        # Report both figures in kilobytes. Bug fix: the limit was divided
        # by 1000 while the text size used 1024, so the two "kB" numbers
        # in the error message used different units.
        max_kb = MAX_TEXTSIZE / 1024
        text_kb = size / 1024
        err = textwrap.dedent('''
                Python string object 'text' greater than MAX_TEXTSIZE:
                MAX_TEXTSIZE:\t\t\t{:10.4f} kB
                'text'object size:\t\t{:10.4f} kB'''.format(max_kb, text_kb))
        raise MemoryError(err)

    # Everything good.
    return False
# ==================================MAIN=======================================
if __name__ == "__main__":
    # Running the module directly prints a short quickstart tutorial
    # demonstrating the public API (get_sentences, get_words,
    # find_start_end, offset) with canned example transcripts.
    print("\ntextanalysis - module for extracting sentences from text " +
          "and more!")
    print("\nQuickstart Tutorial Examples:")
    print("====================")
    print("\nget_sentences:")
    print("--------------")
    print("Scans input string for sentences; returns list of sentences.")
    print("\n>>> text = 'There once was a man from Nantucket. He liked " +
          "living in a bucket! What about you?'")
    print(">>> sent_list = get_sentences(text)")
    print(">>> sent_list")
    print("['There once was a man from Nantucket.', 'He liked living " +
          "in a bucket!', 'What about you?']")
    print("")
    print("\nget_words:")
    print("----------")
    print("Scans input string for words; returns list of words " +
          "without certain punctuation marks.")
    print("\n>>> text = \"Dog-lovers, like me, hate cats—false!\"")
    print(">>> words = get_words(text)")
    print(">>> words")
    print("[Dog-lovers, like, me, hate, cats, false]")
    print("")
    print("\nfind_start_end:")
    print("---------------")
    print("Returns start and end indices of a substring in a string.")
    print("\n>>> text = \"Your pizza is delicious.\"")
    print(">>> find_start_end(\"pizza\", text, start_search=0)")
    print("(5, 9)")
    print("")
    print("\noffset:")
    print("-------")
    print("Returns index of first non-whitespace character.")
    print("\n>>> text = \"    There are four spaces before this sentence.\"")
    print(">>> offset(text)")
    print("4")
    print("")
| StarcoderdataPython |
88222 | <reponame>Shukla-G/cray-cogs
import datetime
import logging
import time
from collections import Counter
from functools import reduce
from typing import Dict, Optional
import discord
from discord.ext import tasks
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import box, humanize_list
from topgg import DBLClient, WebhookManager
from .models import VoteInfo
# Module-level logger for this cog.
# NOTE(review): `global` at module scope is a no-op — confirm the statement
# can be dropped.
global log
log = logging.getLogger("red.craycogs.VoteTracker")
class VoteTracker(commands.Cog):
    """Track votes for your bot on [Top.gg](https://top.gg)"""

    __version__ = "1.5.0"
    __author__ = ["crayyy_zee#2900"]

    def __init__(self, bot: Red, token: str, password: str):
        # Per-user config: lifetime vote count plus the timestamp after
        # which the vote role may be removed. Global config: role, log
        # channel and guild ids.
        self.bot: Red = bot
        self.config: Config = Config.get_conf(None, 1, True, "VoteTracker")
        self.config.register_user(votes=0, vote_cd=None)
        self.config.register_global(role_id=None, chan=None, guild_id=None)
        self.topgg_client: DBLClient = DBLClient(
            bot,
            token,
            True,
        )
        # Expose the client on the bot so other cogs can reuse it.
        bot.topgg_client = self.topgg_client
        # Webhook endpoint top.gg calls when someone votes (path "/dbl").
        self.topgg_webhook = WebhookManager(bot).dbl_webhook("/dbl", password)
        # In-memory mirror of the per-user config:
        # {user_id: {"votes": int, "vote_cd": int | None}}.
        # Flushed back to config in _unload().
        self.cache: Dict[int, Dict[str, int]] = {}

    def format_help_for_context(self, ctx: commands.Context) -> str:
        """Append cog version and author to Red's default help output."""
        pre_processed = super().format_help_for_context(ctx) or ""
        n = "\n" if "\n\n" not in pre_processed else ""
        text = [
            f"{pre_processed}{n}",
            f"Cog Version: **{self.__version__}**",
            f"Author: {humanize_list(self.__author__)}",
        ]
        return "\n".join(text)

    async def get_guild(self) -> Optional[discord.Guild]:
        """Return the configured vote-role guild, or None if unset."""
        gid = await self.config.guild_id()
        if gid:
            return self.bot.get_guild(gid)
        else:
            return None

    async def red_delete_data_for_user(self, *, requester, user_id):
        """Data-deletion hook: wipe a user's stored vote data if cached."""
        # NOTE(review): the [0] indexing raises IndexError when user_id is
        # not in the cache — consider self.cache.get(user_id) instead.
        user = [x for x in self.cache if (x == user_id)][0]
        if user:
            await self.config.user_from_id(user).clear()
            log.debug("Deleted user from cache.")
            return True
        else:
            return False

    async def _populate_cache(self):
        """Fill self.cache from config, falling back to top.gg monthly
        data when the config holds no users yet."""
        users = await self.config.all_users()
        if users:
            self.cache = {uid: v for uid, v in users.items()}
            log.debug("Transferred config to cache.")
        else:
            self.cache = {
                k: {"votes": v, "vote_cd": None}
                for k, v in (await self.get_monthly_votes()).items()
            }
            log.debug("Populated cache.")

    async def get_monthly_votes(self):
        """
        Credits to Predä for this
        """
        # Tally this month's votes per user id from the top.gg API,
        # ordered most-voted first.
        data = await self.topgg_client.get_bot_votes()
        votes_count = Counter()
        for user_data in data:
            votes_count[user_data["id"]] += 1
        final = {}
        for user_id, value in votes_count.most_common():
            final[user_id] = value
        return final

    @property
    def total_votes(self):
        # Sum the "votes" field across the cache. reduce's first step sees
        # two dicts, later steps an int accumulator — hence the isinstance.
        # NOTE(review): with exactly one cached user reduce returns the
        # inner dict itself, not an int; with an empty cache it raises
        # TypeError — confirm both cases are acceptable to callers.
        return reduce(
            lambda x, y: x["votes"] + y["votes"] if isinstance(x, dict) else x + y["votes"],
            self.cache.values(),
        )

    @classmethod
    async def initialize(cls, bot: Red):
        """Async constructor: read the top.gg API tokens, start the
        webhook server on port 5400 and populate the cache.

        Returns None (after DMing the owners with setup instructions)
        when the api_key or webhook password is missing.
        """
        tokens = await bot.get_shared_api_tokens("topgg")
        key = tokens.get("api_key")
        password = tokens.get("pass")
        if not key or not password:
            await bot.send_to_owners(
                f"The cog `VoteTracker` requires an api token and webhook password from top.gg to function. "
                "To get these, you must visit the top.gg website, go to your profile, click on your bot's edit buttons "
                "Go to the webhooks section and click the `reveal` button to get your token. "
                "Scroll down to find the `Webhook url` field and replace it with `https://<Your-vps-ip-here>:5400/dbl`. "
                "Below that will be the password field and set that to whatever you want."
                "Then use the following command on your bot: `[p]set api topgg api_key,<api_token> pass,<password>` "
                "to add the token to the bot's shared tokens and then try reloading the cog "
                "again. If it still doesnt work, contact crayyy_zee#2900. "
                "\nHere's a little gif showing where everything is present: "
                "\nhttps://media.giphy.com/media/XB4JIFSPvC7WurI62B/giphy.gif"
            )
            return
        else:
            s = cls(bot, key, password)
            await s.topgg_webhook.run(5400)
            await s._populate_cache()
            return s

    async def _unload(self):
        """Stop the webhook server and flush the cache back to config."""
        await self.topgg_webhook.close()
        if self.cache:
            for k, v in self.cache.items():
                await self.config.user_from_id(k).set(v)
            log.debug("Transferred cache to config.")

    def cog_unload(self):
        # Red calls this synchronously; schedule the async cleanup.
        self.bot.loop.create_task(self._unload())

    @staticmethod
    def sort_dict(d: dict):
        """Return a copy of *d* sorted by value, highest first."""
        d = sorted(d.items(), key=lambda x: x[1], reverse=True)
        d = {i[0]: i[1] for i in d}
        return d

    @commands.command(name="listvotes", aliases=["lv"])
    @commands.guild_only()
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def lvotes(self, ctx: commands.Context):
        """
        List all votes **[botname]** has recieved in a leaderboard."""
        if not self.cache:
            return await ctx.send("No votes have been recieved so far.")
        # Leaderboard: user id -> vote count, sorted descending.
        lb = self.sort_dict({k: v["votes"] for k, v in self.cache.items()})
        embed = discord.Embed(
            title=f"All votes for {self.bot.user.name.title()}", color=await ctx.embed_colour()
        )
        for i, (k, v) in enumerate(lb.items(), 1):
            k = await self.bot.get_or_fetch_user(k)
            embed.add_field(
                name=f"{i}. {k.name}",
                value=f"Amount of votes: \n**{box(f'{v}')}**",
                inline=False,
            )
        embed.set_footer(
            text=f"Total Votes: {self.total_votes}",
            icon_url=ctx.author.avatar_url,
        )
        await ctx.send(embed=embed)

    @commands.command(name="listmonthlyvotes", aliases=["lmv"])
    @commands.guild_only()
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def lmvotes(self, ctx: commands.Context):
        """
        List this month's votes for **[botname]**"""
        lb = await self.get_monthly_votes()
        lb = self.sort_dict(lb)
        embed = discord.Embed(
            title=f"This month's top votes for {self.bot.user.name.title()}",
            color=await ctx.embed_colour(),
        )
        for i, (k, v) in enumerate(lb.items(), 1):
            k = await self.bot.get_or_fetch_user(k)
            embed.add_field(
                name=f"{i}. {k.name}",
                value=f"Amount of votes: \n**{box(f'{v}')}**",
                inline=False,
            )
        embed.set_footer(
            text=f"Total Monthly Votes: {reduce(lambda x, y: x + y, lb.values())}",
            icon_url=ctx.author.avatar_url,
        )
        await ctx.send(embed=embed)

    @commands.is_owner()
    @commands.command(name="setvoterole", aliases=["svr", "voterole"])
    async def svr(self, ctx: commands.Context, role: discord.Role):
        """
        Set the role to be assigned to the user upon recieving a vote from them.
        This command can only be run by bot owners."""
        # The invoking guild becomes the guild used for role management.
        await self.config.role_id.set(role.id)
        await self.config.guild_id.set(ctx.guild.id)
        await ctx.send(f"Set the role for voting to {role.name}")

    @commands.is_owner()
    @commands.command(name="setvotechannel", aliases=["svc", "votechannel"])
    async def svc(self, ctx: commands.Context, channel: discord.TextChannel):
        """
        Set the channel where vote logs will be sent.
        This command can only be run by bot owners."""
        await self.config.chan.set(channel.id)
        return await ctx.send(f"Set the channel for vote logging to {channel.name}")

    @commands.command(name="getuservotes", aliases=["uservotes"])
    @commands.guild_only()
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def guv(self, ctx: commands.Context, user: discord.User):
        """
        Check how many times a certain user has voted for **[botname]**."""
        # setdefault also seeds a cache entry for users never seen before.
        user_votes = self.cache.setdefault(user.id, {"votes": 0, "vote_cd": None}).get("votes")
        if user_votes == 0:
            return await ctx.send(f"{user.name} has not voted yet.")
        return await ctx.send(
            f"{user.name} has voted for **{self.bot.user.name}** *{user_votes}* time{'s' if user_votes > 1 else ''}."
        )

    @commands.Cog.listener()
    async def on_dbl_vote(self, data: dict):
        """Handle a vote payload delivered by the top.gg webhook."""
        vote = VoteInfo(self.bot, data)
        user = vote.user
        user_mention = user.mention
        user_id = user.id
        g = await self.get_guild()
        # top.gg test votes are logged but never counted.
        if vote.type.name == "test":
            log.info(f"Test vote recieved from: {user_mention} (`{user_id}`)")
            return
        # Increment the user's lifetime vote count.
        u_data = self.cache.setdefault(user_id, {"votes": 0, "vote_cd": None})
        u_data.update({"votes": u_data["votes"] + 1})
        # Grant the configured vote role when the voter is in the guild.
        if (r := await self.config.role_id()) and g:
            if mem := g.get_member(user_id):
                role = g.get_role(r)
                if role:
                    try:
                        await mem.add_roles(role)
                    except Exception:
                        pass
        role_recieved = (
            f"\n{user_mention} has recieved the role: <@&{r}>"
            if (r := await self.config.role_id())
            else ""
        )
        embed = discord.Embed(
            title="Vote recieved on Top.gg!",
            description=f"{user_mention} (`{user_id}`) has voted for **{self.bot.user}**"
            f"\nTheir total votes are: {self.cache.get(user_id)['votes']}" + role_recieved,
            color=0x303036,
        )
        embed.set_footer(text=f"Total Votes: {self.total_votes}")
        embed.timestamp = datetime.datetime.now()
        # The granted role becomes removable 12 hours from now.
        u_data["vote_cd"] = int(time.time() + (3600 * 12))
        self.cache[user_id] = u_data  # just to make sure data is actually updated in cache
        if chanid := await self.config.chan():
            await self.bot.get_channel(chanid).send(embed=embed)
        log.info(f"Vote recieved from: {user_mention} (`{user_id}`)")

    @tasks.loop(minutes=10)
    async def remove_role_from_members(self):
        """Periodically strip the vote role once a member's 12h window
        has elapsed.

        NOTE(review): this loop is defined but no .start() call is
        visible in this file — confirm it is started elsewhere.
        """
        if not (g := await self.get_guild()):
            return
        # Ensure the full member list is available before scanning.
        await g.chunk()
        if not (r := await self.config.role_id()):
            return
        if not self.cache:
            return
        if not (role := g.get_role(r)):
            return
        for k, v in self.cache.items():
            if not (mem := g.get_member(k)):
                continue
            if not role in mem.roles:
                continue
            if not v["vote_cd"]:
                continue
            # Cooldown not yet expired — keep the role for now.
            if v["vote_cd"] > time.time():
                continue
            # Skip members the bot cannot manage (missing permission or
            # role hierarchy below the member's top role).
            if not g.me.guild_permissions.manage_roles:
                continue
            if not g.me.top_role.position > mem.top_role.position:
                continue
            mem: discord.Member
            await mem.remove_roles(role, reason="Automatic voter role removal after timeout.")
            self.cache[k]["vote_cd"] = None
| StarcoderdataPython |
1798819 | import sys
import argparse
from typing import List, Optional
from .lib import FileHelper
from .lib import TerminalStyle
from .converter.ConverterInterface import ConverterInterface
from .model.IntermediateLocalization import IntermediateLocalization
from .model.LocalizationFile import LocalizationFile
#--------------------
# properties
#--------------------
# Module-level CLI state shared by the conversion functions; populated by
# _parseArgsForConverting before a conversion runs.
sourceFilepath: str = ""  # path of the file to import
destinationDirectory: str = ""  # directory exported files are written to
importConverterIdentifier: str = ""  # identifier of the selected import converter
exportConverterIdentifier: str = ""  # identifier of the selected export converter
dryRun: bool = False  # if True, destination handling is skipped entirely
forceOverride: bool = False  # if True, an existing destination dir is removed first
#--------------------
# Starting point
#--------------------
def start(args: argparse.Namespace, converter: List[ConverterInterface]) -> None:
    """Run a conversion based on parsed CLI arguments.

    Validates the arguments (populating the module-level properties),
    imports the source file into the intermediate localization model and,
    if the import produced an IntermediateLocalization, exports it via
    the selected export converter.

    Args:
        args: Parsed command-line arguments.
        converter: Available converter implementations to choose from.
    """
    _parseArgsForConverting(args, converter)
    intermediate = _importToIntermediateLocalization(sourceFilepath, converter)
    if isinstance(intermediate, IntermediateLocalization):
        _exportToLocalizationFile(intermediate, converter)
#--------------------
# private helper
#--------------------
def _parseArgsForConverting(args: argparse.Namespace, converter: List[ConverterInterface]) -> None:
    """Validate the parsed CLI arguments and populate the module-level properties.

    Side effects: assigns every module-level property (sourceFilepath,
    destinationDirectory, importConverterIdentifier, exportConverterIdentifier,
    dryRun, forceOverride) and may create or replace the destination directory.
    Terminates the process via _handleError() on any validation failure.

    :param args: parsed argparse namespace (source, destination, importConverter,
                 exportConverter, dryRun, force, verbose)
    :param converter: all available converter implementations
    """
    # select and validate converter for export
    global exportConverterIdentifier
    exportConverterIdentifier = args.exportConverter
    selectedExportConverter = list(filter(lambda x: x.identifier() == exportConverterIdentifier, converter))
    if len(selectedExportConverter) == 0:
        _handleError("ERROR: Converter with identifier {} not found".format(exportConverterIdentifier))
    # validate source filepath
    global sourceFilepath
    sourceFilepath = args.source
    if not FileHelper.exists(sourceFilepath):
        _handleError("ERROR: Source does not exist")
    # select and validate converter for import: it must both match the source
    # file extension and the requested identifier
    global importConverterIdentifier
    importConverterIdentifier = args.importConverter
    extension = FileHelper.fileExtension(sourceFilepath)
    matchingImportConverter = list(
        filter(
            lambda x: x.fileExtension() == extension and x.identifier() == importConverterIdentifier,
            converter
        )
    )
    if len(matchingImportConverter) == 0:
        _handleError("ERROR: No matching converter found with identifier {} for fileextension {}".format(importConverterIdentifier, extension))
    else:
        importConverterIdentifier = matchingImportConverter[0].identifier()
    # save and handle dryRun argument
    global dryRun
    dryRun = args.dryRun
    if dryRun:
        if args.verbose:
            _printSummary(sourceFilepath, "dryRun", importConverterIdentifier, exportConverterIdentifier)
        # If dryRun is enabled, there is no need to process destination directory.
        return
    # save forceOverride argument which is used when saving to a filepath
    global forceOverride
    forceOverride = args.force
    # save and validate destination filepath
    global destinationDirectory
    destinationDirectory = args.destination
    if not FileHelper.exists(destinationDirectory):
        FileHelper.createDir(destinationDirectory)
    elif FileHelper.exists(destinationDirectory) and forceOverride:
        _handleWarning("Warning: Destination directory [{}] already exists. Overwriting it.".format(destinationDirectory))
        FileHelper.removeDir(destinationDirectory)
        FileHelper.createDir(destinationDirectory)
    else:
        # _handleError() already calls sys.exit(); the redundant exit() call
        # that followed it in the original was unreachable and has been removed.
        _handleError("Error: Destination directory [{}] already exists. Use flag -f to override it.".format(destinationDirectory))
    # At this point everything was validated and nothing can go wrong (hopefully).
    if args.verbose:
        _printSummary(sourceFilepath, destinationDirectory, importConverterIdentifier, exportConverterIdentifier)
def _importToIntermediateLocalization(
    sourceFilepath: str,
    converter: List[ConverterInterface]
) -> Optional[IntermediateLocalization]:
    """Parse the source file with the previously selected import converter."""
    # importConverterIdentifier was validated in _parseArgsForConverting, so
    # exactly one matching converter exists here.
    matching = [c for c in converter if c.identifier() == importConverterIdentifier]
    return matching[0].toIntermediate(sourceFilepath)
def _exportToLocalizationFile(
    intermediateLocalization: IntermediateLocalization,
    converter: List[ConverterInterface]
) -> None:
    """Run every matching export converter and handle each generated file."""
    exporters = [c for c in converter if c.identifier() == exportConverterIdentifier]
    for exporter in exporters:
        for generated in exporter.fromIntermediate(intermediateLocalization):
            _handleLocalizationFile(generated)
def _handleLocalizationFile(localizationFile: LocalizationFile) -> None:
    """Print the generated file on a dry run, otherwise write it to disk.

    The output path is the file's relative path below destinationDirectory.
    """
    # NOTE: the original declared `global dryRun` here, but `global` is only
    # needed for assignment -- reading a module-level name works without it.
    if dryRun:
        print(localizationFile.filecontent)
    else:
        destination = destinationDirectory + "/" + localizationFile.filepath
        _writeFile(destination, localizationFile.filecontent)
def _writeFile(path: str, content: str) -> None:
    """Write content to path, creating the parent directory first if needed."""
    directoryPath = FileHelper.directoryPath(path)
    # The original `if exists: pass / else: create` is inverted into a single
    # guard; behavior is unchanged.
    if not FileHelper.exists(path):
        FileHelper.createDir(directoryPath)
    FileHelper.writeFile(path, content)
#--------------------
# cli output
#--------------------
def _printSummary(
    sourceFilepath: str,
    destinationFilepath: str,
    importConverterIdentifier: str,
    exportConverterIdentifier: str
) -> None:
    """Log an overview of the selected paths and converters."""
    lines = [
        "Summary:",
        "input: {}".format(sourceFilepath),
        "destination: {}".format(destinationFilepath),
        "converter for import: {}".format(importConverterIdentifier),
        "converter for export: {}".format(exportConverterIdentifier),
    ]
    _handleInfo("\n".join(lines))
def _handleError(errorText: str) -> None:
    """Print errorText in the failure color and terminate the process."""
    print("{}{}{}".format(TerminalStyle.FAIL, errorText, TerminalStyle.ENDC))
    sys.exit()


def _handleWarning(warningText: str) -> None:
    """Print warningText in the warning color."""
    print("{}{}{}".format(TerminalStyle.WARNING, warningText, TerminalStyle.ENDC))


def _handleInfo(infoText: str) -> None:
    """Print infoText in green."""
    print("{}{}{}".format(TerminalStyle.GREEN, infoText, TerminalStyle.ENDC))
| StarcoderdataPython |
3368749 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAnaquin(RPackage):
    """The project is intended to support the use of sequins
    (synthetic sequencing spike-in controls) owned and made available
    by the Garvan Institute of Medical Research. The goal is to
    provide a standard open source library for quantitative analysis,
    modelling and visualization of spike-in controls."""

    homepage = "https://www.bioconductor.org/packages/Anaquin/"
    git = "https://git.bioconductor.org/packages/Anaquin.git"

    # NOTE(review): the commit hash was redacted to '<PASSWORD>' in this copy;
    # restore the real Bioconductor commit before using this recipe.
    version('1.2.0', commit='<PASSWORD>')

    # Bioconductor 3.5-era release; pinned to the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.2.0')
    # Runtime/build R package dependencies.
    depends_on('r-deseq2', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-locfit', type=('build', 'run'))
    depends_on('r-qvalue', type=('build', 'run'))
    depends_on('r-knitr', type=('build', 'run'))
    depends_on('r-rocr', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
1749586 | <reponame>ricklentz/2dimageto3dmodel
"""
https://www.haroldserrano.com/blog/quaternions-in-computer-graphics#:~:text=Quaternions%20are%20mainly%20used%20in,sequentially%20as%20matrix%20rotation%20allows.&text=Matrix%20rotations%20suffer%20from%20what%20is%20known%20as%20Gimbal%20Lock.
author: <NAME>
"""
import torch
from math import pow
class QuaternionOperations(object):
    """Hamilton quaternion arithmetic on torch tensors.

    A quaternion is a tensor whose last dimension has size 4, ordered as
    (scalar, x, y, z).  All operations broadcast over any leading batch
    dimensions.
    """

    def __init__(self):
        # Retained from the original implementation for backward compatibility.
        print("Quaternion Operations called.")

    def quaternion_addition(self, q1, q2):
        """Return the component-wise sum q1 + q2.

        :param q1: first quaternion
        :param q2: second quaternion
        :return: quaternion q1 + q2
        """
        a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1, dim=-1)
        b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2, dim=-1)
        return torch.stack(
            [a_scalar + b_scalar, a_vecx + b_vecx, a_vecy + b_vecy, a_vecz + b_vecz],
            dim=-1
        )

    def quaternion_subtraction(self, q1, q2):
        """Return the component-wise difference q1 - q2.

        :param q1: first quaternion
        :param q2: second quaternion
        :return: quaternion q1 - q2
        """
        a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1, dim=-1)
        b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2, dim=-1)
        return torch.stack(
            [a_scalar - b_scalar, a_vecx - b_vecx, a_vecy - b_vecy, a_vecz - b_vecz],
            dim=-1
        )

    def quaternion_multiplication(self, q1, q2):
        """Return the Hamilton product q1 * q2 (non-commutative).

        :param q1: first quaternion
        :param q2: second quaternion
        :return: quaternion q1 * q2
        """
        a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1, dim=-1)
        b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2, dim=-1)
        r_scalar = a_scalar * b_scalar - a_vecx * b_vecx - a_vecy * b_vecy - a_vecz * b_vecz
        r_vecx = a_scalar * b_vecx + a_vecx * b_scalar + a_vecy * b_vecz - a_vecz * b_vecy
        r_vecy = a_scalar * b_vecy + a_vecy * b_scalar + a_vecz * b_vecx - a_vecx * b_vecz
        r_vecz = a_scalar * b_vecz + a_vecz * b_scalar + a_vecx * b_vecy - a_vecy * b_vecx
        return torch.stack(
            [r_scalar, r_vecx, r_vecy, r_vecz],
            dim=-1
        )

    def quaternion_square(self, q):
        """Return the Hamilton product q * q.

        BUGFIX: the original used math.pow on the unbound components.
        math.pow coerces its argument to a Python float, which fails for
        batched tensors and, even for a single quaternion, produces Python
        floats that make the subsequent torch.stack raise (stack expects
        tensors).  Tensor exponentiation (**) keeps every term a tensor.

        :param q: quaternion to be squared
        :return: quaternion q * q
        """
        a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q, dim=-1)
        r_scalar = a_scalar ** 2 - a_vecx ** 2 - a_vecy ** 2 - a_vecz ** 2
        r_vecx = 2 * a_scalar * a_vecx
        r_vecy = 2 * a_scalar * a_vecy
        r_vecz = 2 * a_scalar * a_vecz
        return torch.stack(
            [r_scalar, r_vecx, r_vecy, r_vecz],
            dim=-1
        )

    def quaternion_conjugate(self, q):
        """Return the conjugate of q: scalar part unchanged, vector part negated.

        Note: despite the original docstring, this is the conjugate, not the
        inverse; the inverse is conjugate(q) / |q|^2.

        :param q: quaternion to be conjugated
        :return: conjugate of q
        """
        # Build the sign mask on the input's dtype/device instead of the legacy
        # `q.new(4).fill_(-1)` idiom (which allocates uninitialized memory
        # before overwriting it).
        signs = torch.tensor([1.0, -1.0, -1.0, -1.0], dtype=q.dtype, device=q.device)
        return q * signs
4801258 | from django.utils.translation import ugettext_lazy as _
from django.db import models as ext
class Currency(ext.Model):
    """ISO currency reference data."""

    # Primary key is supplied explicitly (no auto-increment).
    id = ext.IntegerField(_("id"), primary_key=True)
    iso_code = ext.CharField(_("iso code"), max_length=10, null=True, blank=True)
    iso_name = ext.CharField(_("iso name"), max_length=10, null=True, blank=True)
    decimal_digits = ext.IntegerField(_("decimal digits"), null=True, blank=True)

    class Meta:
        verbose_name = _('currency')
        verbose_name_plural = _('currencies')

    def __str__(self):
        # NOTE(review): iso_name may be None (null=True); "%s" renders it as
        # the literal string "None" in that case.
        return "%d : %s" % (self.id, self.iso_name)
class Country(ext.Model):
    """ISO country reference data (auto-generated primary key)."""

    iso_code = ext.CharField(_("iso code"), max_length=10, null=True, blank=True)
    name = ext.CharField(_("name"), max_length=255, null=True, blank=True)

    class Meta:
        verbose_name = _('country')
        verbose_name_plural = _('countries')

    def __str__(self):
        # NOTE(review): name may be None (null=True); "%s" renders it as "None".
        return "%d : %s" % (self.id, self.name)
| StarcoderdataPython |
99896 | <filename>python/lookout/sdk/service_analyzer_pb2.py<gh_stars>1-10
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lookout/sdk/service_analyzer.proto
# NOTE(review): this module is machine-generated from the .proto file above.
# To change anything here, edit the .proto definition and re-run protoc
# instead of editing this file by hand.
import sys
# Py2/Py3 shim: on Python 2 byte strings pass through, on Python 3 they are
# re-encoded to latin1 so the serialized descriptor bytes round-trip.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from bblfsh.github.com.gogo.protobuf.gogoproto import gogo_pb2 as github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2
from lookout.sdk import event_pb2 as lookout_dot_sdk_dot_event__pb2
# File descriptor: serialized .proto definition plus its imported files.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='lookout/sdk/service_analyzer.proto',
  package='pb',
  syntax='proto3',
  serialized_pb=_b('\n\"lookout/sdk/service_analyzer.proto\x12\x02pb\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto\x1a\x17lookout/sdk/event.proto\"H\n\rEventResponse\x12\x18\n\x10\x61nalyzer_version\x18\x01 \x01(\t\x12\x1d\n\x08\x63omments\x18\x02 \x03(\x0b\x32\x0b.pb.Comment\"G\n\x07\x43omment\x12\x0c\n\x04\x66ile\x18\x01 \x01(\t\x12\x0c\n\x04line\x18\x02 \x01(\x05\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\r2x\n\x08\x41nalyzer\x127\n\x11NotifyReviewEvent\x12\x0f.pb.ReviewEvent\x1a\x11.pb.EventResponse\x123\n\x0fNotifyPushEvent\x12\r.pb.PushEvent\x1a\x11.pb.EventResponseB\x04\xc8\xe1\x1e\x00\x62\x06proto3')
  ,
  dependencies=[github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2.DESCRIPTOR,lookout_dot_sdk_dot_event__pb2.DESCRIPTOR,])
# --- Message descriptors (wire-format metadata) ---
_EVENTRESPONSE = _descriptor.Descriptor(
  name='EventResponse',
  full_name='pb.EventResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='analyzer_version', full_name='pb.EventResponse.analyzer_version', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='comments', full_name='pb.EventResponse.comments', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=114,
  serialized_end=186,
)
_COMMENT = _descriptor.Descriptor(
  name='Comment',
  full_name='pb.Comment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='file', full_name='pb.Comment.file', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='line', full_name='pb.Comment.line', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='text', full_name='pb.Comment.text', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='confidence', full_name='pb.Comment.confidence', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=188,
  serialized_end=259,
)
# Cross-link the descriptors and register them with the symbol database.
_EVENTRESPONSE.fields_by_name['comments'].message_type = _COMMENT
DESCRIPTOR.message_types_by_name['EventResponse'] = _EVENTRESPONSE
DESCRIPTOR.message_types_by_name['Comment'] = _COMMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Generated message classes ---
EventResponse = _reflection.GeneratedProtocolMessageType('EventResponse', (_message.Message,), dict(
  DESCRIPTOR = _EVENTRESPONSE,
  __module__ = 'lookout.sdk.service_analyzer_pb2'
  # @@protoc_insertion_point(class_scope:pb.EventResponse)
  ))
_sym_db.RegisterMessage(EventResponse)
Comment = _reflection.GeneratedProtocolMessageType('Comment', (_message.Message,), dict(
  DESCRIPTOR = _COMMENT,
  __module__ = 'lookout.sdk.service_analyzer_pb2'
  # @@protoc_insertion_point(class_scope:pb.Comment)
  ))
_sym_db.RegisterMessage(Comment)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\310\341\036\000'))
# --- Analyzer gRPC service descriptor ---
_ANALYZER = _descriptor.ServiceDescriptor(
  name='Analyzer',
  full_name='pb.Analyzer',
  file=DESCRIPTOR,
  index=0,
  options=None,
  serialized_start=261,
  serialized_end=381,
  methods=[
  _descriptor.MethodDescriptor(
    name='NotifyReviewEvent',
    full_name='pb.Analyzer.NotifyReviewEvent',
    index=0,
    containing_service=None,
    input_type=lookout_dot_sdk_dot_event__pb2._REVIEWEVENT,
    output_type=_EVENTRESPONSE,
    options=None,
  ),
  _descriptor.MethodDescriptor(
    name='NotifyPushEvent',
    full_name='pb.Analyzer.NotifyPushEvent',
    index=1,
    containing_service=None,
    input_type=lookout_dot_sdk_dot_event__pb2._PUSHEVENT,
    output_type=_EVENTRESPONSE,
    options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_ANALYZER)
DESCRIPTOR.services_by_name['Analyzer'] = _ANALYZER
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
15571 | #!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Merge translations from a set of .po or XMB files into a set of .po files.
Usage:
../tools/merge_messages <source-dir> <template-file>
../tools/merge_messages <source-dir> <template-file> <target-dir>
../tools/merge_messages <source-po-file> <template-file> <target-po-file>
<source-dir> should be a directory containing subdirectories named with
locale codes (e.g. pt_BR). For each locale, this script looks for the first
.po or .xml file it finds anywhere under <source-dir>/<locale-code>/ and
adds all its messages and translations to the corresponding django.po file
in the target directory, at <target-dir>/<locale-code>/LC_MESSAGES/django.po.
<template-file> is the output file from running:
'find_missing_translations --format=po'
With the name that corresponds to the --format=xmb output.
Make sure to run this in a tree that corresponds to the version used for
generating the xmb file or the resulting merge will be wrong. See
validate_merge for directions on verifying the merge was correct.
If <target-dir> is unspecified, it defaults to the app/locale directory of
the current app. Alternatively, you can specify a single source file and
a single target file to update.
When merging messages from a source file into a target file:
- Empty messages and messages marked "fuzzy" in the source file are ignored.
- Translations in the source file will replace any existing translations
for the same messages in the target file.
- Other translations in the source file will be added to the target file.
- If the target file doesn't exist, it will be created.
- To minimize unnecessary changes from version to version, the target file
has no "#: filename:line" comments and the messages are sorted by msgid.
"""
import babel.messages
from babel.messages import pofile
import codecs
import os
import sys
import xml.sax
class XmbCatalogReader(xml.sax.handler.ContentHandler):
    """A SAX handler that populates a babel.messages.Catalog with messages
    read from an XMB file.

    Python 2 code: note the iterator's .next() method.  The XMB file carries
    only translations, so the msgids are taken from the template catalog,
    which must list the same messages in the same order.
    """
    def __init__(self, template):
        """template should be a Catalog containing the untranslated messages
        in the same order as the corresponding messages in the XMB file."""
        self.tags = []  # stack of currently open XML element names
        self.catalog = babel.messages.Catalog()
        self.iter = iter(template)  # advanced in lockstep with <msg> elements
        assert self.iter.next().id == ''  # skip the blank metadata message
    def startElement(self, tag, attrs):
        # <msg> starts a new translation; <ph> is a placeholder that maps to
        # a named %()s substitution in the msgstr.
        self.tags.append(tag)
        if tag == 'msg':
            self.string = ''
            self.message = babel.messages.Message(self.iter.next().id)
        if tag == 'ph':
            self.string += '%(' + attrs['name'] + ')s'
            self.message.flags.add('python-format')
    def endElement(self, tag):
        assert self.tags.pop() == tag
        if tag == 'msg':
            # Finish the current message and store it under its template msgid.
            self.message.string = self.string
            self.catalog[self.message.id] = self.message
    def characters(self, content):
        # Only text directly inside <msg> contributes to the translation.
        if self.tags[-1] == 'msg':
            self.string += content
def log(text):
    """Prints out Unicode text."""
    # Python 2: encode to UTF-8 bytes before printing to avoid
    # UnicodeEncodeError on non-UTF-8 terminals.
    print text.encode('utf-8')
def log_change(old_message, new_message):
    """Describes an update to a message.

    Prints a diff-style summary: '+' lines for a newly added message,
    '-'/'+' pairs when an existing message's string or flags change.
    """
    if not old_message:
        # New message: print its msgid (when present), msgstr and flags.
        if new_message.id:
            log('+ msgid "%s"' % str(new_message.id))
        else:
            print >>sys.stderr, 'no message id: %s' % new_message
        log('+ msgstr "%s"' % str(new_message.string.encode('ascii', 'ignore')))
        if new_message.flags:
            log('+ #, %s' % ', '.join(sorted(new_message.flags)))
    else:
        # Existing message: only report when something actually changes.
        if (new_message.string != old_message.string or
            new_message.flags != old_message.flags):
            log('  msgid "%s"' % old_message.id)
            log('- msgstr "%s"' % old_message.string)
            if old_message.flags:
                log('- #, %s' % ', '.join(sorted(old_message.flags)))
            log('+ msgstr "%s"' % new_message.string)
            if new_message.flags:
                log('+ #, %s' % ', '.join(sorted(new_message.flags)))
def create_file(filename):
    """Opens a file for writing, creating any necessary parent directories."""
    # NOTE(review): exists()/makedirs() is racy if two processes create the
    # same directory concurrently; acceptable for this one-shot CLI tool.
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    return open(filename, 'w')
def merge(source, target_filename):
    """Merges the messages from the source Catalog into a .po file at
    target_filename. Creates the target file if it doesn't exist.

    Empty and fuzzy source messages are skipped; source translations replace
    existing ones for the same msgid.  Output is sorted with no location
    comments to keep diffs between versions small.
    """
    if os.path.exists(target_filename):
        target = pofile.read_po(open(target_filename))
        for message in source:
            if message.id and message.string and not message.fuzzy:
                log_change(message.id in target and target[message.id], message)
                # This doesn't actually replace the message!  It just updates
                # the fields other than the string.  See Catalog.__setitem__.
                target[message.id] = message
                # We have to mutate the message to update the string and flags.
                target[message.id].string = message.string
                target[message.id].flags = message.flags
    else:
        for message in source:
            log_change(None, message)
        target = source
    target_file = create_file(target_filename)
    pofile.write_po(target_file, target,
                    no_location=True, sort_output=True, ignore_obsolete=True)
    target_file.close()
def merge_file(source_filename, target_filename, template_filename):
    """Dispatch on the source type: .po files merge directly, .xml (XMB)
    files are first paired with the msgids from the .po template."""
    if source_filename.endswith('.po'):
        merge(pofile.read_po(open(source_filename)), target_filename)
    elif source_filename.endswith('.xml'):
        handler = XmbCatalogReader(pofile.read_po(open(template_filename)))
        xml.sax.parse(open(source_filename), handler)
        merge(handler.catalog, target_filename)
if __name__ == '__main__':
    # Expected: <source> <template> [<target>]; see the module docstring.
    args = sys.argv[1:]
    if len(args) not in [1, 2, 3]:
        print __doc__
        sys.exit(1)
    # Pad missing optional arguments with None.
    args = (args + [None, None])[:3]
    source_path = args[0]
    template_path = args[1]
    # Default target is the app's locale directory.
    target_path = args[2] or os.path.join(os.environ['APP_DIR'], 'locale')
    # If a single file is specified, merge it.
    if ((source_path.endswith('.po') or source_path.endswith('.xml')) and
        target_path.endswith('.po')):
        print target_path
        merge_file(source_path, target_path, template_path)
        sys.exit(0)
    # Otherwise, we expect two directories.
    if not os.path.isdir(source_path) or not os.path.isdir(target_path):
        print __doc__
        sys.exit(1)
    # Find all the source files.
    source_filenames = {}  # {locale: po_filename}
    def find_po_file(key, dir, filenames):
        """Looks for a .po file and records it in source_filenames."""
        for filename in filenames:
            if filename.endswith('.po') or filename.endswith('.xml'):
                source_filenames[key] = os.path.join(dir, filename)
    # Walk each <source-dir>/<locale>/ subtree (Python 2 os.path.walk).
    for locale in os.listdir(source_path):
        os.path.walk(os.path.join(source_path, locale), find_po_file,
                     locale.replace('-', '_'))
    # Merge them into the target files.
    for locale in sorted(source_filenames.keys()):
        target = os.path.join(target_path, locale, 'LC_MESSAGES', 'django.po')
        print target
        merge_file(source_filenames[locale], target, template_path)
| StarcoderdataPython |
147843 | <filename>rdmo/options/serializers/export.py<gh_stars>0
from rest_framework import serializers
from ..models import OptionSet, Option
class OptionSerializer(serializers.ModelSerializer):
    """Export serializer for a single Option."""

    # The parent option set is serialized as its URI string (None when absent).
    optionset = serializers.CharField(source='optionset.uri', default=None, read_only=True)

    class Meta:
        model = Option
        fields = (
            'uri',
            'uri_prefix',
            'key',
            'path',
            'comment',
            'order',
            'text_en',
            'text_de',
            'additional_input',
            'optionset'
        )
class OptionSetSerializer(serializers.ModelSerializer):
    """Export serializer for an OptionSet with its nested options."""

    options = OptionSerializer(many=True)
    # Conditions are flattened to a list of URIs via get_conditions() below.
    conditions = serializers.SerializerMethodField()

    class Meta:
        model = OptionSet
        fields = (
            'uri',
            'uri_prefix',
            'key',
            'comment',
            'order',
            'options',
            'conditions'
        )

    def get_conditions(self, obj):
        """Return the URIs of all conditions attached to this option set."""
        return [condition.uri for condition in obj.conditions.all()]
| StarcoderdataPython |
1686001 | <filename>meme/api_1_0/resources/tasks.py
from datetime import datetime
from flask_restful import Resource, reqparse
from flask import jsonify
from meme import db, ma
from meme.models import Task, Engineer
class TaskSchema(ma.ModelSchema):
    """Marshmallow "serialization schema for Task db Table"""
    class Meta:
        # Fields are derived automatically from the Task model.
        model = Task
class Tasks(Resource):
    """REST API resource for getting list of all tasks and creating task for specified engineer"""
    def __init__(self):
        self.task_schema = TaskSchema()
    def get(self):
        """Return all tasks serialized as JSON."""
        tasks = Task.query.all()
        data, errors = self.task_schema.dump(tasks, many=True)
        return jsonify(data)
    def post(self):
        """Create a task for the engineer given by id_engineer.

        Returns 400 on missing required parameters, 404 when the engineer
        does not exist, otherwise {'status': 'created', 'id': <task id>}.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('id_engineer', type=int, help='id of the assignee engineer')
        parser.add_argument('task_name', type=str, help='task description')
        parser.add_argument('task_description', type=str, help='task full description')
        # NOTE(review): reqparse `type=bool` treats any non-empty string
        # (including "false") as True -- consider flask_restful.inputs.boolean.
        parser.add_argument('start_time', type=str, help='task start time')
        parser.add_argument('photo_required', type=bool, help='are photo required')
        post_args = parser.parse_args()
        if post_args['task_name'] is None or post_args['task_description'] is None or post_args['photo_required'] is None or post_args['id_engineer'] is None:
            return {'status': 'error', 'error': 'missing required parameter'}, 400
        assignee_engineer = Engineer.query.get(post_args['id_engineer'])
        if not assignee_engineer:
            return {'status': 'error', 'error': 'engineer not found'}, 404
        start_time = post_args['start_time']
        if start_time is None:
            # Default to the current local time (naive datetime).
            start_time = datetime.now(tz=None)
        else:
            # NOTE(review): the format string only accepts timestamps with a
            # literal '+00:00' suffix and still yields a *naive* datetime --
            # confirm this matches what clients send (datetime.fromisoformat
            # would be more flexible).
            start_time = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S+00:00')
        task = Task(task_name=post_args['task_name'], task_description=post_args['task_description'],
                    start_time=start_time, photo_required=post_args['photo_required'], engineer=assignee_engineer)
        db.session.add(task)
        db.session.commit()
        return {'status': 'created', 'id': task.id_task}
| StarcoderdataPython |
75501 | import pytest
from app.api.services import abr_service
from app.api.business.errors import AbrError
import requests
import mock
from mock import patch
class TestAbrService():
    """Unit tests for abr_service parsing and error propagation.

    NOTE(review): the @mock.patch decorators patch abr_service.call_abr_api,
    but the parsing tests never use the injected mock (the decorator argument
    is named after the payload helper, which is an ordinary instance method
    called directly), and the exception tests invoke the patched function
    itself -- they verify the mock's configuration rather than any service
    logic. Consider reworking these to exercise the real code paths.
    """
    # --- canned XML payloads -------------------------------------------------
    def mocked_find_business_by_abn(self):
        """A successful ABR lookup response."""
        data = '<ABR><response><stateCode>NSW</stateCode><postcode>2750</postcode>'\
            '<organisationName>yay</organisationName></response></ABR>'
        return data
    def mocked_payload_exception(self):
        """An ABR exception response with both code and description."""
        data = '<ABR><response><exception><exceptionDescription>Search text is not a '\
            'valid ABN or ACN</exceptionDescription><exceptionCode>WEBSERVICES</exceptionCode>'\
            '</exception></response></ABR>'
        return data
    def mocked_payload_exception_with_no_description(self):
        """An ABR exception response missing the description element."""
        data = '<ABR><response><exception><exceptionCode>WEBSERVICES</exceptionCode>'\
            '</exception></response></ABR>'
        return data
    def mocked_payload_exception_with_no_code(self):
        """An ABR exception response missing the code element."""
        data = '<ABR><response><exception><exceptionDescription>Search text is not a '\
            'valid ABN or ACN</exceptionDescription>'\
            '</exception></response></ABR>'
        return data
    def mocked_payload_exception_with_no_code_and_no_description(self):
        """An ABR response with no exception element at all."""
        data = '<ABR><response></response></ABR>'
        return data
    # --- parsing tests -------------------------------------------------------
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_response_can_be_parsed(self, mocked_find_business_by_abn):
        expected_parsed_data = {'state': 'NSW', 'organisation_name': 'yay', 'postcode': '2750'}
        data = abr_service.get_data(self.mocked_find_business_by_abn())
        assert data == expected_parsed_data
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_can_be_parsed(self, mocked_payload_exception):
        expected_msg = 'WEBSERVICES: Search text is not a valid ABN or ACN'
        result = abr_service.get_abr_exception(self.mocked_payload_exception())
        assert result == expected_msg
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_can_be_parsed_with_no_exception_desc(self, mocked_payload_exception_with_no_description):
        expected_msg = 'WEBSERVICES: No exception description found'
        result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_description())
        assert result == expected_msg
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_can_be_parsed_with_no_exception_code(self, mocked_payload_exception_with_no_code):
        expected_msg = 'No exception code found: Search text is not a valid ABN or ACN'
        result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_code())
        assert result == expected_msg
    @mock.patch("app.api.services.abr_service.call_abr_api")
    def test_abr_exception_parsed_with_no_ex_code_desc(self, mocked_payload_exception_with_no_code_and_no_description):
        expected_msg = None
        result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_code_and_no_description())
        assert result == expected_msg
    # --- request-exception propagation tests ---------------------------------
    # NOTE(review): 'connecton' typo is preserved below to avoid changing
    # test IDs; fix the method name when reworking these tests.
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_connecton_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.ConnectionError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.ConnectionError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_ssl_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.SSLError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.SSLError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_http_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.HTTPError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.HTTPError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_proxy_error_exception_raised(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.ProxyError()
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.ProxyError):
            abr_service.call_abr_api(url)
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_http_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.HTTPError('HTTP Error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.HTTPError) as ex_info:
            abr_service.call_abr_api(url)
        assert str(ex_info.value) == 'HTTP Error'
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_proxy_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.ProxyError('Proxy Error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.ProxyError) as ex_msg:
            abr_service.call_abr_api(url)
        assert str(ex_msg.value) == 'Proxy Error'
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_ssl_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.SSLError('SSL Error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.SSLError) as ex_msg:
            abr_service.call_abr_api(url)
        assert str(ex_msg.value) == 'SSL Error'
    @mock.patch('app.api.services.abr_service.call_abr_api')
    def test_exception_message(self, mock_requests_get):
        mock_requests_get.side_effect = requests.exceptions.RequestException('Unexpected request error')
        url = 'http://google.com'
        with pytest.raises(requests.exceptions.RequestException) as ex_msg:
            abr_service.call_abr_api(url)
        assert str(ex_msg.value) == 'Unexpected request error'
| StarcoderdataPython |
3206518 | <reponame>shbatm/regenmaschine
"""Define an object to interact with programs."""
from typing import Any, Awaitable, Callable, Dict, List, cast
class Program:
    """Wrapper around the controller's program-related REST endpoints.

    All methods are thin coroutines that delegate to the injected request
    callable and reshape its JSON response where needed.
    """

    def __init__(self, request: Callable[..., Awaitable[Dict[str, Any]]]) -> None:
        """Store the async request callable used for every API call."""
        self._request = request

    async def all(self, include_inactive: bool = False) -> Dict[int, Dict[str, Any]]:
        """Return all programs, keyed by uid (inactive ones only on request)."""
        payload = await self._request("get", "program")
        result: Dict[int, Dict[str, Any]] = {}
        for entry in payload["programs"]:
            if include_inactive or entry["active"]:
                result[entry["uid"]] = entry
        return result

    async def disable(self, program_id: int) -> Dict[str, Any]:
        """Disable a program."""
        body = {"active": False}
        return await self._request("post", f"program/{program_id}", json=body)

    async def enable(self, program_id: int) -> Dict[str, Any]:
        """Enable a program."""
        body = {"active": True}
        return await self._request("post", f"program/{program_id}", json=body)

    async def get(self, program_id: int) -> Dict[str, Any]:
        """Return a specific program."""
        return await self._request("get", f"program/{program_id}")

    async def next(self) -> List[Dict[str, Any]]:
        """Return the next run date/time for all programs."""
        payload = await self._request("get", "program/nextrun")
        return cast(List[Dict[str, Any]], payload["nextRuns"])

    async def running(self) -> List[Dict[str, Any]]:
        """Return all currently running programs."""
        payload = await self._request("get", "watering/program")
        return cast(List[Dict[str, Any]], payload["programs"])

    async def start(self, program_id: int) -> Dict[str, Any]:
        """Start a program."""
        return await self._request("post", f"program/{program_id}/start")

    async def stop(self, program_id: int) -> Dict[str, Any]:
        """Stop a program."""
        return await self._request("post", f"program/{program_id}/stop")
| StarcoderdataPython |
1747265 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BuildFilter(Model):
    """Properties that are enabled for Odata querying.
    :param build_id: The unique identifier for the build.
    :type build_id: str
    :param build_type: The type of build. Possible values include:
     'AutoBuild', 'QuickBuild'
    :type build_type: str or
     ~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildType
    :param status: The current status of the build. Possible values include:
     'Queued', 'Started', 'Running', 'Succeeded', 'Failed', 'Canceled',
     'Error', 'Timeout'
    :type status: str or
     ~azure.mgmt.containerregistry.v2018_02_01_preview.models.BuildStatus
    :param create_time: The create time for a build.
    :type create_time: datetime
    :param finish_time: The time the build finished.
    :type finish_time: datetime
    :param output_image_names: The list of all images that were generated from
     the build.
    :type output_image_names: list[str]
    :param is_archive_enabled: The value that indicates whether archiving is
     enabled or not.
    :type is_archive_enabled: bool
    :param build_task_name: The name of the build task that the build
     corresponds to.
    :type build_task_name: str
    """
    # Auto-generated serialization table: maps each Python attribute to its
    # JSON wire key and msrest type used by the Model (de)serializer.
    # Do not edit by hand; regenerate with AutoRest instead.
    _attribute_map = {
        'build_id': {'key': 'buildId', 'type': 'str'},
        'build_type': {'key': 'buildType', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'create_time': {'key': 'createTime', 'type': 'iso-8601'},
        'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
        'output_image_names': {'key': 'outputImageNames', 'type': '[str]'},
        'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
        'build_task_name': {'key': 'buildTaskName', 'type': 'str'},
    }
    def __init__(self, *, build_id: str=None, build_type=None, status=None, create_time=None, finish_time=None, output_image_names=None, is_archive_enabled: bool=None, build_task_name: str=None, **kwargs) -> None:
        """Initialize the filter; every field is optional (keyword-only)."""
        super(BuildFilter, self).__init__(**kwargs)
        self.build_id = build_id
        self.build_type = build_type
        self.status = status
        self.create_time = create_time
        self.finish_time = finish_time
        self.output_image_names = output_image_names
        self.is_archive_enabled = is_archive_enabled
        self.build_task_name = build_task_name
| StarcoderdataPython |
1777716 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import sys
import time
import pydevd
from _pydevd_bundle.pydevd_comm import get_global_debugger
from ptvsd.pydevd_hooks import install
from ptvsd.runner import run as no_debug_runner
from ptvsd.socket import Address
from ptvsd._util import new_hidden_thread
# Arguments appended to every pydevd invocation unless the caller has
# already supplied them (see _set_pydevd_defaults).
PYDEVD_DEFAULTS = {
    '--qt-support=auto',
}
def _set_pydevd_defaults(pydevd_args):
    """Return *pydevd_args* extended with any missing default flags."""
    missing = [arg for arg in PYDEVD_DEFAULTS if arg not in pydevd_args]
    return pydevd_args + missing
########################
# high-level functions
def debug_main(address, name, kind, *extra, **kwargs):
    """Run the target (module or file) under the pydevd debugger.

    When not asked to wait for a client and we are the listening end, a
    hidden background thread flips the debugger's ready_to_run flag as soon
    as the global debugger object exists, so the program starts executing
    before any client attaches.
    """
    if not kwargs.pop('wait', False) and address.isserver:
        def unblock_debugger():
            # Poll until pydevd has created its global debugger instance.
            debugger = get_global_debugger()
            while debugger is None:
                time.sleep(0.1)
                debugger = get_global_debugger()
            debugger.ready_to_run = True
        new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()
    if kind == 'module':
        run_module(address, name, *extra, **kwargs)
    else:
        run_file(address, name, *extra, **kwargs)
def run_main(address, name, kind, *extra, **kwargs):
    """Run the target without debugging, after normalizing sys.argv."""
    runner = kwargs.pop('_runner', no_debug_runner)
    addr = Address.from_raw(address)
    sys.argv[:] = _run_main_argv(name, extra)
    runner(addr, name, kind == 'module', *extra, **kwargs)
########################
# low-level functions
def run_module(address, modname, *extra, **kwargs):
    """Run pydevd for the given module."""
    addr = Address.from_raw(address)
    if not addr.isserver:
        # A client-mode connection only ever serves a single session.
        kwargs['singlesession'] = True
    launcher = kwargs.pop('_run', _run)
    program_name = kwargs.pop('_prog', sys.argv[0])
    argv = _run_argv(addr, modname + ':', extra, _prog=program_name)
    # pydevd expects "--module" immediately before the "--file" argument.
    argv.insert(argv.index('--file'), '--module')
    launcher(argv, addr, **kwargs)
def run_file(address, filename, *extra, **kwargs):
    """Run pydevd for the given Python file."""
    addr = Address.from_raw(address)
    if not addr.isserver:
        # A client-mode connection only ever serves a single session.
        kwargs['singlesession'] = True
    launcher = kwargs.pop('_run', _run)
    program_name = kwargs.pop('_prog', sys.argv[0])
    argv = _run_argv(addr, filename, extra, _prog=program_name)
    launcher(argv, addr, **kwargs)
def _run_argv(address, filename, extra, _prog=sys.argv[0]):
    """Convert the given values to an argv that pydevd.main() supports.

    *extra* may contain pydevd flags separated from the target's own
    arguments by a literal "--"; everything before it is passed to pydevd
    (topped up with PYDEVD_DEFAULTS), everything after it to the target.
    """
    # NOTE: the local list was previously named "pydevd", shadowing the
    # module-level "import pydevd" used elsewhere in this file.
    if '--' in extra:
        separator = extra.index('--')
        pydevd_args = list(extra[:separator])
        extra = list(extra[separator + 1:])
    else:
        pydevd_args = []
        extra = list(extra)
    pydevd_args = _set_pydevd_defaults(pydevd_args)
    host, port = address
    argv = [
        _prog,
        '--port', str(port),
    ]
    if not address.isserver:
        argv.extend([
            '--client', host or 'localhost',
        ])
    return argv + pydevd_args + [
        '--file', filename,
    ] + extra
def _run_main_argv(filename, extra):
if '--' in extra:
pydevd = list(extra[:extra.index('--')])
extra = list(extra[len(pydevd) + 1:])
else:
extra = list(extra)
return [filename] + extra
def _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):
    """Start pydevd with the given commandline args.

    ``_pydevd`` and ``_install`` are injectable for testing.  sys.argv is
    replaced wholesale before handing control to pydevd.main().
    """
    # Pydevd assumes that the "__main__" module is the "pydevd" module
    # and does some tricky stuff under that assumption.  For example,
    # when the debugger starts up it calls save_main_module()
    # (in pydevd_bundle/pydevd_utils.py).  That function explicitly sets
    # sys.modules["pydevd"] to sys.modules["__main__"] and then sets
    # the __main__ module to a new one.  This makes some sense since
    # it gives the debugged script a fresh __main__ module.
    #
    # This complicates things for us since we are running a different
    # file (i.e. this one) as the __main__ module.  Consequently,
    # sys.modules["pydevd"] gets set to ptvsd/__main__.py.  Subsequent
    # imports of the "pydevd" module then return the wrong module.  We
    # work around this by avoiding lazy imports of the "pydevd" module.
    # We also replace the __main__ module with the "pydevd" module here.
    if sys.modules['__main__'].__file__ != _pydevd.__file__:
        sys.modules['__main___orig'] = sys.modules['__main__']
        sys.modules['__main__'] = _pydevd
    daemon = _install(_pydevd, addr, **kwargs)
    sys.argv[:] = argv
    try:
        _pydevd.main()
    except SystemExit as ex:
        # Record the exit code on the daemon before letting the exit
        # propagate, so the session can report it to the client.
        daemon.exitcode = int(ex.code)
        raise
| StarcoderdataPython |
1709016 | <reponame>stefano-bragaglia/DePYsible
from unittest import TestCase
from assertpy import assert_that
from depysible.domain.definitions import Program
from depysible.domain.definitions import Rule
from depysible.domain.interpretation import Derivation
from depysible.domain.interpretation import Interpreter
class TestComparing(TestCase):
    """Specificity comparisons between argument structures.

    All four cases share the same defeasible program; only the derivations
    and the direction of the comparison differ, so the fixture is factored
    out instead of repeating the 12-line program literal in every test.
    """

    # Shared defeasible-logic program.  The text is byte-identical to the
    # literal previously duplicated in each test, so parsing is unchanged.
    PROGRAM = """
            bird(X) <- chicken(X).
            bird(X) <- penguin(X).
            ~flies(X) <- penguin(X).
            chicken(tina).
            penguin(tweety).
            scared(tina).
            flies(X) -< bird(X).
            flies(X) -< chicken(X), scared(X).
            nests_in_trees(X) -< flies(X).
            ~flies(X) -< chicken(X).
            """

    def _interpreter(self):
        """Return a fresh interpreter over the shared program."""
        return Interpreter(Program.parse(self.PROGRAM))

    @staticmethod
    def _structure(interpreter, rules):
        """Build a derivation from *rules* and return its argument structure."""
        derivation = Derivation([Rule.parse(rule) for rule in rules], interpreter)
        return derivation.get_structure()

    def test__is_strictly_more_specific__0(self):
        interpreter = self._interpreter()
        s1 = self._structure(interpreter, [
            '~flies(tina) -< chicken(tina).',
            'chicken(tina).',
        ])
        s2 = self._structure(interpreter, [
            'flies(tina) -< bird(tina).',
            'bird(tina) <- chicken(tina).',
            'chicken(tina).',
        ])
        assert_that(s1.is_strictly_more_specific_than(s2)).is_true()

    def test__is_strictly_more_specific__1(self):
        interpreter = self._interpreter()
        s1 = self._structure(interpreter, [
            '~flies(tina) -< chicken(tina).',
            'chicken(tina).',
        ])
        s2 = self._structure(interpreter, [
            'flies(tina) -< bird(tina).',
            'bird(tina) <- chicken(tina).',
            'chicken(tina).',
        ])
        assert_that(s2.is_strictly_more_specific_than(s1)).is_false()

    def test__is_strictly_more_specific__2(self):
        interpreter = self._interpreter()
        s1 = self._structure(interpreter, [
            'flies(tina) -< chicken(tina), scared(tina).',
            'chicken(tina).',
            'scared(tina).',
        ])
        s2 = self._structure(interpreter, [
            '~flies(tina) -< chicken(tina).',
            'chicken(tina).',
        ])
        assert_that(s2.is_strictly_more_specific_than(s1)).is_false()

    def test__is_strictly_more_specific__3(self):
        interpreter = self._interpreter()
        s1 = self._structure(interpreter, [
            'flies(tina) -< chicken(tina), scared(tina).',
            'chicken(tina).',
            'scared(tina).',
        ])
        s2 = self._structure(interpreter, [
            '~flies(tina) -< chicken(tina).',
            'chicken(tina).',
        ])
        assert_that(s1.is_strictly_more_specific_than(s2)).is_true()
| StarcoderdataPython |
101687 | # This function tells a user whether or not a number is prime
def isPrime(number):
    """Return True if *number* is prime, False otherwise.

    Fixes two defects of the original: 0, 1 and negative numbers were
    reported as prime (the loop body never ran), and trial division went
    all the way to *number* with no early exit instead of stopping at the
    square root.
    """
    # Numbers below 2 are not prime by definition.
    if number < 2:
        return False
    # Any composite number has a divisor no greater than its square root.
    for candidate in range(2, int(number ** 0.5) + 1):
        if number % candidate == 0:
            return False
    return True
| StarcoderdataPython |
136163 | """
The COCO dataset or other datasets for the YOLOv5 model with using NNRT.
"""
import logging
import os
import math
from plato.config import Config
from plato.datasources import base
from nnrt_datasource_yolo_utils import LoadImagesAndLabels
def make_divisible(x, divisor):
    """Return the smallest multiple of *divisor* that is >= x."""
    rounded_up = math.ceil(x / divisor)
    return rounded_up * divisor
def check_img_size(img_size, s=32):
    """Return *img_size* rounded up to a multiple of stride *s*.

    Emits a warning when the size had to be adjusted.  The original
    printed the warning to stdout even though this module already uses
    the logging framework; route it through logging instead.
    """
    new_size = make_divisible(img_size, int(s))  # ceil to a stride multiple
    if new_size != img_size:
        logging.warning(
            'WARNING: --img-size %g must be multiple of max stride %g, updating to %g',
            img_size, s, new_size)
    return new_size
class DataSource(base.DataSource):
    """The YOLO dataset.

    Wraps LoadImagesAndLabels for the train/test splits configured in the
    global plato Config, downloading the raw archives on first use.
    """
    def __init__(self):
        super().__init__()
        _path = Config().data.data_path
        if not os.path.exists(_path):
            os.makedirs(_path)
            logging.info(
                "Downloading the YOLO dataset. This may take a while.")
            urls = Config().data.download_urls
            for url in urls:
                if not os.path.exists(_path + url.split('/')[-1]):
                    DataSource.download(url, _path)
        assert 'grid_size' in Config().params
        self.grid_size = Config().params['grid_size']
        # Round the configured image size up to a multiple of the grid size.
        self.image_size = check_img_size(Config().data.image_size,
                                         self.grid_size)
        self.train_set = None
        self.test_set = None

    def num_train_examples(self):
        return Config().data.num_train_examples

    def num_test_examples(self):
        return Config().data.num_test_examples

    def classes(self):
        """Obtains a list of class names in the dataset."""
        return Config().data.classes

    def _build_dataset(self, path):
        """Construct the LoadImagesAndLabels dataset for the images at *path*.

        Shared by get_train_set() and get_test_set(), which previously
        duplicated this 13-argument construction verbatim.
        """
        single_class = (Config().data.num_classes == 1)
        return LoadImagesAndLabels(
            path,
            self.image_size,
            Config().trainer.batch_size,
            augment=False,  # augment images
            hyp=None,  # augmentation hyperparameters
            rect=False,  # rectangular training
            cache_images=False,
            single_cls=single_class,
            stride=int(self.grid_size),
            pad=0.0,
            image_weights=False,
            prefix='')

    def get_train_set(self):
        """Lazily build and cache the training split."""
        if self.train_set is None:
            self.train_set = self._build_dataset(Config().data.train_path)
        return self.train_set

    def get_test_set(self):
        """Lazily build and cache the test split."""
        if self.test_set is None:
            self.test_set = self._build_dataset(Config().data.test_path)
        return self.test_set
| StarcoderdataPython |
87987 | <gh_stars>0
"""
Add FrameNet lexicon data to a data release folder
python add_lexicon_data.py --path_config_json=<path_config_json> --verbose=<verbose>
Usage:
add_lexicon_data.py --path_config_json=<path_config_json> --verbose=<verbose>
Options:
--path_config_json=<path_config_json> e.g., ../config/v0.json
--verbose=<verbose> 0 nothing, 1 descriptive stats, 2 debugging information
Example:
python add_lexicon_data.py --path_config_json="../config/v0.json" --verbose="2"
"""
from docopt import docopt
import json
import os
import shutil
# load arguments
arguments = docopt(__doc__)
print()
print('PROVIDED ARGUMENTS')
print(arguments)
print()
verbose = int(arguments['--verbose'])  # 0 silent, 1 stats, 2 debug
# Read the settings through a context manager so the file handle is closed
# promptly; the original json.load(open(...)) leaked it until GC.
with open(arguments['--path_config_json']) as config_file:
    settings = json.load(config_file)
# Copy the FrameNet lexicon into the release folder (destination must not exist).
shutil.copytree(settings['paths']['lexicon_data'],
                settings['paths']['data_release_frames_folder'])
| StarcoderdataPython |
3201103 | <gh_stars>0
# Literally just shader strings so that the main file isn't so cluttered
vertexCode = """#version 330 core
layout(location = 0) in vec3 aPos;
layout(location = 1) in float aBrightness;
layout(location = 2) in vec2 aTexCoords;
// Uniforms
uniform mat4 modelMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
// Output
out vec3 FragPos;
out float Brightness;
out vec2 TexCoords;
void main()
{
// FragPos
FragPos = vec3(modelMatrix * vec4(aPos, 1.0));
Brightness = aBrightness;
// TexCoords
TexCoords = aTexCoords.xy;
gl_Position = projectionMatrix * viewMatrix * vec4(FragPos, 1.0);
}
"""
################################
# Debug Shader #
################################
# Vertex Shader
debugVertexCode = """#version 330 core
layout(location = 0) in vec3 aPos;
layout(location = 1) in vec3 aColor;
out vec3 Color;
uniform mat4 modelMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
void main() {
Color = aColor.rgb;
vec4 FragPos = modelMatrix * vec4(aPos, 1.0);
gl_Position = projectionMatrix * viewMatrix * FragPos;
}
"""
# Fragment Shader
debugFragmentCode = """#version 330 core
out vec4 FragColor;
in vec3 Color;
void main() {
FragColor = vec4(Color, 1.0);
}
"""
################################
# Skybox shader #
################################
skyFragmentCode = """#version 330 core
out vec4 FragColor;
// Vertex Shader Output
in vec3 FragPos;
// Uniforms
uniform float sunHeight;
void main()
{
// Declare result
vec3 result = vec3(0.0);
// Get the gradient factor based on y component of the fragment position
float mixFactor = (normalize(FragPos).y + 1.0) / 2; // We do this to transform it to positive space between 0 and 1 (from -1 to 1)
mixFactor = 1.0 - mixFactor; // Just flip the mix factor
// Set result color
vec3 topColor = vec3(0.3, 0.6, 0.9); // Sky blue
vec3 bottomColor = vec3(0.6, 0.3, 0.3); // A nice darkish pink
// Move the mix factor to bring the horizon haze up when the sun is low
float offset = (abs(sunHeight) - 0.8) / 2;
result = mix(topColor, bottomColor, max(min((mixFactor + offset) * 1.5, 1.0), 0.0));
// Multiply the result by some float based on the height of the sun to make it darker
result *= 1.0 - (sunHeight * 3 / 4);
//result = pow(result, vec3(1.0 / 1.5));
FragColor = vec4(result, 1.0);
}
"""
######################################
# Sun and Moon drawing shader #
######################################
sunMoonFragmentCode = """#version 330 core
out vec4 FragColor;
// Vertex Shader Output
in vec3 FragPos;
in vec2 TexCoords;
in float Color;
// Uniforms
uniform sampler2D uCelestialTexture;
void main()
{
// Declare result
vec3 result = texture(uCelestialTexture, TexCoords).rgb;
result = pow(result, vec3(1.0 / 2.2));
FragColor = vec4(result, 1.0);
}
"""
#############################
# Block Shader #
#############################
blockFragmentCode = """#version 330 core
out vec4 FragColor;
in vec3 FragPos;
in vec2 TexCoords;
in float Brightness;
uniform sampler2D uTextureAtlas;
uniform float uSunHeight;
uniform float uMinLighting = 0.0;
void main()
{
vec4 result = texture(uTextureAtlas, TexCoords);
result.xyz *= Brightness;
result.xyz *= (1.0 - (uSunHeight * 3 / 4)) * (1.0 - uMinLighting) + uMinLighting;
// Return
FragColor = result;
}
"""
#############################
# Reticle Shader #
#############################
reticleVertexCode = """#version 330 core
layout(location = 0) in vec3 aPos;
layout(location = 2) in vec2 aTexCoords;
// Uniforms
uniform mat4 projectionMatrix;
// Output
out vec2 TexCoords;
void main()
{
// TexCoords
TexCoords = aTexCoords.xy;
gl_Position = projectionMatrix * vec4(aPos, 1.0);
}
"""
reticleFragmentCode = """#version 330 core
out vec4 FragColor;
void main() {
FragColor = vec4(0.8, 0.8, 0.8, 1.0);
}
"""
| StarcoderdataPython |
1714124 | import setuptools
# The README doubles as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="wagglepy",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="The wagglepy package consists of a collection of template notebooks and some utility functionality for drawing and analyzing the particle traces in the study of plasma physics. Wagglepy, however, does not restrict itself in particle-tracing, but would expand its application across ray-tracing, stochastic procedure and so on, all of which share the same essence, that is, the ordinary differential system.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/WenyinWei/wagglepy",
    # Discover every package under the repository root automatically.
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.8',
)
95466 | <reponame>corner4world/cubeai<filename>uaa-python/app/database/github_user_db.py<gh_stars>0
from app.global_data.global_data import g
from app.domain.github_user import GithubUser
def create_github_user(github_user):
    """Insert a new GitHub-login to local-user mapping and return its id.

    Uses DB-API parameter binding instead of the previous str.format
    interpolation, which was vulnerable to SQL injection via the login
    values.
    """
    sql = '''
        INSERT INTO github_user (
            github_login,
            user_login
        ) VALUES (%s, %s)
    '''
    conn = g.db.pool.connection()
    with conn.cursor() as cursor:
        cursor.execute(sql, (github_user.githubLogin, github_user.userLogin))
        conn.commit()
        # NOTE(review): last_insert_id() is per-connection; the FROM/limit
        # clause is redundant but harmless right after the INSERT.
        cursor.execute('SELECT last_insert_id() FROM github_user limit 1')
        new_id = cursor.fetchone()[0]
    conn.close()
    return new_id
def find_github_user(github_login):
    """Return the GithubUser bound to *github_login*, or None if absent.

    Uses DB-API parameter binding instead of the previous str.format
    interpolation, which was vulnerable to SQL injection.
    """
    sql = 'SELECT * FROM github_user WHERE github_login = %s limit 1'
    conn = g.db.pool.connection()
    with conn.cursor() as cursor:
        cursor.execute(sql, (github_login,))
        records = cursor.fetchall()
    conn.close()
    # The LIMIT 1 guarantees at most one record.
    for record in records:
        github_user = GithubUser()
        github_user.from_record(record)
        return github_user
    return None
def update_github_user(github_user):
    """Update the local user bound to an existing GitHub mapping row.

    Uses DB-API parameter binding instead of the previous str.format
    interpolation, which was vulnerable to SQL injection.
    """
    sql = '''
        UPDATE github_user SET
        user_login = %s
        WHERE id = %s
    '''
    conn = g.db.pool.connection()
    with conn.cursor() as cursor:
        cursor.execute(sql, (github_user.user_login, github_user.id))
        conn.commit()
    conn.close()
| StarcoderdataPython |
3282961 | # Generated by Django 2.1.7 on 2020-02-08 03:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional ``loc_block`` foreign key
    to the Event model (nullable, cascading delete)."""
    dependencies = [
        ('scanner', '0017_address_locblock'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='loc_block',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='scanner.LocBlock'),
        ),
    ]
| StarcoderdataPython |
3214540 | <filename>calculadora.py
# Simple interactive calculator (prompts and messages are in Spanish).
# Read the first operand.
valorParaOperar1 = float(input("Introduzca el primer valor: "))
# Ask for the operation and re-prompt until a valid code (0-3) is entered.
operador = int(input("Qué operación quieres realizar?\n 0 - SUMAR\n1 - RESTAR\n2 - MULTIPLICAR\n3 - DIVIDIR: \n"))
while (operador!=0 and operador!=1 and operador!=2 and operador!=3):
    operador = int(input("Valor introducido no valido// Qué operación quieres realizar?\n 0 - SUMAR\n1 - RESTAR\n2 - MULTIPLICAR\n3 - DIVIDIR: \n"))
# Read the second operand.
valorParaOperar2 = float(input("Introduzca el segundo valor: "))
if operador == 0:
    result = valorParaOperar1 + valorParaOperar2
    print("El resultado de la suma: ", valorParaOperar1, " + ", valorParaOperar2, " = ", result)
elif operador == 1:
    result = valorParaOperar1 - valorParaOperar2
    print("El resultado de la resta: ", valorParaOperar1, " - ", valorParaOperar2, " = ", result)
elif operador == 2:
    result = valorParaOperar1 * valorParaOperar2
    print("El resultado de la multiplicacion: ", valorParaOperar1, " x ", valorParaOperar2, " = ", result)
elif operador==3:
    # Re-prompt until the divisor is non-zero to avoid ZeroDivisionError.
    while valorParaOperar2 ==0:
        valorParaOperar2 = float(input("División entre CERO, no valido. Introduzca otro divisor:"))
    result = valorParaOperar1 / valorParaOperar2
    print("El resultado de la division: ", valorParaOperar1, " / ", valorParaOperar2, " = ", result)
| StarcoderdataPython |
3304062 | <reponame>pi4alina/rollingpin
import datetime
import logging
import os
import time
class LogFormatter(logging.Formatter):
    """Formatter that prefixes a UTC timestamp and, when the record carries
    a ``host`` attribute, the originating host."""
    # Render asctime in UTC rather than local time.
    converter = time.gmtime
    def format(self, record):
        # NOTE(review): .decode("utf8") assumes Formatter.format returns a
        # Python 2 byte string; under Python 3 this raises AttributeError.
        # Confirm the supported interpreter version before changing.
        formatted = logging.Formatter.format(self, record).decode("utf8")
        if hasattr(record, "host"):
            formatted = ("[%10s] " % record.host) + formatted
        formatted = self.formatTime(record) + " " + formatted
        return formatted
def log_to_file(config, word):
    """Attach a timestamped DEBUG-level file handler to the root logger.

    Returns the path of the newly created log file.
    """
    timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = os.path.join(config["deploy"]["log-directory"],
                            timestamp + "-" + word + ".log")
    file_handler = logging.FileHandler(log_path, mode="w")
    file_handler.setFormatter(LogFormatter())
    file_handler.setLevel(logging.DEBUG)
    root_logger = logging.getLogger()
    # the default level for the root logger is WARNING, changing it to
    # NOTSET allows handlers to choose for themselves their verbosity
    root_logger.setLevel(logging.NOTSET)
    root_logger.addHandler(file_handler)
    return log_path
| StarcoderdataPython |
3219518 | <reponame>ahojukka5/scikit-fem
"""Restricting a problem to a subdomain.
.. note::
This example requires the external package `pygmsh <https://pypi.org/project/pygmsh/>`_.
The `ex17.py` example solved the steady-state heat equation with uniform
volumetric heating in a central wire surrounded by an annular insulating layer
of lower thermal conductivity. Here, the problem is completely restricted to
the wire, taking the temperature as zero throughout the annulus.
Thus the problem reduces to the same Poisson equation with uniform forcing and
homogeneous Dirichlet conditions:
.. math::
\nabla\cdot(k\nabla T) + A = 0, \qquad 0 < r < a
with
.. math::
T = 0, \qquad\text{on}\quad r = a.
The exact solution is
.. math::
T = \frac{s}{4k}(a^2 - r^2).
The novelty here is that the temperature is defined as a finite element function
throughout the mesh (:math:`r < b`) but only solved on a subdomain.
"""
from skfem import *
from skfem.models.poisson import laplace, unit_load
import numpy as np
from docs.examples.ex17 import mesh, basis, radii,\
joule_heating, thermal_conductivity
# Degrees of freedom lying in the insulating annulus: the temperature is
# pinned to zero there and only the wire subdomain is actually solved.
insulation = np.unique(basis.element_dofs[:, mesh.subdomains['insulation']])
# Assemble only over the wire elements; the removed assignments
# (a zeros placeholder for `temperature` that was immediately overwritten,
# and an unused `wire = basis.complement_dofs(insulation)`) were dead code.
wire_basis = InteriorBasis(mesh, basis.elem, elements=mesh.subdomains['wire'])
L = asm(laplace, wire_basis)
f = asm(unit_load, wire_basis)
# Condense out the insulation dofs (homogeneous Dirichlet) and solve.
temperature = solve(*condense(thermal_conductivity['wire'] * L,
                              joule_heating * f,
                              D=insulation))
if __name__ == '__main__':
    from os.path import splitext
    from sys import argv
    from skfem.visuals.matplotlib import draw, plot
    # Compare the computed central temperature against the closed-form
    # value s*a^2/(4k) from the module docstring.
    T0 = {'skfem': basis.interpolator(temperature)(np.zeros((2, 1)))[0],
          'exact':
          joule_heating * radii[0]**2 / 4 / thermal_conductivity['wire']}
    print('Central temperature:', T0)
    # Plot the nodal temperature field over the mesh and save it next to
    # this script as <script>_solution.png.
    ax = draw(mesh)
    plot(mesh, temperature[basis.nodal_dofs.flatten()],
         ax=ax, edgecolors='none', colorbar=True)
    ax.get_figure().savefig(splitext(argv[0])[0] + '_solution.png')
| StarcoderdataPython |
168936 | <reponame>mrlooi/maskrcnn-benchmark
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import ConvTranspose2d
# from maskrcnn_benchmark.modeling import registry
# @registry.ROI_MASK_PREDICTOR.register("MaskRCNNC4Classifier")
class MaskRCNNC4Classifier(nn.Module):
    """Binary classification head: global average pool followed by a
    single-logit linear layer."""
    def __init__(self, cfg, in_channels):
        super(MaskRCNNC4Classifier, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(in_channels, 1)
        # Small-variance init for the classifier weights, zero bias.
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
    def forward(self, x):
        pooled = self.avgpool(x)
        flattened = pooled.view(pooled.size(0), -1)
        return self.cls_score(flattened)
# def make_roi_mask_predictor(cfg, in_channels):
# func = registry.ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
# return func(cfg, in_channels)
| StarcoderdataPython |
3222977 | <filename>upsdata/config.py
# Module-level configuration shared by the UPS data helpers.
options = {
    # Message shown when the carrier cannot provide a delivery estimate.
    'unavailable_msg': "Delivery date unavailable, use tracking link for current information"
}
1627463 | from .tool.func import *
def give_admin_2(conn, name):
    """Render (GET) or apply (POST) an ACL change for user *name*.

    Owner-level targets may only be modified by an owner; a user can never
    change their own ACL.  Error paths redirect to /error/2 (unknown user)
    or /error/3 (permission denied).
    """
    curs = conn.cursor()
    # 1 means the current requester is an owner.
    owner = admin_check()
    curs.execute("select acl from user where id = ?", [name])
    user = curs.fetchall()
    if not user:
        return re_error('/error/2')
    else:
        if owner != 1:
            # Non-owners may not touch users whose current ACL is owner.
            curs.execute('select name from alist where name = ? and acl = "owner"', [user[0][0]])
            if curs.fetchall():
                return re_error('/error/3')
        # Requesters may not change their own ACL.
        if ip_check() == name:
            return re_error('/error/3')
        if flask.request.method == 'POST':
            if admin_check(7, 'admin (' + name + ')') != 1:
                return re_error('/error/3')
            if owner != 1:
                # Non-owners also may not assign the owner ACL.
                curs.execute('select name from alist where name = ? and acl = "owner"', [flask.request.form.get('select', None)])
                if curs.fetchall():
                    return re_error('/error/3')
            # 'X' demotes the target back to a plain user.
            if flask.request.form.get('select', None) == 'X':
                curs.execute("update user set acl = 'user' where id = ?", [name])
            else:
                curs.execute("update user set acl = ? where id = ?", [flask.request.form.get('select', None), name])
            conn.commit()
            return redirect('/admin/' + url_pas(name))
        else:
            if admin_check(7) != 1:
                return re_error('/error/3')
            # Build the <select> options: 'X' plus every ACL the requester
            # is allowed to assign, preselecting the target's current ACL.
            # NOTE(review): option HTML is built by string concatenation of
            # ACL names from the database -- confirm those names are
            # constrained, otherwise this is an XSS vector.
            div = '<option value="X">X</option>'
            curs.execute('select distinct name from alist order by name asc')
            for data in curs.fetchall():
                if user[0][0] == data[0]:
                    div += '<option value="' + data[0] + '" selected="selected">' + data[0] + '</option>'
                else:
                    if owner != 1:
                        curs.execute('select name from alist where name = ? and acl = "owner"', [data[0]])
                        if not curs.fetchall():
                            div += '<option value="' + data[0] + '">' + data[0] + '</option>'
                    else:
                        div += '<option value="' + data[0] + '">' + data[0] + '</option>'
            return easy_minify(flask.render_template(skin_check(),
                imp = [name, wiki_set(), custom(), other2([' (' + load_lang('authorize') + ')', 0])],
                data = '''
                    <form method="post">
                        <select name="select">''' + div + '''</select>
                        <hr class=\"main_hr\">
                        <button type="submit">''' + load_lang('save') + '''</button>
                    </form>
                ''',
                menu = [['manager', load_lang('return')]]
            ))
57718 | import uuid
from django.conf import settings
from django.db import models
from django.utils import timezone
class AbstractBase(models.Model):
    """Abstract Django base model providing a UUID primary key plus
    created/updated audit fields; inherit instead of instantiating."""
    # Random, non-editable UUID primary key.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    # Set once at creation; updated_at refreshes on every save().
    created_at = models.DateTimeField(default=timezone.now)
    updated_at = models.DateTimeField(auto_now=True)
    # Deleting the creator cascades to their records.
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='+'
    )
    # Optional; nulled (not cascaded) if the updating user is deleted.
    updated_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    def clean(self):
        # Default the updater to the creator on first validation.
        if self.updated_by is None and self.created_by is not None:
            self.updated_by = self.created_by
    class Meta:
        abstract = True
| StarcoderdataPython |
1640163 | from .main import main
__all__ = ["main"]
__version__ = '0.1.0' | StarcoderdataPython |
3323332 | import pandas as pd
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
df = pd.read_csv('homeprices.csv')
x = df[['area']]
y = df[['price']]

# Fit an ordinary least-squares line: price = coef * area + intercept.
model = linear_model.LinearRegression()
model.fit(x, y)

print(f'reg score: {model.score(x, y)}')
print(f'predict for 3300 area: {model.predict([[3300]])}')
print(f'reg coef: {model.coef_}')
print(f'reg intercept: {model.intercept_}')

# Persist the fitted model for later reuse.
import joblib
joblib.dump(model, 'linear_reg.pkl')
| StarcoderdataPython |
1755364 | from .normalize import Normalize
from .posterize import Posterize
from .img_calculator import ImageCalculator
| StarcoderdataPython |
3386611 | <reponame>tobias-fyi/vela<gh_stars>0
"""
Blockchain :: Sprint challenge
"""
import hashlib
import random
from timeit import default_timer as timer
from uuid import uuid4
import sys
import requests
def proof_of_work(last_proof):
    """Multi-Ouroboros Proof of Work Algorithm.

    - Find a number p' such that the last five digits of hash(p) are equal
      to the first five digits of hash(p')
    - IE: last_hash: ...AE912345, new hash 12345888...
    - p is the previous proof, and p' is the new proof
    - Use the same method to generate SHA-256 hashes as the examples in class
    """
    start = timer()
    print("Searching for next proof")
    # Find last_hash by hashing last proof; its tail is the target prefix.
    last_bytes = str(last_proof).encode()
    last_hash = hashlib.sha256(last_bytes).hexdigest()
    # Exhaustively try integer candidates.  The previous multiplicative
    # float stepping (proof *= 4 / 0.88) skipped almost every candidate,
    # produced non-integer proofs, and could loop forever.
    proof = 1
    while not valid_proof(last_hash, proof):
        proof += 1
    print("Proof found: " + str(proof) + " in " + str(timer() - start))
    return proof
def valid_proof(last_hash, proof):
    """Validates the Proof.

    Multi-ouroborus: the hash of the new proof must begin with the five
    characters that the hash of the last proof ends with.
    IE: last_hash: ...AE912345, new hash 12345E88...
    """
    candidate_hash = hashlib.sha256(str(proof).encode()).hexdigest()
    return candidate_hash[:5] == last_hash[-5:]
if __name__ == "__main__":
# What node are we interacting with?
if len(sys.argv) > 1:
node = sys.argv[1]
else:
node = "https://lambda-coin.herokuapp.com/api"
coins_mined = 0
# Load or create ID
with open("my_id.txt", "r") as f:
id = f.read().strip()
print(f"Good morning, {id}")
if id == "NONAME":
print("ERROR: You must change your name in `my_id.txt`!")
exit()
# Run forever until interrupted
while True:
# Get the last proof from the server
print("\nRequesting last proof from server...")
r = requests.get(url=node + "/last_proof")
# Parse the response to get last proof
try:
data = r.json()
except ValueError:
print("Error: Non-JSON response")
print(r)
break
# The Emperor's New Proof!
last_proof = data.get("proof")
print(f"Starting proof of work using last_proof: {last_proof}")
new_proof = proof_of_work(last_proof)
print("Sending to server...")
post_data = {"proof": new_proof, "id": id}
r = requests.post(url=node + "/mine", json=post_data)
data = r.json()
if data.get("message") == "New Block Forged":
print(f"{data.get('message')}!")
coins_mined += 1
print("Total coins mined: " + str(coins_mined))
else:
print(f"Error: {data.get('message')}")
| StarcoderdataPython |
1620209 | <filename>third_party/buildbot_8_4p1/buildbot/process/properties.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
import weakref
from buildbot import util
from buildbot.interfaces import IRenderable
from twisted.python.components import registerAdapter
from zope.interface import implements
class Properties(util.ComparableMixin):
    """
    A set of build properties that can be interpolated into various
    strings in buildsteps.

    @ivar properties: dictionary mapping property names to tuples
        (value, source), where source is a string identifying where
        the property came from.

    Objects of this class can be read like a dictionary -- in this case,
    only the property value is returned.

    As a special case, a property value of None is rendered as an empty
    string when accessed through the associated PropertyMap.
    """

    compare_attrs = ('properties',)

    def __init__(self, **kwargs):
        """
        @param kwargs: initial property values (for testing); stored with
            the literal source string "TEST".
        """
        self.properties = {}
        # Track keys which are 'runtime', and should not be
        # persisted if a build is rebuilt
        self.runtime = set()
        # PropertyMap keeps only a weakref back to us, so no reference cycle.
        self.pmap = PropertyMap(self)
        if kwargs: self.update(kwargs, "TEST")

    def __getstate__(self):
        # Drop the PropertyMap before pickling; it is rebuilt in __setstate__.
        d = self.__dict__.copy()
        del d['pmap']
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self.pmap = PropertyMap(self)
        # Instances pickled by older versions may predate the 'runtime' set.
        if not hasattr(self, 'runtime'):
            self.runtime = set()

    def __contains__(self, name):
        return name in self.properties

    def __getitem__(self, name):
        """Just get the value for this property (KeyError if unset)."""
        rv = self.properties[name][0]
        return rv

    def __nonzero__(self):
        # True iff at least one property is set (Python 2 truth protocol).
        return not not self.properties

    def has_key(self, name):
        return self.properties.has_key(name)

    def getProperty(self, name, default=None):
        """Get the value for the given property, or C{default} if unset."""
        return self.properties.get(name, (default,))[0]

    def getPropertySource(self, name):
        # Source string only; raises KeyError if the property is unset.
        return self.properties[name][1]

    def asList(self):
        """Return the properties as a sorted list of (name, value, source)"""
        l = [ (k, v[0], v[1]) for k,v in self.properties.iteritems() ]
        l.sort()
        return l

    def asDict(self):
        """Return the properties as a dictionary mapping each name to its
        (value, source) tuple -- note: the tuple, not the bare value."""
        return dict(self.properties)

    def __repr__(self):
        return ('Properties(**' +
                repr(dict((k,v[0]) for k,v in self.properties.iteritems())) +
                ')')

    def setProperty(self, name, value, source, runtime=False):
        # runtime=True marks the property as transient: it is skipped by
        # updateFromPropertiesNoRuntime (e.g. when a build is rebuilt).
        self.properties[name] = (value, source)
        if runtime:
            self.runtime.add(name)

    def update(self, dict, source, runtime=False):
        """Update this object from a dictionary, with an explicit source specified."""
        for k, v in dict.items():
            self.properties[k] = (v, source)
            if runtime:
                self.runtime.add(k)

    def updateFromProperties(self, other):
        """Update this object from another Properties object, including its
        runtime markers."""
        self.properties.update(other.properties)
        self.runtime.update(other.runtime)

    def updateFromPropertiesNoRuntime(self, other):
        """Update this object based on another object, but don't
        include properties that were marked as runtime."""
        for k,v in other.properties.iteritems():
            if k not in other.runtime:
                self.properties[k] = v
class PropertyMap:
    """
    Privately-used mapping object to implement WithProperties' substitutions,
    including the rendering of None as ''.

    Keys may carry shell-style modifiers:
      %(prop:-repl)s -- value of prop if set, else repl
      %(prop:~repl)s -- value of prop if set and truthy, else repl
      %(prop:+repl)s -- repl if prop is set, else ''
    """
    colon_minus_re = re.compile(r"(.*):-(.*)")
    colon_tilde_re = re.compile(r"(.*):~(.*)")
    colon_plus_re = re.compile(r"(.*):\+(.*)")

    def __init__(self, properties):
        # use weakref here to avoid a reference loop
        self.properties = weakref.ref(properties)
        # Temporary values take precedence over real properties; they back
        # WithProperties' keyword (lambda) substitutions.
        self.temp_vals = {}

    def __getitem__(self, key):
        # Dereference the weakref; the owning Properties must still be alive.
        properties = self.properties()
        assert properties is not None

        def colon_minus(mo):
            # %(prop:-repl)s
            # if prop exists, use it; otherwise, use repl
            prop, repl = mo.group(1,2)
            if prop in self.temp_vals:
                return self.temp_vals[prop]
            elif properties.has_key(prop):
                return properties[prop]
            else:
                return repl

        def colon_tilde(mo):
            # %(prop:~repl)s
            # if prop exists and is true (nonempty), use it; otherwise, use repl
            prop, repl = mo.group(1,2)
            if prop in self.temp_vals and self.temp_vals[prop]:
                return self.temp_vals[prop]
            elif properties.has_key(prop) and properties[prop]:
                return properties[prop]
            else:
                return repl

        def colon_plus(mo):
            # %(prop:+repl)s
            # if prop exists, use repl; otherwise, an empty string
            prop, repl = mo.group(1,2)
            if properties.has_key(prop) or prop in self.temp_vals:
                return repl
            else:
                return ''

        # Try each modifier pattern in turn; the for/else falls through to a
        # plain lookup when no modifier is present.
        for regexp, fn in [
            ( self.colon_minus_re, colon_minus ),
            ( self.colon_tilde_re, colon_tilde ),
            ( self.colon_plus_re, colon_plus ),
            ]:
            mo = regexp.match(key)
            if mo:
                rv = fn(mo)
                break
        else:
            # If explicitly passed as a kwarg, use that,
            # otherwise, use the property value.
            if key in self.temp_vals:
                rv = self.temp_vals[key]
            else:
                rv = properties[key]

        # translate 'None' to an empty string
        if rv is None: rv = ''

        return rv

    def add_temporary_value(self, key, val):
        'Add a temporary value (to support keyword arguments to WithProperties)'
        self.temp_vals[key] = val

    def clear_temporary_values(self):
        self.temp_vals = {}
class WithProperties(util.ComparableMixin):
    """
    Marker class wrapping a format string whose %-substitutions are filled
    in from build properties when the build is rendered.
    """

    implements(IRenderable)
    compare_attrs = ('fmtstring', 'args')

    def __init__(self, fmtstring, *args, **lambda_subs):
        self.fmtstring = fmtstring
        self.args = args
        if self.args:
            # Positional and keyword substitution styles are mutually exclusive.
            if lambda_subs:
                raise ValueError('WithProperties takes either positional or keyword substitutions, not both.')
        else:
            self.lambda_subs = lambda_subs
            for key, val in self.lambda_subs.iteritems():
                if not callable(val):
                    raise ValueError('Value for lambda substitution "%s" must be callable.' % key)

    def getRenderingFor(self, build):
        # The PropertyMap implements %(prop)s lookups, including the
        # :-, :~ and :+ modifiers and the None -> '' translation.
        pmap = build.getProperties().pmap
        if self.args:
            # Positional style: look each named property up, then %-format.
            filled = tuple(pmap[name] for name in self.args)
            rendered = self.fmtstring % filled
        else:
            # Keyword style: expose each lambda's result as a temporary
            # value, format against the map itself, then clean up.
            for key, func in self.lambda_subs.iteritems():
                pmap.add_temporary_value(key, func(build))
            rendered = self.fmtstring % pmap
            pmap.clear_temporary_values()
        return rendered
class Property(util.ComparableMixin):
    """
    Renders a single named property of a build, with an optional fallback.
    """

    implements(IRenderable)
    compare_attrs = ('key','default', 'defaultWhenFalse')

    def __init__(self, key, default=None, defaultWhenFalse=True):
        """
        @param key: Property to render.
        @param default: Value to use if property isn't set.
        @param defaultWhenFalse: When true (default), use default value
            if property evaluates to False. Otherwise, use default value
            only when property isn't set.
        """
        self.key = key
        self.default = default
        self.defaultWhenFalse = defaultWhenFalse

    def getRenderingFor(self, build):
        if not self.defaultWhenFalse:
            # Fall back to the default only when the property is unset.
            return build.getProperty(self.key, default=self.default)
        # Fall back whenever the property's value is falsy.
        return build.getProperty(self.key) or self.default
class _DefaultRenderer:
    """
    Default IRenderable adaptor.  Delegates to the wrapped value's
    getRenderingFor when it has one; otherwise the value renders as itself.
    """

    implements(IRenderable)

    def __init__(self, value):
        # Probe for getRenderingFor with a sentinel so that only a missing
        # attribute (AttributeError) triggers the identity fallback.
        _absent = object()
        renderer = getattr(value, 'getRenderingFor', _absent)
        if renderer is _absent:
            renderer = lambda _: value
        self.renderer = renderer

    def getRenderingFor(self, build):
        return self.renderer(build)

registerAdapter(_DefaultRenderer, object, IRenderable)
class _ListRenderer:
    """
    List IRenderable adaptor: renders every element of the wrapped list
    through Build.render, returning a new list.
    """

    implements(IRenderable)

    def __init__(self, value):
        self.value = value

    def getRenderingFor(self, build):
        render = build.render
        return [render(element) for element in self.value]

registerAdapter(_ListRenderer, list, IRenderable)
class _TupleRenderer:
    """
    Tuple IRenderable adaptor: renders every element of the wrapped tuple
    through Build.render, returning a new tuple.
    """

    implements(IRenderable)

    def __init__(self, value):
        self.value = value

    def getRenderingFor(self, build):
        render = build.render
        return tuple(render(element) for element in self.value)

registerAdapter(_TupleRenderer, tuple, IRenderable)
class _DictRenderer:
    """
    Dict IRenderable adaptor: renders both the keys and the values of the
    wrapped dict through Build.render, returning a new dict.
    """

    implements(IRenderable)

    def __init__(self, value):
        self.value = value

    def getRenderingFor(self, build):
        render = build.render
        return dict((render(key), render(val)) for key, val in self.value.iteritems())

registerAdapter(_DictRenderer, dict, IRenderable)
| StarcoderdataPython |
99792 | # Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create your views here.
import re
import django
import tile
from ndtilecacheerror import NDTILECACHEError
import logging
logger=logging.getLogger("ndtilecache")
def getVikingTile(request, webargs):
"""Return a viking tile"""
try:
# argument of format token/volume/channel/resolution/Xtile_Ytile_Ztile
m = re.match(r'(\w+)/volume/(\w+)/(\d+)/X(\d+)_Y(\d+)_Z(\d+).png$', webargs)
[token, channel, resolution, xtile, ytile, ztile] = [i for i in m.groups()]
# rewriting args here into catmaid format token/channel/slice_type/z/y_x_res.png
webargs = '{}/{}/xy/{}/{}_{}_{}.png'.format(token, channel, ztile, ytile, xtile, resolution)
response = getTile(request, webargs)
response['content-length'] = len(response.content)
return response
except Exception, e:
raise
return django.http.HttpResponseNotFound(e)
def getTile(request, webargs):
"""Return a tile or load the cache"""
# Parse the tile request and turn it into an nd request
try:
# argument of format /mcfc(optional)/token/channel_list/slice_type/time(optional)/z/y_x_res.png
m = re.match("(?P<mcfc>mcfc/)?(\w+)/([\w+,:]+)/(\w+)/(\d/)?(\d+)/(\d+)_(\d+)_(\d+).png$", webargs)
[mcfc, token, channels, slice_type] = [i for i in m.groups()[:4]]
except Exception, e:
logger.warning("Incorrect arguments {}. {}".format(webargs, e))
raise NDTILECACHEError("Incorrect arguments {}. {}".format(webargs, e))
if mcfc is not None:
# arguments of the form channel:color,channel:color OR channel,channel
channels, colors = zip(*re.findall("(\w+)[:]?(\w)?", channels))
orignal_colors = ('C','M','Y','R','G','B')
# checking for a non-empty list
if not not filter(None, colors):
# if it is a mixed then replace the missing ones with the existing schema
colors = [ b if a is u'' else a for a,b in zip(colors, orignal_colors)]
else:
colors = orignal_colors
else:
try:
# only a single channel if not mcfc cutout
channels = re.match("(\w+)$", channels).groups()
colors = None
except Exception, e:
logger.warning("Incorrect channel {} for simple cutout. {}".format(channels, e))
raise NDTILECACHEError("Incorrect channel {} for simple cutout. {}".format(channels, e))
if slice_type == 'xy':
[tvalue, zvalue, yvalue, xvalue, res] = [int(i.strip('/')) if i is not None else None for i in m.groups()[4:]]
elif slice_type == 'xz':
[tvalue, yvalue, zvalue, xvalue, res] = [int(i.strip('/')) if i is not None else None for i in m.groups()[4:]]
elif slice_type == 'yz':
[tvalue, xvalue, zvalue, yvalue, res] = [int(i.strip('/')) if i is not None else None for i in m.groups()[4:]]
try:
t = tile.Tile(token, slice_type, res, xvalue, yvalue, zvalue, tvalue, channels, colors)
tiledata = t.fetch()
return django.http.HttpResponse(tiledata, content_type='image/png')
except Exception, e:
raise
return django.http.HttpResponseNotFound(e)
| StarcoderdataPython |
3363575 | # -*- coding:UTF-8 -*-
import requests
import openpyxl
import cookies
import random
import time
import os
from bs4 import BeautifulSoup
from openpyxl import workbook
class Spider():
def __init__(self):
print("[INFO]: Maoyan Spider...")
print("[Author]: mwteck")
self.url = "http://maoyan.com/films"
self.domain = "http://maoyan.com"
def main(self, page = 100):
    """Crawl up to `page` listing pages and persist the scraped films.

    @param page: maximum number of listing pages to fetch (30 films each).
    """
    # BUG FIX: was `self.Get.File_Info(page)` -- a typo that raised
    # AttributeError (Spider has no attribute `Get`); the scraper method
    # is Get_File_Info.
    films = self.Get_File_Info(page)
    # save_to_excel is presumably defined further down this class
    # (outside the visible chunk) -- confirm it exists.
    self.save_to_excel(films)
    # Scrape film information from the listing pages
def Get_File_Info(self, page = 100):
print("[INFO]: Start to pick up info ...")
i = 0
error_num = 0
films = []
flag = True
while True:
if (error_num > 2) or (i > page-1):
break
print("[INFO]: Getting Page %s %(i+1)")
if i < 100 and flag:
res = requests.get(self.url.format(i*30), headers = self.get_headers(False))
else:
res = requests.get(self.url.format(i*30), headers = self.get_headers(True))
soup = BeautifulSoup(res.text, 'lxml')
temp1 = soup.find_all('dev', attrs={'class': 'channel-detail movie-item-title'})
if len(temp1) < 1:
flag = False
error_num += 1
print("[ERROR]: Page %s void ... "% (i+1))
i += 1
continue
temp2 = soup.find_all('dev', attrs={'class': 'channel-detail channel-detail-orange'})
error_num2 = 0
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.