id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6653829 | import csv
import random
import torch
import pandas as pd
from glob import glob
from collections import Counter
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
# import constants
from constants import *
class ModelData(Dataset):
    """Torch dataset over (head, relation, tail, demographic, probability) rows.

    All rows are converted to index tensors via the supplied vocabularies and
    moved to DEVICE (from constants) once, at construction time.
    """

    def __init__(self, raw_data, entity_vocab, relation_vocab, demographic_vocab):
        super(ModelData, self).__init__()
        triple_ids = []
        demographic_ids = []
        probability_values = []
        for head, relation, tail, demographic, probability in raw_data:
            triple_ids.append([entity_vocab[head], relation_vocab[relation], entity_vocab[tail]])
            demographic_ids.append(demographic_vocab[demographic])
            probability_values.append(probability)
        self.triples = torch.LongTensor(triple_ids).to(DEVICE)
        self.demographics = torch.LongTensor(demographic_ids).to(DEVICE)
        self.probabilities = torch.FloatTensor(probability_values).to(DEVICE)
        self.triples_num = len(self.triples)

    def __len__(self):
        # One sample per raw row.
        return self.triples_num

    def __getitem__(self, item):
        # TRIPLE / DEMOGRAPHIC / PROBABILITY keys come from constants.
        sample = {
            TRIPLE: self.triples[item],
            DEMOGRAPHIC: self.demographics[item],
            PROBABILITY: self.probabilities[item],
        }
        return sample
class MedicalKG:
    """Load a medical knowledge graph (train/val/test TSV splits), build the
    entity / relation / demographic vocabularies, and wrap each split in a
    ModelData dataset.

    ROOT_PATH, args and the column-name constants come from ``constants``
    (star import at the top of the file).
    """

    def __init__(self):
        self.data_path = str(ROOT_PATH) + args.data_path
        self.read_data()
        self.create_vocabs()
        self.create_model_data()

    def read_file_with_pandas(self, path, col_sep='\t',
                              col_names=(HEAD, RELATION, TAIL, DEMOGRAPHIC, PROBABILITY)):
        """Read a headerless separated file into a DataFrame.

        ``col_names`` defaults to an immutable tuple (was a list) to avoid the
        mutable-default-argument pitfall; callers may still pass any sequence.
        """
        return pd.read_csv(path,
                           sep=col_sep,
                           header=None,
                           names=list(col_names),
                           keep_default_na=False,
                           encoding='utf-8')

    def read_data(self):
        """Load the raw train / validation / test DataFrames."""
        self.train_raw_data = self.read_file_with_pandas(f'{self.data_path}/train.txt')
        self.val_raw_data = self.read_file_with_pandas(f'{self.data_path}/val.txt')
        self.test_raw_data = self.read_file_with_pandas(f'{self.data_path}/test.txt')

    def create_vocabs(self):
        """Build token -> index vocabularies.

        Entities and demographics are collected over all three splits;
        relations are taken from the train split only (presumably every
        relation appears in train -- an unseen relation in val/test would
        raise a KeyError inside ModelData; TODO confirm).
        """
        # extract train parts
        train_head = Counter(self.train_raw_data[HEAD])
        train_relation = Counter(self.train_raw_data[RELATION])
        train_tail = Counter(self.train_raw_data[TAIL])
        train_demographic = Counter(self.train_raw_data[DEMOGRAPHIC])
        # extract val parts
        val_head = Counter(self.val_raw_data[HEAD])
        val_relation = Counter(self.val_raw_data[RELATION])
        val_tail = Counter(self.val_raw_data[TAIL])
        val_demographic = Counter(self.val_raw_data[DEMOGRAPHIC])
        # extract test parts
        test_head = Counter(self.test_raw_data[HEAD])
        test_relation = Counter(self.test_raw_data[RELATION])
        test_tail = Counter(self.test_raw_data[TAIL])
        test_demographic = Counter(self.test_raw_data[DEMOGRAPHIC])
        # create lists with entities, relations and demographics
        # (Counter addition preserves first-seen insertion order, so the
        # resulting index assignment is deterministic).
        entity_list = list((train_head + val_head + test_head + train_tail + val_tail + test_tail).keys())
        relation_list = list(train_relation.keys())
        demographic_list = list((train_demographic + val_demographic + test_demographic).keys())
        # create entity, relation and demographic vocabularies
        self.entity_vocab = {word: i for i, word in enumerate(entity_list)}
        self.relation_vocab = {word: i for i, word in enumerate(relation_list)}
        self.demographic_vocab = {word: i for i, word in enumerate(demographic_list)}

    def create_model_data(self):
        """Wrap each raw split in a ModelData dataset using the vocabularies."""
        self.train_data = ModelData(self.train_raw_data.values, self.entity_vocab, self.relation_vocab, self.demographic_vocab)
        self.val_data = ModelData(self.val_raw_data.values, self.entity_vocab, self.relation_vocab, self.demographic_vocab)
        self.test_data = ModelData(self.test_raw_data.values, self.entity_vocab, self.relation_vocab, self.demographic_vocab)

    def get_vocabs(self):
        """Return the three vocabularies keyed by the ENTITY/RELATION/DEMOGRAPHIC constants."""
        return {
            ENTITY: self.entity_vocab,
            RELATION: self.relation_vocab,
            DEMOGRAPHIC: self.demographic_vocab
        }

    def get_data(self):
        """Return the (train, val, test) ModelData datasets."""
        return self.train_data, self.val_data, self.test_data
| StarcoderdataPython |
5188637 | <reponame>ViniGarcia/FlexibleNFV-RA<filename>CHEF/CHEF.py
########### CHEF CLASS DESCRIPTION ############
#PROJECT: NFV FLERAS (FLExible Resource Allocation Service)
#CREATED BY: <NAME>
#CONTACT: <EMAIL>
#RECEIVES A DICTIONARY OF EVALUATION METRICS (ID:(#OBJECTIVE,
#WEIGHT)) AND A DICTIONARY OF PARTIAL RESULTS (METRIC
#EVALUATIONS) FOR EACH CANDIDATE ID OF A DEPLOYMENT STAGE
#(DICTIONARY OF DICTIONARIES). IT USES THESE PARTIAL RESULTS
#TO CALCULATE THE SUITABILITY INDEXES FOR THE CANDIDATES. IT
#RETURNS A DICTIONARY OF CANDIDATE IDS WITH THEIR RESPECTIVE
#SUITABILITY INDEXES (FLOAT VALUES).
#THE CLASS STATUS ATTRIBUTE INDICATE ITS
#OPERATIONS RESULTS CODES:
#NORMAL CODES ->
#0: IDLE STATE (WAITING FOR METRICS CONF.)
#1: ACTIVE STATE (WAITING FOR PARTIAL RESULTS)
#2: EVALUATION SUCCESS
#ERROR CODES ->
#-1 -> Wrong argument for configuration
#-2 -> Wrong element in metrics dictionary
#-3 -> Wrong argument in metric obj. description
#-4 -> Invalid metric objective
#-5 -> Wrong argument in metric weight description
#-6 -> Invalid metric weight
#-7 -> CHEF is not configured
#-8 -> Wrong argument for evaluation
#-9 -> Wrong argument for partial result
#-10 -> Partial results does not match with evaluation metrics
#-11 -> invalid value for partial result
#################################################
from itertools import combinations
from statistics import mean
from numpy import array
from scipy import stats
from copy import deepcopy
############### CHEF CLASS BEGIN ################
class CHEF:
    # Internal state; result codes are listed in the header comment above.
    __status = None          # last operation result code
    __evalMetrics = None     # {metric id: ("MAX"|"MIN", weight)}
    __partialResults = None  # {candidate id: {metric id: value}}
    __lastIndexing = None    # {candidate id: suitability index} from last evaluation

    ######## CONSTRUCTOR ########
    def __init__(self, evalMetrics):
        # With no metrics, start idle (status 0) until cConfigure is called.
        if evalMetrics == None:
            self.__status = 0
        else:
            self.cConfigure(evalMetrics)

    ######## PRIVATE METHODS ########
    def __cSI(self):
        # Compute every candidate's suitability index: min-max normalize each
        # metric across candidates, scale by the metric's weight share
        # (weight / weightSum), then sum the per-metric scores per candidate.
        partialResults = {}
        weightSum = 0
        for metric in self.__evalMetrics:
            partialResults[metric] = [candidate[metric] for candidate in self.__partialResults.values()]
            weightSum += self.__evalMetrics[metric][1]
        for metric in partialResults:
            partialResults[metric] = array(partialResults[metric])
            if self.__evalMetrics[metric][0] == "MAX":
                if partialResults[metric].max(axis=0) != partialResults[metric].min(axis=0):
                    # Maximization objective: higher raw value -> higher score.
                    partialResults[metric] = ((partialResults[metric] - partialResults[metric].min(axis=0)) / (partialResults[metric].max(axis=0) - partialResults[metric].min(axis=0))) * self.__evalMetrics[metric][1] / weightSum
                else:
                    # All candidates tie on this metric: each one gets the full weight share.
                    partialResults[metric] = [self.__evalMetrics[metric][1] / weightSum for candidate in self.__partialResults]
            else:
                if partialResults[metric].max(axis=0) != partialResults[metric].min(axis=0):
                    # Minimization objective: lower raw value -> higher score.
                    partialResults[metric] = (partialResults[metric].max(axis=0) - partialResults[metric]) / (partialResults[metric].max(axis=0) - partialResults[metric].min(axis=0)) * self.__evalMetrics[metric][1] / weightSum
                else:
                    partialResults[metric] = [self.__evalMetrics[metric][1] / weightSum for candidate in self.__partialResults]
        self.__lastIndexing = {}
        keys = list(self.__partialResults.keys())
        for index in range(len(self.__partialResults)):
            # Suitability index = sum of the candidate's weighted metric scores.
            self.__lastIndexing[keys[index]] = sum([candidate[index] for candidate in partialResults.values()])
        return self.__lastIndexing

    def __cPearson(self, samples):
        #samples: {cKey:{mKey:$float ...}...}
        # Returns {(metricA, metricB): (pearson r, p-value)} for every metric
        # pair; r is sign-flipped when the two metrics have opposite objectives.
        mKeys = list(self.__evalMetrics.keys())
        mValues = [[] for key in mKeys]
        mCoefficients = {}
        for cKey in samples:
            for index in range(len(mKeys)):
                mValues[index].append(samples[cKey][mKeys[index]])
        mCombinations = combinations(range(len(mKeys)), 2)
        for combination in mCombinations:
            mPearson = stats.pearsonr(mValues[combination[0]], mValues[combination[1]])
            if self.__evalMetrics[mKeys[combination[0]]][0] == self.__evalMetrics[mKeys[combination[1]]][0]:
                mCoefficients[mKeys[combination[0]], mKeys[combination[1]]] = (mPearson[0], mPearson[1])
            else:
                # Opposite objectives (MAX vs MIN): invert the correlation sign.
                mCoefficients[mKeys[combination[0]], mKeys[combination[1]]] = (mPearson[0] * -1, mPearson[1])
        return mCoefficients

    def __cBias(self, correlatedBiases):
        # Rebalance weights: each group of transitively correlated metrics
        # collectively keeps only its heaviest member's weight (split among the
        # group proportionally); the freed weight is re-spread over all metrics.
        def cRecursiveBias(metric, checked, aggregation, weights):
            # Transitively collect every metric correlated with ``metric``.
            for bias in correlatedBiases[metric]:
                if bias in checked:
                    continue
                checked.append(bias)
                aggregation.append(bias)
                weights.append(self.__evalMetrics[bias][1])
                cRecursiveBias(bias, checked, aggregation, weights)
        nonBiasesMetrics = {}
        checkedMetrics = []
        reallocWeight = 0
        for metric in correlatedBiases:
            if metric in checkedMetrics:
                continue
            if len(correlatedBiases[metric]) == 0:
                # Uncorrelated metric: weight kept as-is.
                nonBiasesMetrics[metric] = self.__evalMetrics[metric][1]
            else:
                aggregatedMetrics = []
                aggregatedWeights = []
                checkedMetrics.append(metric)
                aggregatedMetrics.append(metric)
                aggregatedWeights.append(self.__evalMetrics[metric][1])
                cRecursiveBias(metric, checkedMetrics, aggregatedMetrics, aggregatedWeights)
                maxWeight = max(aggregatedWeights)
                sumWeight = sum(aggregatedWeights)
                # Everything beyond the group's max weight is freed for reallocation.
                reallocWeight += sum(aggregatedWeights) - maxWeight
                for index in range(len(aggregatedMetrics)):
                    # Split the group's max weight proportionally to original weights.
                    nonBiasesMetrics[aggregatedMetrics[index]] = maxWeight * (aggregatedWeights[index] / sumWeight)
        for metric in nonBiasesMetrics:
            # Re-spread the freed weight proportionally so the totals are preserved.
            nonBiasesMetrics[metric] = nonBiasesMetrics[metric] + (nonBiasesMetrics[metric] / (1 - reallocWeight)) * reallocWeight
        return nonBiasesMetrics

    ######## PUBLIC METHODS ########
    def cConfigure(self, evalMetrics):
        """Validate and install the metric configuration
        ({id: ("MAX"|"MIN", weight > 0)}). Returns 1 on success or a negative
        error code (see header table); the code is also stored as the status.
        """
        if not isinstance(evalMetrics, dict):
            self.__status = -1
            return -1
        for key in evalMetrics:
            if not isinstance(evalMetrics[key], tuple):
                self.__status = -2
                return -2
            if not isinstance(evalMetrics[key][0], str):
                self.__status = -3
                return -3
            if evalMetrics[key][0] != "MAX" and evalMetrics[key][0] != "MIN":
                self.__status = -4
                return -4
            if not isinstance(evalMetrics[key][1], float) and not isinstance(evalMetrics[key][1], int):
                self.__status = -5
                return -5
            if evalMetrics[key][1] <= 0:
                self.__status = -6
                return -6
        self.__evalMetrics = evalMetrics
        self.__partialResults = None
        self.__lastIndexing = None
        self.__status = 1
        return 1

    def cPreprocess(self, metricSamples, correlationLevel = 0.95):
        """Detect strongly correlated metric pairs in ``metricSamples`` and
        rebalance the configured weights to remove the correlation bias."""
        correlatedBiases = {key:[] for key in self.__evalMetrics.keys()}
        linearInspection = self.__cPearson(metricSamples)
        for inspection in linearInspection:
            if abs(linearInspection[inspection][0]) >= correlationLevel:
                if linearInspection[inspection][0] > 0:
                    # Only positive (objective-adjusted) correlation counts as bias.
                    correlatedBiases[inspection[0]].append(inspection[1])
                    correlatedBiases[inspection[1]].append(inspection[0])
        nonBiasesWeights = self.__cBias(correlatedBiases)
        for metric in nonBiasesWeights:
            self.__evalMetrics[metric] = (self.__evalMetrics[metric][0], nonBiasesWeights[metric])

    def cEvaluate(self, partialResults):
        """Validate ``partialResults`` ({candidate: {metric: number}}) and
        return the candidates' suitability indexes, or a negative error code."""
        if not self.__status == 1:
            return -7
        if not isinstance(partialResults, dict):
            return -8
        metricKeys = list(self.__evalMetrics.keys())
        for rKey in partialResults:
            if not isinstance(partialResults[rKey], dict):
                return -9
            if partialResults[rKey].keys() != set(metricKeys):
                return -10
            for mKey in partialResults[rKey]:
                if not isinstance(partialResults[rKey][mKey], float) and not isinstance(partialResults[rKey][mKey], int):
                    return -11
        self.__partialResults = partialResults
        self.__lastIndexing = None
        return self.__cSI()

    def getStatus(self):
        # Last operation result code (see header table).
        return self.__status

    def getIndexing(self):
        # Result of the most recent evaluation (None if none yet).
        return self.__lastIndexing

    def getPartialResults(self):
        return self.__partialResults

    def getEvalMetrics(self):
        return self.__evalMetrics
################ CHEF CLASS END ################# | StarcoderdataPython |
3275424 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Create list which can be used in https://github.com/edsu/anon
# Строит список, который можно использовать в https://github.com/edsu/anon
import json
# NOTE: Python 2 source (``print`` statement).
def generate(filename, lang):
    """Print a JSON object mapping each entry's short name (in *lang*)
    to its IP ranges, in the {'ranges': {...}} shape expected by anon."""
    ranges = {'ranges':{}}
    with open(filename) as f:
        jsonranges = json.load(f)
    for entry in jsonranges:
        ranges['ranges'][entry['name'][lang]['short']] = entry['ranges']
    print json.dumps(ranges, ensure_ascii = False, sort_keys=True, indent=2, separators=(',', ': '))

# Emit one mapping per supported language.
generate('ranges.json','uk')
generate('ranges.json','ru')
generate('ranges.json','en')
| StarcoderdataPython |
9737005 | #!/usr/bin/env python
"""
FINISHED, <NAME>
"""
import sys, argparse
# NOTE: Python 2 source (``print`` statements).
if __name__ == '__main__':
    # Single optional input argument; argparse.FileType with default '-' reads stdin.
    parser = argparse.ArgumentParser(description=
        'Input: output of dfammer.py. Ouput: list of classified RepeatModeler repeats, not \
classified by RepeatClassifier or by BLASTX homology to RepeatPeps.lib')
    parser.add_argument('-i', '--input', action='store', help='', type=argparse.FileType('r'), default = '-')
    args = parser.parse_args()
    tmp = ""  # NOTE(review): unused -- apparently left over from an earlier version
    for line in args.input:
        linesplit = line.split()
        # Third whitespace column holds "name#family"; emit "name#<class>\t<original field>".
        if "DNA" in line:
            print linesplit[2].split("#")[0] + "#" + "DNA" + "\t" + linesplit[2]
        if "SINE" in line:
            print linesplit[2].split("#")[0] + "#" + "SINE" + "\t" + linesplit[2]
        if "LINE" in line:
            print linesplit[2].split("#")[0] + "#" + "LINE" + "\t" + linesplit[2]
        # LTR, excluding lines annotated as "non-LTR".
        if "non-LTR" not in line and "LTR" in line:
            print linesplit[2].split("#")[0] + "#" + "LTR" + "\t" + linesplit[2]
| StarcoderdataPython |
3319792 | # -*- coding: utf-8 -*-
# Scrapy settings for for_360pi project
from datetime import datetime
import os
BOT_NAME = 'for_360pi'
SPIDER_MODULES = ['for_360pi.spiders']
NEWSPIDER_MODULE = 'for_360pi.spiders'

# Logging: one timestamped log file per crawl run, under ./LOGS/ in the
# current working directory.
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
timestmp = datetime.now().strftime('%Y-%b-%d:%I-%M-%p')
LOG_FILE = os.getcwd() + '/LOGS/' + BOT_NAME + '-' + timestmp + '.log'

# Export results, as a json feed, to file: one timestamped JSON file per
# spider under <parent dir>/spiders/FEEDS/<spider name>/.
FEED_DIR = '/'.join(os.getcwd().split('/')[:-1]) + '/spiders/FEEDS'
FEED_URI = 'file:///' + FEED_DIR + '/%(name)s' + '/' + timestmp + '.json'
FEED_FORMAT = 'json'
FEED_EXPORTERS = {
    'json': 'scrapy.contrib.exporter.JsonItemExporter',
}
| StarcoderdataPython |
1663454 | <reponame>secretppcdc/secretppcdc.github.com<gh_stars>1-10
import keras
import cv2
import os
import numpy as np
path = './weight/model.h5'
path_image = './testset/'
thre = 0.2  # per-class probability threshold

# Single-image input buffer reused across predictions.
feed_test = np.zeros(shape=(1, 224, 224, 3))
model_vgg = keras.models.load_model(path)

# Write one line per test image: "<filename> <label> ...", where each label is
# '1' if that class probability exceeds thre, else '-1'.
# ``with`` guarantees the output file is closed even if prediction fails
# part-way (the original leaked the handle on an uncaught error).
with open('predictions.txt', 'w') as f:
    for class_dir in os.listdir(path_image):
        for image_name in os.listdir(path_image + class_dir):
            try:
                image = cv2.imread(path_image + class_dir + '/' + image_name)
                image = cv2.resize(image, (224, 224))
                feed_test[0, :, :, :] = image
                result = model_vgg.predict(feed_test)[0]
                # Renamed loop variable: the original comprehension reused ``i``,
                # shadowing the outer directory-loop variable.
                labels = ['1' if score > thre else '-1' for score in result]
                f.write(image_name + ' ' + ' '.join(labels) + '\n')
            except Exception:
                # cv2.imread returns None for unreadable files, which makes
                # cv2.resize raise; log the file name and continue. Narrowed
                # from a bare ``except:`` so Ctrl-C / SystemExit still work.
                print(image_name)
| StarcoderdataPython |
6693718 | """
sentry.tsdb.redis
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import operator
from binascii import crc32
from collections import defaultdict, namedtuple
from datetime import timedelta
from hashlib import md5
import six
from django.utils import timezone
from pkg_resources import resource_string
from redis.client import Script
from sentry.tsdb.base import BaseTSDB
from sentry.utils.dates import to_timestamp
from sentry.utils.redis import check_cluster_versions, get_cluster_from_options
from sentry.utils.versioning import Version
logger = logging.getLogger(__name__)

# (depth, width) size the Count-Min sketch estimation matrix; ``capacity`` is
# the size of the exact top-N index kept before falling back to estimation.
SketchParameters = namedtuple('SketchParameters', 'depth width capacity')

# Lua script implementing the frequency-table operations. Registered with no
# client (None), so every invocation must supply one explicitly.
CountMinScript = Script(
    None,
    resource_string('sentry', 'scripts/tsdb/cmsketch.lua'),
)
class RedisTSDB(BaseTSDB):
    """
    A time series storage backend for Redis.

    The time series API supports three data types:

        * simple counters
        * distinct counters (number of unique elements seen)
        * frequency tables (a set of items ranked by most frequently observed)

    The backend also supports virtual nodes (``vnodes``) which controls shard
    distribution. This value should be set to the anticipated maximum number of
    physical hosts and not modified after data has been written.

    Simple counters are stored in hashes. The key of the hash is composed of
    the model, epoch (which defines the start of the rollup period), and a
    shard identifier. This allows TTLs to be applied to the entire bucket,
    instead of having to be stored for every individual element in the rollup
    period. This results in a data layout that looks something like this::

        {
            "<model>:<epoch>:<shard id>": {
                "<key>": value,
                ...
            },
            ...
        }

    Distinct counters are stored using HyperLogLog, which provides a
    cardinality estimate with a standard error of 0.8%. The data layout looks
    something like this::

        {
            "<model>:<epoch>:<key>": value,
            ...
        }

    Frequency tables are modeled using two data structures:

        * top-N index: a sorted set containing the most frequently observed items,
        * estimation matrix: a hash table containing counters, used in a Count-Min sketch

    Member scores are 100% accurate until the index is filled (and no memory is
    used for the estimation matrix until this point), after which the data
    structure switches to a probabilistic implementation and accuracy begins to
    degrade for less frequently observed items, but remains accurate for more
    frequently observed items.

    Frequency tables are especially useful when paired with a (non-distinct)
    counter of the total number of observations so that scores of items of the
    frequency table can be displayed as percentages of the whole data set.

    (Additional documentation and the bulk of the logic for implementing the
    frequency table API can be found in the ``cmsketch.lua`` script.)
    """
    # NOTE(review): Python 2 code -- relies on ``unicode`` and ``dict.iteritems``.

    # Default Count-Min sketch: depth 3, width 128, exact top-50 index.
    DEFAULT_SKETCH_PARAMETERS = SketchParameters(3, 128, 50)

    def __init__(self, prefix='ts:', vnodes=64, **options):
        # ``vnodes`` controls shard distribution and must not be changed once
        # data has been written (see class docstring).
        self.cluster, options = get_cluster_from_options('SENTRY_TSDB_OPTIONS', options)
        self.prefix = prefix
        self.vnodes = vnodes
        self.enable_frequency_sketches = options.pop('enable_frequency_sketches', False)
        super(RedisTSDB, self).__init__(**options)

    def validate(self):
        """Check that every cluster node runs a Redis version new enough for
        the features in use (frequency sketches need a newer Redis)."""
        logger.debug('Validating Redis version...')
        version = Version((2, 8, 18)) if self.enable_frequency_sketches else Version((2, 8, 9))
        check_cluster_versions(
            self.cluster,
            version,
            recommended=Version((2, 8, 18)),
            label='TSDB',
        )

    def make_key(self, model, rollup, timestamp, key):
        """
        Make a key that is used for distinct counter and frequency table
        values.
        """
        return '{prefix}{model}:{epoch}:{key}'.format(
            prefix=self.prefix,
            model=model.value,
            epoch=self.normalize_ts_to_rollup(timestamp, rollup),
            key=self.get_model_key(key),
        )

    def make_counter_key(self, model, epoch, model_key):
        """
        Make a key that is used for counter values.
        """
        # Map the model key onto one of ``vnodes`` shards; integers shard by
        # modulo directly, other keys by CRC32 so placement stays stable.
        if isinstance(model_key, six.integer_types):
            vnode = model_key % self.vnodes
        else:
            vnode = crc32(model_key) % self.vnodes
        return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)

    def get_model_key(self, key):
        # We specialize integers so that a pure int-map can be optimized by
        # Redis, whereas long strings (say tag values) will store in a more
        # efficient hashed format.
        if not isinstance(key, six.integer_types):
            # enforce utf-8 encoding
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            return md5(repr(key)).hexdigest()
        return key

    def incr(self, model, key, timestamp=None, count=1):
        """Increment a single counter; convenience wrapper around incr_multi."""
        self.incr_multi([(model, key)], timestamp, count)

    def incr_multi(self, items, timestamp=None, count=1):
        """
        Increment project ID=1 and group ID=5:

        >>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
        """
        make_key = self.make_counter_key
        normalize_to_rollup = self.normalize_to_rollup
        if timestamp is None:
            timestamp = timezone.now()
        with self.cluster.map() as client:
            # One HINCRBY + EXPIREAT per (rollup, item) pair; the whole rollup
            # bucket shares a single TTL (see class docstring).
            for rollup, max_values in self.rollups:
                norm_rollup = normalize_to_rollup(timestamp, rollup)
                for model, key in items:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_rollup, model_key)
                    client.hincrby(hash_key, model_key, count)
                    client.expireat(
                        hash_key,
                        self.calculate_expiry(rollup, max_values, timestamp),
                    )

    def get_range(self, model, keys, start, end, rollup=None):
        """
        To get a range of data for group ID=[1, 2, 3]:

        Start and end are both inclusive.

        >>> now = timezone.now()
        >>> get_keys(TimeSeriesModel.group, [1, 2, 3],
        >>>          start=now - timedelta(days=1),
        >>>          end=now)
        """
        normalize_to_epoch = self.normalize_to_epoch
        normalize_to_rollup = self.normalize_to_rollup
        make_key = self.make_counter_key
        if rollup is None:
            rollup = self.get_optimal_rollup(start, end)
        results = []
        timestamp = end
        with self.cluster.map() as client:
            # Walk backwards from ``end`` to ``start`` one rollup at a time,
            # issuing pipelined HGETs (promises resolved after the block).
            while timestamp >= start:
                real_epoch = normalize_to_epoch(timestamp, rollup)
                norm_epoch = normalize_to_rollup(timestamp, rollup)
                for key in keys:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_epoch, model_key)
                    results.append((real_epoch, key,
                                    client.hget(hash_key, model_key)))
                timestamp = timestamp - timedelta(seconds=rollup)
        results_by_key = defaultdict(dict)
        for epoch, key, count in results:
            # Missing buckets resolve to None -> treated as 0.
            results_by_key[key][epoch] = int(count.value or 0)
        for key, points in results_by_key.iteritems():
            results_by_key[key] = sorted(points.items())
        return dict(results_by_key)

    def record(self, model, key, values, timestamp=None):
        """Record values into a single distinct counter; wraps record_multi."""
        self.record_multi(((model, key, values),), timestamp)

    def record_multi(self, items, timestamp=None):
        """
        Record an occurence of an item in a distinct counter.
        """
        if timestamp is None:
            timestamp = timezone.now()
        ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(
        with self.cluster.fanout() as client:
            for model, key, values in items:
                c = client.target_key(key)
                # One PFADD + EXPIREAT per configured rollup.
                for rollup, max_values in self.rollups:
                    k = self.make_key(
                        model,
                        rollup,
                        ts,
                        key,
                    )
                    c.pfadd(k, *values)
                    c.expireat(
                        k,
                        self.calculate_expiry(
                            rollup,
                            max_values,
                            timestamp,
                        ),
                    )

    def get_distinct_counts_series(self, model, keys, start, end=None, rollup=None):
        """
        Fetch counts of distinct items for each rollup interval within the range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                c = client.target_key(key)
                r = responses[key] = []
                # One PFCOUNT per interval; promises resolve after the block.
                for timestamp in series:
                    r.append((
                        timestamp,
                        c.pfcount(
                            self.make_key(
                                model,
                                rollup,
                                timestamp,
                                key,
                            ),
                        ),
                    ))
        return {key: [(timestamp, promise.value) for timestamp, promise in value] for key, value in responses.iteritems()}

    def get_distinct_counts_totals(self, model, keys, start, end=None, rollup=None):
        """
        Count distinct items during a time range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                # XXX: The current versions of the Redis driver don't implement
                # ``PFCOUNT`` correctly (although this is fixed in the Git
                # master, so should be available in the next release) and only
                # supports a single key argument -- not the variadic signature
                # supported by the protocol -- so we have to call the commnand
                # directly here instead.
                ks = []
                for timestamp in series:
                    ks.append(self.make_key(model, rollup, timestamp, key))
                responses[key] = client.target_key(key).execute_command('PFCOUNT', *ks)
        return {key: value.value for key, value in responses.iteritems()}

    def make_frequency_table_keys(self, model, rollup, timestamp, key):
        """Return the three Redis keys backing one frequency table.

        The ``:c``/``:i``/``:e`` suffixes are consumed by ``cmsketch.lua``
        (presumably config, index, and estimation matrix -- see that script).
        """
        prefix = self.make_key(model, rollup, timestamp, key)
        return map(
            operator.methodcaller('format', prefix),
            ('{}:c', '{}:i', '{}:e'),
        )

    def record_frequency_multi(self, requests, timestamp=None):
        """Increment frequency-table scores; ``requests`` pairs a model with
        {key: {member: score}} mappings. No-op when sketches are disabled."""
        if not self.enable_frequency_sketches:
            return
        if timestamp is None:
            timestamp = timezone.now()
        ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(
        commands = {}
        for model, request in requests:
            for key, items in request.iteritems():
                keys = []
                expirations = {}
                # Figure out all of the keys we need to be incrementing, as
                # well as their expiration policies.
                for rollup, max_values in self.rollups:
                    chunk = self.make_frequency_table_keys(model, rollup, ts, key)
                    keys.extend(chunk)
                    expiry = self.calculate_expiry(rollup, max_values, timestamp)
                    for k in chunk:
                        expirations[k] = expiry
                arguments = ['INCR'] + list(self.DEFAULT_SKETCH_PARAMETERS)
                for member, score in items.items():
                    arguments.extend((score, member))
                # Since we're essentially merging dictionaries, we need to
                # append this to any value that already exists at the key.
                cmds = commands.setdefault(key, [])
                cmds.append((CountMinScript, keys, arguments))
                for k, t in expirations.items():
                    cmds.append(('EXPIREAT', k, t))
        self.cluster.execute_commands(commands)

    def get_most_frequent(self, model, keys, start, end=None, rollup=None, limit=None):
        """Return {key: [(member, score), ...]} of the most frequent members
        over the range, merged across all intervals by ``cmsketch.lua``."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        commands = {}
        arguments = ['RANKED']
        if limit is not None:
            arguments.append(int(limit))
        for key in keys:
            ks = []
            for timestamp in series:
                ks.extend(self.make_frequency_table_keys(model, rollup, timestamp, key))
            commands[key] = [(CountMinScript, ks, arguments)]
        results = {}
        for key, responses in self.cluster.execute_commands(commands).items():
            results[key] = [(member, float(score)) for member, score in responses[0].value]
        return results

    def get_frequency_series(self, model, items, start, end=None, rollup=None):
        """Return {key: [(timestamp, {member: score}), ...]} -- estimated
        scores per interval for the requested members."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        # Freeze ordering of the members (we'll need these later.)
        for key, members in items.items():
            items[key] = tuple(members)
        commands = {}
        for key, members in items.items():
            ks = []
            for timestamp in series:
                ks.extend(self.make_frequency_table_keys(model, rollup, timestamp, key))
            commands[key] = [(CountMinScript, ks, ('ESTIMATE',) + members)]
        results = {}
        for key, responses in self.cluster.execute_commands(commands).items():
            members = items[key]
            chunk = results[key] = []
            # One score list per interval, in the same order as ``members``.
            for timestamp, scores in zip(series, responses[0].value):
                chunk.append((timestamp, dict(zip(members, map(float, scores)))))
        return results

    def get_frequency_totals(self, model, items, start, end=None, rollup=None):
        """Return {key: {member: total score}} summed over the whole range."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        responses = {}
        for key, series in self.get_frequency_series(model, items, start, end, rollup).iteritems():
            response = responses[key] = {}
            for timestamp, results in series:
                for member, value in results.items():
                    response[member] = response.get(member, 0.0) + value
        return responses
| StarcoderdataPython |
1829770 | ##############################################################################
#
# Copyright (c) 2011 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import sys
from zope.interface import implementer
from persistent.interfaces import IPersistent
from persistent.interfaces import GHOST
from persistent.interfaces import UPTODATE
from persistent.interfaces import CHANGED
from persistent.interfaces import STICKY
from persistent.interfaces import OID_TYPE
from persistent.interfaces import SERIAL_TYPE
from persistent.timestamp import TimeStamp
from persistent.timestamp import _ZERO
from persistent._compat import copy_reg
from persistent._compat import intern
from . import ring
# Serial used for objects that have never been committed.
_INITIAL_SERIAL = _ZERO

# Bitwise flags
_CHANGED = 0x0001
_STICKY = 0x0002

# Raw attribute accessors that bypass Persistent's __getattribute__ /
# __setattr__ (which would otherwise activate the object or mark it changed).
_OGA = object.__getattribute__
_OSA = object.__setattr__

# These names can be used from a ghost without causing it to be
# activated. These are standardized with the C implementation
SPECIAL_NAMES = ('__class__',
                 '__del__',
                 '__dict__',
                 '__of__',
                 '__setstate__',)

# And this is an implementation detail of this class; it holds
# the standard names plus the slot names, allowing for just one
# check in __getattribute__
_SPECIAL_NAMES = set(SPECIAL_NAMES)
@implementer(IPersistent)
class Persistent(object):
""" Pure Python implmentation of Persistent base class
"""
__slots__ = ('__jar', '__oid', '__serial', '__flags', '__size', '__ring',)
def __new__(cls, *args, **kw):
inst = super(Persistent, cls).__new__(cls)
# We bypass the __setattr__ implementation of this object
# at __new__ time, just like the C implementation does. This
# makes us compatible with subclasses that want to access
# properties like _p_changed in their setattr implementation
_OSA(inst, '_Persistent__jar', None)
_OSA(inst, '_Persistent__oid', None)
_OSA(inst, '_Persistent__serial', None)
_OSA(inst, '_Persistent__flags', None)
_OSA(inst, '_Persistent__size', 0)
_OSA(inst, '_Persistent__ring', None)
return inst
# _p_jar: see IPersistent.
def _get_jar(self):
return _OGA(self, '_Persistent__jar')
def _set_jar(self, value):
jar = _OGA(self, '_Persistent__jar')
if self._p_is_in_cache(jar) and value is not None and jar != value:
# The C implementation only forbids changing the jar
# if we're already in a cache. Match its error message
raise ValueError('can not change _p_jar of cached object')
if _OGA(self, '_Persistent__jar') != value:
_OSA(self, '_Persistent__jar', value)
_OSA(self, '_Persistent__flags', 0)
def _del_jar(self):
jar = _OGA(self, '_Persistent__jar')
if jar is not None:
if self._p_is_in_cache(jar):
raise ValueError("can't delete _p_jar of cached object")
_OSA(self, '_Persistent__jar', None)
_OSA(self, '_Persistent__flags', None)
_p_jar = property(_get_jar, _set_jar, _del_jar)
# _p_oid: see IPersistent.
def _get_oid(self):
return _OGA(self, '_Persistent__oid')
def _set_oid(self, value):
if value == _OGA(self, '_Persistent__oid'):
return
# The C implementation allows *any* value to be
# used as the _p_oid.
#if value is not None:
# if not isinstance(value, OID_TYPE):
# raise ValueError('Invalid OID type: %s' % value)
# The C implementation only forbids changing the OID
# if we're in a cache, regardless of what the current
# value or jar is
if self._p_is_in_cache():
# match the C error message
raise ValueError('can not change _p_oid of cached object')
_OSA(self, '_Persistent__oid', value)
def _del_oid(self):
jar = _OGA(self, '_Persistent__jar')
oid = _OGA(self, '_Persistent__oid')
if jar is not None:
if oid and jar._cache.get(oid):
raise ValueError('Cannot delete _p_oid of cached object')
_OSA(self, '_Persistent__oid', None)
_p_oid = property(_get_oid, _set_oid, _del_oid)
# _p_serial: see IPersistent.
def _get_serial(self):
serial = _OGA(self, '_Persistent__serial')
if serial is not None:
return serial
return _INITIAL_SERIAL
def _set_serial(self, value):
if not isinstance(value, SERIAL_TYPE):
raise ValueError('Invalid SERIAL type: %s' % value)
if len(value) != 8:
raise ValueError('SERIAL must be 8 octets')
_OSA(self, '_Persistent__serial', value)
def _del_serial(self):
_OSA(self, '_Persistent__serial', None)
_p_serial = property(_get_serial, _set_serial, _del_serial)
# _p_changed: see IPersistent.
def _get_changed(self):
if _OGA(self, '_Persistent__jar') is None:
return False
flags = _OGA(self, '_Persistent__flags')
if flags is None: # ghost
return None
return bool(flags & _CHANGED)
def _set_changed(self, value):
if _OGA(self, '_Persistent__flags') is None:
if value:
self._p_activate()
self._p_set_changed_flag(value)
else:
if value is None: # -> ghost
self._p_deactivate()
else:
self._p_set_changed_flag(value)
def _del_changed(self):
self._p_invalidate()
_p_changed = property(_get_changed, _set_changed, _del_changed)
# _p_mtime
def _get_mtime(self):
# The C implementation automatically unghostifies the object
# when _p_mtime is accessed.
self._p_activate()
self._p_accessed()
serial = _OGA(self, '_Persistent__serial')
if serial is not None:
ts = TimeStamp(serial)
return ts.timeTime()
_p_mtime = property(_get_mtime)
# _p_state
def _get_state(self):
# Note the use of OGA and caching to avoid recursive calls to __getattribute__:
# __getattribute__ calls _p_accessed calls cache.mru() calls _p_state
if _OGA(self, '_Persistent__jar') is None:
return UPTODATE
flags = _OGA(self, '_Persistent__flags')
if flags is None:
return GHOST
if flags & _CHANGED:
result = CHANGED
else:
result = UPTODATE
if flags & _STICKY:
return STICKY
return result
_p_state = property(_get_state)
# _p_estimated_size: XXX don't want to reserve the space?
def _get_estimated_size(self):
return _OGA(self, '_Persistent__size') * 64
def _set_estimated_size(self, value):
if isinstance(value, int):
if value < 0:
raise ValueError('_p_estimated_size must not be negative')
_OSA(self, '_Persistent__size', _estimated_size_in_24_bits(value))
else:
raise TypeError("_p_estimated_size must be an integer")
def _del_estimated_size(self):
_OSA(self, '_Persistent__size', 0)
_p_estimated_size = property(
_get_estimated_size, _set_estimated_size, _del_estimated_size)
# The '_p_sticky' property is not (yet) part of the API: for now,
# it exists to simplify debugging and testing assertions.
    def _get_sticky(self):
        """Return True when the _STICKY flag is set; ghosts are never sticky."""
        flags = _OGA(self, '_Persistent__flags')
        if flags is None:
            return False
        return bool(flags & _STICKY)
    def _set_sticky(self, value):
        """Set or clear the _STICKY flag; a ghost cannot be made sticky."""
        flags = _OGA(self, '_Persistent__flags')
        if flags is None:
            raise ValueError('Ghost')
        if value:
            flags |= _STICKY
        else:
            flags &= ~_STICKY
        _OSA(self, '_Persistent__flags', flags)
    _p_sticky = property(_get_sticky, _set_sticky)
# The '_p_status' property is not (yet) part of the API: for now,
# it exists to simplify debugging and testing assertions.
    def _get_status(self):
        """Return a human-readable state: unsaved/ghost/sticky/changed/saved."""
        if _OGA(self, '_Persistent__jar') is None:
            return 'unsaved'
        flags = _OGA(self, '_Persistent__flags')
        if flags is None:
            return 'ghost'
        if flags & _STICKY:
            return 'sticky'
        if flags & _CHANGED:
            return 'changed'
        return 'saved'
    _p_status = property(_get_status)
# Methods from IPersistent.
    def __getattribute__(self, name):
        """ See IPersistent.
        """
        oga = _OGA
        # Only ordinary attributes trigger activation and cache-MRU
        # accounting; '_p_*' and special names must not, or we would
        # recurse through the persistence machinery itself.
        if (not name.startswith('_p_') and
            name not in _SPECIAL_NAMES):
            if oga(self, '_Persistent__flags') is None:
                oga(self, '_p_activate')()
            oga(self, '_p_accessed')()
        return oga(self, name)
    def __setattr__(self, name, value):
        """Set an attribute; ordinary attributes unghostify and mark changed."""
        special_name = (name in _SPECIAL_NAMES or
                        name.startswith('_p_'))
        # Volatile ('_v_') attributes never mark the object changed and do
        # not bump the cache MRU.
        volatile = name.startswith('_v_')
        if not special_name:
            if _OGA(self, '_Persistent__flags') is None:
                _OGA(self, '_p_activate')()
            if not volatile:
                _OGA(self, '_p_accessed')()
        _OSA(self, name, value)
        if (_OGA(self, '_Persistent__jar') is not None and
            _OGA(self, '_Persistent__oid') is not None and
            not special_name and
            not volatile):
            before = _OGA(self, '_Persistent__flags')
            after = before | _CHANGED
            # Register with the jar only on the False -> True transition.
            if before != after:
                _OSA(self, '_Persistent__flags', after)
                _OGA(self, '_p_register')()
    def __delattr__(self, name):
        """Delete an attribute; ordinary attributes unghostify and mark changed."""
        special_name = (name in _SPECIAL_NAMES or
                        name.startswith('_p_'))
        if not special_name:
            if _OGA(self, '_Persistent__flags') is None:
                _OGA(self, '_p_activate')()
            _OGA(self, '_p_accessed')()
            before = _OGA(self, '_Persistent__flags')
            after = before | _CHANGED
            if before != after:
                _OSA(self, '_Persistent__flags', after)
                if (_OGA(self, '_Persistent__jar') is not None and
                    _OGA(self, '_Persistent__oid') is not None):
                    _OGA(self, '_p_register')()
        object.__delattr__(self, name)
def _slotnames(self, _v_exclude=True):
slotnames = copy_reg._slotnames(type(self))
return [x for x in slotnames
if not x.startswith('_p_') and
not (x.startswith('_v_') and _v_exclude) and
not x.startswith('_Persistent__') and
x not in Persistent.__slots__]
    def __getstate__(self):
        """ See IPersistent.

        Returns either a filtered copy of ``__dict__``, or a
        ``(dict, slots_dict)`` pair when any slot values are set.
        """
        idict = getattr(self, '__dict__', None)
        slotnames = self._slotnames()
        if idict is not None:
            # Per-connection ('_p_') and volatile ('_v_') attributes are
            # never part of the pickled state.
            d = dict([x for x in idict.items()
                      if not x[0].startswith('_p_') and
                      not x[0].startswith('_v_')])
        else:
            d = None
        if slotnames:
            s = {}
            for slotname in slotnames:
                # ``self`` doubles as a sentinel for "slot not set".
                value = getattr(self, slotname, self)
                if value is not self:
                    s[slotname] = value
            return d, s
        return d
    def __setstate__(self, state):
        """ See IPersistent.

        Accepts either a plain dict or a ``(dict, slots_dict)`` pair as
        produced by ``__getstate__``.
        """
        if isinstance(state,tuple):
            inst_dict, slots = state
        else:
            inst_dict, slots = state, ()
        idict = getattr(self, '__dict__', None)
        if inst_dict is not None:
            if idict is None:
                raise TypeError('No instance dict')
            idict.clear()
            for k, v in inst_dict.items():
                # Normally the keys for instance attributes are interned.
                # Do that here, but only if it is possible to do so.
                idict[intern(k) if type(k) is str else k] = v
        slotnames = self._slotnames()
        if slotnames:
            # NOTE(review): when ``state`` is not a tuple, ``slots`` is the
            # empty *tuple* and ``slots.items()`` would raise AttributeError
            # for a slotted class -- confirm this path cannot occur.
            for k, v in slots.items():
                setattr(self, k, v)
    def __reduce__(self):
        """ See IPersistent.

        Pickles via ``copy_reg.__newobj__`` so ``__init__`` is bypassed on
        load; honors an optional ``__getnewargs__``.
        """
        gna = getattr(self, '__getnewargs__', lambda: ())
        return (copy_reg.__newobj__,
                (type(self),) + gna(), self.__getstate__())
    def _p_activate(self):
        """ See IPersistent.

        Unghostify by asking the jar to load our state; restores the prior
        flags if ``setstate`` raises, and always ends up-to-date on success.
        """
        oga = _OGA
        before = oga(self, '_Persistent__flags')
        if before is None: # Only do this if we're a ghost
            # Begin by marking up-to-date in case we bail early
            _OSA(self, '_Persistent__flags', 0)
            jar = oga(self, '_Persistent__jar')
            if jar is None:
                return
            oid = oga(self, '_Persistent__oid')
            if oid is None:
                return
            # If we're actually going to execute a set-state,
            # mark as changed to prevent any recursive call
            # (actually, our earlier check that we're a ghost should
            # prevent this, but the C implementation sets it to changed
            # while calling jar.setstate, and this is observable to clients).
            # The main point of this is to prevent changes made during
            # setstate from registering the object with the jar.
            _OSA(self, '_Persistent__flags', CHANGED)
            try:
                jar.setstate(self)
            except:
                _OSA(self, '_Persistent__flags', before)
                raise
            else:
                # If we succeed, no matter what the implementation
                # of setstate did, mark ourself as up-to-date. The
                # C implementation unconditionally does this.
                _OSA(self, '_Persistent__flags', 0) # up-to-date
# In the C implementation, _p_invalidate winds up calling
# _p_deactivate. There are ZODB tests that depend on this;
# it's not documented but there may be code in the wild
# that does as well
    def _p_deactivate(self):
        """ See IPersistent.

        Only an up-to-date object (flags == 0) may be turned back into a
        ghost; changed or sticky objects are left alone.
        """
        flags = _OGA(self, '_Persistent__flags')
        if flags is not None and not flags:
            self._p_invalidate_deactivate_helper()
    def _p_invalidate(self):
        """ See IPersistent.

        Force-ghost the object, discarding any in-memory changes.
        """
        # If we think we have changes, we must pretend
        # like we don't so that deactivate does its job
        _OSA(self, '_Persistent__flags', 0)
        self._p_deactivate()
    def _p_invalidate_deactivate_helper(self, clear=True):
        """Shared ghosting logic for _p_deactivate/_p_invalidate.

        Sets flags to None (ghost), optionally clears ``__dict__`` and slot
        values, and updates the jar's cache size accounting.
        """
        jar = _OGA(self, '_Persistent__jar')
        if jar is None:
            return
        if _OGA(self, '_Persistent__flags') is not None:
            _OSA(self, '_Persistent__flags', None)
        if clear:
            try:
                idict = _OGA(self, '__dict__')
            except AttributeError:
                pass
            else:
                idict.clear()
            type_ = type(self)
            # for backward-compatibility reason we release __slots__ only if
            # class does not override __new__
            if type_.__new__ is Persistent.__new__:
                for slotname in Persistent._slotnames(self, _v_exclude=False):
                    try:
                        getattr(type_, slotname).__delete__(self)
                    except AttributeError:
                        # AttributeError means slot variable was not initialized at all -
                        # - we can simply skip its deletion.
                        pass
        # Implementation detail: deactivating/invalidating
        # updates the size of the cache (if we have one)
        # by telling it this object no longer takes any bytes
        # (-1 is a magic number to compensate for the implementation,
        # which always adds one to the size given)
        try:
            cache = jar._cache
        except AttributeError:
            pass
        else:
            cache.update_object_size_estimation(_OGA(self, '_Persistent__oid'), -1)
            # See notes in PickleCache.sweep for why we have to do this
            cache._persistent_deactivate_ran = True
    def _p_getattr(self, name):
        """ See IPersistent.

        Return True when *name* may be read without activating the object.
        """
        if name.startswith('_p_') or name in _SPECIAL_NAMES:
            return True
        self._p_activate()
        self._p_accessed()
        return False
    def _p_setattr(self, name, value):
        """ See IPersistent.

        Set '_p_' attributes directly and return True; otherwise activate
        and return False so the caller performs the assignment.
        """
        if name.startswith('_p_'):
            setattr(self, name, value)
            return True
        self._p_activate()
        self._p_accessed()
        return False
    def _p_delattr(self, name):
        """ See IPersistent.

        Delete '_p_' attributes directly and return True; otherwise activate
        and return False so the caller performs the deletion.
        """
        if name.startswith('_p_'):
            delattr(self, name)
            return True
        self._p_activate()
        self._p_accessed()
        return False
# Helper methods: not APIs: we name them with '_p_' to bypass
# the __getattribute__ bit which bumps the cache.
    def _p_register(self):
        """Register with the jar for commit, but only once we have a jar and oid."""
        jar = _OGA(self, '_Persistent__jar')
        if jar is not None and _OGA(self, '_Persistent__oid') is not None:
            jar.register(self)
    def _p_set_changed_flag(self, value):
        """Set or clear _CHANGED; registers with the jar on a False->True flip."""
        if value:
            before = _OGA(self, '_Persistent__flags')
            after = before | _CHANGED
            if before != after:
                self._p_register()
            _OSA(self, '_Persistent__flags', after)
        else:
            flags = _OGA(self, '_Persistent__flags')
            flags &= ~_CHANGED
            _OSA(self, '_Persistent__flags', flags)
    def _p_accessed(self):
        """Bump this object in the jar's pickle cache MRU; no-op when unsaved or ghost."""
        # Notify the jar's pickle cache that we have been accessed.
        # This relies on what has been (until now) an implementation
        # detail, the '_cache' attribute of the jar. We made it a
        # private API to avoid the cycle of keeping a reference to
        # the cache on the persistent object.
        # The below is the equivalent of this, but avoids
        # several recursive through __getattribute__, especially for _p_state,
        # and benchmarks much faster
        #
        # if(self.__jar is None or
        #    self.__oid is None or
        #    self._p_state < 0 ): return
        oga = _OGA
        jar = oga(self, '_Persistent__jar')
        if jar is None:
            return
        oid = oga(self, '_Persistent__oid')
        if oid is None:
            return
        flags = oga(self, '_Persistent__flags')
        if flags is None: # ghost
            return
        # The KeyError arises in ZODB: ZODB.serialize.ObjectWriter
        # can assign a jar and an oid to newly seen persistent objects,
        # but because they are newly created, they aren't in the
        # pickle cache yet. There doesn't seem to be a way to distinguish
        # that at this level, all we can do is catch it.
        # The AttributeError arises in ZODB test cases
        try:
            jar._cache.mru(oid)
        except (AttributeError,KeyError):
            pass
    def _p_is_in_cache(self, jar=None):
        """Return whether *this* object is the cached entry for its oid.

        NOTE(review): when the jar has no '_cache' attribute this falls
        through and returns None (falsy) rather than False -- confirm
        callers only use the result in a boolean context.
        """
        oid = _OGA(self, '_Persistent__oid')
        if not oid:
            return False
        jar = jar or _OGA(self, '_Persistent__jar')
        cache = getattr(jar, '_cache', None)
        if cache is not None:
            return cache.get(oid) is self
def _estimated_size_in_24_bits(value):
if value > 1073741696:
return 16777215
return (value//64) + 1
_SPECIAL_NAMES.update([intern('_Persistent' + x) for x in Persistent.__slots__])
| StarcoderdataPython |
9765509 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@Time: 19-6-12 下午4:19
@Author: hezhiqiang
@FileName: re_test.py
@IDE: PyCharm
测试正则表达式
注意:
-re.findall("a(.*?)b", "str") 能够返回括号中间的内容,括号起到定位和过滤的效果
-原始字符串r,待匹配字符串中有反斜杠的时候,使用r能忽视反斜杠带来的转义效果
-点号默认情况下平匹配不到'\n'
-\s的能够匹配到空白字符,不仅仅包括空格,还有\t
"""
import re
b = "chuan1zhi2"
# re.sub returns a NEW string with each digit replaced; the input is untouched.
# Raw strings (r"\d") avoid the invalid-escape SyntaxWarning that bare "\d"
# produces on modern Python.
print(re.sub(r"\d", "_", b))
print(b)
# Pre-compiling the pattern saves re-parsing it on every use.
p = re.compile(r"\d")
print(p.findall(b))
print(p.sub("_", b))
print(r'\n')  # raw string: prints a literal backslash followed by 'n'
c = r'a\nb'
print(len(c))  # 4 characters: 'a', '\', 'n', 'b' -- no newline escape applied
print(c[1])    # the literal backslash
3595250 | import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class QuotetutorialItem(scrapy.Item):
    """Scrapy item with the three fields this crawl collects."""
    # NOTE(review): parse() below never populates these fields -- confirm
    # whether the item schema is still intended to be used.
    title = scrapy.Field()
    author = scrapy.Field()
    tag = scrapy.Field()
    pass
class testSpider(CrawlSpider):
    """Broad crawler: follows every link from the start URL and yields items.

    NOTE(review): overriding ``parse`` on a CrawlSpider is discouraged by the
    scrapy docs (it is used internally by the rule machinery), and the yielded
    item is always empty -- confirm both are intentional.
    """
    name = "aranha"
    start_urls = ['https://stackoverflow.com/questions/37380588/performing-a-scrapy-broad-crawl-with-high-concurrency-and-low-request-rate-on-ea']
    # allow='' matches every extracted link; follow=True keeps crawling.
    rules = [Rule(LinkExtractor(allow=''),
                  callback='parse', follow=True)]
    def parse(self, response):
        oi = QuotetutorialItem()
        yield oi
229657 | import click, os, re
from pathlib import Path
from sqlalchemy import create_engine
from mgi.models import db
def create_db(url):
    """Create every table known to the SQLAlchemy ``db`` metadata at *url*.

    *url* is any SQLAlchemy database URL (e.g. ``sqlite:///name.db``).
    """
    engine = create_engine(url)
    db.metadata.create_all(engine)
@click.group(short_help="hopefully handy commands")
def utils_cli():
    # Top-level click group; subcommands attach themselves via decorators.
    pass
@utils_cli.group(name="db", short_help="tools for the database")
def utils_db_cli():
    # "db" subgroup: hosts the create/set/show database commands below.
    pass
@utils_db_cli.command(name="create", short_help="create and deploy the database")
@click.argument("url", type=click.STRING, required=True, nargs=1)
def db_create_cmd(url):
    """
    Create and Deploy the DB Schema
    SQLite3: sqlite:///name.db
    Hopefully other URIs work!
    """
    # Stripping the sqlite prefix tells us whether this is a file-backed DB.
    sqlite_path = re.sub(r"sqlite:///", "", url)
    is_sqlite = sqlite_path != url
    if is_sqlite:
        if os.path.exists(sqlite_path):
            raise Exception(f"Database already exists: {sqlite_path}")
        # Pre-create the file so SQLite has something to open.
        Path(sqlite_path).touch()
    create_db(url)
    print(f"Created DB with {url}")
@utils_db_cli.command(name="set", short_help="show the database URI")
@click.argument("uri", type=click.STRING, required=False, nargs=1)
def db_set_cmd(uri):
    """
    Show the Command to Set the Database URI
    This is set an an environment variable called SQLALCHEMY_DATABASE_URI, and must be set in your courrent environment. Please run the command output here.
    If no URI is given, an example will be used.
    """
    # Fall back to an example SQLite URI when none was supplied.
    effective_uri = "sqlite:///tests/data/db" if uri is None else uri
    print(f"export SQLALCHEMY_DATABASE_URI={effective_uri}")
@utils_db_cli.command(name="show", short_help="show the database URI")
def db_show_cmd():
    """
    Show the Current DB URI
    This is set an an enivirnment variable called SQLALCHEMY_DATABASE_URI
    """
    current = os.environ.get('SQLALCHEMY_DATABASE_URI', None)
    # Prints "None" when the variable is unset, matching str() formatting.
    print(f"{current}")
| StarcoderdataPython |
5093097 | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
np.random.seed(123)
class THUREAD(Datasets):
    """THU-READ video dataset; loading/sampling helpers come from ``Datasets``."""
    def __init__(self, args, ground_truth, modality, phase='train'):
        # All bookkeeping (self.inputs, get_sl, image_propose, dataset_root,
        # typ, ...) is set up by the Datasets base class.
        super(THUREAD, self).__init__(args, ground_truth, modality, phase)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        # inputs[index] appears to be (relative_path, length, label) --
        # TODO confirm against the Datasets base class.
        sl = self.get_sl(self.inputs[index][1])
        self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
        self.clip, skgmaparr = self.image_propose(self.data_path, sl)
        if self.args.Network == 'FusionNet':
            # FusionNet consumes paired RGB + Depth streams; the depth clip's
            # path is derived from the RGB path by name substitution.
            assert self.typ == 'rgb'
            self.data_path1 = self.data_path.replace('RGB', 'Depth')
            self.data_path1 = '/'.join(self.data_path1.split('/')[:-1]) + '/{}'.format(
                self.data_path1.split('/')[-1].replace('Depth', 'D'))
            self.clip1, skgmaparr1 = self.image_propose(self.data_path1, sl)
            return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
                   self.inputs[index][2], self.data_path
        # permute: (T, H, W, C) -> (T, C, H, W) -- assumes clip layout; TODO confirm.
        return self.clip.permute(0, 3, 1, 2), skgmaparr, self.inputs[index][2], self.data_path
    def __len__(self):
        return len(self.inputs)
| StarcoderdataPython |
# NOTE: monkey-patching must run before anything else imports the modules
# being patched, so these lines must stay at the very top of the process.
import gevent.monkey
gevent.monkey.patch_all()
# Make psycopg2 cooperative with gevent's event loop.
import psycogreen.gevent
psycogreen.gevent.patch_psycopg()
# Apply Odooku's Odoo patches once the runtime is green.
from odooku.patch import apply_patches
apply_patches()
import csv
# Raise the CSV field size limit (default 128 KiB) for very large exports.
csv.field_size_limit(500 * 1024 * 1024)
11342970 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import simplejson
import time
import warnings
from oslo_log import log as logging
import requests
import six
from stevedore import driver as stevedore_driver
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers import driver_base as driver_base
from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.keepalived import vrrp_rest_driver
from octavia.common.config import cfg
from octavia.common import constants as consts
from octavia.common.jinja.haproxy import jinja_cfg
from octavia.common.tls_utils import cert_parser
from octavia.common import utils
LOG = logging.getLogger(__name__)
API_VERSION = consts.API_VERSION
OCTAVIA_API_CLIENT = (
"Octavia HaProxy Rest Client/{version} "
"(https://wiki.openstack.org/wiki/Octavia)").format(version=API_VERSION)
CONF = cfg.CONF
class HaproxyAmphoraLoadBalancerDriver(
    driver_base.AmphoraLoadBalancerDriver,
    vrrp_rest_driver.KeepalivedAmphoraDriverMixin):
    """Drives amphora VMs through their REST agent (haproxy + keepalived)."""
    def __init__(self):
        super(HaproxyAmphoraLoadBalancerDriver, self).__init__()
        self.client = AmphoraAPIClient()
        # Certificate manager is pluggable and resolved by name via stevedore.
        self.cert_manager = stevedore_driver.DriverManager(
            namespace='octavia.cert_manager',
            name=CONF.certificates.cert_manager,
            invoke_on_load=True,
        ).driver
        self.jinja = jinja_cfg.JinjaTemplater(
            base_amp_path=CONF.haproxy_amphora.base_path,
            base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
            haproxy_template=CONF.haproxy_amphora.haproxy_template)
    def update(self, listener, vip):
        """Render a haproxy config for *listener* and push + reload it on every amphora."""
        LOG.debug("Amphora %s haproxy, updating listener %s, vip %s",
                  self.__class__.__name__, listener.protocol_port,
                  vip.ip_address)
        # Process listener certificate info
        certs = self._process_tls_certificates(listener)
        for amp in listener.load_balancer.amphorae:
            if amp.status != consts.DELETED:
                # Generate HaProxy configuration from listener object
                config = self.jinja.build_config(
                    host_amphora=amp,
                    listener=listener,
                    tls_cert=certs['tls_cert'],
                    user_group=CONF.haproxy_amphora.user_group)
                self.client.upload_config(amp, listener.id, config)
                self.client.reload_listener(amp, listener.id)
    def upload_cert_amp(self, amp, pem):
        """Upload a rotated agent certificate (PEM) to a single amphora."""
        LOG.debug("Amphora %s updating cert in REST driver "
                  "with amphora id %s,",
                  self.__class__.__name__, amp.id)
        self.client.update_cert_for_rotation(amp, pem)
    def _apply(self, func, listener=None, *args):
        # Apply *func* to every non-deleted amphora of the listener's LB.
        for amp in listener.load_balancer.amphorae:
            if amp.status != consts.DELETED:
                func(amp, listener.id, *args)
    def stop(self, listener, vip):
        """Stop the listener on all amphorae."""
        self._apply(self.client.stop_listener, listener)
    def start(self, listener, vip):
        """Start the listener on all amphorae."""
        self._apply(self.client.start_listener, listener)
    def delete(self, listener, vip):
        """Delete the listener from all amphorae."""
        self._apply(self.client.delete_listener, listener)
    def get_info(self, amphora):
        # NOTE(review): ``self.driver`` is not assigned anywhere in this
        # class -- presumably set by a subclass/mixin; confirm before use.
        self.driver.get_info(amphora.lb_network_ip)
    def get_diagnostics(self, amphora):
        # NOTE(review): see get_info about ``self.driver``.
        self.driver.get_diagnostics(amphora.lb_network_ip)
    def finalize_amphora(self, amphora):
        # No-op for this driver; hook kept for interface completeness.
        pass
    def post_vip_plug(self, amphora, load_balancer, amphorae_network_config):
        """Tell the amphora agent to configure the freshly plugged VIP port."""
        if amphora.status != consts.DELETED:
            subnet = amphorae_network_config.get(amphora.id).vip_subnet
            # NOTE(blogan): using the vrrp port here because that
            # is what the allowed address pairs network driver sets
            # this particular port to.  This does expose a bit of
            # tight coupling between the network driver and amphora
            # driver.  We will need to revisit this to try and remove
            # this tight coupling.
            # NOTE (johnsom): I am loading the vrrp_ip into the
            # net_info structure here so that I don't break
            # compatibility with old amphora agent versions.
            port = amphorae_network_config.get(amphora.id).vrrp_port
            LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s",
                      amphora.vrrp_ip, port.id)
            host_routes = [{'nexthop': hr.nexthop,
                            'destination': hr.destination}
                           for hr in subnet.host_routes]
            net_info = {'subnet_cidr': subnet.cidr,
                        'gateway': subnet.gateway_ip,
                        'mac_address': port.mac_address,
                        'vrrp_ip': amphora.vrrp_ip,
                        'mtu': port.network.mtu,
                        'host_routes': host_routes}
            try:
                self.client.plug_vip(amphora,
                                     load_balancer.vip.ip_address,
                                     net_info)
            except exc.Conflict:
                # Conflict means the plug already happened; safe to skip.
                LOG.warning('VIP with MAC %(mac)s already exists on amphora, '
                            'skipping post_vip_plug',
                            {'mac': port.mac_address})
    def post_network_plug(self, amphora, port):
        """Tell the amphora agent to bring up a newly attached member network port."""
        fixed_ips = []
        for fixed_ip in port.fixed_ips:
            host_routes = [{'nexthop': hr.nexthop,
                            'destination': hr.destination}
                           for hr in fixed_ip.subnet.host_routes]
            ip = {'ip_address': fixed_ip.ip_address,
                  'subnet_cidr': fixed_ip.subnet.cidr,
                  'host_routes': host_routes}
            fixed_ips.append(ip)
        port_info = {'mac_address': port.mac_address,
                     'fixed_ips': fixed_ips,
                     'mtu': port.network.mtu}
        try:
            self.client.plug_network(amphora, port_info)
        except exc.Conflict:
            # Conflict means the plug already happened; safe to skip.
            LOG.warning('Network with MAC %(mac)s already exists on amphora, '
                        'skipping post_network_plug',
                        {'mac': port.mac_address})
    def _process_tls_certificates(self, listener):
        """Processes TLS data from the listener.
        Converts and uploads PEM data to the Amphora API
        return TLS_CERT and SNI_CERTS
        """
        tls_cert = None
        sni_certs = []
        certs = []
        data = cert_parser.load_certificates_data(
            self.cert_manager, listener)
        if data['tls_cert'] is not None:
            tls_cert = data['tls_cert']
            certs.append(tls_cert)
        if data['sni_certs']:
            sni_certs = data['sni_certs']
            certs.extend(sni_certs)
        for cert in certs:
            pem = cert_parser.build_pem(cert)
            # md5 is used only as a change-detection fingerprint, not for
            # security -- hence the nosec.
            md5 = hashlib.md5(pem).hexdigest()  # nosec
            name = '{cn}.pem'.format(cn=cert.primary_cn)
            self._apply(self._upload_cert, listener, pem, md5, name)
        return {'tls_cert': tls_cert, 'sni_certs': sni_certs}
    def _upload_cert(self, amp, listener_id, pem, md5, name):
        # Skip the upload when the amphora already has an identical PEM.
        try:
            if self.client.get_cert_md5sum(amp, listener_id, name) == md5:
                return
        except exc.NotFound:
            pass
        self.client.upload_cert_pem(
            amp, listener_id, name, pem)
# Check a custom hostname
class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter):
    """HTTPAdapter that pins TLS hostname verification to the amphora's id."""
    def cert_verify(self, conn, url, verify, cert):
        # ``self.uuid`` is assigned by AmphoraAPIClient before each request.
        conn.assert_hostname = self.uuid
        return super(CustomHostNameCheckingAdapter,
                     self).cert_verify(conn, url, verify, cert)
class AmphoraAPIClient(object):
    """HTTPS client for the amphora agent REST API, with boot-time retries."""
    def __init__(self):
        super(AmphoraAPIClient, self).__init__()
        self.secure = False
        # HTTP verbs are partials over request() so the retry logic is shared.
        self.get = functools.partial(self.request, 'get')
        self.post = functools.partial(self.request, 'post')
        self.put = functools.partial(self.request, 'put')
        self.delete = functools.partial(self.request, 'delete')
        self.head = functools.partial(self.request, 'head')
        # Listener and VRRP lifecycle actions share the generic action helpers.
        self.start_listener = functools.partial(self._action,
                                                consts.AMP_ACTION_START)
        self.stop_listener = functools.partial(self._action,
                                               consts.AMP_ACTION_STOP)
        self.reload_listener = functools.partial(self._action,
                                                 consts.AMP_ACTION_RELOAD)
        self.start_vrrp = functools.partial(self._vrrp_action,
                                            consts.AMP_ACTION_START)
        self.stop_vrrp = functools.partial(self._vrrp_action,
                                           consts.AMP_ACTION_STOP)
        self.reload_vrrp = functools.partial(self._vrrp_action,
                                             consts.AMP_ACTION_RELOAD)
        self.session = requests.Session()
        self.session.cert = CONF.haproxy_amphora.client_cert
        # Verify the server cert against the amphora's id, not the IP/DNS name.
        self.ssl_adapter = CustomHostNameCheckingAdapter()
        self.session.mount('https://', self.ssl_adapter)
    def _base_url(self, ip):
        """Build the versioned https base URL, bracketing IPv6 addresses."""
        if utils.is_ipv6_lla(ip):
            # Link-local addresses need the interface as the IPv6 scope id.
            ip = '[{ip}%{interface}]'.format(
                ip=ip,
                interface=CONF.haproxy_amphora.lb_network_interface)
        elif utils.is_ipv6(ip):
            ip = '[{ip}]'.format(ip=ip)
        return "https://{ip}:{port}/{version}/".format(
            ip=ip,
            port=CONF.haproxy_amphora.bind_port,
            version=API_VERSION)
    def request(self, method, amp, path='/', **kwargs):
        """Issue *method* against the amphora, retrying while its network boots."""
        LOG.debug("request url %s", path)
        _request = getattr(self.session, method.lower())
        _url = self._base_url(amp.lb_network_ip) + path
        LOG.debug("request url %s", _url)
        timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                         CONF.haproxy_amphora.rest_request_read_timeout)
        reqargs = {
            'verify': CONF.haproxy_amphora.server_ca,
            'url': _url,
            'timeout': timeout_tuple, }
        reqargs.update(kwargs)
        headers = reqargs.setdefault('headers', {})
        headers['User-Agent'] = OCTAVIA_API_CLIENT
        self.ssl_adapter.uuid = amp.id
        exception = None
        # Keep retrying
        for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
            try:
                with warnings.catch_warnings():
                    # Suppress urllib3's SNIMissingWarning-style noise.
                    warnings.filterwarnings(
                        "ignore",
                        message="A true SSLContext object is not available"
                    )
                    r = _request(**reqargs)
                LOG.debug('Connected to amphora. Response: %(resp)s',
                          {'resp': r})
                content_type = r.headers.get('content-type', '')
                # Check the 404 to see if it is just that the network in the
                # amphora is not yet up, in which case retry.
                # Otherwise return the response quickly.
                if r.status_code == 404:
                    LOG.debug('Got a 404 (content-type: %s) -- connection '
                              'data: %s' % (content_type, r.content))
                    if content_type.find("application/json") == -1:
                        LOG.debug("Amphora agent not ready.")
                        raise requests.ConnectionError
                    try:
                        json_data = r.json().get('details', '')
                        if 'No suitable network interface found' in json_data:
                            LOG.debug("Amphora network interface not found.")
                            raise requests.ConnectionError
                    except simplejson.JSONDecodeError: # if r.json() fails
                        pass # TODO(rm_work) Should we do something?
                return r
            except (requests.ConnectionError, requests.Timeout) as e:
                exception = e
                LOG.warning("Could not connect to instance. Retrying.")
                time.sleep(CONF.haproxy_amphora.connection_retry_interval)
        LOG.error("Connection retries (currently set to %(max_retries)s) "
                  "exhausted.  The amphora is unavailable. Reason: "
                  "%(exception)s",
                  {'max_retries': CONF.haproxy_amphora.connection_max_retries,
                   'exception': exception})
        raise driver_except.TimeOutException()
    def upload_config(self, amp, listener_id, config):
        """PUT a rendered haproxy config for *listener_id* onto the amphora."""
        r = self.put(
            amp,
            'listeners/{amphora_id}/{listener_id}/haproxy'.format(
                amphora_id=amp.id, listener_id=listener_id),
            data=config)
        return exc.check_exception(r)
    def get_listener_status(self, amp, listener_id):
        """Return the agent's JSON status for one listener."""
        r = self.get(
            amp,
            'listeners/{listener_id}'.format(listener_id=listener_id))
        if exc.check_exception(r):
            return r.json()
    def _action(self, action, amp, listener_id):
        # Generic start/stop/reload for a listener.
        r = self.put(amp, 'listeners/{listener_id}/{action}'.format(
            listener_id=listener_id, action=action))
        return exc.check_exception(r)
    def upload_cert_pem(self, amp, listener_id, pem_filename, pem_file):
        """PUT a PEM bundle for a listener certificate."""
        r = self.put(
            amp,
            'listeners/{listener_id}/certificates/(unknown)'.format(
                listener_id=listener_id, filename=pem_filename),
            data=pem_file)
        return exc.check_exception(r)
    def update_cert_for_rotation(self, amp, pem_file):
        """PUT a new agent certificate (cert rotation)."""
        r = self.put(amp, 'certificate', data=pem_file)
        return exc.check_exception(r)
    def get_cert_md5sum(self, amp, listener_id, pem_filename):
        """Return the md5sum the agent reports for a stored certificate."""
        r = self.get(amp,
                     'listeners/{listener_id}/certificates/(unknown)'.format(
                         listener_id=listener_id, filename=pem_filename))
        if exc.check_exception(r):
            return r.json().get("md5sum")
    def delete_listener(self, amp, listener_id):
        """DELETE a listener from the amphora."""
        r = self.delete(
            amp, 'listeners/{listener_id}'.format(listener_id=listener_id))
        return exc.check_exception(r)
    def get_info(self, amp):
        """Return the agent's basic info document."""
        r = self.get(amp, "info")
        if exc.check_exception(r):
            return r.json()
    def get_details(self, amp):
        """Return the agent's detailed status document."""
        r = self.get(amp, "details")
        if exc.check_exception(r):
            return r.json()
    def get_all_listeners(self, amp):
        """Return every listener the agent knows about."""
        r = self.get(amp, "listeners")
        if exc.check_exception(r):
            return r.json()
    def delete_cert_pem(self, amp, listener_id, pem_filename):
        """DELETE a stored listener certificate."""
        r = self.delete(
            amp,
            'listeners/{listener_id}/certificates/(unknown)'.format(
                listener_id=listener_id, filename=pem_filename))
        return exc.check_exception(r)
    def plug_network(self, amp, port):
        """POST a member-network port description for the agent to bring up."""
        r = self.post(amp, 'plug/network',
                      json=port)
        return exc.check_exception(r)
    def plug_vip(self, amp, vip, net_info):
        """POST the VIP network details for the agent to configure."""
        r = self.post(amp,
                      'plug/vip/{vip}'.format(vip=vip),
                      json=net_info)
        return exc.check_exception(r)
    def upload_vrrp_config(self, amp, config):
        """PUT a rendered keepalived (VRRP) configuration."""
        r = self.put(amp, 'vrrp/upload', data=config)
        return exc.check_exception(r)
    def _vrrp_action(self, action, amp):
        # Generic start/stop/reload for the keepalived service.
        r = self.put(amp, 'vrrp/{action}'.format(action=action))
        return exc.check_exception(r)
    def get_interface(self, amp, ip_addr):
        """Return the interface the agent associates with *ip_addr*."""
        r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr))
        if exc.check_exception(r):
            return r.json()
| StarcoderdataPython |
1609672 | import torch
import torch.nn as nn
class PROMPTEmbedding(nn.Module):
    """Embedding layer for soft-prompt tuning.

    Wraps a token-embedding table ``wte`` and adds a trainable prompt of
    ``sum(n_tokens)`` vectors.  In :meth:`forward`, every occurrence of
    ``prompt_token_id`` in the input is replaced by the corresponding learned
    vector; all other ids are embedded through ``wte`` as usual.
    """

    def __init__(self,
                 wte: nn.Embedding,
                 n_tokens: tuple = (10, 10, 10),
                 random_range: float = 0.5,
                 initialize_from_vocab: bool = True,
                 prompt_token_id: int = 50257,
                 initialize_tokens: torch.Tensor = torch.LongTensor([])):
        """
        Args:
            wte: the model's word-token embedding table.
            n_tokens: sizes of the prompt segments; their sum is the prompt length.
            random_range: half-width of the uniform init when not copying from vocab.
            initialize_from_vocab: seed the prompt from rows of ``wte``.
            prompt_token_id: placeholder id that marks prompt positions.
            initialize_tokens: vocab ids whose embeddings seed the prompt
                (length must equal ``sum(n_tokens)`` when seeding from vocab).
        """
        super(PROMPTEmbedding, self).__init__()
        self.wte = wte
        self.n_tokens = n_tokens
        self.learned_embedding = nn.parameter.Parameter(
            self.initialize_embedding(wte, n_tokens, random_range,
                                      initialize_from_vocab, initialize_tokens))
        self.prompt_token_id = prompt_token_id

    def initialize_embedding(self,
                             wte: nn.Embedding,
                             n_tokens: tuple = (10, 10, 10),
                             random_range: float = 0.5,
                             initialize_from_vocab: bool = True,
                             initialize_tokens: torch.Tensor = torch.LongTensor([])):
        """Return the initial (sum(n_tokens), embed_dim) prompt matrix."""
        tot_tokens = sum(n_tokens)
        if initialize_from_vocab:
            if len(initialize_tokens) != tot_tokens:
                # BUG FIX: was a no-op ``assert '<message>'`` (a non-empty
                # string is always truthy); now actually raises.
                raise ValueError(
                    'init token length should match the number of tokens in n_tokens')
            return self.wte(initialize_tokens).clone().detach()
        # BUG FIX: dimensions were swapped (embed_dim, tot_tokens); forward()
        # indexes rows as prompt positions, so rows must be tot_tokens.
        return torch.FloatTensor(tot_tokens, wte.weight.size(1)).uniform_(
            -random_range, random_range)

    def return_embeddings(self):
        """Return the prompt state (for checkpointing/inspection)."""
        return {
            'n_tokens': self.n_tokens,
            'learned_embedding': self.learned_embedding,
            # BUG FIX: referenced the undefined global ``prompt_token_id``,
            # which raised NameError at call time.
            'prompt_token_id': self.prompt_token_id,
        }

    def forward(self, tokens):
        """Embed ``tokens`` (batch, seq), splicing learned vectors at prompt positions.

        When fewer than ``sum(n_tokens)`` placeholders are present, the plain
        ``wte`` lookup is returned unchanged (placeholder splicing assumes each
        row carries exactly ``sum(n_tokens)`` placeholders).
        """
        embeddings = self.wte(tokens)
        total_prompt = sum(self.n_tokens)
        placeholder_count = (tokens == self.prompt_token_id).nonzero().size(0)
        if placeholder_count < total_prompt:
            # Covers the "no placeholders at all" case as well.
            return embeddings
        # nonzero() yields (row, col) pairs; keep only the column indices,
        # grouped per batch row.  Assumes every row has exactly
        # ``total_prompt`` placeholders -- the reshape enforces that.
        blocked_indices = (tokens == self.prompt_token_id).nonzero().reshape(
            (tokens.shape[0], total_prompt, 2))[:, :, 1]
        for row in range(blocked_indices.size(0)):
            for pos in range(blocked_indices.size(1)):
                embeddings[row, blocked_indices[row, pos], :] = \
                    self.learned_embedding[pos, :]
        return embeddings
8126851 | <reponame>berkerY/rdmo
# Generated by Django 2.2.13 on 2020-08-31 15:07
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: introduces projects.Issue, linking a Project
    # to a Task with an open/in_progress/closed status.  Do not hand-edit the
    # operations of an applied migration.
    dependencies = [
        ('tasks', '0031_related_name'),
        ('projects', '0033_default_value_type'),
    ]
    operations = [
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('open', 'open'), ('in_progress', 'in progress'), ('closed', 'closed')], default='open', help_text='The status for this issue.', max_length=12, verbose_name='Status')),
                ('project', models.ForeignKey(help_text='The project for this issue.', on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='projects.Project', verbose_name='Project')),
                ('task', models.ForeignKey(help_text='The task for this issue.', on_delete=django.db.models.deletion.CASCADE, related_name='issues', to='tasks.Task', verbose_name='Task')),
            ],
            options={
                'verbose_name': 'Issue',
                'verbose_name_plural': 'Issues',
                'ordering': ('project__title',),
            },
        ),
    ]
| StarcoderdataPython |
11294482 | <filename>Card Validator.py
#* basically luhns algorithm
# Takes in a credit card number from a common credit card vendor (Visa, MasterCard, American Express,
# Discover) and validates it to make sure that it is a valid number (look into how credit cards
# use a checksum).
# splits a string of numbers, doubles every second number, if number is two digits then adds the two digits
# together, adds new even numbers together with the odd numbers, if answer = multiple of 10 then correct
# def creditCheck(n):
# digits = [int(x) for x in str(n)]
# odd_digits = digits[0::2]
# even_digits = digits[1::2] #ives every second number
def luhn_checksum(card_number):
    """Return the Luhn checksum of *card_number* (0 means valid).

    Every second digit from the right is doubled; when doubling yields a
    two-digit number, its digits are added together.  The checksum is the
    total of all resulting digits, modulo 10.
    """
    def digits_of(n):
        # Split a number (or digit string) into its individual digits.
        return [int(d) for d in str(n)]
    digits = digits_of(card_number)
    odd_digits = digits[-1::-2]    # rightmost digit, then every second one
    even_digits = digits[-2::-2]   # the digits that get doubled
    checksum = sum(odd_digits)
    for d in even_digits:
        # Summing digits_of(d*2) adds the two digits of a doubled value.
        checksum += sum(digits_of(d * 2))
    # BUG FIX: removed the stray debug ``print(checksum)`` side effect.
    return checksum % 10
def is_luhn_valid(card_number):
    """Return True when *card_number* passes the Luhn check (checksum == 0)."""
    checksum = luhn_checksum(card_number)
    return checksum == 0
# Interactive driver: read a card number from stdin and print validity.
cardNum = input("Please enter credit card number > ")
result = is_luhn_valid(cardNum)
print(result)
11222967 | # <NAME>
# ID успешной посылки 65406475
from typing import List, Tuple
def twist_of_the_wrist(number_keys: int, matrix: List[str]) -> int:
    """Count symbols that can be played in a single two-handed press.

    Each hand has ``number_keys`` fingers, so up to ``2 * number_keys``
    simultaneous key presses are possible.  A symbol is playable when it
    occurs at most that many times on the keypad.

    Args:
        number_keys: fingers available per hand.
        matrix: flat list of keypad symbols ('.' cells already removed).

    Returns:
        The number of distinct playable symbols.
    """
    from collections import Counter  # local import keeps the module header untouched
    limit_pressures = number_keys * 2
    # Counter gives each symbol's multiplicity in one O(n) pass, replacing
    # the original O(unique * n) repeated ``matrix.count`` scans.
    occurrences = Counter(matrix)
    return sum(1 for count in occurrences.values() if count <= limit_pressures)
def read_input() -> Tuple[int, List[List[str]]]:
    """Read the puzzle from stdin: the hand limit, then four rows of symbols.

    '.' cells are empty and are skipped; all other symbols are collected
    into one flat list.
    """
    number_keys = int(input())
    matrix = []
    for _ in range(4):
        row = input().strip()
        matrix.extend(symbol for symbol in row if symbol != '.')
    return number_keys, matrix
if __name__ == '__main__':
    # Script entry point: read the field from stdin, then print the answer.
    number_keys, matrix = read_input()
    print(twist_of_the_wrist(number_keys, matrix))
| StarcoderdataPython |
56980 | <reponame>amshelhack3r/MangaDownloader
from bs4 import BeautifulSoup
import requests
import enum
from pprint import pformat
import logging
# Log to app.log, truncating it on every run; timestamped INFO-level messages.
logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(message)s', level=logging.INFO)
class Scraper():
    """Static scraping helpers for manganelo.com chapter/search/manga pages."""

    @staticmethod
    def getChapter(obj):
        """Collect the page-image URLs for one chapter.

        ``obj`` is a mapping with ``'name'`` (manga slug) and ``'chapter'``
        (chapter number); returns ``(chapter_label, [image_src, ...])``.
        """
        chapter_number = 'chapter_' + str(obj.get('chapter'))
        result = (chapter_number, [])
        url = Scraper.urlBuilder(UrlType.chapter.name,
                                 obj.get('name'), chapter_number)
        logging.info(pformat(url))
        soup = Scraper.getSoup(url)
        reader_div = soup.find('div', attrs={'class': 'container-chapter-reader'})
        for img in reader_div.findAll('img'):
            result[1].append(img.get('src'))
        logging.info(pformat(result))
        return result

    @staticmethod
    def getSearchResults(name):
        """Search by manga name; return a dict of {title: manga page url}."""
        search_results = dict()
        query = name.replace(" ", "_")
        # build search url
        url = Scraper.urlBuilder(UrlType.search.name, query)
        soup = Scraper.getSoup(url)
        # each result lives in its own "search-story-item" div
        for item in soup.findAll('div', attrs={'class': 'search-story-item'}):
            anchor = item.find('a', attrs={'class': 'item-img'})
            search_results[anchor.get("title")] = anchor.get('href')
        logging.info('search result: %s', str(search_results))
        return search_results

    @staticmethod
    def getMangaInfo(name):
        """Return {'count': n_chapters, 'chapters': [{title: url}, ...]}."""
        manga_info = dict(count=0, chapters=[])
        url = Scraper.urlBuilder(UrlType.manga.name, name)
        soup = Scraper.getSoup(url)
        chapter_items = soup.find_all('li', attrs={'class': 'a-h'})
        manga_info['count'] = len(chapter_items)
        for chapter in chapter_items:
            anchor = chapter.find('a')
            manga_info['chapters'].append({anchor.get('title'): anchor.get('href')})
        # logging.info('manga info: %s', str(manga_info))
        return manga_info

    @classmethod
    def getSoup(cls, url):
        """Fetch *url* and parse the response body with BeautifulSoup."""
        response = requests.get(url)
        return BeautifulSoup(response.content, 'html.parser')

    @classmethod
    def urlBuilder(cls, enumType, *args):
        """Join the base site URL, the page type, and any path segments."""
        return '/'.join(['https://manganelo.com/' + enumType, *args])
class UrlType(enum.Enum):
    # Page types understood by Scraper.urlBuilder; the member *name* is used
    # as the URL path segment, so names must match the site's routes.
    search = 'search'
    manga = 'manga'
    chapter = 'chapter'
| StarcoderdataPython |
11334187 | <filename>ansys/mapdl/core/mapdl_console.py<gh_stars>100-1000
"""Module to control interaction with an ANSYS shell instance.
Used when launching Mapdl via pexpect on Linux when <= 17.0
"""
import os
import time
import re
# from ansys.mapdl.core.misc import kill_process
from ansys.mapdl.core.mapdl import _MapdlCore
from ansys.mapdl.core.errors import MapdlExitedError
# Byte-regex patterns that mark MAPDL being ready for the next command:
# processor prompts, then continue/confirm questions, then error prompts.
ready_items = [
    rb"BEGIN:",
    rb"PREP7:",
    rb"SOLU_LS[0-9]+:",
    rb"POST1:",
    rb"POST26:",
    rb"RUNSTAT:",
    rb"AUX2:",
    rb"AUX3:",
    rb"AUX12:",
    rb"AUX15:",
    # continue
    rb"YES,NO OR CONTINUOUS\)\=",
    rb"executed\?",
    # errors
    rb"SHOULD INPUT PROCESSING BE SUSPENDED\?",
    # prompts
    rb"ENTER FORMAT for",
]

# Index boundaries used by MapdlConsole._run to classify which pattern
# matched: [CONTINUE_IDX, WARNING_IDX) = continue, [WARNING_IDX, ERROR_IDX)
# = warning, [ERROR_IDX, PROMPT_IDX) = error, >= PROMPT_IDX = user prompt.
CONTINUE_IDX = ready_items.index(rb"YES,NO OR CONTINUOUS\)\=")
WARNING_IDX = ready_items.index(rb"executed\?")
ERROR_IDX = ready_items.index(rb"SHOULD INPUT PROCESSING BE SUSPENDED\?")
PROMPT_IDX = ready_items.index(rb"ENTER FORMAT for")

nitems = len(ready_items)
# Pre-compiled patterns handed to pexpect's expect_list.
expect_list = []
for item in ready_items:
    expect_list.append(re.compile(item))

# Matches MAPDL's "WARNING ... command ... ignored" message (any spacing).
ignored = re.compile(r"[\s\S]+".join(["WARNING", "command", "ignored"]))
def launch_pexpect(
    exec_file=None,
    run_location=None,
    jobname=None,
    nproc=None,
    additional_switches="",
    start_timeout=60,
):
    """Spawn MAPDL as a pexpect child process.

    Limited to only a linux instance.  Waits until the ``BEGIN:`` prompt
    appears (answering the "press enter to continue" prompt if shown) and
    returns the live pexpect child.
    """
    import pexpect

    command = "%s -j %s -np %d %s" % (exec_file, jobname, nproc, additional_switches)
    child = pexpect.spawn(command, cwd=run_location)
    child.delaybeforesend = None

    try:
        matched = child.expect(["BEGIN:", "CONTINUE"], timeout=start_timeout)
    except:  # startup failed -- surface whatever MAPDL printed so far
        raise RuntimeError(child.before.decode("utf-8"))

    if matched:
        # matched "CONTINUE": press enter, then wait for the real prompt
        child.sendline("")
        child.expect("BEGIN:", timeout=start_timeout)
    return child
class MapdlConsole(_MapdlCore):
    """Control interaction with an ANSYS shell instance.

    Only works on Linux.
    """

    def __init__(self, loglevel="INFO", log_apdl="w", use_vtk=True, **start_parm):
        """Opens an ANSYS process using pexpect."""
        self._auto_continue = True
        self._continue_on_error = False
        self._process = None
        self._launch(start_parm)
        super().__init__(
            loglevel=loglevel, use_vtk=use_vtk, log_apdl=log_apdl, **start_parm
        )

    def _launch(self, start_parm):
        """Connect to MAPDL process using pexpect."""
        self._process = launch_pexpect(**start_parm)

    def _run(self, command, **kwargs):
        """Send *command* and return ANSYS's full response.

        Classifies each expect-list match as continue / warning / error /
        prompt via the module-level index boundaries and reacts accordingly.
        """
        self._reset_cache()
        if not self._process.isalive():
            raise MapdlExitedError("ANSYS exited")
        command = command.strip()
        if not command:
            raise ValueError("Cannot run empty command")

        # track the active output redirection file ("/OUT,<name>")
        if command[:4].lower() == "/out":
            items = command.split(",")
            if len(items) > 1:
                self._output = ".".join(items[1:])
            else:
                self._output = ""

        # send the command
        self._log.debug("Sending command %s", command)
        self._process.sendline(command)

        # do not expect
        if "/MENU" in command:
            self._log.info("Enabling GUI")
            self._process.sendline(command)
            return

        full_response = ""
        while True:
            i = self._process.expect_list(expect_list, timeout=None)
            response = self._process.before.decode("utf-8")
            full_response += response
            if i >= CONTINUE_IDX and i < WARNING_IDX:  # continue
                self._log.debug(
                    "Continue: Response index %i. Matched %s",
                    i,
                    ready_items[i].decode("utf-8"),
                )
                self._log.info(response + ready_items[i].decode("utf-8"))
                if self._auto_continue:
                    user_input = "y"
                else:
                    user_input = input("Response: ")
                self._process.sendline(user_input)
            elif i >= WARNING_IDX and i < ERROR_IDX:  # warning
                self._log.debug(
                    "Prompt: Response index %i. Matched %s",
                    i,
                    ready_items[i].decode("utf-8"),
                )
                self._log.warning(response + ready_items[i].decode("utf-8"))
                if self._auto_continue:
                    user_input = "y"
                else:
                    user_input = input("Response: ")
                self._process.sendline(user_input)
            elif i >= ERROR_IDX and i < PROMPT_IDX:  # error
                self._log.debug(
                    "Error index %i. Matched %s", i, ready_items[i].decode("utf-8")
                )
                self._log.error(response)
                response += ready_items[i].decode("utf-8")
                raise Exception(response)
            elif i >= PROMPT_IDX:  # prompt
                self._log.debug(
                    "Prompt index %i. Matched %s", i, ready_items[i].decode("utf-8")
                )
                self._log.info(response + ready_items[i].decode("utf-8"))
                raise RuntimeError(
                    "User input expected. " "Try using ``with mapdl.non_interactive``"
                )
            else:  # continue item
                self._log.debug(
                    "continue index %i. Matched %s", i, ready_items[i].decode("utf-8")
                )
                break

        # return last response and all preceding responses
        return full_response

    def exit(self, close_log=True, timeout=3):
        """Exit MAPDL process.

        Parameters
        ----------
        timeout : float
            Maximum time to wait for MAPDL to exit.  Set to 0 or
            ``None`` to not wait until MAPDL stops.

        Returns
        -------
        int
            0 when the process stopped within *timeout*, 1 otherwise.
        """
        self._log.debug("Exiting ANSYS")
        if self._process is not None:
            try:
                self._process.sendline("FINISH")
                self._process.sendline("EXIT")
            except:
                pass

        if close_log:
            self._close_apdl_log()

        self._exited = True

        # edge case: need to wait until process dies, otherwise future
        # commands might talk to a dead process...
        if timeout:
            tstart = time.time()
            while self._process.isalive():
                time.sleep(0.05)
                # BUGFIX: was ``tstart - time.time()`` (always negative),
                # so the timeout never fired and this loop could spin forever.
                telap = time.time() - tstart
                if telap > timeout:
                    return 1

        return 0

    def kill(self):
        """Force the ANSYS process to end and remove the lock file."""
        if self._process is not None:
            try:
                self.exit()
            except:
                try:
                    os.kill(self._process.pid, 9)
                except:
                    self._log.warning("Unable to kill process %d", self._process.pid)
                self._log.debug("Killed process %d", self._process.pid)

    @property
    def _name(self):
        """Instance unique identifier."""
        return f"Console_PID_{self._process.pid}"
| StarcoderdataPython |
11320982 | import torch
import torch.nn as nn
from torch.autograd import Variable
class ConvLSTMCell(nn.Module):
    """
    A single convolutional LSTM cell.

    Based on "Convolutional LSTM Network: A Machine Learning Approach for
    Precipitation Nowcasting" (https://arxiv.org/abs/1506.04214): every
    vector inner product of a classical LSTM cell is replaced by a 2d
    convolution.  The gate equations follow the official documentation for
    torch.nn.LSTMCell (https://pytorch.org/docs/master/nn.html#torch.nn.LSTMCell),
    i.e. the previous cell state C is *not* fed into the input/forget gates
    (unlike the original paper's notation).
    Good LSTM tutorials:
        english: http://colah.github.io/posts/2015-08-Understanding-LSTMs/
        chinese: https://www.jianshu.com/p/9dc9f41f0b29
    """

    def __init__(self, D_in, D_hidden, kernel_size, bias=True):
        super(ConvLSTMCell, self).__init__()
        self.D_in = D_in
        self.D_hidden = D_hidden
        self.bias = bias
        self.kernel_size = kernel_size
        # "same" padding so spatial dimensions are preserved by each conv
        self.padding = int((kernel_size - 1) / 2)

        def in_conv():
            # convolution applied to the input tensor x
            return nn.Conv2d(self.D_in, self.D_hidden, self.kernel_size,
                             1, self.padding, bias=bias)

        def hidden_conv():
            # convolution applied to the previous hidden state h
            return nn.Conv2d(self.D_hidden, self.D_hidden, self.kernel_size,
                             1, self.padding, bias=bias)

        # input gate (input info)
        self.Wii = in_conv()
        self.Whi = hidden_conv()
        # forget gate
        self.Wif = in_conv()
        self.Whf = hidden_conv()
        # input gate (cell info)
        self.Wig = in_conv()
        self.Whg = hidden_conv()
        # output gate
        self.Wio = in_conv()
        self.Who = hidden_conv()
        # keep handles so .cuda() can move every kernel explicitly
        self._kernels = [self.Wii, self.Whi, self.Wif, self.Whf,
                         self.Wig, self.Whg, self.Wio, self.Who]

    def forward(self, x, internal_state):
        """Advance one time step; returns ``(h_next, c_next)``."""
        h_prev, c_prev = internal_state
        # input gate
        in_gate = torch.sigmoid(self.Wii(x) + self.Whi(h_prev))
        # forget gate
        forget_gate = torch.sigmoid(self.Wif(x) + self.Whf(h_prev))
        # candidate cell content
        cell_candidate = torch.tanh(self.Wig(x) + self.Whg(h_prev))
        # output gate
        out_gate = torch.sigmoid(self.Wio(x) + self.Who(h_prev))
        # next cell status
        c_next = forget_gate * c_prev + in_gate * cell_candidate
        # next hidden/output status
        h_next = out_gate * torch.tanh(c_next)
        return h_next, c_next

    def cuda(self, device=None):
        """Move the module and every gate kernel to the GPU."""
        super(ConvLSTMCell, self).cuda(device)
        for kernel in self._kernels:
            kernel.cuda()
class CONV_lstm_unit(torch.nn.Module):
    """Convolutional LSTM layer with truncated BPTT (TBPTT).

    TBPTT only back-propagates through the latest ``k`` time steps: each
    ``forward`` re-runs the cell over the stored inputs of the last steps,
    starting from a detached hidden/cell state, so gradients never reach
    further back than the truncation window.

    Note: it does not support sequence input like the built-in LSTM;
    call ``forward`` once per time step.
    """

    def __init__(self, D_in, D_hidden, kernel_size, k, bias=True):
        """
        :param D_in: input channel count
        :param D_hidden: hidden/output channel count
        :param kernel_size: convolution kernel size of the cell
        :param k: number of time steps to keep gradients for (window size)
        """
        super(CONV_lstm_unit, self).__init__()
        self.D_in = D_in
        self.D_hidden = D_hidden
        self.k = k  # reserve gradients of how many time steps
        self.LSTM_cell = ConvLSTMCell(D_in, D_hidden, kernel_size, bias)
        # each entry is a mutable triple [hx, cx, x]; x stays None until the
        # step's input is recorded on the following forward call
        self.internal_state = []
        self.is_cuda = False  # is the module on cuda?

    def init_hiddens(self, batch_num, input_shape):
        """Create the initial zero hidden/cell state.

        :param batch_num: batch size
        :param input_shape: (width(int), height(int)) of the input maps
        """
        hx = torch.zeros(batch_num, self.D_hidden, input_shape[0], input_shape[1])
        cx = torch.zeros(batch_num, self.D_hidden, input_shape[0], input_shape[1])
        if self.is_cuda:
            hx = hx.cuda()
            cx = cx.cuda()
        self.internal_state.append([hx, cx, None])

    def forward(self, x):
        """Advance one time step, recomputing the last <= k steps (TBPTT).

        BUGFIX vs. the original:
          * the loop appended to ``internal_state`` while iterating it, so the
            iteration never terminated for k > 3 (the list grew each pass);
            the new state is now appended once, after the loop;
          * eviction tested ``len(hxcx)`` (always 3, the triple's length)
            instead of ``len(self.internal_state)``;
          * the stored per-step input ``x_`` is now actually fed to the cell
            (the original always re-used the current ``x``).
        """
        # initialize the internal state lazily on the first time step
        if len(self.internal_state) == 0:
            self.init_hiddens(x.shape[0], (x.shape[2], x.shape[3]))
        # detach at the truncation boundary so gradients stop here
        hx, cx, _ = self.internal_state[0]
        hx = hx.detach()
        cx = cx.detach()
        # recompute the chain over the stored steps; only the newest slot has
        # x == None, which is filled with the current input
        for hxcx in self.internal_state:
            x_ = hxcx[2]
            if x_ is None:
                hxcx[2] = x
                x_ = x
            hx, cx = self.LSTM_cell(x_, (hx, cx))
        self.internal_state.append([hx, cx, None])
        # evict the oldest states beyond the truncation window
        while len(self.internal_state) > self.k:
            evict_element = self.internal_state.pop(0)
            del evict_element
        return hx

    def get_hidden(self):
        """Return the internal state, moved to CPU when the module is on GPU."""
        for hxcx in self.internal_state:
            if self.is_cuda:
                # BUGFIX: .cpu() returns a new tensor; store it back
                hxcx[0] = hxcx[0].cpu()
                hxcx[1] = hxcx[1].cpu()
                if hxcx[2] is not None:
                    hxcx[2] = hxcx[2].cpu()
        return self.internal_state

    def dump_hidden(self, internal_state):
        """Restore a previously saved internal state (moving it to GPU if needed)."""
        self.internal_state = internal_state
        for hxcx in self.internal_state:
            if self.is_cuda:
                # BUGFIX: .cuda() returns a new tensor; store it back
                hxcx[0] = hxcx[0].cuda()
                hxcx[1] = hxcx[1].cuda()
                if hxcx[2] is not None:
                    hxcx[2] = hxcx[2].cuda()

    def cuda(self, device=None):
        """Move the module (and its cell kernels) to the GPU."""
        super(CONV_lstm_unit, self).cuda(device)
        self.is_cuda = True
        self.LSTM_cell.cuda()
if __name__ == "__main__":
    # Manual smoke tests; 1 and 2 are kept for reference but disabled.
    # # unit tests
    # # 1. ConvLSTMCell
    # cell = ConvLSTMCell(10, 5, 3)
    #
    # # dim = (batch, chanel, shape)
    # x = torch.randn(5, 10, 10, 10)
    # h = torch.randn(5, 5, 10, 10)
    # c = torch.randn(5, 5, 10, 10)
    #
    # hx, cx = cell(x, h, c)
    # print(hx.size(), cx.size())
    #
    # # 2. ConvLSTMCell (with cuda)
    # x = x.cuda()
    # h = h.cuda()
    # c = c.cuda()
    # cell.cuda()
    # hx, cx = cell(x, h, c)
    # print(hx.size(), cx.size())

    # 3. Conv_lstm_unit: one forward step + backward through the window
    # def __init__(self, D_in, D_hidden, kernel_size, k, bias=True):
    foo = CONV_lstm_unit(5, 10, 3, 10)

    # dim = (batch, chanel, shape)
    x = torch.randn(5, 5, 10, 10)
    # foo.init_hiddens(5, (10, 10))
    out = foo(x) ** 2
    print(out.size())

    loss = torch.sum(out)
    loss.backward()
| StarcoderdataPython |
326368 | <filename>api/src/Wordle.py
from flask import render_template
from python_framework import ResourceManager
import ModelAssociation
# Build the application via the project's ResourceManager, wiring in the model.
app = ResourceManager.initialize(__name__, ModelAssociation.MODEL)

@app.route(f'{app.api.baseUrl}')
def home():
    # Serve the home page; staticUrl points the template at the API's static assets.
    return render_template('home-page.html', staticUrl=ResourceManager.getApiStaticUrl(app))
| StarcoderdataPython |
8077133 | <gh_stars>0
import argparse
import logging
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import scipy
from random import sample
from sklearn.manifold import TSNE
from tqdm import tqdm
from typing import Dict
logger = logging.getLogger('sna')
def load_graph(is_weighted: bool,
               is_directed: bool,
               file_path: str) -> nx.classes.graph.Graph:
    """Load the graph from a file in the input directory.

    Args:
        is_weighted (boolean): Denotes if the graph should be weighted.
        is_directed (boolean): Denotes if the graph should be directed.
        file_path (str): Path to the file with the input graph.

    Returns:
        G (networkx.classes.graph.Graph): A NetworkX graph object.
    """
    # Both branches issued the identical read_gpickle call; only the weight
    # handling differs, so read once and special-case the unweighted path.
    G = nx.read_gpickle(file_path)
    if not is_weighted:
        # unweighted graphs get a uniform weight of 1 on every edge
        for edge in G.edges():
            G[edge[0]][edge[1]]['weight'] = 1
    if is_directed:
        # NOTE(review): this converts to *undirected* when is_directed is
        # True, which looks inverted relative to the docstring -- confirm
        # the intended semantics with the callers before changing it.
        G = G.to_undirected()
    return G
def transform_graph_from_multiple_files(args: argparse.Namespace) -> nx.classes.graph.Graph:
    """Load the graph from separate edge/node files and pickle it to the input directory.

    Args:
        args (argparse.Namespace): The provided application arguments.

    Returns:
        G (networkx.classes.graph.Graph): A NetworkX graph object.
    """
    edge_frame = pd.read_csv(args.input_edges, sep=',')
    G = nx.from_pandas_edgelist(edge_frame, args.column_one, args.column_two)

    node_frame = pd.read_csv(args.input_nodes, sep=',')
    # attach the ML target and the node id itself as node attributes
    target_by_id = pd.Series(node_frame.ml_target, index=node_frame.id).to_dict()
    nx.set_node_attributes(G, target_by_id, args.node_ml_target)
    id_by_id = pd.Series(node_frame.id, index=node_frame.id).to_dict()
    nx.set_node_attributes(G, id_by_id, 'id')

    nx.write_gpickle(G, args.output)
    return G
def transform_graph_from_adjacency_list(args: argparse.Namespace) -> nx.classes.graph.Graph:
    """Load the graph from an edge-list CSV and pickle it to the input directory.

    Args:
        args (argparse.Namespace): The provided application arguments.

    Returns:
        G (networkx.classes.graph.Graph): A NetworkX graph object.
    """
    edge_frame = pd.read_csv(args.input_edges, sep=',')
    G = nx.from_pandas_edgelist(edge_frame, args.column_one, args.column_two)
    nx.write_gpickle(G, args.output)
    return G
def sample_graph(args: argparse.Namespace) -> nx.classes.graph.Graph:
    """Load a pickled graph, keep a random 10000-node sample, and save it.

    Args:
        args (argparse.Namespace): The provided application arguments.

    Returns:
        G (networkx.classes.graph.Graph): A NetworkX graph object.
    """
    G = nx.read_gpickle(args.input)
    snapshot = G.copy()
    kept = set(sample(list(G.nodes()), 10000))  # O(1) membership tests
    # iterate the frozen copy while mutating the original graph
    for node in tqdm(snapshot):
        if node not in kept:
            G.remove_node(node)
    nx.write_gpickle(G, args.output)
    return G
def get_labels(G: nx.classes.graph.Graph,
               ml_target: str) -> Dict:
    """Map every node's 'id' attribute to its ML target attribute.

    Args:
        G (networkx.classes.graph.Graph): A NetworkX graph object.
        ml_target (str): The node ML target label.

    Returns:
        labels (dict): node ids as keys, ML target values as values.
    """
    return {attrs['id']: attrs[ml_target]
            for _, attrs in G.nodes(data=True)}
def print_graph_info(G: nx.classes.graph.Graph,
                     graph_name: str) -> None:
    """Log node count, edge count, and density of *G* under *graph_name*."""
    n_nodes = nx.number_of_nodes(G)
    n_edges = nx.number_of_edges(G)
    density = nx.density(G)

    logger.info(f'\nInformation about the {graph_name}')
    logger.info(
        f'Number of nodes: {n_nodes}\tNumber of edges: {n_edges}\tDensity: {density}\n')
def load_embedding(file_path: str) -> Dict:
    """Load node embeddings from a whitespace-separated file.

    The first line is a header and is skipped; every other line is
    ``<node> <v1> <v2> ...``.  All tokens (including the node key) are
    parsed as floats, matching the original behaviour.

    Args:
        file_path (str): Path to the file with the node embeddings.

    Returns:
        embedding_dict (dict): embedding vectors keyed by node.
    """
    embedding_dict = {}
    with open(file_path) as handle:
        for line_number, line in enumerate(handle):
            if line_number == 0:
                continue  # header line
            values = [float(token) for token in line.strip().split()]
            embedding_dict[values[0]] = values[1:]
    return embedding_dict
def str2bool(argument: str) -> bool:
    """Transform a string argument to a boolean value.

    Accepts actual bools unchanged; 'true'/'t' and 'false'/'f' are matched
    case-insensitively.  Anything else raises ArgumentTypeError.
    """
    if isinstance(argument, bool):
        return argument
    lowered = argument.lower()
    if lowered in ('true', 't'):
        return True
    if lowered in ('false', 'f'):
        return False
    raise argparse.ArgumentTypeError(
        'The argument must be a boolean value.')
def calculate_node_degrees(G: nx.classes.graph.Graph,
                           adj_matrix: scipy.sparse) -> Dict:
    """Calculate the degree of every node from the adjacency matrix.

    Args:
        G (networkx.classes.graph.Graph): A NetworkX graph object.
        adj_matrix (scipy.sparse): Graph adjacency matrix (rows in G's
            node-iteration order).

    Returns:
        degree_dict (dict): node ids as keys and degrees as values.
    """
    # row sums of the adjacency matrix give each node's degree
    row_sums = adj_matrix.sum(axis=1)
    return {node_id: row_sums[position, 0]
            for position, node_id in enumerate(G.nodes())}
def visualize_embeddings(embeddings, node_targets):
    """Project embeddings to 2D with t-SNE and show a colored scatter plot.

    Args:
        embeddings: array-like of node embedding vectors (one row per node).
        node_targets: per-node labels used to color the points.
    """
    # reduce to two dimensions for plotting
    tsne = TSNE(n_components=2)
    two_dimensional_embeddings = tsne.fit_transform(embeddings)

    # map each distinct label to an integer color index
    label_map = {l: i for i, l in enumerate(np.unique(node_targets))}
    node_colors = [label_map[target] for target in node_targets]

    plt.scatter(
        two_dimensional_embeddings[:, 0],
        two_dimensional_embeddings[:, 1],
        c=node_colors,
        cmap="jet",
        alpha=0.7,
    )
    plt.show()
| StarcoderdataPython |
1911873 | # -*- coding: utf-8 -*-
__author__ = 'rldotai'
__email__ = '<EMAIL>'
__version__ = '0.0.0'
from .dropout import DropOut
from .int2binary import Int2Bin
from .int2unary import Int2Unary
from .random_binomial import RandomBinomial
from .tile_coding import TileCoder
from .traces import AccumulatingTrace, ReplacingTrace | StarcoderdataPython |
11224691 | <reponame>NipunBhalla/image-similarity
# Gunicorn configuration: listen on all interfaces, port 5000.
bind = "0.0.0.0:5000"
# Worker timeout in seconds; generous to accommodate long-running requests.
timeout = 120
11372152 | ### Count Number of Teams - Solution
### O(n^2): (less_left*greater_right) + (greater_left*less_right)
class Solution:
    def numTeams(self, rating: List[int]) -> int:
        """Count (i, j, k) triples, i < j < k, whose ratings are strictly
        increasing or strictly decreasing.

        O(n^2): for each middle element, multiply the number of smaller
        elements on one side by the number of larger elements on the other.
        """
        total = 0
        n = len(rating)
        for mid in range(1, n - 1):
            pivot = rating[mid]
            before, after = rating[:mid], rating[mid + 1:]
            smaller_before = sum(1 for v in before if v < pivot)
            greater_before = sum(1 for v in before if v > pivot)
            smaller_after = sum(1 for v in after if v < pivot)
            greater_after = sum(1 for v in after if v > pivot)
            # increasing triples + decreasing triples through this pivot
            total += smaller_before * greater_after + greater_before * smaller_after
        return total
3445930 | import six
import unittest
import os
import shutil
from coopy.journal import DiskJournal
# Scratch directory for every test; created in setUp, removed in tearDown.
JOURNAL_DIR = 'journal_test/'
# Working directory the journal is anchored to.
CURRENT_DIR = os.getcwd()
class TestJournal(unittest.TestCase):
    """Tests for coopy's DiskJournal file naming, pickling, and lifecycle."""

    def setUp(self):
        os.mkdir(JOURNAL_DIR)

    def tearDown(self):
        shutil.rmtree(JOURNAL_DIR)

    def test_current_journal_file(self):
        """The journal picks the highest-numbered transaction log file."""
        journal = DiskJournal(JOURNAL_DIR, CURRENT_DIR)
        expected_file_name = '%s%s' % (JOURNAL_DIR,
                                       'transaction_000000000000002.log')
        # NOTE: assertEquals is a deprecated alias -- use assertEqual
        self.assertEqual(expected_file_name,
                         journal.current_journal_file(JOURNAL_DIR).name)
        # test hack! - create next file
        new_file_name = expected_file_name.replace('2', '3')
        open(new_file_name, 'wt').close()
        self.assertEqual(new_file_name,
                         journal.current_journal_file(JOURNAL_DIR).name)

    def test_receive(self):
        """An unpicklable message propagates PicklingError out of receive()."""
        import pickle

        class Message(object):
            def __init__(self, value):
                self.value = value

            def __getstate__(self):
                raise pickle.PicklingError()

        message = Message('test message')
        journal = DiskJournal(JOURNAL_DIR, CURRENT_DIR)
        journal.setup()
        self.assertRaises(
            pickle.PicklingError,
            journal.receive,
            (message)
        )

    def test_close(self):
        """close() closes the underlying log file opened by setup()."""
        journal = DiskJournal(JOURNAL_DIR, CURRENT_DIR)
        self.assertTrue(not journal.file)
        journal.setup()
        self.assertTrue(not journal.file.closed)
        journal.close()
        self.assertTrue(journal.file.closed)

    def test_setup(self):
        """setup() opens the expected log file and builds a Pickler."""
        journal = DiskJournal(JOURNAL_DIR, CURRENT_DIR)
        self.assertEqual(JOURNAL_DIR, journal.basedir)
        journal.setup()
        expected_file_name = '%s%s' % (JOURNAL_DIR,
                                       'transaction_000000000000002.log')
        self.assertEqual(expected_file_name,
                         journal.file.name)
        if six.PY3:
            import pickle
        else:
            import cPickle as pickle
        # test hack: derive the concrete Pickler class for the isinstance check
        pickle_class = pickle.Pickler(open(expected_file_name, 'rb'))\
                             .__class__
        self.assertTrue(isinstance(journal.pickler, pickle_class))
| StarcoderdataPython |
5003367 | <filename>setup.py
from setuptools import setup, find_packages
from mpunet import __version__
# Long description is the README plus the changelog, rendered as Markdown.
with open('README.md') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

# One requirement per non-empty line of requirements.txt.
with open("requirements.txt") as req_file:
    requirements = list(filter(None, req_file.read().split("\n")))

setup(
    name='mpunet',
    version=__version__,
    description='Multi-Planar UNet for autonomous '
                'segmentation of 3D medical images',
    long_description=readme + "\n\n" + history,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/perslev/MultiPlanarUNet',
    license='LICENSE.txt',
    packages=find_packages(),
    package_dir={'mpunet':
                 'mpunet'},
    include_package_data=True,
    setup_requires=["setuptools_git>=0.3",],

    # expose the "mp" command-line entry point
    entry_points={
       'console_scripts': [
           'mp=mpunet.bin.mp:entry_func',
       ],
    },
    install_requires=requirements,
    classifiers=['Development Status :: 3 - Alpha',
                 'Environment :: Console',
                 'Operating System :: POSIX',
                 'Programming Language :: Python :: 3',
                 'Programming Language :: Python :: 3.5',
                 'Programming Language :: Python :: 3.6',
                 'Programming Language :: Python :: 3.7',
                 'License :: OSI Approved :: MIT License']
)
| StarcoderdataPython |
3328840 | from math import sqrt
# Read integer coordinates of two points from the user.
x1 = int(input("enter x1: "))
x2 = int(input("enter x2: "))
y1 = int(input("enter y1: "))
y2 = int(input("enter y2: "))
# The variable holds the *squared* Euclidean distance; the square root is
# applied when printing (same output as before, clearer naming).
squared_distance = ((x2 - x1) ** 2) + ((y2 - y1) ** 2)
print('Distance between the points is: ', sqrt(squared_distance))
5071790 | <filename>python/delaunay.py<gh_stars>1-10
# MIT License; Copied from:
# http://code.activestate.com/recipes/579021-delaunay-triangulation/
#
# (I've had trouble with scipy.spatial,
# so going with pure Python even if it's slower.)
# Ignore long lines:
# flake8: noqa: E501
import numpy
import math
import copy
class DictDelaunay2d:
    '''
    Wrapper for Delaunay2d: Instead of supplying a list and getting indices, this takes a dict and
    returns keys for the dict.

    Start with these points:

      D   F
     B
      C E
    A

    >>> points = {'A':[0,0], 'B':[1,2], 'C':[2,1], 'D':[1,3], 'E':[3,1], 'F':[3,3]}
    >>> delaunay = DictDelaunay2d(points)
    >>> delaunay.getTriangles()
    [['B', 'C', 'F'], ['B', 'F', 'D'], ['C', 'F', 'E'], ['B', 'A', 'C'], ['C', 'A', 'E'], ['B', 'D', 'A']]
    '''

    def __init__(self, pairs_dict):
        # Record keys and coordinates in the same iteration order so triangle
        # indices from Delaunay2d can be mapped back to dictionary keys.
        self.keys = []
        coords = []
        for key, pair in pairs_dict.items():
            self.keys.append(key)
            coords.append(pair)
        self.delaunay = Delaunay2d(coords)

    def getTriangles(self):
        # Translate each index triangle into the caller's dictionary keys.
        return [[self.keys[index] for index in triangle]
                for triangle in self.delaunay.getTriangles()]
class Delaunay2d:
'''
The algorithm uses the S-hull method by <NAME> (http://www.s-hull.org/paper/s_hull.pdf).
The method involves ordering the points in increasing distance from the cloud's center of
gravity, creating a triangle with the first three points, and adding the remaining points while
contructing triangles between the point and boundary edges - only triangles with a definite sign
need to be added (the edge must be visible). Finally, an edge flipping step ensures that the
triangles are well formed, i.e. the sum of opposite angles to an edge is < 180 degree (the
so-called Delaunay criterion). It takes about 30 seconds to triangulate 1000 points, most of the
time is spent in two methods (flipOneEdge and getArea).
Further improvements could include adding support for holes and automatically adding Steiner
points. Generally speaking, triangles which have an edge on the boundary (convex hull), tend to
be squashed; this problem can be alleviated by adding more points on the boundary. Holes can be
    treated by removing triangles, edges, and points that are contained inside a given closed path.
Start with these points:
3 5
1
2 4
0
>>> points = [[0,0], [1,2], [2,1], [1,3], [3,1], [3,3]]
>>> delaunay = Delaunay2d(points)
>>> [list(point) for point in delaunay.points]
[[1, 2], [2, 1], [1, 3], [3, 1], [3, 3], [0, 0]]
Points have been reordered:
2 4
0
1 3
5
>>> delaunay.triangles
[[0, 1, 4], [0, 4, 2], [1, 4, 3], [0, 5, 1], [1, 5, 3], [0, 2, 5]]
Internal order!
>>> delaunay.getTriangles()
[[1, 2, 5], [1, 5, 3], [2, 5, 4], [1, 0, 2], [2, 0, 4], [1, 3, 0]]
Original order!
'''
EPS = 1.23456789e-14
    def __init__(self, pairs):
        """
        Triangulate the given point cloud with the S-hull method.
        @param pairs sequence of (x, y) coordinate pairs
        """
        # data structures
        self.points = [numpy.array(pair) for pair in pairs]
        self.triangles = [] # cells
        self.edge2Triangles = {} # edge to triangle(s) map
        self.boundaryEdges = set()
        self.appliedBoundaryEdges = None
        self.holes = None

        # compute center of gravity
        cg = numpy.zeros((2,), numpy.float64)
        for pt in self.points:
            cg += pt
        cg /= len(self.points)

        # sort points by increasing distance from the center of gravity
        def distanceSquare(pt):
            d = pt - cg
            return numpy.dot(d, d)

        dSqFromCenter = numpy.array([distanceSquare(pt) for pt in self.points])
        # self.order maps internal (sorted) indices back to the caller's indices
        self.order = list(dSqFromCenter.argsort())
        self.points = [self.points[i] for i in self.order]

        # create first triangle, make sure we're getting a non-zero area otherwise
        # drop the points
        # NOTE(review): `index` never advances, so degenerate points are always
        # deleted from the front of the sorted list -- confirm this is intended.
        area = 0.0
        index = 0
        stop = False
        while not stop and index + 2 < len(self.points):
            area = self.getArea(index, index + 1, index + 2)
            if abs(area) < self.EPS:
                del self.points[index]
            else:
                stop = True

        if index <= len(self.points) - 3:
            tri = [index, index + 1, index + 2]
            self.makeCounterClockwise(tri)
            self.triangles.append(tri)

            # boundary edges (oriented)
            e01 = (tri[0], tri[1])
            self.boundaryEdges.add(e01)
            e12 = (tri[1], tri[2])
            self.boundaryEdges.add(e12)
            e20 = (tri[2], tri[0])
            self.boundaryEdges.add(e20)

            # edge2Triangles uses sorted (unoriented) keys
            e01 = self.makeKey(e01[0], e01[1])
            self.edge2Triangles[e01] = [0, ]
            e12 = self.makeKey(e12[0], e12[1])
            self.edge2Triangles[e12] = [0, ]
            e20 = self.makeKey(e20[0], e20[1])
            self.edge2Triangles[e20] = [0, ]
        else:
            # all the points fall on a line
            return

        # add additional points one at a time, in sorted order
        for i in range(3, len(self.points)):
            self.addPoint(i)

        # remove all triangles inside holes
        # TO DO
def getTriangles(self):
"""
@return triangles
"""
return [[self.order[point] for point in tri] for tri in self.triangles]
    def getEdges(self):
        """
        @return edges (dictionary key view of sorted point-index pairs)
        """
        return self.edge2Triangles.keys()
def getArea(self, ip0, ip1, ip2):
"""
Compute the parallelipiped area
@param ip0 index of first vertex
@param ip1 index of second vertex
@param ip2 index of third vertex
"""
d1 = self.points[ip1] - self.points[ip0]
d2 = self.points[ip2] - self.points[ip0]
return (d1[0]*d2[1] - d1[1]*d2[0])
def isEdgeVisible(self, ip, edge):
"""
Return true iff the point lies to its right when the edge points down
@param ip point index
@param edge (2 point indices with orientation)
@return True if visible
"""
area = self.getArea(ip, edge[0], edge[1])
if area < self.EPS:
return True
return False
def makeCounterClockwise(self, ips):
"""
Re-order nodes to ensure positive area (in-place operation)
"""
area = self.getArea(ips[0], ips[1], ips[2])
if area < -self.EPS:
ip1, ip2 = ips[1], ips[2]
# swap
ips[1], ips[2] = ip2, ip1
    def flipOneEdge(self, edge):
        """
        Flip one edge then update the data structures
        @param edge sorted tuple of the two point indices forming the edge
        @return set of edges to iterate over at the next iteration
        """
        # start with empty set
        res = set()

        # assume edge is sorted
        tris = self.edge2Triangles.get(edge, [])
        if len(tris) < 2:
            # boundary edge: only one adjacent triangle, nothing to do
            return res

        iTri1, iTri2 = tris
        tri1 = self.triangles[iTri1]
        tri2 = self.triangles[iTri2]

        # find the opposite vertices, not part of the edge
        iOpposite1 = -1
        iOpposite2 = -1
        for i in range(3):
            if not tri1[i] in edge:
                iOpposite1 = tri1[i]
            if not tri2[i] in edge:
                iOpposite2 = tri2[i]

        # compute the 2 angles at the opposite vertices
        da1 = self.points[edge[0]] - self.points[iOpposite1]
        db1 = self.points[edge[1]] - self.points[iOpposite1]
        da2 = self.points[edge[0]] - self.points[iOpposite2]
        db2 = self.points[edge[1]] - self.points[iOpposite2]
        crossProd1 = self.getArea(iOpposite1, edge[0], edge[1])
        crossProd2 = self.getArea(iOpposite2, edge[1], edge[0])
        dotProd1 = numpy.dot(da1, db1)
        dotProd2 = numpy.dot(da2, db2)
        angle1 = abs(math.atan2(crossProd1, dotProd1))
        angle2 = abs(math.atan2(crossProd2, dotProd2))

        # Delaunay's test: flip when the opposite angles sum to > 180 degrees
        if angle1 + angle2 > math.pi*(1.0 + self.EPS):

            # flip the triangles
            #             /  ^  \                    / b    \
            # iOpposite1 + a|b   + iOpposite2  =>   + - >    +
            #             \  |  /                    \     a /

            newTri1 = [iOpposite1, edge[0], iOpposite2] # triangle a
            newTri2 = [iOpposite1, iOpposite2, edge[1]] # triangle b

            # update the triangle data structure
            self.triangles[iTri1] = newTri1
            self.triangles[iTri2] = newTri2

            # now handle the topology of the edges

            # remove this edge
            del self.edge2Triangles[edge]

            # add new edge
            e = self.makeKey(iOpposite1, iOpposite2)
            self.edge2Triangles[e] = [iTri1, iTri2]

            # modify two edge entries which now connect to
            # a different triangle
            e = self.makeKey(iOpposite1, edge[1])
            v = self.edge2Triangles[e]
            for i in range(len(v)):
                if v[i] == iTri1:
                    v[i] = iTri2
            res.add(e)

            e = self.makeKey(iOpposite2, edge[0])
            v = self.edge2Triangles[e]
            for i in range(len(v)):
                if v[i] == iTri2:
                    v[i] = iTri1
            res.add(e)

            # these two edges might need to be flipped at the
            # next iteration
            res.add(self.makeKey(iOpposite1, edge[0]))
            res.add(self.makeKey(iOpposite2, edge[1]))

        return res
def flipEdges(self):
"""
Flip edges to statisfy Delaunay's criterion
"""
# start with all the edges
edgeSet = set(self.edge2Triangles.keys())
continueFlipping = True
while continueFlipping:
#
# iterate until there are no more edges to flip
#
# collect the edges to flip
newEdgeSet = set()
for edge in edgeSet:
# union
newEdgeSet |= self.flipOneEdge(edge)
edgeSet = copy.copy(newEdgeSet)
continueFlipping = (len(edgeSet) > 0)
def addPoint(self, ip):
"""
Add point
@param ip point index
"""
# collection for later updates
boundaryEdgesToRemove = set()
boundaryEdgesToAdd = set()
for edge in self.boundaryEdges:
if self.isEdgeVisible(ip, edge):
# create new triangle
newTri = [edge[0], edge[1], ip]
newTri.sort()
self.makeCounterClockwise(newTri)
self.triangles.append(newTri)
# update the edge to triangle map
e = list(edge[:])
e.sort()
iTri = len(self.triangles) - 1
self.edge2Triangles[tuple(e)].append(iTri)
# add the two boundary edges
e1 = [ip, edge[0]]
e1.sort()
e1 = tuple(e1)
e2 = [edge[1], ip]
e2.sort()
e2 = tuple(e2)
v1 = self.edge2Triangles.get(e1, [])
v1.append(iTri)
v2 = self.edge2Triangles.get(e2, [])
v2.append(iTri)
self.edge2Triangles[e1] = v1
self.edge2Triangles[e2] = v2
# keep track of the boundary edges to update
boundaryEdgesToRemove.add(edge)
boundaryEdgesToAdd.add((edge[0], ip))
boundaryEdgesToAdd.add((ip, edge[1]))
# update the boundary edges
for bedge in boundaryEdgesToRemove:
self.boundaryEdges.remove(bedge)
for bedge in boundaryEdgesToAdd:
bEdgeSorted = list(bedge)
bEdgeSorted.sort()
bEdgeSorted = tuple(bEdgeSorted)
if len(self.edge2Triangles[bEdgeSorted]) == 1:
# only add boundary edge if it does not appear
# twice in different order
self.boundaryEdges.add(bedge)
# recursively flip edges
flipped = True
while flipped:
flipped = self.flipEdges()
def makeKey(self, i1, i2):
"""
Make a tuple key such at i1 < i2
"""
if i1 < i2:
return (i1, i2)
return (i2, i1)
def show(self, width=500, height=500, showVertices=False, showCells=False, showContour=[]):
import tkinter
xmin = min([p[0] for p in self.points])
ymin = min([p[1] for p in self.points])
xmax = max([p[0] for p in self.points])
ymax = max([p[1] for p in self.points])
padding = 5
w = width - 2*padding
h = height - 2*padding
master = tkinter.Tk()
c = tkinter.Canvas(master, width=width, height=height)
c.pack()
for e in self.edge2Triangles:
i1, i2 = e
xp1 = padding + int(w*(self.points[i1][0] - xmin)/(xmax - xmin))
yp1 = padding + int(h*(ymax - self.points[i1][1])/(ymax - ymin))
xp2 = padding + int(w*(self.points[i2][0] - xmin)/(xmax - xmin))
yp2 = padding + int(h*(ymax - self.points[i2][1])/(ymax - ymin))
c.create_line(xp1, yp1, xp2, yp2)
if showVertices:
for i in range(len(self.points)):
xp = padding + int(w*(self.points[i][0] - xmin)/(xmax - xmin))
yp = padding + int(h*(ymax - self.points[i][1])/(ymax - ymin))
c.create_text(xp, yp, text=str(i))
if showCells:
for tId, tVals in self.triangles.items():
cg = reduce(operator.add, [self.points[i] for i in tVals])/float(len(tVals))
xp = padding + int(w*(cg[0] - xmin)/(xmax - xmin))
yp = padding + int(h*(ymax - cg[1])/(ymax - ymin))
c.create_text(xp, yp, text=str(tId))
if len(showContour) > 0:
for i in range(len(showContour) - 1):
xp1 = padding + int(w*(showContour[i][0] - xmin)/(xmax - xmin))
yp1 = padding + int(h*(ymax - showContour[i][1])/(ymax - ymin))
xp2 = padding + int(w*(showContour[i+1][0] - xmin)/(xmax - xmin))
yp2 = padding + int(h*(ymax - showContour[i+1][1])/(ymax - ymin))
c.create_line(xp1, yp1, xp2, yp2, fill='red')
tkinter.mainloop()
| StarcoderdataPython |
6605034 | <filename>scripts/release/bug_bash.py
#!/usr/bin/env python3
# Copyright (c) 2021, Facebook
#
# SPDX-License-Identifier: Apache-2.0
"""Query the Top-Ten Bug Bashers
This script will query the top-ten Bug Bashers in a specified date window.
Usage:
./scripts/bug-bash.py -t ~/.ghtoken -b 2021-07-26 -e 2021-08-07
GITHUB_TOKEN="..." ./scripts/bug-bash.py -b 2021-07-26 -e 2021-08-07
"""
import argparse
from datetime import datetime, timedelta
import operator
import os
# Requires PyGithub
from github import Github
def parse_args():
    """Parse the command line, resolve the GitHub token, and return the args.

    The token is read either from the file given with -t/--token or from the
    GITHUB_TOKEN environment variable; it is attached to the returned
    namespace as `args.token`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--all', dest='all',
                        help='Show all bugs squashed', action='store_true')
    parser.add_argument('-t', '--token', dest='tokenfile',
                        help='File containing GitHub token', metavar='FILE')
    parser.add_argument('-b', '--begin', dest='begin', help='begin date (YYYY-mm-dd)',
                        metavar='date', type=valid_date_type, required=True)
    parser.add_argument('-e', '--end', dest='end', help='end date (YYYY-mm-dd)',
                        metavar='date', type=valid_date_type, required=True)
    args = parser.parse_args()
    if args.end < args.begin:
        raise ValueError(
            'end date {} is before begin date {}'.format(args.end, args.begin))
    if args.tokenfile:
        with open(args.tokenfile, 'r') as token_file:
            token = token_file.read().strip()
    else:
        if 'GITHUB_TOKEN' not in os.environ:
            raise ValueError('No credentials specified')
        token = os.environ['GITHUB_TOKEN']
    args.token = token
    return args
class BugBashTally(object):
    """Tallies merged bug-fix pull requests per GitHub user in a date window."""

    def __init__(self, gh, begin_date, end_date):
        """Create a BugBashTally for the zephyr repository over the window
        [begin_date, end_date] (both datetime objects)."""
        self._gh = gh
        self._repo = gh.get_repo('zephyrproject-rtos/zephyr')
        self._begin_date = begin_date
        self._end_date = end_date
        self._issues = []
        self._pulls = []

    def get_tally(self):
        """Return a dict mapping user login -> number of bugs squashed."""
        counts = dict()
        for pull in self.get_pulls():
            login = pull.user.login
            counts[login] = counts.get(login, 0) + 1
        return counts

    def get_rev_tally(self):
        """Return a dict mapping score -> list of user logins, with keys
        sorted from highest score down (ties share one list)."""
        by_score = dict()
        for login, score in self.get_tally().items():
            by_score.setdefault(score, []).append(login)
        # Highest scores first.
        return dict(sorted(by_score.items(), key=lambda kv: kv[0], reverse=True))

    def get_top_ten(self):
        """Return up to ten (score, user) tuples, highest scores first."""
        # Users within one score keep their tally order - hopefully fair-ish.
        ranked = [(score, login)
                  for score, logins in self.get_rev_tally().items()
                  for login in logins]
        return ranked[:10]

    def get_pulls(self):
        """Return the merged GitHub pull requests that squashed bugs in the
        provided date window."""
        if not self._pulls:
            self.get_issues()
        return self._pulls

    def get_issues(self):
        """Return closed bug issues in the window whose linked PR was merged."""
        if self._issues:
            return self._issues
        cutoff = self._end_date + timedelta(1)
        candidates = self._repo.get_issues(state='closed', labels=[
            'bug'], since=self._begin_date)
        for issue in candidates:
            # The PyGithub API and the v3 REST API have no 'until'/'end date'
            # filter, so trim the window client-side.
            if issue.closed_at < self._begin_date or issue.closed_at > cutoff:
                continue
            linked = issue.pull_request
            if linked is None:
                # ignore issues without a linked pull request
                continue
            pr_number = int(linked.html_url.split('/')[-1])
            pull = self._repo.get_pull(pr_number)
            if not pull.merged:
                # pull requests that were not merged do not count
                continue
            self._pulls.append(pull)
            self._issues.append(issue)
        return self._issues
# https://gist.github.com/monkut/e60eea811ef085a6540f
def valid_date_type(arg_date_str):
    """Custom argparse *date* type: parse a YYYY-MM-DD string into a
    datetime, raising ArgumentTypeError on malformed input."""
    try:
        return datetime.strptime(arg_date_str, "%Y-%m-%d")
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Given Date ({0}) not valid! Expected format, YYYY-MM-DD!".format(arg_date_str))
def print_top_ten(top_ten):
    """Print the top-ten bug bashers as "score<TAB>user" rows."""
    for score, user in top_ten:
        # Tab-separated so the output can be redirected into a .csv/.tsv.
        print('%s\t%s' % (score, user))
def main():
    """Entry point: print every squashed bug (with -a) or the top-ten tally."""
    args = parse_args()
    bbt = BugBashTally(Github(args.token), args.begin, args.end)
    if not args.all:
        # print the top ten
        print_top_ten(bbt.get_top_ten())
        return
    # One tab-separated row per squashed bug.
    issues = bbt.get_issues()
    pulls = bbt.get_pulls()
    # Issues and pulls are appended in lock-step, so they must pair up.
    assert len(issues) == len(pulls)
    for issue, pull in zip(issues, pulls):
        print('{}\t{}\t{}'.format(issue.number, pull.user.login, pull.title))
# Run the report only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3367290 | from db import words
def test_get_id_for_word(db_conn):
    """'&c' is the first word in the table; unknown words yield None."""
    cur = db_conn.cursor()
    assert words.get_id_for_word(cur, '&c') == (1,)
    # A nonsense token must not resolve to any row.
    assert words.get_id_for_word(cur, 'rgnthm') is None
def test_get_word_for_id(db_conn):
    """Known ids map back to their word; bogus ids map to None."""
    cur = db_conn.cursor()
    # Test the affirmative case
    assert words.get_word_for_id(cur, 35810) == ('boogaloo',)
    # Negative, fractional, and non-numeric ids are all invalid.
    for bad_id in (-1, 1.5, 'wjksjksjkadbf'):
        assert words.get_word_for_id(cur, bad_id) is None
def test_word_exists(db_conn):
    """word_exists is True only for words actually present in the table."""
    cur = db_conn.cursor()
    for present in ('boogaloo', '&c'):
        assert words.word_exists(cur, present)
    for absent in ('rgnthm', 4):
        assert not words.word_exists(cur, absent)
def test_get_word_list(db_conn):
    """The word list holds the full vocabulary; ids=True yields 2-tuples."""
    cur = db_conn.cursor()
    word_map = words.get_word_list(cur)
    assert len(word_map) == 354971
    assert 'boogaloo' in word_map
    assert 'rgnthm' not in word_map
    word_map = words.get_word_list(cur, ids=True)
    assert all(len(entry) == 2 for entry in word_map.values())
def test_append_word(db_conn):
    """Appending a new word makes it visible to word_exists."""
    cur = db_conn.cursor()
    words.append_word(cur, 'rgnthm')
    assert words.word_exists(cur, 'rgnthm')
| StarcoderdataPython |
4876707 | <reponame>chrislangst/scalable-data-science
# Databricks notebook source exported at Tue, 28 Jun 2016 10:38:24 UTC
# MAGIC %md
# MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
# MAGIC
# MAGIC ## Student Project Presentation by <NAME>
# MAGIC
# MAGIC *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
# COMMAND ----------
# MAGIC %md
# MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/studentProjects/05_ShanshanZhou/051_EEG_Explore.html) of this databricks notebook and its recorded Uji :
# MAGIC
# MAGIC [](https://www.youtube.com/v/zJirlHAV6YU?rel=0&autoplay=1&modestbranding=1&start=4677&)
# COMMAND ----------
# MAGIC %md
# MAGIC # Identify hand motions from EEG recordings
# MAGIC ## by <NAME>
# MAGIC
# MAGIC **Patients who have lost hand function due to amputation or neurological disabilities wake up to this reality everyday. **
# MAGIC
# MAGIC * Restoring a patient's ability to perform these basic activities of daily life with a brain-computer interface (BCI) prosthetic device would greatly increase their independence and quality of life.
# MAGIC * Currently, there are no realistic, affordable, or low-risk options for neurologically disabled patients to directly control external prosthetics with their brain activity.
# MAGIC
# MAGIC **A possible solution ...**
# MAGIC * Recorded from the human scalp, EEG signals are evoked by brain activity.
# MAGIC * The relationship between brain activity and EEG signals is complex and poorly understood outside of specific laboratory tests.
# MAGIC * Providing affordable, low-risk, non-invasive BCI devices is dependent on further advancements in interpreting EEG signals.
# COMMAND ----------
# MAGIC %md
# MAGIC # A tutorial on how to process EEG data
# MAGIC ## by <NAME>
# MAGIC
# MAGIC http://blog.kaggle.com/2015/10/12/grasp-and-lift-eeg-winners-interview-1st-place-cat-dog/
# COMMAND ----------
# MAGIC %scala
# MAGIC //This allows easy embedding of publicly available information into any other notebook
# MAGIC //when viewing in git-book just ignore this block - you may have to manually chase the URL in frameIt("URL").
# MAGIC //Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("http://blog.kaggle.com/2015/10/12/grasp-and-lift-eeg-winners-interview-1st-place-cat-dog/",600))
# COMMAND ----------
# List the EEG dataset directory to confirm the zip files are in DBFS.
display(dbutils.fs.ls("dbfs:/datasets/eeg/")) #data already in dbfs - see below for details
# COMMAND ----------
# NOTE(review): sc.textFile does not decompress .zip archives, so these RDDs
# hold raw compressed bytes, not CSV lines - confirm this is intentional.
testRdd = sc.textFile('dbfs:/datasets/eeg/test.zip')
trainRdd = sc.textFile('dbfs:/datasets/eeg/train.zip')
# COMMAND ----------
# MAGIC %fs ls "dbfs:/home/ubuntu/databricks/EEG/train"
# COMMAND ----------
# DBFS paths for subject 3, series 3/4: event labels and raw EEG data CSVs.
subj3_series3_events_Path = "dbfs:/home/ubuntu/databricks/EEG/train/subj3_series3_events.csv"
subj3_series4_events_Path = "dbfs:/home/ubuntu/databricks/EEG/train/subj3_series4_events.csv"
# COMMAND ----------
subj3_series3_data_Path = "dbfs:/home/ubuntu/databricks/EEG/train/subj3_series3_data.csv"
subj3_series4_data_Path = "dbfs:/home/ubuntu/databricks/EEG/train/subj3_series4_data.csv"
# COMMAND ----------
# MAGIC %md generate RDD
# COMMAND ----------
# Load the event-label CSVs as line RDDs and concatenate series 3 and 4.
subj3_series3_events = sc.textFile(subj3_series3_events_Path)
subj3_series4_events = sc.textFile(subj3_series4_events_Path)
subj3_series34_events = subj3_series3_events.union(subj3_series4_events)
# COMMAND ----------
# Same for the raw EEG data CSVs.
subj3_series3_data = sc.textFile(subj3_series3_data_Path)
subj3_series4_data = sc.textFile(subj3_series4_data_Path)
subj3_series34 = subj3_series3_data.union(subj3_series4_data)
# COMMAND ----------
# MAGIC %md generate DataFrame from csv file
# COMMAND ----------
# Parse the series-3 data CSV into a DataFrame (header row, inferred types).
subj3_series3_Raw_DF = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferSchema='true').load(subj3_series3_data_Path)
# COMMAND ----------
# Drop the sample id column; only the electrode channels are needed.
subj3_series3_DF = subj3_series3_Raw_DF.drop('id')
# COMMAND ----------
display(subj3_series3_DF)
# COMMAND ----------
# MAGIC %md create DF from RDD
# COMMAND ----------
# Wrap each raw CSV line in a 1-tuple so the RDD can become a DataFrame.
subj3_series4_Raw_DF = subj3_series4_data.map(lambda x: (x, )).toDF()
# COMMAND ----------
subj3_series3_events_Raw_DF = sqlContext.read.format('com.databricks.spark.csv').options(header='true', inferSchema='true').load(subj3_series3_events_Path)
subj3_series4_events_Raw_DF = subj3_series4_events.map(lambda x: (x, )).toDF()
# COMMAND ----------
# NOTE(review): subj3_series3_events_DF is never defined in this notebook
# (only subj3_series3_events_Raw_DF above) - likely a stale cell.
display(subj3_series3_events_DF)
# COMMAND ----------
# MAGIC %md
# MAGIC #neural oscillation
# MAGIC * neural oscillationis characterized by change in signal power in specific frequency bands. These oscillations appear naturally in ongoing EEG activity, can be induced by a specific task, for example a hand movement, or mental calculus.
# MAGIC * For each subject, we should see a spot over the electrode C3 (Left motor cortex,corresponding to a right hand movement), and a decrease of the signal power in
# MAGIC 10 and 20 Hz during the movement (by reference to after the movement).
# COMMAND ----------
# Count HandStart events in series 3.
subj3_series3_events_DF.filter("HandStart = 1").count()
# COMMAND ----------
# NOTE(review): this DF has a single string column per line, so filtering on
# a "HandStart" column is unlikely to work as written - verify.
subj3_series34.map(lambda x: (x, )).toDF().filter("HandStart = 1").count()
# COMMAND ----------
# NOTE(review): creat_mne_raw_object is not defined anywhere in this
# notebook (name also looks like a typo of create_...) - confirm its source.
raw = creat_mne_raw_object(subj3_series3_DF)
# COMMAND ----------
# get channel names (DataFrame column names)
ch_names = list(subj3_series3_DF)
ch_names
# COMMAND ----------
# MAGIC %md
# MAGIC ### To get data to dbfs let's download and save.
# COMMAND ----------
# MAGIC %sh
# MAGIC pwd
# COMMAND ----------
# MAGIC %sh
# MAGIC df -h /databricks/driver
# COMMAND ----------
# MAGIC %md
# MAGIC This data in `http://www.math.canterbury.ac.nz/~r.sainudiin/tmp/` may be deleted in the future.
# COMMAND ----------
# MAGIC %sh
# MAGIC wget http://www.math.canterbury.ac.nz/~r.sainudiin/tmp/test.zip
# COMMAND ----------
# MAGIC %sh
# MAGIC wget http://www.math.canterbury.ac.nz/~r.sainudiin/tmp/train.zip
# COMMAND ----------
# Create the DBFS target directory and copy the downloaded archive into it.
dbutils.fs.mkdirs("dbfs:/datasets/eeg")
# COMMAND ----------
# NOTE(review): only train.zip is copied here; test.zip is read below but a
# matching cp for it does not appear in this cell - confirm it was copied.
dbutils.fs.cp("file:/databricks/driver/train.zip","dbfs:/datasets/eeg/")
# COMMAND ----------
display(dbutils.fs.ls("dbfs:/datasets/eeg/"))
# COMMAND ----------
testRdd = sc.textFile('dbfs:/datasets/eeg/test.zip')
trainRdd = sc.textFile('dbfs:/datasets/eeg/train.zip')
# COMMAND ----------
# Peek at the first records as a sanity check.
testRdd.take(5)
# COMMAND ----------
trainRdd.take(5)
# COMMAND ----------
# MAGIC %sh
# MAGIC rm train.zip test.zip
# COMMAND ----------
# COMMAND ----------
dbutils.fs.help()
# COMMAND ----------
# MAGIC %md
# MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
# MAGIC
# MAGIC ## Student Project Presentation by <NAME>
# MAGIC
# MAGIC *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | StarcoderdataPython |
3246949 | """
Per-robot configuration file that is particular to each individual robot, not just the type of robot.
"""
import numpy as np
# Servo pulse-width scale: microseconds of PWM per radian of joint rotation.
MICROS_PER_RAD = 11.333 * 180.0 / np.pi  # Must be calibrated
# Per-joint neutral offsets in degrees, shape (3, 4): one row per joint
# axis, one column per leg - TODO confirm the row/column ordering against
# the controller code.  Earlier calibration snapshots are kept commented
# below for reference.
NEUTRAL_ANGLE_DEGREES = np.array(
    ## [[ -0, 7, 2, 3], [ 17, 57, 46, 52], [-39, -35, -33, -64]]
    # [[ -0, -4, -13, -9],[ 51, 45, 37, 45],[-53, -47, -45, -46]]
    # [[ 14., -1., -11., -8.], [ 54., 56., 37., 46.],[-53., -48., -45., -45.]]
    # [[ 13., -3., -13., -9.], [ 53., 56., 36., 45.], [-53., -48., -45., -46.]]
    [[-1.0, -4.0, -14.0, -10.0], [45.0, 47.0, 43.0, 40.0], [-41.0, -43.0, -28.0, -22.0]]
)
# PS4 controller light-bar colours (0-255 per channel) for each robot state.
PS4_COLOR = {"red": 0, "blue": 255, "green": 0}
PS4_DEACTIVATED_COLOR = {"red": 0, "blue": 50, "green": 0}
PS4_RECORD_COLOR = {"red": 255, "blue": 0, "green": 0}
PS4_ARM_COLOR = {"red": 0, "blue": 0, "green": 255}
| StarcoderdataPython |
8000760 | from django.contrib import admin
from boa.core.models import Answer
class AnswerAdmin(admin.ModelAdmin):
    """Admin configuration for Answer: columns, search, filter, ordering."""
    list_display = ('chanswer', 'enanswer',)
    # Fixed: Django's option is `search_fields` (plural); the original
    # `search_field` attribute was silently ignored.
    search_fields = ('chanswer',)
    list_filter = ('id',)
    ordering = ('id',)


# Fixed: register Answer WITH AnswerAdmin; the original registered the
# model bare, so none of the configuration above ever took effect.
admin.site.register(Answer, AnswerAdmin)
| StarcoderdataPython |
11245263 | <filename>src/templates.py
from grimoire.templates import default_page
from grimoire.utils import make_decorator
from hype import Div, P
@make_decorator
@default_page("Grimoire Story")
def template(fn, state, *opts):
paragraphs, options, state = fn(state, *opts)
content = Div(
*[P(p) for p in paragraphs],
)
return content, options, state | StarcoderdataPython |
5092648 | import os
import json, decimal
import boto3
from boto3.dynamodb.conditions import Key, Attr
tableName = os.environ.get('LEVELS_TABLE_NAME')
def handler(event, context):
    """Lambda entry point: return the calling user's oxygen/pulse history.

    Reads every record belonging to the Cognito-authenticated caller from
    the DynamoDB levels table and returns parallel lists of oxygen levels,
    timestamps and pulse rates as a CORS-enabled API Gateway response.
    """
    client = boto3.resource('dynamodb')
    table = client.Table(tableName)
    print(table.table_status)
    print(event)
    user_data = event['requestContext']['authorizer']['claims']
    # Restrict the scan to the caller's own rows.
    username_filter = Key('username').eq(user_data['cognito:username'])
    res = table.scan(FilterExpression=username_filter)
    data = res['Items']
    while 'LastEvaluatedKey' in res:
        # Fixed: the continuation scan must re-apply FilterExpression.  The
        # original omitted it, so once the table grew past one scan page the
        # response included other users' records.
        res = table.scan(FilterExpression=username_filter,
                         ExclusiveStartKey=res['LastEvaluatedKey'])
        data.extend(res['Items'])

    def decimal_default(obj):
        # DynamoDB returns numbers as Decimal; JSON needs plain floats.
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        raise TypeError

    # Round-trip through JSON to convert every Decimal to a float.
    data = json.dumps(data, default=decimal_default)
    oxygen_levels = []
    timestamps = []
    pulse_rates = []
    for entry in json.loads(data):
        oxygen_levels.append(entry['oxygen_level'])
        timestamps.append(entry['timestamp'])
        pulse_rates.append(entry['bpm'])
    body = {
        'oxygen_levels': oxygen_levels,
        'timestamps': timestamps,
        'pulse_rates': pulse_rates
    }
    response = {
        "statusCode": 200,
        "body": json.dumps(body),
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    }
    print(response)
    return response
1757080 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: execute.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import pipeline_pb2 as pipeline__pb2
import value_pb2 as value__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='execute.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\rexecute.proto\x1a\x0epipeline.proto\x1a\x0bvalue.proto\"c\n\x16PipelineExecuteRequest\x12\x31\n\x13pipelineDescription\x18\x01 \x01(\x0b\x32\x14.PipelineDescription\x12\x16\n\x06inputs\x18\x02 \x03(\x0b\x32\x06.Value\",\n\x17PipelineExecuteResponse\x12\x11\n\tresultURI\x18\x01 \x01(\t2R\n\x08\x45xecutor\x12\x46\n\x0f\x45xecutePipeline\x12\x17.PipelineExecuteRequest\x1a\x18.PipelineExecuteResponse\"\x00\x62\x06proto3')
,
dependencies=[pipeline__pb2.DESCRIPTOR,value__pb2.DESCRIPTOR,])
_PIPELINEEXECUTEREQUEST = _descriptor.Descriptor(
name='PipelineExecuteRequest',
full_name='PipelineExecuteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipelineDescription', full_name='PipelineExecuteRequest.pipelineDescription', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='PipelineExecuteRequest.inputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=145,
)
_PIPELINEEXECUTERESPONSE = _descriptor.Descriptor(
name='PipelineExecuteResponse',
full_name='PipelineExecuteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resultURI', full_name='PipelineExecuteResponse.resultURI', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=147,
serialized_end=191,
)
_PIPELINEEXECUTEREQUEST.fields_by_name['pipelineDescription'].message_type = pipeline__pb2._PIPELINEDESCRIPTION
_PIPELINEEXECUTEREQUEST.fields_by_name['inputs'].message_type = value__pb2._VALUE
DESCRIPTOR.message_types_by_name['PipelineExecuteRequest'] = _PIPELINEEXECUTEREQUEST
DESCRIPTOR.message_types_by_name['PipelineExecuteResponse'] = _PIPELINEEXECUTERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PipelineExecuteRequest = _reflection.GeneratedProtocolMessageType('PipelineExecuteRequest', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEEXECUTEREQUEST,
__module__ = 'execute_pb2'
# @@protoc_insertion_point(class_scope:PipelineExecuteRequest)
))
_sym_db.RegisterMessage(PipelineExecuteRequest)
PipelineExecuteResponse = _reflection.GeneratedProtocolMessageType('PipelineExecuteResponse', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEEXECUTERESPONSE,
__module__ = 'execute_pb2'
# @@protoc_insertion_point(class_scope:PipelineExecuteResponse)
))
_sym_db.RegisterMessage(PipelineExecuteResponse)
_EXECUTOR = _descriptor.ServiceDescriptor(
name='Executor',
full_name='Executor',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=193,
serialized_end=275,
methods=[
_descriptor.MethodDescriptor(
name='ExecutePipeline',
full_name='Executor.ExecutePipeline',
index=0,
containing_service=None,
input_type=_PIPELINEEXECUTEREQUEST,
output_type=_PIPELINEEXECUTERESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_EXECUTOR)
DESCRIPTOR.services_by_name['Executor'] = _EXECUTOR
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
1899034 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
from TrackingTools.KalmanUpdators.KFUpdatorESProducer_cfi import *
from TrackingTools.KalmanUpdators.KFSwitching1DUpdatorESProducer_cfi import *
from TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi import *
from TrackingTools.MaterialEffects.Propagators_cff import *
from TrackingTools.TrackFitters.TrackFitters_cff import *
from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi import *
from RecoTracker.TransientTrackingRecHit.TTRHBuilders_cff import *
from RecoTracker.TrackProducer.TrackProducer_cfi import *
from RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi import *
| StarcoderdataPython |
9618311 | #!/usr/bin/env python
#
# Copyright 2014 - 2016 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.utils.mathml.base as _base
import bce.utils.mathml.types as _types
class NumberComponent(_base.Base):
    """MathML <mn> (number) component."""

    def __init__(self, number_str):
        """Create a number component wrapping *number_str*.

        :type number_str: str
        :param number_str: The string of the number.
        """
        self.__num_str = number_str
        _base.Base.__init__(self, _types.COMPONENT_TYPE_NUMBER)

    def get_number_string(self):
        """Return the wrapped number string.

        :rtype : str
        :return: The string of the number.
        """
        return self.__num_str

    def set_number_string(self, number_str):
        """Replace the wrapped number string.

        :type number_str: str
        :param number_str: The string of the number.
        """
        self.__num_str = number_str

    def to_string(self, indent=0):
        """Serialize the component to a MathML string.

        :type indent: int
        :param indent: The indent space count.
        :rtype : str
        :return: The serialized string.
        """
        pad = " " * indent
        return "{}<mn>{}</mn>".format(pad, self.__num_str)
| StarcoderdataPython |
8109019 | import graphene
from graphql_jwt.decorators import login_required
from reports.models import WorkingHoursReport
from reports.object_types import WorkingHoursReportType
class WorkingHoursReportQuery(object):
    """Graphene query mixin exposing single- and multi-report lookups."""
    # Single report fetched by primary key.
    working_hours_report = graphene.Field(WorkingHoursReportType,
                                          id=graphene.Int(required=True),
                                          token=graphene.String(required=True))
    # All reports belonging to the authenticated user, newest first.
    working_hours_reports = graphene.List(WorkingHoursReportType, token=graphene.String(required=True))

    @login_required
    def resolve_working_hours_report(self, info, id, **kwargs):
        # NOTE(review): this fetches by pk without an ownership check, so any
        # logged-in user can read any report by id - confirm whether this
        # should be filtered by info.context.user like the list query below.
        return WorkingHoursReport.objects.get(pk=id)

    @login_required
    def resolve_working_hours_reports(self, info, **kwargs):
        user = info.context.user
        # Defensive re-check; @login_required should already guarantee this.
        if not user or user.is_anonymous:
            raise Exception('Not logged in.')
        return WorkingHoursReport.objects.filter(user=user).order_by('-date')
class Query(WorkingHoursReportQuery):
    """Aggregate GraphQL query root for the reports app."""
    pass
| StarcoderdataPython |
6689997 | <reponame>xuecan/fishbowl<filename>fishbowl/listutil.py
#!/usr/bin/python2.7
# -*- coding: UTF-8 -*-
# Copyright (C) 2016 <NAME> <<EMAIL>> and contributors.
# Licensed under the MIT license: http://opensource.org/licenses/mit-license
"""fishbowl list utilities"""
__version__ = '1.0.1'
import sys
import click
SETTINGS = {
'LIMIT': 0,
'OFFSET': 1,
'REVERSE': False,
}
def _slice(items):
begin = max(0, SETTINGS['OFFSET'] - 1)
count = max(0, SETTINGS['LIMIT'])
if not count:
end = len(items)
else:
end = begin + count
return items[begin:end]
def _echo(items):
    """Print each item to stdout as a filename-safe string."""
    for entry in items:
        click.echo(click.format_filename(entry))
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('--limit', '-l', default=0,
              help='Limit items count.')
@click.option('--offset', '-o', default=1,
              help='Offset of items, starts at 1.')
@click.option('--reverse', '-r', is_flag=True,
              help='Reverses items.')
def main(**kwargs):
    # Copy every group-level option into the shared SETTINGS dict, mapping
    # the option name to its upper-case settings key.
    for option_name, value in kwargs.items():
        settings_key = option_name.replace('_', '-').upper()
        assert settings_key in SETTINGS, 'Oops'
        SETTINGS[settings_key] = value
@main.command()
@click.argument('items', nargs=-1)
def rmdup(items):
    """Removes duplicated values in ITEMS."""
    # Track seen values in a set so the scan is O(n) instead of the original
    # O(n^2) list-membership test; first-occurrence order is preserved.
    seen = set()
    result = []
    for item in items:
        if item not in seen:
            seen.add(item)
            result.append(item)
    _echo(_slice(result))
@main.command()
@click.argument('value', nargs=1)
@click.argument('items', nargs=-1)
def rm(value, items):
    """Remove the given VALUE in ITEMS."""
    # Keep everything except exact matches of VALUE.
    kept = [item for item in items if item != value]
    _echo(_slice(kept))
@main.command()
@click.option('--type', '-t', default='str',
              type=click.Choice(['num', 'str']),
              help='Data type of items.')
@click.argument('items', nargs=-1)
def sort(type, items):
    """Sort ITEMS."""
    try:
        if type != 'num':
            values = list(items)
        else:
            # Decimal keeps exact numeric ordering for arbitrary precision.
            from decimal import Decimal
            values = [Decimal(entry) for entry in items]
    except Exception as err:
        # Report the conversion failure and exit with a non-zero status.
        sys.stderr.write(err.args[0])
        sys.exit(2)
    values.sort()
    _echo(_slice(values))
@main.command()
@click.argument('items', nargs=-1)
def sliced(items):
    """slice ITEMS"""
    # Apply the group-level LIMIT/OFFSET settings and print the result.
    _echo(_slice(list(items)))
# CLI entry point; importing this module has no side effects.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3513091 | <reponame>meseta/advent-of-code-2020
""" Models for User data """
from pydantic import BaseModel, Field # pylint: disable=no-name-in-module
class GameData(BaseModel):
""" Data to store for game """
fork_url: str = Field("", title="Url of player's fork")
| StarcoderdataPython |
11229023 | from setuptools import setup, find_packages
# Distribution metadata for the shared Djangy server-side package.
setup(
    name="djangy_server_shared",
    version="0.1",
    packages=find_packages(),  # include every package under this directory
    author="<NAME>",
    author_email="<EMAIL>",
    description="Djangy.com server shared code",
    keywords="djangy django",
    url="http://www.djangy.com",
    license="University of Illinois/NCSA Open Source License"
)
| StarcoderdataPython |
6550018 | <filename>ctpn/utils/gt_utils.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
File Name: gt_utils
Description : gt 四边形分割为固定宽度的系列gt boxes
Author : mick.yi
date: 2019/3/18
"""
import numpy as np
def linear_fit_y(xs, ys, x_list):
    """
    Fit a line through (xs[0], ys[0]) and (xs[1], ys[1]) and evaluate it
    at every point of x_list.
    :param xs: the two x coordinates [x1, x2]
    :param ys: the two y coordinates [y1, y2]
    :param x_list: x coordinates to evaluate, numpy array [n]
    :return: numpy array of fitted y values
    """
    if xs[0] == xs[1]:
        # Vertical segment: no single-valued line exists, use the mean y.
        return np.ones_like(x_list) * np.mean(ys)
    if ys[0] == ys[1]:
        # Horizontal segment: constant y.
        return np.ones_like(x_list) * ys[0]
    # General case: first-degree polynomial through the two points.
    line = np.poly1d(np.polyfit(xs, ys, 1))
    return line(x_list)
def get_min_max_y(quadrilateral, xs):
    """
    For each x coordinate in xs, return the minimum and maximum y value of
    the quadrilateral's boundary at that x.
    :param quadrilateral: corner coordinates x1,y1,x2,y2,x3,y3,x4,y4
    :param xs: x coordinates, numpy array [n]
    :return: (y_min, y_max) numpy arrays of boundary extremes at each x
    """
    # Refactored: the original repeated the same fit/range-test logic four
    # times (once per edge); iterate over the four wrapping corner pairs.
    corners = np.reshape(np.asarray(quadrilateral, dtype=np.float64), (4, 2))
    edges = [(corners[i], corners[(i + 1) % 4]) for i in range(4)]
    # Fitted y values of every edge's supporting line at every x.
    fitted = [linear_fit_y(np.array([a[0], b[0]]), np.array([a[1], b[1]]), xs)
              for a, b in edges]
    y_val_min = []
    y_val_max = []
    for i in range(len(xs)):
        # Keep only edges whose x-range actually covers xs[i].
        y_val = [fitted[j][i] for j, (a, b) in enumerate(edges)
                 if min(a[0], b[0]) <= xs[i] <= max(a[0], b[0])]
        y_val_min.append(min(y_val))
        y_val_max.append(max(y_val))
    return np.array(y_val_min), np.array(y_val_max)
def get_xs_in_range(x_array, x_min, x_max):
    """
    Select the grid x coordinates that fall inside [x_min, x_max], making
    sure both interval endpoints are included.
    :param x_array: grid of x coordinates (e.g. 0..image_width step 16)
    :param x_min: left end of the interval
    :param x_max: right end of the interval
    :return: numpy array of x coordinates starting at x_min, ending at x_max
    """
    mask = np.logical_and(x_array >= x_min, x_array <= x_max)
    xs = x_array[mask]
    # Pad the two ends so the interval boundaries are always present.
    if xs.size == 0 or xs[0] > x_min:
        xs = np.insert(xs, 0, x_min)
    if xs.size == 0 or xs[-1] < x_max:
        xs = np.append(xs, x_max)
    return xs
def gen_gt_from_quadrilaterals(gt_quadrilaterals, input_gt_class_ids, image_shape, width_stride, box_min_size=3):
    """Split GT quadrilaterals into fixed-width GT boxes.

    :param gt_quadrilaterals: [n,(x1,y1,x2,y2,x3,y3,x4,y4)] quadrilateral coords
    :param input_gt_class_ids: [n] class id per quadrilateral (usually 1)
    :param image_shape: (h, w, ...) of the image
    :param width_stride: horizontal split step, typically 16
    :param box_min_size: NOTE(review): currently unused — the final filter
        hard-codes height >= 8 and width >= 2; confirm intended behaviour.
    :return: (gt_boxes [m,(y1,x1,y2,x2)], gt_class_ids [m])
    """
    h, w = list(image_shape)[:2]
    x_array = np.arange(0, w + 1, width_stride, np.float32)  # fixed-step x grid
    # horizontal extent of each quadrilateral (even indices are x coords)
    quad_x_min = np.min(gt_quadrilaterals[:, ::2], axis=1)
    quad_x_max = np.max(gt_quadrilaterals[:, ::2], axis=1)
    boxes = []
    class_ids = []
    for idx in range(len(gt_quadrilaterals)):
        xs = get_xs_in_range(x_array, quad_x_min[idx], quad_x_max[idx])
        ys_min, ys_max = get_min_max_y(gt_quadrilaterals[idx], xs)
        # One fixed-width box per consecutive pair of x grid points; its
        # vertical extent is the envelope of the two sampled columns.
        pair_lo = np.minimum(ys_min[:-1], ys_min[1:])
        pair_hi = np.maximum(ys_max[:-1], ys_max[1:])
        for left, right, ylo, yhi in zip(xs[:-1], xs[1:], pair_lo, pair_hi):
            boxes.append([ylo, left, yhi, right])
            class_ids.append(input_gt_class_ids[idx])
    gt_boxes = np.reshape(np.array(boxes), (-1, 4))
    gt_class_ids = np.reshape(np.array(class_ids), (-1,))
    # Drop degenerate slivers (hard-coded thresholds, see note above).
    heights = gt_boxes[:, 2] - gt_boxes[:, 0]
    widths = gt_boxes[:, 3] - gt_boxes[:, 1]
    keep = np.where(np.logical_and(heights >= 8, widths >= 2))
    return gt_boxes[keep], gt_class_ids[keep]
| StarcoderdataPython |
11256654 | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals, division
import six
from os.path import expanduser
import json
import itertools
from ginga.util import wcs
from ginga.canvas.types.all import (Line, CompoundObject)
from astropy import units as u
from astropy.coordinates import SkyCoord
from hcam_widgets.compo.utils import (InjectionArm, PickoffArm, INJECTOR_THETA, PARK_POSITION)
from hcam_widgets.tkutils import get_root
from .finders import FovSetter
from .shapes import (CompoPatrolArc, CompoFreeRegion)
# Python 2/3 compatibility: the Tk file-dialog module was renamed in Python 3.
if not six.PY3:
    import tkFileDialog as filedialog
else:
    from tkinter import filedialog
class HCAMFovSetter(FovSetter):
    """FovSetter specialised for HiPERCAM: draws the CCD windows, quadrant
    markers and (optionally) the COMPO pickoff/injection arms on the finder
    canvas, and can save the current setup to a JSON file."""

    # Canvas tags owned by this widget; both are deleted/redrawn on update.
    overlay_names = ['ccd_overlay', 'compo_overlay']

    def window_string(self):
        """Return a one-line-per-window summary of the selected readout windows."""
        g = get_root(self).globals
        wframe = g.ipars.wframe
        if g.ipars.isFF():
            winlist = []
        # NOTE(review): when isFF() is true the empty list above is immediately
        # overwritten by one of the branches below — confirm intended logic.
        if g.ipars.isDrift():
            winlist = [
                'xsl: {}, xsr: {}, ys: {}, nx: {}, ny: {}'.format(xsl, xsr, ys, nx, ny)
                for (xsl, xsr, ys, nx, ny) in wframe
            ]
        else:
            winlist = [
                'xsll: {}, xslr: {}, xsul: {}, xsur: {}, ys: {}, nx: {}, ny: {}'.format(
                    xsll, xslr, xsul, xsur, ys, nx, ny
                ) for (xsll, xsul, xslr, xsur, ys, nx, ny) in wframe
            ]
        return '\n'.join(winlist)

    def saveconf(self):
        """Prompt for a filename and dump the instrument/target setup as JSON.

        Returns True on success, False if the user cancelled the dialog.
        """
        fname = filedialog.asksaveasfilename(
            initialdir=expanduser("~"),
            defaultextension='.json',
            filetypes=[('config files', '.json')],
            title='Name of setup file')
        if not fname:
            print('Aborted save to disk')
            return False
        g = get_root(self).globals
        data = dict()
        data['appdata'] = g.ipars.dumpJSON()
        # add user info that we should know of
        # includes target, user and proposal
        user = dict()
        user['target'] = self.targName.value()
        data['user'] = user
        # target info
        target = dict()
        target['target'] = self.targName.value()
        targ_coord = SkyCoord(self.targCoords.value(), unit=(u.hour, u.deg))
        target['TARG_RA'] = targ_coord.ra.to_string(sep=':', unit=u.hour, pad=True, precision=2)
        target['TARG_DEC'] = targ_coord.dec.to_string(sep=':', precision=1, unit=u.deg,
                                                      alwayssign=False, pad=True)
        target['RA'] = self.ra._value.to_string(sep=':', unit=u.hour, pad=True, precision=2)
        target['DEC'] = self.dec._value.to_string(sep=':', precision=1, pad=True, unit=u.deg, alwayssign=False)
        target['PA'] = self.pa.value()
        data['target'] = target
        # write file
        with open(fname, 'w') as of:
            of.write(json.dumps(data, sort_keys=True, indent=4,
                                separators=(',', ': ')))
        print('Saved setup to ' + fname)
        return True

    def _step_ccd(self):
        """
        Move CCD to next nod position
        """
        g = get_root(self).globals
        try:
            np = g.ipars.nodPattern
            if not np:
                raise ValueError('no nod pattern defined')
            nd = len(np['ra'])
            di = self.dither_index % nd  # wrap around the nod pattern
            raoff = np['ra'][di]
            decoff = np['dec'][di]
            self.dither_index += 1
        except Exception as err:
            # NOTE(review): if the failure happens before `di` is assigned,
            # this format() itself raises NameError — confirm/guard.
            self.logger.warn('could not get dither position {}: {}'.format(di, str(err)))
            return
        self.logger.info('moving CCD to dither position {:d} ({} {})'.format(
            di, raoff, decoff
        ))
        # get new dither cen (offsets are in arcsec, add_offset_radec wants deg)
        ra, dec = wcs.add_offset_radec(
            self.ctr_ra_deg, self.ctr_dec_deg,
            raoff/3600., decoff/3600.)
        image = self.fitsimage.get_image()
        xc, yc = image.radectopix(self.ra_as_drawn, self.dec_as_drawn)
        xn, yn = image.radectopix(ra, dec)
        # update latest dither centre
        self.ra_as_drawn, self.dec_as_drawn = ra, dec
        obj = self.canvas.get_object_by_tag('ccd_overlay')
        obj.move_delta(xn-xc, yn-yc)
        self.canvas.update_canvas()

    def _make_ccd(self, image):
        """
        Converts the current instrument settings to a ginga canvas object
        """
        # get window pair object from top widget
        g = get_root(self).globals
        wframe = g.ipars.wframe
        # all values in pixel coords of the FITS frame
        # get centre
        ctr_x, ctr_y = image.radectopix(self.ctr_ra_deg, self.ctr_dec_deg)
        self.ctr_x, self.ctr_y = ctr_x, ctr_y
        nx, ny = self.nxtot.value, self.nytot.value
        # full-frame outline of the main CCD
        mainCCD = self._make_win(0, 0, nx, ny, image,
                                 fill=True, fillcolor='blue',
                                 fillalpha=0.3, name='mainCCD')
        # dashed lines to mark quadrants of CCD
        chip_ctr_ra, chip_ctr_dec = self._chip_cen()
        xright, ytop = wcs.add_offset_radec(chip_ctr_ra, chip_ctr_dec,
                                            self.fov_x/2, self.fov_y/2)
        xleft, ybot = wcs.add_offset_radec(chip_ctr_ra, chip_ctr_dec,
                                           -self.fov_x/2, -self.fov_y/2)
        points = [image.radectopix(ra, dec) for (ra, dec) in (
            (chip_ctr_ra, ybot), (chip_ctr_ra, ytop)
        )]
        points = list(itertools.chain.from_iterable(points))
        hline = Line(*points, color='red', linestyle='dash', linewidth=2)
        points = [image.radectopix(ra, dec) for (ra, dec) in (
            (xleft, chip_ctr_dec), (xright, chip_ctr_dec)
        )]
        points = list(itertools.chain.from_iterable(points))
        vline = Line(*points, color='red', linestyle='dash', linewidth=2)
        # list of objects for compound object
        obl = [mainCCD, hline, vline]
        # iterate over window pairs
        # these coords in ccd pixel vaues
        params = dict(fill=True, fillcolor='red', fillalpha=0.3)
        if not g.ipars.isFF():
            if g.ipars.isDrift():
                for xsl, xsr, ys, nx, ny in wframe:
                    obl.append(self._make_win(xsl, ys, nx, ny, image, **params))
                    obl.append(self._make_win(xsr, ys, nx, ny, image, **params))
            else:
                # upper windows are mirrored vertically (1024-ys, -ny)
                for xsll, xsul, xslr, xsur, ys, nx, ny in wframe:
                    obl.append(self._make_win(xsll, ys, nx, ny, image, **params))
                    obl.append(self._make_win(xsul, 1024-ys, nx, -ny, image, **params))
                    obl.append(self._make_win(xslr, ys, nx, ny, image, **params))
                    obl.append(self._make_win(xsur, 1024-ys, nx, -ny, image, **params))
        obj = CompoundObject(*obl)
        obj.editable = True
        return obj

    def _make_compo(self, image):
        """Build the COMPO overlay (patrol arc, free region, pickoff and
        injection arms) as a single compound canvas object."""
        # get COMPO widget from main GUI
        g = get_root(self).globals
        compo_angle = g.compo_hw.setup_frame.pickoff_angle.value()
        compo_side = g.compo_hw.setup_frame.injection_side.value()
        # get chip coordinates - COMPO is aligned to chip
        chip_ctr_ra, chip_ctr_dec = self._chip_cen()
        # injection arm angle depends on which side it is mounted
        if compo_side == 'R':
            ia = -INJECTOR_THETA
        elif compo_side == 'L':
            ia = INJECTOR_THETA
        else:
            ia = PARK_POSITION
        # add COMPO components
        compo_arc = CompoPatrolArc(chip_ctr_ra, chip_ctr_dec, image,
                                   linewidth=10, color='black', linestyle='dash',
                                   name='COMPO_Arc')
        compo_free = CompoFreeRegion(chip_ctr_ra, chip_ctr_dec, image,
                                     fill=True, fillcolor='green', fillalpha=0.1,
                                     name='compo_free_region')
        compo_pickoff = PickoffArm().to_ginga_object(compo_angle*u.deg, chip_ctr_ra*u.deg, chip_ctr_dec*u.deg,
                                                     fill=True, fillcolor='yellow', fillalpha=0.3,
                                                     name='COMPO_pickoff')
        compo_injector = InjectionArm().to_ginga_object(ia, chip_ctr_ra*u.deg, chip_ctr_dec*u.deg,
                                                        color='yellow', fillalpha=0.3, fill=True,
                                                        name='COMPO_injector')
        obl = [compo_arc, compo_free, compo_pickoff, compo_injector]
        obj = CompoundObject(*obl)
        obj.editable = True
        return obj

    def draw_ccd(self, *args):
        """Redraw the CCD (and COMPO, when enabled) overlays on the canvas."""
        image = self.fitsimage.get_image()
        if image is None:
            return
        try:
            pa = self.pa.value() - self.paOff
            if not self.EofN:
                pa *= -1
        except Exception as err:
            # NOTE(review): if this fails, `pa` stays unbound and the rotate
            # calls below raise NameError (caught by the inner try blocks).
            errmsg = "failed to find rotation: {}".format(str(err))
            self.logger.error(errmsg)
        try:
            obj = self._make_ccd(image)
            obj.showcap = True
            self.canvas.deleteObjectByTag('ccd_overlay')
            self.canvas.add(obj, tag='ccd_overlay', redraw=False)
            # rotate
            obj.rotate(pa, self.ctr_x, self.ctr_y)
            obj.color = 'red'
            # save old values so we don't have to recompute FOV if we're just moving
            self.pa_as_drawn = pa
            self.ra_as_drawn, self.dec_as_drawn = self.ctr_ra_deg, self.ctr_dec_deg
        except Exception as err:
            errmsg = "failed to draw CCD: {}".format(str(err))
            self.logger.error(msg=errmsg)
        try:
            g = get_root(self).globals
            if g.ipars.compo():
                obj = self._make_compo(image)
                obj.showcap = True
                self.canvas.deleteObjectByTag('compo_overlay')
                self.canvas.add(obj, tag='compo_overlay', redraw=False)
                # rotate
                obj.rotate(pa, self.ctr_x, self.ctr_y)
            else:
                self.canvas.deleteObjectByTag('compo_overlay')
        except Exception as err:
            errmsg = "failed to draw COMPO: {}".format(str(err))
            self.logger.error(msg=errmsg)
        self.canvas.update_canvas()
| StarcoderdataPython |
9717242 | <reponame>Mergon/bluenet
#!/usr/bin/python2
import re
import pygame
from pygame.locals import *
from random import randint
import os
# --- Paths and input files -------------------------------------------------
DOCS_DIR_PREFIX = "../"
DOCS_DIR = "../docs/"
DIR = "diagrams/"
GEN_DIR = DOCS_DIR + DIR  # where generated diagram images are written
FILENAMES = [DOCS_DIR + F for F in ["PROTOCOL.md", "BEHAVIOUR.md", "SERVICE_DATA.md", "SERVICE_DATA_DEPRECATED.md", "BROADCAST_PROTOCOL.md", "UART_PROTOCOL.md", "MESH_PROTOCOL.md", "IPC.md"]]
fontPath = DOCS_DIR + "diagrams/fonts/LiberationSans-Regular.ttf"
fontSizeBlocks = 24  # font size for field-name labels inside boxes
fontSizeBytes = 16   # font size for the byte/bit number row
# --- Layout (pixels) -------------------------------------------------------
STEP_X = 50          # horizontal pixels per byte/bit
STEP_Y = 150         # height of a field box
MARGIN = 12          # inner text margin of a field box
BOX_WIDTH = 4        # border line width of a field box
MAX_VAR_LEN = 6      # fields longer than this are drawn truncated with "..."
DEFAULT_VAR_LEN = 6  # drawn width for fields of unknown length
# Define the colors we will use in RGB format
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
BACKGROUND = WHITE
# Sparse palettes: index roughly tracks lightness; unused slots stay None.
GREYS = [None]*16
GREYS[4] = (178, 178, 178)
SKY_BLUES = [None]*16
SKY_BLUES[2] = (51, 153, 255)
SKY_BLUES[3] = (0, 102, 255)
SKY_BLUES[6] = (0, 0, 128)
SKY_BLUES[7] = (0, 51, 102) # Dark
SKY_BLUES[8] = (51, 102, 153)
SKY_BLUES[9] = (102, 153, 204)
SKY_BLUES[10] = (153, 204, 255) # Light
GREENS = [None]*16
GREENS[4] = (0, 153, 0)
GREENS[5] = (0, 102, 0)
GREENS[7] = (51, 102, 51)
GREENS[8] = (102, 153, 102)
GREENS[9] = (153, 204, 153)
GREENS[10] = (204, 255, 204)
REDS = [None]*16
REDS[3] = (255, 0, 0)
REDS[5] = (153, 0, 0)
REDS[7] = (102, 51, 51)
REDS[8] = (153, 102, 102)
REDS[9] = (204, 153, 153)
REDS[10] = (255, 204, 204)
# NOTE(review): this first YELLOWS assignment is dead — it is overwritten by
# the 4-element list a few lines below; confirm which form is intended.
YELLOWS = [None]*16
YELLOW_7 = (102, 102, 51)
YELLOW_8 = (153, 153, 102)
YELLOW_9 = (204, 204, 153)
YELLOW_10 = (255, 255, 204)
YELLOWS = [YELLOW_7, YELLOW_8, YELLOW_9, YELLOW_10]
PURPLES = [None]*16
PURPLES[4] = (102, 0, 204)
PURPLES[5] = (51, 0, 153)
PURPLES[7] = (51, 0, 102)
PURPLES[8] = (102, 51, 153)
PURPLES[9] = (153, 102, 204)
PURPLES[10] = (204, 153, 255)
CHARTS = [None]*16
CHARTS[1] = ( 0, 69, 134) # Blue
CHARTS[2] = (255, 66, 14) # Orange
CHARTS[3] = (255, 255, 255) # Yellow
CHARTS[4] = ( 87, 157, 28) # Green
CHARTS[5] = (126, 0, 33) # Brown
CHARTS[6] = (131, 202, 255) # Light blue
CHARTS[7] = ( 49, 64, 4) # Dark green
CHARTS[8] = (174, 207, 0) # Light green
CHARTS[9] = ( 75, 31, 111) # Purple
CHARTS[10] = (255, 149, 14) # Orange
CHARTS[11] = (197, 0, 11) # Red
CHARTS[12] = ( 0, 132, 209) # Blue
# Colors handed out (round-robin) to field names without a fixed color below.
COLOR_PALETTE = []
# COLOR_PALETTE.extend(SKY_BLUES[7:10])
# COLOR_PALETTE.extend(GREENS[7:10])
# COLOR_PALETTE.extend(REDS[7:10])
# COLOR_PALETTE.extend(PURPLES[7:10])
# COLOR_PALETTE.extend(CHARTS[1:13])
COLOR_PALETTE.extend([SKY_BLUES[6], SKY_BLUES[3], SKY_BLUES[2]])
COLOR_PALETTE.extend([REDS[3], REDS[5]])
COLOR_PALETTE.extend([GREENS[4], GREENS[5]])
COLOR_PALETTE.extend([PURPLES[4], PURPLES[5]])
COLOR_PALETTE.extend([CHARTS[2], CHARTS[10]])
# COLOR_PALETTE.extend([GREYS[4]])
# Fixed colors for well-known field names (keys are lower-cased field names).
# All text in lower case
colorDict = {}
colorDict['type'] = SKY_BLUES[3]
colorDict['data type'] = SKY_BLUES[3]
colorDict['device type'] = SKY_BLUES[2]
colorDict['length'] = SKY_BLUES[2]
colorDict['size'] = SKY_BLUES[2]
colorDict['ad length'] = SKY_BLUES[6]
colorDict['ad type'] = SKY_BLUES[3]
colorDict['flags'] = SKY_BLUES[2]
colorDict['etc'] = GREYS[4]
colorDict['reserved'] = GREYS[4]
colorDict['padding'] = GREYS[4]
colorDict['rand'] = GREYS[4]
# Payload, data, etc
colorDict['payload'] = CHARTS[2]
colorDict['encrypted payload'] = CHARTS[2]
colorDict['data'] = CHARTS[2]
colorDict['data part'] = CHARTS[2]
colorDict['list'] = CHARTS[2]
colorDict['encrypted data'] = CHARTS[2]
colorDict['service data'] = CHARTS[2]
colorDict['command payload'] = CHARTS[2]
colorDict['command data'] = CHARTS[2]
def drawRect(rect, color):
    """Draw a filled rectangle with a BACKGROUND-colored border on `screen`.

    :param rect: [left, top, width, height] in pixels
    :param color: RGB fill color
    """
    # Rect: left, top, width, height
    # width of 0 to fill
    pygame.draw.rect(screen, color, rect)
    pygame.draw.rect(screen, BACKGROUND, rect, BOX_WIDTH)
def drawTextLines(x, y, labels, width, height, vertical, zoom):
    """Blit pre-rendered text lines centered in the (x, y, width, height) box.

    :param labels: list of rendered pygame text surfaces (one per line)
    :param vertical: when True, lines are rotated -90 degrees
    :param zoom: scale factor applied to every line
    """
    # Size of the largest line (pre-zoom), used for centering.
    maxLineWidth = 0
    maxLineHeight = 0
    for label in labels:
        if (label.get_width() > maxLineWidth):
            maxLineWidth = label.get_width()
        if (label.get_height() > maxLineHeight):
            maxLineHeight = label.get_height()
    maxLineWidth = maxLineWidth * zoom
    maxLineHeight = maxLineHeight * zoom
    textWidth = maxLineWidth  # NOTE(review): assigned but unused below
    textHeight = maxLineHeight*len(labels)
    xCenter = x + 0.5*width
    yCenter = y + 0.5*height
    if vertical:
        # Rotated lines stack horizontally, last line leftmost.
        for i in range(0, len(labels)):
            rotated = pygame.transform.rotozoom(labels[i], -90, zoom)
            drawY = yCenter - 0.5*labels[i].get_width()*zoom
            drawX = xCenter - 0.5*textHeight + (len(labels) - 1 - i) * maxLineHeight
            screen.blit(rotated, (drawX, drawY))
    else:
        # Horizontal lines stack top to bottom.
        for i in range(0, len(labels)):
            zoomed = pygame.transform.rotozoom(labels[i], 0, zoom)
            drawX = xCenter - 0.5*labels[i].get_width()*zoom
            drawY = yCenter - 0.5*textHeight + i*maxLineHeight
            screen.blit(zoomed, (drawX, drawY))
def calcTextZoom(labels, width, height, vertical):
    """Return the zoom factor (capped at 1.0) needed to fit *labels* into a
    width x height box, optionally with the text rotated 90 degrees.

    :param labels: rendered text surfaces (anything with get_width/get_height)
    :param vertical: True when the text will be drawn rotated
    """
    text_w = max(label.get_width() for label in labels)
    text_h = max(label.get_height() for label in labels) * len(labels)
    if vertical:
        # Rotated: the text's width maps onto the box height and vice versa.
        fit = min(1.0 * height / text_w, 1.0 * width / text_h)
    else:
        fit = min(1.0 * width / text_w, 1.0 * height / text_h)
    # Never enlarge beyond the natural size.
    return min(fit, 1.0)
def drawText(x, y, text, color, width, height, forceVertical=False):
    """Render *text* as large as possible inside the given box and draw it.

    Tries horizontal and vertical orientation (unless forceVertical) and a
    single line break at every space, keeping whichever fits at the largest
    zoom. NOTE(review): the *color* parameter is unused — labels are always
    rendered in WHITE; confirm intended.
    """
    # IDEA: Try to find a fit with the highest zoom.
    # Options are horizontal/vertical and replacing spaces with line breaks
    if (forceVertical):
        verticals = [True]
    else:
        verticals = [False, True]
    # Keep up best required zoom
    bestZoom = 0.0
    bestLines = []
    bestVert = False
    # First try without line breaks
    lines = [text]
    labels = []
    for line in lines:
        # The text can only be a single line: newline characters are not rendered.
        label = fontBlocks.render(line, True, WHITE)
        labels.append(label)
    for vert in verticals:
        zoom = calcTextZoom(labels, width, height, vert)
        if (zoom > bestZoom):
            bestZoom = zoom
            bestLines = lines
            bestLabels = labels
            bestVert = vert
    # Try 1 line break
    if (' ' in text):
        splitText = text.split(' ')
        for i in range(1,len(splitText)):
            # Build up the string with 1 line break
            lines = ["", ""]
            for line in splitText[0:i]:
                lines[0] = lines[0] + " " + line
            for line in splitText[i:]:
                lines[1] = lines[1] + " " + line
            labels = []
            for line in lines:
                # The text can only be a single line: newline characters are not rendered.
                label = fontBlocks.render(line, True, WHITE)
                labels.append(label)
            # Calculate the required zoom for this text
            for vert in verticals:
                zoom = calcTextZoom(labels, width, height, vert)
                if (zoom > bestZoom):
                    bestZoom = zoom
                    bestLines = lines
                    bestLabels = labels
                    bestVert = vert
    # Draw text with best zoom
    drawTextLines(x, y, bestLabels, width, height, bestVert, bestZoom)
def drawVar(startX, y, varName, varLen, color):
    """Draw a single protocol field as a colored, labeled box.

    :param startX: left edge in pixels
    :param varLen: field length in bytes/bits (clamped to MAX_VAR_LEN)
    :return: x coordinate just past the drawn box
    """
    span = min(varLen, MAX_VAR_LEN)
    box_w = span * STEP_X
    box_h = STEP_Y
    drawRect([startX, y, box_w, box_h], color)
    # Narrow boxes (< 3 units) get vertical text so the label still fits.
    drawText(startX + MARGIN, y + MARGIN, varName, WHITE,
             box_w - 2 * MARGIN, box_h - 2 * MARGIN, span < 3)
    return startX + box_w
def drawByteText(text, x, y, center=True):
    """Draw small black text with a 1px white halo (for the byte-number row).

    :param center: when True, *x* is the horizontal center of the text
    """
    global screen
    x += 1
    y += 1
    byteLabel = fontBytes.render(text, True, WHITE)
    if center:
        x -= 0.5*byteLabel.get_width()
    # Blit the white version at every offset in a (2*border+1)^2 grid to
    # create an outline, then the black version on top.
    borderSize = 1
    for border in range(1, borderSize + 1):
        for dx in range(-border, border + 1):
            for dy in range(-border, border + 1):
                screen.blit(byteLabel, (x + dx, y + dy))
    byteLabel = fontBytes.render(text, True, BLACK)
    screen.blit(byteLabel, (x, y))
def drawVarList(varList, filename, lengthInBits):
    """Render one packet-layout diagram and save it as *filename*.

    :param varList: list of (name, length, lengthKnown) field tuples
    :param filename: output image path (None aborts with a message)
    :param lengthInBits: label the number row "Bit" instead of "Byte"
    """
    if not filename:
        print("no filename for:")
        for var in varList:
            print(" " + var[0])
        return
    print("Generating " + filename)
    # Total drawn width in units (long fields are clamped to MAX_VAR_LEN).
    totalLen = 0
    for var in varList:
        # print var[0] + " " + str(var[1])
        if (var[1] > MAX_VAR_LEN):
            totalLen += MAX_VAR_LEN
        else:
            totalLen += var[1]
    size = [totalLen * STEP_X, STEP_Y]
    # Add text "byte" or "bit" to screen size
    byteTxt = "Bit" if lengthInBits else "Byte"
    byteLabel = fontBytes.render(byteTxt, True, BLACK)
    size[0] += byteLabel.get_width()
    size[1] += byteLabel.get_height()
    global screen
    screen = pygame.display.set_mode(size)
    screen.fill(BACKGROUND)
    x=0
    y=0
    # Draw the text "byte"
    # screen.blit(byteLabel, (x, y))
    drawByteText(byteTxt, x, y, False)
    # Boxes start right of the "Byte"/"Bit" label and below the number row.
    xVar = x + byteLabel.get_width()
    yVar = y + byteLabel.get_height()
    x += byteLabel.get_width()
    cycleColorInd = 0
    prevColorInd = -1
    byteNum = 0
    byteNumKnown = True  # becomes False after the first unknown-length field
    for var in varList:
        varName = var[0]
        varLen = var[1]
        varLenKnown = var[2]
        # Draw the byte numbers
        if byteNumKnown:
            if varLenKnown:
                if varLen > MAX_VAR_LEN:
                    # Truncated field: first numbers, "...", then last number.
                    endByteNum = byteNum + varLen-1
                    for i in range(0, MAX_VAR_LEN-2):
                        drawByteText(str(byteNum), x + 0.5*STEP_X, y)
                        byteNum += 1
                        x += STEP_X
                    drawByteText("...", x + 0.5 * STEP_X, y)
                    byteNum += 1
                    x += STEP_X
                    byteNum = endByteNum
                    drawByteText(str(byteNum), x + 0.5*STEP_X, y)
                    byteNum += 1
                    x += STEP_X
                else:
                    for i in range(0, varLen):
                        drawByteText(str(byteNum), x + 0.5*STEP_X, y)
                        byteNum += 1
                        x += STEP_X
            else:
                # Unknown length: numbering stops here for the rest of the row.
                drawByteText(str(byteNum), x + 0.5*STEP_X, y)
                drawByteText("...", x + 1.5*STEP_X, y)
                byteNumKnown = False
        # Determine color
        # First check if this var name already has an assigned color
        varNameLower = varName.lower()
        # print "in dict " + varNameLower + "=" + str(varNameLower in colorDict)
        if varNameLower in colorDict:
            color = colorDict[varNameLower]
        else:
            # Don't use the same color as the previous color
            colorInd = prevColorInd
            while colorInd == prevColorInd:
                # colorInd = randint(0, len(COLOR_PALETTE)-1)
                colorInd = cycleColorInd
                cycleColorInd = (cycleColorInd + 1) % len(COLOR_PALETTE)
            color = COLOR_PALETTE[colorInd]
            colorDict[varNameLower] = color
        # Keep up last used color index
        if (color in COLOR_PALETTE):
            prevColorInd = COLOR_PALETTE.index(color)
        else:
            prevColorInd = -1
        xVar = drawVar(xVar, yVar, varName, varLen, color)
    pygame.image.save(screen, filename)
def parseFile(textFilename):
    """Scan a markdown file for packet tables and render each as a diagram.

    A table is recognised by its "Type | Name | Length ..." header; the
    image filename is taken from the most recent diagram link seen before
    the table. Each completed table is handed to drawVarList().
    """
    # Use a context manager so the file is always closed (the original
    # opened it without ever closing) and avoid shadowing builtin `file`.
    with open(textFilename) as infile:
        filename = None
        foundTableHeader = False
        lengthInBits = False
        foundTableLines = False
        varList = []
        for line in infile:
            if (foundTableLines):
                match = patternTableRow.findall(line)
                if (len(match)):
                    varName = match[0][0].strip()
                    # Markdown links: use only the link text as field name.
                    linkMatch = patternLink.findall(varName)
                    if (linkMatch):
                        varName = linkMatch[0]
                    varLenKnown = True
                    try:
                        varLen = int(match[0][1].strip())
                    except ValueError:
                        # Non-numeric length column: draw at default width.
                        varLenKnown = False
                        varLen = DEFAULT_VAR_LEN
                    varList.append((varName, varLen, varLenKnown))
                else:
                    # End of table, draw and reset
                    drawVarList(varList, filename, lengthInBits)
                    filename = None
                    foundTableHeader = False
                    foundTableLines = False
                    varList = []
            # Just skip one line (the |---|---| separator under the header)
            if (foundTableHeader):
                foundTableLines = True
                foundTableHeader = False
            matches = patternTableHeader.findall(line)
            if len(matches):
                lengthInBits = (matches[0] == "in bits")
                foundTableHeader = True
            matches = patternFileName.findall(line)
            if len(matches):
                filename = GEN_DIR + matches[0]
        # End of file
        if (foundTableLines):
            # Draw last table
            drawVarList(varList, filename, lengthInBits)
# --- Script body: set up pygame and process every markdown file ------------
if not os.path.exists(GEN_DIR):
    print("Make dir " + GEN_DIR)
    os.makedirs(GEN_DIR)
pygame.init()
screen = None  # created per-diagram in drawVarList()
#myFont = pygame.font.SysFont("monospace", 15)
fontBlocks = pygame.font.Font(fontPath, fontSizeBlocks)
fontBytes = pygame.font.Font(fontPath, fontSizeBytes)
# Regex patterns
# Diagram image link, e.g. (../docs/diagrams/foo.png) -> captures "foo.png"
patternFileNameString = "\\(" + DOCS_DIR + DIR + "([^\\)]+)\\)"
patternFileName = re.compile(patternFileNameString)
# Table header row; the optional "in bits" group switches the number row label.
patternTableHeader = re.compile("Type +\\| +Name +\\| +Length (in bits)? *\\| +Description")
# Table data row: captures the Name and Length columns.
patternTableRow = re.compile("[^|]\\|([^|]+)\\|([^|]+)\\|.*")
# Markdown link [text](url): captures the link text.
patternLink = re.compile("\\[([^]]+)\\]\\([^\\)]+\\)")
for filename in FILENAMES:
    print(filename)
    parseFile(filename)
# pygame.display.flip()
# done = False
# while not done:
# for event in pygame.event.get(): # User did something
# if event.type == pygame.QUIT: # If user clicked close
# done=True # Flag that we are done so we exit this loop
pygame.quit()
| StarcoderdataPython |
9557 | # comments------------------
def a(x):
    """Print *x* to stdout."""
    # print() works on both Python 2 (single argument) and Python 3;
    # the original `print x` statement is a SyntaxError under Python 3.
    print(x)
# Run the demo call at import time; the former `if True:` wrapper was a
# no-op guard and has been removed.
a(10)
176467 | <reponame>RafaelPAndrade/LEIC-A-IST
#!/usr/bin/env python3
import socket, sys, getopt, os
from signal import signal, pause, SIGINT, SIGTERM, SIG_IGN
from pickle import load, dump
from multiprocessing import Process
from multiprocessing.managers import SyncManager
from lib.server import tcp_server, udp_server, udp_client
from lib.utils import (read_bytes_until, DEFAULT_CS_PORT, CS_KNOWN_BS_SAVEFILE,
CS_VALID_USERS_SAVEFILE, CS_DIRS_LOCATION_SAVEFILE,
backup_dict_to_file, restore_dict_from_file,
ignore_sigint, get_best_ip)
# Function to deal with any protocol unexpected error
def unexpected_command(my_socket):
    """Reply 'ERR' for a request that does not match the protocol.

    NOTE(review): sendall() only works on TCP or *connected* UDP sockets;
    deal_with_udp() passes an unconnected UDP socket here — confirm.
    """
    my_socket.sendall(b"ERR\n")
# Code to deal with queries from BS (UDP server)
def deal_with_udp(udp_socket, known_bs):
    """Serve BS registration requests (REG/UNR) forever on *udp_socket*.

    Runs in a child process; SIGTERM from the parent closes the socket
    and exits cleanly.
    """
    def signal_handler(_signum, _frame):
        udp_socket.close()
        exit(0)
    # ignore CTRL-C; handle .terminate() from parent
    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, signal_handler)
    while True:
        response, address = udp_socket.recvfrom(32)
        args = response.decode().split(" ")
        command = args[0]
        args = args[1:]
        if command == "REG":
            add_bs(known_bs, args, udp_socket, address)
        elif command == "UNR":
            remove_bs(known_bs, args, udp_socket, address)
        else:
            # NOTE(review): unexpected_command() calls sendall() on this
            # unconnected UDP socket, which raises OSError — confirm.
            unexpected_command(udp_socket)
def add_bs(known_bs, args, udp_socket, address):
    """Handle a BS 'REG' request: register (ip, port) and reply 'RGR <status>'.

    Status is ERR for malformed arguments, NOK for a duplicate, OK otherwise.
    """
    ip_bs = args[0]
    port_bs = args[1].split("\n")[0]
    if len(args) != 2 or not port_bs.isdigit():
        status = "ERR"
        print("Error in arguments received from BS server: {} {}".format(ip_bs, port_bs))
    elif (ip_bs, port_bs) in known_bs:
        status = "NOK"
        print("Error: Already added BS {}".format(ip_bs))
    else:
        # New BS: start its backup-usage counter at zero and persist.
        known_bs[(ip_bs, port_bs)] = 0
        backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
        status = "OK"
        print("-> BS added:\n - ip: {}\n - port: {}\n".format(ip_bs, port_bs))
    udp_socket.sendto("RGR {}\n".format(status).encode(), address)
def remove_bs(known_bs, args, udp_socket, address):
    """Handle a BS 'UNR' request: unregister (ip, port) and reply 'UAR <status>'.

    Status is ERR for malformed arguments, NOK for an unknown BS, OK otherwise.
    """
    # Statuses no longer carry a trailing "\n": the format string below adds
    # its own newline, so the original replies came out as "UAR OK\n\n"
    # (double newline), inconsistent with add_bs()'s "RGR OK\n".
    status = "ERR"
    ip_bs = args[0]
    port_bs = args[1].split("\n")[0]
    if len(args) != 2 or port_bs.isdigit() is False:
        print("Error in arguments received from BS server: {} {}".format(ip_bs, port_bs))
    elif (ip_bs, port_bs) not in known_bs:
        print("Error: User {} does not exist".format(ip_bs))
        status = "NOK"
    else:
        del known_bs[(ip_bs, port_bs)]
        backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
        status = "OK"
        print("-> BS removed:\n - ip: {}\n - port: {}\n".format(ip_bs, port_bs))
    udp_socket.sendto("UAR {}\n".format(status).encode(), address)
def deal_with_tcp(tcp_socket, valid_users, dirs_location, known_bs):
    """Accept TCP user connections forever, forking one worker per client.

    Each worker speaks the user protocol (AUT then one of DLU/BCK/RST/
    LSD/LSF/DEL) and exits after a single post-authentication command.
    Runs in a child process; SIGTERM from the parent closes the socket.
    """
    def signal_handler(_signum, _frame):
        tcp_socket.close()
        exit(0)
    def deal_with_client(client, valid_users, dirs_location, known_bs):
        """ Code / function for forked worker """
        conn = client[0]
        logged_in = False  # this var is False or contains the user id
        while True:
            try:
                command = read_bytes_until(conn, " \n")
                if command == "AUT":
                    # Successful AUT binds both logged_in and password,
                    # which BCK relies on below.
                    logged_in, password = authenticate_user(valid_users, conn)
                elif command == "DLU" and logged_in:
                    delete_user(logged_in, conn, dirs_location, valid_users)
                    break
                elif command == "BCK" and logged_in:
                    backup_dir(logged_in, conn, known_bs, password, dirs_location)
                    break
                elif command == "RST" and logged_in:
                    restore_dir(logged_in, conn, dirs_location)
                    break
                elif command == "LSD" and logged_in:
                    list_user_dirs(logged_in, conn, dirs_location)
                    break
                elif command == "LSF" and logged_in:
                    list_files_in_dir(logged_in, conn, dirs_location)
                    break
                elif command == "DEL" and logged_in:
                    delete_dir(logged_in, conn, dirs_location)
                    break
                else:
                    unexpected_command(conn)
            except (BrokenPipeError, ConnectionResetError):
                print("{}: connection closed\n".format(client[1]))
                exit(0)
        conn.close()  # end of code
    # Mask CTRL-C, handle SIGTERM (terminate, from father)
    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, signal_handler)
    while True:
        client = tcp_socket.accept()
        p_client = Process(target=deal_with_client, args=(client, valid_users, dirs_location, known_bs), daemon=True)
        p_client.start()
def authenticate_user(valid_users, conn):
    """Handle AUT: register a new user or verify an existing password.

    Replies 'AUR NEW/OK/NOK' on *conn* and returns (username, password)
    on success, or (False, False) when the password does not match.
    """
    username = read_bytes_until(conn, " ")
    password = read_bytes_until(conn, "\n")
    print("-> AUT {} {}".format(username, password))
    if username not in valid_users:
        # First time this user connects: store and persist the credentials.
        valid_users[username] = password
        backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
        print("New user: {}".format(username))
        status, result = "NEW", (username, password)
    elif valid_users[username] == password:
        print("User {} logged in sucessfully".format(username))
        status, result = "OK", (username, password)
    else:
        print("Password received does not match")
        status, result = "NOK", (False, False)
    conn.sendall("AUR {}\n".format(status).encode())
    return result
def delete_user(username, conn, dirs_location, valid_users):
    """Handle DLU: delete *username* unless directories are still stored
    for it; reply 'DLR OK/NOK' on *conn*."""
    print(">> DLU")
    still_has_data = any(owner == username for (owner, _folder) in dict(dirs_location))
    if still_has_data:
        print("There is still information stored for user\n")
        status = "NOK\n"
    else:
        del valid_users[username]
        backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
        print("User {} deleted sucessfully\n".format(username))
        status = "OK\n"
    conn.sendall(("DLR " + status).encode())
def backup_dir(username, conn, known_bs, password, dirs_location):
    """Handle BCK: assign/reuse a BS for (username, folder) and tell the
    client which files need backing up.

    Replies 'BKR <ip> <port> <n> <file list>\\n' on *conn*, or 'BKR EOF\\n'
    when no BS is available for a first backup.
    """
    flag = 0
    folder = read_bytes_until(conn, " ")
    nr_user_files = int(read_bytes_until(conn, " "))
    print(">> BCK {} {}".format(folder, str(nr_user_files)))
    user_dict = {}  # {"filename": [date, time, size]} as sent by the user
    bs_dict = {}    # {"filename": [date, time, size]} as stored on the BS
    string_of_files = ""
    registered_in_bs = 0
    files_user = read_bytes_until(conn, "\n").split()
    # Each file is a (name, date, time, size) quadruple in the flat list.
    for i in range(nr_user_files):
        filename = files_user[4*i]
        date = files_user[4*i+1]
        time = files_user[4*i+2]
        size = files_user[4*i+3]
        user_dict[filename] = [date, time, size]
        string_of_files += " {} {} {} {}".format(filename, date, time, size)
    if (username, folder) in dirs_location:
        # Folder already backed up: ask the owning BS for its listing and
        # report only the files whose metadata changed.
        flag = 1
        ip_bs = dirs_location[(username, folder)][0]
        port_bs = dirs_location[(username, folder)][1]
        print("BCK {} {} {} {}".format(username, folder, ip_bs, port_bs))
        bs_socket = udp_client(ip_bs, int(port_bs))
        bs_socket.sendall("LSF {} {}\n".format(username, folder).encode())
        response = bs_socket.recv(2048).decode().split()
        bs_socket.close()
        command = response[0]
        if command != "LFD":
            print("Error in command")
            exit(0)
        nr_bs_files = int(response[1])
        for i in range(nr_bs_files):
            filename = response[2 + 4*i]
            date = response[2 + 4*i + 1]
            time = response[2 + 4*i + 2]
            size = response[2 + 4*i + 3]
            bs_dict[filename] = [date, time, size]
        final_string_of_files = ""
        nr_files_final = 0
        for user_file in user_dict:
            for bs_file in bs_dict:
                if user_file == bs_file and user_dict[user_file] != bs_dict[bs_file]:
                    final_string_of_files += " {} {} {} {}".format(user_file, bs_dict[user_file][0], bs_dict[user_file][1], bs_dict[user_file][2])
                    nr_files_final += 1
        if nr_files_final == 0:
            print("No files to backup\n")
        response = "BKR {} {} {}{}\n".format(ip_bs, port_bs, nr_files_final, final_string_of_files)
        conn.sendall(response.encode())
    if flag == 0:
        # First backup of this folder: pick the least-used known BS.
        ip_bs = ""
        flag_first_user = 1
        if not known_bs:
            print("No BS available to backup [BKR EOF]\n")
            conn.sendall("BKR EOF\n".encode())
            return
        known_bs_temp = dict(known_bs)
        for (ip, port) in known_bs_temp:
            # The first key seeds the minimum; afterwards keep whichever BS
            # has been used for the fewest backups.
            if flag_first_user:
                ip_bs, port_bs = (ip, port)
                flag_first_user = 0
            elif known_bs_temp[(ip, port)] < known_bs_temp[(ip_bs, port_bs)]:
                ip_bs, port_bs = (ip, port)
        known_bs[(ip_bs, port_bs)] += 1
        print("BS with ip: {} and port: {} was chosen for backup".format(ip_bs, port_bs))
        for (user, directory) in dict(dirs_location):
            if dirs_location[(user, directory)] == (ip_bs, port_bs) and user == username:
                print("User {} is already registered in BS with ip: {} and port: {}\n".format(username, ip_bs, port_bs))
                registered_in_bs = 1
                break
        dirs_location[(username, folder)] = (ip_bs, port_bs)
        backup_dict_to_file(dirs_location, CS_DIRS_LOCATION_SAVEFILE)
        if not registered_in_bs:
            # Introduce the user (LSU) to the chosen BS.
            bs_socket = udp_client(ip_bs, int(port_bs))
            response = "LSU {} {}\n".format(username, password)
            bs_socket.sendall(response.encode())
            # decode()[:-1] already strips the trailing newline, so compare
            # against the bare status words. The original compared against
            # "NOK\n"/"ERR\n", which could never match, so BS errors fell
            # through to the success branch.
            command, status = bs_socket.recv(32).decode()[:-1].split()
            bs_socket.close()
            if command != "LUR":
                print("Error in command\n")
                exit(0)
            elif status == "NOK":
                print("Already knew user\n")
                exit(0)
            elif status == "ERR":
                print("Error in arguments sent from CS to BS\n")
                exit(0)
            else:
                print("User {} was added to BS with ip: {} and port: {} sucessfully\n".format(username, ip_bs, port_bs))
        response = "BKR {} {} {}{}\n".format(ip_bs, port_bs, nr_user_files, string_of_files)
        conn.sendall(response.encode())
def restore_dir(username, conn, dirs_location):
    """Handle RST: tell the client which BS stores (username, folder),
    replying 'RSR <ip> <port>\\n', or 'RSR EOF\\n' when unknown."""
    folder = read_bytes_until(conn, "\n")
    print("Restore {}".format(folder))
    key = (username, folder)
    if key in dirs_location:
        print("Entered")
        ip_bs, port_bs = dirs_location[key]
        response = "RSR {} {}\n".format(ip_bs, port_bs)
        print(response)
    else:
        print("RSR EOF")
        response = "RSR EOF\n"
    conn.sendall(response.encode())
def list_user_dirs(username, conn, dirs_location):
    """Handle LSD: reply 'LDR <n> <dir1> <dir2> ... \\n' with every folder
    backed up for *username*."""
    print(">> LSD")
    folders = []
    if dirs_location:
        for (owner, folder) in dict(dirs_location):
            if owner == username:
                folders.append(folder)
                print(folder)
    # Each folder name is followed by a space (including the last one),
    # matching the wire format expected by the client.
    response = "LDR {} {}\n".format(len(folders), "".join(f + " " for f in folders))
    print(response)
    conn.sendall(response.encode())
def list_files_in_dir(username, conn, dirs_location):
    """Handle LSF: forward the file listing of (username, folder) from the
    owning BS to the user; reply 'LFD NOK\\n' when the folder is unknown."""
    flag = 0
    folder = read_bytes_until(conn, " \n")
    print(">> LSF {}".format(folder))
    if (username, folder) in dirs_location:
        flag = 1
        ip_bs = dirs_location[(username, folder)][0]
        port_bs = dirs_location[(username, folder)][1]
        # Ask the BS that stores this folder for its file list.
        bs_socket = udp_client(ip_bs, int(port_bs))
        bs_socket.sendall("LSF {} {}\n".format(username, folder).encode())
        response = bs_socket.recv(2048).decode().split()
        bs_socket.close()
        if response[0] != "LFD":
            print("Error in command\n")
            exit(0)
        nr_bs_files = int(response[1])
        conn.sendall("LFD {} {} {}".format(ip_bs, port_bs, nr_bs_files).encode())
        # Relay each (name, date, time, size) quadruple to the user.
        for i in range(nr_bs_files):
            filename = response[2 + 4*i]
            date = response[2 + 4*i + 1]
            time = response[2 + 4*i + 2]
            size = response[2 + 4*i + 3]
            conn.sendall(" {} {} {} {}".format(filename, date, time, size).encode())
        conn.sendall("\n".encode())
    if flag == 0:
        response = "LFD NOK\n"
        conn.sendall(response.encode())
def delete_dir(username, conn, dirs_location):
    """Handle DEL: ask the owning BS to delete (username, folder) and, on
    success, drop the local mapping.

    Replies 'DDR OK/NOK\\n' on *conn*, or 'ERR\\n' on a BS protocol error.
    """
    print(">> DEL")
    status_del = "NOK"
    flag = 0
    folder = read_bytes_until(conn, " \n")
    if (username, folder) in dirs_location:
        flag = 1
        ip_bs = dirs_location[(username, folder)][0]
        port_bs = dirs_location[(username, folder)][1]
        bs_socket = udp_client(ip_bs, int(port_bs))
        bs_socket.sendall("DLB {} {}\n".format(username, folder).encode())
        # Strip the trailing newline before splitting: the BS answers
        # "DBR OK\n"/"DBR NOK\n", so the original `status == "NOK"` test
        # could never match and NOK replies were treated as success.
        command, status = bs_socket.recv(8).decode().rstrip("\n").split(" ")
        bs_socket.close()
        if command != "DBR":
            print("Error in protocol\n")
            conn.sendall("ERR\n".encode())
        else:
            if status == "NOK":
                print("No such folder exists in the chosen BS\n")
            else:
                status_del = "OK"
                del dirs_location[(username, folder)]
                backup_dict_to_file(dirs_location, CS_DIRS_LOCATION_SAVEFILE)
                print("Directory {} was sucessfully deleted\n".format(folder))
            response = "DDR {}\n".format(status_del)
            conn.sendall(response.encode())
    if flag == 0:
        print("No such folder for the user {}\n".format(username))
        response = "DDR {}\n".format(status_del)
        conn.sendall(response.encode())
def main():
    """Central-server entry point.

    Restores persisted state, starts one process for UDP (BS registration)
    and one for TCP (client commands), then blocks until interrupted and
    persists state again on shutdown.
    """
    # Shared state served by a SyncManager so both child processes see it.
    manager = SyncManager()
    manager.start(ignore_sigint)
    known_bs = manager.dict() # {("ip_BS", "port_BS"): counter}
    valid_users = manager.dict() # {"user": password}
    dirs_location = manager.dict() # {(username, "folder"): (ipBS, portBS)}
    my_address = get_best_ip()
    my_port = DEFAULT_CS_PORT
    # Optional -p <port> overrides the default port.
    try:
        a = getopt.getopt(sys.argv[1:], "p:")[0]
    except getopt.GetoptError as error:
        print(error)
        exit(2)
    for opt, arg in a:
        if opt == '-p':
            my_port = int(arg)
    print("My address is {}\n".format(my_address))
    udp_receiver = udp_server(my_address, my_port)
    tcp_receiver = tcp_server(my_address, my_port)
    # Restore persisted state from previous runs, if any.
    if os.path.isfile(CS_KNOWN_BS_SAVEFILE):
        known_bs.update(restore_dict_from_file(CS_KNOWN_BS_SAVEFILE))
    if os.path.isfile(CS_VALID_USERS_SAVEFILE):
        valid_users.update(restore_dict_from_file(CS_VALID_USERS_SAVEFILE))
    if os.path.isfile(CS_DIRS_LOCATION_SAVEFILE):
        dirs_location.update(restore_dict_from_file(CS_DIRS_LOCATION_SAVEFILE))
    try:
        # "Forking"
        p_udp = Process(target=deal_with_udp, args=(udp_receiver, known_bs))
        p_tcp = Process(target=deal_with_tcp, args=(tcp_receiver, valid_users, dirs_location, known_bs))
        p_udp.start()
        p_tcp.start()
        pause()  # sleep until a signal (e.g. Ctrl-C) arrives
    except KeyboardInterrupt:
        pass
    finally:
        # Tear down sockets and children, then persist state to disk.
        tcp_receiver.close()
        udp_receiver.close()
        p_tcp.terminate()
        p_udp.terminate()
        p_tcp.join()
        p_udp.join()
        backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
        backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
        backup_dict_to_file(dirs_location, CS_DIRS_LOCATION_SAVEFILE)
        print()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5098023 | <reponame>p4cx/optaradio
from flask import *
from flask_socketio import SocketIO, send
from globals_web import *
from app.forms import *
from app import helpers, station_model
from werkzeug.utils import secure_filename
import os
app = Flask(__name__)
# NOTE(review): hard-coded secret key — move to configuration for production.
app.secret_key = "super secret key"
socket_io = SocketIO(app)
@app.route("/station_form")
def station_form(station=None):
    """Render the add/edit station form.

    When *station* (a row-like sequence) is given, pre-fill the form with
    its values — used by the "modify" path in action_station().
    """
    form_station = AddStationForm()
    if station is not None:
        # Indices follow the column order returned by station_model.
        form_station.station_name.data = station[1]
        form_station.station_url.data = station[2]
        form_station.station_desc.data = station[3]
        form_station.station_country.data = station[5]
    countries = helpers.load_country_choices()
    return render_template('station_form.html', form=form_station, countries=countries)
@app.route('/add_station', methods=['POST'])
def add_station():
    """Create a new station from the submitted form and its cover image (png only)."""
    if request.method == 'POST':
        # The form did not include a file field at all.
        if 'station_cover' not in request.files:
            print('No file part')
            # Fixed: this branch used to show the "no file selected" text,
            # which belongs to the empty-filename case below.
            return render_template('report.html', success=False, message="No file part in the request.", station=[])
        file = request.files['station_cover']
        # Browsers submit an empty filename when no file was chosen.
        if file.filename == '':
            print('No file selected for uploading')
            return render_template('report.html', success=False, message="No file selected for uploading.", station=[])
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(TEMP_PATH, filename))
            print('File successfully uploaded')
            success, message, station = station_model.add(request.form, os.path.join(TEMP_PATH, filename))
            return render_template('report.html', success=success, message=message, station=station)
        else:
            print('Allowed file types are png')
            return render_template('report.html', success=False, message="Wrong file format.", station=[])
@app.route('/mod_station/<string:old_station>', methods=['POST'])
def mod_station(old_station):
    """Update the station previously named *old_station*.

    If a new cover image was uploaded it is saved to TEMP_PATH and passed on;
    otherwise the existing thumbnail is renamed to match the (possibly new)
    station name.
    """
    if request.method == 'POST':
        print(request.form)
        old_name = old_station
        if 'station_cover' in request.files:
            file = request.files['station_cover']
            # A new cover was actually provided (non-empty, allowed type).
            if file.filename != '' and file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                file.save(os.path.join(TEMP_PATH, filename))
                success, message, station = station_model.modify_station(request.form, os.path.join(TEMP_PATH, filename), old_name)
                return render_template('report.html', success=success, message=message, station=station)
        # No new cover: rename the existing thumbnail to the new station name.
        filename = request.form['station_name'] + ".png"
        old_file = os.path.join(THUMBS_PATH, old_name + ".png")
        new_file = os.path.join(THUMBS_PATH, filename)
        os.rename(old_file, new_file)
        success, message, station = station_model.modify_station(request.form, filename, old_name)
        return render_template('report.html', success=success, message=message, station=station)
@app.route('/action_station', methods=['POST'])
def action_station():
    """Dispatch the station-list buttons: delete confirmation or edit form.

    The pressed button's form key selects the action; its value is the
    station name.  NOTE(review): if neither key is present this returns
    None, which Flask turns into an error — confirm the template always
    submits one of the two keys.
    """
    if request.method == 'POST':
        for key, value in request.form.items():
            if key == "del_radio_station":
                return render_template('del_station.html', station=station_model.crawl_station_list(value))
            elif key == "mod_radio_station":
                return station_form(station_model.crawl_station_list(value))
@app.route('/del_station', methods=['POST'])
def del_station():
    """Delete the station whose confirm button (value starting with "Delete")
    was pressed; the form key is the station identifier."""
    if request.method == 'POST':
        for key, value in request.form.items():
            if value[:6] == "Delete":
                station_model.delete_station(key)
                return render_template('report.html', success=True, message="Is deleted.", station=[])
    return render_template('report.html', success=False, message="Something went wrong.", station=[])
@app.route('/image/<path:filename>')
def download_file(filename):
    """Serve a station thumbnail from THUMBS_PATH as a download."""
    return send_from_directory(THUMBS_PATH, filename, as_attachment=True)
@app.route("/")
def stations():
    """Home page: render the full list of stations."""
    return render_template('stations.html', station_list=station_model.get())
def send_message():
    """Debug hook: emit a fixed marker on stdout."""
    marker = "send hello"
    print(marker)
def allowed_file(filename):
    """Return True when *filename* has an extension and it is png (case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ['png']
# Run the development server directly when executed as a script;
# `application` exposes the WSGI entry point for external servers.
if __name__ == "__main__":
    socket_io.run(app, host='0.0.0.0', port=80)
application = app
| StarcoderdataPython |
3268131 | from itertools import count
from django.shortcuts import render,redirect
from django.contrib.auth.models import User, auth
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db import connection
from django.contrib.auth import get_user_model
from .choices import *
from .email import *
from django import template
from superadmin.models import Customer, Goal, SocialIcon
User = get_user_model()
def tables():
    """Return the list of installed model classes via DB introspection.

    NOTE(review): dead code — this definition is shadowed by the view
    `tables(request)` defined later in this module and is never reachable.
    """
    table = []
    tables = connection.introspection.table_names()
    seen_models = connection.introspection.installed_models(tables)
    for i in seen_models:
        table.append(i)
    return table
@login_required
def homepage(request):
    """Admin dashboard: counts of goals, regular users and vendors."""
    template_name = 'index.html'
    # Hard-coded display names for the dashboard tiles.
    table = ['User', 'Goal']
    goal = Goal.objects.filter().count()
    user = User.objects.filter(user_type='USER').count()
    vendor = User.objects.filter(user_type='VENDOR').count()
    return render(request, template_name, {'table':table, 'goal':goal, 'user':user, 'vendor':vendor})
def loginSuperAdmin(request):
    """Log a superuser into the admin dashboard.

    Renders the login form on GET; on POST authenticates the credentials and
    redirects superusers to the dashboard.  A non-superuser with valid
    credentials simply falls through to the login page again (original
    behavior, preserved).
    """
    template_name = 'login.html'
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        admin = auth.authenticate(email=email, password=password)
        # Fixed: authenticate() returns None on bad credentials, and the
        # original dereferenced admin.is_superuser before checking for None,
        # raising AttributeError on every failed login attempt.
        if admin is None:
            messages.error(request, "Invalid username or password ")
            return redirect('login')
        if admin.is_superuser:
            auth.login(request, admin)
            # Fixed: success message used to say "Registerd" on login.
            messages.success(request, "You have successfully logged in!")
            return redirect('dashboard')
    return render(request, template_name)
def tables(request):
    """Render the list of regular (non-vendor) users.

    NOTE(review): this view shadows the earlier helper `tables()` defined
    above in this module.
    """
    template_name = 'users.html'
    user = User.objects.filter(user_type='USER')
    return render(request, template_name, {'user': user})
def addUser(request):
    """Admin form to create a user; rejects duplicate email addresses."""
    template_name = 'add-user.html'
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        mobile = request.POST.get('mobile')
        user_type = request.POST.get('user_type')
        data = User.objects.filter(email = request.POST.get('email'))
        if not data:
            # get_or_create stores the raw password first; set_password then
            # replaces it with the hash before save().  NOTE(review): the
            # plaintext value is briefly persisted by get_or_create — confirm
            # this is acceptable, or create with create_user instead.
            user, created = User.objects.get_or_create(email=email, password=password, first_name=first_name, last_name=last_name, mobile=mobile, user_type=user_type)
            user.set_password(user.password)
            user.save()
            messages.success(request, "You have Successfully Registerd!")
            return redirect('tables')
        else:
            messages.error(request, "This email address is already exists!")
    return render(request, template_name,{'roll':USER_TYPE })
def userView(request, id):
    """Detail page for a single user (by primary key)."""
    template_name = 'user-view.html'
    user = User.objects.get(id=id)
    return render(request, template_name, {'user': user})
def userUpdate(request, id):
    """Edit a regular user's name and mobile number.

    GET renders the edit form; POST applies the change and returns to the
    user list.
    """
    template_name = 'user-edit.html'
    if request.method=="POST":
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        mobile = request.POST.get('mobile')
        # Filter (not get) so a missing/mismatched id yields an empty queryset
        # instead of raising.
        user = User.objects.filter(id=id, user_type='USER')
        if user:
            user.update(first_name=first_name, last_name=last_name, mobile=mobile)
            messages.success(request, "User updated!")
            return redirect('tables')
        else:
            messages.error(request, "Something went wrong!")
            return redirect('user_edit')
    else:
        user = User.objects.get(id=id)
        return render(request, template_name, {'user': user})
def userDelete(request, id):
    """Remove the user with the given primary key, then return to the user list."""
    target = User.objects.get(id=id)
    target.delete()
    messages.success(request, "User is deleted!")
    return redirect('tables')
def vendor(request):
    """Render the list of vendor accounts."""
    template_name = 'vendors.html'
    vendor = User.objects.filter(user_type='VENDOR')
    return render(request, template_name, {'vendor': vendor})
def vendorView(request, id):
    """Detail page for a single vendor (by primary key)."""
    template_name = 'vendor-view.html'
    vendor = User.objects.get(id=id)
    return render(request, template_name, {'vendor': vendor})
def addVendor(request):
    """Admin form to create a vendor; rejects duplicate email addresses."""
    template_name = 'add-vendor.html'
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        mobile = request.POST.get('mobile')
        user_type = request.POST.get('user_type')
        data = User.objects.filter(email=email)
        if not data:
            user, created = User.objects.get_or_create(email=email, password=password, first_name=first_name, last_name=last_name, mobile=mobile, user_type=user_type)
            # Fixed: the original called set_password(<PASSWORD>) — a leftover
            # redaction placeholder that is not valid Python.  Hash the raw
            # form password, matching addUser()'s behavior.
            user.set_password(password)
            user.save()
            messages.success(request, "You have Successfully Registerd!")
            return redirect('vendors')
        else:
            messages.error(request, "This email address is already exists!")
    return render(request, template_name,{'roll':USER_TYPE })
def vendorUpdate(request, id):
    """Edit a vendor's name and mobile number.

    GET renders the edit form; POST applies the change and returns to the
    vendor list.
    """
    template_name = 'vendor-edit.html'
    if request.method=="POST":
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        mobile = request.POST.get('mobile')
        # Filter (not get) so a missing/mismatched id yields an empty queryset
        # instead of raising.
        vendor = User.objects.filter(id=id, user_type='VENDOR')
        if vendor:
            vendor.update(first_name=first_name, last_name=last_name, mobile=mobile)
            messages.success(request, "User updated!")
            return redirect('vendors')
        else:
            messages.error(request, "Something went wrong!")
            return redirect('vendor_edit')
    else:
        vendor = User.objects.get(id=id)
        return render(request, template_name, {'vendor': vendor})
def vendorDelete(request, id):
    """Remove the vendor with the given primary key, then return to the vendor list."""
    user = User.objects.get(id=id)
    user.delete()
    messages.success(request, "Vendor is deleted")
    return redirect('vendors')
def goalList(request):
    """Render every Goal record in the goal overview template."""
    all_goals = Goal.objects.all()
    return render(request, 'goal.html', {'data': all_goals})
def goalView(request, id):
    """Detail page for a single goal (by primary key)."""
    template_name = 'goal-view.html'
    goal = Goal.objects.get(id=id)
    return render(request, template_name, {'data': goal})
def socialList(request):
    """Render every configured social-media link."""
    template_name = 'social.html'
    social = SocialIcon.objects.all()
    return render(request, template_name, {'social': social})
def addSocialLink(request):
    """Admin form to add a social-media link (name, icon class, URL)."""
    template_name = 'add-social.html'
    if request.method == 'POST':
        name = request.POST.get('name')
        icon = request.POST.get('icon')
        link = request.POST.get('link')
        # get_or_create makes the add idempotent for identical triples.
        social, created = SocialIcon.objects.get_or_create(name=name, icon=icon, link=link)
        social.save()
        messages.success(request, "You have Successfully Add Link!")
        return redirect('social')
    return render(request, template_name)
def register(request):
    """Public registration form creating a Customer record.

    NOTE(review): the password is stored on the Customer model as-is via
    get_or_create — there is no hashing step here, unlike the User views.
    Confirm Customer hashes it internally, otherwise this stores plaintext.
    """
    template_name = 'register.html'
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        mobile = request.POST.get('mobile')
        data = Customer.objects.filter(email = request.POST.get('email'))
        if data:
            messages.error(request, "This email address is already exists!")
        else:
            user, created = Customer.objects.get_or_create(email=email, password=password, first_name=first_name, last_name=last_name, mobile=mobile)
            user.save()
            messages.success(request, "You have Successfully Registerd!")
    return render(request, template_name)
def logout(request):
    """End the current session and send the visitor back to the login page."""
    auth.logout(request)
    return redirect('/admin/login/')
def registerUser(request):
    """Self-registration: create a User, hash the password, e-mail an OTP and
    redirect to the verification page for the new user's slug."""
    template_name = 'user-register.html'
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        mobile = request.POST.get('mobile')
        user_type = request.POST.get('user_type')
        data = User.objects.filter(email=email)
        if not data:
            user, created = User.objects.get_or_create(email=email, password=password, first_name=first_name, last_name=last_name, mobile=mobile, user_type=user_type)
            # Fixed: the original called set_password(<PASSWORD>) — a leftover
            # redaction placeholder that is not valid Python.  Hash the raw
            # form password instead.  (Leftover debug print removed.)
            user.set_password(password)
            user.save()
            sendOTP(user)
            messages.success(request, "You have Successfully Registerd!")
            return redirect('/admin/verify/'+str(user.slug)+'/')
        else:
            messages.error(request, "This email address is already exists!")
    return render(request, template_name,{'roll':USER_TYPE})
def verifyUser(request, slug):
    """Compare the submitted OTP against the one stored on the user (looked up
    by slug); mark the account verified on a match."""
    template_name = 'verify-otp.html'
    if request.method == 'POST':
        otp = request.POST.get('otp')
        user = User.objects.get(slug=slug)
        if user.otp == otp:
            user.is_verified = True
            user.save()
            messages.success(request, 'Email Verification Complete.')
            return redirect('dashboard')
        else:
            messages.error(request, 'OTP does not match!')
    else:
        # NOTE(review): a plain GET of the form shows this error message —
        # confirm that is intended.
        messages.error(request, 'Something Went wrong!')
    return render(request, template_name)
def pendingUser(request):
    """List accounts awaiting activation (is_active is False)."""
    template_name = 'pending-users.html'
    pending = User.objects.filter(is_active=False)
    return render(request, template_name, {'pending': pending})
def approveUser(request):
    """List accounts that are both active and e-mail verified."""
    template_name = 'approve-users.html'
    approve = User.objects.filter(is_active=True, is_verified=True)
    return render(request, template_name, {'approve': approve})
| StarcoderdataPython |
9722549 | <reponame>gilgamezh/dht22_exporter
#!/usr/bin/env python3
import time
import logging
import Adafruit_DHT
from prometheus_client import start_http_server, Summary, Gauge
logger = logging.getLogger("DHT22_exporter")
# Seconds to wait between sensor polls.
SLEEP_TIME = 5
# Sensor model and the GPIO pin it is wired to.
SENSOR = Adafruit_DHT.DHT22
PIN = 4
# Prometheus metrics: poll latency plus the two exported readings.
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
temperature_gauge = Gauge("Temperature", "Current Temperature")
humidity_gauge = Gauge("Humidity", "Current Humidity")
@REQUEST_TIME.time()
def get_values():
    """Read temperature and humidity from the DHT22 and update the gauges.

    Sleeps SLEEP_TIME seconds at the end, so the caller's loop polls at a
    fixed cadence (the sleep is included in request_processing_seconds,
    matching the original behavior).
    """
    logger.debug("Quering sensor. Sensor: %s Pin: %s", SENSOR, PIN)
    humidity, temperature = Adafruit_DHT.read_retry(SENSOR, PIN)
    # Fixed: read_retry returns (None, None) when every retry fails, and
    # Gauge.set(None) raises — keep the previous gauge values instead.
    if humidity is None or temperature is None:
        logger.warning("Sensor read failed after retries; keeping previous gauge values")
    else:
        logger.info("Got data from the sensor. Temperature: %s, Humidity: %s", temperature, humidity)
        temperature_gauge.set(temperature)
        humidity_gauge.set(humidity)
    logger.debug("Metrics update done. going to sleep for %s seconds", SLEEP_TIME)
    time.sleep(SLEEP_TIME)
# Start the Prometheus scrape endpoint on :8042, then poll the sensor forever.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
    start_http_server(8042)
    # collect in loop
    while True:
        get_values()
| StarcoderdataPython |
8147563 | <reponame>BUVANEASH/AdaConv<filename>styletransfer/dataload.py<gh_stars>0
import os
import glob
import tensorflow as tf
class DataLoad():
    """
    Dataset loader class

    NOTE(review): get_dataset/map_fn/create_dataloader read attributes that
    are never assigned in __init__ (raw_data, resize_size, image_size,
    random_crop_prob, batch_size) — confirm they are attached externally
    (e.g. from a config object) before these methods are called.
    """
    def __init__(self):
        pass
    def get_dataset(self):
        # Collect content (COCO) and style image paths, then report the sizes.
        self.content_train_list = sorted(glob.glob(os.path.join(self.raw_data,'content/train2017/*.jpg')))
        self.content_test_list = sorted(glob.glob(os.path.join(self.raw_data,'content/test2017/*.jpg')))
        self.style_train_list = sorted(glob.glob(os.path.join(self.raw_data,'style/train/*.jpg')))
        self.style_test_list = sorted(glob.glob(os.path.join(self.raw_data,'style/test/*.jpg')))
        _content_train_len = len(self.content_train_list)
        _content_test_len = len(self.content_test_list)
        _style_train_len = len(self.style_train_list)
        _style_test_len = len(self.style_test_list)
        print(f"Train ---> Content {_content_train_len} | Style {_style_train_len} || Test ---> Content {_content_test_len} | Style {_style_test_len}")
    def map_fn(self, image_path: str) -> tf.Tensor:
        """
        Args:
            image_path: The RGB image path.
        Returns:
            The augmented image, float32 in [0, 1] with shape
            (image_size, image_size, 3).
        """
        # Read Content and Style
        image = tf.io.decode_jpeg(tf.io.read_file(image_path), channels = 3)
        # With probability (1 - random_crop_prob): resize then random-crop;
        # otherwise resize straight to the target size.
        if tf.random.uniform((), 0, 1) > self.random_crop_prob:
            # Resize
            image = tf.image.resize(image,(self.resize_size,self.resize_size), method = 'bilinear')
            # RandomCrop
            image = tf.image.random_crop(image, size = [self.image_size,self.image_size,3])
        else:
            # Resize
            image = tf.image.resize(image,(self.image_size,self.image_size), method = 'bilinear')
        image = tf.cast(tf.clip_by_value(image / 255, clip_value_min = 0, clip_value_max = 1), dtype = tf.float32)
        return image
    def create_dataloader(self, image_set: list[str]) -> tf.data.Dataset:
        """
        Args:
            image_set: The RGB image paths set
        Yield:
            The image set dataloader (shuffled, repeated, batched)
        """
        def generator():
            for image_path in image_set:
                yield str(image_path)
        dataset = tf.data.Dataset.from_generator(generator,
                                                 output_signature=tf.TensorSpec(shape=(), dtype=tf.string))
        dataset = dataset.map(map_func = lambda img_path: self.map_fn(img_path),
                              num_parallel_calls = tf.data.AUTOTUNE,
                              deterministic = False)
        # Drop unreadable/corrupt images rather than failing the pipeline.
        dataset = dataset.apply(tf.data.experimental.ignore_errors())
        dataset = dataset.shuffle(buffer_size = 1000)
        dataset = dataset.repeat()
        dataset = dataset.batch(self.batch_size)
        return dataset
#!/usr/bin/python3
# Advent of Code 2020, day 21 (part 2): match allergens to ingredients and
# print the canonical dangerous ingredient list.
with open('input.txt') as f:
#with open('test.txt') as f:
    input = f.read().splitlines()  # NOTE(review): shadows the builtin `input`
# Parse "a b c (contains x, y)" lines into food records plus the global
# ingredient/allergen vocabularies.
foods = []
allergens = []
ingredients = []
for line in input:
    ins, als = line[:-1].split(' (contains ')
    food = {
        'ingredients': ins.split(' '),
        'allergens': als.split(', ')
    }
    foods.append(food)
    for i in food['ingredients']:
        if i not in ingredients:
            ingredients.append(i)
    for a in food['allergens']:
        if a not in allergens:
            allergens.append(a)
# An allergen's ingredient must appear in every food listing that allergen,
# so intersecting those ingredient sets gives the candidates.
ingredients_with_allergens = set()
for a in allergens:
    foods_with_allergen = [f for f in foods if a in f['allergens']]
    ingredient_sets = []
    for f in foods_with_allergen:
        ingredient_sets.append(set(f['ingredients']))
    intersection = ingredient_sets[0].intersection(*ingredient_sets[1:])
    ingredients_with_allergens.update(intersection)
inert_ingredients = set(ingredients) - ingredients_with_allergens
# for each ingredient with an allergen, find the allergen
# Repeatedly resolve any allergen whose candidate set (minus already-mapped
# ingredients) has exactly one member, until all are assigned.
mapped_allergens = {}
while len(mapped_allergens.keys()) < len(ingredients_with_allergens):
    for a in allergens:
        foods_with_allergen = [f for f in foods if a in f['allergens']]
        ingredient_sets = []
        for f in foods_with_allergen:
            ingredient_sets.append(set(f['ingredients']))
        intersection = ingredient_sets[0].intersection(*ingredient_sets[1:]) - set(mapped_allergens.values())
        if len(intersection) == 1:
            mapped_allergens[a] = list(intersection)[0]
# Ingredients joined by comma, ordered by their allergen's name.
canonical_dangerous_ingredients = []
for a in sorted(mapped_allergens.keys()):
    canonical_dangerous_ingredients.append(mapped_allergens[a])
print(','.join(canonical_dangerous_ingredients))
| StarcoderdataPython |
4924976 | from flask import session, redirect, url_for, g
from run import gt, ft, settings
from functools import wraps
import gorbin_tools2
def admin_required(fn):
    """Decorator: allow the view only when the session user's status is 'admin'."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # session.get('login') is None for anonymous visitors; any status
        # other than 'admin' (including that case) is rejected.
        if gt.get_user_status(session.get('login')) != 'admin':
            return '<h1>Permission Denied</h1>'
        else:
            return fn(*args, **kwargs)
    return wrapper
def login_required(fn):
    """Decorator: redirect anonymous visitors to the index page."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if 'login' not in session:
            return redirect(url_for('index.index'))
        else:
            return fn(*args, **kwargs)
    return wrapper
def check_session(fn):
    """Decorator: force logout when the session's credentials no longer match
    a user record (e.g. password changed elsewhere).

    NOTE(review): indexes session['login'] directly, so this must always be
    applied after @login_required — confirm call sites.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not gt.get_user(session['login'], session['current_password']):
            return redirect(url_for('logout.logout'))
        else:
            return fn(*args, **kwargs)
    return wrapper
11384981 | <reponame>LabShare/sos-java
#!/usr/bin/env python
#
# Copyright (c) <NAME>
# Distributed under the terms of the MIT License.
import os
import numpy as np
import pandas as pd
import csv
import tempfile
from textwrap import dedent
from sos.utils import short_repr, env
from collections import Sequence
from IPython.core.error import UsageError
import re
import sys
def homogeneous_type(seq):
    """Return True if all elements of *seq* are of one compatible type.

    Plain int and float are treated as a single numeric family (so [1, 2.5]
    is homogeneous); every other case requires each remaining element to be
    an instance of the first element's exact type.  An empty sequence is
    considered homogeneous — the original raised StopIteration on it.
    """
    iseq = iter(seq)
    try:
        first = next(iseq)
    except StopIteration:
        # Fixed: the original let StopIteration escape on an empty input.
        return True
    # Exact-type check on the first element, matching the original: bool and
    # numpy scalars do NOT join the int/float family here.
    if type(first) in (int, float):
        return all(isinstance(x, (int, float)) for x in iseq)
    first_type = type(first)
    return all(isinstance(x, first_type) for x in iseq)
# Boilerplate sent to the Java kernel at startup: loads the bundled sos
# helper jar plus Tablesaw (and companions) and imports the classes the
# variable-exchange code below relies on.
java_init_statements = f'''
%jars {os.path.dirname(os.path.realpath(__file__))}/helper.jar\n
%maven tech.tablesaw:tablesaw-beakerx:0.30.3\n
%maven com.jimmoores:quandl-tablesaw:2.0.0\n
import static tech.tablesaw.aggregate.AggregateFunctions.*;\n
import tech.tablesaw.api.*;\n
import tech.tablesaw.columns.*;\n
import sos.helper
'''
def stitch_cell_output(response):
    """Concatenate the 'text' payload of every stream message in *response*."""
    pieces = [message[1]['text'] for message in response]
    return ''.join(pieces)
def _sos_to_java_type(obj):
''' Returns corresponding Java data type string for provided Python object '''
if isinstance(obj, (int, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, bool, np.bool_)):
if isinstance(obj, (bool, np.bool_)):
return 'Boolean', 'true' if obj==True else 'false'
elif obj >= -2147483648 and obj <= 2147483647:
return 'Integer', repr(obj)
elif obj >= -9223372036854775808 and obj <= 9223372036854775807:
return 'Long', repr(obj)+'L'
else:
return -1, None #Integer is out of bounds
elif isinstance(obj, (float, np.float16, np.float32, np.float64)):
if (obj >= -3.40282e+38 and obj <= -1.17549e-38) or (obj >= 1.17549e-38 and obj <= 3.40282e+38):
return 'Float', repr(obj)+'f'
elif (obj >= -1.79769e+308 and obj <= -2.22507e-308) or (obj >= 2.22507e-308 and obj <= 1.79769e+308):
return 'Double', repr(obj)
else:
return -1, None
elif isinstance(obj, str):
return 'String', '"'+obj+'"'
else:
return -1, None
def _java_scalar_to_sos(java_type, value):
#Convert string value to appropriate type in SoS
integer_types = ['Byte', 'Integer', 'Short', 'Long']
real_types = ['Float', 'Double']
if java_type in integer_types:
return int(value)
elif java_type in real_types:
if value[-1] == 'f':
value = value[:-1]
return float(value)
elif java_type == 'Character':
return value
elif java_type == 'String':
return value
elif java_type == 'Boolean':
if value == 'true':
return True
else:
return False
class sos_java:
    """SoS language adapter for a Java kernel.

    Translates variables between the SoS (Python) side and Java: scalars and
    homogeneous collections become Java declarations, and pandas DataFrames
    are exchanged through CSV files as Tablesaw Tables (see
    java_init_statements for the Java-side setup).
    """
    # Registration metadata consumed by SoS.
    background_color = {'Java': '#F80000'}
    supported_kernels = {'Java': ['java']}
    options = {}
    cd_command = ''
    def __init__(self, sos_kernel, kernel_name='Java'):
        self.sos_kernel = sos_kernel
        self.kernel_name = kernel_name
        self.init_statements = java_init_statements
    def insistent_get_response(self, command, stream):
        # The kernel occasionally returns an empty response; retry until
        # something arrives.  NOTE(review): loops forever if the kernel never
        # answers — confirm an upper bound is not needed.
        response = self.sos_kernel.get_response(command, stream)
        while response==[]:
            response = self.sos_kernel.get_response(command, stream)
        return response
    def _Java_declare_command_string(self, name, obj):
        """Return Java source declaring *name* with the value of *obj*, '' for
        an empty collection, or None when no Java mapping exists."""
        #Check if object is scalar
        if isinstance(obj, (int, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, float, np.float16, np.float32, np.float64, np.longdouble, str, bool, np.bool_)):
            #do scalar declaration
            obj_type, obj_val = _sos_to_java_type(obj)
            if not obj_type == -1:
                return f'{obj_type} {name} = {obj_val};'
            else:
                return None
        elif isinstance(obj, (Sequence, np.ndarray, dict, pd.core.frame.DataFrame)):
            #do vector things
            if len(obj) == 0:
                #TODO: how to deal with an empty array?
                return ''
            else:
                #convert Python dict to Java Map
                if isinstance(obj, dict):
                    keys = obj.keys()
                    values = obj.values()
                    # Only homogeneous key and value types can map to a typed
                    # HashMap; otherwise this falls through returning None.
                    if homogeneous_type(keys) and homogeneous_type(values):
                        dict_value = '; '.join([f'{name}.put({ _sos_to_java_type(d[0])[1] }, { _sos_to_java_type(d[1])[1] })' for d in obj.items()])
                        return f'Map<{_sos_to_java_type(next(iter(keys)))[0]}, {_sos_to_java_type(next(iter(values)))[0]}> {name} = new HashMap<>(); {dict_value}'
                elif isinstance(obj, Sequence):
                    # Homogeneous sequences become a typed ArrayList.
                    if homogeneous_type(obj):
                        seq_value = ', '.join([_sos_to_java_type(s)[1] for s in obj])
                        el_type = _sos_to_java_type(next(iter(obj)))[0]
                        return f'ArrayList<{el_type}> {name} = new ArrayList<{el_type}>(Arrays.asList({seq_value}));'
                    else:
                        return None
                elif isinstance(obj, pd.core.frame.DataFrame):
                    # DataFrames go through a CSV file in the temp directory
                    # and are read back as a Tablesaw Table on the Java side.
                    dic = tempfile.tempdir
                    os.chdir(dic)
                    obj.to_csv('df2java.csv', index=False, quoting=csv.QUOTE_NONNUMERIC, quotechar='"')
                    return f'var {name} = Table.read().csv("{dic}/df2java.csv");'
        else:
            #unsupported type
            return None
    def get_vars(self, names):
        """Push the named SoS variables into the Java kernel."""
        for name in names:
            java_repr = self._Java_declare_command_string(name, env.sos_dict[name])
            if not java_repr==None:
                self.sos_kernel.run_cell(java_repr, True, False,
                    on_error=f'Failed to put variable {name} to Java')
            else:
                self.sos_kernel.warn(f'Cannot convert variable {name} to Java')
    def put_vars(self, names, to_kernel=None):
        """Pull the named Java variables back into a dict of Python values.

        Uses the sos.helper Java class to inspect runtime types, then
        converts scalars, HashMaps, ArrayLists and Tablesaw Tables.
        """
        result = {}
        for name in names:
            java_type = self.sos_kernel.get_response(f'helper.getType({name})', ('execute_result',))[0][1]['data']['text/plain']
            if java_type in ('Boolean', 'Character', 'Byte', 'Short', 'Integer', 'Long', 'Float', 'Double', 'String'):
                #do scalar conversion
                value = self.sos_kernel.get_response(f'System.out.println({name});', ('stream',))[0][1]['text']
                result[name] = _java_scalar_to_sos(java_type, value)
            elif java_type == 'HashMap':
                # helper.printMap emits a Python-literal list of pairs;
                # key/value element types are queried separately.
                value = self.sos_kernel.get_response(f'helper.printMap({name});', ('execute_result',))[0][1]['data']['text/plain']
                temp_dict = dict(eval(value))
                key_java_type = self.sos_kernel.get_response(f'helper.getMapKeyType({name})', ('execute_result',))[0][1]['data']['text/plain']
                val_java_type = self.sos_kernel.get_response(f'helper.getMapValueType({name})', ('execute_result',))[0][1]['data']['text/plain']
                result[name] = dict({_java_scalar_to_sos(key_java_type, key) : _java_scalar_to_sos(val_java_type, val) for (key, val) in temp_dict.items()})
            elif java_type == 'ArrayList':
                # Elements are printed comma-separated; the element type is
                # probed from the first entry.
                flat_list = '[' + self.sos_kernel.get_response(f'System.out.println(helper.printArray({name}))', ('stream',))[0][1]['text'] + ']'
                el_type = self.insistent_get_response(f'helper.getType({name}.get(0))', ('execute_result',))[0][1]['data']['text/plain']
                result[name] = np.array([_java_scalar_to_sos(el_type, el) for el in eval(flat_list)])
            elif java_type == 'Table':
                # Tablesaw Tables round-trip through a CSV in the temp dir.
                dic = tempfile.tempdir
                os.chdir(dic)
                self.sos_kernel.run_cell(f'{name}.write().csv("{dic}/java2df.csv");', True, False, on_error=f'Failed to write dataframe {name} to file')
                result[name] = pd.read_csv(f'{dic}/java2df.csv')
        return result
| StarcoderdataPython |
5193107 | <reponame>ExpoAshique/ProveBanking__s<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Replace Channel's vendor link with a user M2M and track read messages.

    Drops channel.vendors, adds channel.users (members) and
    message.viewed (per-user read receipts).
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('channels', '0004_auto_20141205_0309'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='channel',
            name='vendors',
        ),
        migrations.AddField(
            model_name='channel',
            name='users',
            field=models.ManyToManyField(related_name='channels', verbose_name=b'members', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='message',
            name='viewed',
            field=models.ManyToManyField(related_name='read_messages', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
3477710 | import asyncio
import discord
import random
import json
import time
import os
from discord.ext import commands
from Cogs import Message
from Cogs import FuzzySearch
from Cogs import GetImage
from Cogs import Nullify
from Cogs import Message
from Cogs import DL
from Cogs import Admin
def setup(bot):
    """Discord.py extension entry point: register the Humor cog."""
    # Add the bot and deps
    settings = bot.get_cog("Settings")
    bot.add_cog(Humor(bot, settings))
# This module is for random funny things I guess...
class Humor:
    def __init__(self, bot, settings, listName = "Adjectives.txt"):
        """Load the adjective word list (one word per line) and precompute the
        Unicode combining marks used by the zalgo command."""
        self.bot = bot
        self.settings = settings
        # Setup our adjective list
        self.adj = []
        # Combining diacritical marks U+0300..U+036E.
        marks = map(chr, range(768, 879))
        self.marks = list(marks)
        if os.path.exists(listName):
            with open(listName) as f:
                for line in f:
                    self.adj.append(line)
    def _is_admin(self, member, channel, guild):
        """Return True if *member* has the administrator permission in
        *channel* or holds one of the guild's configured bot-admin roles."""
        # Check for admin/bot-admin
        isAdmin = member.permissions_in(channel).administrator
        if not isAdmin:
            checkAdmin = self.settings.getServerStat(guild, "AdminArray")
            for role in member.roles:
                for aRole in checkAdmin:
                    # Get the role that corresponds to the id
                    if str(aRole['ID']) == str(role.id):
                        isAdmin = True
        return isAdmin
    @commands.command(pass_context=True)
    async def zalgo(self, ctx, *, message = None):
        """Ỉ s̰hͨo̹u̳lͪd͆ r͈͍e͓̬a͓͜lͨ̈l̘̇y̡͟ h͚͆a̵͢v͐͑eͦ̓ i͋̍̕n̵̰ͤs͖̟̟t͔ͤ̉ǎ͓͐ḻ̪ͨl̦͒̂ḙ͕͉d͏̖̏ ṡ̢ͬö̹͗m̬͔̌e̵̤͕ a̸̫͓͗n̹ͥ̓͋t̴͍͊̍i̝̿̾̕v̪̈̈͜i̷̞̋̄r̦̅́͡u͓̎̀̿s̖̜̉͌..."""
        if message == None:
            await ctx.send("Usage: `{}zalgo [message]`".format(ctx.prefix))
            return
        # Check if we're suppressing @here and @everyone mentions
        if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
            suppress = True
        else:
            suppress = False
        # An optional trailing integer sets how many zalgo passes to apply
        # (clamped to 1..100); otherwise one pass.
        words = message.split()
        try:
            iterations = int(words[len(words)-1])
            words = words[:-1]
        except Exception:
            iterations = 1
        if iterations > 100:
            iterations = 100
        if iterations < 1:
            iterations = 1
        zalgo = " ".join(words)
        for i in range(iterations):
            # Stop early once we hit Discord's message length limit.
            if len(zalgo) > 2000:
                break
            zalgo = self._zalgo(zalgo)
        zalgo = zalgo[:2000]
        # Check for suppress
        if suppress:
            zalgo = Nullify.clean(zalgo)
        await Message.Message(message=zalgo).send(ctx)
        #await ctx.send(zalgo)
    def _zalgo(self, text):
        """Append random combining marks after each alphanumeric character;
        later words receive progressively more marks (i // 2 + 1)."""
        words = text.split()
        zalgo = ' '.join(''.join(c + ''.join(random.choice(self.marks)
                                             for _ in range(i // 2 + 1)) * c.isalnum()
                                 for c in word)
                         for i, word in enumerate(words))
        return zalgo
    @commands.command(pass_context=True)
    async def holy(self, ctx, *, subject : str = None):
        """Time to backup the Batman!"""
        if subject == None:
            await ctx.channel.send("Usage: `{}holy [subject]`".format(ctx.prefix))
            return
        # Check if we're suppressing @here and @everyone mentions
        if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
            suppress = True
        else:
            suppress = False
        # Prefer an adjective alliterating with the subject's first letter.
        matchList = []
        for a in self.adj:
            if a[:1].lower() == subject[:1].lower():
                matchList.append(a)
        if not len(matchList):
            # Nothing in there - get random entry
            # msg = "*Whoah there!* That was *too* holy for Robin!"
            word = random.choice(self.adj)
            word = word.strip().capitalize()
            subject = subject.strip().capitalize()
            msg = "*Holy {} {}, Batman!*".format(word, subject)
        else:
            # Get a random one
            word = random.choice(matchList)
            word = word.strip().capitalize()
            subject = subject.strip().capitalize()
            msg = "*Holy {} {}, Batman!*".format(word, subject)
        # Check for suppress
        if suppress:
            msg = Nullify.clean(msg)
        await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def fart(self, ctx):
"""PrincessZoey :P"""
fartList = ["Poot", "Prrrrt", "Thhbbthbbbthhh", "Plllleerrrrffff", "Toot", "Blaaaaahnk", "Squerk"]
randnum = random.randint(0, len(fartList)-1)
msg = '{}'.format(fartList[randnum])
await ctx.channel.send(msg)
    @commands.command(pass_context=True)
    async def french(self, ctx):
        """Speaking French... probably..."""
        fr_list = [ "hon", "fromage", "baguette" ]
        punct = [ ".", "!", "?", "...", "!!!", "?!" ]
        # Build a "sentence" of 3-20 random words, capitalize the first word,
        # and finish with random punctuation.
        fr_sentence = []
        for i in range(random.randint(3, 20)):
            fr_sentence.append(random.choice(fr_list))
            if len(fr_sentence) == 1:
                # Capitalize the first letter of the first word
                fr_sentence[0] = fr_sentence[0][:1].upper() + fr_sentence[0][1:]
        totally_french = " ".join(fr_sentence) + random.choice(punct)
        await ctx.send(totally_french)
    def canDisplay(self, server):
        """Rate-limit image posts per server.

        Returns False while the per-server threshold has not elapsed since the
        last image; otherwise records the current time and returns True.
        """
        # Check if we can display images
        lastTime = int(self.settings.getServerStat(server, "LastPicture"))
        threshold = int(self.settings.getServerStat(server, "PictureThreshold"))
        if not GetImage.canDisplay( lastTime, threshold ):
            # await self.bot.send_message(channel, 'Too many images at once - please wait a few seconds.')
            return False
        # If we made it here - set the LastPicture method
        self.settings.setServerStat(server, "LastPicture", int(time.time()))
        return True
@commands.command(pass_context=True)
async def memetemps(self, ctx):
"""Get Meme Templates"""
url = "https://api.imgflip.com/get_memes"
result_json = await DL.async_json(url)
templates = result_json["data"]["memes"]
templates_string_list = []
fields = []
for template in templates:
fields.append({ "name" : template["name"], "value" : "`" + str(template["id"]) + "`", "inline" : False })
await Message.Embed(title="Meme Templates", fields=fields).send(ctx)
@commands.command(pass_context=True)
async def meme(self, ctx, template_id = None, text_zero = None, text_one = None):
"""Generate Memes! You can get a list of meme templates with the memetemps command. If any fields have spaces, they must be enclosed in quotes."""
if not self.canDisplay(ctx.message.guild):
return
if text_one == None:
# Set as space if not included
text_one = " "
if template_id == None or text_zero == None or text_one == None:
msg = "Usage: `{}meme [template_id] [text#1] [text#2]`\n\n Meme Templates can be found using `$memetemps`".format(ctx.prefix)
await ctx.channel.send(msg)
return
templates = await self.getTemps()
chosenTemp = None
msg = ''
idMatch = FuzzySearch.search(template_id, templates, 'id', 1)
if idMatch[0]['Ratio'] == 1:
# Perfect match
chosenTemp = idMatch[0]['Item']['id']
else:
# Imperfect match - assume the name
nameMatch = FuzzySearch.search(template_id, templates, 'name', 1)
chosenTemp = nameMatch[0]['Item']['id']
if nameMatch[0]['Ratio'] < 1:
# Less than perfect, still
msg = 'I\'ll assume you meant *{}*.'.format(nameMatch[0]['Item']['name'])
url = "https://api.imgflip.com/caption_image"
username = self.settings.getServerStat(ctx.message.guild, "ImgflipUsername")
password = self.settings.getServerStat(ctx.message.guild, "ImgflipPassword")
payload = {'template_id': chosenTemp, 'username': username, 'password': password, 'text0': text_zero, 'text1': text_one }
result_json = await DL.async_post_json(url, payload)
# json.loads(r.text)
result = result_json["data"]["url"]
if msg:
# result = '{}\n{}'.format(msg, result)
await ctx.channel.send(msg)
# Download Image - set title as a space so it disappears on upload
await Message.Embed(image=result, color=ctx.author).send(ctx)
# await GetImage.get(ctx, result)
async def getTemps(self):
url = "https://api.imgflip.com/get_memes"
result_json = await DL.async_json(url)
templates = result_json["data"]["memes"]
if templates:
return templates
return None
    @commands.command(pass_context=True)
    async def imgflipcred(self, ctx, username = None, password = None):
        """Store imgflip API credentials for this guild (admin only)."""
        isAdmin = self._is_admin(ctx.message.author, ctx.message.channel, ctx.message.guild)
        if not isAdmin:
            await ctx.channel.send('You do not have sufficient privileges to access this command.')
            return
        if username is not None:
            self.settings.setServerStat(ctx.message.guild, "ImgflipUsername", username)
        else:
            # No username supplied: fall back to the built-in default account.
            self.settings.setServerStat(ctx.message.guild, "ImgflipUsername", "imgflip_hubot")
        if password is not None:
            self.settings.setServerStat(ctx.message.guild, "ImgflipPassword", password)
        else:
            # NOTE(review): "<PASSWORD>" looks like a redaction placeholder
            # (dataset anonymization), not a working default -- confirm.
            self.settings.setServerStat(ctx.message.guild, "ImgflipPassword", "<PASSWORD>")
        await ctx.channel.send("Username and password stored.")
| StarcoderdataPython |
11242643 | <reponame>Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c<filename>make_mozilla/tools/admin.py<gh_stars>1-10
from django.contrib.gis import admin
from make_mozilla.tools import models
class ToolAdmin(admin.ModelAdmin):
    """Django admin configuration for the Tool model."""
    # Auto-fill the slug field from the name field in the admin form.
    prepopulated_fields = {'slug':('name',),}
admin.site.register(models.Tool, ToolAdmin) | StarcoderdataPython |
3297465 | <filename>HELCATS_match_FLARECAST_1.py
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 16:23:10 2017
@author: guerraaj
"""
import requests
import datetime
import numpy as np
def download_range(service_url, dataset, start, end, step=datetime.timedelta(days=30), **params):
    """
    Download a time range from the FLARECAST property service in slices.

    service_url: URL to get to the service. This is all the part before '/ui', e.g.
        'http://cluster-r730-1:8002'
        'http://api.flarecast.eu/property'
        Type: string
    dataset: The dataset to download from
        Type: string
    start, end: Total start and end time of the data to download
        Type: datetime
    step: Time range of a single download slice. The total range (start - end)
        is split into windows of this size and each window is fetched separately.
        Type: timedelta
    params: Extra query parameters passed through to the HTTP request, e.g.
        property_type="sfunction_blos,sfunction_br" or nar=3120

    returns: List with all entries, as if the whole range were one request.
        Type: List of dicts
    """
    all_data = []
    while start < end:
        response = None
        end_step = min(start + step, end)
        try:
            # NOTE: no trailing comma here -- the original accidentally
            # assigned a 1-tuple to params["time_start"].
            params["time_start"] = "between(%s,%s)" % (
                start.isoformat(),
                end_step.isoformat()
            )
            response = requests.get(
                "%s/region/%s/list" % (service_url, dataset),
                params=params
            )
        except requests.exceptions.BaseHTTPError as ex:
            # The original used "..." % ex without a placeholder, which
            # itself raises TypeError inside the handler.
            print("exception while downloading: %s" % ex)
        if response is not None and response.status_code == 200:
            all_data.extend(response.json()["data"])
        else:
            resp_msg = response.json() if response is not None else ""
            print("error while downloading time range (%s - %s): %s" % (
                start, start + step, resp_msg
            ))
        start += step
    return all_data
# FUNCTION TO TRANSFORM LOCATION FORMAT
def location(loc):
    """Parse a heliographic location string such as 'N12W34'.

    Returns [lat_sign, lon_sign, lat_deg, lon_deg] using this script's
    convention (N -> +1, S -> -1, E -> -1, W -> +1), or an empty list when
    *loc* is the blank placeholder ' '.
    """
    if loc == ' ':
        return []
    lat_sign = 1 if loc[0:1] == 'N' else -1
    lon_sign = -1 if loc[3:4] == 'E' else 1
    return [lat_sign, lon_sign, int(float(loc[1:3])), int(float(loc[4:6]))]
# FUNCTION TO MATCH REGIONS
def comp_location(hc_loc,fc_lon,fc_lat,tol):
    """Match a HELCATS location string against FLARECAST HG coordinates.

    Returns True when both points lie in the same lon/lat quadrant and
    their angular distances from disk centre differ by less than *tol*
    degrees.  Note only the radial distances are compared, not the actual
    positions.  NOTE(review): the caller stores this boolean as
    "fc_data_q", which its comment describes as a distance in degrees --
    confirm the intended value.
    """
    # FIRST CONVERT HELCATS LOCATION FORMAT
    region_match = False
    #if len(hc_loc) == 1:
    hg_coor = location(hc_loc)
    #else:
    #hg_coor = hc_loc
    # Sign conventions follow location(): N/S -> +/-1 latitude, W/E -> +/-1 longitude.
    if fc_lon < 0:
        sfc_lon = -1
    else:
        sfc_lon = 1
    if fc_lat < 0:
        sfc_lat = -1
    else:
        sfc_lat = 1
    if (sfc_lon == hg_coor[1] and sfc_lat == hg_coor[0]):
        fc_d = np.sqrt(fc_lon*fc_lon + fc_lat*fc_lat)
        hc_d = np.sqrt(hg_coor[2]*hg_coor[2] + hg_coor[3]*hg_coor[3])
        diff_ = np.abs(fc_d - hc_d)
        if diff_ < tol: # TOL DEGREE IS THE TOLERANCE TO MATCH REGIONS
            region_match = True
    return region_match
# FUNCTION TO ROTATE REGIONS LOCATION
def rot_regions(nloc, ntime, srstime):
    """Differentially rotate a region location from the SRS reference time
    to the event time.

    nloc: location string like 'N12W34' (parsed by location()).
    ntime/srstime: event and SRS reference datetimes.
    Returns the rotated location as a string in the same format.
    """
    loc = location(nloc)
    ar_lat = loc[0]*loc[2]
    ar_lon = loc[1]*loc[3]
    # Solar differential-rotation coefficients (deg/day), sin(latitude) profile.
    a = 14.713
    b = -2.396
    c = -1.787
    # Minutes elapsed between the SRS reference time and the event time.
    if ntime > srstime:
        minn = int((ntime - srstime).total_seconds()/60.)
    elif ntime < srstime:
        minn = -int((srstime - ntime).total_seconds()/60.)
    else:
        # The original left a raw timedelta here, breaking the arithmetic below.
        minn = 0
    # np.sin expects radians; the original passed the latitude in degrees.
    sin_lat = np.sin(np.deg2rad(ar_lat))
    rotation = a + b*sin_lat**2.0 + c*sin_lat**4.0  # In deg/day
    rotation = rotation/1440.0  # In deg/min
    ar_lon = ar_lon + minn*rotation  # In degree
    # Inclusive comparisons make the sign letters total; the original raised
    # NameError when the longitude or latitude was exactly zero.
    s1 = 'W' if ar_lon >= 0. else 'E'
    s2 = 'N' if ar_lat >= 0. else 'S'
    nloc_lat = s2 + "%02d" % loc[2]
    nloc_lon = s1 + "%02d" % abs(ar_lon)
    return nloc_lat + nloc_lon
if __name__ == "__main__":
    # NOTE(review): this block uses Python 2 print statements throughout.
    import iso8601
    import json
    import io
    import dateutil
    # SHARP DATA ONLY EXISTS SINCE SEPT 2012
    sharp_date = datetime.datetime(2012,9,1)
    # HELCATS/LOWCAT CATALOGUE FILENAME
    json_data=open("helcats_list.json").read()
    helcats_list = json.loads(json_data)
    # FLARECAST ACTIVE REGION PROPERTY -
    ps = "*" #ALL OR SELECT FROM LIST BELOW
    """
    LIST OF FLARECAST AR PROPERTY NAMES
    alpha_exp_cwt_blos, alpha_exp_cwt_br, alpha_exp_cwt_btot, #WAVELET POWER SPECTRAL INDEX
    alpha_exp_fft_blos,alpha_exp_fft_br,alpha_exp_fft_btot, #FOURIER POWER SPECTRAL INDEX
    beff_blos,beff_br, #B EFFECTIVE
    decay_index_blos,decay_index_br, #DECAY INDEX
    flow_field_bvec, #FLOW FIELD
    helicity_energy_bvec, #HELICITY
    ising_energy_blos,ising_energy_br, #ISING ENERGY
    ising_energy_part_blos,ising_energy_part_br, #ISING ENERGY PARTITIONS
    mpil_blos,mpil_br, #MPILs PARAMETERS
    nn_currents, #NON NEUTRALIZED CURRENTS
    r_value_blos_logr,r_value_br_logr, #R VALUE
    sharp_kw, #EXTENDED SHARP KEYWORDS
    wlsg_blos,wlsg_br, #FALCONER'S WLSG
    mf_spectrum_blos,mf_spectrum_br,mf_spectrum_btot, #MULTI-FRACTAL SPECTRUM
    sfunction_blos,sfunction_br,sfunction_btot, #STRUCTURE FUNCTION
    frdim_blos,frdim_br,frdim_btot, #FRACTAL DIMENSION
    gen_cor_dim_blos,gen_cor_dim_br,gen_cor_dim_btot, #GENERALIZED CORRELATION DIMENSION
    gs_slf, #SUNSPOT-MAGNETIC PROPERTIES
    """
    # EXTRACT FROM HELCATS LIST THOSE EVENTS WITH ASSOCIATED SOURCE REGIONS
    reduced_list = []
    for i in helcats_list:
        ind = i["FL_TYPE"]
        if ind == 'swpc' or ind == 'hessi':
            reduced_list.append(i)
    print 'Total CMEs with associatted Flare source region: ', len(reduced_list)
    # FOR THOSE EVENTS IN THE REDUCED LIST, WE KEEP THOSE AFTER SHARP DATA IS AVAILABLE (SHARP_DATE)
    for jj in enumerate(reduced_list):
        j = jj[1]
        print 'HELCATS CME event source region: ', jj[0],'.......'
        hel_date = j["FL_STARTTIME"]
        hel_date = dateutil.parser.parse(hel_date)
        # Search window around the flare start time.
        idate = hel_date - datetime.timedelta(minutes=60) # PLAY WITH THESE VALUES TO MATCH TIMES BETTER
        edate = hel_date + datetime.timedelta(minutes=5) #
        if idate > sharp_date:
            print 'HELCATS date', hel_date
            nar = int(j["SRS_NO"])
            #
            loc1a = j["FL_LOC"]
            loc1 = loc1a.encode('ascii','ignore')
            print "Location according to event list", loc1
            if loc1 == ' ':
                # No flare location in the event list: fall back to the NOAA
                # SRS midnight location and rotate it to the event time.
                loc1a = j["SRS_LOC"]
                loc1a = loc1a.encode('ascii','ignore')
                print 'NOAA source region location at midnight', loc1a
                #CORRECT NOAA LOCATION (MIDNIGHT) TO EVENT ACTUAL TIME
                if loc1a == ' ':
                    continue
                stime = j["SRS_TIME"].encode('ascii','ignore')
                print 'SRS file time', stime
                srstime = dateutil.parser.parse(stime)
                loc1 = rot_regions(loc1a,hel_date,srstime)
                print 'Corrected location from NOAA', loc1
            yes = False
            if nar or loc1:
                # FLARECAST stores NOAA numbers offset by 10000.
                nar = nar + 10000
                print 'NOAA number from HELCATS', nar
                # requesting data from FLARECAST property DB
                idate = datetime.datetime.strftime(idate,'%Y-%m-%dT%H:%M:00Z')
                edate = datetime.datetime.strftime(edate,'%Y-%m-%dT%H:%M:00Z')
                start = iso8601.parse_date(idate)
                end = iso8601.parse_date(edate)
                #KEEP production_02 CHECK API.FLARECAST.EU FOR MOST COMPLETE DATA PRODUCTION
                rdata = download_range("http://api.flarecast.eu/property", "production_02", start, end, property_type=ps, region_fields="*")
                if rdata:
                    print 'FLARECAST date', rdata[0]["time_start"]
                # First pass: match by (offset) NOAA active-region number.
                if yes == False:
                    for m in range(len(rdata)):
                        nnar = rdata[m]["meta"]["nar"]
                        if nnar:
                            if nar in nnar and len(nnar) == 1:
                                print 'Region matched by NOAA No', nar
                                # ADD A FIELD FOR QUALITY OF THE MATCH -- 0 MEANS MATCHED BY NOAA NUMBER
                                mm = rdata[m]["data"]
                                mm["fc_data_q"] = 0
                                j["FC_data"] = mm
                                yes = True
                                break
                # Second pass: match by position when the NOAA number failed.
                if yes == False:
                    for m in range(len(rdata)):
                        tolerance = 15.0 # Degrees of total distance between FC region and HC source region
                        comp_regions = comp_location(loc1,rdata[m]["long_hg"],rdata[m]["lat_hg"],tolerance)
                        if comp_regions:
                            print 'Region matched by position'
                            print 'Region location from FLARECAST',rdata[m]["lat_hg"],rdata[m]["long_hg"]
                            # ADD A FIELD FOR QUALITY OF THE MATCH -- !=0 MEANS SOURCE REGION IS "fl_data_q" DEGREES FROM FLARECAST REGION
                            # NOTE(review): comp_location returns a bool, not degrees -- confirm.
                            mm = rdata[m]["data"]
                            mm["fc_data_q"] = comp_regions
                            j["FC_data"] = mm
                            yes = True
                            break
            if not yes:
                print 'No SHARP Region matched to candidate source region'
    # FIND THE NUMBER OF REGIONS MATCHED
    one = 0
    for l in reduced_list:
        try:
            ind = l["FC_data"]
            if ind:
                one += 1
        except:
            continue
    print 'Number of HELCATS events matched to FLARECAST regions:', one
    #
    with io.open('output_file.txt', 'w', encoding='utf-8') as f:
        f.write(json.dumps(reduced_list , ensure_ascii=False))
| StarcoderdataPython |
165767 | <reponame>justmao945/lab
#!/usr/bin/env python2
# -*- coding:utf-8 -*-
'''
Here we firstly convert RGB model to CMYK model to split bands, so that
the routine can process on every channel. Finally, merge all bands back
and convert back to RGB model. Now we get a colored image.
'''
import Image
import sys
import ed
if __name__ == "__main__":
  # NOTE(review): Python 2 print statement below.
  if len(sys.argv) != 2:
    print "Usage: %s <image file>" % sys.argv[0]
    exit(-1)
  # Convert to CMYK so error diffusion can run per colour channel.
  im = Image.open(sys.argv[1]).convert('CMYK')
  im = im.split()
  # error_diffusion mutates each channel's pixel access object in place.
  for chan in im:
    ed.error_diffusion( chan.load(), chan.size )
  # Merge the dithered channels back and save as RGB.
  im = Image.merge("CMYK", im).convert("RGB")
  im.save( "color-ed-" + sys.argv[1] )
| StarcoderdataPython |
3388656 | # coding: utf-8
from mock import patch
from httmock import urlmatch, HTTMock
from nose.tools import eq_
from acmd.tools import bundle
from acmd import tool_repo, Server
from test_utils.compat import StringIO
BUNDLE_LIST = """{
"data": [
{
"category": "",
"fragment": false,
"id": 0,
"name": "System Bundle",
"state": "Active",
"stateRaw": 32,
"symbolicName": "org.apache.felix.framework",
"version": "4.2.0"
},
{
"category": "",
"fragment": false,
"id": 176,
"name": "Abdera Client",
"state": "Active",
"stateRaw": 32,
"symbolicName": "org.apache.abdera.client",
"version": "1.0.0.R783018"
},
{
"category": "",
"fragment": false,
"id": 177,
"name": "Abdera Core",
"state": "Active",
"stateRaw": 32,
"symbolicName": "org.apache.abdera.core",
"version": "1.0.0.R783018"
}
],
"s": [
329,
320,
9,
0,
0
],
"status": "Bundle information: 329 bundles in total - all 329 bundles active."
}"""
def test_tool_registration():
    # The repository must expose a tool registered under the 'bundle' name.
    assert tool_repo.get_tool('bundle') is not None
@urlmatch(netloc='localhost:4502', path='/system/console/bundles.json', method='GET')
def bundles_mock(url, request):
    # Serve the canned bundle-list payload for the console endpoint.
    return BUNDLE_LIST
# Expected stdout of `bundle list`: symbolic name, version and state,
# tab-separated, one line per bundle in BUNDLE_LIST.
EXPECTED_LIST = """org.apache.felix.framework\t4.2.0\tActive
org.apache.abdera.client\t1.0.0.R783018\tActive
org.apache.abdera.core\t1.0.0.R783018\tActive
"""
@patch('sys.stdout', new_callable=StringIO)
def test_list_bundles(stdout):
    # `bundle list` against the mocked endpoint should print EXPECTED_LIST.
    with HTTMock(bundles_mock):
        tool = bundle.BundleTool()
        server = Server('localhost')
        tool.execute(server, ['bundle', 'list'])
    eq_(EXPECTED_LIST, stdout.getvalue())
# Action that mock_bundle should expect; each test sets this before
# issuing the command.
_expected_action = None
@urlmatch(netloc='localhost:4502', path="/system/console/bundles/mock_bundle", method='POST')
def mock_bundle(url, request):
    # Verify the POSTed form body, then answer with the matching bundle
    # state JSON (stateRaw 4 for 'stop', 32 otherwise).
    eq_('action={}'.format(_expected_action), request.body)
    return '{"fragment":false,"stateRaw":4}' if _expected_action == 'stop' else '{"fragment":false,"stateRaw":32}'
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_stop_bundle(stderr, stdout):
    # `bundle stop` is silent by default and echoes the raw JSON with --raw.
    global _expected_action
    _expected_action = 'stop'
    with HTTMock(mock_bundle):
        tool = tool_repo.get_tool('bundle')
        server = Server('localhost')
        ret = tool.execute(server, ['bundle', 'stop', 'mock_bundle'])
        eq_('', stdout.getvalue())
        eq_('', stderr.getvalue())
        eq_(None, ret)
        ret = tool.execute(server, ['bundles', 'stop', '--raw', 'mock_bundle'])
        eq_('{"fragment":false,"stateRaw":4}\n', stdout.getvalue())
        eq_('', stderr.getvalue())
        eq_(None, ret)
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_start_bundle(stderr, stdout):
    # `bundle start` is silent by default and echoes the raw JSON with --raw.
    global _expected_action
    _expected_action = 'start'
    with HTTMock(mock_bundle):
        tool = tool_repo.get_tool('bundle')
        server = Server('localhost')
        ret = tool.execute(server, ['bundle', 'start', 'mock_bundle'])
        eq_('', stdout.getvalue())
        eq_('', stderr.getvalue())
        eq_(None, ret)
        ret = tool.execute(server, ['bundles', 'start', '--raw', 'mock_bundle'])
        eq_('{"fragment":false,"stateRaw":32}\n', stdout.getvalue())
        eq_('', stderr.getvalue())
        eq_(None, ret)
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_bad_command(stderr, stdout):
    # An unknown bundle action prints an error to stderr and returns -2.
    tool = tool_repo.get_tool('bundle')
    server = Server('localhost')
    ret = tool.execute(server, ['bundle', 'foobar'])
    eq_('', stdout.getvalue())
    eq_('error: Unknown bundle action foobar\n', stderr.getvalue())
    eq_(-2, ret)
| StarcoderdataPython |
# Exercises per topic.
num_exercises = {"functions": 10, "syntax": 13, "control flow": 15, "loops": 22, "lists": 19, "classes": 18, "dictionaries": 18}
# Total across all topics -- sum() replaces the manual accumulation loop.
total_exercises = sum(num_exercises.values())
print(total_exercises)
3253477 | <filename>model_zoo/research/cv/dem/src/config.py<gh_stars>1-10
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py
"""
from easydict import EasyDict as edict
# Training hyper-parameters for the AwA branch: lr_* learning rates,
# wd_* weight decays, clip_* gradient-clipping bounds (per sub-network).
awa_cfg = edict({
    'lr_att': 1e-5,
    'wd_att': 1e-2,
    'clip_att': 0.2,
    'lr_word': 1e-4,
    'wd_word': 1e-3,
    'clip_word': 0.5,
    'lr_fusion': 1e-4,
    'wd_fusion': 1e-2,
    'clip_fusion': 0.5,
    'batch_size': 64,
})
# Training hyper-parameters for the CUB branch (attribute network only).
cub_cfg = edict({
    'lr_att': 1e-5,
    'wd_att': 1e-2,
    'clip_att': 0.5,
    'batch_size': 100,
})
| StarcoderdataPython |
9781660 | <filename>src/main.py
import logging
def main():
    """Log a startup message (smoke test for the logging configuration)."""
    # Look the logger up here so main() also works when the module is
    # imported: the original referenced a global `logger` that is only
    # defined inside the __main__ guard, raising NameError on import.
    logging.getLogger(__name__).info("starting")
if __name__ == "__main__":
    """ this should be in the program's main/start/run function """
    import logging.config
    # Load handler/formatter configuration from the adjacent ini file;
    # raises if logging.conf is missing from the working directory.
    logging.config.fileConfig("logging.conf")
    logger = logging.getLogger(__name__)
    main()
| StarcoderdataPython |
362242 | <reponame>AppliedMechanics-EAFIT/Mod_Temporal<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Graficos relacionados con la seccion de interpolacion.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
# Global plot style: hide top/right spines, use Computer Modern math font.
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
plt.rcParams["mathtext.fontset"] = "cm"
# Function to interpolate, sampled coarsely (5 nodes) and finely (101 points).
fun = lambda x: x**3 + 4*x**2 - 10
x = np.linspace(-1, 1, 5)
y = fun(x)
x_inter = np.linspace(-1, 1, 101)
y_inter = fun(x_inter)
plt.close("all")
#%% Points to interpolate
plt.figure(figsize=(4, 2.5))
plt.plot(x, y, "ok")
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y = f(x)$", fontsize=14)
plt.savefig("interp_puntos.pdf", bbox_inches="tight", transparent=True)
#%% Global interpolation
plt.figure(figsize=(4, 2.5))
plt.plot(x_inter, y_inter, linestyle="dashed")
plt.plot(x, y, "ok")
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y = f(x)$", fontsize=14)
plt.savefig("interp_continua.pdf", bbox_inches="tight", transparent=True)
#%% Piecewise interpolation
plt.figure(figsize=(4, 2.5))
plt.plot(x, y, linestyle="dashed")
plt.plot(x, y, "ok")
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y = f(x)$", fontsize=14)
plt.savefig("interp_tramos.pdf", bbox_inches="tight", transparent=True)
#%% Basis interpolants (quadratic Lagrange basis on [-1, 1])
plt.figure(figsize=(4, 3))
ax = plt.subplot(111)
plt.plot(x_inter, -0.5*(1 - x_inter)*x_inter, label="$L_1(x)$")
plt.plot(x_inter, 0.5*(1 + x_inter)*x_inter, label="$L_2(x)$")
plt.plot(x_inter, 1 - x_inter**2, label="$L_3(x)$")
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y$", fontsize=14)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
plt.legend(loc="upper center", ncol=3, bbox_to_anchor=(0.5, 1.2))
plt.savefig("interp_base.pdf", bbox_inches="tight", transparent=True)
#%% Interpolant vs. original function
plt.figure(figsize=(4, 3))
ax = plt.subplot(111)
fun2 = lambda x: 4*x**2 + x -10
plt.plot(x_inter, y_inter, label="$f(x)$")
plt.plot(x_inter, fun2(x_inter), linestyle="dashed", label="$p(x)$")
plt.plot([-1, 0, 1], fun(np.array([-1, 0, 1])), "ok")
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y$", fontsize=14)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
plt.legend(loc="upper center", ncol=3, bbox_to_anchor=(0.5, 1.2))
plt.savefig("interp_comparacion.pdf", bbox_inches="tight", transparent=True)
#%% Interpolant derivative vs. exact derivative
plt.figure(figsize=(4, 3))
ax = plt.subplot(111)
deriv1 = lambda x: 3*x**2 + 8*x
deriv2 = lambda x: 8*x + 1
plt.plot(x_inter, deriv1(x_inter), label=r"$\frac{\mathrm{d}f(x)}{\mathrm{d}x}$")
plt.plot(x_inter, deriv2(x_inter), linestyle="dashed",
         label=r"$\frac{\mathrm{d}p(x)}{\mathrm{d}x}$")
plt.plot([-1, 0, 1], deriv1(np.array([-1, 0, 1])), "ok")
plt.xlabel("$x$", fontsize=14)
plt.ylabel("$y$", fontsize=14)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
plt.legend(loc="upper center", ncol=3, bbox_to_anchor=(0.5, 1.2))
plt.savefig("interp_deriv.pdf", bbox_inches="tight", transparent=True)
#plt.show() | StarcoderdataPython |
8000625 | #### Geometry file (.xml file), unit cell information (.cif file) and scattering kernels file (name-scatterer.xml file) which defines the scattering formula or diffraction peaks
import os,sys, numpy as np
# Make this script's own directory importable regardless of the CWD.
thisdir = os.path.abspath(os.path.dirname(__file__))
if thisdir not in sys.path:
    sys.path.insert(0, thisdir)
template = """<?xml version="1.0"?>
<!DOCTYPE SampleAssembly[
{shape_file_entries}
]>
<SampleAssembly name="ClampCell"
max_multiplescattering_loops_among_scatterers="1"
max_multiplescattering_loops_interactM_path1="1"
min_neutron_probability="0.01"
>
{sample_blocks}
<LocalGeometer registry-coordinate-system="InstrumentScientist">
{geom_regs}
</LocalGeometer>
<Environment temperature="300*K"/>
</SampleAssembly>
"""
def shape_file_entry(shape_name, shape_fileName):
    """Build the XML ENTITY declaration line for one shape file."""
    return ' <!ENTITY {} SYSTEM "{}.xml">\n    '.format(shape_name, shape_fileName)
def sample_block(name, shape_name, formula, strutureFiletype):
    """Return the <PowderSample> XML block for one scatterer.

    name: scatterer name; shape_name: XML entity holding its geometry;
    formula: chemical formula (also the structure file basename);
    strutureFiletype: structure file extension ('xyz' or 'cif').
    """
    # Whitespace inside the template is significant for the generated
    # XML layout -- do not reflow.
    return """ <PowderSample name="{name}" type="sample">
    <Shape>
        &{shape_name};
    </Shape>
    <Phase type="crystal">
        <ChemicalFormula>{formula}</ChemicalFormula>
        <{strutureFiletype}file>{formula}.{strutureFiletype}</{strutureFiletype}file>
    </Phase>
    </PowderSample>
""".format(name=name, shape_name=shape_name, formula=formula, strutureFiletype=strutureFiletype)
# Scatterers of the assembly: (name, shape_name, geometry file name, formula,
# structure filetype).  A list -- not a set, as originally written -- keeps
# the generated XML ordering deterministic: set iteration order varies with
# Python's hash randomization between runs.
scatterers = [
    ('outer-body', 'shapeAl', 'outer-body-geom', 'Al', 'xyz'),
    ('inner-sleeve', 'shapeCu', 'inner-sleeve-geom', 'Cu', 'xyz'),
    ('sample', 'shapeSample', 'sample_geom', 'Si', 'xyz'),
    ('collimator', 'shapeColl', 'coll_geometry', 'B4C', 'cif'),
]
def makeSAXML(sampleassembly_fileName, scatterers=scatterers):
    """Render the sample-assembly XML for *scatterers* and write it to
    ../sample/sampleassembly_<fileName>.xml (relative to this script).

    Returns an empty tuple.
    """
    # Entity declarations for the DOCTYPE header, one per scatterer.
    shape_file_entries = [shape_file_entry(shape_name, shape_fileName) for name, shape_name, shape_fileName, formula,strutureFiletype in scatterers]
    shape_file_entries='\n'.join(shape_file_entries)
    # <PowderSample> body blocks.
    sample_blocks = [sample_block(name, shape_name, formula,strutureFiletype) for name, shape_name, shape_fileName, formula,strutureFiletype in scatterers]
    sample_blocks = '\n'.join(sample_blocks)
    # Geometer registrations, all at the origin with no rotation.
    lines = ['<Register name="{}" position="(0,0,0)" orientation="(0,0,0)"/>' .format(name) for name, shape_name, shape_fileName, formula,strutureFiletype in scatterers]
    geom_regs = '\n        '.join(lines)
    text = template.format(shape_file_entries=shape_file_entries, sample_blocks=sample_blocks, geom_regs=geom_regs)
    with open(os.path.join(thisdir, '../sample/sampleassembly_{}.xml'.format(sampleassembly_fileName)), "w") as sam_new:
        sam_new.write(text)
    # return(sampleassembly_fileName)
    return()
| StarcoderdataPython |
6688295 | #convert hdf5 label files into bed format
import sys
# Usage: script.py <task> <fold> -- converts one task/fold label file.
task=sys.argv[1]
fold=sys.argv[2]
import pandas as pd
# Hard-coded cluster paths for input (hdf5 labels) and output (bed files).
in_prefix="/srv/scratch/annashch/5_cell_lines_bias_correction/gc_covariate/classification/"
out_prefix="/srv/scratch/annashch/5_cell_lines_bias_correction/svm"
data=pd.read_hdf(in_prefix+"/"+task+"/"+"predictions.DNASE."+task+".classificationlabels.withgc."+fold+".labels.0")
# Tab-separated, index kept as the first column, no header -> BED-like format.
data.to_csv(out_prefix+"/"+task+"/"+"svm_predictions_svmtrainset_genometestset"+"/"+"labels."+str(fold)+".bed",sep='\t',index=True,header=False)
| StarcoderdataPython |
1629727 | <reponame>ngmcfarland/emily
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
import re
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest."""
    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra CLI arguments forwarded to pytest.main().
        self.pytest_args = []
    def run_tests(self):
        # Imported lazily: pytest may not be installed at setup time.
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
def strip_rst_links(readme):
    """Strip reStructuredText hyperlink markup from *readme*.

    Inline links like `Label <url>`_ are reduced to their label, and
    `.. _target: url` definition lines are removed entirely.
    """
    inline_link = re.compile(r"`([A-Za-z\s_0-9\-]+)\s?[A-Za-z0-9_/:\.<>\-]*`_")
    target_line = re.compile(r"\.\.\s_[a-zA-Z0-9\-_/:\.\s]+\n")
    return target_line.sub("", inline_link.sub(r"\1", readme))
# Load long-description sources at import time; setup.py is expected to run
# from the project root, so relative paths are safe here.
with open('README.rst') as f:
    readme = f.read()
with open('RELEASES.rst') as f:
    releases = f.read()
setup(
    name='emily',
    version='1.0.8',
    url='https://github.com/ngmcfarland/emily',
    license='Apache Software License',
    author='<NAME>',
    tests_require=['pytest'],
    install_requires=['python-Levenshtein>=0.12.0',
                      'fuzzywuzzy>=0.15.0',
                      'Flask>=0.12',
                      'Flask-Cors>=3.0.2',
                      'PyYAML>=3.11',
                      'six>=1.10.0',
                      ],
    # Route `setup.py test` through the pytest wrapper defined above.
    cmdclass={'test': PyTest},
    author_email='<EMAIL>',
    description='A highly customizable chatbot implemented in Python.',
    # PyPI renders plain RST, so inline link markup is stripped first.
    long_description=strip_rst_links(readme) + '\n\n' + releases,
    packages=['emily','emily.emily_modules'],
    package_dir={'emily':'emily'},
    include_package_data=True,
    platforms='any',
    download_url='https://github.com/ngmcfarland/emily/archive/1.0.8.tar.gz',
    # Console entry points: interactive chat and the Flask server.
    entry_points={
        'console_scripts': ['emily=emily:chat','emily_server=emily:emily_server']
    },
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    extras_require={
        'testing':['pytest'],
        'aws':['boto3>=1.4.4'],
    }
) | StarcoderdataPython |
1628859 | <filename>mtl_coherency.py
import time
import os
import operator
import random
import datetime
import logging
import sys
import argparse
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import Counter
from ast import literal_eval
from tqdm import tqdm, trange
from nltk.corpus import stopwords
from sklearn.metrics import mean_squared_error, f1_score, accuracy_score, label_ranking_average_precision_score, confusion_matrix, average_precision_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchtext as tt
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset, Dataset)
from torch.nn.modules.distance import CosineSimilarity
from torch.nn.modules import HingeEmbeddingLoss
from model.mtl_models import CosineCoherence, MTL_Model3
from data_preparation import get_dataloader
test_amount = 1
def main():
args = parse_args()
init_logging(args)
# Init randomization
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda != -1:
cuda_device_name = "cuda:{}".format(args.cuda)
device = torch.device(
cuda_device_name if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cpu') # if torch.cuda.is_available() else 'cpu')
logging.info("Used Device = {}".format(str(device)))
train_datasetfile = os.path.join(
args.datadir, "train", "coherency_dset_{}.txt".format(str(args.task)))
val_datasetfile = os.path.join(
args.datadir, "validation", "coherency_dset_{}.txt".format(str(args.task)))
test_datasetfile = os.path.join(
args.datadir, "test", "coherency_dset_{}.txt".format(str(args.task)))
if args.model == "cosine":
if args.do_train:
logging.info("cannot train the cosine model!, we ignore --do_train ")
args.do_train = False
model = CosineCoherence(args, device).to(device)
elif args.model == "random":
if args.do_train:
logging.info("cannot train the random model!, we ignore --do_train ")
args.do_train = False
model = None
elif args.model == "model-3":
model = MTL_Model3(args, device).to(device)
else:
raise NotImplementedError("specified model is not supported")
logging.info("Used Model = {}".format(str(args.model)))
best_epoch = -1
train_dl = None
val_dl = None
test_dl = None
if args.do_train:
logging.info('load training data from: %s' % train_datasetfile)
train_dl = get_dataloader(train_datasetfile, args)
logging.info('load validation data from: %s' % train_datasetfile)
val_dl = get_dataloader(val_datasetfile, args)
sigma_1 = nn.Parameter(torch.tensor(
args.mtl_sigma, requires_grad=True).to(device))
sigma_2 = nn.Parameter(torch.tensor(
args.mtl_sigma, requires_grad=True).to(device))
if args.loss == "mtl":
optimizer = torch.optim.Adam(list(model.parameters())+[
sigma_1, sigma_2], lr=args.learning_rate)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
hinge = torch.nn.MarginRankingLoss(reduction='none', margin=0.1).to(device)
epoch_scores = dict()
for epoch in trange(args.epochs, desc="Epoch"):
output_model_file_epoch = os.path.join(
args.datadir, "{}_task-{}_loss-{}_epoch-{}.ckpt".format(str(model), str(args.task), str(args.loss), str(epoch)))
for i, ((utts_left, utts_right),
(coh_ixs, (acts_left, acts_right)), (len_u1, len_u2, len_d1, len_d2)) in tqdm(enumerate(train_dl),
total=len(train_dl), desc='Training', postfix="LR: {}".format(args.learning_rate)):
if args.test and i >= test_amount:
break
coh_ixs = coh_ixs.to(device)
coh1, (da1, loss1) = model(utts_left.to(device),
acts_left.to(device),
(len_u1.to(device), len_d1.to(device)))
coh2, (da2, loss2) = model(utts_right.to(device),
acts_right.to(device),
(len_u2.to(device), len_d2.to(device)))
# coh_ixs is of the form [0,1,1,0,1], where 0 indicates the first one is the more coherent one
# for this loss, the input is expected as [1,-1,-1,1,-1], where 1 indicates the first to be coherent, while -1 the second
# therefore, we need to transform the coh_ixs accordingly
loss_coh_ixs = torch.add(torch.add(coh_ixs*(-1), torch.ones(
coh_ixs.size()).to(device))*2, torch.ones(coh_ixs.size()).to(device)*(-1))
loss_da = loss1+loss2
loss_coh = hinge(coh1, coh2, loss_coh_ixs)
if args.loss == "da":
loss = loss_da
elif args.loss == "coh":
loss = hinge(coh1, coh2, loss_coh_ixs)
elif args.loss == "mtl":
loss = torch.div(loss_da, sigma_1**2) + torch.div(loss_coh,
sigma_2**2) + torch.log(sigma_1) + torch.log(sigma_2)
elif args.loss == 'coin':
d = random.uniform(0, 1)
if d < 0.5:
loss = loss_da
else:
loss = loss_coh
elif args.loss == 'sum':
loss = loss_da + loss_coh
optimizer.zero_grad()
loss.mean().backward()
optimizer.step()
# save after every epoch
torch.save(model.state_dict(), output_model_file_epoch)
if args.do_eval:
if train_dl == None:
train_dl = get_dataloader(train_datasetfile, args)
if val_dl == None:
val_dl = get_dataloader(val_datasetfile, args)
test_dl = get_dataloader(test_datasetfile, args)
def _eval_datasource(dl, desc_str):
coh_y_true = []
coh_y_pred = []
da_f1_scores = []
da_y_true = []
da_y_pred = []
for i, ((utts_left, utts_right),
(coh_ixs, (acts_left, acts_right)),
(len_u1, len_u2, len_d1, len_d2)) in tqdm(enumerate(dl),
total=len(dl), desc=desc_str, postfix="LR: {}".format(args.learning_rate)):
# additional code to shorten context/ dialogue
if args.test and i >= test_amount:
break
if model == None: # generate random values
pred = [random.randint(0, 1) for _ in range(coh_ixs.size(0))]
coh_y_pred += pred
coh_y_true += coh_ixs.detach().cpu().numpy().tolist()
else:
coh1, lda1 = model(utts_left.to(device), acts_left.to(
device), (len_u1.to(device), len_d1.to(device)))
coh2, lda2 = model(utts_right.to(device), acts_right.to(
device), (len_u2.to(device), len_d2.to(device)))
_, pred = torch.max(
torch.cat([coh1.unsqueeze(1), coh2.unsqueeze(1)], dim=1), dim=1)
coh_y_pred += pred.detach().cpu().numpy().tolist()
coh_y_true += coh_ixs.detach().cpu().numpy().tolist()
if lda1 != None and lda2 != None:
da1 = lda1[0].view(acts_left.size(
0)*acts_left.size(1)).detach().cpu().numpy()
da2 = lda2[0].view(acts_left.size(
0)*acts_left.size(1)).detach().cpu().numpy()
acts_left = acts_left.view(acts_left.size(
0)*acts_left.size(1)).detach().cpu().numpy()
acts_right = acts_right.view(acts_right.size(
0)*acts_right.size(1)).detach().cpu().numpy()
acts_left, da1 = da_filter_zero(acts_left.tolist(), da1.tolist())
acts_right, da2 = da_filter_zero(acts_right.tolist(), da2.tolist())
da_y_pred += da1 + da2
da_y_true += acts_left + acts_right
return (coh_y_true, coh_y_pred), (da_y_true, da_y_pred)
def _log_dataset_scores(name, coh_y_true, coh_y_pred, da_y_true, da_y_pred):
    """Log dataset size, coherence accuracy, and DA accuracy/F1 for one split."""
    logging.info("%s size: %d" % (name, len(coh_y_true)))
    coh_acc = accuracy_score(coh_y_true, coh_y_pred)
    logging.info("%s coherence accuracy: %2.2f" % (name, coh_acc*100))
    da_acc = accuracy_score(da_y_true, da_y_pred)
    logging.info("%s DA accuracy: %2.2f" % (name, da_acc*100))
    # Fix: the metric computed is the weighted-average F1, not micro-F1;
    # the log label previously said "MicroF1" and misreported the metric.
    da_f1 = f1_score(da_y_true, da_y_pred, average='weighted')
    logging.info("%s DA WeightedF1: %2.2f" % (name, da_f1*100))
# choose the best epoch
if args.model != "random" and args.model != "cosine" and args.oot_model == None:
best_epoch = 0
if args.best_epoch:
best_epoch = args.best_epoch
else:
best_coh_acc, best_da_acc = None, None
for i in range(args.epochs):
model_file_epoch = os.path.join(args.datadir, "{}_task-{}_loss-{}_epoch-{}.ckpt".format(
str(model), str(args.task), str(args.loss), str(i)))
model.load_state_dict(torch.load(model_file_epoch))
model.to(device)
model.eval()
(coh_y_true, coh_y_pred), (da_y_true, da_y_pred) = _eval_datasource(
val_dl, "Validation {}:".format(i))
if i == 0:
best_coh_acc = accuracy_score(coh_y_true, coh_y_pred)
best_da_acc = accuracy_score(da_y_true, da_y_pred)
elif args.loss == 'da':
curr_da_acc = accuracy_score(da_y_true, da_y_pred)
if curr_da_acc > best_da_acc:
best_epoch = i
elif args.loss == 'coh':
curr_coh_acc = accuracy_score(coh_y_true, coh_y_pred)
if curr_coh_acc > best_coh_acc:
best_epoch = i
elif args.loss == 'mtl' or args.loss == 'coin' or args.loss == 'sum':
curr_coh_acc = accuracy_score(coh_y_true, coh_y_pred)
curr_da_acc = accuracy_score(da_y_true, da_y_pred)
if curr_coh_acc+curr_da_acc > best_coh_acc+best_da_acc:
best_epoch = i
logging.info("Best Epoch = {}".format(best_epoch))
# evaluate all sets on the best epoch
model_file_epoch = os.path.join(args.datadir, "{}_task-{}_loss-{}_epoch-{}.ckpt".format(
str(model), str(args.task), str(args.loss), str(best_epoch)))
model.load_state_dict(torch.load(model_file_epoch))
model.to(device)
model.eval()
elif args.oot_model:
model.load_state_dict(torch.load(args.oot_model))
model.to(device)
model.eval()
datasets = [('train', train_dl), ('validation', val_dl), ('test', test_dl)]
for (name, dl) in datasets:
(coh_y_true, coh_y_pred), (da_y_true, da_y_pred) = _eval_datasource(
dl, "Final Eval {}".format(name))
_log_dataset_scores(name, coh_y_true, coh_y_pred, da_y_true, da_y_pred)
def da_filter_zero(y_true, y_pred):
    """Drop every (gold, predicted) pair whose gold label is 0 (padding).

    Returns the filtered gold and predicted label lists, in order.
    """
    kept = [(gold, guess) for gold, guess in zip(y_true, y_pred) if gold != 0]
    gold_out = [gold for gold, _ in kept]
    guess_out = [guess for _, guess in kept]
    return gold_out, guess_out
def init_logging(args):
    """Configure file logging for one run and record the hyperparameters.

    Creates args.logdir if missing, replaces any existing root handlers, and
    writes a timestamped logfile named after the run mode/model/loss/task.
    """
    mode = "train" if args.do_train else "eval"
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    logfile = os.path.join(args.logdir, 'coherency_{}_{}_loss_{}_task_{}_{}.log'.format(
        mode, args.model, args.loss, args.task, timestamp))
    # Remove stale handlers so basicConfig actually takes effect on re-init.
    for handler in list(logging.root.handlers):
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=logfile, filemode='w',
                        level=logging.DEBUG, format='%(levelname)s:%(message)s')
    print("Logging to ", logfile)
    logging.info("Used Hyperparameters:")
    for label, value in [
            ("learning_rate", args.learning_rate),
            ("num_epochs", args.epochs),
            ("lstm_hidden_sent", args.lstm_sent_size),
            ("lstm_hidden_utt", args.lstm_utt_size),
            ("lstm_layers", args.lstm_layers),
            ("batch_size", args.batch_size),
            ("dropout probability", args.dropout_prob),
            ("MTL Sigma Init", args.mtl_sigma)]:
        logging.info("{} = {}".format(label, value))
    if args.oot_model:
        logging.info("using OOT Model = {}".format(args.oot_model))
    logging.info("========================")
    for label, value in [
            ("task", args.task),
            ("loss", args.loss),
            ("seed", args.seed),
            ("embedding", args.embedding)]:
        logging.info("{} = {}".format(label, value))
    logging.info("========================")
def parse_args():
    """Parse command-line options for training/evaluating coherence models.

    Returns the populated argparse.Namespace. When --num_classes is left at 0
    it is auto-filled: 5 for a 'daily_dialog' datadir, 50 otherwise (e.g.
    Switchboard).
    """
    parser = argparse.ArgumentParser()
    # This also serves as a kind of configuration object, so some parameters are not ought to be changed (listed below)
    parser.add_argument("--datadir",
                        required=True,
                        type=str,
                        help="""The input directory where the files of daily
                        dialog are located. the folder should have
                        train/test/validation as subfolders""")
    parser.add_argument("--logdir",
                        default="./logs",
                        type=str,
                        help="the folder to save the logfile to.")
    parser.add_argument('--seed',
                        type=int,
                        default=80591,
                        help="random seed for initialization")
    parser.add_argument('--batch_size',
                        type=int,
                        default=128,
                        help="")
    parser.add_argument('--epochs',
                        type=int,
                        default=20,
                        help="amount of epochs")
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0005,
                        help="")
    parser.add_argument('--dropout_prob',
                        type=float,
                        default=0.1,
                        help="the dropout probality for DA classification")
    parser.add_argument('--lstm_sent_size',
                        type=int,
                        default=128,
                        help="hidden size for the lstm models")
    parser.add_argument('--lstm_utt_size',
                        type=int,
                        default=256,
                        help="hidden size for the lstm models")
    parser.add_argument('--mtl_sigma',
                        type=float,
                        default=2.0,
                        help="initialization value for the two sigma values when using MTL Loss")
    parser.add_argument('--embedding',
                        type=str,
                        default="glove",
                        help="""from which embedding should the word ids be used.
                        alternatives: glove """)
    parser.add_argument('--model',
                        type=str,
                        default="cosine",
                        help="""with which model the dataset should be trained/evaluated.
                        alternatives: random | cosine | model-3 """)
    parser.add_argument('--loss',
                        type=str,
                        default="mtl",
                        help="""with which loss the dataset should be trained/evaluated.
                        alternatives: mtl | coin | da | coh """)
    # NOTE: --task is required, so a 'default' would never be used; none is set.
    parser.add_argument('--task',
                        required=True,
                        type=str,
                        help="""for which task the dataset should be created.
                        alternatives: up  (utterance permutation)
                                      us  (utterance sampling)
                                      ui  (utterance insertion)
                                      hup (half utterance petrurbation) """)
    parser.add_argument('--oot_model',
                        required=False,
                        type=str,
                        default=None,
                        help="""when doing Out-Of-Task evaluations, this provides the model file""")
    parser.add_argument('--best_epoch',
                        type=int,
                        default=None,
                        help="when evaluating, tell the best epoch to choose the file")
    parser.add_argument('--test',
                        action='store_true',
                        help="just do a test run on small amount of data")
    parser.add_argument('--cuda',
                        type=int,
                        default=-1,
                        help="which cuda device to take")
    # Fixed copy-pasted help strings: --do_train/--do_eval previously repeated
    # the --test description.
    parser.add_argument('--do_train',
                        action='store_true',
                        help="run the training loop")
    parser.add_argument('--do_eval',
                        action='store_true',
                        help="evaluate the trained model(s)")
    # usually unmodified parameters, kept here to have a config like object
    parser.add_argument('--num_classes',
                        type=int,
                        default=0,
                        help="number of DA classes; leave at 0 to auto-select (5 for daily_dialog, 50 otherwise, e.g. Switchboard)")
    parser.add_argument('--lstm_layers',
                        type=int,
                        default=1,
                        help="DONT CHANGE. amount of layers for LSTM models")
    parser.add_argument('--embedding_dim',
                        type=int,
                        default=300,
                        help="DONT CHANGE. embedding dimension for GloVe vectors")
    args = parser.parse_args()
    # 0 means "auto": infer the DA label-set size from the dataset folder name.
    if args.num_classes == 0:
        dataset = os.path.split(args.datadir)[-1]
        if dataset == "daily_dialog":
            args.num_classes = 5
        else:
            args.num_classes = 50
    return args
##########################################
# Script entry point: run main() and log the total wall-clock time.
if __name__ == "__main__":
    start_time = time.time()
    main()
    end_time = time.time()
    logging.info("Took %5.2f seconds" % (end_time - start_time))
    # Flush and close all logging handlers before exiting.
    logging.shutdown()
| StarcoderdataPython |
3272235 | <filename>netrics/tetrad_logit.py
# Ensure "normal" division
from __future__ import division
# Load library dependencies
import numpy as np
import scipy as sp
import scipy.optimize
import itertools as it
from numba import jit
import numexpr as ne
from logit import logit
from print_coef import print_coef
from helpers import generate_dyad_to_tetrads_dict, generate_tetrad_indices, \
organize_data_tetrad_logit, tetrad_logit_score_proj
# Define tetrad_logit() function
#-----------------------------------------------------------------------------#
# NOTE(review): this file is Python 2 (print statements, dict.iteritems below).
def tetrad_logit(D, W, dtcon=None, silent=False, W_names=None):
    """
    AUTHOR: <NAME>, <EMAIL>, June 2016
    This function computes the Tetrad Logit estimator introduced in Graham (2014, NBER No. 20341) -- "An Econometric
    Model of Link Formation with Degree Heterogeneity". The implementation is as described in the paper. Notation
    attempts to follow that used in the paper.
    INPUTS
    ------
    D            : N x N undirected adjacency matrix
    W            : List with elements consisting of N x N 2d numpy arrays of dyad-specific
                   covariates such that W[k][i,j] gives the k-th covariate for dyad ij
    dtcon        : Dyad and tetrad concordance (dtcon) List with elements [tetrad_to_dyads_indices,
                   dyad_to_tetrads_dict]. If dtcon=None, then construct it using generate_tetrad_indices()
                   function. See header to generate_tetrad_indices() for more information.
    silent       : If True then suppress all optimization and estimation output, show output otherwise.
    W_names      : List of K strings giving names of the columns of W_tilde. If silent=False then use
                   these in presentation of estimation output.
    OUTPUTS
    -------
    beta_TL        : K x 1 vector of tetrad logit point estimates of beta
    vcov_beta_TL   : K x K asymptotic-variance matrix for beta_TL
                     NOTE: vcov_beta_TL is already "divided by n" (just take square root of diagonal for std. errs.)
    tetrad_frac_TL : Fraction of tetrads that contribute to Tetrad Logit criterion function
    success        : corresponds to success component of OptimizeResult associated with Scipy's minimize function;
                     success = True if the tetrad logit optimizer exited successfully
    CALLS:         : ...logit()...
                     ...organize_data_tetrad_logit()...
    ------
    """
    # ------------------------------------------------------- #
    # - STEP 1: Prepare data for estimation                 - #
    # ------------------------------------------------------- #
    # compute size of the network and dimension of regressor matrix
    K = len(W)                              # Number of dyad-specific covariates
    N = np.shape(D)[0]                      # Number of agents in the network
    n = N*(N-1) // 2                        # Number of dyads in network
    Nchoose4 = N*(N-1)*(N-2)*(N-3) // 24    # Number of tetrads in network
    # organize data for input into logit maximizer
    [S, W_tilde, tetrad_frac_TL, proj_tetrads_dict] = organize_data_tetrad_logit(D, W, dtcon)
    # NOTES: S is a 3 (N choose 4) x 1 vector of -1, 0, 1 according to the configuration of the tetrad.
    #        W_tilde is a 3 (N choose 4) x K matrix of regressors corresponding to S. This is as described in the
    #        paper.
    # Drop all rows where S = 0 (i.e., no identifying content & not part of the criterion function)
    g = S.nonzero()                                 # row & column indices of for all elements of S
                                                    # that are non-zero
                                                    # (i.e., equal to -1 or 1)
    Y_trim = (0*(S[g[0],:]==-1) + 1*(S[g[0],:]==1)) # Make outcome binary
    W_trim = W_tilde[g[0],:]
    # ------------------------------------------------------- #
    # - STEP 2: Compute Tetrad Logit Point Estimate         - #
    # ------------------------------------------------------- #
    [beta_TL, hess_TL, success_TL] = logit(Y_trim, W_trim, nocons=True, \
                                           silent=silent) # TL estimates of beta
    beta_TL = np.reshape(beta_TL,(-1,1))              # Put beta_TL into
                                                      # 2-dimensional form
    # ------------------------------------------------------- #
    # - STEP 3: Compute Variance-Covariance Matrix          - #
    # ------------------------------------------------------- #
    # ------------------------------------- #
    # - Compute covariance of the "score" - #
    # ------------------------------------- #
    # place full "score" vector, including non-contributing tetrads, into
    # a 3 (N choose 4) x 1 two dimensional scipy sparse matrix
    score = sp.sparse.coo_matrix((np.ravel(Y_trim - (1 + np.exp(-np.dot(W_trim,beta_TL)))**-1),\
                                 (g[0],g[1])), shape = (3*Nchoose4, 1), dtype='float64').tocsr()
    # Compute n x K matrix containing all three components of each dyad's contribution to the
    # projection of the "score". These correspond to the six non-redundant permutations of ij, kl
    # which enter the criterion function as described in Graham (2014, NBER). Note the use of
    # list comprehensions in what follows
    # -------------------------------------------------------------- #
    # - Compute n x K proj_score matrix using a list comprehension - #
    # -------------------------------------------------------------- #
    #---------------------------#
    #-  Serial implementation  -#
    #---------------------------#
    # pass in relevant score components to compute projection for each dyad
    proj_score = np.array([tetrad_logit_score_proj([score[tetrads,:].toarray(), W_tilde[tetrads,:]]) \
                           for dyad, tetrads in proj_tetrads_dict.iteritems()])/(n - 2*(N-1) + 1)
    # compute the covariance matrix of the score projection
    # (the (n-1)/(n-K) factor is a degrees-of-freedom correction)
    OMEGA_hat = np.cov(proj_score, rowvar=False)*((n-1)/(n-K))
    # ------------------------------------- #
    # - Compute inverse hessian matrix    - #
    # ------------------------------------- #
    iGAMMA_hat = np.linalg.inv(-hess_TL/Nchoose4)
    # Sandwich variance-covariance matrix estimate for beta_TL
    vcov_beta_TL = 36*np.dot(np.dot(iGAMMA_hat,OMEGA_hat),iGAMMA_hat)/n
    # ------------------------------------------------------- #
    # - STEP 4: Report estimation results                   - #
    # ------------------------------------------------------- #
    if not silent:
        print ""
        print "-------------------------------------------------------------------------------------------"
        print "-                       TETRAD LOGIT ESTIMATION RESULTS                                   -"
        print "-------------------------------------------------------------------------------------------"
        print ""
        print "Number of agents,   N        : " + "{:>15,.0f}".format(N)
        print "Number of dyads,    n        : " + "{:>15,.0f}".format(n)
        print "Number of tetrads            : " + "{:>15,.0f}".format(Nchoose4)
        print "Number identifying tetrads   : " + "{:>15,.0f}".format(tetrad_frac_TL*Nchoose4)
        print "Fraction identifying tetrads : " + "{:>15.6f}".format(tetrad_frac_TL)
        print ""
        print "-------------------------------------------------------------------------------------------"
        print_coef(beta_TL, vcov_beta_TL, W_names)
    return [beta_TL, vcov_beta_TL, tetrad_frac_TL, success_TL]
6626519 | <filename>blogs/xmlload/xmlload.py
import argparse
import logging
import apache_beam as beam
def parse_into_dict(xmlfile):
    """Read the XML file at *xmlfile* and return its contents as a nested dict
    (attribute keys are prefixed with '@' by xmltodict)."""
    import xmltodict
    with open(xmlfile) as handle:
        return xmltodict.parse(handle.read())
# BigQuery schema for the orders table. ShipInfo is a nested RECORD that
# mirrors the <ShipInfo> element of the XML; every leaf field is kept as a
# NULLABLE STRING (no type coercion is attempted at load time).
table_schema = {
    'fields': [
        {'name' : 'CustomerID', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'EmployeeID', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'OrderDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'RequiredDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        {'name' : 'ShipInfo', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [
            {'name' : 'ShipVia', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'Freight', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipName', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipAddress', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipCity', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipRegion', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipPostalCode', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name' : 'ShipCountry', 'type': 'STRING', 'mode': 'NULLABLE'},
            # Renamed from the XML attribute '@ShippedDate' by cleanup().
            {'name' : 'ShippedDate', 'type': 'STRING', 'mode': 'NULLABLE'},
        ]},
    ]
}
# The @ symbol is not allowed as a column name in BigQuery
def cleanup(x):
    """Return a deep copy of order dict *x* with the optional XML attribute
    '@ShippedDate' renamed to the BigQuery-legal column 'ShippedDate'.

    The '@' prefix comes from xmltodict's attribute convention and is not a
    valid BigQuery column name. The input dict is left unmodified.
    """
    import copy
    y = copy.deepcopy(x)
    if '@ShippedDate' in x['ShipInfo']: # optional attribute
        y['ShipInfo']['ShippedDate'] = x['ShipInfo']['@ShippedDate']
        del y['ShipInfo']['@ShippedDate']
    # Fix: removed leftover debug print(y) — it flooded worker stdout with one
    # line per order when the pipeline ran.
    return y
def get_orders(doc):
    """Yield each order record found in the parsed XML *doc*, with BigQuery
    column cleanup applied (see cleanup())."""
    orders = doc['Root']['Orders']['Order']
    for entry in orders:
        yield cleanup(entry)
def run(argv=None):
    """Build and run the Beam pipeline: parse orders.xml into order records
    and write them either to a text file or to a BigQuery table.

    The --output flag decides the sink: a value containing '.txt' selects the
    text sink, anything else is treated as a BigQuery table spec
    (project:dataset.table). Remaining args are passed through to Beam.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output',
        required=True,
        help=(
            'Specify text file orders.txt or BigQuery table project:dataset.table '))
    known_args, pipeline_args = parser.parse_known_args(argv)
    with beam.Pipeline(argv=pipeline_args) as p:
        # One element per input file; each file is parsed into a dict and then
        # flattened into individual (cleaned) order records.
        orders = (p
             | 'files' >> beam.Create(['orders.xml'])
             | 'parse' >> beam.Map(lambda filename: parse_into_dict(filename))
             | 'orders' >> beam.FlatMap(lambda doc: get_orders(doc)))
        if '.txt' in known_args.output:
            orders | 'totxt' >> beam.io.WriteToText(known_args.output)
        else:
            orders | 'tobq' >> beam.io.WriteToBigQuery(known_args.output,
                                       schema=table_schema,
                                       write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND, #WRITE_TRUNCATE
                                       create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)
if __name__ == '__main__':
    # INFO-level logging so pipeline progress is visible when run directly.
    logging.getLogger().setLevel(logging.INFO)
    run()
| StarcoderdataPython |
# Lists are mutable: assignment by index changes the element in place.
myList = [1, 2, 3]
myList[1] = 4
print(myList)
# b = a copies the reference, not the list: both names point to one object,
# so mutating through b is visible through a.
a = [1, 2]
b = a
b[0] = 3
print(a)
# Two separately-built lists are independent objects with equal contents;
# mutating one does not affect the other.
a = [1, 2]
b = [1, 2]
a[0] = 3
print(b)
# Rebinding `a` to a new list does not affect the object `b` still references.
a = [1, 2]
b = a
a = [3, 4]
print(b)
# Mutating a list received as an argument is visible to the caller...
def replaceFirst(myList):
    myList[0] = 'x'
nowList = list('abcdef')
replaceFirst(nowList)
print(nowList)
# ...but rebinding the parameter to a new object (the reversed copy) is not.
def reverseList(funcList):
    funcList = funcList[::-1]
mainList = list('abc')
reverseList(mainList)
print(mainList)
"""
К переменным типа список можно применять методы, перечислим некоторые из них:
Методы, не изменяющие список и возвращающие значение:
count(x) - подсчитывает число вхождений значения x в список. Работает за время O(N)
index(x) - находит позицию первого вхождения значения x в список. Работает за время O(N)
index(x, from) - находит позицию первого вхождения значения x в список, начиная с позиции from.
\Работает за время O(N)
Методы, не возвращающие значение, но изменяющие список:
append(x) - добавляет значение x в конец списка
extend(otherList) - добавляет все содержимое списка otherList в конец списка.
\В отличие от операции + изменяет объект
\к которому применен, а не создает новый
remove(x) - удаляет первое вхождение числа x в список. Работает за время O(N)
insert(index, x) - вставляет число x в список так, что оно оказывается на позиции index.
\Число, стоявшее на позиции index и все числа правее него сдвигаются на один вправо. Работает за время O(N)
reverse() - Разворачивает список (меняет значение по ссылке, а не создает новый список как myList[::-1]).
\Работает за время O(N)
Методы, возвращающие значение и изменяющие список:
pop() - возвращает последний элемент списка и удаляет его
pop(index) - возвращает элемент списка на позиции index и удаляет его. Работает за время O(N)
"""
# Keep only the even numbers from one line of input.
# Fix: the original popped elements while iterating range(len(numbers));
# removing items shifts the remaining elements left while the index keeps
# growing over the original length, which skips elements and ends in an
# IndexError. Building a filtered list avoids mutating during iteration.
numbers = list(map(int, input().split()))
numbers = [value for value in numbers if value % 2 == 0]
print(' '.join(map(str, numbers)))
# Variant 2: in-place removal with a while loop. The index only advances when
# no element was removed, so the shift caused by pop(i) is handled correctly.
numbers = list(map(int, input().split()))
i = 0
while i < len(numbers):
    if numbers[i] % 2 != 0:
        # pop(i) is O(N) per call, so this variant is O(N^2) in the worst case.
        numbers.pop(i)
    else:
        i += 1
print(' '.join(map(str, numbers)))
# Variant 3: build a new list of the even numbers instead of mutating the
# original — O(N) and safe.
numbers = list(map(int, input().split()))
newList = []
for i in range(len(numbers)):
    if numbers[i] % 2 == 0:
        newList.append(numbers[i])
print(' '.join(map(str, newList)))
345576 | from gameObject import GameObject
from pygame import Rect
# Handles all buttons.
class Button(GameObject):
    """A clickable rectangular UI element with an optional icon."""

    def __init__(self, x, y, width, height, icon=None):
        super().__init__(x, y, icon)
        # Hit-test rectangle covering the button's area.
        self._rect = Rect(x, y, width, height)
        self.width = width
        self.height = height

    # In:  point (x, y)
    # Out: boolean
    # Desc: True if (x, y) lies inside the button's rectangle.
    def inRect(self, point):
        return self._rect.collidepoint(point)

    # Desc: invoked on click; no-op by default, meant to be overridden.
    def clicked(self):
        pass

    # Out: Rect
    # Desc: returns the rectangle that makes up the button.
    def getRect(self):
        return self._rect
| StarcoderdataPython |
12816878 | """ CSeq C Sequentialization Framework
module stubs
written by <NAME>, University of Southampton.
CSeq's Translator modules are
built on top of pycparser, BSD licensed, by <NAME>,
pycparser embeds PLY, by <NAME>,
maintained by <NAME>, University of Southampton.
Naming conventions for introduced variables (use the following prefixes):
- __cs_ in general for any variable renamed or introduced in the translation
- __cs_tmp_ for additional temporary variables (for example, if conditions)
TODO:
- rename basic modules to Wrappers (to match other documentation)
- new module that takes C and returns a generic string,
not a translator, not a wrapper, but something to extract information on the files,
for example to list the names of functions, or to calculate source code complexity
according to some metric.
Changelog:
2017.08.23 fix for extracting line number from node coord (from pycparser 2.18+)
2015.06.29 fixed coords calculation for parsing errors
2015.01.07 added self.stacknodes as the stack of nodes in the AST currently being visited
2014.12.09 major reorganisation to accommodate new CSeq framework
2014.10.22 implemented linemapping (now disabled to improve performance and stability - it needs proper testing before releasing)
2014.10.15 implemented self.stack to examine the stack of calls to visit() (modules may use this)
2014.06.02 introduced specific module.ModuleError exception for detailed error handling
"""
import cProfile
import pstats
from core.parser import Parser
VERSION = 'module-1.0-2017.08.23'
# VERSION = 'module-0.0-2015.07.16'
# VERSION = 'module-0.0-2015.07.03'
# VERSION = 'module-0.0-2015.06.29'
# VERSION = 'module-0.0-2015.01.07'
# VERSION = 'module-0.0-2014.12.24' # CSeq-1.0beta
import os, re, sys, time
import pycparser.c_parser, pycparser.c_ast, pycparser.c_generator
from core import parser, utils
'''
Notes on line-mapping.
In general, using linemarkers from the beginning and
propagating them across modules,
so that each module would have references to the original input,
can work well.
However since pycparser does not handle line control directives,
we need some workaround to implement this mechanism.
This is a two-step process:
- step 1: while generating the output,
coord markers (see cpp line control)
are inserted whenever coord information is present.
So at a certain line in the output the code will look like:
# X // <-- marker
int k = 1239; // <-- line Y in the output
This step is performed in visit().
- step 2: once all the (marked) output is generated,
the module does not return its output yet, as
pycparser does not support line control.
The markers need to be removed from the output,
but before that, the information stored in them is used
to map input lines to output lines according
to the following mechanism.
The line number from the marker corresponds to the input line number.
The line below the marker is che corresponding line of the output =
(actual output line including markers) - (number of markers so far).
In the example,
statement (int k = 1239) from the output file
comes from line X of the input file.
This is done in generatelinenumbers() at the end of the visit of the AST.
TODO:
- ...
Changelog:
2015.07.16 add removelinenumbers to strip off fake include (Truc)
'''
''' Generic module error '''
class ModuleError(Exception):
    """Base exception for all CSeq module failures; carries the error payload."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
''' Module requests stopping the translation '''
# Raised by a module to abort the remaining module chain deliberately.
class ModuleChainBreak(ModuleError): pass

''' Error in module parameters '''
# Raised when a module's declared parameters are missing or malformed.
class ModuleParamError(ModuleError): pass
class ModuleParam():
    """Descriptor for one module parameter: id, help text, datatype, default
    value, and whether the parameter is optional."""

    def __init__(self, id, description='', datatype='', default='', optional=''):
        self.id = id
        self.description = description  # description displayed in the help screen by the front-end
        self.datatype = datatype        # type ('int', 'string') checked by the front-end
        self.default = default          # default value
        self.optional = optional        # optional (=True) or mandatory (=False)?

    def isflag(self):
        """A parameter without a datatype is a boolean flag."""
        return self.datatype in (None, '')

    def tostring(self):
        """One-line summary: id, 'description', default, datatype, optionality."""
        status = 'optional' if self.optional else 'mandatory'
        return "{}, '{}', {}, {}, {}".format(
            self.id, self.description, self.default, self.datatype, status)
class BasicModule():
    """Base class for CSeq modules.

    Holds the module's input/output text, the declared input/output parameter
    definitions, and provides colored console reporting helpers.
    """

    def __init__(self):
        # the CSeq environment (bound later by initParams())
        self.cseqenv = None
        # Input and output of the module
        self.input = ''
        self.output = ''
        # Parameter declarations (ModuleParam instances)
        self.inputparamdefs = []
        self.outputparamdefs = []

    def getversion(self):
        # NOTE: MODULE_VERSION is expected to be defined by the concrete subclass.
        return self.MODULE_VERSION

    def getname(self):
        """Return the concrete module's class name."""
        return self.__class__.__name__

    def loadfromstring(self, string, env):
        """Verify that every mandatory input parameter declared with
        addInputParam() has a value in the environment.

        Raises ModuleParamError when a mandatory parameter is missing.
        """
        for p in self.inputparamdefs:
            if not p.optional and p.id not in env.paramvalues:
                raise ModuleParamError('module \'%s\' requires parameter --%s.' % (self.getname(), p.id))

    def _report(self, tag, color, string):
        # Shared formatting previously triplicated across log/warn/error:
        # colored tag line, then the message indented to line up under the tag.
        taglen = len(tag) + 1
        print(color + tag + utils.colors.NO)
        print(('\n' + ' ' * taglen).join([l for l in string.split('\n')]))

    def log(self, string):
        self._report('log:', utils.colors.BLUE, string)

    def warn(self, string):
        self._report('warning:', utils.colors.YELLOW, string)

    def error(self, string):
        """Print the error and terminate the whole translation (exit code 1)."""
        self._report('error:', utils.colors.RED, string)
        sys.exit(1)

    def getoutput(self):
        return self.output

    def save(self, filename):
        # Context manager guarantees the file handle is closed even if the
        # write fails (the original left the handle open on error).
        with open(filename, "w") as outfile:
            outfile.write(self.output)

    ''' parameter handling '''
    def initParams(self, env):
        """Bind the environment and seed defaults for mandatory input params."""
        self.cseqenv = env
        # Initialise input parameters when needed
        for p in self.inputparamdefs:
            if not p.optional and p.default and p.id not in env.paramvalues:
                env.paramvalues[p.id] = p.default

    def addInputParam(self, id, description, datatype, default, optional):
        # TODO when adding duplicated input parameters,
        # field 'datatype' should be consistent;
        # all other attributes are left like on the 1st time.
        self.inputparamdefs.append(ModuleParam(id, description, datatype, default, optional))

    def addOutputParam(self, id, description='', datatype=''):
        self.outputparamdefs.append(ModuleParam(id, description, datatype))

    def getInputParamValue(self, id):
        """Return the value of input parameter `id`, or None when unset."""
        if id in self.cseqenv.paramvalues:
            return self.cseqenv.paramvalues[id]
        return None

    def setOutputParam(self, id, value):
        self.cseqenv.paramvalues[id] = value
class Translator(BasicModule, pycparser.c_generator.CGenerator):
    # Current indentation depth used when emitting code (in INDENT_SPACING units).
    indent_level = 0
    INDENT_SPACING = ' '
    # When True, linemarkers are emitted/tracked so counterexample traces can
    # be mapped back to the original input lines (set from env.cex).
    __produce_counter_examples = False
def __init__(self):
    """Set up a fresh Translator: parser singleton, coord tracking, visit
    stacks, and empty input<->output line maps."""
    super(Translator, self).__init__()
    # Parser module to generate the AST, the symbol table and other data structs.
    self.Parser = Parser.get_instance()
    self.Parser.reset()
    # Coords for the last AST node visited
    self.lastInputCoords = ''        # coords, example: ':245'
    self.currentInputLineNumber = 0  # line numbers extracted from coords
    # Coords last read from a linemarker
    self.lastinputfile = ''    # input file in the last linemarker
    self.lastinputlineno = 0   # line number since last linemarker
    self.lastoutputlineno = 0  # line number in output
    # Stacks of ongoing recursive visits
    self.stack = []       # class names, example: ['FileAST', 'FuncDef', 'Compound', 'Compound', 'If', 'BinaryOp', 'ArrayRef', 'ID']
    self.stacknodes = []  # AST nodes
    self.currentFunct = ''  # name of the function being parsed ('' = none)
    # Input line numbers already annotated with a linemarker (see visit()).
    self.lines = set()
    # Input<->Output linemaps
    self.inputtooutput = {}
    self.outputtoinput = {}
''' Returns the input line number
    mapping it back from the output of last module to the input of 1st module.
    Returns 0 if unable to map back the line number.
'''
def _mapbacklineno(self, lineno):
    # Note: since the same input line may correspond to
    # multiple lines in the final output,
    # the tracing has to be done backwards.
    #
    lastmodule = len(self.cseqenv.maps)
    nextkey = 0
    inputfile = ''
    if lineno in self.cseqenv.maps[len(self.cseqenv.maps) - 1]:
        # Walk the per-module linemaps from the last module back to the
        # first, following the chain of line numbers.
        firstkey = nextkey = lastkey = lineno  # NOTE(review): firstkey is never read
        for modno in reversed(range(0, lastmodule)):
            if nextkey in self.cseqenv.maps[modno] and nextkey != 0:
                lastkey = nextkey
                nextkey = self.cseqenv.maps[modno][nextkey]
            else:
                # Chain broken: mark the line as unmappable.
                nextkey = 0
            if modno == 0:
                # At the first module we can also recover the originating file.
                inputfile = self.cseqenv.outputtofiles[lastkey]
    return (nextkey, inputfile)
def _make_indent(self, delta=0):
    """Return the whitespace prefix for the current indent level plus *delta*."""
    return self.INDENT_SPACING * (self.indent_level + delta)
def _getCurrentCoords(self, item):
    """Return a linemarker ('# <lineno> "<previous_module>"') for AST node
    *item*, or '' when counterexample line mapping is disabled."""
    if not self.__produce_counter_examples:
        return ''
    linecoord = utils.removeColumnFromCoord(item.coord)
    ''' NOTE: uncomment instructions below to disable linemapping '''
    # return ''
    ''' NOTE: uncomment instructions below to enable linemapping '''
    # lineno = str(item.coord)[1:] if str(item.coord)[0] == ':' else -1 # not valid from pycparser 2.18+
    lineno = linecoord[1:] if linecoord[0] == ':' else -1
    return '# %s "<previous_module>"' % (lineno)
    # return '# %s \n' % (lineno)
def insertheader(self, h):
    """Prepend header text *h* to the output, shifting both line maps by the
    number of lines the header adds."""
    offset = h.count('\n')
    self.output = h + self.output
    if self.__produce_counter_examples:
        # Shift linemapping accordingly.
        for i in range(1, max(self.inputtooutput)):
            if i in self.inputtooutput:
                self.inputtooutput[i] += offset
        # Walk backwards so shifted entries are not overwritten before
        # they are moved; vacated slots are marked unmapped (-1).
        # for i in range(max(self.outputtoinput),1):
        for i in reversed(range(1, max(self.outputtoinput))):
            if i in self.outputtoinput:
                self.outputtoinput[i + offset] = self.outputtoinput[i]
                self.outputtoinput[i] = -1
def removelinenumbers(self):
    '''
    Strip off fake define include and recalculate line number.

    Removes everything between the _____STARTSTRIPPINGFROMHERE_____ and
    _____STOPSTRIPPINGFROMHERE_____ sentinel lines (inclusive) from
    self.output, then shifts the input<->output line maps to account for
    the removed region.
    '''
    s2 = ''
    status = 0           # 0 = before region, 1 = inside region, 2 = after region
    top = bottom = 0     # first/last line numbers of the stripped region
    for i, line in enumerate(self.output.split('\n')):
        if '_____STARTSTRIPPINGFROMHERE_____' in line:
            status = 1
            top = i + 1
            continue
        if '_____STOPSTRIPPINGFROMHERE_____' in line:
            status = 2
            bottom = i + 1
            continue
        if status == 0 or status == 2:
            s2 += line + '\n'
    offset = bottom - top + 1
    #   input file
    #  | region 1 | removed region | region 2
    #             ^     offset     ^
    #            top             bottom
    if self.__produce_counter_examples:
        # Shift linemapping accordingly.
        for i in reversed(range(1, max(self.inputtooutput))):
            if i in self.inputtooutput:
                if self.inputtooutput[i] > bottom:
                    # Shift back if output line in region 2
                    self.inputtooutput[i] -= offset
                elif self.inputtooutput[i] >= top:
                    # Map to -1 if output line in removed region
                    self.inputtooutput[i] = -1
        # #for i in range(max(self.outputtoinput),1):
        m = max(self.outputtoinput)
        for i in range(top, m):
            if (i + offset) in self.outputtoinput:
                self.outputtoinput[i] = self.outputtoinput[i + offset]
            elif i + offset > m:
                self.outputtoinput[i] = -1
    self.output = s2
def loadfromstring(self, string, env, fill_only_fields=None):
    """Parse *string*, visit the AST to produce the translated output, then
    normalise linemarkers and (when env.cex is set) build the line maps."""
    self.__produce_counter_examples = env.cex
    super(Translator, self).loadfromstring(string, env)
    self.input = string
    self.Parser.reset() # resets all the parser datastructs
    self.Parser.loadfromstring(string, fill_only_fields)
    self.ast = self.Parser.ast
    self.output = self.visit(self.ast)
    # Remove any linemarker indentation.
    newoutput = ''
    if self.__produce_counter_examples:
        for line in self.output.splitlines():
            newoutput += re.sub(r'(%s)*#' % self.INDENT_SPACING, r'#', line) + '\n'
    else:
        newoutput = self.output
    # Keep a copy that still contains the linemarkers.
    self.markedoutput = newoutput
    self.output = newoutput
    # Generate the linemap and remove linemarkers from self.output
    self.removeemptylines()
    if self.__produce_counter_examples:
        self.generatelinenumbers()
def getlinenumbertable(self):
    """Render the output->input line map, one 'input <- output' row per line."""
    rows = []
    for outline in range(1, self.lastoutputlineno + 1):
        if outline in self.outputtoinput:
            rows.append("%d <- %d\n" % (self.outputtoinput[outline], outline))
    return ''.join(rows)
def removeemptylines(self):
    """Drop whitespace-only lines from self.output; kept lines end with '\\n'."""
    nonblank = [line for line in self.output.splitlines() if line.strip() != '']
    self.output = ''.join(line + '\n' for line in nonblank)
def generatelinenumbers(self):
    ''' the difference with the std preprocessor linemapping (see merger.py) is that
        here we assume that when there are no linemarkers the output line
        always generates from the input coordinate fetched from the last linemarker found.
    '''
    inputlineno = 0       # actual input line number including line with markers
    inputmarkercnt = 0    # count the linemarkers in the input (each linemarker takes one line)
    cleanoutput = ''      # output without linemarkers
    for line in self.output.splitlines():
        inputlineno += 1
        if line.startswith('# '):
            inputmarkercnt += 1
            (self.lastinputlineno, self.lastinputfile, self.lastflag) = utils.linemarkerinfo(line)
        else:
            if line == '': # avoid mapping empty lines
                # (removeemptylines() runs first, so this should not trigger)
                pass
            else:
                # > > > Our line map < < <
                self.inputtooutput[self.lastinputlineno] = inputlineno - inputmarkercnt
                self.outputtoinput[inputlineno - inputmarkercnt] = self.lastinputlineno
                self.lastoutputlineno += 1
                # NOTE(review): only mapped (non-marker, non-empty) lines are
                # kept in the cleaned output — confirm intended placement.
                cleanoutput += line + '\n'
    self.output = cleanoutput
# Extract the coords from an error condition
#
def parseErrorCoords(self, error):
    """Normalize a parser error message into ':row:col', using -1 for any
    component that is not a parsable integer."""
    fields = str(error).split(':')
    def _component(index):
        # Mirror the original behaviour exactly: only ValueError is absorbed.
        try:
            return int(fields[index])
        except ValueError:
            return -1
    return ":%s:%s" % (_component(1), _component(2))
def getLineNo(self, error):
return int(self.parseErrorCoords(error).split(':')[1])
def getColumnNo(self, error):
return int(self.parseErrorCoords(error).split(':')[2])
    def visit_FuncDef(self, n):
        """Visit a function definition, tracking which function is current.

        self.currentFunct holds the name of the function being visited so
        that nested node visits can tell which function they belong to; it
        is cleared again once the function body has been emitted.
        """
        if n.decl.name: self.currentFunct = n.decl.name
        funcBlock = super(Translator, self).visit_FuncDef(n)
        if n.decl.name: self.currentFunct = ''
        return funcBlock
    def visit(self, node):
        """Visit *node*, maintaining the traversal stacks and prepending a
        coordinate annotation line when the node starts a new input line.

        Returns the generated source for the subtree, possibly prefixed by
        a '\\n<coords>\\n' marker produced by self._getCurrentCoords().
        """
        # NOTE(review): `method` is computed but never used here; dispatch is
        # done by super().visit(node).
        method = 'visit_' + node.__class__.__name__
        self.stack.append(node.__class__.__name__)
        self.stacknodes.append(node)
        lineCoords = ''
        # Extracts node coords where possible.
        #
        # This is to update the current coord (= filename+line number)
        # of the input being parsed, considering that:
        #
        # - on the same line of input, there may be more AST nodes (shouldn't enter duplicates)
        # - compound statement and empty statements have line number 0 (shouldn't update the current line)
        # - the same line of input may correspond to many lines of output
        #
        if hasattr(node, 'coord'):
            # Structs directly inside a Typedef are skipped: annotating them
            # breaks the line map (see comment below for another candidate case).
            if ((self.stack[-1] == 'Struct' and self.stack[-2] == 'Typedef') or  # typedef structs break linemap
                    False):
                # (len(self.stack)>=2 and self.stack[-1] != 'Compound' and self.stack[-2] == 'DoWhile')):
                pass
            elif node.coord:
                self.lastInputCoords = utils.removeColumnFromCoord(node.coord)
                # self.lastInputCoords = str(node.coord)   # not valid since pycparser 2.18
                # line number handling borrowed from CSeq-0.5,
                # linenumber = str(self.lastInputCoords)
                # linenumber = linenumber[linenumber.rfind(':')+1:]
                # lastInputCoords is ':<lineno>'; drop the leading colon.
                linenumber = self.lastInputCoords[1:]
                self.currentInputLineNumber = int(linenumber)
                # Each line of the output is annotated when
                # either it is coming from a new input line number
                # or the input line has generated many output lines,
                # in which case the annotation needs to be repeated at each line..
                #
                if self.currentInputLineNumber != 0 and self.currentInputLineNumber not in self.lines:
                    self.lines.add(
                        self.currentInputLineNumber)  # now until next input line is read, do not add further markers
                    lineCoords = '\n' + self._getCurrentCoords(node) + '\n'  # + '<-' + str(self.stack[-1]) + '\n'
        retval = lineCoords + super(Translator, self).visit(node)
        self.stack.pop()
        self.stacknodes.pop()
        return retval
| StarcoderdataPython |
1965776 | <reponame>aleloi/advent-of-code
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import parglare
# NOTE(review): file handle is never closed; a `with open(...)` would be safer.
desc = open("../inputs/input07.txt").readlines()
# parglare grammar for lines like:
#   "light red bags contain 1 bright white bag, 2 muted yellow bags."
grammar = r"""
LINE: COLOR 'contain' CONTENTS;
COLOR: word word BAG;
BAG: 'bag' | 'bags' ;
CONTENTS: 'no' 'other' 'bags' dot
| BAG_AMOUNT_LIST dot;
BAG_AMOUNT_LIST: number COLOR COMMA_AMOUNT_COLOR* ;
COMMA_AMOUNT_COLOR: ',' number COLOR;
terminals
word: /\w+/;
number: /\d+/;
dot: /[.]/;
"""
# Simplify the syntax tree:
# each LINE reduces to (color, [(amount, inner_color), ...]).
actions = {
    'LINE': lambda _, nodes: (nodes[0], nodes[2]),
    'COLOR': lambda _, nodes: "{} {}".format(nodes[0], nodes[1]),
    'CONTENTS': [lambda _, nodes: [],
                 lambda _, nodes: nodes[0]],
    'BAG_AMOUNT_LIST': lambda _, nodes: [(int(nodes[0]), nodes[1])] + nodes[2],
    'COMMA_AMOUNT_COLOR': lambda _, nodes: (int(nodes[1]), nodes[2])
}
g = parglare.Grammar.from_string(grammar)
parser = parglare.Parser(g, actions=actions)
# graph: color -> [(amount, contained_color)]; graph_rev: color -> {containers}.
graph = {}
graph_rev = collections.defaultdict(set)
for line in desc:
    color, contents = parser.parse(line)
    assert color not in graph
    graph[color] = contents
    for _, contents_col in contents:
        graph_rev[contents_col].add(color)
# A
contains_shiny_gold = set()
def dfs(n):
    """Flood-fill contains_shiny_gold with every bag reachable from *n*
    via the reverse containment graph (n itself included)."""
    pending = [n]
    while pending:
        node = pending.pop()
        if node in contains_shiny_gold:
            continue
        contains_shiny_gold.add(node)
        pending.extend(graph_rev[node])
# Part A: count colours that can (transitively) contain a shiny gold bag;
# the walk seeds 'shiny gold' itself, hence the -1.
dfs('shiny gold')
print("Answer A:", len(contains_shiny_gold)-1)
def count_tot(n):
    """Total number of bags that one bag of colour *n* amounts to,
    counting the bag itself."""
    return 1 + sum(qty * count_tot(inner) for qty, inner in graph[n])
print("Answer B:", count_tot('shiny gold') - 1)
| StarcoderdataPython |
12712 | # Standard library imports
import logging
import os
# Third party imports
import dash
import dash_bootstrap_components as dbc
from flask_caching import Cache
import plotly.io as pio
# Local application imports
from modules.gitlab import GitLab
import settings
# Initialize logging mechanism
logging.basicConfig(level=settings.LOGLEVEL, format=settings.LOGFORMAT)
logger = logging.getLogger(__name__)
# Module-level GitLab client shared by the app.
gl = GitLab()
logger.info("Current GitLab version: {}".format(GitLab.version))
# App instance
app = dash.Dash(__name__,
                suppress_callback_exceptions=True,
                external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = settings.APP_NAME
# App caching
# CACHE_CONFIG = {
#     # Note that filesystem cache doesn't work on systems with ephemeral
#     # filesystems like Heroku.
#     'CACHE_TYPE': 'filesystem',
#     'CACHE_DIR': 'cache-directory',
#     # should be equal to maximum number of users on the app at a single time
#     # higher numbers will store more data in the filesystem / redis cache
#     'CACHE_THRESHOLD': 200
# }
CACHE_CONFIG = {
    # try 'filesystem' if you don't want to setup redis
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': settings.REDIS_URL
}
# Flask-Caching is bound to the underlying Flask server, not the Dash wrapper.
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)
# Default template for all plotly figures created by this app.
pio.templates.default = "plotly_dark"
6423312 | import numpy as np
import transforms3d
# Human-readable name of each of the 24 SMPL joints, indexed by joint id.
smpl_keypoint_semantic = {
    0: 'root',
    1: 'llegroot',
    2: 'rlegroot',
    3: 'lowerback',
    4: 'lknee',
    5: 'rknee',
    6: 'upperback',
    7: 'lankle',
    8: 'rankle',
    9: 'thorax',
    10: 'ltoes',
    11: 'rtoes',
    12: 'lowerneck',
    13: 'lclavicle',
    14: 'rclavicle',
    15: 'upperneck',
    16: 'larmroot',
    17: 'rarmroot',
    18: 'lelbow',
    19: 'relbow',
    20: 'lwrist',
    21: 'rwrist',
    22: 'lhand',
    23: 'rhand'
}
# SMPL joint id -> corresponding ASF (Acclaim skeleton) bone name.
smpl_asf_map = {
    0: 'root',
    1: 'lfemur',
    2: 'rfemur',
    3: 'upperback',
    4: 'ltibia',
    5: 'rtibia',
    6: 'thorax',
    7: 'lfoot',
    8: 'rfoot',
    9: 'lowerneck',
    10: 'ltoes',
    11: 'rtoes',
    12: 'upperneck',
    13: 'lclavicle',
    14: 'rclavicle',
    15: 'head',
    16: 'lhumerus',
    17: 'rhumerus',
    18: 'lradius',
    19: 'rradius',
    20: 'lwrist',
    21: 'rwrist',
    22: 'lhand',
    23: 'rhand'
}
# Inverse of smpl_asf_map: ASF bone name -> SMPL joint id.
asf_smpl_map = {
    'root': 0,
    'lfemur': 1,
    'rfemur': 2,
    'upperback': 3,
    'ltibia': 4,
    'rtibia': 5,
    'thorax': 6,
    'lfoot': 7,
    'rfoot': 8,
    'lowerneck': 9,
    'ltoes': 10,
    'rtoes': 11,
    'upperneck': 12,
    'lclavicle': 13,
    'rclavicle': 14,
    'head': 15,
    'lhumerus': 16,
    'rhumerus': 17,
    'lradius': 18,
    'rradius': 19,
    'lwrist': 20,
    'rwrist': 21,
    'lhand': 22,
    'rhand': 23
}
class Joint:
    """One bone of an ASF (Acclaim) skeleton.

    Holds the bone's rest direction/length, its rotation-axis frame C,
    per-axis dof limits, and — after set_motion()/reset_pose() — the
    world-space rotation `matrix` and far-end `coordinate`.
    """
    def __init__(self, name, direction, length, axis, dof, limits):
        self.name = name
        self.direction = np.matrix(direction)
        self.length = length
        # C maps from the joint's local rotation axes to the parent frame.
        axis = np.deg2rad(axis)
        self.C = np.matrix(transforms3d.euler.euler2mat(*axis))
        self.Cinv = np.linalg.inv(self.C)
        # limits[i] is the (min, max) range for rotation about axis i (x, y, z).
        self.limits = np.zeros([3, 2])
        # NOTE(review): True when the joint has NO dof entries — the name
        # suggests the opposite; confirm intended semantics.
        self.movable = len(dof) == 0
        for lm, nm in zip(limits, dof):
            if nm == 'rx':
                self.limits[0] = lm
            elif nm == 'ry':
                self.limits[1] = lm
            else:
                self.limits[2] = lm
        self.parent = None
        self.children = []
        # bone's far end's cooridnate
        self.coordinate = None
        self.matrix = None
        self.relative_R = None
    def set_motion(self, motion):
        """Apply one AMC motion frame (dict: joint name -> dof values) and
        recursively update world rotation and coordinates down the tree."""
        if self.name == 'root':
            self.coordinate = np.array(motion['root'][:3])
            rotation = np.deg2rad(motion['root'][3:])
            # Rotation expressed in the joint's axis frame: C * R * C^-1.
            self.matrix = self.C * np.matrix(transforms3d.euler.euler2mat(*rotation)) * self.Cinv
            self.relative_R = np.array(self.matrix)
        else:
            # set rx ry rz according to degree of freedom
            idx = 0
            rotation = np.zeros(3)
            for axis, lm in enumerate(self.limits):
                if not np.array_equal(lm, np.zeros(2)):
                    rotation[axis] = motion[self.name][idx]
                    idx += 1
            rotation = np.deg2rad(rotation)
            self.relative_R = np.array(self.C * np.matrix(transforms3d.euler.euler2mat(*rotation)) * self.Cinv)
            self.matrix = self.parent.matrix * np.matrix(self.relative_R)
            self.coordinate = np.squeeze(np.array(np.reshape(self.parent.coordinate, [3, 1]) + self.length * self.matrix * np.reshape(self.direction, [3, 1])))
        for child in self.children:
            child.set_motion(motion)
    def to_dict(self):
        """Flatten the subtree rooted here into a {name: Joint} dict."""
        ret = {self.name: self}
        for child in self.children:
            ret.update(child.to_dict())
        return ret
    def reset_pose(self):
        """Put the subtree back into the rest pose (identity rotations)."""
        if self.name == 'root':
            self.coordinate = np.zeros(3)
        else:
            self.coordinate = self.parent.coordinate + self.length * np.squeeze(np.array(self.direction))
        self.relative_R = np.eye(3)
        self.matrix = np.matrix(self.relative_R)
        for child in self.children:
            child.reset_pose()
    def pretty_print(self):
        """Dump this joint's fields for debugging."""
        print('===================================')
        print('joint: %s' % self.name)
        print('direction:')
        print(self.direction)
        print('limits:', self.limits)
        print('parent:', self.parent)
        print('children:', self.children)
class SMPLJoints:
    """One node of a SMPL kinematic tree.

    `align_R` is a fixed alignment rotation, `motion_R` an accumulated
    (root-to-joint) motion rotation, `to_parent` the rest-pose offset from
    the parent joint; `coordinate` is the joint's current world position.
    """
    def __init__(self, idx):
        self.idx = idx
        self.to_parent = None
        self.parent = None
        self.coordinate = None
        self.matrix = None
        self.children = []
        self.align_R = np.eye(3)
        self.motion_R = None
    def init_bone(self):
        """Record the rest-pose offset from the parent (no-op for the root)."""
        if self.parent is not None:
            self.to_parent = self.coordinate - self.parent.coordinate
    def set_motion_R(self, motion):
        """Compose per-joint rotations (motion[idx]) down the tree so that
        motion_R becomes the accumulated root-to-joint rotation."""
        self.motion_R = motion[self.idx]
        if self.parent is not None:
            self.motion_R = self.parent.motion_R.dot(self.motion_R)
        for child in self.children:
            child.set_motion_R(motion)
    def update_coord(self):
        """Recompute world coordinates top-down from the parents' rotations.
        Call after set_motion_R(); the root keeps its current coordinate."""
        if self.parent is not None:
            absolute_R = self.parent.motion_R.dot(self.parent.align_R)
            self.coordinate = self.parent.coordinate + np.squeeze(absolute_R.dot(np.reshape(self.to_parent, [3,1])))
        for child in self.children:
            child.update_coord()
    def to_dict(self):
        """Flatten the subtree rooted here into a {idx: SMPLJoints} dict."""
        ret = {self.idx: self}
        for child in self.children:
            ret.update(child.to_dict())
        return ret
    def export_G(self):
        """Return the joint's 4x4 homogeneous world transform."""
        G = np.zeros([4, 4])
        G[:3,:3] = self.motion_R.dot(self.align_R)
        G[:3,3] = self.coordinate
        G[3,3] = 1
        return G
    def export_theta(self):
        """Return the joint's rotation relative to its parent as an
        axis-angle vector (SMPL pose parameter)."""
        self_relative_G = None
        if self.parent is None:
            self_relative_G = self.export_G()[:3,:3]
        else:
            parent_G = self.parent.export_G()[:3,:3]
            self_G = self.export_G()[:3,:3]
            # parent_G * relative_G = self_G
            self_relative_G = np.linalg.inv(parent_G).dot(self_G)
        ax, rad = transforms3d.axangles.mat2axangle(self_relative_G)
        ax = ax[:3]
        axangle = ax / np.linalg.norm(ax) * rad
        return axangle
def setup_smpl_joints(smpl, rescale=True):
    """Build the SMPLJoints tree for a SMPL model.

    Wires up parent/child links from smpl.parent, seeds each joint's rest
    coordinate from smpl.J (optionally rescaled by 10/0.45), and records
    the rest-pose bone offsets.  Returns a dict {joint_idx: SMPLJoints}.
    """
    joints = {idx: SMPLJoints(idx) for idx in range(24)}
    for child, parent in smpl.parent.items():
        joints[child].parent = joints[parent]
        joints[parent].children.append(joints[child])
    rest_positions = smpl.J / 0.45 * 10 if rescale else smpl.J
    for joint in joints.values():
        joint.coordinate = rest_positions[joint.idx]
    for joint in joints.values():
        joint.init_bone()
    return joints
| StarcoderdataPython |
6690784 | # Create your views here.
from rest_framework.views import APIView
from ..models import Operation, Feature, DataTable
from main.response_processing import get_success_response
def find_object(name):
    """Look *name* up among Operation, Feature and DataTable models, in
    that priority order, returning a small descriptor dict.

    Falls back to a synthetic 'join' descriptor when nothing matches.
    (Refactor: the three copy-pasted lookups are collapsed into one
    data-driven loop; returned values — including the historical plural
    'features' type label — are unchanged.)
    """
    lookups = (
        (Operation, 'operation'),
        (Feature, 'features'),
        (DataTable, 'data_table'),
    )
    for model, type_label in lookups:
        matches = model.objects.filter(name=name)
        if matches:
            obj = matches[0]
            return {
                "name": obj.name,
                "id": obj.id,
                "type": type_label,
            }
    return {
        "name": name,
        "id": 'join',
        "type": 'join',
    }
class UserView(APIView):
    """Turns a posted node-graph (editor format) into a flat mapping of
    node name -> {id, type, connections} with resolved object info."""
    def post(self, request):
        data = request.data
        nodes = data['nodes']
        # node key -> descriptor dict from find_object(), used to resolve
        # cross-references in the second pass below.
        nodes_in_objects = {}
        output = {}
        for key, node in nodes.items():
            output[node['name']] = {}
            obj = find_object(node['name'])
            nodes_in_objects[key] = obj
            output[node['name']]['id'] = obj['id']
            output[node['name']]['type'] = obj['type']
            # Collect outgoing connections: (target node key, input field name).
            outputs = []
            for key1, output_o in node['outputs'].items():
                connections = output_o['connections']
                if connections:
                    for connection in connections:
                        outputs.append({
                            "node": connection['node'],
                            "name_field": connection['input'],
                        })
            # Collect incoming connections: (source node key, output field name).
            inputs = []
            for key1, output_i in node['inputs'].items():
                connections = output_i['connections']
                if connections:
                    for connection in connections:
                        inputs.append({
                            "node": connection['node'],
                            "name_field": connection['output'],
                        })
            output[node['name']]['connections'] = {}
            output[node['name']]['connections']['inputs'] = inputs
            output[node['name']]['connections']['output'] = outputs
        # Second pass: replace raw node keys with the looked-up object info.
        for key, dates in output.items():
            connections = dates['connections']
            for input_data in connections['inputs']:
                if input_data.get('node'):
                    input_data['object_information'] = nodes_in_objects[str(input_data['node'])]
                    del input_data['node']
            for output_data in connections['output']:
                if output_data.get('node'):
                    output_data['object_information'] = nodes_in_objects[str(output_data['node'])]
                    del output_data['node']
        return get_success_response(output)
| StarcoderdataPython |
11286621 | from .fingerprint import Fingerprinter
from .kgram import KGrams, Buffer
class Winnower(Fingerprinter):
    """Fingerprinter implementing the winnowing algorithm: slide a window
    over the k-gram hashes and keep each window's minimum (rightmost on
    ties), yielding a fingerprint only when the minimum changes."""
    def __init__(self, parser_factory, window_size, k):
        super().__init__(parser_factory)
        self.window_size = window_size  # number of k-grams per window
        self.k = k                      # k-gram length
    @property
    def kgramifier(self):
        # Can be overriden to change the default hash function
        return KGrams.kgramify
    def extract_fingerprints_(self, token_iterator):
        """Yield (location, kgram) fingerprints selected by winnowing."""
        window = Buffer(self.window_size)
        # NOTE(review): selected_grams is accumulated but never read or
        # returned — looks like leftover debugging state.
        selected_grams = []
        min_gram = None
        for location, kgram in self.kgramifier(token_iterator, self.k):
            window.put(kgram)
            if window.is_full():
                # Note: using built-in `min` should be much faster than
                # re-impl. it. Moreover, the window is expected to be small
                # and the cost of deriving and inverting an array should be
                # small.
                # `min` keeps the leftmost minima:
                # >> min([(1, 1), (1, 2)], key=lambda x:x[0])
                # (1, 1)
                # Reversing first therefore selects the RIGHTMOST minimum,
                # as winnowing requires.
                window_min = min(list(window)[::-1], key=hash)
                # Identity (`is not`) comparison: only emit when the min
                # *object* changed, not merely an equal-valued k-gram.
                if window_min is not min_gram:
                    selected_grams.append(window_min)
                    min_gram = window_min
                    yield location, window_min
| StarcoderdataPython |
5058658 | <reponame>jolitp/automation_scripts<filename>old/multiple_files_operations/get_all_videos_in_a_directory/tests/unit_tests_get_all_videos_in_a_directory.py
#! /usr/bin/python3
"""
tests for .py
"""
import unittest
import importlib.util # needed for importing scripts using the scripts path
# Load the script under test directly from its file path (it is not an
# installed package, hence the importlib machinery).
# NOTE(review): hard-coded absolute path — this only runs on the author's
# machine; consider deriving it from __file__.
# cSpell:disable
python_scripts_folder_path : str = "/home/jolitp/Projects/automation_scripts/"
# cSpell:enable
subfolder : str = "src/multiple_files_operations/get_all_videos_in_a_directory/"
spec = importlib.util.spec_from_file_location("get_all_videos_in_a_directory",
    python_scripts_folder_path + subfolder + "get_all_videos_in_a_directory.py")
get_all_videos_in_a_directory_script = importlib.util.module_from_spec(spec)
spec.loader.exec_module(get_all_videos_in_a_directory_script)
class UnitTest_get_all_videos_in_a_directory(unittest.TestCase):
    """
    Unit tests for get_all_videos_in_a_directory.py.
    """
    # region tests (...):
    # region def (...):
    def test_given_a_relative_path_should_raise_ValueError(self):
        """
        when the function get_all_videos_in_a_directory(...)
        is given a relative path it should raise a ValueError exception.
        """
        # setup
        relative_path : str = "path"
        # act
        with self.assertRaises(ValueError) as error:
            get_all_videos_in_a_directory_script \
                .get_all_videos(relative_path)
        # assert
        self.assertTrue("directory_path must be an absolute path" in str(error.exception))
    # endregion def (...):
    # endregion tests (...):
# Script entry point: run the test suite when executed directly.
if __name__ == "__main__":
    print("get_all_videos_in_a_directory.__main__")
    unittest.main()
| StarcoderdataPython |
4952594 | <gh_stars>0
#!/usr/bin/env python
# Check that the expected (or actual) snippets are in the manuscript. E.g.
# bin/check_manuscript.py ~/book-workspace/htdg-git/ch16-pig.xml expected/ch16-pig/grunt/*
import sys
# Python 2 script (print statements below).
# argv[1] is the manuscript file; argv[2:] are snippet files to check.
# NOTE(review): file handles are never closed.
manuscript = open(sys.argv[1], 'r').read()
for snippet_file in sys.argv[2:]:
    lines = open(snippet_file, 'r').readlines()
    if lines[0].startswith("<!--"):
        doc = "".join(lines[1:]) # remove first line if a comment
    else:
        doc = "".join(lines[0:])
    snippet = doc.strip()
    # Report only the snippets that are missing from the manuscript.
    index = manuscript.find(snippet)
    if index == -1:
        print "Snippet not found", snippet_file
    #else:
    #    print "Snippet found", snippet_file
| StarcoderdataPython |
3505380 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
# Ages sample (Portuguese: "idade" = age).
idade= ([19, 21, 23, 25, 25, 29, 31, 33, 35, 37, 39, 41, 31, 19,
40, 34, 28, 32, 29, 34, 27, 27, 36, 29, 37, 31, 29, 33,
34, 39, 26, 27, 37, 33, 38, 34, 33, 29, 36, 28, 27, 34,
28, 27, 30, 28, 37, 37, 32, 36, 34, 38, 29, 30, 20, 30,
31, 25, 32, 27, 28, 38, 29, 28, 33, 37, 40, 41, 40, 27,
30, 27, 25, 25, 29, 25, 39, 29, 39, 24, 25, 28, 24, 29,
29, 24, 24, 28, 31, 36, 24, 24, 33, 34, 31, 28, 24, 30,
31, 37, 17, 30, 27, 32, 35, 26, 26, 34, 33, 25, 24, 32,
32, 22, 30, 25, 32, 25, 21, 20, 30, 29, 18, 23, 23, 35,
20, 18, 27, 29, 17, 35, 17, 21, 28, 17, 23, 25, 24, 23,
20, 29, 22, 21, 22, 26, 19, 24, 25, 22, 19, 23, 18, 22,
35, 30, 28, 27, 29, 29, 22, 25, 22, 29, 26, 22, 19, 22,
33, 24, 29, 28, 19, 26, 29, 19, 31, 21, 21, 26, 31, 29])
# Sample size
tamanho= len(idade)
# Number of classes (bins), square-root rule.
# NOTE(review): assumes Python 3 — under Python 2, 1/2 is 0 and the
# exponent becomes 0, breaking the bin count.
cl = int(round(tamanho**(1/2),0))
plt.title("Histograma de Idades")
plt.xlabel("Idades")
plt.ylabel("Frequências")
# `range` is a tuple bounding the age interval; `alpha` is the colour saturation.
plt.hist(idade, bins = cl, range = ( min(idade), max(idade)), alpha = 0.6, color = 'g')
plt.tight_layout()
plt.show()
1631815 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class WishlistsConfig(AppConfig):
    """Django app configuration for Oscar's wishlists app."""
    label = 'wishlists'  # short app label used in model/migration references
    name = 'oscar.apps.wishlists'
    verbose_name = _('Wishlists')  # translatable display name for the admin
| StarcoderdataPython |
6522915 | import pandas as pd
import numpy as np
import re
import os
import fit_least_square_regression as flsr
class parse_regression_coef_c:
    """Static helpers for reading least-squares regression coefficient
    files: variance, zeroth-, first- and second-order coefficients stored
    as tab-separated rows of 4 comma-encoded fields."""

    @staticmethod
    def read_pathway_as_pandas_frame_object(filename):
        """Read a tab-separated coefficient file into a one-column
        DataFrame (column name 'number')."""
        return pd.read_csv(filename, delimiter="\t", names=["number"])

    @staticmethod
    def convert_pandas_frame_object_to_list(pathway_data_in):
        """Drop comment rows (those containing '#') and reshape the
        remaining entries into rows of 4 fields:
        [variance, zeroth, first-order csv, second-order csv]."""
        # get rid of "#"
        mask = pathway_data_in.number.str.contains("#")
        pathway_data = pathway_data_in[~mask]
        return np.array(pathway_data.number).reshape((-1, 4))

    @staticmethod
    def var_zero_first_second(coef_list, ith, in_Nth_order_1st=5, in_Nth_order_2nd=2):
        """Parse row *ith* of *coef_list* into
        (variance, zeroth, first, second); `first` is reshaped to
        (-1, in_Nth_order_1st) and `second` to (-1, in_Nth_order_2nd**2)."""
        coef_ith = coef_list[ith]
        var = float(coef_ith[0])
        zeroth = float(coef_ith[1])
        # Bug fix: the original passed a Python 3 `map` iterator straight to
        # np.array, which yields a 0-d object array that cannot be reshaped.
        # Use list comprehensions, consistent with
        # var_zero_first_second_in_a_list below.
        first_t = [float(x) for x in re.split(",", coef_ith[2])]
        first = np.array(first_t).reshape((-1, in_Nth_order_1st))
        second_t = [float(x) for x in re.split(",", coef_ith[3])]
        second = np.array(second_t).reshape(
            (-1, in_Nth_order_2nd * in_Nth_order_2nd))
        return var, zeroth, first, second

    @staticmethod
    def var_zero_first_second_in_a_list(coef_list, ith):
        """Parse row *ith* of *coef_list*, returning the variance and a
        single flat list [zeroth] + first-order + second-order."""
        coef_ith = coef_list[ith]
        var = float(coef_ith[0])
        zeroth = float(coef_ith[1])
        first_t = [float(x) for x in re.split(",", coef_ith[2])]
        second_t = [float(x) for x in re.split(",", coef_ith[3])]
        return var, [zeroth] + first_t + second_t

    @staticmethod
    def get_var_zero_first_second_coef(data_dir, s_a_s=None):
        """Read <data_dir>/output/fit_coef.inp and return the target
        variance plus the zeroth/first/second order coefficient arrays
        split per s_a_s settings.  Returns None when s_a_s is None.
        """
        if s_a_s is None:
            return
        f_n_coef = os.path.join(data_dir, "output", "fit_coef.inp")
        var_coef_frame_obj = parse_regression_coef_c.read_pathway_as_pandas_frame_object(
            f_n_coef)
        var_coef_list = parse_regression_coef_c.convert_pandas_frame_object_to_list(
            var_coef_frame_obj)
        var_target, fit_coef = parse_regression_coef_c.var_zero_first_second_in_a_list(
            var_coef_list, 0)
        zero_order_coef, first_order_coef, second_order_coef = \
            flsr.fit_1D_2D_all_c.split_1D_coef_array_static(
                fit_coef, s_a_s['N_variable'], s_a_s['Nth_order_1st'], s_a_s['Nth_order_2nd'])
        return var_target, zero_order_coef, first_order_coef, second_order_coef
| StarcoderdataPython |
4904399 | <reponame>panchiwalashivani/python<filename>tests/functional/test_remove_channel_from_cg.py<gh_stars>1-10
import unittest
from pubnub.endpoints.channel_groups.remove_channel_from_channel_group import RemoveChannelFromChannelGroup
try:
from mock import MagicMock
except ImportError:
from unittest.mock import MagicMock
from pubnub.pubnub import PubNub
from tests.helper import pnconf, sdk_name
from pubnub.managers import TelemetryManager
class TestRemoveChannelToChannelGroup(unittest.TestCase):
    """Unit tests for RemoveChannelFromChannelGroup URL/parameter building,
    driven through a MagicMock PubNub instance (no network access)."""
    def setUp(self):
        # A PubNub double carrying just the config/identity the endpoint reads.
        self.pubnub = MagicMock(
            spec=PubNub,
            config=pnconf,
            sdk_name=sdk_name,
            uuid=None
        )
        self.pubnub.uuid = "UUID_RemoveChannelToCGTest"
        self.pubnub._telemetry_manager = TelemetryManager()
        self.remove = RemoveChannelFromChannelGroup(self.pubnub)
    def test_remove_single_channel(self):
        """A single channel ends up in the 'remove' query parameter as-is."""
        self.remove.channels('ch').channel_group('gr')
        self.assertEqual(self.remove.build_path(),
                         RemoveChannelFromChannelGroup.REMOVE_PATH % (
                             pnconf.subscribe_key, "gr"))
        self.assertEqual(self.remove.build_params_callback()({}), {
            'pnsdk': sdk_name,
            'uuid': self.pubnub.uuid,
            'remove': "ch"
        })
        self.assertEqual(self.remove._channels, ['ch'])
    def test_remove_multiple_channels(self):
        """Multiple channels are joined with commas in the 'remove' parameter."""
        self.remove.channels(['ch1', 'ch2']).channel_group('gr')
        self.assertEqual(self.remove.build_path(),
                         RemoveChannelFromChannelGroup.REMOVE_PATH % (
                             pnconf.subscribe_key, "gr"))
        self.assertEqual(self.remove.build_params_callback()({}), {
            'pnsdk': sdk_name,
            'uuid': self.pubnub.uuid,
            'remove': "ch1,ch2"
        })
        self.assertEqual(self.remove._channels, ['ch1', 'ch2'])
| StarcoderdataPython |
366581 | <filename>js/angular/__init__.py<gh_stars>0
from fanstatic import Library, Resource
# Fanstatic library: serves the AngularJS assets bundled under 'resources'.
library = Library('angularjs', 'resources')
# Core angular plus the optional modules, each depending on the core script.
angular = Resource(library, 'angular.js', minified='angular.min.js')
angular_animate = Resource(
    library, 'angular-animate.js',
    minified='angular-animate.min.js', depends=[angular])
angular_cookies = Resource(
    library, 'angular-cookies.js',
    minified='angular-cookies.min.js', depends=[angular])
angular_loader = Resource(
    library, 'angular-loader.js',
    minified='angular-loader.min.js')
angular_mocks = Resource(
    library, 'angular-mocks.js', depends=[angular])
angular_resource = Resource(
    library, 'angular-resource.js',
    minified='angular-resource.min.js', depends=[angular])
angular_route = Resource(
    library, 'angular-route.js',
    minified='angular-route.min.js', depends=[angular])
angular_sanitize = Resource(
    library, 'angular-sanitize.js',
    minified='angular-sanitize.min.js', depends=[angular])
angular_scenario = Resource(library, 'angular-scenario.js')
# Filled below: locale code -> locale Resource.
locales = {}
_langs = [
'af-na',
'af-za',
'af',
'agq-cm',
'agq',
'ak-gh',
'ak',
'am-et',
'am',
'ar-001',
'ar-ae',
'ar-bh',
'ar-dj',
'ar-dz',
'ar-eg',
'ar-eh',
'ar-er',
'ar-il',
'ar-iq',
'ar-jo',
'ar-km',
'ar-kw',
'ar-lb',
'ar-ly',
'ar-ma',
'ar-mr',
'ar-om',
'ar-ps',
'ar-qa',
'ar-sa',
'ar-sd',
'ar-so',
'ar-ss',
'ar-sy',
'ar-td',
'ar-tn',
'ar-xb',
'ar-ye',
'ar',
'as-in',
'as',
'asa-tz',
'asa',
'ast-es',
'ast',
'az-cyrl-az',
'az-cyrl',
'az-latn-az',
'az-latn',
'az',
'bas-cm',
'bas',
'be-by',
'be',
'bem-zm',
'bem',
'bez-tz',
'bez',
'bg-bg',
'bg',
'bm-ml',
'bm',
'bn-bd',
'bn-in',
'bn',
'bo-cn',
'bo-in',
'bo',
'br-fr',
'br',
'brx-in',
'brx',
'bs-cyrl-ba',
'bs-cyrl',
'bs-latn-ba',
'bs-latn',
'bs',
'ca-ad',
'ca-es-valencia',
'ca-es',
'ca-fr',
'ca-it',
'ca',
'ce-ru',
'ce',
'cgg-ug',
'cgg',
'chr-us',
'chr',
'ckb-arab-iq',
'ckb-arab-ir',
'ckb-arab',
'ckb-iq',
'ckb-ir',
'ckb-latn-iq',
'ckb-latn',
'ckb',
'cs-cz',
'cs',
'cu-ru',
'cu',
'cy-gb',
'cy',
'da-dk',
'da-gl',
'da',
'dav-ke',
'dav',
'de-at',
'de-be',
'de-ch',
'de-de',
'de-it',
'de-li',
'de-lu',
'de',
'dje-ne',
'dje',
'dsb-de',
'dsb',
'dua-cm',
'dua',
'dyo-sn',
'dyo',
'dz-bt',
'dz',
'ebu-ke',
'ebu',
'ee-gh',
'ee-tg',
'ee',
'el-cy',
'el-gr',
'el',
'en-001',
'en-150',
'en-ag',
'en-ai',
'en-as',
'en-at',
'en-au',
'en-bb',
'en-be',
'en-bi',
'en-bm',
'en-bs',
'en-bw',
'en-bz',
'en-ca',
'en-cc',
'en-ch',
'en-ck',
'en-cm',
'en-cx',
'en-cy',
'en-de',
'en-dg',
'en-dk',
'en-dm',
'en-er',
'en-fi',
'en-fj',
'en-fk',
'en-fm',
'en-gb',
'en-gd',
'en-gg',
'en-gh',
'en-gi',
'en-gm',
'en-gu',
'en-gy',
'en-hk',
'en-ie',
'en-il',
'en-im',
'en-in',
'en-io',
'en-iso',
'en-je',
'en-jm',
'en-ke',
'en-ki',
'en-kn',
'en-ky',
'en-lc',
'en-lr',
'en-ls',
'en-mg',
'en-mh',
'en-mo',
'en-mp',
'en-ms',
'en-mt',
'en-mu',
'en-mw',
'en-my',
'en-na',
'en-nf',
'en-ng',
'en-nl',
'en-nr',
'en-nu',
'en-nz',
'en-pg',
'en-ph',
'en-pk',
'en-pn',
'en-pr',
'en-pw',
'en-rw',
'en-sb',
'en-sc',
'en-sd',
'en-se',
'en-sg',
'en-sh',
'en-si',
'en-sl',
'en-ss',
'en-sx',
'en-sz',
'en-tc',
'en-tk',
'en-to',
'en-tt',
'en-tv',
'en-tz',
'en-ug',
'en-um',
'en-us-posix',
'en-us',
'en-vc',
'en-vg',
'en-vi',
'en-vu',
'en-ws',
'en-xa',
'en-za',
'en-zm',
'en-zw',
'en',
'eo-001',
'eo',
'es-419',
'es-ar',
'es-bo',
'es-br',
'es-bz',
'es-cl',
'es-co',
'es-cr',
'es-cu',
'es-do',
'es-ea',
'es-ec',
'es-es',
'es-gq',
'es-gt',
'es-hn',
'es-ic',
'es-mx',
'es-ni',
'es-pa',
'es-pe',
'es-ph',
'es-pr',
'es-py',
'es-sv',
'es-us',
'es-uy',
'es-ve',
'es',
'et-ee',
'et',
'eu-es',
'eu',
'ewo-cm',
'ewo',
'fa-af',
'fa-ir',
'fa',
'ff-cm',
'ff-gn',
'ff-mr',
'ff-sn',
'ff',
'fi-fi',
'fi',
'fil-ph',
'fil',
'fo-dk',
'fo-fo',
'fo',
'fr-be',
'fr-bf',
'fr-bi',
'fr-bj',
'fr-bl',
'fr-ca',
'fr-cd',
'fr-cf',
'fr-cg',
'fr-ch',
'fr-ci',
'fr-cm',
'fr-dj',
'fr-dz',
'fr-fr',
'fr-ga',
'fr-gf',
'fr-gn',
'fr-gp',
'fr-gq',
'fr-ht',
'fr-km',
'fr-lu',
'fr-ma',
'fr-mc',
'fr-mf',
'fr-mg',
'fr-ml',
'fr-mq',
'fr-mr',
'fr-mu',
'fr-nc',
'fr-ne',
'fr-pf',
'fr-pm',
'fr-re',
'fr-rw',
'fr-sc',
'fr-sn',
'fr-sy',
'fr-td',
'fr-tg',
'fr-tn',
'fr-vu',
'fr-wf',
'fr-yt',
'fr',
'fur-it',
'fur',
'fy-nl',
'fy',
'ga-ie',
'ga',
'gd-gb',
'gd',
'gl-es',
'gl',
'gsw-ch',
'gsw-fr',
'gsw-li',
'gsw',
'gu-in',
'gu',
'guz-ke',
'guz',
'gv-im',
'gv',
'ha-gh',
'ha-ne',
'ha-ng',
'ha',
'haw-us',
'haw',
'he-il',
'he',
'hi-in',
'hi',
'hr-ba',
'hr-hr',
'hr',
'hsb-de',
'hsb',
'hu-hu',
'hu',
'hy-am',
'hy',
'id-id',
'id',
'ig-ng',
'ig',
'ii-cn',
'ii',
'in',
'is-is',
'is',
'it-ch',
'it-it',
'it-sm',
'it-va',
'it',
'iw',
'ja-jp',
'ja',
'jgo-cm',
'jgo',
'jmc-tz',
'jmc',
'ka-ge',
'ka',
'kab-dz',
'kab',
'kam-ke',
'kam',
'kde-tz',
'kde',
'kea-cv',
'kea',
'khq-ml',
'khq',
'ki-ke',
'ki',
'kk-kz',
'kk',
'kkj-cm',
'kkj',
'kl-gl',
'kl',
'kln-ke',
'kln',
'km-kh',
'km',
'kn-in',
'kn',
'ko-kp',
'ko-kr',
'ko',
'kok-in',
'kok',
'ks-in',
'ks',
'ksb-tz',
'ksb',
'ksf-cm',
'ksf',
'ksh-de',
'ksh',
'kw-gb',
'kw',
'ky-kg',
'ky',
'lag-tz',
'lag',
'lb-lu',
'lb',
'lg-ug',
'lg',
'lkt-us',
'lkt',
'ln-ao',
'ln-cd',
'ln-cf',
'ln-cg',
'ln',
'lo-la',
'lo',
'lrc-iq',
'lrc-ir',
'lrc',
'lt-lt',
'lt',
'lu-cd',
'lu',
'luo-ke',
'luo',
'luy-ke',
'luy',
'lv-lv',
'lv',
'mas-ke',
'mas-tz',
'mas',
'mer-ke',
'mer',
'mfe-mu',
'mfe',
'mg-mg',
'mg',
'mgh-mz',
'mgh',
'mgo-cm',
'mgo',
'mk-mk',
'mk',
'ml-in',
'ml',
'mn-mn',
'mn',
'mo',
'mr-in',
'mr',
'ms-bn',
'ms-my',
'ms-sg',
'ms',
'mt-mt',
'mt',
'mua-cm',
'mua',
'my-mm',
'my',
'mzn-ir',
'mzn',
'naq-na',
'naq',
'nb-no',
'nb-sj',
'nb',
'nd-zw',
'nd',
'nds-de',
'nds-nl',
'nds',
'ne-in',
'ne-np',
'ne',
'nl-aw',
'nl-be',
'nl-bq',
'nl-cw',
'nl-nl',
'nl-sr',
'nl-sx',
'nl',
'nmg-cm',
'nmg',
'nn-no',
'nn',
'nnh-cm',
'nnh',
'no-no',
'no',
'nus-ss',
'nus',
'nyn-ug',
'nyn',
'om-et',
'om-ke',
'om',
'or-in',
'or',
'os-ge',
'os-ru',
'os',
'pa-arab-pk',
'pa-arab',
'pa-guru-in',
'pa-guru',
'pa',
'pl-pl',
'pl',
'prg-001',
'prg',
'ps-af',
'ps',
'pt-ao',
'pt-br',
'pt-ch',
'pt-cv',
'pt-gq',
'pt-gw',
'pt-lu',
'pt-mo',
'pt-mz',
'pt-pt',
'pt-st',
'pt-tl',
'pt',
'qu-bo',
'qu-ec',
'qu-pe',
'qu',
'rm-ch',
'rm',
'rn-bi',
'rn',
'ro-md',
'ro-ro',
'ro',
'rof-tz',
'rof',
'ru-by',
'ru-kg',
'ru-kz',
'ru-md',
'ru-ru',
'ru-ua',
'ru',
'rw-rw',
'rw',
'rwk-tz',
'rwk',
'sah-ru',
'sah',
'saq-ke',
'saq',
'sbp-tz',
'sbp',
'se-fi',
'se-no',
'se-se',
'se',
'seh-mz',
'seh',
'ses-ml',
'ses',
'sg-cf',
'sg',
'sh',
'shi-latn-ma',
'shi-latn',
'shi-tfng-ma',
'shi-tfng',
'shi',
'si-lk',
'si',
'sk-sk',
'sk',
'sl-si',
'sl',
'smn-fi',
'smn',
'sn-zw',
'sn',
'so-dj',
'so-et',
'so-ke',
'so-so',
'so',
'sq-al',
'sq-mk',
'sq-xk',
'sq',
'sr-cyrl-ba',
'sr-cyrl-me',
'sr-cyrl-rs',
'sr-cyrl-xk',
'sr-cyrl',
'sr-latn-ba',
'sr-latn-me',
'sr-latn-rs',
'sr-latn-xk',
'sr-latn',
'sr',
'sv-ax',
'sv-fi',
'sv-se',
'sv',
'sw-cd',
'sw-ke',
'sw-tz',
'sw-ug',
'sw',
'ta-in',
'ta-lk',
'ta-my',
'ta-sg',
'ta',
'te-in',
'te',
'teo-ke',
'teo-ug',
'teo',
'th-th',
'th',
'ti-er',
'ti-et',
'ti',
'tk-tm',
'tk',
'tl',
'to-to',
'to',
'tr-cy',
'tr-tr',
'tr',
'twq-ne',
'twq',
'tzm-ma',
'tzm',
'ug-cn',
'ug',
'uk-ua',
'uk',
'ur-in',
'ur-pk',
'ur',
'uz-arab-af',
'uz-arab',
'uz-cyrl-uz',
'uz-cyrl',
'uz-latn-uz',
'uz-latn',
'uz',
'vai-latn-lr',
'vai-latn',
'vai-vaii-lr',
'vai-vaii',
'vai',
'vi-vn',
'vi',
'vo-001',
'vo',
'vun-tz',
'vun',
'wae-ch',
'wae',
'xog-ug',
'xog',
'yav-cm',
'yav',
'yi-001',
'yi',
'yo-bj',
'yo-ng',
'yo',
'yue-hk',
'yue',
'zgh-ma',
'zgh',
'zh-cn',
'zh-hans-cn',
'zh-hans-hk',
'zh-hans-mo',
'zh-hans-sg',
'zh-hans',
'zh-hant-hk',
'zh-hant-mo',
'zh-hant-tw',
'zh-hant',
'zh-hk',
'zh-tw',
'zh',
'zu-za',
'zu',
]
# Build one Resource per locale, exposing it both in the `locales` mapping
# and as a module-level name (e.g. `angular_locale_en_us`).
for lang in _langs:
    attr_name = "angular_locale_{0}".format(lang).replace("-", "_")
    resource = Resource(
        library, "i18n/angular-locale_{0}.js".format(lang),
        depends=[angular])
    locales[lang] = resource
    # Fix: assign through globals(), not locals().  Writing to locals() only
    # works here because at module scope locals() IS globals(); the locals()
    # docs say such writes are otherwise not guaranteed to take effect.
    globals()[attr_name] = resource
| StarcoderdataPython |
6505084 | <reponame>Keleas/Tello_Laser_Shot
from drone.controller import DroneController
from drone.virtual_drone import TestController
from drone.tools import NavigationSystem
class FrontEnd(object):
    """
    Main interaction loop between the modules of the autonomous control
    system for the Tello Edu drone.

    All commands are issued by activating attributes of the
    CommandController class; the drone's optical stream is processed by
    activating attributes of the SystemCV class.  The two modules exchange
    a DroneCommand and DroneInfo between them.
    (Translated from the original Russian docstring.)
    """
    def __init__(self):
        # Drone control module.
        self.command_controller = DroneController()
        # Local simulator fallback used when no real drone is reachable.
        self.virt_drone = TestController()
        self.display_time_upd = 30
    def check_connect(self):
        """
        Check the connection to the drone.
        :return: bool: True on success, False otherwise
        """
        try:
            self.command_controller.drone.connect()
            self.command_controller.drone.streamoff()
            self.command_controller.drone.streamon()
            return True
        except:
            # Any failure while connecting/starting the stream means the
            # real drone is unavailable; the caller falls back to the
            # virtual drone.
            return False
    def run(self):
        """ Main loop of the control program. """
        # self.autopilot_tester.run()
        if self.check_connect():
            self.command_controller.video_loop()
        else:
            # Launch the virtual drone locally: the camera comes from the
            # PC and control is simulated graphically.
            self.virt_drone.run()
        return True
def main():
    """Entry point: build the front end and start the drone control system."""
    FrontEnd().run()
    return True
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1715255 | from PIL import Image
from ....osrparse.enums import Mod
from ....CheckSystem.getgrade import getgrade
from ... import imageproc
from .ARankingScreen import ARankingScreen
class RankingGrade(ARankingScreen):
def __init__(self, replayinfo, gradeframes, gap, settings):
dummy = [Image.new("RGBA", (1, 1))]
super().__init__(dummy, settings)
acc = {300: replayinfo.number_300s,
100: replayinfo.number_100s,
50: replayinfo.number_50s,
0: replayinfo.misses}
grade = getgrade(acc)
is_silver = int(Mod.Hidden in replayinfo.mod_combination or Mod.Flashlight in replayinfo.mod_combination)
self.gradeframe = gradeframes[is_silver][grade]
self.gap = int(gap * self.settings.scale * 0.75)
if self.settings.skin_ini.general["Version"] == 1:
self.y = 272
else:
self.y = 320
def add_to_frame(self, background):
# source: https://osu.ppy.sh/help/wiki/Skinning/Interface#ranking-grades
super().add_to_frame(background)
if self.fade == self.FADEIN:
imageproc.add(self.gradeframe, background, self.settings.width - 192 * self.settings.scale,
self.y * self.settings.scale, self.alpha)
| StarcoderdataPython |
1843132 | <reponame>strattner/pybinder
#!/usr/bin/python3
"""
searchdns
Query one or more records in DNS, using either system default or
specified nameserver and/or search domain.
Author: <NAME> (<EMAIL>)
Copyright (c) 2017 IBM Corp.
All Rights Reserved
"""
import re
import logging
import ipaddress
import dns.resolver
import dns.name
import dns.reversename
class DNSSearchAnswerError(Exception):
    """Raised when a DNS response cannot be parsed into a DNSSearchAnswer."""
class DNSSearchAnswer(object):  # pylint: disable=too-few-public-methods
    """
    Parses and stores the answer to a SearchDNS query.
    Only understands A, PTR, and CNAME records.

    Attributes set depend on the record type:
      - FORWARD:     ``addr`` (single IP string)
      - ROUND_ROBIN: ``addr`` (list of IP strings)
      - REVERSE:     ``name``
      - ALIAS:       ``real_name``
    """
    FORWARD = "A"
    REVERSE = "PTR"
    ALIAS = "CNAME"
    ROUND_ROBIN = "Round robin"
    NOT_FOUND = "not found in DNS"

    def __init__(self, entry, value=None):
        """Store *entry*; parse *value* (list of record strings) if given.

        A falsy *value* marks the entry as NOT_FOUND.
        """
        logging.debug("Inserting DNSSearchAnswer %s %s", entry, value)
        self.entry = entry
        if not value:
            self.type = DNSSearchAnswer.NOT_FOUND
        else:
            self.__parse_answer(value)

    def __parse_answer(self, answer):
        """Classify the record list. Note: mutates *answer* (pops item 0).

        Raises:
            DNSSearchAnswerError: on records the regexes cannot parse or on
                unknown record types.
        """
        first_result = str(answer.pop(0))
        logging.debug("Parsing first result: %s", first_result)
        result_type_search = re.search(r'IN (\S+) (\S+)$', first_result)
        if result_type_search is None:
            # Fix: previously a non-matching record crashed with
            # AttributeError on the None returned by re.search.
            raise DNSSearchAnswerError(
                "Unparsable DNS record: {}".format(first_result))
        result_type = result_type_search.group(1)
        result_answer = result_type_search.group(2)
        logging.debug("Result type %s with value %s", result_type,
                      result_answer)
        if result_type == DNSSearchAnswer.ALIAS:
            self.type = DNSSearchAnswer.ALIAS
            self.real_name = result_answer
        elif result_type == DNSSearchAnswer.FORWARD:
            self.type = DNSSearchAnswer.FORWARD
            self.addr = result_answer
            if answer:
                # More than one A record means a round-robin address set.
                self.addr = [self.addr]
                self.type = DNSSearchAnswer.ROUND_ROBIN
                for ans in answer:
                    next_ip_search = re.search(r'IN A (\S+)$', ans)
                    if next_ip_search is None:
                        raise DNSSearchAnswerError(
                            "Unparsable DNS record: {}".format(ans))
                    self.addr.append(next_ip_search.group(1))
        elif result_type == DNSSearchAnswer.REVERSE:
            self.type = DNSSearchAnswer.REVERSE
            self.name = result_answer
        else:
            raise DNSSearchAnswerError(
                "Unrecognized DNS response: {}".format(answer))
        return True

    def __str__(self):
        if self.type == DNSSearchAnswer.NOT_FOUND:
            return self.entry + " " + self.type
        return_string = str(self.entry) + " "
        if self.type == DNSSearchAnswer.ALIAS:
            return_string += "(alias for) " + self.real_name
        if self.type == DNSSearchAnswer.ROUND_ROBIN:
            return_string += " ".join(self.addr)
        if self.type == DNSSearchAnswer.FORWARD:
            return_string += self.addr
        if self.type == DNSSearchAnswer.REVERSE:
            return_string += self.name
        return return_string
class SearchDNS(object):
    """
    Performs DNS query against system or specified nameserver,
    returns results as DNSSearchAnswer instance.
    """
    # Record-type constants, mirroring DNSSearchAnswer.
    FORWARD = "A"
    REVERSE = "PTR"
    ALIAS = "CNAME"
    NORMAL = [FORWARD, REVERSE]

    def __init__(self, nameserver=None, zone=None):
        """Build a resolver; None for nameserver/zone keeps system defaults."""
        self.nameserver = nameserver
        self.zone = zone
        self.searcher = dns.resolver.Resolver()
        self.searcher.set_flags(0)  # do not perform recursive query
        if self.nameserver:
            logging.debug("Using %s as nameserver", self.nameserver)
            self.searcher.nameservers = [self.nameserver]
        if self.zone:
            logging.debug("Using %s as default domain", self.zone)
            self.searcher.search = [dns.name.from_text(self.zone)]

    @staticmethod
    def is_address(entry):
        """Return True if *entry* parses as an IPv4 or IPv6 address."""
        try:
            _ = ipaddress.ip_address(entry)
            return True
        except ValueError:
            return False

    def query(self, entry, search_type=None):
        """Resolve *entry* and return a DNSSearchAnswer.

        When *search_type* is not given, PTR is used for IP addresses and
        A for everything else. NXDOMAIN/NoAnswer yield a NOT_FOUND answer.
        """
        logging.debug("Performing a search for %s", entry)
        search_entry = entry
        if not search_type:
            if SearchDNS.is_address(entry):
                search_type = SearchDNS.REVERSE
                # PTR queries need the reversed in-addr.arpa/ip6.arpa name.
                search_entry = dns.reversename.from_address(str(entry))
            else:
                search_type = SearchDNS.FORWARD
        try:
            answer = self.searcher.query(search_entry, search_type).response
            # Only the first answer RRset is parsed; split into one line per
            # record for the regex-based parsing in DNSSearchAnswer.
            answer_list = str(answer.answer.pop(0)).splitlines()
            logging.debug("Answer: %s", answer_list)
            return DNSSearchAnswer(entry, answer_list)
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            return DNSSearchAnswer(entry)
def main():
    """
    SearchDNS.

    Usage:
        searchdns.py [--server <server>] [--domain <domain>] [--debug <file>] <query>...
        searchdns.py -h | --help

    Arguments:
        query              One or more names or IPs to search

    Options:
        -h --help          Show this screen
        --server <server>  Specify the nameserver (if not system default)
        --domain <domain>  Specify the search domain (if FQDN is not given)
        --debug <file>     Send debug messages to a file
    """
    # Fix: docopt used to be imported only under the __main__ guard, so
    # importing this module and calling main() raised NameError. A local
    # import also keeps the library importable without docopt installed.
    from docopt import docopt

    arguments = docopt(main.__doc__)
    if arguments['--debug']:
        print("Running in debug mode to {}".format(arguments['--debug']))
        log_date = '%Y-%m-%d %H:%M:%S'
        log_form = '%(asctime)s %(message)s'
        logging.basicConfig(
            filename=arguments['--debug'],
            level=logging.DEBUG,
            format=log_form,
            datefmt=log_date)
    logging.debug("Program arguments: %s", arguments)
    # docopt always populates every option key (None when absent), so the
    # former "'--server' in arguments" membership tests were always True.
    nameserver = arguments['--server']
    domain = arguments['--domain']
    searcher = SearchDNS(nameserver, domain)
    for query in arguments['<query>']:
        print(searcher.query(query))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
235715 | <gh_stars>0
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
from .be_depend import BeDepend
from .build_depend import BuildDepend
from .install_depend import InstallDepend
from .self_depend import SelfDepend
class DispatchDepend:
    """
    Factory method, distributing operations of different depend search.

    Each supported 'depend_type' maps to a private ``_<type>`` method that
    instantiates and runs the matching dependency-search class.
    """

    def __init__(self):
        self._kwargs = dict()

    def _selfdep(self):
        """Run a self-dependency query."""
        _self_depend = SelfDepend(
            db_list=self._kwargs["parameter"]["db_priority"])
        _self_depend(**self._kwargs)
        return _self_depend

    def _installdep(self):
        """Run an install-dependency query."""
        _install_depend = InstallDepend(
            db_list=self._kwargs["parameter"]["db_priority"])
        _install_depend(**self._kwargs)
        return _install_depend

    def _builddep(self):
        """Run a build-dependency query."""
        _build_depend = BuildDepend(
            db_list=self._kwargs["parameter"]["db_priority"])
        _build_depend(**self._kwargs)
        return _build_depend

    def _bedep(self):
        """Run a 'bedep' (packages depending on this one) query."""
        _be_depend = BeDepend(**self._kwargs)
        _be_depend(**self._kwargs)
        return _be_depend

    @classmethod
    def execute(cls, **kwargs):
        """Execute the dependency query named by ``kwargs['depend_type']``.

        The value of depend_type must be in
        ['installdep', 'builddep', 'selfdep', 'bedep'].

        Raises:
            ValueError: when 'depend_type' is missing from kwargs.
            AttributeError: when the given depend_type is not supported.
        """
        if "depend_type" not in kwargs:
            raise ValueError(
                "Missing the necessary query parameter 'depend_type'.")
        dispatch_cls = cls()
        dispatch_cls._kwargs = kwargs
        method = getattr(dispatch_cls, "_" + kwargs["depend_type"], None)
        if not method:
            # Fix: the original message here was garbled ("Queries that do
            # not support dependencies are not supported").
            raise AttributeError(
                "Unsupported dependency query type: '{}'".format(
                    kwargs["depend_type"]))
        return method()
| StarcoderdataPython |
231662 | # Some utility classes to represent a PDB structure
class Atom:
    """A single atom: a type label plus xyz coordinates."""

    def __init__(self, type):
        self.type = type
        # Coordinates default to the origin until assigned by the parser.
        self.coords = (0.0,) * 3

    def __repr__(self):
        # Printing an atom shows just its type label.
        return self.type
class Residue:
    """
    A simple class for an amino acid residue: a typed, numbered
    collection of atoms.
    """
    def __init__(self, type, number):
        self.type = type
        self.number = number
        self.atoms = []

    # Overload the __repr__ operator to make printing simpler.
    def __repr__(self):
        return "{0} {1}".format(self.type, self.number)

    @property
    def alpha_carbon(self):
        """Lookup the atom representing the alpha carbon.

        Returns None when no alpha-carbon atom is present. (Fix: a leftover
        debug ``print(self.atoms)`` used to pollute stdout on this path.)
        """
        for atom in self.atoms:
            # Alternate PDB labels for the alpha carbon.
            if atom.type in ('CA', 'CA A', 'C A'):
                return atom
        return None
class ActiveSite:
    """A named active site holding its residues."""

    def __init__(self, name):
        self.name = name
        # Residues are appended by the PDB parser.
        self.residues = []

    def __repr__(self):
        # Printing an active site shows just its name.
        return self.name
| StarcoderdataPython |
6412608 | <filename>x86/Chapter1/Chapter1-printf.py
from ctypes import *

# msvcrt is the Microsoft C runtime; this script is Windows-only.
msvcrt = cdll.msvcrt

# Fix: on Python 3, ctypes marshals `str` as a wide (UTF-16) string, so
# printf("%s", ...) stops at the first embedded NUL and prints only one
# character. Pass `bytes` so the C side receives a plain char*.
message_string = b"Hello, Gray Hat Python!\n"
msvcrt.printf(b"A message has been received: %s", message_string)
173730 | <gh_stars>100-1000
from tasklib import TaskWarrior
from taskwiki import errors
class WarriorStore(object):
    """
    Stores all instances of TaskWarrior objects, keyed by name.
    """

    def __init__(self, default_rc, default_data, extra_warrior_defs):
        base_kwargs = dict(
            data_location=default_data,
            taskrc_location=default_rc,
        )
        # The 'default' warrior always exists; extras override the defaults.
        self.warriors = {'default': TaskWarrior(**base_kwargs)}
        for name, overrides in extra_warrior_defs.items():
            kwargs = base_kwargs.copy()
            kwargs.update(overrides)
            self.warriors[name] = TaskWarrior(**kwargs)
        # Make sure context is not respected in any TaskWarrior.
        for tw in self.warriors.values():
            tw.overrides.update({'context': ''})

    def __getitem__(self, key):
        if key in self.warriors:
            return self.warriors[key]
        raise errors.TaskWikiException(
            "Taskwarrior with key '{0}' not available."
            .format(key))

    def __setitem__(self, key, value):
        self.warriors[key] = value

    def values(self):
        return self.warriors.values()

    def items(self):
        return self.warriors.items()
class NoNoneStore(object):
    """Cache-backed mapping that never stores None.

    Missing keys are produced on demand via the subclass hook get_method();
    assigning None is treated as deletion.
    """

    def __init__(self, cache):
        self.cache = cache
        self.store = dict()

    def __getitem__(self, key):
        cached = self.store.get(key)
        if cached is None:
            cached = self.get_method(key)
            if cached is not None:
                # Only successful lookups are cached.
                self.store[key] = cached
        # May be None when get_method() produced nothing.
        return cached

    def __setitem__(self, key, value):
        if value is None:
            # Treat None as deletion so the store stays None-free.
            del self[key]
        else:
            self.store[key] = value

    def __delitem__(self, key):
        # Deleting an absent key is a no-op.
        self.store.pop(key, None)

    def __contains__(self, key):
        return key in self.store

    def values(self):
        return self.store.values()

    def items(self):
        return self.store.items()

    def clear(self):
        return self.store.clear()
class LineNumberedKeyedStoreMixin(object):
    """Mixin for stores keyed by line number: bulk re-key on line moves."""

    def shift(self, position, offset):
        # Re-key every entry at or after `position` by `offset` lines.
        self.store = {
            (line + offset if line >= position else line): value
            for line, value in self.store.items()
        }

    def swap(self, position1, position2):
        # Exchange the entries via __setitem__ so a missing side (None)
        # deletes the other slot instead of storing None.
        first = self.store.get(position1)
        self[position1] = self.store.get(position2)
        self[position2] = first
class TaskStore(NoNoneStore):
    """Cache of tasklib tasks, keyed by objects carrying (tw, value=uuid)."""

    def get_method(self, key):
        # Resolve the task in the TaskWarrior instance the key points at.
        return key.tw.tasks.get(uuid=key.value)
class VwtaskStore(LineNumberedKeyedStoreMixin, NoNoneStore):
    """Cache of VimwikiTask objects keyed by buffer line number."""

    def shift(self, position, offset):
        # Each cached task also records its own line number as an item;
        # update that bookkeeping first, then re-key the store in the mixin.
        for line, vwtask in self.store.items():
            if line >= position:
                vwtask['line_number'] += offset
        super(VwtaskStore, self).shift(position, offset)

    def swap(self, position1, position2):
        super(VwtaskStore, self).swap(position1, position2)
        # After re-keying, stamp the new line number onto surviving entries.
        for index in (position1, position2):
            if self.store.get(index) is not None:
                self[index]['line_number'] = index

    def get_method(self, line):
        # Parse the buffer line into a VimwikiTask; may return None when
        # the line holds no task (see NoNoneStore.__getitem__).
        from taskwiki import vwtask
        return vwtask.VimwikiTask.from_line(self.cache, line)
class ViewportStore(LineNumberedKeyedStoreMixin, NoNoneStore):
    """Cache of ViewPort objects keyed by buffer line number."""

    def shift(self, position, offset):
        # Viewports track their position as an attribute (unlike VwtaskStore,
        # where it is an item); update it before re-keying in the mixin.
        for line, viewport in self.store.items():
            if line >= position:
                viewport.line_number += offset
        super(ViewportStore, self).shift(position, offset)

    def swap(self, position1, position2):
        super(ViewportStore, self).swap(position1, position2)
        # After re-keying, stamp the new line number onto surviving entries.
        for index in (position1, position2):
            if self.store.get(index) is not None:
                self[index].line_number = index

    def get_method(self, line):
        # NOTE(review): bare 'import viewport' differs from the package-style
        # 'from taskwiki import vwtask' used by VwtaskStore — presumably both
        # resolve via the plugin's path setup; confirm before unifying.
        import viewport
        return viewport.ViewPort.from_line(line, self.cache)
class PresetStore(LineNumberedKeyedStoreMixin, NoNoneStore):
    """Cache of PresetHeader objects keyed by buffer line number."""

    def get_method(self, line):
        # Imported lazily, matching the sibling store classes.
        from preset import PresetHeader
        return PresetHeader.from_line(line, self.cache)
class LineStore(NoNoneStore):
    """Cache of parsed line objects keyed by (parser_class, line_number).

    Several parser classes may cache a result for the same buffer line, so
    keys combine the class with the line number.
    """

    def __delitem__(self, number):
        # Drop every parser's cache entry for this line number.
        for cls, i in list(self.store.keys()):
            if i == number:
                del self.store[(cls, i)]

    def get_method(self, key):
        cls, line = key
        return cls.parse_line(self.cache, line)

    def shift(self, position, offset):
        # Re-key all entries at or after `position` by `offset` lines.
        new_store = {
            (cls, i + offset if i >= position else i): self.store[(cls, i)]
            for cls, i in self.store.keys()
        }
        self.store = new_store

    def swap(self, position1, position2):
        # Collect the entries cached for each of the two line numbers ...
        temp_store1 = {
            (cls, i): self.store[(cls, i)]
            for cls, i in self.store.keys()
            if i == position1
        }
        temp_store2 = {
            (cls, i): self.store[(cls, i)]
            for cls, i in self.store.keys()
            if i == position2
        }
        # ... remove them ...
        for cls, i in list(self.store.keys()):
            if i == position1 or i == position2:
                del self.store[(cls, i)]
        # ... and re-insert each set under the other line number.
        for cls, i in temp_store1.keys():
            self.store[(cls, position2)] = temp_store1[(cls, i)]
        for cls, i in temp_store2.keys():
            self.store[(cls, position1)] = temp_store2[(cls, i)]
        # Also change the actual line content
        temp = self.cache.buffer[position1]
        self.cache.buffer[position1] = self.cache.buffer[position2]
        self.cache.buffer[position2] = temp
class CompletionStore(NoNoneStore):
    """Cache of Completion helpers, one per key."""

    def get_method(self, key):
        # Imported lazily, matching the sibling store classes.
        from taskwiki import completion
        return completion.Completion(key)
| StarcoderdataPython |
1757345 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from services.aadservice import AadService
from models.reportconfig import ReportConfig
from models.embedtoken import EmbedToken
from models.embedconfig import EmbedConfig
from models.embedtokenrequestbody import EmbedTokenRequestBody
from flask import current_app as app, abort
import requests
import json
class PbiEmbedService:
    """Thin wrapper around the Power BI REST API for embedding reports.

    Public methods return JSON strings or EmbedToken objects; every Power BI
    API failure aborts the current Flask request with the upstream status
    code and a descriptive message. (Refactor: the report fetch, the token
    request assembly, and the token POST were copy-pasted across five
    methods; they now live in the private helpers below.)
    """

    def _get_report(self, workspace_id, report_id):
        """Fetch a report's metadata dict (id, name, embedUrl, datasetId) or abort."""
        report_url = f'https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/reports/{report_id}'
        api_response = requests.get(report_url, headers=self.get_request_header())

        if api_response.status_code != 200:
            abort(api_response.status_code, description=f'Error while retrieving Embed URL\n{api_response.reason}:\t{api_response.text}\nRequestId:\t{api_response.headers.get("RequestId")}')

        return json.loads(api_response.text)

    @staticmethod
    def _build_token_request(report_ids, dataset_ids, target_workspace_ids):
        """Assemble an EmbedTokenRequestBody for the given resource ids."""
        request_body = EmbedTokenRequestBody()

        for dataset_id in dataset_ids:
            request_body.datasets.append({'id': dataset_id})

        for report_id in report_ids:
            request_body.reports.append({'id': report_id})

        for target_workspace_id in target_workspace_ids:
            request_body.targetWorkspaces.append({'id': target_workspace_id})

        return request_body

    def _request_embed_token(self, request_body):
        """POST *request_body* to the GenerateToken endpoint; return an EmbedToken.

        Generates an Embed token for multiple workspaces, datasets, and
        reports. Refer https://aka.ms/MultiResourceEmbedToken
        """
        embed_token_api = 'https://api.powerbi.com/v1.0/myorg/GenerateToken'
        api_response = requests.post(embed_token_api, data=json.dumps(request_body.__dict__), headers=self.get_request_header())

        if api_response.status_code != 200:
            abort(api_response.status_code, description=f'Error while retrieving Embed token\n{api_response.reason}:\t{api_response.text}\nRequestId:\t{api_response.headers.get("RequestId")}')

        api_response = json.loads(api_response.text)
        return EmbedToken(api_response['tokenId'], api_response['token'], api_response['expiration'])

    def get_embed_params_for_single_report(self, workspace_id, report_id, additional_dataset_id=None):
        '''Get embed params for a report and a workspace

        Args:
            workspace_id (str): Workspace Id
            report_id (str): Report Id
            additional_dataset_id (str, optional): Dataset Id different than the one bound to the report. Defaults to None.

        Returns:
            EmbedConfig: Embed token and Embed URL (serialized as JSON)
        '''
        api_response = self._get_report(workspace_id, report_id)
        report = ReportConfig(api_response['id'], api_response['name'], api_response['embedUrl'])
        dataset_ids = [api_response['datasetId']]

        # Append additional dataset to the list to achieve dynamic binding later
        if additional_dataset_id is not None:
            dataset_ids.append(additional_dataset_id)

        embed_token = self.get_embed_token_for_single_report_single_workspace(report_id, dataset_ids, workspace_id)
        embed_config = EmbedConfig(embed_token.tokenId, embed_token.token, embed_token.tokenExpiry, [report.__dict__])
        return json.dumps(embed_config.__dict__)

    def get_embed_params_for_multiple_reports(self, workspace_id, report_ids, additional_dataset_ids=None):
        '''Get embed params for multiple reports for a single workspace

        Args:
            workspace_id (str): Workspace Id
            report_ids (list): Report Ids
            additional_dataset_ids (list, optional): Dataset Ids which are different than the ones bound to the reports. Defaults to None.

        Returns:
            EmbedConfig: Embed token and Embed URLs (serialized as JSON)
        '''
        # Note: This method is an example and is not consumed in this sample app
        dataset_ids = []
        # To store multiple report info
        reports = []

        for report_id in report_ids:
            api_response = self._get_report(workspace_id, report_id)
            report_config = ReportConfig(api_response['id'], api_response['name'], api_response['embedUrl'])
            reports.append(report_config.__dict__)
            dataset_ids.append(api_response['datasetId'])

        # Append additional datasets to the list to achieve dynamic binding later
        if additional_dataset_ids is not None:
            dataset_ids.extend(additional_dataset_ids)

        embed_token = self.get_embed_token_for_multiple_reports_single_workspace(report_ids, dataset_ids, workspace_id)
        embed_config = EmbedConfig(embed_token.tokenId, embed_token.token, embed_token.tokenExpiry, reports)
        return json.dumps(embed_config.__dict__)

    def get_embed_token_for_single_report_single_workspace(self, report_id, dataset_ids, target_workspace_id=None):
        '''Get Embed token for single report, multiple datasets, and an optional target workspace

        Args:
            report_id (str): Report Id
            dataset_ids (list): Dataset Ids
            target_workspace_id (str, optional): Workspace Id. Defaults to None.

        Returns:
            EmbedToken: Embed token
        '''
        workspaces = [target_workspace_id] if target_workspace_id is not None else []
        request_body = self._build_token_request([report_id], dataset_ids, workspaces)
        return self._request_embed_token(request_body)

    def get_embed_token_for_multiple_reports_single_workspace(self, report_ids, dataset_ids, target_workspace_id=None):
        '''Get Embed token for multiple reports, multiple dataset, and an optional target workspace

        Args:
            report_ids (list): Report Ids
            dataset_ids (list): Dataset Ids
            target_workspace_id (str, optional): Workspace Id. Defaults to None.

        Returns:
            EmbedToken: Embed token
        '''
        # Note: This method is an example and is not consumed in this sample app
        workspaces = [target_workspace_id] if target_workspace_id is not None else []
        request_body = self._build_token_request(report_ids, dataset_ids, workspaces)
        return self._request_embed_token(request_body)

    def get_embed_token_for_multiple_reports_multiple_workspaces(self, report_ids, dataset_ids, target_workspace_ids=None):
        '''Get Embed token for multiple reports, multiple datasets, and optional target workspaces

        Args:
            report_ids (list): Report Ids
            dataset_ids (list): Dataset Ids
            target_workspace_ids (list, optional): Workspace Ids. Defaults to None.

        Returns:
            EmbedToken: Embed token
        '''
        # Note: This method is an example and is not consumed in this sample app
        workspaces = target_workspace_ids if target_workspace_ids is not None else []
        request_body = self._build_token_request(report_ids, dataset_ids, workspaces)
        return self._request_embed_token(request_body)

    def get_request_header(self):
        '''Get Power BI API request header

        Returns:
            Dict: Request header
        '''
        return {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + AadService.get_access_token()}
12803454 | <gh_stars>100-1000
from django.dispatch import Signal
# ``providing_args`` was purely documentational; it is deprecated since
# Django 3.1 and removed in Django 4.0. Both signals send a ``model``
# keyword argument to their receivers.
post_export = Signal()
post_import = Signal()
| StarcoderdataPython |
3379890 | <reponame>XeryusTC/projman
# -*- coding: utf-8 -*-
from selenium import webdriver
import unittest
from .base import FunctionalTestCase
from . import pages
from . import remote
import projects.factories
class SettingsTests(FunctionalTestCase):
    """Browser-driven (Selenium) tests for the settings pages.

    NOTE(review): '<EMAIL>' and '<PASSWORD>' below look like dataset
    anonymization placeholders, not real fixture values — confirm the
    original literals against upstream history before relying on them.
    """

    def test_can_navigate_to_projects_from_settings(self):
        """Settings page is reachable from the menu and links back to projects."""
        # Alice is a user who logs into the website
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        # She clicks on the settings link
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.menu.click()
        project_page.settings_link.click()
        # She ends up on the settings page
        settings_page = pages.settings.SettingsPage(self.browser)
        self.assertEqual('Settings', self.browser.title)
        # She wants to go back to the projects
        settings_page.return_link.click()
        # She ends up on the project_page
        project_page.inlist_link(project_page.sidebar).click()

    def test_language_setting(self):
        """Changing the language setting translates the settings page."""
        # Alice is a user who logs into the website
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        # On the top of the page she sees a button, she decides to click it
        page = pages.projects.BaseProjectPage(self.browser)
        page.menu.click()
        page.settings_link.click()
        # She is directed to a new page
        settings_page = pages.settings.SettingsPage(self.browser)
        self.assertEqual('Settings', self.browser.title)
        # On the page there is a language setting
        self.assertIn('Language', settings_page.settings_list)
        self.assertNotIn('Taal', settings_page.settings_list)
        # Alice changes the language to Dutch
        settings_page.language.select_by_value('nl')
        # She also submits the form
        settings_page.confirm.click()
        # She sees that the page is now in Dutch
        self.assertNotIn('Language', settings_page.settings_list)
        self.assertIn('Taal', settings_page.settings_list)
        self.assertEqual('Instellingen', self.browser.title)
        self.assertIn('/nl/', self.browser.current_url)
        self.assertNotIn('/en/', self.browser.current_url)

    def test_language_setting_is_remembered_across_pages_and_sessions(self):
        """The chosen language persists across pages and across sessions."""
        # Alice is a user who logs into the website
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        # She goes to the settings
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.menu.click()
        project_page.settings_link.click()
        # Her language is currently set to English, but she wants it in Dutch
        self.assertIn('/en/', self.browser.current_url)
        self.assertNotIn('/nl/', self.browser.current_url)
        settings_page = pages.settings.SettingsPage(self.browser)
        settings_page.language.select_by_value('nl')
        settings_page.confirm.click()
        # When she goes back to the projects she sees that key elements
        # have been translated
        settings_page.return_link.click()
        project_page.menu.click()
        self.assertEqual(project_page.logout.text.lower(), 'afmelden')
        self.assertEqual(project_page.settings_link.text.lower(),
                         'instellingen')
        self.assertEqual('in lijst',
                         project_page.inlist_link(project_page.sidebar).text.lower())
        # Alice leaves the website
        # When alice returns later she sees that everything is still in Dutch
        self.restart_browser()
        self.login_user('alice', 'alice')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.menu.click()
        self.assertEqual(project_page.logout.text.lower(), 'afmelden')
        self.assertEqual(project_page.settings_link.text.lower(),
                         'instellingen')
        self.assertEqual('in lijst',
                         project_page.inlist_link(project_page.sidebar).text.lower())

    def test_inlist_delete_confirm(self):
        """Test if the inlist confirm delete setting skips the confirm page"""
        # Alice is a user who logs into the website
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        # She goes to the settings
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.menu.click()
        project_page.settings_link.click()
        # Currently the option to ask for confirmation when inlist items
        # are deleted is on
        settings_page = pages.settings.SettingsPage(self.browser)
        self.assertTrue(settings_page.inlist_delete_confirm.is_selected())
        # She switches it off and saves her settings
        settings_page.inlist_delete_confirm.click()
        settings_page.confirm.click()
        # Alice goes to add an item to her inlist
        settings_page.return_link.click()
        project_page.inlist_link(project_page.sidebar).click()
        inlist_page = pages.projects.InlistPage(self.browser)
        inlist_page.add_box.send_keys("Don't test the settings\n")
        # Deciding that this is a stupid idea she deletes the item
        self.assertIn("Don't test the settings",
                      [item.text for item in inlist_page.thelist])
        item = inlist_page.listrows[0]
        inlist_page.delete_item(item).click()
        # She sees that she is not send to the confirm page but instead
        # the item has just disapeared
        self.assertNotEqual(self.browser.title, 'Delete in list item')
        self.assertEqual(self.browser.title, 'In list')
        self.assertEqual(len(inlist_page.listrows), 0)

    def test_actionlist_delete_confirm(self):
        """The action-delete-confirm setting skips the confirm page for actions."""
        # Alice is a user who logs into the website
        user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
        # She goes to the settings
        page = pages.projects.BaseProjectPage(self.browser)
        page.menu.click()
        page.settings_link.click()
        # She sees an option that asks for confirmation when an action
        # item gets deleted
        settings_page = pages.settings.SettingsPage(self.browser)
        self.assertTrue(settings_page.action_delete_confirm.is_selected())
        self.assertIn('Ask for confirmation when deleting actions',
                      settings_page.content.text,)
        # She switches it off and saves her settings
        settings_page.action_delete_confirm.click()
        settings_page.confirm.click()
        # Alice goes to add an item to her actionlist
        settings_page.return_link.click()
        page.action_link(page.sidebar).click()
        actionlist_page = pages.projects.ActionlistPage(self.browser)
        actionlist_page.add_box.send_keys('Watch more series\n')
        # Alice deletes the item
        item = actionlist_page.get_list_rows(actionlist_page.thelist)[0]
        item['delete'].click()
        # The item gets deleted without going through the confirmation page
        self.assertNotEqual(self.browser.title, 'Delete action')
        self.assertEqual(self.browser.title, 'Actions')
        self.assertEqual(len(actionlist_page.thelist), 0)
        # She also has an action on a project
        # (created via the remote helper against staging, factories locally)
        if self.against_staging:
            remote.create_project(self.server_host, 'alice', 'Plan a rave', '')
            remote.create_action(self.server_host, 'alice', 'Find a location',
                                 'Plan a rave')
        else:
            p = projects.factories.ProjectFactory(user=user,
                                                  name='Plan a rave')
            projects.factories.ActionlistItemFactory(user=user, project=p,
                                                     text='Find a location')
        self.browser.refresh()
        # She goess to delete the action from the project
        page.project_link('Plan a rave').click()
        project_page = pages.projects.ProjectPage(self.browser)
        item = project_page.get_list_rows(project_page.thelist)[0]
        item['delete'].click()
        # This item also got deleted without needing confirmation
        self.assertNotEqual(self.browser.title, 'Delete action')
        self.assertEqual(self.browser.title, 'Plan a rave')
        self.assertEqual(len(project_page.thelist), 0)

    def test_settings_page_has_return_link_in_sidebar(self):
        """The settings sidebar has a Home link returning to the projects."""
        # Alice is a user who goes to the settings
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.menu.click()
        project_page.settings_link.click()
        # There is a sidebar on the page
        settings_page = pages.settings.SettingsPage(self.browser)
        self.assertIsNotNone(settings_page.sidebar)
        # In the sidebar there is a button that returns to the projects page
        self.assertIn('Home', project_page.sidebar.text)
        # When she clicks it she is returned to the projects page
        settings_page.sidebar_return_link.click()
        project_page.inlist_link(project_page.sidebar).click()

    def test_can_change_password(self):
        """Password can be changed and the new one works after logout."""
        # Alice is a user who goes to the settings
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.menu.click()
        project_page.settings_link.click()
        # She sees an account link in the sidebar and clicks it
        settings_page = pages.settings.SettingsPage(self.browser)
        settings_page.account_link.click()
        # On the form there is a change password form, she fills it out
        # and submits it
        account_settings = pages.settings.AccountSettingsPage(self.browser)
        self.assertNotIn('Password successfully changed',
                         account_settings.body.text)
        # NOTE(review): '<PASSWORD>' placeholders — presumably the old
        # password 'alice' and the new one 'security'; confirm upstream.
        account_settings.old_password.send_keys('<PASSWORD>')
        account_settings.password1.send_keys('<PASSWORD>')
        account_settings.password2.send_keys('security')
        account_settings.change_confirm.click()
        # There is a message on the page that the password has been changed
        self.assertIn('Password successfully changed.',
                      account_settings.body.text)
        # Alice then signs out
        settings_page.menu.click()
        settings_page.logout.click()
        confirm_page = pages.accounts.LogoutConfirmPage(self.browser)
        confirm_page.confirm.click()
        # She must now log in with her new password
        self.login_user('alice', 'security')
        self.is_logged_in()
| StarcoderdataPython |
8142380 | <reponame>eimrek/ProbeParticleModel
#!/usr/bin/python
import numpy as np
import os
import GridUtils as GU
import basUtils as bU
import fieldFFT
import common as PPU
import core
import cpp_utils
# overall procedure for importing the sample geometry:
def importGeometries( fname ):
    """Load the sample geometry from *fname* and update the PPU grid params.

    The format is chosen by file extension: .xyz, .bas, .xsf, .cube, .in.

    Returns:
        (atoms, lvec): atom data as returned by the loader, and a 4x3
        lattice array whose rows 1-3 are the gridA/gridB/gridC cell vectors.
    """
    import sys  # fix: sys.exit() is used below but 'sys' was never imported
    low = fname.lower()
    if low.endswith((".xyz", ".bas")):
        atoms, nDim, lvec = bU.loadAtoms( fname )
    elif low.endswith(".xsf"):
        atoms, nDim, lvec = bU.loadXSFGeom( fname )
    elif low.endswith(".cube"):
        atoms, nDim, lvec = bU.loadAtomsCUBE( fname )
    elif low.endswith(".in"):
        atoms, nDim, lvec = bU.loadGeometryIN( fname )
    else:
        sys.exit("ERROR!!! Unknown format of geometry system. Supported "
                 "formats are: .xyz, .bas., .xsf, .cube, .in \n\n")
    if (nDim != []):
        PPU.params['gridN'] = nDim
    if (lvec != []):
        # The file carried its own cell: propagate it to the global params.
        PPU.params['gridA'] = lvec[1]
        PPU.params['gridB'] = lvec[2]
        PPU.params['gridC'] = lvec[3]
    else:
        # No cell in the file: synthesize lvec from the current params.
        lvec = np.zeros((4, 3))
        lvec[1, :] = PPU.params['gridA'].copy()
        lvec[2, :] = PPU.params['gridB'].copy()
        lvec[3, :] = PPU.params['gridC'].copy()
    return atoms, lvec
def parseAtoms( atoms, autogeom = False, PBC = True, FFparams=None ):
    """Convert loaded atom data into (iZs, Rs, Qs) arrays.

    Args:
        atoms:    loader output; atoms[0] holds element symbols or numbers,
                  atoms[1..3] the x/y/z coordinates, atoms[4] the charges.
        autogeom: when True, auto-fit/shift the geometry via PPU.autoGeom.
        PBC:      when True, replicate atoms over periodic images.
        FFparams: per-element parameter table; column 3 holds the symbol.

    Returns:
        (iZs, Rs, Qs): atomic numbers, positions (transposed to N x 3),
        and charges, each possibly expanded by the PBC replication.

    Raises:
        ValueError: when FFparams is missing or a symbol is unknown.
    """
    if FFparams is None:
        raise ValueError("You should provide a list of LJ parameters!")
    Rs = np.array([atoms[1], atoms[2], atoms[3]])
    # Map element symbol -> 1-based index into FFparams.
    elem_dict = {}
    for i, ff in enumerate(FFparams):
        elem_dict[ff[3]] = i + 1
    Natoms = []
    for atm in atoms[0]:
        # Fix: the original used two bare 'except:' clauses, hiding any
        # unrelated error; catch only the expected conversion failures.
        try:
            Natoms.append(int(atm))
        except (ValueError, TypeError):
            try:
                Natoms.append(elem_dict[atm])
            except KeyError:
                raise ValueError("Did not find atomkind: "
                                 "{}".format(atm))
    iZs = np.array(Natoms)
    if autogeom:
        print(" autoGeom ")
        PPU.autoGeom( Rs, shiftXY=True, fitCell=True, border=3.0 )
    Rs = np.transpose( Rs, (1, 0) ).copy()
    Qs = np.array( atoms[4] )
    if PBC:
        iZs, Rs, Qs = PPU.PBCAtoms( iZs, Rs, Qs, avec=PPU.params['gridA'], bvec=PPU.params['gridB'] )
    return iZs, Rs, Qs
def perpareArrays( FF, Vpot ):
    """Allocate (or adopt) the force-field grids and register them with core.

    Args:
        FF:   existing force grid of shape (nz, ny, nx, 3), or None to
              allocate one from PPU.params['gridN'] (stored as (nx, ny, nz)).
        Vpot: when True, also allocate a matching (nz, ny, nx) energy grid.

    Returns:
        (FF, V): the force grid and the energy grid (V is None when not
        requested). Side effect: core.setFF() is pointed at these arrays.
    """
    if FF is None:
        gridN = PPU.params['gridN']
        FF = np.zeros((gridN[2], gridN[1], gridN[0], 3))
    else:
        # Fix 1: 'gridN' used to be undefined on this branch, so passing an
        # existing FF together with Vpot=True raised NameError.
        # Fix 2: params['gridN'] was set to FF's raw shape (nz, ny, nx, 3),
        # contradicting the (nx, ny, nz) convention used everywhere else.
        nz, ny, nx = np.shape(FF)[:3]
        gridN = (nx, ny, nz)
        PPU.params['gridN'] = gridN
    if Vpot:
        V = np.zeros((gridN[2], gridN[1], gridN[0]))
    else:
        V = None
    core.setFF(gridF=FF, gridE=V)
    return FF, V
def computeLJ( Rs, iZs, FFLJ=None, FFparams=None, Vpot=False ):
    """Fill the global grid with the sample's Lennard-Jones force field.

    Args:
        Rs:       atom positions (presumably (natoms, 3) as produced by
                  parseAtoms — confirm against core.getLenardJonesFF).
        iZs:      atomic numbers, used to look up LJ coefficients.
        FFLJ:     optional pre-allocated force grid; allocated when None.
        FFparams: per-element LJ parameter table (required).
        Vpot:     also allocate an energy grid when True.

    Returns:
        (FFLJ, VLJ): force grid and energy grid (VLJ is None unless Vpot).

    Raises:
        ValueError: when FFparams is not given.
    """
    if FFparams is None:
        raise ValueError("You should provide a list of LJ parameters!")
    # perpareArrays registers the grids with the C core before the fill call,
    # so the call order here matters.
    FFLJ,VLJ = perpareArrays( FFLJ, Vpot )
    C6,C12 = PPU.getAtomsLJ( PPU.params['probeType'], iZs, FFparams )
    #core.setFF( gridF=FFLJ, gridE=VLJ )
    core.getLenardJonesFF( Rs, C6, C12 )
    return FFLJ, VLJ
def computeCoulomb( Rs, Qs, FFel=None , Vpot=False ):
    """Fill the electrostatic force-field grid for point charges Qs at Rs.

    FFel may be a pre-allocated grid (reused) or None (allocated from the
    global grid parameters). Returns (FFel, Vel); Vel is None unless Vpot.
    """
    FFel, Vel = perpareArrays(FFel, Vpot)
    # Charges are pre-scaled by the Coulomb constant so the core works in
    # force units directly.
    scaled_charges = Qs * PPU.CoulombConst
    core.getCoulombFF(Rs, scaled_charges)
    return FFel, Vel
"""
def prepareForceFields( store = True, storeXsf = False, autogeom = False, FFparams=None ):
newEl = False
newLJ = False
head = None
# --- try to load FFel or compute it from LOCPOT.xsf
if ( os.path.isfile('FFel_x.xsf') ):
print " FFel_x.xsf found "
FFel, lvecEl, nDim, head = GU.loadVecField('FFel', FFel)
PPU.lvec2params( lvecEl )
else:
print "F Fel_x.xsf not found "
if ( xsfLJ and os.path.isfile('LOCPOT.xsf') ):
print " LOCPOT.xsf found "
V, lvecEl, nDim, head = GU.loadXSF('LOCPOT.xsf')
PPU.lvec2params( lvecEl )
FFel_x,FFel_y,FFel_z = fieldFFT.potential2forces( V, lvecEl, nDim, sigma = 1.0 )
FFel = GU.packVecGrid( FFel_x,FFel_y,FFel_z )
del FFel_x,FFel_y,FFel_z
GU.saveVecFieldXsf( 'FFel', FF, lvecEl, head = head )
else:
print " LOCPOT.xsf not found "
newEl = True
# --- try to load FFLJ
if ( os.path.isfile('FFLJ_x.xsf') ):
print " FFLJ_x.xsf found "
FFLJ, lvecLJ, nDim, head = GU.loadVecFieldXsf( 'FFLJ' )
PPU.lvec2params( lvecLJ )
else:
newLJ = True
# --- compute Forcefield by atom-wise interactions
if ( newEl or newEl ):
atoms = basUtils.loadAtoms('geom.bas')
iZs,Rs,Qs = parseAtoms( atoms, autogeom = autogeom, PBC =
PPU.params['PBC'], FFparams = FFparams )
lvec = PPU.params2lvec( )
if head is None:
head = GU.XSF_HEAD_DEFAULT
if newLJ:
FFLJ = computeLJ ( Rs, iZs, FFparams=FFparams )
GU.saveVecFieldXsf( 'FFLJ', FF, lvecEl, head = head )
if newEl:
FFel = computeCoulomb( Rs, Qs, FFel )
GU.saveVecFieldXsf( 'FFel', FF, lvecEl, head = head )
return FFLJ, FFel
"""
def relaxedScan3D( xTips, yTips, zTips ):
    """Relax the probe particle along vertical strokes over an (x, y) grid.

    For every (x, y) tip position a stroke of tip heights is relaxed by the
    C core; the results are stored reversed so that the first axis runs from
    low to high z (the stroke itself approaches from high z, zTips[::-1]).

    Returns:
        Fs, rPPs, rTips : arrays of shape (len(zTips), len(yTips),
        len(xTips), 3) with forces, relaxed probe positions and tip
        positions.
    """
    nstroke = len(zTips);
    rTip_ = np.zeros((nstroke,3))
    rPP_ = np.zeros((nstroke,3))
    F_ = np.zeros((nstroke,3))
    # Strokes go top-down: highest z first.
    rTip_[:,2] = zTips[::-1]
    # NOTE(review): the names are swapped relative to their contents
    # (nx = len(zTips), nz = len(xTips)); the indexing below is consistent
    # with these sizes, but the naming is misleading -- worth renaming.
    nx = len(zTips); ny = len(yTips ); nz = len(xTips);
    Fs = np.zeros( ( nx,ny,nz,3 ) );
    rPPs = np.zeros( ( nx,ny,nz,3 ) );
    rTips = np.zeros( ( nx,ny,nz,3 ) );
    for ix,x in enumerate( xTips ):
        print "relax ix:", ix
        rTip_[:,0] = x
        for iy,y in enumerate( yTips ):
            rTip_[:,1] = y
            # relaxTipStroke presumably fills rPP_ and F_ in place and
            # returns an iteration count -- TODO confirm; `itrav` (average
            # iterations per stroke point) is computed but never used.
            itrav = core.relaxTipStroke( rTip_, rPP_, F_ ) / float( nstroke )
            # Un-reverse the stroke so index 0 is the lowest z.
            Fs [:,iy,ix,:] = F_ [::-1,:]
            rPPs [:,iy,ix,:] = rPP_ [::-1,:]
            rTips[:,iy,ix,:] = rTip_[::-1,:]
    return Fs,rPPs,rTips
def Gauss(Evib, E0, w):
    """Unnormalized Gaussian of width ``w`` centered at ``E0``, evaluated at ``Evib``."""
    z = (Evib - E0) / w
    return np.exp(-0.5 * z * z)
def symGauss( Evib, E0, w):
    """Antisymmetrized pair of Gaussians: G(E0) - G(-E0), odd in ``E0``."""
    positive_peak = Gauss(Evib, E0, w)
    mirrored_peak = Gauss(Evib, -E0, w)
    return positive_peak - mirrored_peak
| StarcoderdataPython |
3220078 | """Prepare PASCAL VOC datasets"""
import os
import shutil
import argparse
import tarfile
from encoding.utils import download, mkdir
_TARGET_DIR = os.path.expanduser('../dataset/')
def parse_args():
    """Parse command-line options for the PASCAL VOC preparation script.

    Returns:
        argparse.Namespace with ``download_dir`` (str or None) and
        ``overwrite`` (bool) attributes.
    """
    parser = argparse.ArgumentParser(
        description='Initialize PASCAL VOC dataset.',
        epilog='Example: python prepare_pascal.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--download-dir', type=str, help='dataset directory on disk')
    # Fix: help text previously read "corrputed".
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    args = parser.parse_args()
    return args
def download_voc(path, overwrite=False):
    """Download the VOC2012 trainval archive into ``path`` and unpack it.

    The tarball is fetched into ``path/downloads`` (with SHA-1 verification
    via the ``download`` helper) and extracted under ``path``.
    """
    voc_archives = [
        ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
         '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    archive_dir = os.path.join(path, 'downloads')
    mkdir(archive_dir)
    for url, sha1 in voc_archives:
        archive = download(url, path=archive_dir, overwrite=overwrite, sha1_hash=sha1)
        # NOTE(review): extractall trusts member paths from the archive;
        # on Python 3.12+ consider the `filter=` argument.
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
def download_aug(path, overwrite=False):
    """Download the SBD (Berkeley) augmented annotations and set up VOCaug.

    Fetches and extracts the benchmark tarball into ``path``, renames
    ``benchmark_RELEASE`` to ``VOCaug``, and concatenates the train/val
    split lists into ``VOCaug/dataset/trainval.txt``.
    """
    aug_archives = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    archive_dir = os.path.join(path, 'downloads')
    mkdir(archive_dir)
    for url, sha1 in aug_archives:
        archive = download(url, path=archive_dir, overwrite=overwrite, sha1_hash=sha1)
        with tarfile.open(archive) as tar:
            tar.extractall(path=path)
    shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                os.path.join(path, 'VOCaug'))
    split_lists = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
    # Concatenate train + val into a single trainval split list.
    with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
        for split in split_lists:
            with open(os.path.join(path, split)) as infile:
                outfile.writelines(infile)
if __name__ == '__main__':
    args = parse_args()
    # Fix: the parsed CLI options were previously ignored (the script always
    # used _TARGET_DIR and overwrite=False regardless of the flags). Defaults
    # are unchanged when no flags are given.
    target_dir = args.download_dir if args.download_dir else _TARGET_DIR
    mkdir(os.path.expanduser('~/.encoding/datasets'))
    download_voc(target_dir, overwrite=args.overwrite)
    download_aug(target_dir, overwrite=args.overwrite)
| StarcoderdataPython |
6671519 | <filename>bin/blank2comma.py
import os
import pdb
import scipy.io as scio  # NOTE(review): pdb and scipy.io appear unused here -- confirm before removing
import numpy as np

# Convert whitespace-delimited tracker result files (one bounding box per
# line) from base_path into comma-separated files under save_path.
base_path = '/home/david/Tracking/DataSets/pysot-toolkit/results/UAV/COT'
files = os.listdir(base_path)
save_path = '/home/david/Tracking/DataSets/pysot-toolkit/results/UAV/CCOT'
# exist_ok avoids the check-then-create race of the original
# `if not os.path.exists(...): os.makedirs(...)` pattern.
os.makedirs(save_path, exist_ok=True)
for f in files:
    f_path = os.path.join(base_path, f)
    # Fix: np.loadtxt collapses a single-row file to a 1-D array, which made
    # the per-row join below iterate over scalars and crash; atleast_2d
    # guarantees one row per line.
    result = np.atleast_2d(np.loadtxt(f_path))
    new_save_path = os.path.join(save_path, f)
    with open(new_save_path, "w") as fout:
        for row in result:
            fout.write(','.join(str(value) for value in row) + '\n')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.