id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3292735 | FTP_PATH = 'ftp.1000genomes.ebi.ac.uk'
# Base directory on the 1000 Genomes FTP host holding per-sample phase-3 data.
BASE_PATH = '/vol1/ftp/phase3/data'
# Sub-directories containing exome vs. whole-genome alignments.
EXOME_ALIGNMENT_DIR = 'exome_alignment'
ALIGNMENT_DIR = 'alignment'
# Alignment format used when the caller does not specify one.
DEFAULT_FILE_FORMAT = 'bam'
# Local folder where downloaded sample files are written.
OUTPUT_FOLDER = 'samples'
# Maps each alignment format to its (data, index) file extensions.
FORMATS = dict(bam=('bam', 'bai'), cram=('cram', 'crai'))
| StarcoderdataPython |
3396963 | #!/usr/bin/python
'''
Author:
<NAME>
<EMAIL>
www.bitforestinfo.com
Description:
This Script is Part of https://github.com/surajsinghbisht054/reinforcement_learning_scripts Project.
I Wrote this script just for Educational and Practise Purpose Only.
==================================================================================
Please Don't Remove Author Initials
==================================================================================
You Can Use These Codes For Anything But You Just Have To Mention Author Initial With
The Codes. And yes! This is Compulsory.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Creating Simple Treasur Hunt Game Based On Reinforcement Learning
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''
import time
import random
import sys
# Configurations
# --- Q-learning hyper-parameters ---
EPSILON = 0.9  # greedy rate: probability of exploiting the Qtable instead of exploring
ACTIONS = ['left', 'right']  # action space; index 0 = left, index 1 = right
ALPHA = 0.1 # learning rate
GAMMA = 0.9 # discount factor
EPISODES = 20  # number of training episodes
Debug = False  # True -> verbose per-step trace instead of the in-place animation
BIGREWARD = 1.0  # reward granted on reaching the treasure
SMALLREWARD = 0.0 # small reward for a plain rightward move
TIMESLEEP = 0.1  # seconds to pause between animation frames
CUTREWARD = 11.0  # scale factor: final reward = BIGREWARD / steps * CUTREWARD
# Game Track: 12 empty cells (indices 0-11) plus the treasure appended at index 12
TRACK = ['-','-','-','-','-','-','-','-','-','-','-','-']
TRACK.append('#') # gold in the end
# show debug status
if Debug: print "[*] Debug Variable Is True"
# print track
if Debug: print "[*] Track : ", ''.join(TRACK)
# Q Learning Decision Table
# Q-learning decision table: one row per track position (0-11);
# columns hold the estimated value of moving [left, right] from that cell.
Qtable = [
    # left, right
    [ 0, 0 ], # 0
    [ 0, 0 ], # 1
    [ 0, 0 ], # 2
    [ 0, 0 ], # 3
    [ 0, 0 ], # 4
    [ 0, 0 ], # 5
    [ 0, 0 ], # 6
    [ 0, 0 ], # 7
    [ 0, 0 ], # 8
    [ 0, 0 ], # 9
    [ 0, 0 ], # 10
    [ 0, 0 ], # 11
]
# Reserved for a best-table snapshot; never written in this script.
BestQTable = {}
# choose action
# choose action
def choose_action(playerstate):
    """Pick 'left' or 'right' for the given track position.

    With probability EPSILON the action with the larger Q-value is taken
    (exploitation); otherwise a random action is taken (exploration).
    """
    # Get Qtable [left, right] values for this position
    tmprow = Qtable[playerstate]
    # explore with probability (1 - EPSILON)
    if (random.uniform(0.0, 1.0) > EPSILON):
        # choice action randomly
        action = random.choice(ACTIONS)
    else:
        # exploit: pick the action with the larger Q-value
        # (ties resolve to 'right', nudging the agent toward the goal)
        if tmprow[0]>tmprow[1]:
            action = ACTIONS[0]
        else:
            action = ACTIONS[1]
    # print debugging informations
    if Debug: print "[*] Player State : {} | Action Choice : {} ".format(playerstate, action)
    return action
# take player action and return reward
# take player action and return reward
def take_action(playerstate, action, count):
    """Apply `action` at `playerstate`; return (newstate, reward, over).

    `count` is the number of steps taken so far; the terminal reward is
    scaled down the longer the agent took to reach the treasure.
    """
    over = False
    reward = -0.1  # default penalty, applies to every leftward move
    # if action=left
    if action==ACTIONS[0]:
        # if player back to starting point
        if playerstate==0:
            newstate = playerstate # reach starting point
        else:
            newstate = playerstate - 1
    # action=right
    else:
        newstate = playerstate + 1
        # win: moving right from cell 11 lands on the treasure (index 12)
        if playerstate==11:
            over = True # Game Over
            # reward shrinks with the number of steps taken, rescaled by CUTREWARD
            reward = (float(BIGREWARD)/float(count))*CUTREWARD # Give Reward
        else:
            reward = SMALLREWARD # small reward for right, ComeOn! Encourage Agent To Solve it Quickly
    if Debug: print "[*] Take Action > Player At {} | Reward {} | End {}".format(newstate, reward, over)
    return (newstate, reward, over)
# print details over terminal
# print details over terminal
def print_updates(S, episode, count, end=False):
    """Render the track with the player ('o') at position S.

    In debug mode each frame is printed on its own line; otherwise the same
    terminal line is redrawn in place. When `end` is True the episode
    summary is printed as well.
    """
    tmp = TRACK[:]  # copy so the template track is never mutated
    tmp[S]='o'  # mark the player position
    if Debug: print "{}".format(''.join(tmp))
    if not Debug:
        # redraw on the same line using a carriage return
        sys.stdout.write("\r{} ".format(''.join(tmp)))
        sys.stdout.flush()
    if end:
        print "\n[*] Numbers of Episode : {}\r".format(episode)
        print "[*] Numbers of Steps : {}".format(count)
    # to make this process more cool
    time.sleep(TIMESLEEP)
# main function
# main function
def main():
    """Run EPISODES rounds of tabular Q-learning on the treasure track."""
    # Iterate Episode
    for episode in range(EPISODES):
        # Intialize variable at Every new iteration
        PS = 0 # Player State In Game
        count = 0 # steps counter
        END = False # no end
        print_updates(PS, episode, count)
        while not END:
            # choose action to take
            AC = choose_action(PS)
            # check feed back
            NS, RW, END = take_action(PS, AC, count)
            # update
            print_updates(NS, episode, count, end=END)
            # Q-learning target: terminal steps use the raw reward,
            # otherwise bootstrap from the best value of the next state.
            if END:
                qtarget = RW
            else:
                qtarget = RW + (GAMMA * max(Qtable[NS]))
            # previous value
            prevalue = Qtable[PS][ACTIONS.index(AC)]
            # blend the old estimate with the new target (learning rate ALPHA)
            Qtable[PS][ACTIONS.index(AC)] = (((1-ALPHA)*prevalue)+(ALPHA*qtarget))
            count += 1
            PS = NS # new playerstate
    return
if __name__=='__main__':
    main()
    # dump the learned Q-table as a small aligned text table
    print "\n\n{}\t |\t {}\t |\t {}\n".format(str("No.").rjust(3), 'left'.rjust(18), 'right'.rjust(18))
    for no, row in enumerate(Qtable):
        col1, col2 = row[0], row[1]
        print "{}\t |\t {}\t |\t {}".format(str(no).rjust(3), str(col1).ljust(18), str(col2).ljust(18))
| StarcoderdataPython |
3980 | <filename>appliance/src/ufw_interface.py
#!/usr/bin/env python
#shamelessy stolen from: https://gitlab.com/dhj/easyufw
# A thin wrapper over the thin wrapper that is ufw
# Usage:
# import easyufw as ufw
# ufw.disable() # disable firewall
# ufw.enable() # enable firewall
# ufw.allow() # default allow -- allow all
# ufw.allow(22) # allow port 22, any protocol
# ufw.allow(22,'tcp') # allow port 22, tcp protocol
# ufw.allow('22/tcp') # allow port 22, tcp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.deny() # default deny -- deny all
# ufw.deny(22,'tcp') # deny port 22, tcp protocol
# ufw.delete(22) # delete rules referencing port 22
# ufw.reset() # restore defaults
# ufw.status() # return status string (default verbose=True)
# ufw.run("allow 22") # directly run command as if from command line
import ufw.frontend
import ufw.common
import gettext
# ufw identifies itself by program name both in parsed argv and for i18n.
progName = ufw.common.programName
gettext.install(progName)#, unicode=True) # for i18n; fixes '_' not defined
# Frontend drives ufw for real (no dry-run); backend exposes the rule list.
ui = ufw.frontend.UFWFrontend(False) # no dryrun -- do it live
backend = ui.backend
parse_command = ufw.frontend.parse_command
def _parse(actionstr):
    """Parse a command string such as "allow 22", "reset" or "default allow".

    Builds a fake argv (program name + whitespace-split tokens) and feeds
    it through ufw's own command parser.
    """
    fake_argv = [progName] + actionstr.split(' ')
    return parse_command(fake_argv)
def run(actionstr, force=False):
    """Execute a ufw command string, optionally forcing confirmation."""
    parsed = _parse(actionstr)
    data = parsed.data
    return ui.do_action(
        parsed.action,
        data.get('rule', ''),    # commands like "reset" carry no rule
        data.get('iptype', ''),
        force,
    )
def reset(force=True):
    # restore ufw's factory defaults (forced by default to skip the prompt)
    run('reset',force=force)
def enable():
    # turn the firewall on
    ui.set_enabled(True)
def disable():
    # turn the firewall off
    ui.set_enabled(False)
def allow(port=None, protocol=None):
    """Allow traffic; with no arguments, set the default policy to allow.

    port: int port number; protocol: 'tcp' or 'udp' (None = all protocols).
    """
    spec = None
    if port is not None:
        spec = str(port)
        if protocol is not None:
            spec = spec + '/' + protocol
    _allow(spec)
def _allow(pp=None):
    """Run the allow command for a 'port[/protocol]' spec.

    None sets the default-allow policy; a bare port covers all protocols.
    """
    command = 'default allow' if pp is None else 'allow ' + pp
    run(command)
def deny(port=None, protocol=None):
    """Deny traffic; with no arguments, set the default policy to deny.

    port: int port number; protocol: 'tcp' or 'udp' (None = all protocols).
    """
    spec = None
    if port is not None:
        spec = str(port)
        if protocol is not None:
            spec = spec + '/' + protocol
    _deny(spec)
def _deny(pp=None):
    """Run the deny command for a 'port[/protocol]' spec (None = default deny)."""
    command = 'default deny' if pp is None else 'deny ' + pp
    run(command)
def delete(port):
    """Delete every rule that references the given destination port."""
    # rule numbering shifts after each delete, so re-scan until none match
    while True:
        if not _delete(port):
            break
def _delete(port):
    """Delete the first rule whose destination port equals `port`.

    Returns True when a rule was deleted (rule numbering changes, so the
    caller must re-enumerate), False when no rule matched.
    """
    for i, rule in enumerate(backend.get_rules()):
        try:
            rule_port = int(rule.dport)
        except (TypeError, ValueError):
            # dport may be None, empty, or a named/ranged port -- best-effort skip
            # (narrowed from a bare `except:` that also hid real bugs)
            continue
        if rule_port == port:
            # ufw rule numbers are 1-based
            run("delete " + str(i + 1), force=True)
            return True  # delete one rule; enumeration changes after delete
    return False
def status(verbose=True):
    """Return ufw's status report ('status verbose' by default)."""
    return run('status verbose' if verbose else 'status')
| StarcoderdataPython |
197600 | from mesher.cgal_mesher import ConstrainedDelaunayTriangulation as CDT
from mesher.cgal_mesher import (
Point, Mesher, make_conforming_delaunay, make_conforming_gabriel, Criteria
)
def main():
    """Build and refine a constrained Delaunay mesh of a polygonal domain."""
    cdt = CDT()

    def add_polygon(coords):
        # Insert every vertex first, then constrain each consecutive pair,
        # closing the loop back to the first vertex -- same call order as
        # inserting and constraining the points one by one.
        handles = [cdt.insert(Point(x, y)) for x, y in coords]
        for first, second in zip(handles, handles[1:] + handles[:1]):
            cdt.insert_constraint(first, second)

    # Outer boundary of the domain.
    add_polygon([
        (100, 269), (246, 269), (246, 223), (303, 223), (303, 298),
        (246, 298), (246, 338), (355, 338), (355, 519), (551, 519),
        (551, 445), (463, 445), (463, 377), (708, 377), (708, 229),
        (435, 229), (435, 100), (100, 100),
    ])
    # First interior hole.
    add_polygon([
        (349, 236), (370, 236), (370, 192), (403, 192), (403, 158),
        (349, 158),
    ])
    # Second interior hole.
    add_polygon([
        (501, 336), (533, 336), (519, 307), (484, 307),
    ])

    print("Number of vertices:", cdt.number_of_vertices())

    mesher = Mesher(cdt)
    # Seed points inside the holes mark regions to exclude from meshing.
    mesher.seeds_from([
        Point(505, 325),
        Point(379, 172),
    ])

    make_conforming_delaunay(cdt)
    print("Number of vertices:", cdt.number_of_vertices())
    make_conforming_gabriel(cdt)
    print("Number of vertices:", cdt.number_of_vertices())

    mesher.criteria = Criteria(
        aspect_bound=0.125,
        size_bound=30
    )
    mesher.refine_mesh()
    print("Number of vertices:", cdt.number_of_vertices())
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3209199 | <gh_stars>0
# Enter positive floating-point number as input
# Output an approximation of its square root
from math import sqrt
def squareroot(x):
    """Return the square root of x (thin wrapper around math.sqrt)."""
    root = sqrt(x)
    return root
# Prompt for a number and report its square root to one decimal place.
x = float(input("Please enter a positive number: "))
ans = (sqrt(x))
y = format(ans, ".1f")
print("The square root of %s is approx." % x, y)
# push to github | StarcoderdataPython |
3231405 | <filename>sktime/classification/interval_based/_cif.py
# -*- coding: utf-8 -*-
"""CIF classifier.
Interval based CIF classifier extracting catch22 features from random intervals.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["CanonicalIntervalForest"]
import math
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state
from sktime.base._base import _clone_estimator
from sktime.classification.base import BaseClassifier
from sktime.contrib.vector_classifiers._continuous_interval_tree import (
ContinuousIntervalTree,
_drcif_feature,
)
from sktime.transformations.panel.catch22 import Catch22
class CanonicalIntervalForest(BaseClassifier):
    """Canonical Interval Forest Classifier (CIF).

    Implementation of the interval based forest making use of the catch22 feature set
    on randomly selected intervals described in Middlehurst et al. (2020). [1]_

    Overview: Input "n" series with "d" dimensions of length "m".
    For each tree
        - Sample n_intervals intervals of random position and length
        - Subsample att_subsample_size catch22 or summary statistic attributes randomly
        - Randomly select dimension for each interval
        - Calculate attributes for each interval, concatenate to form new
          data set
        - Build decision tree on new data set
    Ensemble the trees with averaged probability estimates.

    Parameters
    ----------
    n_estimators : int, default=200
        Number of estimators to build for the ensemble.
    n_intervals : int or None, default=None
        Number of intervals to extract per tree, if None extracts
        (sqrt(series_length) * sqrt(n_dims)) intervals.
    att_subsample_size : int, default=8
        Number of catch22 or summary statistic attributes to subsample per tree.
    min_interval : int, default=3
        Minimum length of an interval.
    max_interval : int or None, default=None
        Maximum length of an interval, if None set to (series_length / 2).
    base_estimator : BaseEstimator or str, default="CIT"
        Base estimator for the ensemble, can be supplied a sklearn BaseEstimator or a
        string for suggested options.
        "DTC" uses the sklearn DecisionTreeClassifier using entropy as a splitting
        measure.
        "CIT" uses the sktime ContinuousIntervalTree, an implementation of the original
        tree used with embedded attribute processing for faster predictions.
    n_jobs : int, default=1
        The number of jobs to run in parallel for both `fit` and `predict`.
        ``-1`` means using all processors.
    random_state : int or None, default=None
        Seed for random number generation.

    Attributes
    ----------
    n_classes_ : int
        The number of classes.
    n_instances_ : int
        The number of train cases.
    n_dims_ : int
        The number of dimensions per case.
    series_length_ : int
        The length of each series.
    classes_ : list
        The classes labels.
    estimators_ : list of shape (n_estimators) of BaseEstimator
        The collections of estimators trained in fit.
    intervals_ : list of shape (n_estimators) of ndarray with shape (n_intervals,2)
        Stores indexes of each intervals start and end points for all classifiers.
    atts_ : list of shape (n_estimators) of array with shape (att_subsample_size)
        Attribute indexes of the subsampled catch22 or summary statistic for all
        classifiers.
    dims_ : list of shape (n_estimators) of array with shape (n_intervals)
        The dimension to extract attributes from each interval for all classifiers.

    See Also
    --------
    DrCIF

    Notes
    -----
    For the Java version, see
    `TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java
    /tsml/classifiers/interval_based/CIF.java>`_.

    References
    ----------
    .. [1] <NAME> and <NAME> and <NAME>. "The Canonical
       Interval Forest (CIF) Classifier for Time Series Classification."
       IEEE International Conference on Big Data 2020

    Examples
    --------
    >>> from sktime.classification.interval_based import CanonicalIntervalForest
    >>> from sktime.datasets import load_unit_test
    >>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
    >>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
    >>> clf = CanonicalIntervalForest(n_estimators=10)
    >>> clf.fit(X_train, y_train)
    CanonicalIntervalForest(...)
    >>> y_pred = clf.predict(X_test)
    """

    _tags = {
        "capability:multivariate": True,
        "capability:multithreading": True,
        "classifier_type": "interval",
    }

    def __init__(
        self,
        n_estimators=200,
        n_intervals=None,
        att_subsample_size=8,
        min_interval=3,
        max_interval=None,
        base_estimator="CIT",
        n_jobs=1,
        random_state=None,
    ):
        self.n_estimators = n_estimators
        self.n_intervals = n_intervals
        self.min_interval = min_interval
        self.max_interval = max_interval
        self.att_subsample_size = att_subsample_size

        self.base_estimator = base_estimator

        self.random_state = random_state
        self.n_jobs = n_jobs

        # The following set in method fit
        self.n_instances_ = 0
        self.n_dims_ = 0
        self.series_length_ = 0
        self.estimators_ = []
        self.intervals_ = []
        self.atts_ = []
        self.dims_ = []

        # Resolved/validated copies of the constructor arguments (set in _fit).
        self._n_intervals = n_intervals
        self._att_subsample_size = att_subsample_size
        self._max_interval = max_interval
        self._min_interval = min_interval
        self._base_estimator = base_estimator

        super(CanonicalIntervalForest, self).__init__()

    def _fit(self, X, y):
        self.n_instances_, self.n_dims_, self.series_length_ = X.shape

        # BUGFIX: check for a string before calling .lower() -- previously a
        # BaseEstimator instance raised AttributeError here instead of being
        # accepted by the isinstance branch below.
        if isinstance(self.base_estimator, str):
            if self.base_estimator.lower() == "dtc":
                self._base_estimator = DecisionTreeClassifier(criterion="entropy")
            elif self.base_estimator.lower() == "cit":
                self._base_estimator = ContinuousIntervalTree()
            else:
                raise ValueError("CIF invalid base estimator given.")
        elif isinstance(self.base_estimator, BaseEstimator):
            self._base_estimator = self.base_estimator
        else:
            raise ValueError("CIF invalid base estimator given.")

        if self.n_intervals is None:
            self._n_intervals = int(
                math.sqrt(self.series_length_) * math.sqrt(self.n_dims_)
            )
        if self._n_intervals <= 0:
            self._n_intervals = 1

        # There are only 25 candidate attributes (22 catch22 + 3 summary stats).
        if self.att_subsample_size > 25:
            self._att_subsample_size = 25

        if self.series_length_ <= self.min_interval:
            self._min_interval = self.series_length_ - 1
        elif self.min_interval < 3:
            self._min_interval = 3

        if self.max_interval is None:
            self._max_interval = self.series_length_ / 2
        if self._max_interval < self._min_interval:
            self._max_interval = self._min_interval

        fit = Parallel(n_jobs=self._threads_to_use)(
            delayed(self._fit_estimator)(
                X,
                y,
                i,
            )
            for i in range(self.n_estimators)
        )

        self.estimators_, self.intervals_, self.dims_, self.atts_ = zip(*fit)

        return self

    def _predict(self, X) -> np.ndarray:
        rng = check_random_state(self.random_state)
        # Break probability ties randomly so prediction is unbiased.
        return np.array(
            [
                self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
                for prob in self._predict_proba(X)
            ]
        )

    def _predict_proba(self, X) -> np.ndarray:
        n_test_instances, _, series_length = X.shape
        if series_length != self.series_length_:
            raise ValueError(
                "ERROR number of attributes in the train does not match "
                "that in the test data"
            )

        y_probas = Parallel(n_jobs=self._threads_to_use)(
            delayed(self._predict_proba_for_estimator)(
                X,
                self.estimators_[i],
                self.intervals_[i],
                self.dims_[i],
                self.atts_[i],
            )
            for i in range(self.n_estimators)
        )

        # Average the per-tree probability estimates.
        output = np.sum(y_probas, axis=0) / (
            np.ones(self.n_classes_) * self.n_estimators
        )
        return output

    def _fit_estimator(self, X, y, idx):
        c22 = Catch22(outlier_norm=True)

        # Derive a distinct, reproducible seed per tree from random_state.
        rs = 255 if self.random_state == 0 else self.random_state
        rs = (
            None
            if self.random_state is None
            else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max
        )
        rng = check_random_state(rs)

        transformed_x = np.empty(
            shape=(self._att_subsample_size * self._n_intervals, self.n_instances_),
            dtype=np.float32,
        )

        atts = rng.choice(25, self._att_subsample_size, replace=False)
        dims = rng.choice(self.n_dims_, self._n_intervals, replace=True)
        intervals = np.zeros((self._n_intervals, 2), dtype=int)

        # Find the random intervals for classifier i and concatenate
        # features
        for j in range(0, self._n_intervals):
            if rng.random() < 0.5:
                # pick the start first, then a valid length
                intervals[j][0] = rng.randint(
                    0, self.series_length_ - self._min_interval
                )
                len_range = min(
                    self.series_length_ - intervals[j][0],
                    self._max_interval,
                )
                length = (
                    rng.randint(0, len_range - self._min_interval) + self._min_interval
                    if len_range - self._min_interval > 0
                    else self._min_interval
                )
                intervals[j][1] = intervals[j][0] + length
            else:
                # pick the end first, then a valid length
                intervals[j][1] = (
                    rng.randint(0, self.series_length_ - self._min_interval)
                    + self._min_interval
                )
                len_range = min(intervals[j][1], self._max_interval)
                length = (
                    rng.randint(0, len_range - self._min_interval) + self._min_interval
                    if len_range - self._min_interval > 0
                    else self._min_interval
                )
                intervals[j][0] = intervals[j][1] - length

            for a in range(0, self._att_subsample_size):
                transformed_x[self._att_subsample_size * j + a] = _drcif_feature(
                    X, intervals[j], dims[j], atts[a], c22, case_id=j
                )

        tree = _clone_estimator(self._base_estimator, random_state=rs)
        transformed_x = transformed_x.T
        transformed_x = transformed_x.round(8)
        # BUGFIX: branch on the resolved estimator, not the raw constructor
        # argument -- a lowercase "cit" or a ContinuousIntervalTree instance
        # previously fell into the zero-fill branch by mistake.
        if isinstance(self._base_estimator, ContinuousIntervalTree):
            # CIT handles NaNs natively; only collapse the infinities.
            transformed_x = np.nan_to_num(
                transformed_x, False, posinf=np.nan, neginf=np.nan
            )
        else:
            transformed_x = np.nan_to_num(transformed_x, False, 0, 0, 0)
        tree.fit(transformed_x, y)

        return [tree, intervals, dims, atts]

    def _predict_proba_for_estimator(self, X, classifier, intervals, dims, atts):
        c22 = Catch22(outlier_norm=True)
        if isinstance(self._base_estimator, ContinuousIntervalTree):
            return classifier._predict_proba_cif(X, c22, intervals, dims, atts)
        else:
            transformed_x = np.empty(
                shape=(self._att_subsample_size * self._n_intervals, X.shape[0]),
                dtype=np.float32,
            )

            for j in range(0, self._n_intervals):
                for a in range(0, self._att_subsample_size):
                    transformed_x[self._att_subsample_size * j + a] = _drcif_feature(
                        X, intervals[j], dims[j], atts[a], c22, case_id=j
                    )

            transformed_x = transformed_x.T
            # BUGFIX: ndarray.round() returns a new array; the result was
            # previously discarded, so test features were never rounded the
            # way the training features are in _fit_estimator.
            transformed_x = transformed_x.round(8)
            transformed_x = np.nan_to_num(transformed_x, False, 0, 0, 0)

            return classifier.predict_proba(transformed_x)

    def _temporal_importance_curves(self, normalise_time_points=False):
        if not isinstance(self._base_estimator, ContinuousIntervalTree):
            raise ValueError(
                "CIF base estimator for temporal importance curves must"
                " be ContinuousIntervalTree."
            )

        curves = np.zeros((25, self.n_dims_, self.series_length_))
        if normalise_time_points:
            counts = np.zeros((25, self.n_dims_, self.series_length_))

        for i, tree in enumerate(self.estimators_):
            splits, gains = tree.tree_node_splits_and_gain()

            for n, split in enumerate(splits):
                gain = gains[n]
                interval = int(split / self._att_subsample_size)
                att = self.atts_[i][int(split % self._att_subsample_size)]
                dim = self.dims_[i][interval]

                # accumulate the split gain over every time point the
                # interval covers
                for j in range(
                    self.intervals_[i][interval][0], self.intervals_[i][interval][1] + 1
                ):
                    curves[att][dim][j] += gain
                    if normalise_time_points:
                        counts[att][dim][j] += 1

        if normalise_time_points:
            # NOTE(review): time points never covered by a split divide by
            # zero here and yield nan/inf -- confirm intended upstream.
            counts = counts / self.n_estimators / self._n_intervals
            curves /= counts

        return curves

    @classmethod
    def get_test_params(cls):
        """Return testing parameter settings for the estimator.

        Returns
        -------
        params : dict or list of dict, default={}
            Parameters to create testing instances of the class.
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`.
        """
        params = {"n_estimators": 2, "n_intervals": 2, "att_subsample_size": 2}
        return params
| StarcoderdataPython |
3267321 | <gh_stars>1-10
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter('ignore')
from pylab import rcParams
rcParams['figure.figsize'] = 8, 5
import time
def timeConvert(x):
    """Convert an 'HH:MM' time string into minutes since midnight."""
    hours = int(x[0] + x[1])
    minutes = int(x[3] + x[4])
    return hours * 60 + minutes
def convert(x):
    """Map a struct_time to the Russian weekday abbreviation (Mon-Sat).

    Sunday (tm_wday == 6) has no timetable and yields None.
    """
    weekday_names = {
        0: 'пн',
        1: 'вт',
        2: 'ср',
        3: 'чт',
        4: 'пт',
        5: 'сб',
    }
    return weekday_names.get(x.tm_wday)
def weekNumber(x):
    """Return the zero-padded Monday-based week number ('%W') of a 'YYYY-MM-DD' date."""
    parsed = time.strptime(x, "%Y-%m-%d")
    return time.strftime("%W", parsed)
def dayNumber(x):
    """Return the zero-padded day-of-year ('%j') of a 'YYYY-MM-DD' date string."""
    parsed = time.strptime(x, "%Y-%m-%d")
    return time.strftime("%j", parsed)
def preparation():
    """Load data.csv and return a cleaned timetable DataFrame.

    Splits the raw schedule columns into subject/room/time fields and adds
    numeric helper columns (minutes since midnight, day/week numbers) used
    by the analysis tools. Column names are in Russian.
    """
    df = pd.read_csv('data.csv', sep='~', encoding='utf-8',
                     usecols=['ContingentUnitCourse', 'ContingentUnitName', 'DivisionsString', 'DivisionName', 'Start',
                              'End', 'EducatorAssignment', 'LocationsDisplayText', 'Subject'])
    df.columns = ['Start', 'End', 'Курс', 'Номер группы', 'Факультет1', 'Факультет2', 'Преподаватель', 'Адрес',
                  'Предмет']
    # 'Предмет' holds "subject, kind" -- split it into two columns.
    df['Вид занятия'] = df['Предмет'].str.split(',').str.get(1)
    df['Предмет'] = df['Предмет'].str.split(',').str.get(0)
    # Start/End hold "date time" -- split into day and clock-time parts.
    df['День'] = df['Start'].str.split(' ').str.get(0)
    df['Время начала'] = df['Start'].str.split(' ').str.get(1)
    df['Время окончания'] = df['End'].str.split(' ').str.get(1)
    # Faculty may come from either raw column; prefer the first when present.
    df['Факультет'] = df['Факультет1'].fillna(df['Факультет2'])
    # Address is "building part, building part, ..., room".
    df['Аудитория'] = df['Адрес'].str.split(',').str.get(-1)
    df['Корпус'] = df['Адрес'].str.split(',').str.get(0) + df['Адрес'].str.split(',').str.get(1)
    # Clock times as minutes since midnight (for interval comparisons).
    df['Начало(мин)'] = df['Время начала'].apply(timeConvert)
    df['Окончание(мин)'] = df['Время окончания'].apply(timeConvert)
    df['День недели'] = (df['День'].apply(time.strptime, args=('%Y-%m-%d',))).apply(convert)
    # Reorder rows Monday..Saturday (Sunday maps to None and is dropped).
    df = pd.concat([df[df['День недели'] == 'пн'], df[df['День недели'] == 'вт'], df[df['День недели'] == 'ср'],
                    df[df['День недели'] == 'чт'], df[df['День недели'] == 'пт'], df[df['День недели'] == 'сб']],
                   ignore_index=True)
    df['Day'] = df['День'].apply(time.strptime, args=('%Y-%m-%d',))
    df['Номер недели'] = df['День'].apply(weekNumber)
    df['Номер дня'] = df['День'].apply(dayNumber)
    df = df[
        ['Номер группы', 'Факультет', 'Курс', 'Преподаватель', 'Адрес', 'Корпус', 'Аудитория', 'Предмет', 'Вид занятия',
         'День', 'День недели', 'Номер дня', 'Номер недели', 'Время начала', 'Начало(мин)', 'Время окончания',
         'Окончание(мин)']]
    # Day-of-year as int so range filters can compare numerically.
    df['Номер дня'] = df['Номер дня'].apply(int)
    return df
# -----------------------------------------------------------------------------------------
# Список свободных аудиторий в определенный промежуток времени на факультете
# Список свободных аудиторий в определенный промежуток времени на факультете
def tool_1_2(corp, startDay, startTime, stopDay, stopTime):
    """Return classrooms in building `corp` free for the whole given span.

    startDay/stopDay are 'YYYY-MM-DD' strings; startTime/stopTime are
    'HH:MM' strings. A room is "free" when no lesson overlaps the span.
    """
    df = preparation()
    dff = df[df['Корпус'] == corp]
    # Convert the span boundaries to minutes-since-midnight / day-of-year.
    begin = int(startTime[0] + startTime[1]) * 60 + int(startTime[3] + startTime[4])
    end = int(stopTime[0] + stopTime[1]) * 60 + int(stopTime[3] + stopTime[4])
    startDayNumber = int(time.strftime("%j", time.strptime(startDay, "%Y-%m-%d")))
    stopDayNumber = int(time.strftime("%j", time.strptime(stopDay, "%Y-%m-%d")))
    classroomList = pd.unique(dff['Адрес'])
    # result[j] counts lessons that clash with the span for classroom j.
    result = np.zeros(len(classroomList))
    # Keep only lessons inside the [startDay, stopDay) window.
    dff = dff[(dff['Номер дня'] >= startDayNumber) & (dff['Номер дня'] < stopDayNumber)]
    def isEmpty(row):
        # Closure over `result`: bump the clash counter for the row's room.
        for j, room in enumerate(classroomList):
            if row['Адрес'] == room:
                if row['Номер дня'] == startDayNumber:
                    # first day: only lessons ending after the span start clash
                    if begin < row['Окончание(мин)']:
                        result[j] += 1
                elif row['Номер дня'] == stopDayNumber - 1:
                    # last day: only lessons starting before the span end clash
                    if end > row['Начало(мин)']:
                        result[j] += 1
                else:
                    # full days in between always clash
                    result[j] += 1
    dff.apply(lambda row: isEmpty(row), axis=1)
    print('Свободные аудитории с ', startTime, startDay, ' до ', stopTime, stopDay)
    # Collect rooms with zero clashes.
    x = np.array([])
    for i, room in enumerate(classroomList):
        if result[i] == 0:
            x = np.append(x, room)
    return x
# Расписание аудитории
def tool_1_3(number):
    """Return the timetable rows for classroom address `number`.

    Rows are ordered chronologically (day-of-year, then start minute) and
    returned as a raw numpy array of values.
    """
    timetable = preparation()
    selected = timetable[timetable['Адрес'] == number]
    ordered = selected.sort_values(
        by=['Номер дня', 'Начало(мин)'], ascending=[True, True]
    )
    return ordered.values
4836286 | #!/usr/bin/env python
import os
from argparse import ArgumentParser
from pprint import pprint
from cncframework import graph, parser
from cncframework.events.eventgraph import EventGraph
from cncframework.inverse import find_step_inverses, find_blame_candidates, blame_deadlocks
def pprint_inverses(graphData):
    """Print the inverse output functions for every step in the graph."""
    for (step, func) in graphData.stepFunctions.iteritems():
        print "Step {}:".format(step)
        pprint(find_step_inverses(func))
def main():
    """CLI entry point: compute inverse functions / blame from a CnC spec."""
    bin_name = os.environ['BIN_NAME'] or "CnCInverse"
    arg_parser = ArgumentParser(prog=bin_name,
                                description="Compute inverse output functions from CnC graph spec.")
    arg_parser.add_argument('specfile', help="CnC graph spec file")
    arg_parser.add_argument('--log', nargs='?', default=None, help="CnC debug log file")
    arg_parser.add_argument('--blame', nargs='?', default=None,
                            help="collection@tag or step@tag to blame")
    args = arg_parser.parse_args()
    # Parse graph spec
    graphAst = parser.cncGraphSpec.parseFile(args.specfile, parseAll=True)
    graphData = graph.CnCGraph("_", graphAst)
    # Construct the event graph if they give us a log file.
    event_graph = None
    if args.log:
        with open(args.log, 'r') as log:
            event_graph = EventGraph(log.readlines(), False, False)
    if not args.blame and not args.log:
        # nothing to blame and no log given, just print out the inverse
        # functions
        return pprint_inverses(graphData)
    if args.blame:
        # explicit blame target supplied on the command line
        print "Steps that could be blamed for {}:".format(args.blame)
        pprint(find_blame_candidates(args.blame, graphData, event_graph))
    else:
        # user gives us log without blame, do an "auto-blame"
        # i.e. we perform a blame on the set of all items with a get without a
        # put
        autoblames = blame_deadlocks(graphData, event_graph)
        print "Performing automatic blame on potentially deadlocked items from event log: {}".format(autoblames.keys())
        pprint(autoblames)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3291504 | <reponame>salam20-meet/meet2018y1final-proj
from PIL import Image
import turtle
import random
import math
# Window setup: 700x1050 play field.
screen = turtle.Screen()
screen_x = 700
screen_y = 1050
screen.setup(screen_x, screen_y)
player = turtle.Turtle()
# Scale the background image to the window size and register it with turtle
# (turtle only accepts GIF images, hence the re-save).
bg = Image.open('background.jpeg')
pix=bg.load()
bagr=bg.resize((700,1050),Image.ANTIALIAS)
bagr.save('thebackground.gif')
turtle.register_shape('thebackground.gif')
turtle.bgpic('thebackground.gif')
# Scale the player sprite down to 50x50 and use it as the turtle shape.
pl = Image.open('papernobg.gif')
pn=pl.resize((50,50),Image.ANTIALIAS)
pn.save('player.gif')
turtle.register_shape('player.gif')
screen.bgpic = ('thebackground.gif')
player.shape('player.gif')
player.penup()
# Start at the top-center of the screen.
player.goto(0,450)
# Extra turtle reused for drawing maze cells.
mazeCube = player.clone()
mazeCube.shape('square')
def move_left():
    """Shift the player 20 pixels to the left."""
    cur_x, cur_y = player.pos()
    player.goto(cur_x - 20, cur_y)

def move_right():
    """Shift the player 20 pixels to the right."""
    cur_x, cur_y = player.pos()
    player.goto(cur_x + 20, cur_y)

def move_down():
    """Drop the player 50 pixels down (one maze row)."""
    cur_x, cur_y = player.pos()
    player.goto(cur_x, cur_y - 50)
# Bind the arrow keys to the movement handlers and start listening.
turtle.onkeypress(move_left, "Left")
turtle.onkeypress(move_right, "Right")
turtle.onkeypress(move_down, "Down")
turtle.listen()
| StarcoderdataPython |
1676888 | <filename>tests/test_agg.py
from __future__ import annotations
import unittest
from typing import Callable
from apm import *
class AggregationsTest(unittest.TestCase):
    """Tests for the apm aggregation captures (Set/List/Count/Histogram/Sum)."""

    def test_agg_set(self):
        # Strings captured into a set: duplicates collapse.
        result = match([
            'foo',
            3,
            'bar',
            10,
            -4,
            'foo',
            'bar',
            17,
        ], Each(OneOf(InstanceOf(str) >> agg.Set('strings'), ...)))
        self.assertTrue(result)
        self.assertEqual(result.strings, {'foo', 'bar'})

    def test_agg_list(self):
        # Strings captured into a list: duplicates and order preserved.
        result = match([
            'foo',
            3,
            'bar',
            10,
            -4,
            'foo',
            'bar',
            17,
        ], Each(OneOf(InstanceOf(str) >> agg.List('strings'), ...)))
        self.assertTrue(result)
        self.assertEqual(result.strings, ['foo', 'bar', 'foo', 'bar'])

    def test_agg_count(self):
        # Each branch of the OneOf keeps its own counter.
        result = match([
            1,
            2,
            3.0,
            4,
            5.0,
        ], Each(OneOf(
            InstanceOf(int) >> agg.Count('integers'),
            InstanceOf(float) >> agg.Count('floats'),
        )))
        self.assertTrue(result)
        self.assertEqual(3, result.integers)
        self.assertEqual(2, result.floats)

    def test_agg_histogram(self):
        # Histogram maps each matched value to its occurrence count.
        result = match([
            'a', 'b', 'c', 'b', 'd', 'c', 'a', 'a', 'c', 'a'
        ], Each(_ >> agg.Histogram('histo')))
        self.assertTrue(result)
        self.assertEqual({
            'a': 4,
            'b': 2,
            'c': 3,
            'd': 1,
        }, result.histo)

    def test_agg_sum(self):
        # Sum of 1..9 equals the closed-form n*(n+1)/2 with n = 9.
        lower = 1
        upper = 10
        result = match(range(lower, upper), Each(_ >> agg.Sum('sum')))
        length = upper - lower
        self.assertTrue(result)
        self.assertEqual(length * (length + 1) // 2, result.sum)

    def test_agg_float_sum(self):
        # Numeric strings are coerced before summing.
        result = match(['1', '10.3'], Each(_ >> agg.Sum('sum')))
        self.assertTrue(result)
        self.assertEqual(11.3, result.sum)

    def test_some_more_complicated_pattern(self):
        # Recursively walk nested dicts, collecting every int into a set.
        class Recursive(Pattern):
            def __init__(self, p: Callable):
                self._pattern = p
            def match(self, value, *, ctx: MatchContext, strict: bool) -> MatchResult:
                # re-evaluate the pattern lazily to allow self-reference
                return ctx.match(value, self._pattern())
        numbers = agg.Set()
        pattern = OneOf(InstanceOf(int) >> numbers, EachItem(..., Recursive(lambda: pattern)))
        result = match({
            'a': {'b': 3},
            'c': 7,
            'd': {'e': {}},
            'f': {'g': {'h': 10, 'i': 7}},
        }, pattern)
        self.assertTrue(result)
        self.assertEqual({3, 7, 10}, numbers.value)

    def test_some_more_complicated_pattern_with_nothing_in_it(self):
        # Same walk, but capturing floats: none exist, so the set stays empty.
        class Recursive(Pattern):
            def __init__(self, p: Callable):
                self._pattern = p
            def match(self, value, *, ctx: MatchContext, strict: bool) -> MatchResult:
                return ctx.match(value, self._pattern())
        numbers = agg.Set()
        pattern = OneOf(InstanceOf(float) >> numbers, InstanceOf(int), EachItem(..., Recursive(lambda: pattern)))
        result = match({
            'a': {'b': 3},
            'c': 7,
            'd': {'e': {}},
            'f': {'g': {'h': 10, 'i': 7}},
        }, pattern)
        self.assertTrue(result)
        self.assertEqual(set(), numbers.value)
33785 | from django.conf.urls import url
from django.conf.urls import patterns
from django.views.generic import TemplateView
# Simple template view used as the URL target in this test URLconf.
view = TemplateView.as_view(template_name='dummy.html')
# NOTE: patterns() is the pre-Django-1.10 URLconf style.
urlpatterns = patterns('',
    url(r'^nl/foo/', view, name='not-translated'),
)
| StarcoderdataPython |
1798591 | <reponame>CDLUC3/counter-processor
import config
import json
from models import *
from peewee import *
from .report import Report
from .id_stat import IdStat
from .json_metadata import JsonMetadata
import datetime
import dateutil.parser
import io
import datetime
#import ipdb; ipdb.set_trace()
class JsonReport(Report):
    """Make a JSON report from the generic data report object this inherits from"""

    def output(self):
        """Write the full COUNTER-style report to <output_file>.json."""
        with io.open(f"{config.Config().output_file}.json", 'w', encoding='utf8') as jsonfile:
            head = self.header_dict()
            # one entry per identifier being reported on
            body = {'report-datasets': [self.dict_for_id(my_id) for my_id in self.ids_to_process ] }
            data = dict(list(head.items()) + list(body.items()))
            print('')
            print(f'Writing JSON report to {config.Config().output_file}.json')
            json.dump(data, jsonfile, ensure_ascii=False)
            # the indent makes it much easier to read, but makes the file much bigger sending across the wire
            # the indent is good for troubleshooting and reading to find problems and line numbers are useful to communicate
            # json.dump(data, jsonfile, indent=2, ensure_ascii=False)

    def header_dict(self):
        """Build the 'report-header' section, including exception notices."""
        # Always advertise that the submitted report body is gzip-compressed.
        compressed_dict = {
            'code': 69,
            'severity': 'warning',
            'message': 'Report is compressed using gzip',
            'help-url': 'https://github.com/datacite/sashimi',
            'data': 'usage data needs to be uncompressed'
        }
        # Flag partial data when the reporting month is not yet complete.
        if config.Config().month_complete():
            exception_dict = {}
        else:
            exception_dict = {
                'code': 3040,
                'severity': 'warning',
                'message': "partial data returned",
                'help-url': "String",
                'data': "usage data has not been processed for the entire reporting period"
            }
        head_dict = { 'report-header': {
            'report-name': "dataset report",
            'report-id': "DSR",
            'release': "rd1",
            'created': config.Config().last_day(),
            # TODO: DataCite Sashimi doesn't handle reports correctly, so have to put in fake creation dates
            # 'created': self.just_date(datetime.datetime.now()),
            'created-by': config.Config().platform,
            'report-attributes': [],
            'reporting-period':
                {
                    'begin-date': config.Config().start_date.strftime('%Y-%m-%d'),
                    'end-date': config.Config().last_day()
                },
            'report-filters': [],
            'exceptions': [ compressed_dict, exception_dict ]
            }
        }
        return head_dict

    def dict_for_id(self, my_id):
        """Takes a IdStat object, which is at the level of identifier"""
        # progress counter for the console output below
        self.ids_processed = self.ids_processed + 1
        print(f'{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")} {self.ids_processed}/{self.id_count} Calculating stats for {my_id}')
        id_stat = IdStat(my_id)
        meta = self.find_metadata_by_identifier(id_stat.identifier)
        js_meta = JsonMetadata(id_stat, meta)
        return js_meta.descriptive_dict()
| StarcoderdataPython |
3256731 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-04 19:14
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the `data_de_naixement` (date of
    birth) field from the `Alumne` model."""
    dependencies = [
        ('contactboard', '0002_mare_pare_a_tutors'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='alumne',
            name='data_de_naixement',
        ),
    ]
| StarcoderdataPython |
10102 | import json
import requests
from datetime import datetime
from playsound import playsound
# Today's date and current clock time (currently unused below).
tday=datetime.today().strftime('%Y-%m-%d')
right_now=datetime.today().strftime('%I-%M-%p')
# Fetch London prayer times as JSON.
# NOTE(review): the API key is hard-coded in the URL; move it to config/env.
response = requests.get("https://www.londonprayertimes.com/api/times/?format=json&key=0239f686-4423-408e-9a0c-7968a403d197&year=&month=")
data=response.json()
# NOTE(review): this lexically compares *every* value in the response
# (not just the asr field) against 'HH:MM' strings — presumably only the
# asr time should be tested; verify against the API's response schema.
for key,value in data.items():
    if value >= '03:30' and value < '06:00':
        print('It is asr time')
        #playsound('/home/danish/Downloads/adan.mp3')
99502 | <reponame>fcchou/gan_models
"""
Train the Yelp photo generator
Example command:
python -m examples.yelp_photos.train_photo_generator --input-path=processed_photos.npy \
--epochs=5 --batch-size=64 --iters=75
"""
import argparse
import numpy as np
import keras
import matplotlib.pyplot as plt
from gan_models.model_layers import get_dcgan_discriminator, get_dcgan_generator, ConvParams
from gan_models.gan_trainer import GanTrainer
from gan_models.common import plot_generated_images
# Output image shape (H, W, channels) and generator latent-vector size.
PHOTO_SHAPE = (64, 64, 3)
G_INPUT_SIZE = 100
def get_dcgan_model():
    """Build the DCGAN generator/discriminator pair for 64x64 RGB photos.

    Returns:
        (generator, discriminator) Keras models. The generator upsamples a
        G_INPUT_SIZE latent vector through four stride-2 conv layers; the
        discriminator mirrors it with four stride-2 conv layers.
    """
    generator = get_dcgan_generator(
        input_dim=G_INPUT_SIZE,
        shape_layer1=(4, 4, 1024),
        conv_params_list=[
            ConvParams(filters=512, kernel_sizes=5, strides=2),
            ConvParams(filters=256, kernel_sizes=5, strides=2),
            ConvParams(filters=128, kernel_sizes=5, strides=2),
            # final layer emits one filter per output color channel
            ConvParams(filters=PHOTO_SHAPE[-1], kernel_sizes=5, strides=2),
        ],
        use_batch_norm=True,
    )
    # batch norm disabled here — presumably because of the WGAN-GP training
    # used in main() (TODO confirm)
    discriminator = get_dcgan_discriminator(
        input_shape=PHOTO_SHAPE,
        conv_params_list=[
            ConvParams(filters=64, kernel_sizes=5, strides=2),
            ConvParams(filters=128, kernel_sizes=5, strides=2),
            ConvParams(filters=256, kernel_sizes=5, strides=2),
            ConvParams(filters=512, kernel_sizes=5, strides=2),
        ],
        use_batch_norm=False,
    )
    return generator, discriminator
def main():
    """CLI entry point: train the WGAN-GP on preprocessed Yelp photos and
    save sample plots plus model checkpoints after every iteration."""
    parser = argparse.ArgumentParser(description='Train GAN with Yelp photos')
    parser.add_argument('--input-path', required=True, help='Path to the preprocessed input data')
    parser.add_argument('--iters', type=int, required=True, help='Number of the training iterations')
    parser.add_argument('--epochs', type=int, required=True, help='Number of the epochs per iterations')
    parser.add_argument('--batch-size', type=int, required=True, help='Size of the training mini-batch')
    args = parser.parse_args()
    generator, discriminator = get_dcgan_model()
    model = GanTrainer(generator=generator, discriminator=discriminator)
    # WGAN with gradient penalty; discriminator trained 5x per generator step
    model.gan_compile(
        g_optimizer=keras.optimizers.Adam(lr=0.0001, beta_1=0.5, beta_2=0.9),
        d_optimizer=keras.optimizers.Adam(lr=0.0001, beta_1=0.5, beta_2=0.9),
        n_discriminator_training=5,
        is_wgan=True,
        wgan_gradient_lambda=10,
    )
    # Load the input and normalize pixel values to [0, 1]
    training_image = np.load(args.input_path).astype(np.float32) / 255.0
    for i in range(args.iters):
        model.gan_fit(training_image, epochs=args.epochs, batch_size=args.batch_size)
        # checkpoint: sample grid image + both model weights per iteration
        plot_generated_images(model.generator, G_INPUT_SIZE)
        plt.savefig('yelp_photo_gan{}.png'.format(i), dpi=600)
        model.generator.save('yelp_photo_g{}.hdf5'.format(i))
        model.discriminator.save('yelp_photo_d{}.hdf5'.format(i))
if __name__ == '__main__':
    main()
| StarcoderdataPython |
53245 | #!/usr/bin/env python
import cv2
import dlib
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from ros_face_recognition.srv import Face
import config
import face_api
# ROS service path for the face-detection service (Python 2 codebase).
_service = "/{}/faces".format(config.topic_name)
class ImageReader:
    """Subscribes to the camera topic, queries the face service for each
    frame, draws the detected faces and shows the annotated image."""
    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, self.process)
    def process(self, data):
        """Per-frame callback: convert the ROS image, overlay face boxes,
        and handle 'q' (quit) / 's' (save frame) key presses."""
        try:
            image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            image_h, image_w = image.shape[:2]
            rospy.wait_for_service(_service)
            try:
                faces = rospy.ServiceProxy(_service, Face)
                resp1 = faces()
                faces = resp1.faces
                for f in faces:
                    # service returns normalized (x, y, w, h); scale back to pixels
                    rect = dlib.rectangle(
                        int(f.x * image_w),
                        int(f.y * image_h),
                        int((f.x + f.w) * image_w),
                        int((f.y + f.h) * image_h),
                    )
                    face = face_api.Face(rect)
                    face.details["id"] = f.label
                    face.details["name"] = f.name
                    face.details["gender"] = f.gender
                    face.draw_face(image)
            except rospy.ServiceException, e:
                print "Service call failed: %s" % e
            cv2.imshow("image", image)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cv2.destroyAllWindows()
                rospy.signal_shutdown("q key pressed")
            elif key == ord('s'):
                cv2.imwrite("output.jpg", image)
        except CvBridgeError as e:
            rospy.logerr(e)
def main():
    """Initialize the ROS node, attach the image subscriber, and spin."""
    rospy.init_node(config.topic_name, anonymous=True)
    rospy.loginfo("Listening to images reader")
    ImageReader()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.logwarn("Shutting done ...")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3217253 | <filename>parkstay/cron.py
from datetime import date, timedelta
from django_cron import CronJobBase, Schedule
from parkstay.models import Booking
from parkstay.reports import outstanding_bookings
from parkstay.emails import send_booking_confirmation
from parkstay.utils import oracle_integration
class UnpaidBookingsReportCronJob(CronJobBase):
    """Nightly (01:00) cron job that emits the outstanding-bookings report."""
    RUN_AT_TIMES = ['01:00']
    schedule = Schedule(run_at_times=RUN_AT_TIMES)
    code = 'parkstay.unpaid_bookings_report'
    def do(self):
        outstanding_bookings()
class OracleIntegrationCronJob(CronJobBase):
    """Nightly (01:00) cron job pushing yesterday's data to Oracle."""
    RUN_AT_TIMES = ['01:00']
    schedule = Schedule(run_at_times=RUN_AT_TIMES)
    code = 'parkstay.oracle_integration'
    def do(self):
        # integrate up to and including yesterday; False = not a dry run
        # (TODO confirm the flag's meaning against oracle_integration)
        oracle_integration(str(date.today() - timedelta(days=1)), False)
class SendBookingsConfirmationCronJob(CronJobBase):
    """Cron job that e-mails booking confirmations for paid bookings.

    Runs every RUN_EVERY_MINS minutes. Bug fix: the interval was passed as
    ``run_at_times`` (django-cron expects that to be a list of 'HH:MM'
    strings), so the job was never scheduled at the intended interval; the
    integer interval belongs in ``run_every_mins``.
    """
    RUN_EVERY_MINS = 5
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
    code = 'parkstay.send_booking_confirmations'

    def do(self):
        # Reset the flag on bookings that are no longer paid so a fresh
        # confirmation is sent once payment completes again.
        for b in Booking.objects.all():
            if not b.paid and b.confirmation_sent:
                b.confirmation_sent = False
                b.save()
        # Send confirmations for paid bookings that have not had one yet.
        # (The original wrapped this in `try: ... except: raise`, a no-op.)
        for b in Booking.objects.filter(confirmation_sent=False):
            if b.paid:
                send_booking_confirmation(b)
| StarcoderdataPython |
# Gunicorn bind address: listen on all interfaces, port 80.
# NOTE(review): binding port 80 requires root or CAP_NET_BIND_SERVICE.
bind = "0.0.0.0:80"
| StarcoderdataPython |
117013 | <gh_stars>0
from __future__ import print_function
import torch
# import networkx as nx
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
    """Two-layer MLP Q-network: state vector in, one Q-value per action out."""

    def __init__(self, input_dim, hidden_dim, out_dim):
        super().__init__()
        self.linear_h = nn.Linear(input_dim, hidden_dim)
        self.linear_out = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        """Return Q-values for state(s) ``x``."""
        x = F.relu(self.linear_h(x))
        x = self.linear_out(x)
        return x

    @torch.no_grad()
    def greedy_action(self, x, valid_actions=None):
        """Return ``(action, q_value)`` for the highest-valued action.

        Bug fix: ``torch.max(t)`` without ``dim`` returns a single tensor,
        so the original ``val, idx = torch.max(t)`` raised at runtime (and
        the dim-wise form returns (values, indices), the opposite of the
        original unpack order). Use ``torch.max(..., dim=-1)``.

        Assumes ``x`` is a single 1-D state vector — TODO confirm callers
        never pass batches. If ``valid_actions`` (a sequence of action
        indices) is given, the argmax is restricted to those actions and
        the returned action is one of them.
        """
        q = self.forward(x)
        if valid_actions is not None:
            val, idx = torch.max(q[valid_actions], dim=-1)
            return valid_actions[idx.item()], val
        val, idx = torch.max(q, dim=-1)
        return idx, val
| StarcoderdataPython |
4838370 | <filename>blogapp/admin.py
from django.contrib import admin
from blogapp.models import UserProfile
# Register your models here.
# Expose UserProfile in the Django admin with the default ModelAdmin.
admin.site.register(UserProfile)
| StarcoderdataPython |
149043 | <filename>tensorflow_graphics/rendering/tests/interpolate_test.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_graphics.rendering.tests.interpolate."""
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_graphics.rendering import interpolate
from tensorflow_graphics.rendering import rasterization_backend
from tensorflow_graphics.rendering.tests import rasterization_test_utils
from tensorflow_graphics.util import test_case
class RasterizeTest(test_case.TestCase):
  """Rasterize-and-interpolate test: renders a colored unit cube and
  compares against stored baseline images."""
  def setUp(self):
    super(RasterizeTest, self).setUp()
    self.test_data_directory = (
        'google3/research/vision/viscam/diffren/mesh/test_data/')
    # One cube: 8 vertices (batch dim of 1), 12 triangles.
    self.cube_vertex_positions = tf.constant(
        [[[-1, -1, 1], [-1, -1, -1], [-1, 1, -1], [-1, 1, 1], [1, -1, 1],
          [1, -1, -1], [1, 1, -1], [1, 1, 1]]],
        dtype=tf.float32)
    self.cube_triangles = tf.constant(
        [[0, 1, 2], [2, 3, 0], [3, 2, 6], [6, 7, 3], [7, 6, 5], [5, 4, 7],
         [4, 5, 1], [1, 0, 4], [5, 6, 2], [2, 1, 5], [7, 4, 0], [0, 3, 7]],
        dtype=tf.int32)
    self.image_width = 640
    self.image_height = 480
    # Full projection = perspective * look-at(view) matrix.
    perspective = rasterization_test_utils.make_perspective_matrix(
        self.image_width, self.image_height)
    projection = tf.matmul(
        perspective,
        rasterization_test_utils.make_look_at_matrix(
            camera_origin=(2.0, 3.0, 6.0)))
    # Add batch dimension.
    self.projection = tf.expand_dims(projection, axis=0)
  @parameterized.parameters(True, False)
  def test_renders_colored_cube(self, enable_cull_face):
    """Renders a simple colored cube."""
    num_layers = 1
    rasterized = rasterization_backend.rasterize(
        self.cube_vertex_positions,
        self.cube_triangles,
        self.projection, (self.image_width, self.image_height),
        num_layers=num_layers,
        enable_cull_face=enable_cull_face,
        backend=rasterization_backend.RasterizationBackends.CPU)
    # Color each vertex by its position mapped from [-1, 1] into [0, 1],
    # with a constant alpha channel of 1.
    vertex_rgb = (self.cube_vertex_positions * 0.5 + 0.5)
    vertex_rgba = tf.concat([vertex_rgb, tf.ones([1, 8, 1])], axis=-1)
    rendered = interpolate.interpolate_vertex_attribute(vertex_rgba,
                                                        rasterized).value
    # Baseline filename encodes whether back-face culling was enabled.
    face_culling_num = 1 if enable_cull_face else 0
    baseline_image = rasterization_test_utils.load_baseline_image(
        'Unlit_Cube_0_%d.png' % face_culling_num, rendered.shape)
    images_near, error_message = rasterization_test_utils.compare_images(
        self, baseline_image, rendered)
    self.assertTrue(images_near, msg=error_message)
# Test-module entry point.
if __name__ == '__main__':
  tf.test.main()
| StarcoderdataPython |
1779806 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement a simulator to mimic a dynamic order book environment
@author: ucaiado
Created on 10/24/2016
"""
import datetime
import logging
import os
import sys
import pandas as pd
import numpy as np
import pickle
import pprint
import gzip
import random
import time
from market_gym.config import DEBUG, root, s_log_file
sys.path.append('../../')
'''
Begin help functions
'''
def save_q_table(e, i_trial):
    '''
    Persist the primary agent's Q-table (value function) to disk
    :param e: Environment object. The order book
    :param i_trial: integer. id of the current trial
    '''
    agent = e.primary_agent
    try:
        q_table = agent.value_function
        # define the name of the files
        s_log_name = s_log_file.split('/')[-1].split('.')[0]
        s_fname = 'log/qtables/{}_valuefun_{}.dat'
        s_fname = s_fname.format(s_log_name, i_trial)
        # save data structures. Fixes: pickle wants a binary-mode file, and
        # the original left the handle open; the context manager closes it.
        with open(s_fname, 'wb') as fh:
            pickle.dump(q_table, fh)
        root.info('save_q_table(): Q-table saved successfully')
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        root.info('save_q_table(): No Q-table to be printed')
def save_log_info(e, i_trial):
    '''
    Persist the primary agent's log_info attribute as a gzipped pickle
    :param e: Environment object. The order book
    :param i_trial: integer. id of the current trial
    '''
    agent = e.primary_agent
    # agents that always print keep no log to persist
    if agent.b_print_always:
        return
    log_info = agent.log_info
    # define the name of the files
    s_log_name = s_log_file.split('/')[-1].split('.')[0]
    s_fname = 'log/loginfo/{}_loginfo_{}.pklz'
    s_fname = s_fname.format(s_log_name, i_trial)
    # read it back with: f = gzip.open(s_fname, 'rb')
    # context manager guarantees the gzip stream is closed (and its footer
    # written) even if pickling raises
    with gzip.open(s_fname, 'wb') as f:
        pickle.dump(log_info, f)
'''
End help functions
'''
class Simulator(object):
    """
    Simulates agents in a dynamic order book environment.
    (Python 2 codebase: print statements, xrange and raw_input below.)
    """
    def __init__(self, env, update_delay=1.0, display=True):
        '''
        Initiate a Simulator object. Save all parameters as attributes
        Environment Object. The Environment where the agent acts
        :param env: Environment Object. The environment to be simulated
        :param update_delay*: Float. Seconds elapsed to print out the book
        :param display*: Boolean. If should open a visualizer
        '''
        self.env = env
        self.quit = False
        self.start_time = None
        self.current_time = 0.0
        self.last_updated = 0.0
        self.update_delay = update_delay
        # choose the render callback once, up front: real rendering or no-op
        if display:
            self.func_render = self._render_env
        else:
            self.func_render = self._render_not
    def run(self, n_trials=1, n_sessions=1, f_tolerance=0.05, b_testing=False):
        '''
        Run the simulation to train the algorithm
        :param n_sessions*: integer. Number of different files to read
        :param n_trials*: integer. Iterations over the same files
        :param f_tolerance*: float. Minimum epsilon necessary to begin testing
        :param b_testing*: boolean. should use the value function already fit
        :param b_render*: boolean. If should render the environment
        '''
        if self.env.primary_agent:
            if not b_testing and self.env.primary_agent.learning:
                root.info('Simulator.run(): Starting training session !')
        for trial in xrange(n_trials):
            # switch from training to testing once epsilon has decayed
            if self.env.primary_agent:
                if self.env.primary_agent.learning:
                    # assumes epsilon decays to 0 (freeze policy updates)
                    if self.env.primary_agent.f_epsilon <= f_tolerance:
                        if not b_testing and self.env.primary_agent.learning:
                            s_err = 'Simulator.run(): Starting test session !'
                            root.info(s_err)
                            b_testing = True
                elif b_testing:
                    s_err = 'Simulator.run(): Starting test session !'
                    root.info(s_err)
            for i_sess in xrange(n_sessions):
                self.quit = False
                # positions carry over between sessions after the first one
                self.env.reset(testing=b_testing, carry_pos=i_sess > 0)
                self.env.step()  # give the first step
                # set variable to control rendering
                self.current_time = 0.0
                self.last_updated = 0.0
                self.start_time = 0.
                self.start_time = time.time()
                # iterate over the current dataset
                while True:
                    try:
                        # Update current time (simulation clock, not wall clock)
                        # self.current_time = time.time() - self.start_time
                        self.current_time = self.env.order_matching.f_time
                        # Update environment
                        f_time_step = self.current_time - self.last_updated
                        self.env.step()
                        # print information to be used by a visualization
                        if f_time_step >= self.update_delay:
                            # TODO: Print out the scenario to be visualized
                            self.func_render()
                            self.last_updated = self.current_time
                    except StopIteration:
                        # end of the current data file
                        self.quit = True
                    except KeyboardInterrupt:
                        # pause: dump state and let the user abort the trial
                        self.print_when_paused()
                        s_msg = 'Should quit this trial (y/n)?'
                        programPause = raw_input(s_msg)
                        if programPause == 'y':
                            self.quit = True
                        else:
                            continue
                    except:
                        print 'Unexpected error:', sys.exc_info()[0]
                        raise
                    finally:
                        # leave the loop on quit request or episode completion
                        if self.quit or self.env.done:
                            break
            # # save the current Q-table
            # save_q_table(self.env, trial+1)
            # log the end of the trial
            self.env.log_trial()
        # save the last Q-table
        if self.env.primary_agent:
            if not self.env.primary_agent.b_print_always:
                save_q_table(self.env, trial+1)
        # save log info
        # save_log_info(self.env, trial+1)
    def print_when_paused(self):
        '''
        Print information when a simulation is interupted by ctrl+c
        '''
        if self.env.primary_agent:
            agent = self.env.primary_agent
            s_time = self.env.order_matching.s_time
            f_duration = agent.log_info['duration']
            s_msg = '\n=============================\n'
            s_msg += '{}: PnL {:0.2f}, duration {:0.2f}'
            print s_msg.format(s_time, agent.f_pnl, f_duration)
            s_main = self.env.s_main_intrument
            s_hedge = self.env.primary_agent.risk_model.main_hedge
            # show up to five of the last opened positions per side/instrument
            for s_key in ['BID', 'ASK']:
                for s_instr in self.env.l_instrument:
                    l_aux = agent.d_trades[s_instr][s_key]
                    if len(l_aux) > 0:
                        s_err = '\nLast opened positions in {} {}:'
                        print s_err.format(s_key, s_instr)
                        for idx, (s_side, f_price, d_) in enumerate(l_aux):
                            print '   {} {}'.format(s_side, f_price)
                            if idx > 4:
                                break
            print '\nOpenPrices:'
            pprint.pprint(agent.current_open_price)
            print '\nPositions:'
            # calculate the current position (qBid - qAsk, plus disclosed)
            f_pos = agent.position[s_main]['qBid']
            f_pos -= agent.position[s_main]['qAsk']
            f_pos_discl = f_pos + agent.disclosed_position[s_main]['qBid']
            f_pos_discl -= agent.disclosed_position[s_main]['qAsk']
            print '   Pos: {} Disclosed: {}'.format(f_pos, f_pos_discl)
            print '\nBest Prices:'
            # agent's own best bid/ask per instrument, if any orders are live
            for s_instr in self.env.l_instrument:
                agent_orders = agent.d_order_tree[s_instr]
                if agent_orders['BID'].count > 0:
                    f_bid = agent_orders['BID'].max_key()
                    print '   Bid: {} {}'.format(s_instr,
                                                 f_bid)
                if agent_orders['ASK'].count > 0:
                    f_ask = agent_orders['ASK'].min_key()
                    print '   Ask: {} {}'.format(s_instr,
                                                 f_ask)
            for s_instr in agent.env.l_instrument:
                print '\n{} book:'.format(s_instr)
                print self.env.get_order_book(s_instr, b_rtn_dataframe=True)
            print '\n'
    def _render_env(self):
        '''
        Call the render method from environment
        '''
        self.env.render()
    def _render_not(self):
        '''
        Do nothing
        '''
        pass
| StarcoderdataPython |
3353493 | <filename>eslearn/GUI/easylearn_main_run.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Main GUI of the easylearn
# Author: <NAME> <<EMAIL>>
# License: MIT
"""
import sys
import os
import json

from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog, QInputDialog, QLineEdit
from PyQt5.QtGui import QIcon, QPixmap
from eslearn.stylesheets.PyQt5_stylesheets import PyQt5_stylesheets

from easylearn_main_gui import Ui_MainWindow
from easylearn_data_loading_run import EasylearnDataLoadingRun
class EasylearnMainGUI(QMainWindow, Ui_MainWindow):
    """This class is used to display the main GUI of the easylearn.
    """
    def __init__(self):
        QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        self.working_directory = ""
        self.textBrowser.setText("Hi~, I'm easylearn. I hope I can help you finish this project successfully\n")
        # Set appearance
        self.set_logo()
        self.set_skin()
        # Connecting to functions
        self.select_working_directory.triggered.connect(self.select_workingdir_fun)
        self.create_configuration_file.triggered.connect(self.initialize_configuration_fun)
        self.choose_configuration_file.triggered.connect(self.load_configuration_fun)
        self.data_loading.clicked.connect(self.data_loading_fun)
        self.feature_engineering.clicked.connect(self.feature_engineering_fun)
        self.machine_learning.clicked.connect(self.machine_learning_fun)
        self.model_evaluation.clicked.connect(self.model_evaluation_fun)
        self.statistical_analysis.clicked.connect(self.statistical_analysis_fun)
        self.run.clicked.connect(self.run_fun)
        self.quit.clicked.connect(self.closeEvent_button)
        # Skins: menu label -> stylesheet name
        self.skins = {"Dark": "style_Dark", "Black": "style_black", "DarkOrange": "style_DarkOrange",
                      "Gray": "style_gray", "Blue": "style_blue", "Navy": "style_navy", "Classic": "style_Classic"}
        self.actionDark.triggered.connect(self.set_skin)
        self.actionBlack.triggered.connect(self.set_skin)
        self.actionDarkOrange.triggered.connect(self.set_skin)
        self.actionGray.triggered.connect(self.set_skin)
        self.actionBlue.triggered.connect(self.set_skin)
        self.actionNavy.triggered.connect(self.set_skin)
        self.actionClassic.triggered.connect(self.set_skin)
    def set_logo(self):
        """Style the logo widget, window title/icon, and run/quit buttons."""
        qss_logo = """#logo{background-color: black;
                border: 2px solid white;
                border-radius: 20px;
                border-image: url('../logo/logo-lower.jpg');
                }

                #logo:hover {border-radius: 0px;}
                """
        self.logo.setStyleSheet(qss_logo)
        self.setWindowTitle('easylearn')
        self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
        # Run Icon
        self.run.setIcon(QIcon("../logo/run.png"));
        self.run.setIconSize(QPixmap("../logo/run.png").size());
        self.run.resize(QPixmap("../logo/run.png").size());
        # Close Icon
        self.quit.setIcon(QIcon("../logo/close.png"));
        self.quit.setIconSize(QPixmap("../logo/close.png").size());
        self.quit.resize(QPixmap("../logo/close.png").size());
    def set_skin(self):
        """Set a appearance for easylearn (skin, etc).
        Triggered by the skin menu actions; the sender's text selects the
        stylesheet, defaulting to the Dark skin.
        """
        sender = self.sender()
        if sender:
            if (sender.text() in list(self.skins.keys())):
                self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
                if sender.text() == "Classic":
                    self.setStyleSheet("")
            else:
                self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
        else:
            self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
    def select_workingdir_fun(self):
        """
        This function is used to select the working working_directory, then change directory to this directory.
        """
        # If has selected working working_directory previously, then I set it as initial working working_directory.
        if self.working_directory == "":
            self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", os.getcwd())
            self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
        else:
            self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", self.working_directory)
            self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
        # If already choose a working directory, change directory to the working directory
        if self.working_directory != "":
            os.chdir(self.working_directory)
    def initialize_configuration_fun(self):
        """Create file to save settings
        This function will add the configuration_file to self
        NOTE: QInputDialog and QLineEdit must be imported from
        PyQt5.QtWidgets for this method to work.
        """
        if self.working_directory != "":
            configuration_file_name, ok = QInputDialog.getText(self, "Initialize configuration", "Please name the configuration file:", QLineEdit.Normal, "configuration_file.json")
            self.configuration_file = os.path.join(self.working_directory, configuration_file_name)
            with open(self.configuration_file, 'w') as configuration_file:
                # empty skeleton with one section per workflow step
                config = {"data_loading": {}, "feature_engineering": {}, "machine_learning": {}, "model_evaluation": {}, "statistical_analysis": {}}
                config = json.dumps(config)
                configuration_file.write(config)
            config_message = "Configuration file is " + self.configuration_file
            self.textBrowser.setText(config_message)
        else:
            QMessageBox.warning( self, 'Warning', f'Please choose a working directory first! (press button at the top left corner)')
    def load_configuration_fun(self):
        """Load configuration
        """
        self.configuration_file, filetype = QFileDialog.getOpenFileName(self,
                                "Select configuration file",
                                os.getcwd(), "Text Files (*.json);;All Files (*);;")
        # Read configuration_file if already selected
        if self.configuration_file != "":
            # TODO: 解决中文编码的问题 (fix Chinese text encoding issue)
            with open(self.configuration_file, 'r') as config:
                self.configuration = config.read()
            # Check the configuration is valid JSON, then transform the configuration to dict
            # If the configuration is not valid JSON, then give configuration and configuration_file to ""
            try:
                self.configuration = json.loads(self.configuration)
                self.textBrowser.setText("Configuration file is " + self.configuration_file)
            except json.decoder.JSONDecodeError:
                QMessageBox.warning( self, 'Warning', f'{self.configuration_file} is not valid JSON')
                self.configuration_file = ""
        else:
            QMessageBox.warning( self, 'Warning', 'Configuration file was not selected')
    def data_loading_fun(self):
        """This function is called when data_loading button is clicked.

        Then, this function will process the data loading.
        """
        print('data_loading_fun')
        self.data_loading = EasylearnDataLoadingRun(self.working_directory)
        self.data_loading.show()
    def feature_engineering_fun(self):
        """This function is called when feature_engineering button is clicked.

        Then, this function will process the feature_engineering.
        (placeholder: only logs for now)
        """
        print('feature_engineering_fun')
    def machine_learning_fun(self):
        """This function is called when machine_learning button is clicked.

        Then, this function will process the data loading.
        (placeholder: only logs for now)
        """
        print('machine_learning_fun')
    def model_evaluation_fun(self):
        """This function is called when model_evaluation button is clicked.

        Then, this function will process the model evaluation.
        (placeholder: only logs for now)
        """
        print('model_evaluation_fun')
    def statistical_analysis_fun(self):
        """This function is called when data_loading button is clicked.

        Then, this function will process the data loading.
        (placeholder: only logs for now)
        """
        print('statistical_analysis_fun')
    def save_workflow_fun(self):
        """This function is called when data_loading button is clicked.

        Then, this function will process the data loading.
        (placeholder: only logs for now)
        """
        print('save_workflow_fun')
    def run_fun(self):
        """This function is called when data_loading button is clicked.

        Then, this function will process the data loading.
        (placeholder: only logs for now)
        """
        print('run_fun')
    def closeEvent(self, event):
        """This function is called when exit icon of the window is clicked.

        This function make sure the program quit safely.
        """
        # Set qss to make sure the QMessageBox can be seen
        reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
    def closeEvent_button(self, event):
        """This function is called when quit button is clicked.

        This function make sure the program quit safely.
        NOTE: QCoreApplication must be imported from PyQt5.QtCore for the
        quit call below to work.
        """
        # Set qss to make sure the QMessageBox can be seen
        reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            QCoreApplication.quit()
# Script entry point: show the main window and start the Qt event loop.
if __name__=='__main__':
    app=QApplication(sys.argv)
    md=EasylearnMainGUI()
    md.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
3293606 | import collections
import contextlib
import hashlib
import json
import os
from typing import Dict, Iterator, List, Optional
from .api import API
from . import buildroot
from . import host
from . import objectstore
from . import remoteloop
from .devices import Device, DeviceManager
from .inputs import Input
from .mounts import Mount, MountManager
from .sources import Source
from .util import osrelease
def cleanup(*objs):
    """Call cleanup() on every truthy object in objs; falsy/None are skipped.

    Bug fix: the original used `_ = map(...)`; in Python 3 `map` is lazy and
    the iterator was never consumed, so no cleanup() was ever executed.
    """
    for obj in filter(None, objs):
        obj.cleanup()
class BuildResult:
    """Outcome of running one stage: status, captured output and metadata."""

    def __init__(self, origin, returncode, output, metadata, error):
        # origin: the Stage that produced this result; its name/id/options
        # are copied for reporting. returncode 0 means success.
        self.name = origin.name
        self.id = origin.id
        self.options = origin.options
        self.success = returncode == 0
        self.output = output
        self.metadata = metadata
        self.error = error

    def as_dict(self):
        """Return a dict snapshot of this result.

        Returns a copy: the original returned the live instance ``__dict__``,
        so callers mutating the returned dict would silently corrupt the
        result object.
        """
        return dict(vars(self))
class Stage:
    """One pipeline stage: the stage module's info, its options, inputs,
    devices and mounts, plus the machinery to execute it inside a build
    root sandbox."""
    def __init__(self, info, source_options, build, base, options):
        self.info = info
        self.sources = source_options
        self.build = build
        self.base = base
        self.options = options
        self.checkpoint = False
        self.inputs = {}
        self.devices = {}
        self.mounts = {}
    @property
    def name(self):
        # the stage module's name, e.g. used to locate its binary
        return self.info.name
    @property
    def id(self):
        # Content-derived id: sha256 over name, build tree id, base tree id,
        # options, and (when present) the ids of all inputs — so two stages
        # with identical configuration hash to the same id.
        m = hashlib.sha256()
        m.update(json.dumps(self.name, sort_keys=True).encode())
        m.update(json.dumps(self.build, sort_keys=True).encode())
        m.update(json.dumps(self.base, sort_keys=True).encode())
        m.update(json.dumps(self.options, sort_keys=True).encode())
        if self.inputs:
            data = {n: i.id for n, i in self.inputs.items()}
            m.update(json.dumps(data, sort_keys=True).encode())
        return m.hexdigest()
    def add_input(self, name, info, origin, options=None):
        """Register an Input under `name` and return it."""
        ip = Input(name, info, origin, options or {})
        self.inputs[name] = ip
        return ip
    def add_device(self, name, info, parent, options):
        """Register a Device under `name` and return it."""
        dev = Device(name, info, parent, options)
        self.devices[name] = dev
        return dev
    def add_mount(self, name, info, device, target, options):
        """Register a Mount of `device` at `target` under `name`."""
        mount = Mount(name, info, device, target, options)
        self.mounts[name] = mount
        return mount
    def prepare_arguments(self, args, location):
        """Finalize the stage's argument JSON and write it to `location`."""
        args["options"] = self.options
        args["meta"] = {
            "id": self.id
        }

        # Root relative paths: since paths are different on the
        # host and in the container they need to be mapped to
        # their path within the container. For all items that
        # have registered roots, re-root their path entries here
        for name, root in args.get("paths", {}).items():
            group = args.get(name)
            if not group or not isinstance(group, dict):
                continue
            for item in group.values():
                path = item.get("path")
                if not path:
                    continue
                item["path"] = os.path.join(root, path)

        with open(location, "w", encoding="utf-8") as fp:
            json.dump(args, fp)
    def run(self, tree, runner, build_tree, store, monitor, libdir):
        """Execute this stage's binary inside a BuildRoot.

        The tree being built is bind-mounted read-write; the stage binary,
        mapped inputs and the argument file are bind-mounted read-only.
        Returns a BuildResult.
        """
        with contextlib.ExitStack() as cm:

            build_root = buildroot.BuildRoot(build_tree, runner, libdir, store.tmp)
            cm.enter_context(build_root)

            # if we have a build root, then also bind-mount the boot
            # directory from it, since it may contain efi binaries
            build_root.mount_boot = bool(self.build)

            # scratch area holding mapped inputs, mounts and the API files
            tmpdir = store.tempdir(prefix="buildroot-tmp-")
            tmpdir = cm.enter_context(tmpdir)

            inputs_tmpdir = os.path.join(tmpdir, "inputs")
            os.makedirs(inputs_tmpdir)
            inputs_mapped = "/run/osbuild/inputs"
            inputs = {}

            devices_mapped = "/dev"
            devices = {}

            mounts_tmpdir = os.path.join(tmpdir, "mounts")
            os.makedirs(mounts_tmpdir)
            mounts_mapped = "/run/osbuild/mounts"
            mounts = {}

            os.makedirs(os.path.join(tmpdir, "api"))
            args_path = os.path.join(tmpdir, "api", "arguments")

            # container-side view of the stage arguments; the `paths` entry
            # is used by prepare_arguments() to re-root item paths
            args = {
                "tree": "/run/osbuild/tree",
                "paths": {
                    "devices": devices_mapped,
                    "inputs": inputs_mapped,
                    "mounts": mounts_mapped,
                },
                "devices": devices,
                "inputs": inputs,
                "mounts": mounts,
            }

            ro_binds = [
                f"{self.info.path}:/run/osbuild/bin/{self.name}",
                f"{inputs_tmpdir}:{inputs_mapped}",
                f"{args_path}:/run/osbuild/api/arguments"
            ]

            binds = [
                os.fspath(tree) + ":/run/osbuild/tree",
                f"{mounts_tmpdir}:{mounts_mapped}"
            ]

            storeapi = objectstore.StoreServer(store)
            cm.enter_context(storeapi)

            mgr = host.ServiceManager(monitor=monitor)
            cm.enter_context(mgr)

            # materialize inputs, open devices, then perform mounts
            # (mounts depend on devices being available)
            for key, ip in self.inputs.items():
                data = ip.map(mgr, storeapi, inputs_tmpdir)
                inputs[key] = data

            devmgr = DeviceManager(mgr, build_root.dev, tree)
            for name, dev in self.devices.items():
                devices[name] = devmgr.open(dev)

            mntmgr = MountManager(devmgr, mounts_tmpdir)
            for key, mount in self.mounts.items():
                data = mntmgr.mount(mount)
                mounts[key] = data

            self.prepare_arguments(args, args_path)

            api = API()
            build_root.register_api(api)

            rls = remoteloop.LoopServer()
            build_root.register_api(rls)

            r = build_root.run([f"/run/osbuild/bin/{self.name}"],
                               monitor,
                               binds=binds,
                               readonly_binds=ro_binds)

            return BuildResult(self, r.returncode, r.output, api.metadata, api.error)
class Pipeline:
    def __init__(self, name: str, runner=None, build=None):
        # name: pipeline name; runner: runner used to execute stages;
        # build: id of the build-pipeline tree (None = build on the host).
        self.name = name
        self.build = build
        self.runner = runner
        self.stages = []
        self.assembler = None
        self.export = False
    @property
    def id(self):
        """
        Pipeline id: corresponds to the `id` of the last stage

        In contrast to `name` this identifies the pipeline via
        the tree, i.e. the content, it produces. Therefore two
        pipelines that produce the same `tree`, i.e. have the
        same exact stages and build pipeline, will have the
        same `id`; thus the `id`, in contrast to `name` does
        not uniquely identify a pipeline.

        In case a Pipeline has no stages, its `id` is `None`.
        """
        # empty pipeline -> no content -> no id
        return self.stages[-1].id if self.stages else None
    def add_stage(self, info, options, sources_options=None):
        """Append a new Stage based on the current pipeline id and return it.

        The new stage becomes the pipeline's last stage, so the assembler's
        base (if one is set) is updated to the new stage's id.
        """
        stage = Stage(info, sources_options, self.build,
                      self.id, options or {})
        self.stages.append(stage)
        if self.assembler:
            self.assembler.base = stage.id
        return stage
def build_stages(self, object_store, monitor, libdir):
results = {"success": True}
# We need a build tree for the stages below, which is either
# another tree that needs to be built with the build pipeline
# or the host file system if no build pipeline is specified
# NB: the very last level of nested build pipelines is always
# build on the host
if not self.build:
build_tree = objectstore.HostTree(object_store)
else:
build_tree = object_store.get(self.build)
if not build_tree:
raise AssertionError(f"build tree {self.build} not found")
# If there are no stages, just return build tree we just
# obtained and a new, clean `tree`
if not self.stages:
tree = object_store.new()
return results, build_tree, tree
# Check if the tree that we are supposed to build does
# already exist. If so, short-circuit here
tree = object_store.get(self.id)
if tree:
return results, build_tree, tree
# Not in the store yet, need to actually build it, but maybe
# an intermediate checkpoint exists: Find the last stage that
# already exists in the store and use that as the base.
tree = object_store.new()
base_idx = -1
for i in reversed(range(len(self.stages))):
if object_store.contains(self.stages[i].id):
tree.base = self.stages[i].id
base_idx = i
break
# If two run() calls race each-other, two trees will get built
# and it is nondeterministic which of them will end up
# referenced by the `tree_id` in the content store if they are
# both committed. However, after the call to commit all the
# trees will be based on the winner.
results["stages"] = []
for stage in self.stages[base_idx + 1:]:
with build_tree.read() as build_path, tree.write() as path:
monitor.stage(stage)
r = stage.run(path,
self.runner,
build_path,
object_store,
monitor,
libdir)
monitor.result(r)
results["stages"].append(r.as_dict())
if not r.success:
cleanup(build_tree, tree)
results["success"] = False
return results, None, None
# The content of the tree now corresponds to the stage that
# was build and this can can be identified via the id of it
tree.id = stage.id
if stage.checkpoint:
object_store.commit(tree, stage.id)
return results, build_tree, tree
def run(self, store, monitor, libdir, output_directory):
results = {"success": True}
monitor.begin(self)
# If the final result is already in the store, no need to attempt
# building it. Just fetch the cached information. If the associated
# tree exists, we return it as well, but we do not care if it is
# missing, since it is not a mandatory part of the result and would
# usually be needless overhead.
obj = store.get(self.id)
if not obj:
results, _, obj = self.build_stages(store, monitor, libdir)
if not results["success"]:
return results
if self.export and obj:
if output_directory:
obj.export(output_directory)
monitor.finish(results)
return results
class Manifest:
    """Representation of a pipeline and its sources"""
    def __init__(self):
        # Pipelines are kept in insertion order, keyed by their name.
        self.pipelines = collections.OrderedDict()
        self.sources: List[Source] = []
    def add_pipeline(self, name: str, runner: str, build: str) -> Pipeline:
        """Register and return a new pipeline; names must be unique."""
        if name in self.pipelines:
            raise ValueError(f"Name {name} already exists")
        pipeline = Pipeline(name, runner, build)
        self.pipelines[name] = pipeline
        return pipeline
    def add_source(self, info, items: List, options: Dict) -> Source:
        """Register and return a new source."""
        new_source = Source(info, items, options)
        self.sources.append(new_source)
        return new_source
    def download(self, store, monitor, libdir):
        """Download all sources into the store."""
        with host.ServiceManager(monitor=monitor) as manager:
            for source in self.sources:
                source.download(manager, store, libdir)
    def build(self, store, monitor, libdir, output_directory):
        """Run every pipeline in order; stop at the first failure."""
        results = {"success": True}
        for pipeline in self:
            result = pipeline.run(store, monitor, libdir, output_directory)
            results[pipeline.id] = result
            if not result["success"]:
                results["success"] = False
                return results
        return results
    def mark_checkpoints(self, checkpoints):
        """Flag stages whose id (or whose pipeline's name) is listed.

        Returns the set of checkpoint names that matched nothing.
        """
        remaining = set(checkpoints)
        for pipeline in self.pipelines.values():
            # A pipeline name checkpoints its last stage.
            if pipeline.name in remaining and pipeline.stages:
                pipeline.stages[-1].checkpoint = True
                remaining.discard(pipeline.name)
            for stage in pipeline.stages:
                if stage.id in remaining:
                    stage.checkpoint = True
                    remaining.remove(stage.id)
        return remaining
    def get(self, name_or_id: str) -> Optional[Pipeline]:
        """Look up a pipeline by its name or by its content id."""
        found = self.pipelines.get(name_or_id)
        if found is not None:
            return found
        return next(
            (candidate for candidate in self.pipelines.values()
             if candidate.id == name_or_id),
            None,
        )
    def __contains__(self, name_or_id: str) -> bool:
        return self.get(name_or_id) is not None
    def __getitem__(self, name_or_id: str) -> Pipeline:
        pipeline = self.get(name_or_id)
        if pipeline is None:
            raise KeyError(f"'{name_or_id}' not found")
        return pipeline
    def __iter__(self) -> Iterator[Pipeline]:
        return iter(self.pipelines.values())
def detect_host_runner():
    """Use os-release(5) to detect the runner for the host."""
    host_os = osrelease.describe_os(*osrelease.DEFAULT_PATHS)
    return f"org.osbuild.{host_os}"
| StarcoderdataPython |
1685785 | <reponame>Fireman730/tap-as-a-service
# Copyright (C) 2018 AT&T
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_taas.common import constants as taas_consts
from neutron_taas.extensions import taas as taas_ext
from neutron_taas.extensions import vlan_filter as vlan_filter_ext
from neutron_taas.tests.unit.extensions import test_taas as test_taas_ext
from oslo_utils import uuidutils
from webob import exc
from neutron.tests.unit.api.v2 import test_base as test_api_v2
import webtest
# Short aliases used throughout the tests below.
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class VlanFilterExtensionTestCase(test_taas_ext.TaasExtensionTestCase):
    """API tests for the `vlan_filter` attribute on tap flows."""
    def setUp(self):
        # NOTE(review): passing TaasExtensionTestCase (the parent) to
        # super() deliberately skips that class's own setUp and calls its
        # base instead, because this test redoes setup_extension() below
        # with the vlan_filter-extended attribute map — confirm intended.
        super(test_taas_ext.TaasExtensionTestCase, self).setUp()
        attr_map = taas_ext.RESOURCE_ATTRIBUTE_MAP
        # Extend the tap_flows resource with the vlan_filter attribute.
        attr_map['tap_flows'].update(
            vlan_filter_ext.EXTENDED_ATTRIBUTES_2_0['tap_flows'])
        self.setup_extension(
            'neutron_taas.extensions.taas.TaasPluginBase',
            'TAAS',
            taas_ext.Taas,
            'taas',
            plural_mappings={}
        )
    def _get_expected_tap_flow(self, data):
        # Augment the base expectation with the vlan_filter field
        # (defaults to None when the request did not include one).
        ret = super(VlanFilterExtensionTestCase,
                    self)._get_expected_tap_flow(data)
        ret['tap_flow'].update(
            vlan_filter=data['tap_flow'].get('vlan_filter', None))
        return ret
    def test_create_tap_flow_with_vlan_filter(self):
        """POST with a valid vlan_filter range returns 201 and echoes it."""
        tenant_id = _uuid()
        tap_flow_data = {
            'tenant_id': tenant_id,
            'name': 'MyTapFlow',
            'description': 'This is my tap flow',
            'direction': 'BOTH',
            'tap_service_id': _uuid(),
            'source_port': _uuid(),
            'project_id': tenant_id,
            'vlan_filter': taas_consts.VLAN_RANGE,
        }
        data = {'tap_flow': tap_flow_data}
        expected_data = self._get_expected_tap_flow(data)
        expected_ret_val = copy.copy(expected_data['tap_flow'])
        expected_ret_val.update({'id': _uuid()})
        # The plugin is a mock; make it return the expected payload.
        instance = self.plugin.return_value
        instance.create_tap_flow.return_value = expected_ret_val
        res = self.api.post(
            _get_path(test_taas_ext.TAP_FLOW_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)
        instance.create_tap_flow.assert_called_with(
            mock.ANY,
            tap_flow=expected_data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('tap_flow', res)
        self.assertEqual(expected_ret_val, res['tap_flow'])
    def test_create_tap_flow_invalid_vlan_filter_value(self):
        """POST with a malformed vlan_filter (trailing comma) is rejected."""
        tenant_id = _uuid()
        tap_flow_data = {
            'tenant_id': tenant_id,
            'name': 'MyTapFlow',
            'description': 'This is my tap flow',
            'direction': 'BOTH',
            'tap_service_id': _uuid(),
            'source_port': _uuid(),
            'project_id': tenant_id,
            'vlan_filter': '10-25,',
        }
        data = {'tap_flow': tap_flow_data}
        # webtest surfaces the 4xx response as an AppError.
        self.assertRaises(
            webtest.app.AppError,
            self.api.post,
            _get_path(test_taas_ext.TAP_FLOW_PATH, fmt=self.fmt),
            self.serialize(data),
            content_type='application/%s' % self.fmt)
| StarcoderdataPython |
3221036 | import requests
from models import Torrent
from provider import BaseProvider
from humanize import naturalsize
class YTS(BaseProvider):
    """Torrent provider backed by the YTS JSON API (/api/v2/list_movies)."""

    def __init__(self, base_url):
        super(YTS, self).__init__(base_url)

    def _list_movies(self, payload):
        """Query the list_movies endpoint and flatten the response.

        Returns one Torrent per (movie, quality) pair, or None when the
        HTTP request or JSON decoding fails (best-effort behaviour kept
        from the original implementation).
        """
        search_url = self.base_url + '/api/v2/list_movies.json'
        try:
            response = requests.get(
                search_url, params=payload, headers=self.headers).json()
        except Exception:
            # Network/JSON errors deliberately yield "no results".
            return
        torrents = []
        for movie in response['data']['movies']:
            for torrent in movie['torrents']:
                t = Torrent()
                t.title = movie['title_long'] + " " + torrent['quality']
                t.seeds = torrent['seeds']
                t.size = torrent['size']
                t.torrent_url = torrent['url']
                torrents.append(t)
        return torrents

    def search(self, query):
        """Search movies by title; returns a list of Torrents or None."""
        payload = {
            'query_term': query, 'sort': 'title', 'order': 'desc', 'set': '1'}
        return self._list_movies(payload)

    def get_top(self):
        """Return torrents for the 20 most recently added movies (or None)."""
        payload = {
            'sort': 'date_added',
            'order': 'desc',
            'set': '1',
            'limit': 20
        }
        return self._list_movies(payload)
| StarcoderdataPython |
157274 | import argparse
import numpy as np
def Read_Evaluation():
    '''Read all variant files listed in opts.pathList and record, for each
    variant, which samples carry it.

    Returns (sample_names, variantDict, varType):
      * sample_names: sample ids in path-list order (third-from-last
        component of each path);
      * variantDict: maps "CONTIG:POS:REF:ALT" to a 0/1 presence vector,
        one entry per sample;
      * varType: variant type, taken from the file name of the *last*
        listed path (prefix before the first underscore).
    '''
    variantDict = {}
    sample_names = []
    varType = ''
    # First pass over the path list: collect sample names and the type.
    with open(opts.pathList) as plist:
        for line in plist:
            varType = line.split('/')[-1].split('_')[0]
            sample_names.append(line.split('/')[-3])
    # Second pass: collect the file names (with ensures the handle is
    # closed; the original left this second open() dangling).
    with open(opts.pathList) as plist:
        filenames = [line.rstrip() for line in plist]
    for filename in filenames:
        with open(filename) as in_file:
            for line in in_file:
                row = line.rstrip().split('\t')
                if line.startswith('SAMPLE_ID'):
                    # Header row: remember the column positions.
                    samplename = row.index("SAMPLE_ID")
                    contig = row.index("CONTIG")
                    pos = row.index("POS")
                    ref = row.index("REF")
                    alt = row.index("ALT")
                else:
                    ID = row[contig] + ':' + row[pos] + ':' + row[ref] + ':' + row[alt]
                    name = row[samplename]
                    if ID not in variantDict:
                        # First sighting: start from an all-zero vector.
                        # dtype=int instead of the removed np.int alias.
                        variantDict[ID] = list(np.zeros((len(sample_names),), dtype=int))
                    variantDict[ID][sample_names.index(name)] = 1
    return sample_names, variantDict, varType
def WriteOutput(sample_names, varType, varDict):
    '''Write <outputPath>/<varType>Statistics.txt: one row per variant with
    the variant key (CONTIG, POS, REF, ALT), a 0/1 column per sample, the
    number of carrying samples (COUNT) and their fraction (FREQUENCY).'''
    names = ''.join(str(s) + '\t' for s in sample_names)
    # `with` guarantees the file is closed even if a row fails to format.
    with open(opts.outputPath + '/' + str(varType) + 'Statistics.txt', 'w') as outFile:
        outFile.write('CONTIG\tPOS\tREF\tALT\t' + names + 'COUNT' + '\t' + 'FREQUENCY' + '\n')
        # .items() instead of the Python-2-only .iteritems().
        for k, v in varDict.items():
            var = ''.join(str(val) + '\t' for val in v)
            idv = ''.join(str(el) + '\t' for el in str(k).split(':'))
            frequencyV = float(float(sum(v)) / float(len(v)))
            sumV = sum(v)
            outFile.write(idv + var + str(sumV) + '\t' + str(frequencyV) + '\n')
def main():
    """Parse command-line arguments, then build and write the statistics."""
    # Pass the help text as description=: the first positional argument of
    # ArgumentParser is `prog`, so the original string was silently used
    # as the program name and no description was shown.
    parser = argparse.ArgumentParser(
        description='Count the frequency of the variants of the run samples. Output is to stdout.')
    parser.add_argument('-pathList', '--pathList', help="paths of the variant files in analysis")
    parser.add_argument('-outputPath', '--outputPath', help="Path of the output file")
    global opts
    opts = parser.parse_args()
    [sample_names, variantDict, varType] = Read_Evaluation()
    WriteOutput(sample_names, varType, variantDict)


if __name__ == '__main__':
    # Guard the entry point so importing this module does not run it.
    main()
1658838 | <reponame>kuanpern/jupyterlab-snippets-multimenus
constants.physical_constants["Loschmidt constant (273.15 K, 100 kPa)"] | StarcoderdataPython |
1621181 | # This scripts initializes a Model_Manager object a Traffic_Model and Cost_Function object
# It uses as input a Demand_Assignment objects (demand per path and per time) to generate costs per path as
# a Path_Costs object
# This particular model uses a Static model and BPR Cost_Function model
import numpy as np
from copy import deepcopy
from Solvers.Solver_Class import Solver_class
from Model_Manager.Link_Model_Manager import Link_Model_Manager_class
from Java_Connection import Java_Connection
from copy import copy
import matplotlib.pyplot as plt
import os
import inspect
from Solvers.Path_Based_Frank_Wolfe_Solver import Path_Based_Frank_Wolfe_Solver
import csv
plt.rcParams.update({'font.size': 18})
# Flag that indicates whether we are doing decomposition or not
decompositio_flag = False
# Start the Java gateway; everything below requires a live JVM connection.
connection = Java_Connection()
if connection.pid is not None:
    # Contains local path to input configfile, for the three_links.xml network
    this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    configfile = os.path.join(this_folder, os.path.pardir, 'configfiles', 'seven_links.xml')
    coefficients = {}
    T = 1800  # Time horizon of interest
    sim_dt = 0.0  # Duration of one time_step for the traffic model
    sampling_dt = 300  # Duration of time_step for the solver, in this case it is equal to sim_dt
    model_manager = Link_Model_Manager_class(configfile, "static", connection.gateway, sim_dt, "bpr", coefficients)
    #Estimating bpr coefficients with beats
    num_links = model_manager.otm_api.scenario().get_num_links()
    avg_travel_time = np.zeros(num_links)
    num_coeff = 5
    for i in range(num_links):
        link_info = model_manager.otm_api.scenario().get_link_with_id(long(i))
        # Free-flow travel time: length [km] / free-flow speed [km/h].
        fft = (link_info.getFull_length() / 1000
               / link_info.get_ffspeed_kph())
        coefficients[long(i)] = np.zeros(num_coeff)
        # BPR polynomial: constant term = fft, 4th-order term = 0.15 * fft.
        coefficients[i][0] = copy(fft)
        coefficients[i][4] = copy(fft * 0.15)
    # If scenario.beast_api is none, it means the configfile provided was not valid for the particular traffic model type
    if model_manager.is_valid():
        num_steps = T / sampling_dt
        # Get the OD Matrix form Model Manager
        # OD Matrix can also be initialized from another source, as long as it fits the OD_Matrix class format
        OD_Matrix = model_manager.get_OD_Matrix(num_steps, sampling_dt)
        if OD_Matrix is not None:
            # Algorithm to use
            solver_algorithm = Path_Based_Frank_Wolfe_Solver
            scenario_solver = Solver_class(model_manager, solver_algorithm)
            assignment, solver_run_time = scenario_solver.Solver_function(T, sampling_dt, OD_Matrix, decompositio_flag)
            if assignment is None:
                print "Solver did not run"
            else:
                # Save assignment into a pickle file
                # this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
                # outputfile = os.path.join(this_folder, os.path.pardir, 'output', scenario_name+'.picle')
                path_costs = model_manager.evaluate(assignment, T, initial_state=None)
                # Distance to Nash
                print "\n"
                error_percentage = scenario_solver.distance_to_Nash(assignment, path_costs, sampling_dt, OD_Matrix)
                print "%.02f" % error_percentage, "% vehicles from equilibrium"
                print "\nSUCCESS!!"
    # kill jvm
    # connection.close()
1682557 | <reponame>0x20Man/Watcher3<filename>lib/hachoir/parser/misc/mapsforge_map.py
"""
Mapsforge map file parser (for version 3 files).
Author: <NAME>
References:
- http://code.google.com/p/mapsforge/wiki/SpecificationBinaryMapFile
- http://mapsforge.org/
"""
from hachoir.parser import Parser
from hachoir.field import (Bit, Bits, UInt8, UInt16, UInt32, Int32, UInt64, String,
PaddingBits,
Enum, Field, FieldSet, SeekableFieldSet, RootSeekableFieldSet)
from hachoir.core.endian import BIG_ENDIAN
# micro-degrees factor:
UDEG = float(1000 * 1000)
# Way coordinate encodings defined by the mapsforge format (bit value of
# the per-way `coord_encoding` flag).
CoordinateEncoding = {
    0: "single delta encoding",
    1: "double delta encoding",
}
class UIntVbe(Field):
    """Unsigned variable-byte encoded integer: 7 payload bits per byte,
    high bit set on every byte except the last (little-endian groups)."""
    def __init__(self, parent, name, description=None):
        Field.__init__(self, parent, name, description=description)
        value = 0
        size = 0
        while True:
            # hachoir addresses are in bits, hence the * 8.
            byteValue = self._parent.stream.readBytes(
                self.absolute_address + (size * 8), 1)[0]
            haveMoreData = (byteValue & 0x80)
            value = value | ((byteValue & 0x7f) << (size * 7))
            size += 1
            # Sanity bound against corrupt files/endless continuation bits.
            assert size < 100, "UIntVBE is too large"
            if not(haveMoreData):
                break
        self._size = size * 8
        self.createValue = lambda: value
class IntVbe(Field):
    """Signed variable-byte encoded integer: the first byte carries a sign
    bit (0x40) and 6 payload bits, following bytes carry 7 bits each."""
    def __init__(self, parent, name, description=None):
        Field.__init__(self, parent, name, description=description)
        value = 0
        size = 0
        shift = 0
        while True:
            # hachoir addresses are in bits, hence the * 8.
            byteValue = self._parent.stream.readBytes(
                self.absolute_address + (size * 8), 1)[0]
            haveMoreData = (byteValue & 0x80)
            if size == 0:
                # First byte: bit 6 is the sign, bits 0-5 are payload.
                isNegative = (byteValue & 0x40)
                value = (byteValue & 0x3f)
                shift += 6
            else:
                value = value | ((byteValue & 0x7f) << shift)
                shift += 7
            size += 1
            # Sanity bound against corrupt files/endless continuation bits.
            assert size < 100, "IntVBE is too large"
            if not(haveMoreData):
                break
        if isNegative:
            value *= -1
        self._size = size * 8
        self.createValue = lambda: value
class VbeString(FieldSet):
    """UTF-8 string prefixed by its byte length as a UIntVbe."""
    def createFields(self):
        yield UIntVbe(self, "length")
        yield String(self, "chars", self["length"].value, charset="UTF-8")
    def createDescription(self):
        return '(%d B) "%s"' % (self["length"].value, self["chars"].value)
class TagStringList(FieldSet):
    """List of tag strings: a 16-bit count followed by that many VbeStrings."""
    def createFields(self):
        yield UInt16(self, "num_tags")
        for i in range(self["num_tags"].value):
            yield VbeString(self, "tag[]")
    def createDescription(self):
        return "%d tag strings" % self["num_tags"].value
class ZoomIntervalCfg(FieldSet):
    """One zoom-interval configuration entry: zoom range plus the byte
    offset and size of the corresponding sub-file."""
    def createFields(self):
        yield UInt8(self, "base_zoom_level")
        yield UInt8(self, "min_zoom_level")
        yield UInt8(self, "max_zoom_level")
        yield UInt64(self, "subfile_start")
        yield UInt64(self, "subfile_size")
    def createDescription(self):
        return "zoom level around %d (%d - %d)" % (self["base_zoom_level"].value,
                                                   self["min_zoom_level"].value, self["max_zoom_level"].value)
class TileIndexEntry(FieldSet):
    """5-byte tile index entry: 1 water-tile flag bit + 39-bit tile offset."""
    def createFields(self):
        yield Bit(self, "is_water_tile")
        yield Bits(self, "offset", 39)
class TileZoomTable(FieldSet):
    """Per-zoom-level counters of POIs and ways inside one tile."""
    def createFields(self):
        yield UIntVbe(self, "num_pois")
        yield UIntVbe(self, "num_ways")
    def createDescription(self):
        return "%d POIs, %d ways" % (self["num_pois"].value, self["num_ways"].value)
class TileHeader(FieldSet):
    """Tile header: one zoom table entry per zoom level of the interval,
    followed by the offset of the first way inside the tile."""
    def __init__(self, parent, name, zoomIntervalCfg, **kw):
        FieldSet.__init__(self, parent, name, **kw)
        # Needed to know how many zoom levels this tile covers.
        self.zoomIntervalCfg = zoomIntervalCfg
    def createFields(self):
        numLevels = int(self.zoomIntervalCfg[
            "max_zoom_level"].value - self.zoomIntervalCfg["min_zoom_level"].value) + 1
        # Sanity bound against corrupt zoom configuration.
        assert(numLevels < 50)
        for i in range(numLevels):
            yield TileZoomTable(self, "zoom_table_entry[]")
        yield UIntVbe(self, "first_way_offset")
class POIData(FieldSet):
    """One point of interest: position delta, layer, tags and optional
    name / house number / elevation."""
    def createFields(self):
        if self["/have_debug"].value:
            # Debug builds prefix every POI with a fixed signature.
            yield String(self, "signature", 32)
            if not self['signature'].value.startswith("***POIStart"):
                raise ValueError
        # Position as micro-degree deltas relative to the tile origin.
        yield IntVbe(self, "lat_diff")
        yield IntVbe(self, "lon_diff")
        yield Bits(self, "layer", 4)
        yield Bits(self, "num_tags", 4)
        for i in range(self["num_tags"].value):
            yield UIntVbe(self, "tag_id[]")
        # Flag byte announcing which optional fields follow.
        yield Bit(self, "have_name")
        yield Bit(self, "have_house_number")
        yield Bit(self, "have_ele")
        yield PaddingBits(self, "pad[]", 5)
        if self["have_name"].value:
            yield VbeString(self, "name")
        if self["have_house_number"].value:
            yield VbeString(self, "house_number")
        if self["have_ele"].value:
            yield IntVbe(self, "ele")
    def createDescription(self):
        s = "POI"
        if self["have_name"].value:
            s += ' "%s"' % self["name"]["chars"].value
        s += " @ %f/%f" % (self["lat_diff"].value / UDEG,
                           self["lon_diff"].value / UDEG)
        return s
class SubTileBitmap(FieldSet):
    """4x4 bitmap (16 bits) marking in which sub-tiles a way appears."""
    static_size = 2 * 8  # always two bytes
    def createFields(self):
        for y in range(4):
            for x in range(4):
                yield Bit(self, "is_used[%d,%d]" % (x, y))
class WayProperties(FieldSet):
    """One way record: optional debug signature, explicit data size, then
    the size-bounded inner payload."""
    def createFields(self):
        if self["/have_debug"].value:
            # Debug builds prefix every way with a fixed signature.
            yield String(self, "signature", 32)
            if not self['signature'].value.startswith("---WayStart"):
                raise ValueError
        yield UIntVbe(self, "way_data_size")
        # WayProperties is split into an outer and an inner field, to allow
        # specifying data size for inner part:
        yield WayPropertiesInner(self, "inner", size=self["way_data_size"].value * 8)
class WayPropertiesInner(FieldSet):
    """Payload of a way record: sub-tile bitmap, layer, tags, optional
    metadata and one or more coordinate data blocks."""
    def createFields(self):
        yield SubTileBitmap(self, "sub_tile_bitmap")
        # yield Bits(self, "sub_tile_bitmap", 16)
        yield Bits(self, "layer", 4)
        yield Bits(self, "num_tags", 4)
        for i in range(self["num_tags"].value):
            yield UIntVbe(self, "tag_id[]")
        # Flag byte announcing which optional fields follow.
        yield Bit(self, "have_name")
        yield Bit(self, "have_house_number")
        yield Bit(self, "have_ref")
        yield Bit(self, "have_label_position")
        yield Bit(self, "have_num_way_blocks")
        yield Enum(Bit(self, "coord_encoding"), CoordinateEncoding)
        yield PaddingBits(self, "pad[]", 2)
        if self["have_name"].value:
            yield VbeString(self, "name")
        if self["have_house_number"].value:
            yield VbeString(self, "house_number")
        if self["have_ref"].value:
            yield VbeString(self, "ref")
        if self["have_label_position"].value:
            yield IntVbe(self, "label_lat_diff")
            yield IntVbe(self, "label_lon_diff")
        # Without the explicit count flag exactly one way data block follows.
        numWayDataBlocks = 1
        if self["have_num_way_blocks"].value:
            yield UIntVbe(self, "num_way_blocks")
            numWayDataBlocks = self["num_way_blocks"].value
        for i in range(numWayDataBlocks):
            yield WayData(self, "way_data[]")
    def createDescription(self):
        s = "way"
        if self["have_name"].value:
            s += ' "%s"' % self["name"]["chars"].value
        return s
class WayData(FieldSet):
    """One way data block: a count followed by coordinate blocks
    (outer ring plus inner rings for polygons)."""
    def createFields(self):
        yield UIntVbe(self, "num_coord_blocks")
        for i in range(self["num_coord_blocks"].value):
            yield WayCoordBlock(self, "way_coord_block[]")
class WayCoordBlock(FieldSet):
    """One coordinate block: node count, the first node's delta to the
    tile origin, then per-node deltas to the previous node."""
    def createFields(self):
        yield UIntVbe(self, "num_way_nodes")
        yield IntVbe(self, "first_lat_diff")
        yield IntVbe(self, "first_lon_diff")
        for i in range(self["num_way_nodes"].value - 1):
            yield IntVbe(self, "lat_diff[]")
            yield IntVbe(self, "lon_diff[]")
class TileData(FieldSet):
    """One tile: header with per-zoom counts, then all POIs followed by
    all ways, grouped by zoom level."""
    def __init__(self, parent, name, zoomIntervalCfg, **kw):
        FieldSet.__init__(self, parent, name, **kw)
        # Needed to know how many zoom levels the tile header covers.
        self.zoomIntervalCfg = zoomIntervalCfg
    def createFields(self):
        if self["/have_debug"].value:
            # Debug builds prefix every tile with a fixed signature.
            yield String(self, "signature", 32)
            if not self['signature'].value.startswith("###TileStart"):
                raise ValueError
        yield TileHeader(self, "tile_header", self.zoomIntervalCfg)
        numLevels = int(self.zoomIntervalCfg[
            "max_zoom_level"].value - self.zoomIntervalCfg["min_zoom_level"].value) + 1
        # First all POIs for every zoom level ...
        for zoomLevel in range(numLevels):
            zoomTableEntry = self["tile_header"][
                "zoom_table_entry[%d]" % zoomLevel]
            for poiIndex in range(zoomTableEntry["num_pois"].value):
                yield POIData(self, "poi_data[%d,%d]" % (zoomLevel, poiIndex))
        # ... then all ways for every zoom level.
        for zoomLevel in range(numLevels):
            zoomTableEntry = self["tile_header"][
                "zoom_table_entry[%d]" % zoomLevel]
            for wayIndex in range(zoomTableEntry["num_ways"].value):
                yield WayProperties(self, "way_props[%d,%d]" % (zoomLevel, wayIndex))
class ZoomSubFile(SeekableFieldSet):
    """One zoom-interval sub-file: the tile index, then every tile's data.

    The number of tiles is derived from the first index entry's offset
    (the index itself ends where the first tile begins; each index entry
    is 5 bytes).
    """
    def __init__(self, parent, name, zoomIntervalCfg, **kw):
        SeekableFieldSet.__init__(self, parent, name, **kw)
        self.zoomIntervalCfg = zoomIntervalCfg
    def createFields(self):
        if self["/have_debug"].value:
            # Debug builds prefix the index with a 16-byte signature.
            yield String(self, "signature", 16)
            if self['signature'].value != "+++IndexStart+++":
                raise ValueError
        indexEntries = []
        numTiles = None
        i = 0
        while True:
            entry = TileIndexEntry(self, "tile_index_entry[]")
            indexEntries.append(entry)
            yield entry
            i += 1
            if numTiles is None:
                # calculate number of tiles (TODO: better calc this from map
                # bounding box)
                firstOffset = self["tile_index_entry[0]"]["offset"].value
                if self["/have_debug"].value:
                    firstOffset -= 16
                # Integer division: each index entry is 5 bytes ("/" would
                # make numTiles a float on Python 3).
                numTiles = firstOffset // 5
            if i >= numTiles:
                break
        for i, indexEntry in enumerate(indexEntries):
            offset = indexEntry["offset"].value
            self.seekByte(offset, relative=True)
            # Tile size = gap to the next index entry, or to end of sub-file
            # for the last tile (sizes are in bits here).
            if i != len(indexEntries) - 1:
                next_offset = indexEntries[i + 1]["offset"].value
                size = (next_offset - offset) * 8
            else:
                size = self.size - offset * 8
            if size == 0:
                # hachoir doesn't support empty field.
                continue
            yield TileData(self, "tile_data[%d]" % i, zoomIntervalCfg=self.zoomIntervalCfg, size=size)
class MapsforgeMapFile(Parser, RootSeekableFieldSet):
    """Top-level parser for mapsforge binary map files (format version 3)."""
    PARSER_TAGS = {
        "id": "mapsforge_map",
        "category": "misc",
        "file_ext": ("map",),
        "min_size": 62 * 8,
        "description": "Mapsforge map file",
    }
    endian = BIG_ENDIAN
    def validate(self):
        # Magic string plus the only supported format version.
        return self["file_magic"].value == "mapsforge binary OSM" and self["file_version"].value == 3
    def createFields(self):
        yield String(self, "file_magic", 20)
        yield UInt32(self, "header_size")
        yield UInt32(self, "file_version")
        yield UInt64(self, "file_size")
        yield UInt64(self, "creation_date")
        # Bounding box in micro-degrees.
        yield Int32(self, "min_lat")
        yield Int32(self, "min_lon")
        yield Int32(self, "max_lat")
        yield Int32(self, "max_lon")
        yield UInt16(self, "tile_size")
        yield VbeString(self, "projection")
        # flags
        yield Bit(self, "have_debug")
        yield Bit(self, "have_map_start")
        yield Bit(self, "have_start_zoom")
        yield Bit(self, "have_language_preference")
        yield Bit(self, "have_comment")
        yield Bit(self, "have_created_by")
        yield Bits(self, "reserved[]", 2)
        # Optional header fields announced by the flag bits above.
        if self["have_map_start"].value:
            yield UInt32(self, "start_lat")
            yield UInt32(self, "start_lon")
        if self["have_start_zoom"].value:
            yield UInt8(self, "start_zoom")
        if self["have_language_preference"].value:
            yield VbeString(self, "language_preference")
        if self["have_comment"].value:
            yield VbeString(self, "comment")
        if self["have_created_by"].value:
            yield VbeString(self, "created_by")
        yield TagStringList(self, "poi_tags")
        yield TagStringList(self, "way_tags")
        yield UInt8(self, "num_zoom_intervals")
        for i in range(self["num_zoom_intervals"].value):
            yield ZoomIntervalCfg(self, "zoom_interval_cfg[]")
        # Jump to each zoom interval's sub-file and parse it in place.
        for i in range(self["num_zoom_intervals"].value):
            zoomIntervalCfg = self["zoom_interval_cfg[%d]" % i]
            self.seekByte(zoomIntervalCfg[
                "subfile_start"].value, relative=False)
            yield ZoomSubFile(self, "subfile[]", size=zoomIntervalCfg["subfile_size"].value * 8, zoomIntervalCfg=zoomIntervalCfg)
1740314 | <reponame>kalxas/eoxserver<gh_stars>10-100
# ------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2017 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
from django.contrib.gis.geos import Polygon
from django.contrib.gis.gdal import SpatialReference, CoordTransform, DataSource
from eoxserver.contrib import gdal
from eoxserver.backends.access import get_vsi_path, get_vsi_env, gdal_open
from eoxserver.render.coverage.objects import Coverage
# Supported browse raster modes (band layouts).
BROWSE_MODE_RGB = "rgb"
BROWSE_MODE_RGBA = "rgba"
BROWSE_MODE_GRAYSCALE = "grayscale"
class Browse(object):
    """A renderable browse image: file location, geometry and band mode."""
    def __init__(self, name, filename, env, size, extent, crs, mode, footprint):
        self._name = name              # display/lookup name
        self._filename = filename      # (VSI) path of the raster file
        self._env = env                # GDAL/VSI environment options
        self._size = size              # (width, height) in pixels
        self._extent = extent          # (min_x, min_y, max_x, max_y)
        self._crs = crs                # coordinate reference system
        self._mode = mode              # one of the BROWSE_MODE_* constants
        self._footprint = footprint    # optional precomputed footprint
    @property
    def name(self):
        return self._name
    @property
    def filename(self):
        return self._filename
    @property
    def env(self):
        return self._env
    @property
    def size(self):
        return self._size
    @property
    def extent(self):
        return self._extent
    @property
    def crs(self):
        return self._crs
    @property
    def spatial_reference(self):
        return SpatialReference(self.crs)
    @property
    def mode(self):
        return self._mode
    @property
    def footprint(self):
        """Stored footprint, or the extent polygon reprojected to EPSG:4326."""
        if self._footprint:
            return self._footprint
        else:
            polygon = Polygon.from_bbox(self.extent)
            srs = SpatialReference(self.crs)
            if srs.srid != 4326:
                ct = CoordTransform(srs, SpatialReference(4326))
                polygon.transform(ct)
            return polygon
    @classmethod
    def from_model(cls, product_model, browse_model):
        """Build a Browse from product/browse database models."""
        filename = get_vsi_path(browse_model)
        env = get_vsi_env(browse_model.storage)
        size = (browse_model.width, browse_model.height)
        extent = (
            browse_model.min_x, browse_model.min_y,
            browse_model.max_x, browse_model.max_y
        )
        ds = gdal_open(browse_model)
        mode = _get_ds_mode(ds)
        ds = None
        if browse_model.browse_type:
            name = '%s__%s' % (
                product_model.identifier, browse_model.browse_type.name
            )
        else:
            name = product_model.identifier
        return cls(
            name, filename, env, size, extent,
            browse_model.coordinate_reference_system, mode,
            product_model.footprint
        )
    @classmethod
    def from_file(cls, filename, env=None):
        """Build a Browse by inspecting a raster file with GDAL."""
        env = env or {}
        ds = gdal.Open(filename)
        size = (ds.RasterXSize, ds.RasterYSize)
        extent = gdal.get_extent(ds)
        mode = _get_ds_mode(ds)
        # BUGFIX: arguments were passed as (filename, env, filename, ...),
        # shifting name/filename/env out of place relative to __init__.
        return cls(
            filename, filename, env, size, extent,
            ds.GetProjection(), mode, None
        )
class GeneratedBrowse(Browse):
    """A browse computed on the fly from band expressions over coverages."""
    def __init__(self, name, band_expressions, ranges, nodata_values,
                 fields_and_coverages, field_list, footprint):
        self._name = name
        self._band_expressions = band_expressions    # one expression per output band
        self._ranges = ranges                        # per-band value ranges
        self._nodata_values = nodata_values          # per-band nodata values
        self._fields_and_coverages = fields_and_coverages  # field name -> coverages
        self._field_list = field_list
        self._footprint = footprint
    @property
    def name(self):
        return self._name
    @property
    def size(self):
        # Geometry is taken from the first coverage of any field.
        for field, coverages in self._fields_and_coverages.items():
            return coverages[0].size
    @property
    def extent(self):
        for field, coverages in self._fields_and_coverages.items():
            return coverages[0].extent
    @property
    def crs(self):
        for field, coverages in self._fields_and_coverages.items():
            return coverages[0].grid.coordinate_reference_system
    @property
    def spatial_reference(self):
        for field, coverages in self._fields_and_coverages.items():
            return coverages[0].grid.spatial_reference
    @property
    def mode(self):
        """Band mode derived from the number of band expressions."""
        field_count = len(self._band_expressions)
        if field_count == 1:
            return BROWSE_MODE_GRAYSCALE
        elif field_count == 3:
            return BROWSE_MODE_RGB
        elif field_count == 4:
            # BUGFIX: four bands mean RGB + alpha; this returned
            # BROWSE_MODE_RGB before (compare _get_ds_mode: count 4 -> RGBA).
            return BROWSE_MODE_RGBA
    @property
    def band_expressions(self):
        return self._band_expressions
    @property
    def ranges(self):
        return self._ranges
    @property
    def nodata_values(self):
        return self._nodata_values
    @property
    def fields_and_coverages(self):
        return self._fields_and_coverages
    @property
    def field_list(self):
        return self._field_list
    @classmethod
    def from_coverage_models(cls, band_expressions, ranges, nodata_values,
                             fields_and_coverage_models,
                             product_model):
        """Build a GeneratedBrowse from coverage database models."""
        fields_and_coverages = {
            field_name: [
                Coverage.from_model(coverage)
                for coverage in coverages
            ]
            for field_name, coverages in fields_and_coverage_models.items()
        }
        return cls(
            product_model.identifier,
            band_expressions,
            ranges,
            nodata_values,
            fields_and_coverages, [
                fields_and_coverages[field_name][0].range_type.get_field(
                    field_name
                )
                for field_name in fields_and_coverages.keys()
            ],
            product_model.footprint
        )
class Mask(object):
    """A browse mask: a vector file and/or an in-memory geometry, plus a
    flag telling whether it marks valid (True) or invalid (False) area."""
    def __init__(self, filename=None, geometry=None, validity=False):
        self._filename = filename
        self._geometry = geometry
        self._validity = validity
    @property
    def filename(self):
        """Path of the vector file holding the mask, if any."""
        return self._filename
    @property
    def geometry(self):
        """In-memory mask geometry, if any."""
        return self._geometry
    def load_geometry(self):
        """Read the mask file and return the union of all its geometries."""
        layer = DataSource(self.filename)[0]
        geoms = layer.get_geoms()
        merged = geoms[0]
        for geom in geoms[1:]:
            merged = merged.union(geom)
        return merged.geos
    @property
    def validity(self):
        """True if the mask delimits valid area, False for invalid area."""
        return self._validity
    @classmethod
    def from_model(cls, mask_model, mask_type):
        """Build a Mask from database models; either argument may be None."""
        filename = None
        geometry = None
        if mask_model:
            if mask_model.location:
                filename = get_vsi_path(mask_model)
            geometry = mask_model.geometry
            # Fall back to the mask's own type when none was supplied.
            mask_type = mask_type or mask_model.mask_type
        validity = mask_type.validity if mask_type else False
        return cls(filename, geometry, validity)
class MaskedBrowse(object):
    """Pairs a Browse with the Mask that should be applied to it."""
    def __init__(self, browse, mask):
        self._browse = browse
        self._mask = mask
    @property
    def browse(self):
        """The underlying Browse object."""
        return self._browse
    @property
    def mask(self):
        """The Mask to apply when rendering the browse."""
        return self._mask
    @classmethod
    def from_models(cls, product_model, browse_model, mask_model,
                    mask_type_model):
        """Build both halves from their database models and pair them."""
        browse = Browse.from_model(product_model, browse_model)
        mask = Mask.from_model(mask_model, mask_type_model)
        return cls(browse, mask)
def _get_ds_mode(ds):
    """Derive the browse mode of a GDAL dataset from its band layout.

    :param ds: an opened ``gdal.Dataset``
    :return: one of the ``BROWSE_MODE_*`` constants
    :raises ValueError: for band configurations that map to no mode
    """
    first = ds.GetRasterBand(1)
    count = ds.RasterCount
    if count == 1 and first.GetColorTable():
        # Paletted single band -> RGBA.  This must be tested *before* the
        # generic single-band case: in the previous ordering "count == 1"
        # always matched the grayscale branch first, leaving this case
        # unreachable.
        mode = BROWSE_MODE_RGBA
    elif count == 1 or (count > 4 and not first.GetColorTable()):
        mode = BROWSE_MODE_GRAYSCALE
    elif count == 4:
        mode = BROWSE_MODE_RGBA
    elif count == 3 and first.GetColorInterpretation() == gdal.GCI_RedBand:
        mode = BROWSE_MODE_RGB
    else:
        # Previously this fell through and crashed with UnboundLocalError
        # on "return mode"; fail with a meaningful error instead.
        raise ValueError(
            "Unsupported band configuration: %d band(s)" % count
        )
    return mode
| StarcoderdataPython |
4821920 | <filename>test/test_impuritysolver.py
import unittest
from pytriqs.operators import c, c_dag
from pytriqs.gf import BlockGf, GfImFreq, SemiCircular, iOmega_n, inverse
from cdmft.impuritysolver import ImpuritySolver
class TestImpuritySolver(unittest.TestCase):
    """Smoke tests for ImpuritySolver: construct, run and read back results.

    These do not assert numerical values; they only verify the pytriqs-based
    call sequence completes without raising.
    """
    def test_ImpuritySolver_initialization(self):
        """Constructing a solver (beta, block structure, mesh sizes) works."""
        solver = ImpuritySolver(10, [('u', range(2))], 1025, 10001, 50)
    def test_ImpuritySolver_run(self):
        """A short CT-QMC run on a semicircular bath completes."""
        solver = ImpuritySolver(10, [('u', range(2))], 100, 1000, 25)
        # Simple density Hamiltonian n_u0 = c^dag_u0 c_u0
        h = c_dag('u', 0) * c('u', 0)
        g = BlockGf(name_list=['u'],
                    block_list=[GfImFreq(indices=range(2),
                                         beta=10, n_points=100)])
        g['u'] << SemiCircular(1)
        solver.run(g, h, 0, n_cycles=50, length_cycle=2,
                   n_warmup_cycles=20, verbosity=0)
        # Only checks that results can be fetched; values are not inspected.
        res = solver.get_results()
    def test_ImpuritySolver_init_new_giw(self):
        """Internal Green's-function re-initialization does not raise."""
        solver = ImpuritySolver(10, [('u', range(2))], 100, 1000, 25)
        solver._init_new_giw()
    def test_ImpuritySolver_get_g_iw(self):
        """G(iw) and self-energy accessors work in both flag combinations."""
        solver = ImpuritySolver(10, [('u', range(2))], 100, 1000, 25)
        h = c_dag('u', 0) * c('u', 0)
        g = BlockGf(name_list=['u'],
                    block_list=[GfImFreq(indices=range(2),
                                         beta=10, n_points=100)])
        g['u'] << SemiCircular(1)
        solver.run(g, h, 0, n_cycles=50, length_cycle=2, n_warmup_cycles=20,
                   verbosity=0, perform_post_proc=True, measure_G_l=True)
        g << solver.get_g_iw(True, False)
        g << solver.get_g_iw(False, True)
        g << solver.get_se(True, False)
        g << solver.get_se(False, True)
| StarcoderdataPython |
1690781 | <reponame>carlocastoldi/wyvern
#!/usr/bin/env python3
#
# Copyright 2018 | <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from .util import UnreachableError
# Scalar types understood by the runtime; member values are the
# wire-format tags emitted into program commands.
DataType = Enum("DataType", {
    "int32": "I32",
    "uint32": "U32",
    "float32": "F32",
    "bool": "Bool",
})
# Direction/visibility of a program variable; values equal the names.
IoType = Enum("IoType", {
    "input": "input",
    "output": "output",
    "private": "private",
})
class Variable:
    """Marker base class for program variables.

    NOTE(review): intentionally empty in this chunk; appears to serve as a
    common tag type for builder-managed values — confirm intended use.
    """
    pass
class Array:
    """A handle to an array living inside the program being built.

    Indexing/assignment do not touch Python data: they emit ArrayLoad /
    ArrayStore / ArrayLen commands into the owning program builder.
    NOTE(review): ``_ctx``, ``_tid`` and ``_ty`` are not set here — they
    appear to be attached externally by the builder; confirm.
    """
    def _get_key(self, key):
        # Accept a raw Python int (wrapped into a u32 Constant) or an
        # existing u32 Constant; reject everything else.  type() is used
        # deliberately (isinstance would also admit bool for int keys).
        if type(key) == int:
            key = Constant.uint32(key, self._ctx)
        elif type(key) == Constant and key._ty == DataType.uint32:
            pass
        else:
            raise TypeError
        return key
    def __setitem__(self, key, value):
        """Emit ArrayStore self[key] = value; value must match element type."""
        key = self._get_key(key)
        value = self._ctx._sanitize(value)
        if type(value) != Constant or value._ty != self._ty:
            raise TypeError
        self._ctx.getProgramBuilder()._add_command({
            "ArrayStore": [self._tid, key._tid, value._tid]
        })
    def __getitem__(self, key):
        """Emit ArrayLoad and return a fresh Constant of the element type."""
        key = self._get_key(key)
        element = Constant._new_constant(self._ctx, self._ty)
        self._ctx.getProgramBuilder()._add_command({
            "ArrayLoad": [element._tid, self._tid, key._tid]
        })
        return element
    def __len__(self):
        # Note: returns a *Constant* (program value), not a Python int, so
        # len(arr) will raise in CPython; call __len__ directly instead.
        length = Constant._new_constant(self._ctx, DataType.uint32)
        self._ctx.getProgramBuilder()._add_command({
            "ArrayLen": [length._tid, self._tid]
        })
        return length
class Constant:
    """A typed handle to a single value of the program under construction.

    Instances do not hold the value itself; they carry a temporary id
    (``_tid``), a :class:`DataType` (``_ty``) and the owning context
    (``_ctx``).  Operators emit commands into the context's program
    builder and return a fresh Constant for the result.  Note that all
    comparison operators (including ``==``) return a *bool Constant*,
    not a Python bool.
    """

    def getContext(self):
        """Return the owning builder context."""
        return self._ctx

    def getProgramBuilder(self):
        """Return the program builder of the owning context."""
        return self.getContext().getProgramBuilder()

    # ------------------------------------------------------------------
    # Internal helpers.  These deduplicate the formerly copy-pasted
    # operator bodies; each operator's behavior is unchanged.
    # ------------------------------------------------------------------
    def _binary_op(self, other, opcode, allowed, result_ty=None):
        """Emit a two-operand command and return the resulting Constant.

        ``other`` is sanitized by the context (plain Python values get
        wrapped); both operands must share a program builder and a type
        contained in ``allowed``.  The result has the operand type unless
        ``result_ty`` overrides it (used by comparisons).
        """
        other = self._ctx._sanitize(other)
        assert self.getProgramBuilder() == other.getProgramBuilder()
        assert self._ty == other._ty
        assert self._ty in allowed
        result = Constant._new_constant(
            self.getContext(), self._ty if result_ty is None else result_ty)
        self.getProgramBuilder()._add_command({
            opcode: [result._tid, self._tid, other._tid]
        })
        return result

    def _unary_op(self, opcode, allowed):
        """Emit a one-operand command and return the resulting Constant."""
        assert self._ty in allowed
        result = Constant._new_constant(self.getContext(), self._ty)
        self.getProgramBuilder()._add_command({
            opcode: [result._tid, self._tid]
        })
        return result

    # Arithmetic -------------------------------------------------------
    def __add__(self, other):
        return self._binary_op(
            other, "Add", (DataType.int32, DataType.uint32, DataType.float32))

    def __sub__(self, other):
        return self._binary_op(
            other, "Sub", (DataType.int32, DataType.uint32, DataType.float32))

    def __mul__(self, other):
        return self._binary_op(
            other, "Mul", (DataType.int32, DataType.uint32, DataType.float32))

    def __floordiv__(self, other):
        return self._binary_op(
            other, "Div", (DataType.int32, DataType.uint32, DataType.float32))

    def __truediv__(self, other):
        # Both / and // emit the same "Div" opcode.
        return self.__floordiv__(other)

    def __mod__(self, other):
        return self._binary_op(
            other, "Rem", (DataType.int32, DataType.uint32, DataType.float32))

    def __neg__(self):
        return self._unary_op(
            "Neg", (DataType.int32, DataType.uint32, DataType.float32))

    # Bit operations ---------------------------------------------------
    def __inv__(self):
        """Bitwise complement for integer Constants."""
        return self._unary_op("Not", (DataType.int32, DataType.uint32))

    # BUG FIX: Python's ~ operator looks up __invert__, not __inv__, so
    # the method above was unreachable through the operator.  The alias
    # keeps __inv__ for any existing direct callers while enabling ~x.
    __invert__ = __inv__

    def not_(self):
        """Logical negation for bool Constants."""
        return self._unary_op("Not", (DataType.bool,))

    def __lshift__(self, other):
        return self._binary_op(other, "Shl", (DataType.int32, DataType.uint32))

    def __rshift__(self, other):
        return self._binary_op(other, "Shr", (DataType.int32, DataType.uint32))

    def __xor__(self, other):
        return self._binary_op(
            other, "BitXor", (DataType.int32, DataType.uint32, DataType.bool))

    def __and__(self, other):
        return self._binary_op(
            other, "BitAnd", (DataType.int32, DataType.uint32, DataType.bool))

    def __or__(self, other):
        return self._binary_op(
            other, "BitOr", (DataType.int32, DataType.uint32, DataType.bool))

    # Comparisons (all yield a bool Constant, never a Python bool) -----
    def __eq__(self, other):
        return self._binary_op(
            other, "Eq",
            (DataType.int32, DataType.uint32, DataType.float32,
             DataType.bool),
            result_ty=DataType.bool)

    def __ne__(self, other):
        return self._binary_op(
            other, "Ne",
            (DataType.int32, DataType.uint32, DataType.float32,
             DataType.bool),
            result_ty=DataType.bool)

    def __lt__(self, other):
        return self._binary_op(
            other, "Lt",
            (DataType.int32, DataType.uint32, DataType.float32),
            result_ty=DataType.bool)

    def __le__(self, other):
        return self._binary_op(
            other, "Le",
            (DataType.int32, DataType.uint32, DataType.float32),
            result_ty=DataType.bool)

    def __gt__(self, other):
        return self._binary_op(
            other, "Gt",
            (DataType.int32, DataType.uint32, DataType.float32),
            result_ty=DataType.bool)

    def __ge__(self, other):
        return self._binary_op(
            other, "Ge",
            (DataType.int32, DataType.uint32, DataType.float32),
            result_ty=DataType.bool)

    # Construction -----------------------------------------------------
    @staticmethod
    def _new_constant(ctx, ty):
        """Allocate a fresh Constant of type ``ty`` in context ``ctx``."""
        const = Constant()
        const._ctx = ctx
        const._ty = ty
        const._tid = ctx._new_constant(ty)
        return const

    @staticmethod
    def int32(value, ctx=None):
        """Create (or convert to) an int32 Constant.

        ``value`` may be a Python int (range-checked to 32-bit signed) or
        an existing Constant of a numeric type (converted via an explicit
        conversion command).  ``ctx`` is required for plain values; for
        Constants it is taken from the value itself.
        """
        if type(value) not in (int, Constant):
            value = int(value)
        if type(value) == Constant:
            ctx = value.getContext()
        const = Constant._new_constant(ctx, DataType.int32)
        p = ctx.getProgramBuilder()
        if type(value) == int:
            if value < -2**31 or value >= 2**31:
                raise ValueError
            p._add_command({
                "Constant": [const._tid, {DataType.int32.value: value}]
            })
        elif type(value) == Constant:
            assert p == value.getProgramBuilder()
            if value._ty == DataType.int32:
                # Same type: reuse the existing temporary id.
                const._tid = value._tid
            elif value._ty == DataType.uint32:
                p._add_command({"I32fromU32": [const._tid, value._tid]})
            elif value._ty == DataType.float32:
                p._add_command({"I32fromF32": [const._tid, value._tid]})
            else:
                raise TypeError
        else:
            raise UnreachableError
        return const

    @staticmethod
    def uint32(value, ctx=None):
        """Create (or convert to) a uint32 Constant (see :meth:`int32`)."""
        if type(value) not in (int, Constant):
            value = int(value)
        if type(value) == Constant:
            ctx = value.getContext()
        const = Constant._new_constant(ctx, DataType.uint32)
        p = ctx.getProgramBuilder()
        if type(value) == int:
            if value < 0 or value >= 2**32:
                raise ValueError
            p._add_command({
                "Constant": [const._tid, {DataType.uint32.value: value}]
            })
        elif type(value) == Constant:
            assert p == value.getProgramBuilder()
            if value._ty == DataType.uint32:
                const._tid = value._tid
            elif value._ty == DataType.int32:
                p._add_command({"U32fromI32": [const._tid, value._tid]})
            elif value._ty == DataType.float32:
                p._add_command({"U32fromF32": [const._tid, value._tid]})
            else:
                raise TypeError
        else:
            raise UnreachableError
        return const

    @staticmethod
    def float32(value, ctx=None):
        """Create (or convert to) a float32 Constant (see :meth:`int32`)."""
        if type(value) not in (float, Constant):
            value = float(value)
        if type(value) == Constant:
            ctx = value.getContext()
        const = Constant._new_constant(ctx, DataType.float32)
        p = ctx.getProgramBuilder()
        if type(value) == float:
            p._add_command({
                "Constant": [const._tid, {DataType.float32.value: value}]
            })
        elif type(value) == Constant:
            assert p == value.getProgramBuilder()
            if value._ty == DataType.uint32:
                p._add_command({"F32fromU32": [const._tid, value._tid]})
            elif value._ty == DataType.int32:
                p._add_command({"F32fromI32": [const._tid, value._tid]})
            elif value._ty == DataType.float32:
                const._tid = value._tid
            else:
                raise TypeError
        else:
            raise UnreachableError
        return const

    @staticmethod
    def bool(value, ctx=None):
        """Create (or pass through) a bool Constant (see :meth:`int32`)."""
        if type(value) not in (bool, Constant):
            value = bool(value)
        if type(value) == Constant:
            ctx = value.getContext()
        const = Constant._new_constant(ctx, DataType.bool)
        p = ctx.getProgramBuilder()
        if type(value) == bool:
            p._add_command({
                "Constant": [const._tid, {DataType.bool.value: value}]
            })
        elif type(value) == Constant:
            assert p == value.getProgramBuilder()
            if value._ty == DataType.bool:
                const._tid = value._tid
            else:
                raise TypeError
        else:
            raise UnreachableError
        return const
| StarcoderdataPython |
11592 | <reponame>sohammanjrekar/HackerRank<filename>10 Days of Statistics/Day 5 - Normal Distribution I.py
"""
Day 5: Normal Distribution I
In certain plant, the time taken to assemble a car is a random variable, X having a normal distribution
with a mean of 20 hours and a standard deviation of 2 hours. What is the probability that a car can be
assembled at this plant in:
1. Less han 19.5 hours?
2. Between 20 and 22 hours?
Author: <NAME>
"""
import math
# less than 19.5 hours
def cumulative1(mean, std, less):
    """Print and return P(X < less) for X ~ Normal(mean, std), 3 decimals.

    Uses the closed form Phi(x) = (1 + erf((x - mean) / (std*sqrt(2)))) / 2.
    The original only printed; returning the value as well keeps existing
    callers working (they ignore the return) while making it testable.
    """
    p = round(0.5 * (1 + math.erf((less - mean) / (std * (2 ** 0.5)))), 3)
    print(p)
    return p
# Between 20 and 22 hours
def cumulative2(mean, std, lower_range, upper_range):
    """Print and return P(lower_range < X < upper_range), 3 decimals.

    X ~ Normal(mean, std); computed as CDF(upper) - CDF(lower).  Returns
    the value in addition to printing (backward compatible) so the result
    can be asserted.
    """
    def _cdf(x):
        # Standard normal CDF evaluated via the error function.
        return 0.5 * (1 + math.erf((x - mean) / (std * (2 ** 0.5))))
    p = round(_cdf(upper_range) - _cdf(lower_range), 3)
    print(p)
    return p
# Line 1 of stdin: mean and standard deviation of the distribution.
values = list(map(float, input().split()))
mean = values[0]
std = values[1]
# Line 2: upper bound for the one-sided probability P(X < less).
less = float(input())
# Line 3: interval bounds for P(lower_range < X < upper_range).
boundaries = list(map(float, input().split()))
lower_range = boundaries[0]
upper_range = boundaries[1]
cumulative1(mean, std, less)
cumulative2(mean, std, lower_range, upper_range)
3207758 | from collections import Counter
def solve(N, A):
    """Minimum number of removals so every remaining value of A occurs
    the same number of times.

    For each candidate target multiplicity (taken from the multiplicities
    already present), count the removals needed: values appearing more
    often lose the excess, values appearing less often are removed
    entirely.  N is accepted for interface compatibility but unused.
    """
    freq_of_freq = Counter(Counter(A).values())
    best = float('inf')
    for target in freq_of_freq:
        removals = 0
        for count, times in freq_of_freq.items():
            if count > target:
                removals += (count - target) * times
            elif count < target:
                removals += count * times
        best = min(best, removals)
    return best
# NOTE: Python 2 driver (raw_input/xrange/print statement).
# First stdin line: number of test cases.  Each case: a line with the
# element count N, then a line with N space-separated integers.
for _ in xrange(int(raw_input())):
    N = int(raw_input())
    A = map(int, raw_input().split())
    ans = solve(N, A)
    print ans
| StarcoderdataPython |
1628513 | import os
# Absolute path of the directory containing this settings module.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Scratch directories for downloaded artifacts, relative to the project root.
IMAGES_DIR = os.path.join(ROOT_DIR, 'tmp/images')
ARCHIVES_DIR = os.path.join(ROOT_DIR, 'tmp/archives')
TEXTS_DIR = os.path.join(ROOT_DIR, 'tmp/texts')
# Identifiers selecting the kind of scraping job to run.
IMAGES_SCRAPING_TYPE = 'images-type'
TEXTS_SCRAPING_TYPE = 'texts-type'
# Redis connection settings (lower-case names kept so existing imports
# keep working; empty password means unauthenticated local instance).
redis_host = "localhost"
redis_port = 6379
redis_password = ""
4824774 | <reponame>drewmee/opcsim
# NOTE(review): plotting-example fragment; `d` (a fourth distribution),
# `opcsim`, and `sns` (seaborn) are defined/imported earlier in the full
# script — confirm when viewing the complete file.
d2 = opcsim.load_distribution("Marine")
d3 = opcsim.load_distribution("Rural")
# Overlay all three aerosol PDFs on one axes: the first call creates the
# axes, subsequent calls reuse it via ax=.
ax = opcsim.plots.pdfplot(d)
ax = opcsim.plots.pdfplot(d2, ax=ax)
ax = opcsim.plots.pdfplot(d3, ax=ax)
ax.set_title("Various Aerosol Distributions", fontsize=16)
ax.legend(loc='best')
sns.despine()
3246898 | """
@package mi.dataset.parser
@file mi-instrument/mi/dataset/parser/ctdav_nbosi_auv.py
@author <NAME>
@brief Parser and particle Classes and tools for the ctdav_nbosi_auv data
Release notes:
initial release
"""
__author__ = '<NAME>'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.parser.auv_common import \
AuvCommonParticle, \
AuvCommonParser, \
compute_timestamp, \
CTDAV_AUV_COMMON_PARAM_MAP
# The structure below is a list of common tuples extended with a list
# of specific ctdav_nbosi_auv tuples. Each tuple consists of parameter
# name, index into raw data parts list, encoding function.
CTDAV_NBOSI_AUV_PARAM_MAP = list(CTDAV_AUV_COMMON_PARAM_MAP)
CTDAV_NBOSI_AUV_PARAM_MAP.extend(
    [
        # pressure lives at raw-record index 10 and is decoded as float
        ('pressure', 10, float)
    ]
)
class CtdavNbosiAuvParticle(AuvCommonParticle):
    """Data particle for ctdav_nbosi AUV records."""
    # must provide a parameter map for _build_parsed_values
    _auv_param_map = CTDAV_NBOSI_AUV_PARAM_MAP
    # stream name published for particles of this record type
    _data_particle_type = "ctdav_auv_data"
CTDAV_NBOSI_AUV_ID = '1107'  # message ID of ctdav_auv_data records
CTDAV_NBOSI_AUV_FIELD_COUNT = len(CTDAV_NBOSI_AUV_PARAM_MAP) + 1  # number of expected fields in a ctdav_nbosi record
# Single-entry dispatch map handed to the common parser:
# (message id, expected field count, timestamp function, particle class)
CTDAV_NBOSI_AUV_MESSAGE_MAP = [(CTDAV_NBOSI_AUV_ID,
                                CTDAV_NBOSI_AUV_FIELD_COUNT,
                                compute_timestamp,
                                CtdavNbosiAuvParticle)]
class CtdavNbosiAuvParser(AuvCommonParser):
    """Parser for ctdav_nbosi AUV files; all parsing logic lives in
    AuvCommonParser, configured via the message map."""
    def __init__(self,
                 stream_handle,
                 exception_callback):
        # provide message ID and # of fields to parent class
        super(CtdavNbosiAuvParser, self).__init__(stream_handle,
                                                  exception_callback,
                                                  CTDAV_NBOSI_AUV_MESSAGE_MAP)
| StarcoderdataPython |
class Solution:
    def numSpecialEquivGroups(self, A):
        """Count groups of special-equivalent strings.

        Two strings are special-equivalent iff one can be turned into the
        other by swapping characters among even indices and among odd
        indices.  Hence each string's canonical form is the pair
        (sorted even-index chars, sorted odd-index chars), and the answer
        is the number of distinct canonical forms.

        The original built the same canonical form by re-interleaving two
        sorted lists back into a string and misused a Counter as a set;
        a set of tuples is simpler and also handles an empty input
        (returns 0 instead of raising IndexError on A[0]).

        :type A: List[str]
        :rtype: int
        """
        return len({
            (tuple(sorted(word[0::2])), tuple(sorted(word[1::2])))
            for word in A
        })
| StarcoderdataPython |
4822443 | from dataclasses import dataclass
from functools import total_ordering
from typing import Any, cast
@dataclass
class OrderElement:
    """A single line item of an Order: a label plus its integer value."""
    name: str    # human-readable label of the item
    value: int   # value of the item; units unspecified here — TODO confirm
class Order:
    """A client's order: a named, mutable collection of OrderElement items.

    (A commented-out @total_ordering experiment comparing orders by
    overall_value was removed as dead code.)
    """

    def __init__(self, client_name: str) -> None:
        self.client_name = client_name
        self.elements: "list[OrderElement]" = []

    def add_element(self, element: "OrderElement") -> None:
        """Append *element* to the order."""
        self.elements.append(element)

    @property
    def overall_value(self) -> int:
        """Total value of all elements; 0 for an empty order."""
        # Generator expression avoids materialising an intermediate list.
        return sum(element.value for element in self.elements)
| StarcoderdataPython |
class HelpVar:
    """Mutable flag holder shared across the application.

    NOTE(review): semantics inferred from the attribute names only —
    confirm against the code that sets these flags.
    """
    def __init__(self):
        self.wificonfig = False   # presumably True once wifi config exists — TODO confirm
        self.validconfig = False  # presumably True once config validated — TODO confirm
helpobj = HelpVar()  # module-level singleton imported by other modules
| StarcoderdataPython |
3221153 | <reponame>dj0wns/MA_MST_Extractor-DEPRECATED-<filename>loaders/csv.py
import json
from loaders.base import Loader
"""
Dump everything in the format:
type 0 ascii str - "a:string"
type 1 float num - 123
type 2 utf16 str - "u:string"
ex:
{
"some_key": ["u:äöõö", 123, "a:hello", ...],
"some_other_key": [956, "a:halo", ...],
...
}
"""
class CSVLoader(Loader):
    """Loader for the MST 'CSV' chunk: key -> list of typed values.

    Binary layout (offsets relative to self.entry.location):
      header: file_length, entry_count, entry_offset, timestamp (4 x u32)
      entries: entry_count records of (key_loc u32, key_len u32,
               val_count u16, idx u16, val_loc u32)
    NOTE(review): the module docstring promises "a:"/"u:" prefixes on
    decoded strings, but read() stores them unprefixed — confirm which
    is intended.
    """
    def read(self, reader):
        """Parse the chunk via *reader* and populate self.data (dict)."""
        reader.seek(self.entry.location)
        file_length, entry_count, entry_offset, when = reader.read_fmt('IIII')
        reader.seek(self.entry.location + entry_offset)
        entries = [reader.read_fmt('IIHHI') for i in range(entry_count)]
        out_dict = {}
        for key_loc, key_len, val_count, idx, val_loc in entries:
            reader.seek(self.entry.location + key_loc)
            key = reader.read_str(key_len)
            values = []
            reader.seek(self.entry.location + val_loc)
            for i in range(val_count):
                # Each value starts with a u32 type tag.
                val_type = reader.read_fmt('I')[0]
                if val_type == 0: # string
                    str_loc, str_len = reader.read_fmt('II')
                    # Save position: the string body lives elsewhere.
                    tmp_pos = reader.pos
                    reader.seek(self.entry.location + str_loc)
                    str_val = reader.read_str(str_len)
                    reader.seek(tmp_pos)
                    values.append(str_val)
                elif val_type == 1: # number(float)
                    # Trailing u32 is discarded padding/unknown.
                    float_val, _ = reader.read_fmt('fI')
                    # Round-trip through 5-decimal text to normalise noise.
                    values.append(float("{0:.5f}".format(float_val)))
                elif val_type == 2: # utf16 string
                    str_loc, str_len = reader.read_fmt('II')
                    tmp_pos = reader.pos
                    reader.seek(self.entry.location + str_loc)
                    # str_len counts UTF-16 code units, hence * 2 bytes.
                    if reader.little_endian:
                        str_val = reader.handle.read(str_len * 2).decode('utf-16le')
                    else:
                        str_val = reader.handle.read(str_len * 2).decode('utf-16be')
                    reader.seek(tmp_pos)
                    values.append(str_val)
                else:
                    raise Exception('malformed CSV')
            out_dict[key] = values
        self.data = out_dict
    def save(self, handle):
        """Write self.data to *handle* as ASCII CSV (trailing commas kept,
        non-ASCII characters silently dropped by 'ignore')."""
        for key in self.data.keys():
            handle.write(key.encode('ascii', 'ignore'))
            handle.write(",".encode())
            for item in self.data[key]:
                if isinstance(item, str):
                    handle.write(item.encode('ascii', "ignore"))
                    handle.write(",".encode())
                else:
                    handle.write(str(item).encode('ascii', 'ignore'))
                    handle.write(",".encode())
            handle.write("\n".encode())
    def reimport(self, handle):
        """Load self.data back from a JSON dump read from *handle*."""
        self.data = json.loads(handle.read().decode())
| StarcoderdataPython |
178259 | <filename>exercise/week4/ex2.py
def interlock(word1, word2, word3):
    """Return True iff *word3* is *word1* and *word2* interlocked.

    Interlocking alternates characters starting with word1
    (w1[0] w2[0] w1[1] w2[1] ...) and appends the tail of the longer
    word.  Any empty argument yields False (as in the original).

    The rewrite replaces four repeated min/max-by-length calls and an
    index loop with a single zip pass; zip stops at the shorter word,
    exactly like the original range(len(min(...))) loop.
    """
    if not (word1 and word2 and word3):
        return False
    shorter_len = min(len(word1), len(word2))
    interleaved = "".join(a + b for a, b in zip(word1, word2))
    # At most one of these tails is non-empty (the longer word's remainder).
    return word3 == interleaved + word1[shorter_len:] + word2[shorter_len:]
99510 | #!/usr/bin/env python
"""
File: DataSet
Date: 5/1/18
Author: <NAME> (<EMAIL>)
This file provides loading of the BraTS datasets
for ease of use in TensorFlow models.
"""
import os
import pandas as pd
import numpy as np
import nibabel as nib
from tqdm import tqdm
from BraTS.Patient import *
from BraTS.structure import *
from BraTS.modalities import *
from BraTS.load_utils import *
survival_df_cache = {} # Prevents loading CSVs more than once
class DataSubSet:
    """One split of a BraTS data set (train / HGG / LGG / validation).

    Holds a map of patient id -> patient directory, lazily loads MRI and
    segmentation volumes from disk, and caches both bulk arrays and
    individual Patient objects.
    """
    def __init__(self, directory_map, survival_csv, data_set_type=None):
        # directory_map: patient id -> path of that patient's data directory
        self.directory_map = directory_map
        self._patient_ids = sorted(list(directory_map.keys()))
        self._survival_csv = survival_csv
        self._num_patients = len(self._patient_ids)
        self.type = data_set_type
        # Data caches
        self._mris = None
        self._segs = None
        self._patients = {}
        self._survival_df_cached = None
        self._patients_fully_loaded = False
        # patient id -> row index into the bulk _mris/_segs arrays
        self._id_indexer = {patient_id: i for i, patient_id in enumerate(self._patient_ids)}
    def subset(self, patient_ids):
        """
        Split this data subset into a smaller subset by patient ID
        :param patient_ids: iterable of patient IDs to keep
        :return: A new DataSubSet restricted to those patients
            (note: the subset's `type` is not propagated)
        """
        dir_map = {id: self.directory_map[id] for id in patient_ids}
        return DataSubSet(dir_map, self._survival_csv)
    @property
    def ids(self):
        """
        List of all patient IDs in this dataset
        Will copy the ids... so modify them all you want
        :return: Copy of the patient IDs
        """
        return list(self._patient_ids)
    @property
    def mris(self):
        """Bulk MRI array for all patients; loaded from disk on first use."""
        if self._mris is not None:
            return self._mris
        self._load_images()
        return self._mris
    @property
    def segs(self):
        """Bulk segmentation array for all patients; lazily loaded."""
        if self._segs is None:
            self._load_images()
        return self._segs
    def _load_images(self):
        """Fill self._mris and self._segs for every patient in this subset."""
        # mri_shape / image_shape come from the BraTS.modalities star import.
        mris_shape = (self._num_patients,) + mri_shape
        segs_shape = (self._num_patients,) + image_shape
        self._mris = np.empty(shape=mris_shape)
        self._segs = np.empty(shape=segs_shape)
        if self._patients_fully_loaded:
            # All the patients were already loaded
            for i, patient in enumerate(tqdm(self._patients.values())):
                self._mris[i] = patient.mri_data
                self._segs[i] = patient.seg
        else:
            # Load it from scratch
            for i, patient_id in enumerate(self._patient_ids):
                patient_dir = self.directory_map[patient_id]
                load_patient_data_inplace(patient_dir, self._mris, self._segs, i)
    @property
    def patients(self):
        """
        Generator over ALL patients in this subset, loading each from disk
        (or cache) as it is yielded.  Marks the subset fully loaded only
        after complete iteration.
        :return: A generator of Patient objects
        """
        for patient_id in self.ids:
            yield self.patient(patient_id)
        self._patients_fully_loaded = True
    def patient(self, patient_id):
        """
        Loads only a single patient from disk
        :param patient_id: The patient ID
        :return: A Patient object loaded from disk
        """
        if patient_id not in self._patient_ids:
            raise ValueError("Patient id \"%s\" not present." % patient_id)
        # Return cached value if present
        if patient_id in self._patients:
            return self._patients[patient_id]
        # Load patient data into memory
        patient = Patient(patient_id)
        patient_dir = self.directory_map[patient_id]
        df = self._survival_df
        if patient_id in df.id.values:
            patient.age = float(df.loc[df.id == patient_id].age)
            patient.survival = int(df.loc[df.id == patient_id].survival)
        if self._mris is not None and self._segs is not None:
            # Load from _mris and _segs if possible
            index = self._id_indexer[patient_id]
            patient.mri = self._mris[index]
            patient.seg = self._segs[index]
        else:
            # Load the mri and segmentation data from disk
            patient.mri, patient.seg = load_patient_data(patient_dir)
        self._patients[patient_id] = patient  # cache the value for later
        return patient
    def drop_cache(self):
        """Release all cached patients and bulk arrays (frees memory)."""
        self._patients.clear()
        self._mris = None
        self._segs = None
    @property
    def _survival_df(self):
        # Survival CSVs are shared across subsets via a module-level cache
        # keyed by the CSV path, so each file is parsed at most once.
        if self._survival_csv in survival_df_cache:
            return survival_df_cache[self._survival_csv]
        df = load_survival(self._survival_csv)
        survival_df_cache[self._survival_csv] = df
        return df
class DataSet(object):
    """Entry point to a BraTS data set on disk.

    Locates the year's directory (explicitly or under a BraTS root),
    then exposes lazily-constructed DataSubSet objects for the train,
    HGG, LGG and validation splits.  Nearly every attribute follows a
    cached-property pattern: compute once, store in a *_cache(d) slot.
    """
    def __init__(self, data_set_dir=None, brats_root=None, year=None):
        if data_set_dir is not None:
            # The data-set directory was specified explicitly
            assert isinstance(data_set_dir, str)
            self._data_set_dir = data_set_dir
        elif brats_root is not None and isinstance(year, int):
            # Find the directory by specifying the year
            assert isinstance(brats_root, str)
            year_dir = find_file_containing(brats_root, str(year % 100))
            self._data_set_dir = os.path.join(brats_root, year_dir)
            self._brats_root = brats_root
            self._year = year
        else:
            # BraTS data-set location was not improperly specified
            raise Exception("Specify BraTS location with \"data_set_dir\" or with \"brats_root\" and \"year\"")
        self._validation = None
        self._train = None
        self._hgg = None
        self._lgg = None
        self._dir_map_cache = None
        self._val_dir = None
        self._train_dir_cached = None
        # Note: accessing self._train_dir here may raise FileNotFoundError
        # during construction if no training directory exists.
        self._hgg_dir = os.path.join(self._train_dir, "HGG")
        self._lgg_dir = os.path.join(self._train_dir, "LGG")
        self._train_survival_csv_cached = None
        self._validation_survival_csv_cached = None
        self._train_ids = None
        self._hgg_ids_cached = None
        self._lgg_ids_cached = None
        self._train_dir_map_cache = None
        self._validation_dir_map_cache = None
        self._hgg_dir_map_cache = None
        self._lgg_dir_map_cache = None
    def set(self, data_set_type):
        """
        Get a data subset by type
        :param data_set_type: The DataSubsetType to get
        :return: The data sub-set of interest
        """
        assert isinstance(data_set_type, DataSubsetType)
        if data_set_type == DataSubsetType.train:
            return self.train
        if data_set_type == DataSubsetType.hgg:
            return self.hgg
        if data_set_type == DataSubsetType.lgg:
            return self.lgg
        if data_set_type == DataSubsetType.validation:
            return self.validation
    @property
    def train(self):
        """
        Training data
        Loads the training data from disk, utilizing caching
        :return: A DataSubSet object containing the training data,
            or None if the files could not be found
        """
        if self._train is None:
            try:
                self._train = DataSubSet(self._train_dir_map,
                                         self._train_survival_csv,
                                         data_set_type=DataSubsetType.train)
            except FileNotFoundError:
                # Missing directory/CSV is reported as None, not raised.
                return None
        return self._train
    @property
    def validation(self):
        """
        Validation data
        :return: Validation data subset, or None if not found on disk
        """
        if self._validation is None:
            try:
                self._validation = DataSubSet(self._validation_dir_map,
                                              self._validation_survival_csv,
                                              data_set_type=DataSubsetType.validation)
            except FileNotFoundError:
                return None
        return self._validation
    @property
    def hgg(self):
        """High-grade glioma subset, or None if not found on disk."""
        if self._hgg is None:
            try:
                self._hgg = DataSubSet(self._hgg_dir_map,
                                       self._train_survival_csv,
                                       data_set_type=DataSubsetType.hgg)
            except FileNotFoundError:
                return None
        return self._hgg
    @property
    def lgg(self):
        """Low-grade glioma subset, or None if not found on disk."""
        if self._lgg is None:
            try:
                self._lgg = DataSubSet(self._lgg_dir_map,
                                       self._train_survival_csv,
                                       data_set_type=DataSubsetType.lgg)
            except FileNotFoundError:
                return None
        return self._lgg
    def drop_cache(self):
        """
        Drops the cached values in the object
        :return: None
        """
        self._validation = None
        self._train = None
        self._hgg = None
        self._lgg = None
        self._dir_map_cache = None
        self._val_dir = None
        self._train_dir_cached = None
        self._train_survival_csv_cached = None
        self._validation_survival_csv_cached = None
        self._train_ids = None
        self._hgg_ids_cached = None
        self._lgg_ids_cached = None
        self._train_dir_map_cache = None
        self._validation_dir_map_cache = None
        self._hgg_dir_map_cache = None
        self._lgg_dir_map_cache = None
    @property
    def _train_survival_csv(self):
        # Cached path of the training survival CSV.
        if self._train_survival_csv_cached is None:
            self._train_survival_csv_cached = find_file_containing(self._train_dir, "survival")
            if self._train_survival_csv_cached is None:
                raise FileNotFoundError("Could not find survival CSV in %s" % self._train_dir)
        return self._train_survival_csv_cached
    @property
    def _validation_survival_csv(self):
        # Cached path of the validation survival CSV.
        if self._validation_survival_csv_cached is None:
            self._validation_survival_csv_cached = find_file_containing(self._validation_dir, "survival")
            if self._validation_survival_csv_cached is None:
                raise FileNotFoundError("Could not find survival CSV in %s" % self._validation_dir)
        return self._validation_survival_csv_cached
    @property
    def _train_dir(self):
        # Cached path of the directory whose name contains "training".
        if self._train_dir_cached is not None:
            return self._train_dir_cached
        self._train_dir_cached = find_file_containing(self._data_set_dir, "training")
        if self._train_dir_cached is None:
            raise FileNotFoundError("Could not find training directory in %s" % self._data_set_dir)
        return self._train_dir_cached
    @property
    def _validation_dir(self):
        # Cached path of the directory whose name contains "validation".
        if self._val_dir is not None:
            return self._val_dir
        self._val_dir = find_file_containing(self._data_set_dir, "validation")
        if self._val_dir is None:
            raise FileNotFoundError("Could not find validation directory in %s" % self._data_set_dir)
        return self._val_dir
    @property
    def _train_dir_map(self):
        # Training map = union of the HGG and LGG patient directory maps.
        if self._train_dir_map_cache is None:
            self._train_dir_map_cache = dict(self._hgg_dir_map)
            self._train_dir_map_cache.update(self._lgg_dir_map)
        return self._train_dir_map_cache
    @property
    def _validation_dir_map(self):
        if self._validation_dir_map_cache is None:
            self._validation_dir_map_cache = self._directory_map(self._validation_dir)
        return self._validation_dir_map_cache
    @property
    def _hgg_dir_map(self):
        if self._hgg_dir_map_cache is None:
            self._hgg_dir_map_cache = self._directory_map(self._hgg_dir)
        return self._hgg_dir_map_cache
    @property
    def _lgg_dir_map(self):
        if self._lgg_dir_map_cache is None:
            self._lgg_dir_map_cache = self._directory_map(self._lgg_dir)
        return self._lgg_dir_map_cache
    @property
    def _hgg_ids(self):
        # Patient IDs are simply the entry names inside the HGG directory.
        if self._hgg_ids_cached is None:
            self._hgg_ids_cached = os.listdir(self._hgg_dir)
        return self._hgg_ids_cached
    @property
    def _lgg_ids(self):
        if self._lgg_ids_cached is None:
            self._lgg_ids_cached = os.listdir(self._lgg_dir)
        return self._lgg_ids_cached
    @classmethod
    def _directory_map(cls, dir):
        """Map each sub-directory name of *dir* to its full path."""
        return {file: os.path.join(dir, file)
                for file in os.listdir(dir)
                if os.path.isdir(os.path.join(dir, file))}
| StarcoderdataPython |
150155 | <gh_stars>1-10
# Simple variable-assignment demo.
x = 100
text = "python tutorial"
print(x)
print(text)
# Assign values to multiple variables at once via tuple unpacking.
x, y, z = 10, 20, 30
print(x)
print(y)
print(z)
1634930 | from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from typing import Callable
from functools import wraps
class glpy:
    """Thin convenience wrapper around OpenGL/GLUT window setup.

    Collects window options in the constructor and wires them up in
    :meth:`run`, which enters the GLUT main loop.
    """
    def __init__(self, **kwargs):
        """Initialize the wrapper with the following keyword arguments.

        Keyword arguments (names and defaults now match the code; the
        previous docstring documented "color"/"range" and wrong defaults):
        mode -- GLUT display mode (default GLUT_RGBA)
        size -- window size (default (500, 500))
        position -- window position (default (0, 0))
        title -- window title (default "new title")
        bgcolor -- background RGBA color (default (0, 0, 0, 1.0))
        axis_range -- 2D projection bounds passed to gluOrtho2D
                      (default (-100, 100, -100, 100))
        """
        # dict.get replaces the repeated "v if k in kwargs else default"
        # conditionals; semantics are identical for every present key.
        self.mode = kwargs.get("mode", GLUT_RGBA)
        self.size = kwargs.get("size", (500, 500))
        self.position = kwargs.get("position", (0, 0))
        self.title = kwargs.get("title", "new title")
        self.color = kwargs.get("bgcolor", (0, 0, 0, 1.0))
        self.range = kwargs.get("axis_range", (-100, 100, -100, 100))
    def run(self, cb: Callable):
        """
        Create the window, register *cb* as the GLUT display callback
        and enter the main loop.  Does not return.

        Keyword arguments:
        cb -- the display callback invoked by GLUT for every redraw
        """
        glutInit(sys.argv)
        glutInitDisplayMode(self.mode)
        glutInitWindowSize(*self.size)
        glutInitWindowPosition(*self.position)
        glutCreateWindow(self.title)
        glutDisplayFunc(cb)
        glClearColor(*self.color)
        gluOrtho2D(*self.range)
        glutMainLoop()
def point(size: float):
    """Decorator factory wrapping a draw callback in a GL_POINTS block.

    The wrapped function only needs to issue glVertex/glColor calls; the
    wrapper sets the point size, opens/closes the primitive block and
    flushes the pipeline afterwards.
    """
    def _decorator(draw_fn: Callable):
        @wraps(draw_fn)
        def _wrapped(*args, **kwargs):
            glPointSize(size)          # point size must be set before glBegin
            glBegin(GL_POINTS)
            draw_fn(*args, **kwargs)   # emit the vertices
            glEnd()
            glFlush()                  # force execution of buffered commands
        return _wrapped
    return _decorator
def line(width: float):
    """Decorator factory wrapping a draw callback in a GL_LINES block.

    The wrapped function only needs to issue glVertex/glColor calls; the
    wrapper sets the line width, opens/closes the primitive block and
    flushes the pipeline afterwards.
    """
    def _decorator(draw_fn: Callable):
        @wraps(draw_fn)
        def _wrapped(*args, **kwargs):
            glLineWidth(width)         # line width must be set before glBegin
            glBegin(GL_LINES)
            draw_fn(*args, **kwargs)   # emit the line endpoints
            glEnd()
            glFlush()                  # force execution of buffered commands
        return _wrapped
    return _decorator
| StarcoderdataPython |
3302399 | import time
from .http_request import HttpRequest
from .http_response import HttpResponse
from .tp_link_cipher import TpLinkCipher
class RequestSecurePassthrough(HttpRequest):
    """'securePassthrough' envelope: tunnels another request, encrypted.

    The inner *payload* request is serialized and encrypted with the
    negotiated TP-Link cipher; the ciphertext is sent as the 'request'
    parameter of the passthrough envelope.
    """
    def __init__(self, cipher: TpLinkCipher, payload: HttpRequest):
        # Bug fix: previously `self.tpLinkCipher` was referenced before
        # ever being assigned (AttributeError), and the encrypted payload
        # was computed but discarded — the raw request object was sent.
        self.tpLinkCipher = cipher
        enc_payload = self.tpLinkCipher.encrypt(payload.get_payload())
        super().__init__('securePassthrough', 'params',
                         {
                             'request': enc_payload,
                         })

    def parse_response(self, resp):
        """Wrap the raw response dict in a ResponseSecurePassthrough."""
        return ResponseSecurePassthrough(resp)
class ResponseSecurePassthrough(HttpResponse):
    # Response to a 'securePassthrough' request; exposes the tunnelled
    # inner response string via get_response().
    def __init__(self, data):
        # Initialise before super().__init__ in case base-class parsing
        # (parse_result) runs during construction — TODO confirm against
        # HttpResponse.
        self.response = None
        super().__init__(data)

    def parse_result(self, result):
        # Extract the tunnelled response from the envelope.
        # NOTE(review): assumes result has shape
        # {'result': {'response': ...}} — verify against the device protocol.
        self.response = result['result']['response']

    def get_response(self):
        # Accessor for the tunnelled response payload.
        return self.response
| StarcoderdataPython |
3298147 | """
Test: header removal
"""
import os
import unittest
import ediclean.paxlst as paxlst
class TestEdifact(unittest.TestCase):
    """Compare paxlst.cleanfile() output against stored reference files."""

    def test_files(self):
        """Clean every file under testfiles/original and assert the result
        matches the file of the same name under testfiles/reference."""
        testfiles_dir = os.path.join(os.path.dirname(__file__), "testfiles",
                                     "original")
        # Show full diffs on mismatch (EDIFACT messages can be long).
        # Idiom fix: was `self.assertEqual.__self__.maxDiff = None` set
        # inside the loop; `__self__` is just this instance.
        self.maxDiff = None
        for root, _dirs, files in os.walk(testfiles_dir):
            for file in sorted(files):
                basename = os.path.basename(file)
                # Retrieve and clean the original file.
                with open(os.path.join(root, file), 'r') as original_file:
                    cleaned_file = paxlst.cleanfile(
                        os.path.abspath(original_file.name))
                # Read the expected (reference) output.
                with open(
                        os.path.join(os.path.dirname(__file__), "testfiles",
                                     "reference", basename), 'r') as ref_file:
                    reference_file = ref_file.read()
                # Compare the cleaned file with the reference file.
                self.assertEqual(cleaned_file, reference_file)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
1659154 | """init
Revision ID: 61bc6b460d35
Revises:
Create Date: 2021-04-28 03:43:32.300947
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Alembic revision identifiers for this migration.
revision = '<KEY>'
down_revision = None  # first migration: no parent revision
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision's schema changes (none: initial empty revision)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision's schema changes (none: initial empty revision)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| StarcoderdataPython |
15100 | <gh_stars>1-10
# Copyright (c) 2021.
# The copyright lies with <NAME>, the further use is only permitted with reference to source
import json
import urllib.request

from RiotGames.API.RiotApi import RiotApi
class Match(RiotApi):
    """Wrapper for the Riot Games match-v4 endpoints."""

    # URL template parameters: region, match id, API key.
    __timeline_by_match_id_url: str = "https://{}.api.riotgames.com/lol/match/v4/timelines/by-match/{}?api_key={}"

    def __init__(self, apikey: str):
        """
        :param apikey: Riot Games developer API key
        """
        super().__init__(apikey)
        self.__super = super()

    def by_id(self, match_id: int, region: str):
        """
        Special Function still in development
        https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
        TODO
        :param match_id:
        :param region:
        :return:
        """
        pass

    def matchlist_by_account_id(self, account_id: str, begin_time: int = None, end_time: int = None,
                                begin_index: int = None, end_index: int = None, champions: list = None,
                                queue: list = None, season: list = None):
        """
        Special Function still in development
        https://developer.riotgames.com/apis#match-v4/GET_getMatchlist
        TODO
        format url
        :param account_id: encrypted account id
        :param begin_time:
        :param end_time:
        :param begin_index:
        :param end_index:
        :param champions:
        :param queue:
        :param season:
        :return:
        """
        pass

    def timeline_by_match_id(self, match_id: int, region: str) -> dict:
        """Fetch the timeline for a match and return the decoded JSON dict.

        :param match_id: numeric match id
        :param region: platform routing value, e.g. 'euw1'
        :return: decoded JSON response
        """
        url = self.__timeline_by_match_id_url.format(region, match_id, super()._get_key())
        # Security fix: the response body was previously eval()'d, which
        # executes arbitrary code from the network and also fails on JSON
        # literals such as true/false/null. Parse it as JSON instead, and
        # close the connection deterministically via the context manager.
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read().decode())
| StarcoderdataPython |
1765721 | from os import path
from setuptools import setup, find_packages
# Forward declaration: the actual value is bound by the exec() below.
GAME_ENGINE_VERSION_RAW: str

cwd = path.abspath(path.dirname(__file__))

# Import GAME_ENGINE_VERSION_RAW
# TODO XXX find a more elegant way of tracking versions
with open(path.join(cwd, 'd20', 'version.py')) as f:
    exec(f.read())

# Long description for PyPI comes straight from the README.
with open(path.join(cwd, 'README.md')) as f:
    long_description = f.read()

setup(name="d20-framework",
      version=GAME_ENGINE_VERSION_RAW,  # noqa
      description="Automated Static Analysis Framework",
      long_description=long_description,
      long_description_content_type='text/markdown',
      author="MITRE",
      author_email="",
      url="https://github.com/MITRECND/d20",
      python_requires=">=3.6",
      classifiers=[
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Topic :: Security',
      ],
      install_requires=[
          'python-magic',
          'ssdeep',
          'pyyaml>=5.1,<5.2',
          'requests',
          'packaging',
          'cerberus',
          'texttable'
      ],
      packages=find_packages(exclude=("d20.tests",)),
      entry_points={'console_scripts':
                    ['d20=d20.Manual.Entry:main',
                     'd20-shell=d20.Manual.Entry:shellmain']}
      )
| StarcoderdataPython |
3384931 | '''
059. Spiral Matrix II - LeetCode
https://leetcode.com/problems/spiral-matrix-ii/description/
'''
# import numpy as np
class Solution(object):
    def generateMatrix(self, n):
        """
        Fill an n x n matrix with 1..n*n in clockwise spiral order.

        Replaces the original four duplicated edge-walking loops with a
        single direction-vector walk that turns clockwise whenever the
        next cell is out of bounds or already filled.

        :type n: int
        :rtype: List[List[int]]
        """
        if n == 0:
            return []
        # Idiomatic zero-filled n x n matrix (was a nested append loop).
        matrix = [[0] * n for _ in range(n)]
        row, col = 0, 0
        d_row, d_col = 0, 1  # start moving right along the top row
        for value in range(1, n * n + 1):
            matrix[row][col] = value
            next_row, next_col = row + d_row, col + d_col
            # Turn clockwise on leaving the grid or hitting a filled cell.
            if (not (0 <= next_row < n and 0 <= next_col < n)
                    or matrix[next_row][next_col]):
                d_row, d_col = d_col, -d_row
                next_row, next_col = row + d_row, col + d_col
            row, col = next_row, next_col
        return matrix
# Smoke-test harness: (n, expected spiral matrix) pairs.
ans = [
    [
        3,
        [
            [ 1, 2, 3 ],
            [ 8, 9, 4 ],
            [ 7, 6, 5 ]
        ]
    ]
]
s = Solution()
for i in ans:
    ret = s.generateMatrix(i[0])
    # "O" marks a pass, "X" a failure, followed by the produced matrix.
    # print( "O" if (ret == i[1]).all() else "X", ret)
    print( "O" if ret == i[1] else "X", ret)
1657311 | <reponame>Vipheak/College-Manager
import sys, os;
from PyQt5.QtWidgets import QDialog;
from PyQt5 import uic;
from PyQt5.QtCore import QFile, QIODevice, QTextStream;
#from src.lib.database import DBManager;
class DBConfig(QDialog):
    """Dialog for viewing and editing the database connection settings
    stored in config/database.config."""

    # Values loaded from the config file (class-level defaults if absent).
    nameInConfig = ""
    usernameInConfig = ""
    passwordInConfig = ""
    hostnameInConfig = ""
    portInConfig = 0

    def __init__(self):
        super().__init__()
        self.setupUi()

    def setupUi(self):
        """Load the .ui layout, the style sheet and the current config values."""
        uic.loadUi("src/ui/dbconfig.ui", self)
        stylesheet = QFile("assets/qss/dialog.qss")
        stylesheet.open(QIODevice.ReadWrite | QIODevice.Text)
        self.setStyleSheet(QTextStream(stylesheet).readAll())
        self.port.setMinimum(1)
        self.port.setMaximum(65536)
        f = QFile("config/database.config")
        if f.open(QIODevice.ReadWrite | QIODevice.Text):
            name = f.readLine(255)
            username = f.readLine(255)
            # Bug fix: this line was corrupted ("<PASSWORD>(255)"); restore
            # the readLine call so the password row is actually consumed.
            password = f.readLine(255)
            hostname = f.readLine(255)
            port = f.readLine(1000)
            f.close()
            # Strip the "database_xxx: " prefix and the trailing bytes from
            # the str() representation of each line.
            # NOTE(review): slicing str(QByteArray) like this is fragile;
            # verify the offsets against the format written by configure().
            self.nameInConfig = str(name)[17:-3]
            self.usernameInConfig = str(username)[21:-3]
            self.passwordInConfig = str(password)[21:-3]
            self.hostnameInConfig = str(hostname)[21:-3]
            self.portInConfig = int(str(port)[17:-1])
        self.dbName.setText(self.nameInConfig)
        self.username.setText(self.usernameInConfig)
        self.password.setText(self.passwordInConfig)
        self.hostname.setText(self.hostnameInConfig)
        self.port.setValue(self.portInConfig)
        self.accept.clicked.connect(self.configure)
        self.cancel.clicked.connect(self.close)

    def configure(self):
        """Serialize the dialog fields back to config/database.config."""
        data = "database_name: " + self.dbName.text()
        data += "\ndatabase_username: " + self.username.text()
        data += "\ndatabase_password: " + self.password.text()
        data += "\ndatabase_hostname: " + self.hostname.text()
        data += "\ndatabase_port: " + self.port.text()
        f = QFile("config/database.config")
        f.remove("config/database.config")  # recreate the file from scratch
        if f.open(QIODevice.ReadWrite | QIODevice.Text):
            stream = QTextStream(f)
            stream << data
            f.close()
        self.close()

    # Simple accessors for the loaded configuration values.
    def getName(self): return self.nameInConfig
    def getUsername(self): return self.usernameInConfig
    def getPassword(self): return self.passwordInConfig
    def getHostname(self): return self.hostnameInConfig
    def getPort(self): return self.portInConfig
# @decorator
def createTables():
    # Create the application's database tables.
    # NOTE(review): DBManager comes from src.lib.database, whose import is
    # commented out at the top of this file — calling this as-is raises
    # NameError. Confirm whether this helper is still used.
    db = DBManager();
| StarcoderdataPython |
9569 | #!/usr/bin/env python
# Copyright (c) 2013, Digium, Inc.
# Copyright (c) 2014-2016, Yelp, Inc.
import os
from setuptools import setup
import bravado
# Package metadata for the forked ("cloudlock") bravado build.
setup(
    name="bravado",
    # cloudlock version, no twisted dependency
    version=bravado.version + "cl",
    license="BSD 3-Clause License",
    description="Library for accessing Swagger-enabled API's",
    long_description=open(os.path.join(os.path.dirname(__file__),
                                       "README.rst")).read(),
    author="Digium, Inc. and Yelp, Inc.",
    author_email="<EMAIL>",
    url="https://github.com/Yelp/bravado",
    packages=["bravado"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
    ],
    install_requires=[
        "bravado-core >= 4.2.2",
        "yelp_bytes",
        "python-dateutil",
        "pyyaml",
        "requests",
        "six",
    ],
    extras_require={
    },
)
| StarcoderdataPython |
1734965 | <gh_stars>0
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
# Fuel density used to convert fuel capacity to fuel mass below
# (presumably kg per US gallon — confirm units with the simulator spec).
GAS_DENSITY = 2.858
# Metres per second in one mile per hour.
ONE_MPH = 0.44704
class Controller(object):
    """Drive-by-wire controller producing throttle, brake and steering.

    Combines a yaw controller for steering, a PID on velocity error for
    throttle, and torque-based braking derived from vehicle mass.
    """
    def __init__(self, vehicle_mass, fuel_capacity, brake_deadband,
                 decel_limit, accel_limit, wheel_radius, wheel_base, steer_ratio,
                 max_lat_accel, max_steer_angle, stop_brake_torque):
        self.yaw_controller = YawController(wheel_base, steer_ratio, ONE_MPH, max_lat_accel, max_steer_angle)
        # PID gains for the throttle controller, clamped to [mn, mx].
        kp = 0.9
        ki = 0.007
        kd = 0.2
        mn = 0.0  # Minimum throttle.
        mx = 0.2  # Maximum throttle.
        self.throttle_controller = PID(kp, ki, kd, mn, mx)
        # Low-pass filter to smooth the noisy measured velocity.
        tau_throttle = 0.5
        ts_throttle = 0.02  # Sample time.
        self.throttle_lpf = LowPassFilter(tau_throttle, ts_throttle)
        self.vehicle_mass = vehicle_mass
        self.fuel_capacity = fuel_capacity
        # Assumes a full tank when converting capacity to mass.
        self.fuel_mass = self.fuel_capacity * GAS_DENSITY
        self.brake_deadband = brake_deadband
        self.decel_limit = decel_limit
        self.accel_limit = accel_limit
        self.wheel_radius = wheel_radius
        self.stop_brake_torque = stop_brake_torque
        self.last_time = rospy.get_time()

    def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
        """Return (throttle, brake, steering) for the current targets.

        When drive-by-wire is disabled the PID and filter are reset so
        integral error does not accumulate while a human is driving.
        """
        if not dbw_enabled:
            self.throttle_controller.reset()
            self.throttle_lpf.reset()
            return 0, 0, 0
        current_vel = self.throttle_lpf.filt(current_vel)
        steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
        vel_error = linear_vel - current_vel
        self.last_vel = current_vel
        # Elapsed time since the previous control step, for the PID.
        current_time = rospy.get_time()
        sample_time = current_time - self.last_time
        self.last_time = current_time
        throttle = self.throttle_controller.step(vel_error, sample_time)
        brake = 0
        if linear_vel == 0.0 and current_vel < 0.1:
            # Target is a full stop and we are nearly stopped: hold the
            # vehicle with a constant brake torque.
            throttle = 0
            brake = self.stop_brake_torque  # Nm
        elif throttle < 0.1 and vel_error < 0:
            # Going faster than the target: brake proportionally to the
            # required deceleration, capped at the deceleration limit.
            throttle = 0.0
            decel = max(vel_error, self.decel_limit)
            brake = abs(decel) * (self.vehicle_mass + self.fuel_mass) * self.wheel_radius  # Torque N*m
        return throttle, brake, steering
| StarcoderdataPython |
121034 | from djfilters import filters
from .models import (BooleanModel, DateFieldModel, EmailModel, IpModel,
NumberModel, RelatedIntIdModel, RelatedSlugIdModel,
TextModel)
# Simple Filters
# Plain (non-model) Filter fixtures used by the test-suite.
class TextFieldFilter(filters.Filter):
    text = filters.CharField(max_length=10, required=False)

class BooleanFilter(filters.Filter):
    flag = filters.BooleanField()

class CharFieldWithRequiredFilter(filters.Filter):
    char = filters.CharField()
# Model filters
# One ModelFilter fixture per test model; each exposes all model fields.
class TextModelFilter(filters.ModelFilter):
    class Meta:
        model = TextModel
        fields = '__all__'

class IpModelFilter(filters.ModelFilter):
    class Meta:
        model = IpModel
        fields = '__all__'

class EmailModelFilter(filters.ModelFilter):
    class Meta:
        model = EmailModel
        fields = '__all__'

class NumberModelFilter(filters.ModelFilter):
    class Meta:
        model = NumberModel
        fields = '__all__'

class BooleanModelFilter(filters.ModelFilter):
    class Meta:
        model = BooleanModel
        fields = '__all__'

class DateFieldModelFilter(filters.ModelFilter):
    class Meta:
        model = DateFieldModel
        fields = '__all__'

class RelatedIntIdModelFilter(filters.ModelFilter):
    class Meta:
        model = RelatedIntIdModel
        fields = '__all__'

class RelatedSlugIdModelFilter(filters.ModelFilter):
    class Meta:
        model = RelatedSlugIdModel
        fields = '__all__'

# Degenerate fixture: a Filter that declares no fields at all.
class NoFieldFilter(filters.Filter):
    pass
def get_model_filter(
    filter_class,
    fields='__all__',
    extra_kwargs=None
):
    """Configure *filter_class* in place and return it.

    Sets ``filter_class.Meta.fields`` (default ``'__all__'``) and, when
    provided, ``filter_class.Meta.extra_kwargs``.

    Note: this mutates the passed class, so reusing the same class across
    tests shares the configuration.
    """
    # Idiom fix: the default for extra_kwargs was a mutable dict literal
    # ({}); use None instead. Behaviour is unchanged because the value was
    # only truth-tested (both {} and None are falsy), never mutated.
    filter_class.Meta.fields = fields
    if extra_kwargs:
        filter_class.Meta.extra_kwargs = extra_kwargs
    return filter_class
def get_simple_filter(fields):
    # Build a Filter subclass whose instances register the given
    # {name: field} mapping at construction time.
    class Filter(filters.Filter):
        def __init__(self, *args, **kwargs):
            # NOTE(review): if self.fields resolves to a class-level dict on
            # filters.Filter, this mutates state shared across instances —
            # confirm that the base class copies it per instance.
            for name, field in fields.items():
                self.fields[name] = field
            super(Filter, self).__init__(*args, **kwargs)
    return Filter
| StarcoderdataPython |
61095 | <reponame>dfioravanti/tangles<filename>test/test_utils.py
import pytest
from src.utils import merge_dictionaries_with_disagreements, matching_items
class Test_matching_items():
    """Tests for matching_items(): keys whose values agree in both dicts.

    Each case also asserts the function is symmetric in its arguments.
    """
    def test_two_match(self):
        d1 = {1: True, 2: True, 4: False}
        d2 = {1: True, 2: True, 3: False}
        expected = [1, 2]
        result = matching_items(d1, d2)
        assert expected == result
        result = matching_items(d2, d1)
        assert expected == result

    def test_one_match(self):
        d1 = {1: True, 2: True, 4: False}
        d2 = {1: False, 2: True, 3: False}
        expected = [2]
        result = matching_items(d1, d2)
        assert expected == result
        result = matching_items(d2, d1)
        assert expected == result

    def test_zero_match(self):
        d1 = {1: True, 2: False, 4: False}
        d2 = {1: False, 2: True, 3: False}
        expected = []
        result = matching_items(d1, d2)
        assert expected == result
        result = matching_items(d2, d1)
        assert expected == result
class Test_merge_dictionaries_with_disagreements():
    """Tests for merge_dictionaries_with_disagreements(): union of two
    dicts where keys whose values disagree are dropped entirely.

    Each case also asserts the function is symmetric in its arguments.
    """
    def test_no_drop(self):
        # Disjoint key sets: plain union.
        d1 = {1: True, 4: False}
        d2 = {2: True, 3: False}
        expected = {**d1, **d2}
        result = merge_dictionaries_with_disagreements(d1, d2)
        assert expected == result
        result = merge_dictionaries_with_disagreements(d2, d1)
        assert expected == result

    def test_one_drop(self):
        # Key 2 disagrees and is dropped from the merge.
        d1 = {1: True, 2: False}
        d2 = {2: True, 3: False}
        expected = {1: True, 3: False}
        result = merge_dictionaries_with_disagreements(d1, d2)
        assert expected == result
        result = merge_dictionaries_with_disagreements(d2, d1)
        assert expected == result

    def test_all_drop(self):
        d1 = {1: True, 2: False}
        d2 = {1: False, 2: True}
        expected = {}
        result = merge_dictionaries_with_disagreements(d1, d2)
        assert expected == result
        result = merge_dictionaries_with_disagreements(d2, d1)
        assert expected == result

    def test_all_but_one_drop(self):
        d1 = {1: True, 2: False}
        d2 = {1: False, 2: True, 3: True}
        expected = {3: True}
        result = merge_dictionaries_with_disagreements(d1, d2)
        assert expected == result
        result = merge_dictionaries_with_disagreements(d2, d1)
        assert expected == result

    def test_one_empty(self):
        d1 = {}
        d2 = {1: False, 2: True, 3: True}
        expected = d2
        result = merge_dictionaries_with_disagreements(d1, d2)
        assert expected == result
        result = merge_dictionaries_with_disagreements(d2, d1)
        assert expected == result

    def test_both_empty(self):
        d1 = {}
        d2 = {}
        expected = {}
        result = merge_dictionaries_with_disagreements(d1, d2)
        assert expected == result
        result = merge_dictionaries_with_disagreements(d2, d1)
        assert expected == result
1621668 | from synapyse.base.input_functions.input_function import InputFunction
from synapyse.base.neuron import Neuron
__author__ = '<NAME>'
class BiasNeuron(Neuron):
def __init__(self, activation_function):
"""
:type activation_function: synapyse.base.activation_functions.activation_function.ActivationFunction
"""
Neuron.__init__(self, InputFunction, activation_function)
def compute_output(self):
self.output = 1.0
return self.output | StarcoderdataPython |
3345220 | <filename>KF/AUKF.py
# Author: <NAME>
# Affiliation: Kyoto University
# ======================================================================
# 1. (IMPORTANT:CITATION ALERT)
# This script is largely an adaptation of the paper DOI:10.3390/s18030808 (Zheng et al., Sensors, 2018).
# This paper was chosen as the basis for this script due to its relatively straight forward implementation
# and cost-effectiveness of computation.
# 2.
# Note that residual methods such as this requires a post-hoc adjustments to R separate from the main UKF.
# For brevity, recompute of the state mean and variance were skipped. This also does mean that the effect
# of the current time innovation (or prediction error) will start affecting the filter one time step after
# the fact.
# 3.
# This method belongs to a class of 'residual based adaptive method'.
# For more information on different types of adaptation possible, see DOI:10.1109/TAC.1972.1100100.
# For more information on derivation of 'residual based adaptive method', see DOI:10.1007/s001900050236.
# 4.
# Similar implementations of adaptive filters can be found in a variety of literature;
# DOI: 10.3390/app9091726 (Battery Health)*
# DOI: 10.1007/s001900050236 (GPS/INS)
# *:A variation where hyperparameter is used in place describing noise as a distribution initself is also found
import numpy as np
from copy import copy, deepcopy
class AdaptiveUnscentedKalmanFilter:
    '''
    A mixin component.
    This is not meant as a standalone filter.
    In order to utilize this as a full adaptive filter, do the following;
    Example (with UKF):
        class constructor_AUKF(UnscentedKalmanFilter,AdaptiveUnscentedKalmanFilter,metaclass=MixedClassMeta):
            def __init__(self,*args,**kwargs): pass
        AUKF = constructor_AUKF(**kwargs)
    Parameters
    ----------
    kwargs : dict
        + n : number of iterations
        + delta: adaptive rate of filter
    '''
    def __init__(self, **kwargs):
        # Number of iterations; sizes the residual history buffers.
        self.n = kwargs['n']
        # Adaptive rate (forgetting factor) for the R update.
        self.delta = kwargs['delta']
        # NOTE(review): relies on self._dim_x being set by the host filter
        # class before this mixin's __init__ runs — confirm MRO order.
        self.residuals = np.empty((self.n,self._dim_x))
        self.residual_variances = np.empty((self.n,self._dim_x,self._dim_x))

    def adapt_noise(self, i, x, **kwargs):
        '''
        Post-hoc adaptive measurement noise.
        Computes residuals which are used to construct a guaranteed positive definite matrix.
        Parameters
        ----------
        i : int
            iteration index
        x : array_like
            observation
        '''
        # re-create sigma points corresponding to updated mean and variance of hidden state
        self.phi_c_c = self.points_fn.sigma_points(self.z, self.P)
        # re-create sigma points corresponding to measurement sigma points
        hphi_c_c = []
        for s in self.phi_c_c:
            hphi_c_c.append(self.hx(s, **kwargs))
        self.hphi_c_c = np.atleast_2d(hphi_c_c)
        # recompute the mean and predictive measurement variance
        # (zero noise_cov: R is handled separately in the adaptation below)
        self.x_c_c, self.S_c_c = self.UT(
            sigmas = self.hphi_c_c,
            Wm = self.Wm,
            Wc = self.Wc,
            noise_cov = np.zeros((self._dim_x, self._dim_x))
        )
        self.residual = np.subtract(x, self.x_c_c)
        # Outer product of the residual: positive semi-definite by construction.
        self.residual_variance = np.outer(self.residual, self.residual)
        # save residual sequences
        self.residuals[i,:] = self.residual
        self.residual_variances[i,:,:] = self.residual_variance
        # R adaptation: exponential moving average controlled by delta.
        self.R = (1 - self.delta) * self.R + self.delta * (self.residual_variance + self.S_c_c)
1734286 | from stenway.reliabletxt import *
class WsvChar:
    # All WSV whitespace code points. Line feed (0x0A) is deliberately
    # excluded: it is the WSV line separator, not value whitespace.
    _WHITESPACE_CODEPOINTS = frozenset(
        [0x0009, 0x000B, 0x000C, 0x000D, 0x0020, 0x0085, 0x00A0, 0x1680,
         0x2028, 0x2029, 0x202F, 0x205F, 0x3000]
        + list(range(0x2000, 0x200B))
    )

    def isWhitespace(c):
        """Return True if code point *c* counts as WSV whitespace."""
        return c in WsvChar._WHITESPACE_CODEPOINTS

    def getWhitespaceCodePoints():
        """Return all WSV whitespace code points in ascending order."""
        return sorted(WsvChar._WHITESPACE_CODEPOINTS)
class WsvString:
    def isWhitespace(str):
        """Return True if *str* is non-empty and every character in it is
        WSV whitespace."""
        if not str:
            return False
        return all(WsvChar.isWhitespace(ord(ch)) for ch in str)
class WsvParserException(Exception):
    """Parse error carrying the absolute character index plus the
    0-based line index/position; the message embeds them 1-based."""
    def __init__(self, index, lineIndex, linePosition, message):
        super().__init__(f"{message} ({lineIndex + 1}, {linePosition + 1})")
        self.index = index
        self.lineIndex = lineIndex
        self.linePosition = linePosition
class WsvCharIterator(ReliableTxtCharIterator):
    """Character-level cursor over WSV text, with WSV-specific scanning.

    Works on the code-point list (self._chars) and cursor (self._index)
    provided by ReliableTxtCharIterator.
    """
    def __init__(self, text):
        ReliableTxtCharIterator.__init__(self, text)

    def isWhitespace(self):
        # True if the current character is WSV whitespace (not at end).
        if self.isEndOfText():
            return False
        return WsvChar.isWhitespace(self._chars[self._index])

    def getString(self, startIndex):
        # Slice [startIndex, current index) back into a string.
        part = self._chars[startIndex:self._index]
        return StringUtil.fromCodePoints(part)

    def readCommentText(self):
        # Consume up to (not including) the next line feed; return the text.
        startIndex = self._index
        while True:
            if self.isEndOfText():
                break
            if self._chars[self._index] == 0x0A:
                break
            self._index += 1
        return self.getString(startIndex)

    def skipCommentText(self):
        # Same as readCommentText but discards the text.
        while True:
            if self.isEndOfText():
                break
            if self._chars[self._index] == 0x0A:
                break
            self._index += 1

    def readWhitespaceOrNull(self):
        # Consume a run of whitespace; return it, or None if none was read.
        startIndex = self._index
        while True:
            if self.isEndOfText():
                break
            c = self._chars[self._index]
            if c == 0x0A:
                break
            if not WsvChar.isWhitespace(c):
                break
            self._index += 1
        if self._index == startIndex:
            return None
        return self.getString(startIndex)

    def skipWhitespace(self):
        # Consume a run of whitespace; True if at least one char was skipped.
        startIndex = self._index
        while True:
            if self.isEndOfText():
                break
            c = self._chars[self._index]
            if c == 0x0A:
                break
            if not WsvChar.isWhitespace(c):
                break
            self._index += 1
        return self._index > startIndex

    def getException(self, message):
        # Build a parser exception annotated with the current position.
        lineIndex, linePosition = self.getLineInfo()
        return WsvParserException(self._index, lineIndex, linePosition, message)

    def readString(self):
        # Read a double-quoted WSV string (opening quote already consumed).
        # "" inside is an escaped quote; "/" is an escaped line feed.
        chars = []
        while True:
            if self.isEndOfText() or self.isChar(0x0A):
                raise self.getException("String not closed")
            c = self._chars[self._index]
            if c == 0x22:
                self._index += 1
                if self.tryReadChar(0x22):
                    chars.append(0x22)
                elif self.tryReadChar(0x2F):
                    if not self.tryReadChar(0x22):
                        raise self.getException("Invalid string line break")
                    chars.append(0x0A)
                elif self.isWhitespace() or self.isChar(0x0A) or self.isChar(0x23) or self.isEndOfText():
                    # Closing quote must be followed by a value boundary.
                    break
                else:
                    raise self.getException("Invalid character after string")
            else:
                chars.append(c)
                self._index += 1
        return StringUtil.fromCodePoints(chars)

    def readValue(self):
        # Read an unquoted value: runs until whitespace, LF or comment (#).
        startIndex = self._index
        while True:
            if self.isEndOfText():
                break
            c = self._chars[self._index]
            if WsvChar.isWhitespace(c) or c == 0x0A or c == 0x23:
                break
            if c == 0x22:
                raise self.getException("Invalid double quote in value")
            self._index += 1
        if self._index == startIndex:
            raise self.getException("Invalid value")
        return self.getString(startIndex)
class WsvParser:
    """Parsers for WSV lines and documents.

    The *NonPreserving variants discard whitespace/comment layout; the
    plain variants keep it on the resulting WsvLine objects.
    """
    def parseLineAsArray(content):
        # Parse exactly one line into a plain list of values (None = null).
        iterator = WsvCharIterator(content)
        result = WsvParser._parseLineAsArray(iterator)
        if iterator.isChar(0x0A):
            raise iterator.getException("Multiple WSV lines not allowed")
        elif not iterator.isEndOfText():
            raise iterator.getException("Unexpected parser error")
        return result

    def _parseLineAsArray(iterator):
        # Parse one line from the iterator's current position; layout is
        # discarded and trailing comments are skipped.
        iterator.skipWhitespace()
        values = []
        while (not iterator.isChar(0x0A)) and (not iterator.isEndOfText()):
            value = None
            if iterator.isChar(0x23):
                break
            elif iterator.tryReadChar(0x22):
                value = iterator.readString()
            else:
                value = iterator.readValue()
                # Unquoted "-" denotes null; a quoted "-" stays literal.
                if value == "-":
                    value = None
            values.append(value)
            if not iterator.skipWhitespace():
                break
        if iterator.tryReadChar(0x23):
            iterator.skipCommentText()
        return values

    def parseDocumentAsJaggedArray(content):
        # Parse a whole document into a list of value lists.
        iterator = WsvCharIterator(content)
        lines = []
        while True:
            newLine = WsvParser._parseLineAsArray(iterator)
            lines.append(newLine)
            if iterator.isEndOfText():
                break
            elif not iterator.tryReadChar(0x0A):
                raise iterator.getException("Unexpected parser error")
        if not iterator.isEndOfText():
            raise iterator.getException("Unexpected parser error")
        return lines

    def parseLine(content):
        # Parse exactly one line into a WsvLine, preserving layout.
        iterator = WsvCharIterator(content)
        result = WsvParser._parseLine(iterator)
        if iterator.isChar(0x0A):
            raise iterator.getException("Multiple WSV lines not allowed")
        elif not iterator.isEndOfText():
            raise iterator.getException("Unexpected parser error")
        return result

    def _parseLine(iterator):
        # Parse one line keeping the whitespace run before each value (and
        # a trailing slot before the comment) plus the comment text.
        values = []
        whitespaces = []
        whitespace = iterator.readWhitespaceOrNull()
        whitespaces.append(whitespace)
        while (not iterator.isChar(0x0A)) and (not iterator.isEndOfText()):
            value = None
            if iterator.isChar(0x23):
                break
            elif iterator.tryReadChar(0x22):
                value = iterator.readString()
            else:
                value = iterator.readValue()
                # Unquoted "-" denotes null; a quoted "-" stays literal.
                if value == "-":
                    value = None
            values.append(value)
            whitespace = iterator.readWhitespaceOrNull()
            if whitespace == None:
                break
            whitespaces.append(whitespace)
        comment = None
        if iterator.tryReadChar(0x23):
            comment = iterator.readCommentText()
            if whitespace == None:
                whitespaces.append(None)
        newLine = WsvLine(values)
        newLine._whitespaces = whitespaces
        newLine._comment = comment
        return newLine

    def parseDocument(content):
        # Parse a whole document into a WsvDocument, preserving layout.
        document = WsvDocument()
        iterator = WsvCharIterator(content)
        while True:
            newLine = WsvParser._parseLine(iterator)
            document.addLine(newLine)
            if iterator.isEndOfText():
                break
            elif not iterator.tryReadChar(0x0A):
                raise iterator.getException("Unexpected parser error")
        if not iterator.isEndOfText():
            raise iterator.getException("Unexpected parser error")
        return document

    def parseLineNonPreserving(content):
        # Parse one line, discarding layout and comments.
        values = WsvParser.parseLineAsArray(content)
        return WsvLine(values)

    def parseDocumentNonPreserving(content):
        # Parse a whole document, discarding layout and comments.
        document = WsvDocument()
        iterator = WsvCharIterator(content)
        while True:
            lineValues = WsvParser._parseLineAsArray(iterator)
            newLine = WsvLine(lineValues)
            document.addLine(newLine)
            if iterator.isEndOfText():
                break
            elif not iterator.tryReadChar(0x0A):
                raise iterator.getException("Unexpected parser error")
        if not iterator.isEndOfText():
            raise iterator.getException("Unexpected parser error")
        return document
class WsvSerializer:
    """Serializers for WSV values, lines and documents.

    The *NonPreserving variants emit canonical single-space layout; the
    plain variants replay the whitespace/comment layout captured on the
    WsvLine objects by the parser.
    """
    def containsSpecialChar(value):
        # True if the value needs quoting: LF, whitespace, '"' or '#'.
        chars = StringUtil.getCodePoints(value)
        for c in chars:
            if c == 0x0A or WsvChar.isWhitespace(c) or c == 0x22 or c == 0x23:
                return True
        return False

    def serializeValue(value):
        # None -> "-"; empty and literal "-" are quoted; special chars are
        # escaped ("" for '"', "/" for LF); anything else passes through.
        if value is None:
            return "-"
        elif len(value) == 0:
            return "\"\""
        elif value == "-":
            return "\"-\""
        elif WsvSerializer.containsSpecialChar(value):
            result = []
            chars = StringUtil.getCodePoints(value)
            result.append(0x22)
            for c in chars:
                if c == 0x0A:
                    result.append(0x22)
                    result.append(0x2F)
                    result.append(0x22)
                elif c == 0x22:
                    result.append(0x22)
                    result.append(0x22)
                else:
                    result.append(c)
            result.append(0x22)
            return StringUtil.fromCodePoints(result)
        else:
            return value

    def _serializeWhitespace(whitespace, isRequired):
        # Emit the captured whitespace, or a single space if one is
        # required between values and none was captured.
        if whitespace is not None and len(whitespace) > 0:
            return whitespace
        elif isRequired:
            return " "
        else:
            return ""

    def _serializeValuesWithWhitespace(line):
        # Replay values with their captured whitespace runs.
        result = ""
        whitespaces = line._whitespaces
        comment = line._comment
        if line.values is None:
            whitespace = whitespaces[0]
            result += WsvSerializer._serializeWhitespace(whitespace, False)
            return result
        for i in range(len(line.values)):
            whitespace = None
            if i < len(whitespaces):
                whitespace = whitespaces[i]
            if i == 0:
                result += WsvSerializer._serializeWhitespace(whitespace, False)
            else:
                result += WsvSerializer._serializeWhitespace(whitespace, True)
            result += WsvSerializer.serializeValue(line.values[i])
        if len(whitespaces) >= len(line.values) + 1:
            whitespace = whitespaces[len(line.values)]
            result += WsvSerializer._serializeWhitespace(whitespace, False)
        elif comment is not None and len(line.values) > 0:
            # Bug fix: this branch referenced `line.Values` (capital V),
            # raising AttributeError whenever a commented line had values
            # but no trailing whitespace slot.
            result += " "
        return result

    def _serializeValuesWithoutWhitespace(line):
        # Canonical layout: single space between values.
        result = ""
        if line.values is None:
            return result
        isFollowingValue = False
        for value in line.values:
            if isFollowingValue:
                result += ' '
            else:
                isFollowingValue = True
            result += WsvSerializer.serializeValue(value)
        if line.getComment() is not None and len(line.values) > 0:
            result += " "
        return result

    def serializeLine(line):
        # Serialize one line, appending the comment if present.
        result = ""
        whitespaces = line._whitespaces
        if whitespaces is not None and len(whitespaces) > 0:
            result += WsvSerializer._serializeValuesWithWhitespace(line)
        else:
            result += WsvSerializer._serializeValuesWithoutWhitespace(line)
        comment = line._comment
        if comment is not None:
            result += "#"
            result += comment
        return result

    def serializeLineValues(values):
        # Serialize a bare value list with single-space separators.
        result = ""
        isFirstValue = True
        for value in values:
            if not isFirstValue:
                result += " "
            else:
                isFirstValue = False
            result += WsvSerializer.serializeValue(value)
        return result

    def serializeLineNonPreserving(line):
        return WsvSerializer.serializeLineValues(line.values)

    def serializeDocument(document):
        # Join serialized lines with LF, preserving layout.
        result = ""
        isFirstLine = True
        for line in document.lines:
            if not isFirstLine:
                result += "\n"
            else:
                isFirstLine = False
            result += WsvSerializer.serializeLine(line)
        return result

    def serializeDocumentNonPreserving(document):
        # Join serialized lines with LF, canonical layout.
        result = ""
        isFirstLine = True
        for line in document.lines:
            if not isFirstLine:
                result += "\n"
            else:
                isFirstLine = False
            result += WsvSerializer.serializeLineNonPreserving(line)
        return result
class WsvLine:
    """One WSV line: a value list (None = null) plus optional captured
    whitespace runs and an optional comment."""
    def __init__(self, values=None, whitespaces=None, comment=None):
        if values is None:
            self.values = []
        else:
            self.values = values
        self.setWhitespaces(whitespaces)
        self.setComment(comment)

    def hasValues(self):
        # True if the line carries at least one value.
        return self.values != None and len(self.values) > 0

    def setWhitespaces(self, whitespaces):
        # Validate before storing: each entry must be None/empty or pure
        # whitespace without line feeds.
        WsvLine.validateWhitespaces(whitespaces)
        self._whitespaces = whitespaces

    def setComment(self, comment):
        # Validate before storing: comments must not contain line feeds.
        WsvLine.validateComment(comment)
        self._comment = comment

    def setValues(self, *values):
        # Replace the value list with the given positional arguments.
        self.values = []
        for value in values:
            self.values.append(value)

    def validateWhitespaces(whitespaces):
        if whitespaces != None:
            for whitespace in whitespaces:
                if whitespace != None and len(whitespace) > 0 and not WsvString.isWhitespace(whitespace):
                    raise Exception("Whitespace value contains non whitespace character or line feed")

    def validateComment(comment):
        if comment != None and comment.find('\n') >= 0:
            raise Exception("Line feed in comment is not allowed")

    def getWhitespaces(self):
        return self._whitespaces

    def getComment(self):
        return self._comment

    def parse(content, preserveWhitespaceAndComment = True):
        # Parse a single line of WSV text into a WsvLine.
        if preserveWhitespaceAndComment:
            return WsvParser.parseLine(content)
        else:
            return WsvParser.parseLineNonPreserving(content)

    def parseAsArray(content):
        # Parse a single line into a plain value list.
        return WsvParser.parseLineAsArray(content)

    def __str__(self):
        return self.toString(True)

    def toString(self, preserveWhitespaceAndComment):
        # Serialize, either replaying captured layout or canonically.
        if preserveWhitespaceAndComment:
            return WsvSerializer.serializeLine(self)
        else:
            return WsvSerializer.serializeLineNonPreserving(self)

    def _set(self, values, whitespaces, comment):
        # Internal setter that bypasses validation (used by the parser).
        self.values = values
        self._whitespaces = whitespaces
        self._comment = comment
class WsvDocument:
    """A WSV document: an ordered list of WsvLine objects plus the text
    encoding used when loading/saving via ReliableTxtDocument."""
    def __init__(self, lines = None, encoding = ReliableTxtEncoding.UTF_8):
        if lines is None:
            self.lines = []
        else:
            self.lines = lines
        self.encoding = encoding

    def setEncoding(self, encoding):
        self.encoding = encoding

    def getEncoding(self):
        return self.encoding

    def addLine(self, line):
        # Append a WsvLine to the document.
        self.lines.append(line)

    def __str__(self):
        return self.toString()

    def toString(self, preserveWhitespaceAndComments=True):
        # Serialize, either replaying captured layout or canonically.
        if preserveWhitespaceAndComments:
            return WsvSerializer.serializeDocument(self)
        else:
            return WsvSerializer.serializeDocumentNonPreserving(self)

    def toArray(self):
        # Jagged array view: one value list per line.
        array = []
        for line in self.lines:
            array.append(line.values)
        return array

    def save(self, filePath, preserveWhitespaceAndComments=True):
        # Serialize and write through ReliableTxtDocument in self.encoding.
        content = self.toString(preserveWhitespaceAndComments)
        file = ReliableTxtDocument(content, self.encoding)
        file.save(filePath)

    def parse(content, preserveWhitespaceAndComments=True):
        # Parse WSV text into a WsvDocument.
        if preserveWhitespaceAndComments:
            return WsvParser.parseDocument(content)
        else:
            return WsvParser.parseDocumentNonPreserving(content)

    def load(filePath, preserveWhitespaceAndComments=True):
        # Load a file (encoding auto-detected by ReliableTxtDocument),
        # parse it, and remember the detected encoding for later saves.
        file = ReliableTxtDocument.load(filePath)
        content = file.getText()
        document = WsvDocument.parse(content, preserveWhitespaceAndComments)
        document.setEncoding(file.getEncoding())
        return document

    def parseAsJaggedArray(content):
        # Parse WSV text into a plain list of value lists.
        return WsvParser.parseDocumentAsJaggedArray(content)
| StarcoderdataPython |
3252873 | <reponame>adsonrodrigues/customers
from django.core.management.base import BaseCommand, CommandError
from clients.models import Profile
from django.conf import settings
import environ
import googlemaps
import csv
# Load environment variables (e.g. KEY_GOOGLE_MAPS) from a .env file.
env = environ.Env()
environ.Env.read_env()
class Command(BaseCommand):
    """Management command: import customers from customers.csv and geocode each city."""
    # Fix: typo in the help text ("Populanting" -> "Populating").
    help = 'Populating database with customers from a file.'

    def handle(self, *args, **kwargs):
        """Read customers.csv from BASE_DIR, geocode each row's city, save Profiles.

        Column layout used below (0-based): id, first_name, last_name, email,
        gender, company, city, title. The first row is treated as a header.
        """
        filename = settings.BASE_DIR / 'customers.csv'
        with open(filename, 'r') as file:
            reader = csv.reader(file, delimiter=",")
            next(reader, None)  # skip the header row
            gmaps = googlemaps.Client(key=env.str("KEY_GOOGLE_MAPS"))
            for row in reader:
                print('Creating Profile id: {} name: {}'.format(row[0], row[1]))
                geocode_result = gmaps.geocode(row[6])
                # Fix: the Geocoding API returns an empty list for unknown
                # addresses; previously geocode_result[0] raised IndexError
                # and aborted the whole import.
                if not geocode_result:
                    print('No geocode result for city {!r}; skipping row id {}'.format(row[6], row[0]))
                    continue
                location = geocode_result[0].get('geometry').get('location')
                lat = location.get('lat')
                lng = location.get('lng')
                obj = Profile(id=row[0], first_name=row[1], last_name=row[2], email=row[3], gender=row[4],
                              company=row[5], city=row[6], title=row[7], latitude=lat, longitude=lng)
                obj.save()
        print('Successfully imported customers.')
| StarcoderdataPython |
173471 | import multiprocessing
import pickle
import random
import sys
from collections import defaultdict
from math import ceil, sqrt
import numpy as np
from scipy.stats import norm, skewnorm
from tqdm import tqdm
# Make the sibling `features` module importable when run from this directory.
sys.path.append('..')
import features
# Security coefficients in (0, 1]: how dependable each cash-flow / asset
# category is treated as being. They divide into the simulated standard
# deviation in calc_balance_distr, so lower security -> wider spread.
# NOTE(review): key 'dividents' is a (misspelled) runtime dict key and must
# match the category names in features.CUM_FEATURES — do not "fix" it alone.
INCOME_SECURITY = {
    'employee_wage': 0.8,
    'state_wage': 0.9,
    'dividents': 0.5,
    'rent': 0.6,
    'other': 0.1,
}
# Reliability of expense estimates per category.
EXPENSE_SECURITY = {
    'housing': 0.8,
    'car_service': 0.5,
    'taxes': 0.8,
    'alimony': 0.8,
    'credits': 0.5,
    'insurance': 0.8,
    'other': 0.5,
}
# Reliability of property valuations per category.
PROPERTY_SECURITY = {
    'apartment': 0.7,
    'house': 0.5,
    'car': 0.3,
}
def calc_responsibility(fdict):
    """Score a borrower's responsibility from profile features.

    Starts at 0.6, adds bonuses for education / serious loan purpose and for
    family obligations, then halves the score once per negative
    credit-history flag.
    """
    score = 0.6
    if fdict['education'] >= 3 or \
            fdict['purpose:education'] or fdict['purpose:real_estate']:
        score += 0.1
    dependents = fdict['dependents']
    if dependents >= 2:
        score += 0.3
    elif dependents == 1:
        score += 0.2
    elif fdict['married']:
        score += 0.1
    for flag in ['has_overdue_debts', 'missed_deadlines', 'was_bankrupt']:
        if fdict[flag]:
            score *= 0.5
    return score
# Base relative spread of a single cash flow; widened by category insecurity
# and low responsibility (both divide into the std-dev below).
SIGMA_COEFF = 0.05


def calc_balance_distr(fdict, resp):
    """Approximate the borrower's end-of-loan balance as a normal distribution.

    Each income/expense/property category contributes a skew-normal component;
    their means and variances are accumulated and the total is returned as a
    scipy `norm` distribution. `resp` is the responsibility score from
    calc_responsibility (lower responsibility -> wider components).
    """
    balance_mean = 0
    balance_var = 0
    # Incomes: negative skew (a=-4) — shortfalls more likely than windfalls.
    for cat in features.CUM_FEATURES['income']:
        value = fdict['income:' + cat]
        if value > 0:
            distr = skewnorm(-4, value, value * SIGMA_COEFF / INCOME_SECURITY[cat] / resp)
            balance_mean += distr.mean()
            balance_var += distr.var()
    # Expenses: positive skew (a=4) — overruns more likely than savings.
    for cat in features.CUM_FEATURES['expense']:
        value = fdict['expense:' + cat]
        if value > 0:
            distr = skewnorm(4, value, value * SIGMA_COEFF / EXPENSE_SECURITY[cat] / resp)
            balance_mean -= distr.mean()
            balance_var += distr.var()
    # Monthly flows accumulate over the full term; `duration` is in years
    # (calc_interest_rate compounds a yearly rate by the same value).
    duration_in_months = 12 * fdict['duration']
    # NOTE(review): variance scales with duration**2, i.e. months are treated
    # as perfectly correlated (not independent, which would scale linearly)
    # — confirm this is intended.
    balance_mean *= duration_in_months
    balance_var *= duration_in_months ** 2
    # Property is a one-off contribution at a skewed price around its value.
    for cat in features.CUM_FEATURES['property']:
        price = fdict['property:' + cat]
        if price > 0:
            distr = skewnorm(4, price, price * SIGMA_COEFF / PROPERTY_SECURITY[cat] / resp)
            balance_mean += distr.mean()
            balance_var += distr.var()
    return norm(balance_mean, sqrt(balance_var))
def cond_expect(norm_distr, a):
    """Return E(X | X < a) * P{X < a} for X ~ norm_distr (a scipy frozen normal).

    Identity follows from the Gaussian integral tables:
    https://en.wikipedia.org/wiki/List_of_integrals_of_Gaussian_functions
    """
    mean, std = norm_distr.args
    truncated_mass = norm_distr.cdf(a)
    tail_correction = std ** 2 * norm_distr.pdf(a)
    return mean * truncated_mass - tail_correction
# Bank's target yearly growth factor on the principal, and the highest
# factor the search is allowed to quote.
TARGET_INTEREST = 1.10
MAX_INTEREST = 10.00


def calc_interest_rate(fdict):
    """Quote a yearly interest factor for the borrower described by *fdict*.

    Bisects (15 steps) for the smallest rate whose expected repayment —
    accounting for default probability under the borrower's balance
    distribution — meets the bank's target, then rounds up to 3 decimals.
    """
    responsibility = calc_responsibility(fdict)
    balance_distr = calc_balance_distr(fdict, responsibility)
    principal = fdict['credit_amount']
    duration = fdict['duration']
    bank_wants = principal * TARGET_INTEREST ** duration
    lo, hi = TARGET_INTEREST, MAX_INTEREST
    steps = 15
    while steps > 0:
        steps -= 1
        rate = (lo + hi) / 2
        bank_asks = principal * rate ** duration
        default_proba = balance_distr.cdf(bank_asks)
        bank_takes = cond_expect(balance_distr, bank_asks) + \
            (1 - default_proba) * bank_asks
        if bank_takes < bank_wants:
            lo = rate
        else:
            hi = rate
    # Round the quote up to 3 decimal places.
    return ceil(lo * 1e3) / 1e3
def test_calc_interest_rate():
    """Smoke check: print the quoted rate for one hand-written borrower."""
    sample = defaultdict(int, {
        'credit_amount': 3 * 10 ** 6,
        'duration': 2,
        'education': 3,
        'married': 1,
        'dependents': 2,
        'purpose:real_estate': 1,
        'income:state_wage': 200000,
        'expense:housing': 28000,
        'expense:other': 9000,
    })
    print(calc_interest_rate(sample))
# Upper bounds for randomly drawn cumulative feature values, per group.
MAX_CUM_VALUE = {
    'income': 200000,
    'expense': 50000,
    'property': 5 * 10 ** 6,
}


def generate_input():
    """Draw one random borrower profile as a flat feature dict.

    Uses the global `random` state; the exact sequence of random calls below
    determines reproducibility under a fixed seed.
    """
    fdict = {}
    # Numeric features: uniform integer in the declared [min, max] range.
    # NOTE(review): `min`/`max` shadow the builtins inside this loop.
    for feature, (min, max) in features.NUM_FEATURES.items():
        fdict[feature] = random.randint(min, max)
    # Categorical features: one-hot encode a uniformly chosen category.
    for feature, cats in features.CAT_FEATURES.items():
        value = random.choice(cats)
        for cat in cats:
            fdict[feature + ':' + cat] = 0
        fdict[feature + ':' + value] = 1
    # Cumulative features: present with probability 0.5; when present, a
    # uniform value in [1, MAX_CUM_VALUE[group]].
    for feature, cats in features.CUM_FEATURES.items():
        for cat in cats:
            if random.random() < 0.5:
                value = random.randint(1, MAX_CUM_VALUE[feature])
            else:
                value = 0
            fdict[feature + ':' + cat] = value
    return fdict
def generate_pair(_):
    """Produce one (feature_vector, interest_rate) training pair.

    The argument is ignored; it exists so the function can be mapped over a
    range with Pool.imap_unordered.
    """
    sample = generate_input()
    return features.feature_dict_to_array(sample), calc_interest_rate(sample)
def generate_data(size):
    """Generate *size* labelled examples in parallel; return (X, y) numpy arrays."""
    pool = multiprocessing.Pool()
    feature_rows = []
    targets = []
    progress = tqdm(pool.imap_unordered(generate_pair, range(size)), total=size)
    for row, rate in progress:
        feature_rows.append(row)
        targets.append(rate)
    return np.array(feature_rows), np.array(targets)
# Destination file for the pickled (X, y) dataset.
DATA_FILENAME = 'data.pickle'


def main():
    """Generate one million samples and pickle them to DATA_FILENAME."""
    dataset = generate_data(10 ** 6)
    with open(DATA_FILENAME, 'wb') as handle:
        pickle.dump(dataset, handle)
    print('[+] Saved to {}'.format(DATA_FILENAME))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1633488 | <reponame>philsupertramp/django-data-migration
import os.path
from tests.utils import ResetDirectoryMixin
this_dir = os.path.dirname(__file__)
class ResetDirectoryContext(ResetDirectoryMixin):
    """Configuration for ResetDirectoryMixin in this test package.

    NOTE(review): assumed semantics — the mixin resets the `targets`
    directories under `this_dir`, preserving `protected_files`; confirm
    against tests.utils.ResetDirectoryMixin.
    """
    # Directories managed/reset by the mixin.
    targets = ['migrations', 'data_migrations']
    # Files that must survive a reset.
    protected_files = ['__init__.py', '0001_first.py', '0002_add_name.py']
    # Base directory the mixin operates in (this module's directory).
    this_dir = this_dir
| StarcoderdataPython |
1766738 | import numpy as np
from simglucose.analysis.risk import risk_index, magni_RI
bg_opt=112.517 #The risk function has a minimun here, ris(112.517)=0
def stepReward3(BG, CGM, CHO, insulin, done=False):
    """Binary in-range reward for one glucose-control step.

    Args:
        BG: true blood glucose used for the reward.
        CGM, CHO, insulin: unused here; kept for the reward-function interface.
        done: True when the episode terminated early.

    Returns:
        -100 if done; otherwise 1 when 70 <= BG <= 180, else 0.
    """
    # Fix: the original also computed risk_index()/magni_RI() and discarded
    # the results (LBGI/HBGI/RI/mRI were never used) — dead work removed.
    if done:
        return -100
    return 1 if 70 <= BG <= 180 else 0
def stepReward3_eval(BG, CGM, CHO, insulin, done=False):
    """Same reward as stepReward3, but also computes and logs the risk indices."""
    bg = BG
    LBGI, HBGI, RI = risk_index([bg], 1)
    mRI = magni_RI([bg], 1)
    reward = 1 if 70 <= bg <= 180 else 0
    if done:
        reward = -100
    print("Action:", insulin, ";CHO:", CHO, ";reward:", reward, ";BG:", BG, ";CGM:", CGM, ";RI:", RI, ";LBGI:", LBGI, ";HBGI:", HBGI, ";mRI:", mRI)
    return reward
| StarcoderdataPython |
1631159 | import redis
from xjkj import settings
from django_redis import get_redis_connection
class IpConn(object):
    """Process-wide singleton wrapper around a raw Redis connection (db 3).

    Every IpConn() call returns the same instance; the underlying client is
    created once, on first construction.
    """
    __instance = None

    def __init__(self):
        # Fix: __init__ runs on every IpConn() call even though __new__
        # returns the shared instance; previously this re-created the client
        # each time. Build it only once.
        if not hasattr(self, '_conn'):
            self._conn = redis.Redis(host=settings.REDIS_ADDR, port=settings.REDIS_PORT, db=3)

    def __new__(cls, *args, **kwargs):
        # Fix: compare with `is`, and do not forward extra arguments to
        # object.__new__ — doing so raises TypeError on Python 3.
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance
# Shared module-level connections, created at import time.
redis_conn = get_redis_connection('vcode')  # django-redis alias 'vcode' (verification codes, presumably — confirm)
user_conn = get_redis_connection('user')  # django-redis alias 'user'
ip_conn = IpConn()  # raw Redis client singleton (db 3)
| StarcoderdataPython |
50285 | <reponame>chrishendra93/dinner_at_clemz
from .all_messages import *
# Public names re-exported by `from <package> import *`; each is expected to
# be provided by the star import from .all_messages above.
__all__ = [
    'Eko', 'Emil', 'Marco', 'Ida', 'Karin', 'Rika', 'Zefa', 'Jason', 'Billy', 'Kafi', 'Indrik', 'Toto', 'Acang', 'Bena'
]
| StarcoderdataPython |
4835472 | <reponame>babatana/stograde
from dataclasses import dataclass, field
from typing import List
@dataclass
class SubmissionWarnings:
    """Track any warnings about the assignment"""
    # No assignment submission could be found
    assignment_missing: bool = False
    # Something raised an error during recording
    # (default is None despite the `str` annotation — effectively Optional[str])
    recording_err: str = None
    # There are unmerged branches that might have more code
    unmerged_branches: List[str] = field(default_factory=list)
| StarcoderdataPython |
1645389 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 <NAME>
# Copyright (c) 2018 <NAME> (Kronuz)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import print_function, absolute_import
import os
import re
import sys
import time
import fnmatch
import logging
import tempfile
import textwrap
import shutil
import subprocess
from clint.textui import colored, puts_err
from . import utils
from .vmrun import VMrun
from .command import Command
logger = logging.getLogger(__name__)

# Defaults used when the Mechfile does not override SSH settings.
DEFAULT_HOST = 'mech'
DEFAULT_USER = 'vagrant'
DEFAULT_PASSWORD = '<PASSWORD>'  # NOTE(review): value looks redacted/placeholder in this copy
# Vagrant-style insecure private key written to ~/.mech/insecure_private_key
# on demand (see MechCommand.config_ssh).
# NOTE(review): the key body appears redacted in this copy of the source.
INSECURE_PRIVATE_KEY = """-----<KEY>=
-----END RSA PRIVATE KEY-----
"""
# Per-user state directory for mech (boxes, generated keys, ...).
HOME = os.path.expanduser("~/.mech")
class MechCommand(Command):
    """Base class for mech CLI commands.

    Holds the currently active Mechfile (per working directory) and exposes
    convenience accessors for its settings (box, user, password, ssh config).
    """
    # Dict loaded from the active instance's Mechfile; None until activate().
    active_mechfile = None

    def activate_mechfile(self, path):
        """Load (and memoize) the Mechfile found at *path*; make it active."""
        if path in self.mechfiles:
            self.active_mechfile = self.mechfiles[path]
        else:
            self.active_mechfile = self.mechfiles[path] = utils.load_mechfile(path)

    def activate(self, instance_name=None):
        """Resolve the target instance, chdir into its directory, load its Mechfile.

        With no *instance_name*, uses the current directory. Returns the
        resolved instance name. Exits the process when a named instance has
        no usable path.
        """
        if not hasattr(self, 'mechfiles'):
            self.mechfiles = {}
        if instance_name:
            instance = utils.settle_instance(instance_name)
            path = instance.get('path')
            if not path:
                puts_err(colored.red(textwrap.fill("Cannot find a valid path for '{}' instance".format(instance_name))))
                sys.exit(1)
            path = os.path.abspath(os.path.expanduser(path))
            os.chdir(path)
            self.activate_mechfile(path)
        else:
            path = os.getcwd()
            self.activate_mechfile(path)
            instance_name = self.active_mechfile.get('name') or os.path.basename(path)  # Use the Mechfile's name if available
        return instance_name

    def get(self, name, default=None):
        """Look up *name* in the active Mechfile; raises if activate() wasn't called."""
        if self.active_mechfile is None:
            raise AttributeError("Must activate(instance_name) first.")
        return self.active_mechfile.get(name, default)

    def get_vmx(self, silent=False):
        """Return the path to the instance's .vmx file."""
        self.get("")  # Check if there's a Mechfile
        return utils.get_vmx(silent=silent)

    @property
    def vmx(self):
        """Path to the instance's .vmx file (non-silent lookup)."""
        return self.get_vmx()

    @property
    def box_name(self):
        """Name of the configured box; exits the process when none is configured."""
        box_name = self.get('box')
        if not box_name:
            puts_err(colored.red(textwrap.fill("Cannot find a box configured in the Mechfile")))
            sys.exit(1)
        return box_name

    @property
    def box_version(self):
        """Configured box version, or None."""
        return self.get('box_version')

    @property
    def user(self):
        """Guest username from the Mechfile, or the default."""
        return self.get('user', DEFAULT_USER)

    @property
    def password(self):
        """Guest password from the Mechfile, or the default."""
        return self.get('password', DEFAULT_PASSWORD)

    @property
    def config(self):
        """The Mechfile's config.ssh mapping (possibly empty)."""
        return self.get('config', {}).get('ssh', {})

    @property
    def config_ssh(self):
        """Build an OpenSSH option mapping for connecting to the running VM.

        Requires VMware Tools in the guest so the IP can be queried; exits
        the process otherwise. Also writes the insecure private key to
        ~/.mech on first use.
        """
        vmrun = VMrun(self.vmx, user=self.user, password=self.password)
        lookup = self.get("enable_ip_lookup", False)
        ip = vmrun.getGuestIPAddress(wait=False, lookup=lookup) if vmrun.installedTools() else None
        if not ip:
            puts_err(colored.red(textwrap.fill(
                "This Mech machine is reporting that it is not yet ready for SSH. "
                "Make sure your machine is created and running and try again. "
                "Additionally, check the output of `mech status` to verify "
                "that the machine is in the state that you expect."
            )))
            sys.exit(1)
        # Materialize the bundled insecure key (0400) for IdentityFile use.
        insecure_private_key = os.path.abspath(os.path.join(HOME, "insecure_private_key"))
        if not os.path.exists(insecure_private_key):
            with open(insecure_private_key, 'w') as f:
                f.write(INSECURE_PRIVATE_KEY)
            os.chmod(insecure_private_key, 0o400)
        config = {
            "Host": DEFAULT_HOST,
            "User": self.user,
            "Port": "22",
            "UserKnownHostsFile": "/dev/null",
            "StrictHostKeyChecking": "no",
            "PasswordAuthentication": "no",
            "IdentityFile": insecure_private_key,
            "IdentitiesOnly": "yes",
            "LogLevel": "FATAL",
        }
        # Normalize Mechfile ssh keys (snake_case / space separated / mixed
        # case) into OpenSSH CamelCase option names before merging them in.
        for k, v in self.config.items():
            k = re.sub(r'[ _]+', r' ', k)
            k = re.sub(r'(?<=[^_])([A-Z])', r' \1', k).lower()
            k = re.sub(r'^( *)(.*?)( *)$', r'\2', k)
            callback = lambda pat: pat.group(1).upper()
            k = re.sub(r' (\w)', callback, k)
            if k[0].islower():
                k = k[0].upper() + k[1:]
            config[k] = v
        config.update({
            "HostName": ip,
        })
        return config
class MechBox(MechCommand):
    """
    Usage: mech box <subcommand> [<args>...]

    Available subcommands:
        add          add a box to the catalog of available boxes
        list         list available boxes in the catalog
        outdated     checks for outdated boxes
        prune        removes old versions of installed boxes
        remove       removes a box that matches the given name
        repackage
        update

    For help on any individual subcommand run `mech box <subcommand> -h`
    """
    # NOTE: the docstrings below double as the docopt usage specs that parse
    # `arguments`; do not reword them casually.

    def add(self, arguments):
        """
        Add a box to the catalog of available boxes.

        Usage: mech box add [options] [<name>] [<location>]

        Notes:
            The box descriptor can be the name of a box on HashiCorp's Vagrant Cloud,
            or a URL, a local .box or .tar file, or a local .json file containing
            the catalog metadata.

        Options:
            -f, --force  Overwrite an existing box if it exists
            --insecure  Do not validate SSL certificates
            --cacert FILE  CA certificate for SSL download
            --capath DIR  CA certificate directory for SSL download
            --cert FILE  A client SSL cert, if needed
            --box-version VERSION  Constrain version of the added box
            --checksum CHECKSUM  Checksum for the box
            --checksum-type TYPE  Checksum type (md5, sha1, sha256)
            -h, --help  Print this help
        """
        # With a single positional argument, <name> actually holds the
        # location/descriptor and the box name is derived later.
        url = arguments['<location>']
        if url:
            name = arguments['<name>']
        else:
            url = arguments['<name>']
            name = None
        version = arguments['--box-version']
        force = arguments['--force']
        requests_kwargs = utils.get_requests_kwargs(arguments)
        utils.add_box(url, name=name, version=version, force=force, requests_kwargs=requests_kwargs)

    def list(self, arguments):
        """
        List all available boxes in the catalog.

        Usage: mech box list [options]

        Options:
            -i, --box-info  Displays additional information about the boxes
            -h, --help  Print this help
        """
        print("{}\t{}".format(
            'BOX'.rjust(35),
            'VERSION'.rjust(12),
        ))
        path = os.path.abspath(os.path.join(HOME, 'boxes'))
        for root, dirnames, filenames in os.walk(path):
            for filename in fnmatch.filter(filenames, '*.box'):
                # Path relative to the boxes root: "<account>/<box>/<version>/...";
                # pad with '' so a short path still unpacks into three parts.
                directory = os.path.dirname(os.path.join(root, filename))[len(path) + 1:]
                account, box, version = (directory.split('/', 2) + ['', ''])[:3]
                print("{}\t{}".format(
                    "{}/{}".format(account, box).rjust(35),
                    version.rjust(12),
                ))
    # `mech box ls` is an alias for `mech box list`.
    ls = list

    def outdated(self, arguments):
        """
        Checks if there is a new version available for the box.

        Usage: mech box outdated [options]

        Options:
            --global  Check all boxes installed
            --insecure  Do not validate SSL certificates
            --cacert FILE  CA certificate for SSL download
            --capath DIR  CA certificate directory for SSL download
            --cert FILE  A client SSL cert, if needed
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def prune(self, arguments):
        """
        Remove old versions of installed boxes.

        Usage: mech box prune [options] [<name>]

        Notes:
            If the box is currently in use mech will ask for confirmation.

        Options:
            -n, --dry-run  Only print the boxes that would be removed.
            -f, --force  Destroy without confirmation even when box is in use.
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def remove(self, arguments):
        """
        Remove a box from mech that matches the given name.

        Usage: mech box remove [options] <name>

        Options:
            -f, --force  Remove without confirmation.
            --box-version VERSION  The specific version of the box to remove
            --all  Remove all available versions of the box
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def repackage(self, arguments):
        """
        Repackage the box that is in use in the current mech environment.

        Usage: mech box repackage [options] <name> <version>

        Notes:
            Puts it in the current directory so you can redistribute it.
            The name and version of the box can be retrieved using mech box list.

        Options:
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def update(self, arguments):
        """
        Update the box that is in use in the current mech environment.

        Usage: mech box update [options] [<name>]

        Notes:
            Only if there any updates available. This does not destroy/recreate
            the machine, so you'll have to do that to see changes.

        Options:
            -f, --force  Overwrite an existing box if it exists
            --insecure  Do not validate SSL certificates
            --cacert FILE  CA certificate for SSL download
            --capath DIR  CA certificate directory for SSL download
            --cert FILE  A client SSL cert, if needed
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))
class MechSnapshot(MechCommand):
    """
    Usage: mech snapshot <subcommand> [<args>...]

    Available subcommands:
        delete    delete a snapshot taken previously with snapshot save
        list      list all snapshots taken for a machine
        pop       restore state that was pushed with `mech snapshot push`
        push      push a snapshot of the current state of the machine
        restore   restore a snapshot taken previously with snapshot save
        save      take a snapshot of the current state of the machine

    For help on any individual subcommand run `mech snapshot <subcommand> -h`
    """
    # NOTE: the docstrings below double as the docopt usage specs that parse
    # `arguments`; do not reword them casually.

    def delete(self, arguments):
        """
        Delete a snapshot taken previously with snapshot save.

        Usage: mech snapshot delete [options] <name> [<instance>]

        Options:
            -h, --help  Print this help
        """
        name = arguments['<name>']
        instance_name = arguments['<instance>']
        instance_name = self.activate(instance_name)
        vmrun = VMrun(self.vmx, user=self.user, password=self.password)
        if vmrun.deleteSnapshot(name) is None:
            # NOTE(review): message likely meant to interpolate the snapshot
            # name, e.g. "Cannot delete {}".format(name).
            puts_err(colored.red("Cannot delete name"))
        else:
            puts_err(colored.green("Snapshot {} deleted".format(name)))

    def list(self, arguments):
        """
        List all snapshots taken for a machine.

        Usage: mech snapshot list [options] [<instance>]

        Options:
            -h, --help  Print this help
        """
        instance_name = arguments['<instance>']
        instance_name = self.activate(instance_name)
        # NOTE(review): the password argument appears redacted in this copy
        # of the source; the original presumably passed self.password.
        vmrun = VMrun(self.vmx, user=self.user, password=<PASSWORD>)
        print(vmrun.listSnapshots())

    def pop(self, arguments):
        """
        Restore state that was pushed with `mech snapshot push`.

        Usage: mech snapshot pop [options] [<instance>]

        Options:
            --provision  Enable provisioning
            --no-delete  Don't delete the snapshot after the restore
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def push(self, arguments):
        """
        Push a snapshot of the current state of the machine.

        Usage: mech snapshot push [options] [<instance>]

        Notes:
            Take a snapshot of the current state of the machine and 'push'
            it onto the stack of states. You can use `mech snapshot pop`
            to restore back to this state at any time.

            If you use `mech snapshot save` or restore at any point after
            a push, pop will still bring you back to this pushed state.

        Options:
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def restore(self, arguments):
        """
        Restore a snapshot taken previously with snapshot save.

        Usage: mech snapshot restore [options] <name> [<instance>]

        Options:
            --provision  Enable provisioning
            -h, --help  Print this help
        """
        puts_err(colored.red("Not implemented!"))

    def save(self, arguments):
        """
        Take a snapshot of the current state of the machine.

        Usage: mech snapshot save [options] <name> [<instance>]

        Notes:
            Take a snapshot of the current state of the machine. The snapshot
            can be restored via `mech snapshot restore` at any point in the
            future to get back to this exact machine state.

            Snapshots are useful for experimenting in a machine and being able
            to rollback quickly.

        Options:
            -f --force  Replace snapshot without confirmation
            -h, --help  Print this help
        """
        name = arguments['<name>']
        instance_name = arguments['<instance>']
        instance_name = self.activate(instance_name)
        vmrun = VMrun(self.vmx, user=self.user, password=self.password)
        if vmrun.snapshot(name) is None:
            puts_err(colored.red("Cannot take snapshot"))
        else:
            puts_err(colored.green("Snapshot {} taken".format(name)))
class Mech(MechCommand):
"""
Usage: mech [options] <command> [<args>...]
Options:
-v, --version Print the version and exit.
-h, --help Print this help.
--debug Show debug messages.
Common commands:
(list|ls) lists all available boxes
init initializes a new Mech environment by creating a Mechfile
destroy stops and deletes all traces of the Mech machine
(up|start) starts and provisions the Mech environment
(down|stop|halt) stops the Mech machine
suspend suspends the machine
pause pauses the Mech machine
ssh connects to machine via SSH
ssh-config outputs OpenSSH valid configuration to connect to the machine
scp copies files to and from the machine via SCP
ip outputs ip of the Mech machine
box manages boxes: installation, removal, etc.
global-status outputs status Mech environments for this user
status outputs status of the Mech machine
ps list running processes in Guest OS
provision provisions the Mech machine
reload restarts Mech machine, loads new Mechfile configuration
resume resume a paused/suspended Mech machine
snapshot manages snapshots: saving, restoring, etc.
port displays information about guest port mappings
push deploys code in this environment to a configured destination
For help on any individual command run `mech <command> -h`
Example:
Initializing and using a machine from HashiCorp's Vagrant Cloud:
mech init bento/ubuntu-14.04
mech up
mech ssh
"""
subcommand_name = '<command>'
def __init__(self, arguments):
super(Mech, self).__init__(arguments)
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if arguments['--debug']:
logger.setLevel(logging.DEBUG)
box = MechBox
snapshot = MechSnapshot
def init(self, arguments):
"""
Initializes a new mech environment by creating a Mechfile.
Usage: mech init [options] [<name>] [<location>]
Notes:
The box descriptor can be the name of a box on HashiCorp's Vagrant Cloud,
or a URL, a local .box or .tar file, or a local .json file containing
the catalog metadata.
Options:
-f, --force Overwrite existing Mechfile
--insecure Do not validate SSL certificates
--cacert FILE CA certificate for SSL download
--capath DIR CA certificate directory for SSL download
--cert FILE A client SSL cert, if needed
--box-version VERSION Constrain version of the added box
--checksum CHECKSUM Checksum for the box
--checksum-type TYPE Checksum type (md5, sha1, sha256)
--name INSTANCE Name of the instance
-h, --help Print this help
"""
url = arguments['<location>']
if url:
name = arguments['<name>']
else:
url = arguments['<name>']
name = None
version = arguments['--box-version']
instance_name = arguments['--name']
force = arguments['--force']
requests_kwargs = utils.get_requests_kwargs(arguments)
if os.path.exists('Mechfile') and not force:
puts_err(colored.red(textwrap.fill(
"`Mechfile` already exists in this directory. Remove it "
"before running `mech init`."
)))
return
puts_err(colored.green("Initializing mech"))
if utils.init_mechfile(instance_name, url, name=name, version=version, requests_kwargs=requests_kwargs):
puts_err(colored.green(textwrap.fill(
"A `Mechfile` has been initialized and placed in this directory. "
"You are now ready to `mech up` your first virtual environment!"
)))
else:
puts_err(colored.red("Couldn't initialize mech"))
def up(self, arguments):
"""
Starts and provisions the mech environment.
Usage: mech up [options] [<instance>]
Options:
--gui Start GUI
--provision Enable provisioning
--insecure Do not validate SSL certificates
--cacert FILE CA certificate for SSL download
--capath DIR CA certificate directory for SSL download
--cert FILE A client SSL cert, if needed
--checksum CHECKSUM Checksum for the box
--checksum-type TYPE Checksum type (md5, sha1, sha256)
--no-cache Do not save the downloaded box
-h, --help Print this help
"""
gui = arguments['--gui']
save = not arguments['--no-cache']
requests_kwargs = utils.get_requests_kwargs(arguments)
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
utils.index_active_instance(instance_name)
vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)
vmrun = VMrun(vmx, user=self.user, password=self.password)
puts_err(colored.blue("Bringing machine up..."))
started = vmrun.start(gui=gui)
if started is None:
puts_err(colored.red("VM not started"))
else:
time.sleep(3)
puts_err(colored.blue("Getting IP address..."))
lookup = self.get("enable_ip_lookup", False)
ip = vmrun.getGuestIPAddress(lookup=lookup)
puts_err(colored.blue("Sharing current folder..."))
vmrun.enableSharedFolders()
vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)
if ip:
if started:
puts_err(colored.green("VM started on {}".format(ip)))
else:
puts_err(colored.yellow("VM was already started on {}".format(ip)))
else:
if started:
puts_err(colored.green("VM started on an unknown IP address"))
else:
puts_err(colored.yellow("VM was already started on an unknown IP address"))
start = up
def global_status(self, arguments):
"""
Outputs mech environments status for this user.
Usage: mech global-status [options]
Options:
--prune Prune invalid entries
-h, --help Print this help
"""
vmrun = VMrun()
print(vmrun.list())
def ps(self, arguments):
"""
List running processes in Guest OS.
Usage: mech ps [options] [<instance>]
Options:
-h, --help Print this help
"""
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
vmrun = VMrun(self.vmx, self.user, self.password)
print(vmrun.listProcessesInGuest())
def status(self, arguments):
"""
Outputs status of the Mech machine.
Usage: mech status [options] [<instance>]
Options:
-h, --help Print this help
"""
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
vmrun = VMrun(self.vmx, user=self.user, password=self.password)
box_name = self.box_name
lookup = self.get("enable_ip_lookup", False)
ip = vmrun.getGuestIPAddress(wait=False, quiet=True, lookup=lookup)
state = vmrun.checkToolsState(quiet=True)
print("Current machine states:" + os.linesep)
if ip is None:
ip = "poweroff"
elif not ip:
ip = "unknown"
print("%s\t%s\t(VMware Tools %s)" % (box_name, ip, state))
if ip == "poweroff":
print(os.linesep + "The VM is powered off. To restart the VM, simply run `mech up`")
elif ip == "unknown":
print(os.linesep + "The VM is on. but it has no IP to connect to, VMware Tools must be installed")
elif state in ("installed", "running"):
print(os.linesep + "The VM is ready. Connect to it using `mech ssh`")
def destroy(self, arguments):
"""
Stops and deletes all traces of the Mech machine.
Usage: mech destroy [options] [<instance>]
Options:
-f, --force Destroy without confirmation.
-h, --help Print this help
"""
force = arguments['--force']
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
if instance_name:
instance = utils.settle_instance(instance_name)
path = instance['path']
else:
path = os.getcwd()
mech_path = os.path.join(path, '.mech')
if os.path.exists(mech_path):
if force or utils.confirm("Are you sure you want to delete {instance_name} at {path}".format(instance_name=instance_name, path=path), default='n'):
puts_err(colored.green("Deleting..."))
vmrun = VMrun(self.vmx, user=self.user, password=<PASSWORD>)
vmrun.stop(mode='hard', quiet=True)
time.sleep(3)
vmrun.deleteVM()
shutil.rmtree(mech_path)
else:
puts_err(colored.red("Deletion aborted"))
else:
puts_err(colored.red("The box hasn't been initialized."))
def down(self, arguments):
"""
Stops the Mech machine.
Usage: mech down [options] [<instance>]
Options:
--force Force a hard stop
-h, --help Print this help
"""
force = arguments['--force']
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
vmrun = VMrun(self.vmx, user=self.user, password=self.password)
if not force and vmrun.installedTools():
stopped = vmrun.stop()
else:
stopped = vmrun.stop(mode='hard')
if stopped is None:
puts_err(colored.red("Not stopped", vmrun))
else:
puts_err(colored.green("Stopped", vmrun))
stop = down
halt = down
def pause(self, arguments):
"""
Pauses the Mech machine.
Usage: mech pause [options] [<instance>]
Options:
-h, --help Print this help
"""
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
vmrun = VMrun(self.vmx, user=self.user, password=self.password)
if vmrun.pause() is None:
puts_err(colored.red("Not paused", vmrun))
else:
puts_err(colored.yellow("Paused", vmrun))
def resume(self, arguments):
"""
Resume a paused/suspended Mech machine.
Usage: mech resume [options] [<instance>]
Options:
--provision Enable provisioning
-h, --help Print this help
"""
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
utils.index_active_instance(instance_name)
vmrun = VMrun(self.vmx, user=self.user, password=<PASSWORD>)
# Try to unpause
if vmrun.unpause(quiet=True) is not None:
time.sleep(1)
puts_err(colored.blue("Getting IP address..."))
lookup = self.get("enable_ip_lookup", False)
ip = vmrun.getGuestIPAddress(lookup=lookup)
if ip:
puts_err(colored.green("VM resumed on {}".format(ip)))
else:
puts_err(colored.green("VM resumed on an unknown IP address"))
# Otherwise try starting
else:
started = vmrun.start()
if started is None:
puts_err(colored.red("VM not started"))
else:
time.sleep(3)
puts_err(colored.blue("Getting IP address..."))
lookup = self.get("enable_ip_lookup", False)
ip = vmrun.getGuestIPAddress(lookup=lookup)
puts_err(colored.blue("Sharing current folder..."))
vmrun.enableSharedFolders()
vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)
if ip:
if started:
puts_err(colored.green("VM started on {}".format(ip)))
else:
puts_err(colored.yellow("VM already was started on {}".format(ip)))
else:
if started:
puts_err(colored.green("VM started on an unknown IP address"))
else:
puts_err(colored.yellow("VM already was started on an unknown IP address"))
def suspend(self, arguments):
"""
Suspends the machine.
Usage: mech suspend [options] [<instance>]
Options:
-h, --help Print this help
"""
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
vmrun = VMrun(self.vmx, user=self.user, password=<PASSWORD>)
if vmrun.suspend() is None:
puts_err(colored.red("Not suspended", vmrun))
else:
puts_err(colored.green("Suspended", vmrun))
def ssh_config(self, arguments):
"""
Output OpenSSH valid configuration to connect to the machine.
Usage: mech ssh-config [options] [<instance>]
Options:
-h, --help Print this help
"""
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
print(utils.config_ssh_string(self.config_ssh))
def ssh(self, arguments):
"""
Connects to machine via SSH.
Usage: mech ssh [options] [<instance>] [-- <extra_ssh_args>...]
Options:
-c, --command COMMAND Execute an SSH command directly
-p, --plain Plain mode, leaves authentication up to user
-h, --help Print this help
"""
plain = arguments['--plain']
extra = arguments['<extra_ssh_args>']
command = arguments['--command']
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
config_ssh = self.config_ssh
fp = tempfile.NamedTemporaryFile(delete=False)
try:
fp.write(utils.config_ssh_string(config_ssh).encode('utf-8'))
fp.close()
cmds = ['ssh']
if not plain:
cmds.extend(('-F', fp.name))
if extra:
cmds.extend(extra)
if not plain:
cmds.append(config_ssh['Host'])
if command:
cmds.extend(('--', command))
logger.debug(" ".join("'{}'".format(c.replace("'", "\\'")) if ' ' in c else c for c in cmds))
return subprocess.call(cmds)
finally:
os.unlink(fp.name)
def scp(self, arguments):
    """
    Copies files to and from the machine via SCP.

    Usage: mech scp [options] <src> <dst> [-- <extra scp args>...]

    Options:
        -h, --help                   Print this help
    """
    extra = arguments['<extra scp args>']
    src = arguments['<src>']
    dst = arguments['<dst>']
    # An "instance:path" argument denotes the remote side of the copy.
    dst_instance, dst_is_host, dst = dst.partition(':')
    src_instance, src_is_host, src = src.partition(':')
    if dst_is_host and src_is_host:
        puts_err(colored.red("Both src and host are host destinations"))
        sys.exit(1)
    if dst_is_host:
        instance_name = dst_instance
    else:
        dst = dst_instance
    if src_is_host:
        instance_name = src_instance
    else:
        src = src_instance
    # NOTE(review): if neither src nor dst names an instance, instance_name
    # is never assigned and the next line raises NameError -- confirm the
    # intended behaviour for two local paths.
    instance_name = self.activate(instance_name)
    config_ssh = self.config_ssh
    fp = tempfile.NamedTemporaryFile(delete=False)
    try:
        # BUG FIX: NamedTemporaryFile is opened in binary mode, so the config
        # string must be encoded before writing (the `ssh` command already
        # does this); previously this raised TypeError on Python 3.
        fp.write(utils.config_ssh_string(config_ssh).encode('utf-8'))
        fp.close()
        cmds = ['scp']
        cmds.extend(('-F', fp.name))
        if extra:
            cmds.extend(extra)
        host = config_ssh['Host']
        # Prefix the remote side of the transfer with the SSH host alias.
        dst = '{}:{}'.format(host, dst) if dst_is_host else dst
        src = '{}:{}'.format(host, src) if src_is_host else src
        cmds.extend((src, dst))
        # Log the full command line, quoting any argument containing spaces.
        logger.debug(" ".join("'{}'".format(c.replace("'", "\\'")) if ' ' in c else c for c in cmds))
        return subprocess.call(cmds)
    finally:
        # Always clean up the temporary SSH config file.
        os.unlink(fp.name)
def ip(self, arguments):
    """
    Outputs ip of the Mech machine.

    Usage: mech ip [options] [<instance>]

    Options:
        -h, --help                   Print this help
    """
    self.activate(arguments['<instance>'])
    vmrun = VMrun(self.vmx, user=self.user, password=self.password)
    lookup = self.get("enable_ip_lookup", False)
    address = vmrun.getGuestIPAddress(lookup=lookup)
    # Report the address in green on success, an error in red otherwise.
    if not address:
        puts_err(colored.red("Unknown IP address"))
    else:
        puts_err(colored.green(address))
def provision(self, arguments):
    """
    Provisions the Mech machine.

    Usage: mech provision [options] [<instance>]

    Options:
        -h, --help                   Print this help
    """
    instance_name = arguments['<instance>']
    instance_name = self.activate(instance_name)
    vmrun = VMrun(self.vmx, self.user, self.password)
    # Provisioning runs commands inside the guest, so VMware Tools must be present.
    if not vmrun.installedTools():
        puts_err(colored.red("Tools not installed"))
        return
    provisioned = 0
    for i, provision in enumerate(self.get('provision', [])):
        if provision.get('type') == 'file':
            # Copy a single file from the host into the guest.
            source = provision.get('source')
            destination = provision.get('destination')
            if utils.provision_file(vmrun, source, destination) is None:
                puts_err(colored.red("Not Provisioned"))
                return
            provisioned += 1
        elif provision.get('type') == 'shell':
            # Run an inline command or a script (with optional args) in the guest.
            inline = provision.get('inline')
            path = provision.get('path')
            args = provision.get('args')
            if not isinstance(args, list):
                args = [args]
            if utils.provision_shell(vmrun, inline, path, args) is None:
                puts_err(colored.red("Not Provisioned"))
                return
            provisioned += 1
        else:
            # Unknown provisioning type: report the entry index and abort.
            # BUG FIX: the message template was missing its closing paren.
            puts_err(colored.red("Not Provisioned ({})".format(i)))
            return
    else:
        # for/else: the loop finished without aborting -- report success.
        # (An unreachable duplicate error line after the loop was removed.)
        puts_err(colored.green("Provisioned {} entries".format(provisioned)))
        return
def reload(self, arguments):
    """
    Restarts Mech machine, loads new Mechfile configuration.

    Usage: mech reload [options] [<instance>]

    Options:
        --provision                  Enable provisioning
        -h, --help                   Print this help
    """
    instance_name = arguments['<instance>']
    instance_name = self.activate(instance_name)
    vmrun = VMrun(self.vmx, user=self.user, password=self.password)
    puts_err(colored.blue("Reloading machine..."))
    started = vmrun.reset()
    if started is None:
        # reset() failed outright.
        puts_err(colored.red("VM not restarted"))
        return
    # Give the guest a moment to come up before querying its address.
    time.sleep(3)
    puts_err(colored.blue("Getting IP address..."))
    lookup = self.get("enable_ip_lookup", False)
    ip = vmrun.getGuestIPAddress(lookup=lookup)
    if ip and started:
        puts_err(colored.green("VM started on {}".format(ip)))
    elif ip:
        puts_err(colored.yellow("VM already was started on {}".format(ip)))
    elif started:
        puts_err(colored.green("VM started on an unknown IP address"))
    else:
        puts_err(colored.yellow("VM already was started on an unknown IP address"))
def port(self, arguments):
    """
    Displays information about guest port mappings.

    Usage: mech port [options] [<instance>]

    Options:
        --guest PORT                 Output the host port that maps to the given guest port
        --machine-readable           Display machine-readable output
        -h, --help                   Print this help
    """
    instance_name = arguments['<instance>']
    instance_name = self.activate(instance_name)
    vmrun = VMrun(self.vmx, user=self.user, password=self.password)
    # Scan the host networks for the first NAT network and print its
    # port forwardings.
    for network in vmrun.listHostNetworks().split('\n'):
        network = network.split()
        if len(network) > 2 and network[2] == 'nat':
            print(vmrun.listPortForwardings(network[1]))
            break
    else:
        # for/else: no `break` happened, i.e. no NAT network was found.
        puts_err(colored.red("Cannot find a nat network"))
def push(self, arguments):
    """
    Deploys code in this environment to a configured destination.

    Usage: mech push [options] [<strategy>]

    Options:
        -h, --help                   Print this help
    """
    # Placeholder subcommand: no deployment strategy is implemented yet.
    puts_err(colored.red("Not implemented!"))
def list(self, arguments):
    """
    Lists all available boxes.

    Usage: mech list [options]

    Options:
        -h, --help                   Print this help
    """
    # Header row; columns are right-justified to line up with the rows below.
    print("{}\t{}\t{}\t{}\t{}".format(
        'NAME'.rjust(20),
        'ADDRESS'.rjust(15),
        'BOX'.rjust(35),
        'VERSION'.rjust(12),
        'PATH',
    ))
    for instance_name, instance in utils.instances().items():
        path = instance.get('path')
        if path and os.path.exists(path):
            self.activate(instance_name)
            mech_path = os.path.join(path, '.mech')
            if os.path.exists(mech_path):
                vmx = self.get_vmx(silent=True)
                if vmx:
                    # BUG FIX: the password argument was corrupted
                    # (`<PASSWORD>`); use the instance credentials as the
                    # other subcommands do.
                    vmrun = VMrun(vmx, user=self.user, password=self.password)
                    lookup = self.get("enable_ip_lookup", False)
                    ip = vmrun.getGuestIPAddress(wait=False, quiet=True, lookup=lookup)
                else:
                    ip = colored.red("invalid")
                # Translate the raw address result into a status column:
                # None -> powered off, empty -> running but address unknown.
                if ip is None:
                    ip = colored.yellow("poweroff")
                elif not ip:
                    ip = colored.green("running")
                else:
                    ip = colored.green(ip)
            else:
                ip = ""
            box_name = self.box_name or ""
            box_version = self.box_version or ""
            print("{}\t{}\t{}\t{}\t{}".format(
                colored.green(instance_name.rjust(20)),
                ip.rjust(15),
                box_name.rjust(35),
                box_version.rjust(12),
                path,
            ))

# `ls` is an alias for the `list` subcommand.
ls = list
| StarcoderdataPython |
3223319 | <gh_stars>1-10
"""Remote control support for Apple TV."""
import asyncio
from haphilipsjs.typing import SystemType
from openpeerpower.components.remote import (
ATTR_DELAY_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
RemoteEntity,
)
from . import LOGGER, PhilipsTVDataUpdateCoordinator
from .const import CONF_SYSTEM, DOMAIN
async def async_setup_entry(opp, config_entry, async_add_entities):
    """Set up the Philips TV remote entity from a config entry."""
    coordinator = opp.data[DOMAIN][config_entry.entry_id]
    remote = PhilipsTVRemote(
        coordinator,
        config_entry.data[CONF_SYSTEM],
        config_entry.unique_id,
    )
    async_add_entities([remote])
class PhilipsTVRemote(RemoteEntity):
    """Remote entity that sends key commands to a Philips TV."""

    def __init__(
        self,
        coordinator: PhilipsTVDataUpdateCoordinator,
        system: SystemType,
        unique_id: str,
    ) -> None:
        """Initialize the Philips TV."""
        self._tv = coordinator.api  # low-level Philips TV API client
        self._coordinator = coordinator
        self._system = system  # static system description (name, model, ...)
        self._unique_id = unique_id

    @property
    def name(self):
        """Return the device name."""
        return self._system["name"]

    @property
    def is_on(self):
        """Return true if device is on."""
        # A TV whose powerstate is None (powerstate unsupported) counts as on
        # whenever the API reports the TV reachable (self._tv.on).
        return bool(
            self._tv.on and (self._tv.powerstate == "On" or self._tv.powerstate is None)
        )

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def unique_id(self):
        """Return unique identifier if known."""
        return self._unique_id

    @property
    def device_info(self):
        """Return a device description for device registry."""
        return {
            "name": self._system["name"],
            "identifiers": {
                (DOMAIN, self._unique_id),
            },
            "model": self._system.get("model"),
            "manufacturer": "Philips",
            "sw_version": self._system.get("softwareversion"),
        }

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        if self._tv.on and self._tv.powerstate:
            # Reachable and powerstate-capable: switch on via the API.
            await self._tv.setPowerState("On")
        else:
            # Otherwise delegate to the coordinator's configured turn-on action.
            await self._coordinator.turn_on.async_run(self.opp, self._context)
        self.async_write_op_state()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        if self._tv.on:
            await self._tv.sendKey("Standby")
            self.async_write_op_state()
        else:
            LOGGER.debug("Tv was already turned off")

    async def async_send_command(self, command, **kwargs):
        """Send a command to one device."""
        num_repeats = kwargs[ATTR_NUM_REPEATS]
        delay = kwargs.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
        # Repeat the whole command sequence, pausing `delay` seconds after
        # each key press.
        for _ in range(num_repeats):
            for single_command in command:
                LOGGER.debug("Sending command %s", single_command)
                await self._tv.sendKey(single_command)
                await asyncio.sleep(delay)
| StarcoderdataPython |
3375841 | from algorithm.numberTheory.fibonacci import fibonacci_log_n
from tests.base_test_case import BaseTestCase
class SearchTest(BaseTestCase):
    # NOTE(review): test_fibonacci_log_n only prints the result and asserts
    # nothing, so it can never fail on a wrong value -- consider asserting
    # against a known Fibonacci number.

    def setUp(self):
        super().setUp()
        self.num = 100  # input passed to fibonacci_log_n

    def test_fibonacci_log_n(self):
        print(fibonacci_log_n(self.num))
| StarcoderdataPython |
3245898 | #
# Copyright (C) 2009-2010 <NAME>, <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from gamera.core import *
init_gamera()
from gamera import knn
from gamera.plugins import pagesegmentation
from gamera.classify import ShapedGroupingFunction
from gamera.plugins.image_utilities import union_images
from gamera.plugins.listutilities import median
from gamera.toolkits.ocr.classes import Textline
import unicodedata
import sys
import time
def return_char(unicode_str, extra_chars_dict={}):
"""Converts a unicode character name to a unicode symbol.
Signature:
``return_char (classname, extra_chars_dict={})``
with
*classname*:
A class name derived from a unicode character name.
Example: ``latin.small.letter.a`` returns the character ``a``.
*extra_chars_dict*
A dictionary of additional translations of classnames to character codes.
This is necessary when you use class names that are not unicode names.
The character 'code' does not need to be an actual code, but can be
any string. This can be useful, e.g. for ligatures:
.. code:: Python
return_char(glyph.get_main_id(), {'latin.small.ligature.st':'st'})
When *classname* is not listed in *extra_chars_dict*, it must correspond
to a `standard unicode character name`_,
as in the examples of the following table:
.. _`standard unicode character names`: http://www.unicode.org/charts/
+-----------+----------------------------+----------------------------+
| Character | Unicode Name | Class Name |
+===========+============================+============================+
| ``!`` | ``EXCLAMATION MARK`` | ``exclamation.mark`` |
+-----------+----------------------------+----------------------------+
| ``2`` | ``DIGIT TWO`` | ``digit.two`` |
+-----------+----------------------------+----------------------------+
| ``A`` | ``LATIN CAPITAL LETTER A`` | ``latin.capital.letter.a`` |
+-----------+----------------------------+----------------------------+
| ``a`` | ``LATIN SMALL LETTER A`` | ``latin.small.letter.a`` |
+-----------+----------------------------+----------------------------+
"""
if len(extra_chars_dict) > 0:
try:
return extra_chars_dict[unicode_str]
except:
pass
name = unicode_str.upper()
# some xml-files might be corrupted due to wrong grouping
if name.startswith('_GROUP.'):
name = name[len('_GROUP.'):]
if name.startswith('_PART.'):
name = name[len('_PART.'):]
name = name.replace(".", " ")
try:
return unicodedata.lookup(name)
except KeyError:
strings = unicode_str.split(".")
if(strings[0] == "collated"):
return strings[1]
if(strings[0] == "cursive"):
return return_char(unicode_str[8:])
else:
print "ERROR: Name not found:", name
return ""
def chars_make_words(lines_glyphs, threshold=None):
    """Groups the given glyphs to words based upon the horizontal distance
    between adjacent glyphs.

    Signature:

      ``chars_make_words (glyphs, threshold=None)``

    with

      *glyphs*:
        A list of ``Cc`` data types, each of which representing a character.
        All glyphs must stem from the same single line of text.

      *threshold*:
        Horizontal white space greater than *threshold* will be considered
        a word separating gap. When ``None``, the threshold value is
        calculated automatically as 2.5 times the median white space
        between adjacent glyphs.

    The result is a nested list of glyphs with each sublist representing
    a word.
    """
    glyphs = list(lines_glyphs)
    if threshold is None:
        # Automatic threshold: 2.5 times the median inter-glyph gap.
        gaps = [b.ul_x - a.lr_x for a, b in zip(glyphs, glyphs[1:])]
        if gaps:
            threshold = median(gaps) * 2.5
        else:
            threshold = 0
    words = []
    current = []
    previous = None
    for glyph in glyphs:
        # A gap wider than the threshold starts a new word.
        if previous is not None and (glyph.ul_x - previous.lr_x) > threshold:
            words.append(current)
            current = []
        current.append(glyph)
        previous = glyph
    if current:
        words.append(current)
    return words
def textline_to_string(line, heuristic_rules="roman", extra_chars_dict={}):
    """Returns a unicode string of the text in the given ``Textline``.

    Signature:

      ``textline_to_string (textline, heuristic_rules="roman", extra_chars_dict={})``

    with

      *textline*:
        A ``Textline`` object containing the glyphs. The glyphs must already
        be classified.

      *heuristic_rules*:
        Depending on the alphabeth, some characters can be very similar and
        need further heuristic rules for disambiguation, like apostroph and
        comma, which have the same shape and only differ in their position
        relative to the baseline.
        When set to \"roman\", several rules specific for latin alphabeths
        are applied.

      *extra_chars_dict*
        A dictionary of additional translations of classnames to character codes.
        This is necessary when you use class names that are not unicode names.
        Will be passed to `return_char`_.

    As this function uses `return_char`_, the class names of the glyphs in
    *textline* must correspond to unicode character names, as described in
    the documentation of `return_char`_.

    .. _`return_char`: #return-char
    """
    wordlist = line.words
    s = ""
    char = ""
    for i in range(len(wordlist)):
        if(i):
            # words are separated by a single blank
            s = s + " "
        for glyph in wordlist[i]:
            char = return_char(glyph.get_main_id(), extra_chars_dict)
            if (heuristic_rules == "roman"):
                # disambiguation of similar roman characters: for letters whose
                # capital and small shapes are alike, decide by vertical position --
                # a glyph starting above the line's upper quarter is taken as a
                # capital, otherwise as a small letter.  classify_heuristic
                # relabels the glyph, and return_char is called again to pick up
                # the corrected class name.
                if (char == "x" or char == "X"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.x")
                    else:
                        glyph.classify_heuristic("latin.small.letter.x")
                    char = return_char(glyph.get_main_id())
                if (char == "p" or char == "P"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.p")
                    else:
                        glyph.classify_heuristic("latin.small.letter.p")
                    char = return_char(glyph.get_main_id())
                if (char == "o" or char == "O"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.o")
                    else:
                        glyph.classify_heuristic("latin.small.letter.o")
                    char = return_char(glyph.get_main_id())
                if (char == "w" or char == "W"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.w")
                    else:
                        glyph.classify_heuristic("latin.small.letter.w")
                    char = return_char(glyph.get_main_id())
                if (char == "v" or char == "V"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.v")
                    else:
                        glyph.classify_heuristic("latin.small.letter.v")
                    char = return_char(glyph.get_main_id())
                if (char == "z" or char == "Z"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.z")
                    else:
                        glyph.classify_heuristic("latin.small.letter.z")
                    char = return_char(glyph.get_main_id())
                if (char == "s" or char == "S"):
                    # not for long s
                    if (glyph.get_main_id().upper() != "LATIN.SMALL.LETTER.LONG.S"):
                        if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                            glyph.classify_heuristic("latin.capital.letter.s")
                        else:
                            glyph.classify_heuristic("latin.small.letter.s")
                        char = return_char(glyph.get_main_id())
                #if(char == "T" and (float(glyph.nrows)/float(glyph.ncols)) > 1.5):
                #    glyph.classify_heuristic("LATIN SMALL LETTER F")
                #    char = return_char(glyph.get_main_id())
                # apostrophe and comma share the same shape; position relative
                # to the vertical line center tells them apart.
                if (char == "'" or char == ","):
                    if (glyph.ul_y < line.bbox.center_y):
                        glyph.classify_heuristic("APOSTROPHE")
                        char = "'"
                    else:
                        glyph.classify_heuristic("COMMA")
                        char = ","
            s = s + char
    return s
def textline_to_xml(line, heuristic_rules="roman", extra_chars_dict={}):
    """Returns xml encoding of words and coordinates for the text in the given ``Textline``.

    Signature:

      ``textline_to_xml (textline, heuristic_rules="roman", extra_chars_dict={})``

    with

      *textline*:
        A ``Textline`` object containing the glyphs. The glyphs must already
        be classified.

      *heuristic_rules*:
        Depending on the alphabeth, some characters can be very similar and
        need further heuristic rules for disambiguation, like apostroph and
        comma, which have the same shape and only differ in their position
        relative to the baseline.
        When set to \"roman\", several rules specific for latin alphabeths
        are applied.

      *extra_chars_dict*
        A dictionary of additional translations of classnames to character codes.
        This is necessary when you use class names that are not unicode names.
        Will be passed to `return_char`_.

    As this function uses `return_char`_, the class names of the glyphs in
    *textline* must correspond to unicode character names, as described in
    the documentation of `return_char`_.

    .. _`return_char`: #return-char
    """
    # This function was added by Jeffrey Jackson in 9/2010
    # It is based on the textline_to_string function, but modified to produce output with XML tags
    # These tags add tagging of words, along with the coordinates for the upper right and lower
    # left corners of the word bounding box.
    # Added to support the requirements of the 18th Connect project.
    #
    # Modified on 7/24/2013
    # Correcting to more accurately reflect the Gale OCR XML structure:
    #   change <line> to <p>
    wordlist = line.words
    s = "<p>\n"
    char = ""
    for i in range(len(wordlist)):
        word = ""
        #set left/right x and upper/lower y from first glyph
        word_leftx = wordlist[i][0].ul_x
        word_uppery = wordlist[i][0].ul_y
        word_rightx = wordlist[i][0].lr_x
        word_lowery = wordlist[i][0].lr_y
        for glyph in wordlist[i]:
            #update right x and (conditionally) upper/lower y from current glyph
            word_rightx = glyph.lr_x
            if (glyph.ul_y < word_uppery):
                word_uppery = glyph.ul_y
            if (glyph.lr_y > word_lowery):
                word_lowery = glyph.lr_y
            char = return_char(glyph.get_main_id(), extra_chars_dict)
            if (heuristic_rules == "roman"):
                # disambiguation of similar roman characters: decide capital vs.
                # small by whether the glyph starts above the line's upper quarter.
                if (char == "x" or char == "X"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.x")
                    else:
                        glyph.classify_heuristic("latin.small.letter.x")
                    char = return_char(glyph.get_main_id())
                if (char == "p" or char == "P"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.p")
                    else:
                        glyph.classify_heuristic("latin.small.letter.p")
                    char = return_char(glyph.get_main_id())
                if (char == "o" or char == "O"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.o")
                    else:
                        glyph.classify_heuristic("latin.small.letter.o")
                    char = return_char(glyph.get_main_id())
                if (char == "w" or char == "W"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.w")
                    else:
                        glyph.classify_heuristic("latin.small.letter.w")
                    char = return_char(glyph.get_main_id())
                if (char == "v" or char == "V"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.v")
                    else:
                        glyph.classify_heuristic("latin.small.letter.v")
                    char = return_char(glyph.get_main_id())
                if (char == "z" or char == "Z"):
                    if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                        glyph.classify_heuristic("latin.capital.letter.z")
                    else:
                        glyph.classify_heuristic("latin.small.letter.z")
                    char = return_char(glyph.get_main_id())
                if (char == "s" or char == "S"):
                    # not for long s
                    if (glyph.get_main_id().upper() != "LATIN.SMALL.LETTER.LONG.S"):
                        if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
                            glyph.classify_heuristic("latin.capital.letter.s")
                        else:
                            glyph.classify_heuristic("latin.small.letter.s")
                        char = return_char(glyph.get_main_id())
                #if(char == "T" and (float(glyph.nrows)/float(glyph.ncols)) > 1.5):
                #    glyph.classify_heuristic("LATIN SMALL LETTER F")
                #    char = return_char(glyph.get_main_id())
                # apostrophe and comma share the same shape; position relative
                # to the vertical line center tells them apart.
                if (char == "'" or char == ","):
                    if (glyph.ul_y < line.bbox.center_y):
                        glyph.classify_heuristic("APOSTROPHE")
                        char = "'"
                    else:
                        glyph.classify_heuristic("COMMA")
                        char = ","
            # Handle XML predefined entities - &, ', ", <, and >
            # BUG FIX: the replacement strings below had been destroyed by an
            # HTML-unescaping of the source (each assignment had become a
            # self-assignment, and the apostrophe case was a syntax error);
            # restored the five predefined XML entities.
            if (char == '&'):
                char = '&amp;'
            if (char == "'"):
                char = '&apos;'
            if (char == '"'):
                char = '&quot;'
            if (char == '<'):
                char = '&lt;'
            if (char == '>'):
                char = '&gt;'
            word = word + char
        # end of glyph processing for word
        pos = "pos=\"" +str(word_leftx)+','+str(word_uppery)+','+str(word_rightx)+','+str(word_lowery)+'"'
        s = s + "<wd "+pos+">"+word+"</wd>\n"
    s = s + "</p>\n"
    return s
def check_upper_neighbors(item,glyph,line):
    """Check for small signs grouped beside each other like quotation marks.

    Signature:

      ``check_upper_neighbors(item,glyph,line)``

    with

      *item*:
        Some connected-component.

      *glyph*:
        Some connected-component.

      *line*:
        The ``Textline`` Object which includes ``item`` and ``glyph``

    Returns an array with two elements. The first element keeps a list of
    characters (images that have been united to a single image) and the
    second element is a list of characters which have to be removed as
    these have been united to a single character.
    """
    remove = []
    add = []
    result = []
    # NOTE(review): minheight is computed but never used -- confirm whether a
    # size-similarity check was intended here.
    minheight = min([item.nrows,glyph.nrows])
    # glyphs must be small, of similar size and on the same height
    if(not(glyph.lr_y >= line.center_y and glyph.lr_y-(glyph.nrows/3) <= line.lr_y)):
        # both components overlap each other's vertical center
        if (glyph.contains_y(item.center_y) and item.contains_y(glyph.center_y)):
            minwidth = min([item.ncols,glyph.ncols])
            distance = item.lr_x - glyph.lr_x
            # close enough horizontally (at most 3x the narrower width apart)
            if(distance > 0 and distance <= minwidth*3):
                remove.append(item)
                remove.append(glyph)
                new = union_images([item,glyph])
                add.append(new)
    result.append(add) #result[0] == ADD
    result.append(remove) #result[1] == REMOVE
    return result
def check_glyph_accent(item, glyph):
    """Check two glyphs for being grouped to one single character, e.g. the
    detached dot of an i or j, or the parts of a colon.

    Signature:

      ``check_glyph_accent(item,glyph)``

    with

      *item*:
        Some connected-component.

      *glyph*:
        Some connected-component.

    Returns a two element list ``[add, remove]``: *add* holds the merged
    image (if the two parts were united) and *remove* holds the two parts
    that must be discarded after merging.
    """
    add = []
    remove = []
    # The parts must overlap horizontally ...
    same_column = (glyph.contains_x(item.ul_x)
                   or glyph.contains_x(item.lr_x)
                   or glyph.contains_x(item.center_x))
    if same_column:
        # ... but must not overlap vertically (accent sits above/below).
        overlaps_vertically = (item.contains_y(glyph.ul_y)
                               or item.contains_y(glyph.lr_y)
                               or item.contains_y(glyph.center_y))
        if not overlaps_vertically:
            remove.append(item)
            remove.append(glyph)
            add.append(union_images([item, glyph]))
    return [add, remove]
def get_line_glyphs(image,textlines):
    """Splits image regions representing text lines into characters.

    Signature:

      ``get_line_glyphs (image, segments)``

    with

      *image*:
        The document image that is to be further segmentated. It must contain the
        same underlying image data as the second argument *segments*

      *segments*:
        A list of ``Cc`` data types, each of which represents a text line region.
        The image views must correspond to *image*, i.e. each pixel has a value
        that is the unique label of the text line it belongs to. This is the
        interface used by the plugins in the \"PageSegmentation\" section of the
        Gamera core.

    The result is returned as a list of Textline_ objects.

    .. _Textline: gamera.toolkits.ocr.classes.Textline.html
    """
    i=0
    show = []   # flat list of all resulting glyphs (kept for inspection)
    lines = []  # result: one Textline per input segment
    ret,sub_ccs = image.sub_cc_analysis(textlines)
    for ccs in sub_ccs:
        line_bbox = Rect(textlines[i])
        i = i + 1
        glyphs = ccs[:]
        newlist = []
        remove = []   # parts consumed by a merge
        add = []      # merged replacement glyphs
        result = []
        # sort the connected components by their left edge (reading order)
        glyphs.sort(lambda x,y: cmp(x.ul_x, y.ul_x))
        for position, item in enumerate(glyphs):
            if(True):
                #if(not(glyph.lr_y >= line_bbox.center_y and glyph.lr_y-(glyph.nrows/3) <= line_bbox.lr_y)): ## is this part of glyph higher than line.center_y ?
                # only compare against a small window of neighbouring glyphs
                left = position - 2
                if(left < 0):
                    left = 0
                right = position + 2
                if(right > len(glyphs)):
                    right = len(glyphs)
                checklist = glyphs[left:right]
                # first pass: merge small side-by-side marks (e.g. quotation marks)
                for glyph in checklist:
                    if (item == glyph):
                        continue
                    result = check_upper_neighbors(glyph,item,line_bbox)
                    if(len(result[0]) > 0): #something has been joined...
                        joind_upper_connection = result[0][0] #joined glyph
                        add.append(joind_upper_connection)
                        remove.append(result[1][0]) #first part of joined one
                        remove.append(result[1][1]) #second part of joined one
                        for glyph2 in checklist: #maybe the upper joined glyphs fit to a glyph below...
                            # NOTE(review): this compares the whole list `glyphs`
                            # with a single image, which can never be equal --
                            # it looks like `glyph2` was intended; confirm.
                            if(glyphs == joind_upper_connection):
                                continue
                            if(joind_upper_connection.contains_x(glyph2.center_x)): #fits for example on ae, oe, ue in german alph
                                new = union_images([glyph2,joind_upper_connection])
                                add.append(new)
                                remove.append(glyph2)
                                add.remove(joind_upper_connection)
                                break
                # drop already-consumed parts before the accent pass
                for elem in remove:
                    if (elem in checklist):
                        checklist.remove(elem)
                # second pass: attach detached accents/dots (i, j, colon, ...)
                for glyph in checklist:
                    if(item == glyph):
                        continue
                    result = check_glyph_accent(item,glyph)
                    if(len(result[0]) > 0): #something has been joined...
                        add.append(result[0][0]) #joined glyph
                        remove.append(result[1][0]) #first part of joined one
                        remove.append(result[1][1]) #second part of joined one
        # apply the collected merges to the glyph list
        for elem in remove:
            if(elem in glyphs):
                glyphs.remove(elem)
        for elem in add:
            glyphs.append(elem)
        new_line = Textline(line_bbox)
        final = []
        if(len(glyphs) > 0):
            for glyph in glyphs:
                final.append(glyph)
        new_line.add_glyphs(final,False)
        new_line.sort_glyphs() #reading order -- from left to right
        lines.append(new_line)
        for glyph in glyphs:
            show.append(glyph)
    return lines
def show_bboxes(image, glyphs):
    """Returns an RGB image with bounding boxes of the given glyphs drawn as
    hollow red rects. Useful for visualization and debugging of a segmentation.

    Signature:

      ``show_bboxes (image, glyphs)``

    with:

      *image*:
        An image of the text document which has to be segmentated.

      *glyphs*:
        List of rects which will be drawn on ``image`` as hollow rects.
        As all image types are derived from ``Rect``, any image list can
        be passed.
    """
    rgb = image.to_rgb()
    for glyph in glyphs:
        rgb.draw_hollow_rect(glyph, RGBPixel(255, 0, 0), 1.0)
    return rgb
| StarcoderdataPython |
3208593 | <filename>backend/app/main.py
from fastapi import FastAPI, Depends, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
from app.database import crud, models, schemas, SessionLocal, engine
from app.validate_wasm import validate_wasm
from typing import List
# Create the database tables for all declared ORM models (no-op for tables
# that already exist).
models.Base.metadata.create_all(bind=engine)

# Initialize the FastAPI instance
app = FastAPI(
    title="WASM Bots",
    description="A simple server for our LangSec project where we can upload base64 encoded wasm bots",
    version="1.0"
)

# Allow cross-origin requests (the frontend is served separately).
# NOTE(review): browsers reject allow_credentials=True combined with
# allow_origins=["*"] per the CORS spec -- confirm the intended origins.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Used to create a connection to the db upon requests to the server
def get_db():
    """FastAPI dependency that yields a database session and guarantees it
    is closed when the request finishes.

    BUG FIX: the session is now created *before* the try block; previously,
    if SessionLocal() itself raised, the finally clause referenced an
    unbound `db` and masked the real error with an UnboundLocalError.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
@app.get("/bots", response_model=List[schemas.Bot])
def get_bots(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
"""Returns all the wasm bots"""
bots = crud.get_bots(db, skip=skip, limit=limit)
return bots
@app.get("/bots/{bot_id}", response_model=schemas.Bot)
def get_bot(bot_id: int, db: Session = Depends(get_db)):
"""Fetched that particular bot"""
return crud.get_bot(db, bot_id)
@app.delete("/bots/{bot_id}")
def remove_bot(bot_id: int, db: Session = Depends(get_db)):
"""Removes that particular bot"""
if crud.remove_bot(db, bot_id):
return
else:
raise HTTPException(status_code=400, detail=f"Bot with id #{bot_id} does not exist")
@app.post("/bots/get-by-name", response_model=schemas.Bot)
def get_bot_by_name(name: str, db: Session = Depends(get_db)):
"""Fetched that particular bot"""
return crud.get_bot_by_name(db, name)
@app.post("/bots", response_model=schemas.Bot)
def create_bot(bot: schemas.BotBase, db: Session = Depends(get_db)):
"""Creates a new bot"""
if not validate_wasm(bot.base64_encoded_bot):
raise HTTPException(status_code=400, detail="Provided wasm file is invalid")
db_bot = crud.get_bot_by_name(db, name=bot.name)
if db_bot:
# A bot with that name already exists
raise HTTPException(status_code=400, detail=f"Bot with that name already exists: {bot.name}")
return crud.create_bot(db, bot)
| StarcoderdataPython |
170855 | <gh_stars>0
"""
The LSB implementation of the GIF steganography suite
"""
from maybe_open import maybe_open
import struct
all_data = bytearray()
def copy_blocks(in_f, out_f):
    """Copy a GIF data sub-block sequence from *in_f* to *out_f* unchanged.

    Each sub-block is a one-byte length followed by that many data bytes;
    a zero-length block terminates the sequence.  Raises RuntimeError if
    the input ends prematurely.
    """
    while True:
        raw_size = in_f.read(1)
        if len(raw_size) != 1:
            raise RuntimeError('The Block is too short to be valid')
        size = raw_size[0]
        payload = in_f.read(size)
        if len(payload) != size:
            raise RuntimeError('The Block is shorter than specified')
        # Pass the size byte and payload straight through.
        out_f.write(raw_size)
        out_f.write(payload)
        # A zero-length block is the block terminator.
        if size == 0:
            return
def extract_data(in_f, has_ct, ct_size):
    """
    Extract the data hidden in the color table and add it to all_data
    """
    # Accumulates into the module-level buffer: the first extracted byte is
    # the payload length (written by the hiding side), which is used below
    # to stop collecting once the full payload has been recovered.
    global all_data
    if has_ct:
        # Color table size in bytes: 3 bytes (RGB) per entry, 2**(ct_size+1) entries.
        true_ct_size = 3 * (2 ** (ct_size + 1))
        ct = bytearray(in_f.read(true_ct_size))
        if len(ct) != true_ct_size:
            raise RuntimeError('The Color Table is shorter than specified')
        # Extract as much data from the Color Table as possible
        # Use only one-byte chunks to avoid complication, so a 15 byte color table will
        # contain one byte of data across the first 8 bytes and no data in the last 7
        num_bytes = true_ct_size // 8
        for index in range(num_bytes):
            # Reassemble one data byte from the LSBs of 8 table bytes (MSB first).
            byte = 0
            byte |= (ct[index * 8 + 0] & 0b00000001) << 7
            byte |= (ct[index * 8 + 1] & 0b00000001) << 6
            byte |= (ct[index * 8 + 2] & 0b00000001) << 5
            byte |= (ct[index * 8 + 3] & 0b00000001) << 4
            byte |= (ct[index * 8 + 4] & 0b00000001) << 3
            byte |= (ct[index * 8 + 5] & 0b00000001) << 2
            byte |= (ct[index * 8 + 6] & 0b00000001) << 1
            byte |= (ct[index * 8 + 7] & 0b00000001) << 0
            # Add the extracted byte if there is still data remaining
            # (length prefix at all_data[0] plus all_data[0] payload bytes).
            if len(all_data) == 0 or len(all_data) <= all_data[0]:
                all_data.append(byte)
def hide_data(in_f, out_f, has_ct, ct_size, data):
    """Embed bytes of *data* into the LSBs of a GIF color table.

    Reads the color table (if *has_ct*) from *in_f*, overwrites the least
    significant bit of its bytes with the data bits (one data byte spread
    over 8 consecutive table bytes, MSB first), writes the modified table
    to *out_f* and returns the number of data bytes embedded.  Leftover
    table bytes (and leftover data) are left untouched.  Without a color
    table nothing is read or written and 0 is returned.
    """
    if not has_ct:
        # No Color Table => No space to hide stuff
        return 0
    table_len = 3 * (2 ** (ct_size + 1))
    table = bytearray(in_f.read(table_len))
    if len(table) != table_len:
        raise RuntimeError('The Color Table is shorter than specified')
    # Only whole 8-byte groups carry data, so a 15 byte color table holds
    # one byte of data in its first 8 bytes and none in the last 7.
    count = min(table_len // 8, len(data))
    for idx in range(count):
        byte = data[idx]
        for bit in range(8):
            pos = idx * 8 + bit
            # Replace the LSB with the corresponding data bit (MSB first).
            table[pos] = (table[pos] & 0b11111110) | ((byte >> (7 - bit)) & 1)
    out_f.write(table)
    return count
def steg(in_path, out_path=None, data=None):
    """
    Walk a GIF file block-by-block, copying it to out_path.

    Dual purpose, selected by ``data``:
      * data is not None -> HIDE: the payload (prefixed with one length byte)
        is embedded into the LSBs of the Global/Local Color Tables.
      * data is None     -> EXTRACT: the color tables are scanned instead and
        the recovered payload bytes are returned (length byte stripped).

    NOTE(review): maybe_open, copy_blocks, extract_data and the module-level
    all_data accumulator are defined elsewhere in this file; extract_data
    presumably appends recovered bytes to all_data — confirm there.
    """
    # Must encode the length of the data so we know how much to read when extracting
    if data is not None:
        data_array = bytearray(data)
        data_array.insert(0, len(data))
        data = data_array
    with open(in_path, 'rb') as in_f:
        # maybe_open presumably yields a real file, or a do-nothing sink when
        # out_path is None (the extract case) — verify against its definition.
        with maybe_open(out_path, 'wb') as out_f:
            # First the Header: 6 bytes, "GIF" signature + 3-byte version.
            header = in_f.read(6)
            if len(header) != 6:
                raise RuntimeError('The Header is too short to be valid')
            signature, version = struct.unpack('<3s3s', header)
            if signature != b'GIF':
                raise RuntimeError('The signature does not match the GIF specification')
            out_f.write(header)
            # Next the Logical Screen Descriptor (7 bytes, little-endian).
            screen_descriptor = in_f.read(7)
            if len(screen_descriptor) != 7:
                raise RuntimeError('The Logical Screen Descriptor is too short to be valid')
            width, heigh, packed, bg_color_index, aspect_ratio = struct.unpack('<2H3B', screen_descriptor)
            # Unpack the packed-fields byte: GCT flag, color resolution,
            # sort flag, and the 3-bit GCT size exponent.
            has_gct = (packed & 0b10000000) >> 7
            color_res = (packed & 0b01110000) >> 4
            sort_flag = (packed & 0b00001000) >> 3
            gct_size = (packed & 0b00000111) >> 0
            out_f.write(screen_descriptor)
            # Then the Global Color Table (if present): hide into it or scan it.
            if data is not None:
                bytes_written = hide_data(in_f, out_f, has_gct, gct_size, data)
            else:
                extract_data(in_f, has_gct, gct_size)
            # Loop over the rest of the blocks in the image
            while True:
                # Read a byte to determine the block type
                field = in_f.read(1)
                if len(field) != 1:
                    raise RuntimeError('Expected more data when there was none')
                byte = field[0]
                if byte == 0x2C:
                    # Image Descriptor (9 bytes after the 0x2C introducer).
                    descriptor = in_f.read(9)
                    if len(descriptor) != 9:
                        raise RuntimeError('The Image Descriptor is too short to be valid')
                    left_pos, top_pos, width, height, packed = struct.unpack('<4HB', descriptor)
                    has_lct = (packed & 0b10000000) >> 7
                    interlace = (packed & 0b01000000) >> 6
                    sort_flag = (packed & 0b00100000) >> 5
                    reserved = (packed & 0b00011000) >> 4
                    lct_size = (packed & 0b00000111) >> 0
                    out_f.write(bytes([byte]))
                    out_f.write(descriptor)
                    # Then the Local Color Table (if present); in hide mode we
                    # continue from where the previous table left off.
                    if data is not None:
                        bytes_written += hide_data(in_f, out_f, has_lct, lct_size, data[bytes_written:])
                    else:
                        extract_data(in_f, has_lct, lct_size)
                    # Then the Table Based Image Data (passed through verbatim).
                    lzw_min_size = in_f.read(1)
                    if len(lzw_min_size) != 1:
                        raise RuntimeError('No LZW Minimum Code Size value')
                    lzw_min_size, = struct.unpack('<B', lzw_min_size)
                    out_f.write(bytes([lzw_min_size]))
                    copy_blocks(in_f, out_f)
                elif byte == 0x21:
                    # Extension Block: label byte + sub-blocks, copied verbatim.
                    block_label = in_f.read(1)
                    if len(block_label) != 1:
                        raise RuntimeError('No Extension Block label')
                    out_f.write(bytes([byte]))
                    out_f.write(block_label)
                    # Just as a reference (for our purposes, we can pass these through all the same)
                    # F9 = Graphic Control
                    # FE = Comment
                    # 01 = Plain Text
                    # FF = Application
                    # 99 = Our Custom Extension Block Type
                    # Copy the blocks
                    copy_blocks(in_f, out_f)
                elif byte == 0x3B:
                    # Trailer: end of the GIF stream.
                    out_f.write(bytes([byte]))
                    break
                else:
                    raise RuntimeError(f'Unexpected byte {hex(byte)} found while decoding')
            # Politely pass any extra appended data through :)
            out_f.write(in_f.read())
    if data is not None:
        # Verify that we wrote all the data (bytes_written includes the length byte)
        if bytes_written != len(data):
            raise RuntimeError(f'Failed to hide all the data ({max(0, bytes_written - 1)}/{len(data) - 1})')
    else:
        # If data was None (the extracting case), return all the extracted data
        # Don't include the hidden length byte...
        return all_data[1:]
| StarcoderdataPython |
49368 | from .provider import Provider
class NullProvider(Provider):
    """No-op Provider: every operation succeeds and yields nothing."""

    def connect(self):
        """Nothing to connect to."""

    def close(self):
        """Nothing to close."""

    def get_table_columns(self, table_name):
        """A null backend exposes no columns."""
        return []

    def get_tables(self):
        """A null backend exposes no tables."""
        return []

    def execute(self, query, **kwargs):
        """Accept any query and produce no result."""
        return None

    def clear_table(self, table_name):
        """Nothing to clear."""

    def clear_table_column(self, table_name, column_name):
        """Nothing to clear."""
3394814 | # -*- coding: utf-8 -*-
import sys
import csv
import json
def get_selection(pref_code):
    """Build the four answer choices for a prefecture quiz question.

    ``pref_code`` is the JIS prefecture code ("1".."47"); it is zero-padded to
    two digits before lookup. The first listed prefecture is the correct
    answer, the remaining three are plausible (mostly neighbouring) decoys.
    Returns a list of ``{"text": ..., "correct": bool}`` dicts, correct first.
    """
    QUIZ_SELECTION = {
        "01": ["北海道", "新潟県", "青森県", "秋田県"],
        "02": ["青森県", "岩手県", "秋田県", "山形県"],
        "03": ["岩手県", "山形県", "秋田県", "宮城県"],
        "04": ["宮城県", "岩手県", "福島県", "山形県"],
        "05": ["秋田県", "青森県", "山形県", "岩手県"],
        "06": ["山形県", "秋田県", "宮城県", "岩手県"],
        "07": ["福島県", "新潟県", "宮城県", "栃木県"],
        "08": ["茨城県", "栃木県", "群馬県", "福島県"],
        "09": ["栃木県", "茨城県", "群馬県", "福島県"],
        "10": ["群馬県", "栃木県", "福島県", "新潟県"],
        "11": ["埼玉県", "茨城県", "栃木県", "群馬県"],
        "12": ["千葉県", "東京都", "埼玉県", "茨城県"],
        "13": ["東京都", "千葉県", "埼玉県", "神奈川県"],
        "14": ["神奈川県", "兵庫県", "東京都", "千葉県"],
        "15": ["新潟県", "山形県", "富山県", "鳥取県"],
        "16": ["富山県", "石川県", "新潟県", "長野県"],
        "17": ["石川県", "新潟県", "富山県", "福井県"],
        "18": ["福井県", "石川県", "富山県", "京都府"],
        "19": ["山梨県", "長野県", "岐阜県", "群馬県"],
        "20": ["長野県", "山梨県", "岐阜県", "群馬県"],
        "21": ["岐阜県", "長野県", "滋賀県", "秋田県"],
        "22": ["静岡県", "神奈川県", "愛知県", "山梨県"],
        "23": ["愛知県", "静岡県", "三重県", "神奈川県"],
        "24": ["三重県", "愛知県", "和歌山県", "滋賀県"],
        "25": ["滋賀県", "愛知県", "福井県", "京都府"],
        "26": ["京都府", "奈良県", "和歌山県", "滋賀県"],
        "27": ["大阪府", "兵庫県", "東京都", "愛知県"],
        "28": ["兵庫県", "大阪府", "広島県", "岡山県"],
        "29": ["奈良県", "京都府", "和歌山県", "滋賀県"],
        "30": ["和歌山県", "奈良県", "京都府", "三重県"],
        "31": ["鳥取県", "島根県", "山口県", "兵庫県"],
        "32": ["島根県", "鳥取県", "山口県", "広島県"],
        "33": ["岡山県", "香川県", "兵庫県", "広島県"],
        "34": ["広島県", "山口県", "岡山県", "福岡県"],
        "35": ["山口県", "岡山県", "広島県", "福岡県"],
        "36": ["徳島県", "高知県", "香川県", "愛媛県"],
        "37": ["香川県", "徳島県", "愛媛県", "高知県"],
        "38": ["愛媛県", "高知県", "徳島県", "香川県"],
        "39": ["高知県", "和歌山県", "徳島県", "愛媛県"],
        "40": ["福岡県", "山口県", "熊本県", "佐賀県"],
        "41": ["佐賀県", "福岡県", "熊本県", "長崎県"],
        "42": ["長崎県", "佐賀県", "熊本県", "大分県"],
        "43": ["熊本県", "大分県", "宮崎県", "鹿児島県"],
        "44": ["大分県", "鹿児島県", "宮崎県", "熊本県"],
        "45": ["宮崎県", "鹿児島県", "大分県", "熊本県"],
        "46": ["鹿児島県", "宮崎県", "沖縄県", "長崎県"],
        "47": ["沖縄県", "東京都", "鹿児島県", "長崎県"],
    }
    padded = ("00" + pref_code)[-2:]
    names = QUIZ_SELECTION[padded]
    # Only the leading entry is the correct answer.
    return [{"text": name, "correct": pos == 0} for pos, name in enumerate(names)]
def convert_json(inputf=None, outputf=None):
    """Convert tab-separated photo-spot rows into the quiz JSON document.

    Reads TSV rows (header required) from ``inputf`` and writes
    ``{"quizzes": [...]}`` to ``outputf``. Both default to stdin/stdout,
    preserving the original CLI behaviour; the optional parameters exist so
    the conversion can be driven (and tested) with any file-like objects.
    Rows whose ``spottitle`` is empty carry no quiz content and are skipped.
    """
    if inputf is None:
        inputf = sys.stdin
    if outputf is None:
        outputf = sys.stdout
    reader = csv.DictReader(inputf, delimiter="\t")
    quizzes = []
    for row in reader:
        if row['spottitle'] == '':
            continue
        quizzes.append({
            "url": row["url"],
            "thumburl": row["thumb_url"],
            "phototitle": row["title"],
            "title": row["spottitle"],
            "photographer": row["photographer"],
            "subtitle": row["subtitle"],
            "lat": float(row["lat"]),
            "lon": float(row["lon"]),
            "place_id": row["place_id"],
            # Answer choices are derived from the prefecture code.
            "selections": get_selection(row["prefecture_code"]),
            "description": row["description"],
        })
    # ensure_ascii=False keeps the Japanese text readable in the output.
    json.dump({"quizzes": quizzes}, outputf, ensure_ascii=False)
if __name__ == "__main__":
    # CLI entry point: read TSV on stdin, emit quiz JSON on stdout.
    convert_json()
| StarcoderdataPython |
1770534 | <filename>login/views.py<gh_stars>0
# encoding: utf-8
from django.shortcuts import render
import logging
import urllib
import urllib2
import json
from django.http import HttpResponseRedirect,HttpResponse
from mysite import settings
from django.core.urlresolvers import reverse
from Xromate.lib.User import Users
import logging
logger = logging.getLogger('django')
# Create your views here.
# OAuth app credentials and endpoints, all sourced from Django settings.
# GITHUB
GITHUB_CLIENTID = settings.GITHUB_CLIENTID
GITHUB_CLIENTSECRET = settings.GITHUB_CLIENTSECRET
GITHUB_CALLBACK = settings.GITHUB_CALLBACK
GITHUB_AUTHORIZE_URL = settings.GITHUB_AUTHORIZE_URL
# GITLAB
GITLAB_CLIENTID = settings.GITLAB_CLIENTID
GITLAB_CLIENTSECRET = settings.GITLAB_CLIENTSECRET
GITLAB_CALLBACK = settings.GITLAB_CALLBACK
GITLAB_AUTHORIZE_URL = settings.GITLAB_AUTHORIZE_URL
GITLAB_URL = settings.GITLAB_URL  # base URL of the self-hosted GitLab instance
def login(request):
    """Render the login page offering the OAuth providers."""
    return render(request, 'login/login.html')
def _get_refer_url(request):
refer_url = request.META.get('HTTP_REFERER', '/')
host = request.META['HTTP_HOST']
if refer_url.startswith('http') and host in refer_url:
refer_url = '/'
if 'back_url' in request.session:
refer_url = request.session['back_url']
del request.session['back_url']
return refer_url
def gitlab_login(request):
    """Redirect the browser to GitLab's OAuth2 authorization endpoint.

    The page the user came from (via ``_get_refer_url``) rides along in
    ``state`` so the callback can send them back afterwards.
    NOTE: Python 2 only — relies on ``urllib.urlencode``.
    """
    data = {
        'client_id': GITLAB_CLIENTID,
        'redirect_uri': GITLAB_CALLBACK,
        'response_type': 'code',
        'state': _get_refer_url(request),
    }
    gitlab_auth_url = '%s?%s'% (
        GITLAB_AUTHORIZE_URL, urllib.urlencode(data)
    )
    return HttpResponseRedirect(gitlab_auth_url)
def github_login(request):
    """Redirect the browser to GitHub's OAuth authorization endpoint.

    The origin page (via ``_get_refer_url``) is round-tripped in ``state``.
    NOTE: Python 2 only — relies on ``urllib.urlencode``.
    """
    data = {
        'client_id': GITHUB_CLIENTID,
        'client_secret': GITHUB_CLIENTSECRET,
        'redirect_uri': GITHUB_CALLBACK,
        'state': _get_refer_url(request),
    }
    github_auth_url = '%s?%s' % (
        GITHUB_AUTHORIZE_URL, urllib.urlencode(data)
    )
    #print ('git_hub_auth_url',github_auth_url)
    return HttpResponseRedirect(github_auth_url)
def gitlab_auth(request):
    """Handle the GitLab OAuth2 callback.

    Exchanges the authorization ``code`` for an access token, fetches the
    GitLab user profile, marks the session authenticated, records the user in
    the app database selected by ``state`` (the original back-path), and
    finally redirects back to that path.
    NOTE: Python 2 only — relies on ``urllib``/``urllib2``.
    """
    template_html = 'login/login.html'
    if 'code' not in request.GET:
        return render(request, template_html)
    code = request.GET.get('code')
    back_path = request.GET.get('state')
    url = '%s%s' % (GITLAB_URL, 'oauth/token')
    data = {
        'client_id': GITLAB_CLIENTID,
        'client_secret': GITLAB_CLIENTSECRET,
        'code': code,
        'grant_type': 'authorization_code',
        'redirect_uri': GITLAB_CALLBACK,
    }
    binary_data = urllib.urlencode(data).encode('utf-8')
    # Fixed: header name was misspelled 'Accep' (github_auth below spells it
    # correctly), so GitLab was free to ignore the JSON content negotiation.
    headers = {'Accept': 'application/json'}
    req = urllib2.Request(url, binary_data, headers)
    response = urllib2.urlopen(req)
    result = json.loads(response.read())
    access_token = result['access_token']
    # Fetch the user's profile with the freshly issued token.
    url = "%s%s?access_token=%s" % (GITLAB_URL, 'api/v3/user', access_token)
    response = urllib2.urlopen(url)
    data = json.loads(response.read().decode('ascii'))
    request.session['username'] = data['username']
    request.session['is_authenticated'] = True
    # Persist the user depending on which app initiated the login.
    if back_path.startswith('/xromate'):
        # Look up the user in the Xromate DB: create on first login,
        # refresh the stored token otherwise.
        try:
            user = Users.objects.get(username=data['username'])
            # NOTE(review): instance-level .update() is not a stock Django
            # model method — confirm the custom Users model provides it.
            user.update(access_token=access_token)
            user.save()
        except Users.DoesNotExist:
            user = Users.objects.create(username=data['username'], access_token=access_token)
            user.save()
    elif back_path.startswith('/monitor'):
        # Monitor DB user bookkeeping is not implemented yet.
        pass
    else:
        pass
    return HttpResponseRedirect(back_path)
def github_auth(request):
    """Handle the GitHub OAuth callback: exchange the code for a token, fetch
    the user profile, mark the session authenticated, then redirect back.

    NOTE: Python 2 only — relies on ``urllib``/``urllib2``.
    """
    template_html = 'login/login.html'
    if 'code' not in request.GET:
        return render(request, template_html)
    code = request.GET.get('code')
    # Determine the back_url the user originally came from (round-tripped in `state`).
    back_path = request.GET.get('state')
    # Exchange the authorization code for an access token.
    url = 'https://github.com/login/oauth/access_token'
    data = {
        'client_id': GITHUB_CLIENTID,
        'client_secret': GITHUB_CLIENTSECRET,
        'code': code,
        'redirect_uri': GITHUB_CALLBACK,
    }
    data = urllib.urlencode(data)
    binary_data = data.encode('utf-8')
    headers = {'Accept': 'application/json'}
    req = urllib2.Request(url, binary_data, headers)
    response = urllib2.urlopen(req)
    result = json.loads(response.read())
    access_token = result['access_token']
    # Fetch the user's display name with the token.
    url = 'https://api.github.com/user?access_token=%s' % (access_token)
    response = urllib2.urlopen(url)
    data = json.loads(response.read().decode('ascii'))
    request.session['username'] = data['name']
    request.session['is_authenticated'] = True
    # Select which app database needs the user record, based on back_path.
    if back_path.startswith('/xromate'):
        '''
        Look up the user in the Xromate DB; create if missing. Currently a no-op.
        '''
        pass
    elif back_path.startswith('/monitor'):
        '''
        Look up the user in the Monitor DB; create if missing. Currently a no-op.
        '''
        pass
    else:
        pass
    # Return to the page the user originally requested.
    return HttpResponseRedirect(back_path)
def logout(request):
    """Drop the session identity and bounce back to the login page."""
    request.session.pop('username', None)
    request.session['is_authenticated'] = False
    return HttpResponseRedirect(reverse('web_login'))
| StarcoderdataPython |
3370658 | from datetime import datetime
from typing import Tuple
from unittest import mock
from freezegun import freeze_time
from google.appengine.ext import testbed
from werkzeug.test import Client
from backend.common.consts.event_type import EventType
from backend.common.helpers.event_insights_helper import EventInsightsHelper
from backend.common.helpers.matchstats_helper import MatchstatsHelper
from backend.common.helpers.prediction_helper import PredictionHelper
from backend.common.models.event import Event
from backend.common.models.event_details import EventDetails
from backend.common.models.event_insights import EventInsights
from backend.common.models.event_predictions import (
EventPredictions,
TEventStatMeanVars,
TMatchPredictions,
TMatchPredictionStats,
TRankingPredictions,
TRankingPredictionStats,
)
from backend.common.models.stats import EventMatchStats, StatType
def test_enqueue_bad_year(tasks_client: Client) -> None:
    """A non-numeric year in the enqueue URL should 404."""
    resp = tasks_client.get("/tasks/math/enqueue/event_matchstats/asdf")
    assert resp.status_code == 404
@freeze_time("2020-4-1")
def test_enqueue_no_events(
    tasks_client: Client, taskqueue_stub: testbed.taskqueue_stub.TaskQueueServiceStub
) -> None:
    """With no events stored, enqueueing succeeds but schedules no tasks."""
    resp = tasks_client.get("/tasks/math/enqueue/event_matchstats/2020")
    assert resp.status_code == 200
    assert len(resp.data) > 0
    tasks = taskqueue_stub.get_filtered_tasks(queue_names="default")
    assert len(tasks) == 0
def test_enqueue_event(
    tasks_client: Client,
    taskqueue_stub: testbed.taskqueue_stub.TaskQueueServiceStub,
    ndb_stub,
) -> None:
    """Enqueueing for a year with one stored event schedules exactly one task."""
    Event(
        id="2020event",
        year=2020,
        event_short="event",
        event_type_enum=EventType.REGIONAL,
    ).put()
    resp = tasks_client.get("/tasks/math/enqueue/event_matchstats/2020")
    assert resp.status_code == 200
    # Per-event stat tasks go onto the ordered queue.
    tasks = taskqueue_stub.get_filtered_tasks(queue_names="run-in-order")
    assert len(tasks) == 1
@freeze_time("2020-4-1")
def test_enqueue_event_current(
    tasks_client: Client,
    taskqueue_stub: testbed.taskqueue_stub.TaskQueueServiceStub,
    ndb_stub,
) -> None:
    """The 'now' endpoint picks up an event whose dates match the frozen clock."""
    Event(
        id="2020event",
        year=2020,
        event_short="event",
        event_type_enum=EventType.REGIONAL,
        # Start/end equal to the frozen date so the event counts as "current".
        start_date=datetime(2020, 4, 1),
        end_date=datetime(2020, 4, 1),
    ).put()
    resp = tasks_client.get("/tasks/math/enqueue/event_matchstats/now")
    assert resp.status_code == 200
    tasks = taskqueue_stub.get_filtered_tasks(queue_names="run-in-order")
    assert len(tasks) == 1
def test_calc_no_event(tasks_client: Client) -> None:
    """Computing matchstats for an unknown event key should 404."""
    resp = tasks_client.get("/tasks/math/do/event_matchstats/2020test")
    assert resp.status_code == 404
@mock.patch.object(EventInsightsHelper, "calculate_event_insights")
@mock.patch.object(PredictionHelper, "get_ranking_predictions")
@mock.patch.object(PredictionHelper, "get_match_predictions")
@mock.patch.object(MatchstatsHelper, "calculate_matchstats")
def test_calc_matchstats(
    # Mocks arrive bottom-up relative to the decorator stack above.
    matchstats_mock: mock.Mock,
    match_prediction_mock: mock.Mock,
    ranking_prediction_mock: mock.Mock,
    event_insights_mock: mock.Mock,
    tasks_client: Client,
) -> None:
    """The do-handler stores all stubbed stats/predictions/insights on EventDetails."""
    Event(
        id="2020test",
        year=2020,
        event_short="test",
        event_type_enum=EventType.REGIONAL,
    ).put()
    # Canned values the patched helpers will return.
    matchstats: EventMatchStats = {
        StatType.OPR: {"254": 100.0},
    }
    match_predictions: Tuple[
        TMatchPredictions,
        TMatchPredictionStats,
        TEventStatMeanVars,
    ] = (
        {},
        {},
        {},
    )
    ranking_predictions: Tuple[TRankingPredictions, TRankingPredictionStats] = (
        [],
        {"last_played_match": None},
    )
    event_insights = EventInsights(
        qual=None,
        playoff=None,
    )
    matchstats_mock.return_value = matchstats
    match_prediction_mock.return_value = match_predictions
    ranking_prediction_mock.return_value = ranking_predictions
    event_insights_mock.return_value = event_insights
    resp = tasks_client.get("/tasks/math/do/event_matchstats/2020test")
    assert resp.status_code == 200
    # Without the taskqueue header the handler echoes a human-readable body.
    assert len(resp.data) > 0
    ed = EventDetails.get_by_id("2020test")
    assert ed is not None
    assert ed.matchstats == matchstats
    assert ed.predictions == EventPredictions(
        match_predictions=match_predictions[0],
        match_prediction_stats=match_predictions[1],
        stat_mean_vars=match_predictions[2],
        ranking_predictions=ranking_predictions[0],
        ranking_prediction_stats=ranking_predictions[1],
    )
    assert ed.insights == event_insights
@mock.patch.object(EventInsightsHelper, "calculate_event_insights")
@mock.patch.object(PredictionHelper, "get_ranking_predictions")
@mock.patch.object(PredictionHelper, "get_match_predictions")
@mock.patch.object(MatchstatsHelper, "calculate_matchstats")
def test_calc_matchstats_no_output_in_taskqueue(
    # Mocks arrive bottom-up relative to the decorator stack above.
    matchstats_mock: mock.Mock,
    match_prediction_mock: mock.Mock,
    ranking_prediction_mock: mock.Mock,
    event_insights_mock: mock.Mock,
    tasks_client: Client,
) -> None:
    """Mirror of test_calc_matchstats, but when invoked from the App Engine
    taskqueue (X-Appengine-Taskname header) the handler emits no body."""
    Event(
        id="2020test",
        year=2020,
        event_short="test",
        event_type_enum=EventType.REGIONAL,
    ).put()
    matchstats: EventMatchStats = {
        StatType.OPR: {"254": 100.0},
    }
    match_predictions: Tuple[
        TMatchPredictions,
        TMatchPredictionStats,
        TEventStatMeanVars,
    ] = (
        {},
        {},
        {},
    )
    ranking_predictions: Tuple[TRankingPredictions, TRankingPredictionStats] = (
        [],
        {"last_played_match": None},
    )
    event_insights = EventInsights(
        qual=None,
        playoff=None,
    )
    matchstats_mock.return_value = matchstats
    match_prediction_mock.return_value = match_predictions
    ranking_prediction_mock.return_value = ranking_predictions
    event_insights_mock.return_value = event_insights
    resp = tasks_client.get(
        "/tasks/math/do/event_matchstats/2020test",
        headers={
            "X-Appengine-Taskname": "test",
        },
    )
    assert resp.status_code == 200
    assert len(resp.data) == 0
| StarcoderdataPython |
185891 | <gh_stars>10-100
from PIL import Image
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from repalette.constants import RAW_DATABASE_PATH
from repalette.datasets import AbstractQueryDataset
from repalette.db.raw import RawImage
class RawDataset(AbstractQueryDataset):
    """
    Dataset of images downloaded from https://www.design-seeds.com/blog/.
    `repalette/utils/download_raw.py` must be run before using this dataset
    """
    def __init__(self, query=None, random_seed=None):
        # When no explicit query is given, pull every RawImage row from the
        # raw SQLite database and use that list as the dataset index.
        if query is None:
            engine = create_engine(f"sqlite:///{RAW_DATABASE_PATH}")
            # create a configured "Session" class
            Session = sessionmaker(bind=engine)
            session = Session()
            query = session.query(RawImage).all()
            # Rows are fully fetched, so the session can be closed right away.
            session.close()
        super().__init__(query=query, random_seed=random_seed)

    def _getitem(self, index):
        # Returns ((PIL image in RGB, its palette), the RawImage row itself).
        raw_image = self.query[index]
        image = Image.open(raw_image.path).convert("RGB")
        return (
            image,
            raw_image.palette,
        ), raw_image
| StarcoderdataPython |
4803388 | import argparse
# CLI definition: one required --filename pointing at the 16-hex-digit-per-line
# memory image to split into eight byte-wide lane files.
parser = argparse.ArgumentParser(description='Split a mem into several mems.')  # fixed typo: 'serverl'
parser.add_argument('--filename', type=str, required=True, help='mem initial file path')
def main(file):
    """Split a 64-bit-per-line hex memory image into eight byte-wide files.

    Each non-empty input line is zero-padded on the left to 16 hex digits
    (8 bytes). Byte lane ``i`` of every line is written to
    ``<stem>_<7-i><ext>`` (so ``_7`` holds the most significant byte), one
    two-digit value per line, with a trailing blank line per file.

    Fixes over the original: the input file is opened with a context manager
    (the handle leaked if parsing raised), and the loop variable ``f`` is no
    longer reused for both the output filename and the file object.
    """
    rows = []
    with open(file, "r") as src:
        for line in src:
            line = line.strip()
            if line == '':
                continue
            line = line.rjust(16, "0")
            # Lines longer than 16 digits are rejected outright.
            assert len(line) == 16
            rows.append([line[i:i + 2] for i in range(0, 16, 2)])
    # Highest-numbered suffix first, matching byte lane 0 (MSB) of each row.
    files = [file[:-4] + "_" + str(i) + file[-4:] for i in range(7, -1, -1)]
    print(files)
    for lane, path in enumerate(files):
        with open(path, "w") as out:
            for row in rows:
                out.write(row[lane] + "\n")
            out.write("\n")
if __name__ == '__main__':
    # CLI entry: --filename <path to the .mem/.txt memory image>
    args = parser.parse_args()
    filename = args.filename
    main(filename)
1721485 | <reponame>nosoyyo/blox<gh_stars>0
# -*- coding: utf-8 -*-
# flake8: noqa
# @absurdity
# pipelines esp. for blox
__author__ = 'nosoyyo'
#############################################################
#·TwitterPipeline·isn't·working·well·due·to·the·wall·u·know·#
#############################################################
# usage
#
# from web to qiniu:
# q = QiniuPipeline()
# pic_url = 'http://some.pic.url'
# ret = q.upload(pic_url)
#
# from qiniu to distribution:
# q = QiniuPipeline()
# downloadable_file_url = q.getFile(key)
import pymongo
import ezcf
import conf
# ==================
# MongoDB quickstart
# ==================
class Singleton(object):
    """Base class that caches one shared instance across construction calls.

    NOTE(review): the cache lives on ``Singleton._instance`` itself, so ALL
    subclasses share a single instance slot. That matches how this module uses
    it (one pipeline per process) but verify before adding more subclasses.
    """
    _instance = None

    def __new__(cls, dbname, usr, pwd, *args, **kw):
        if not cls._instance:
            # object.__new__ takes no extra arguments; the original forwarded
            # *args/**kw, which only worked via a CPython leniency quirk and
            # would break for keyword construction paths.
            cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance
class MongoDBPipeline(Singleton):
    """Singleton MongoClient wrapper with chainable database/collection switches.

    Connection parameters come from the module-level ``conf``; credentials are
    passed per call. ``setDB``/``setCol`` return ``self`` for chaining.
    """
    def __init__(self, dbname, usr, pwd, conf=conf):
        self.client = pymongo.MongoClient(
            conf.MongoDBServer,
            conf.MongoDBPort,
            username=usr,
            password=pwd,  # fixed: the redacted placeholder '<PASSWORD>' was not valid Python
            ssl=conf.MongoDBSSL,
            #ssl_certfile=conf.MongoDBSSL_CERTFILE,
            #ssl_keyfile=conf.MongoDBSSL_KEYFILE,
        )
        self.db = self.client.get_database(dbname)
        # NOTE(review): Database.authenticate was removed in pymongo 4 —
        # confirm the pinned pymongo version still supports it.
        self.auth = self.db.authenticate(usr, pwd)
        self.col = self.db.get_collection(conf.MongoDBInitCol)

    def setDB(self, dbname):
        """Switch the active database; returns self for chaining."""
        self.db = self.client.get_database(dbname)
        return self

    def setCol(self, dbname, colname):
        """Switch both the active database and collection; returns self."""
        self.db = self.client.get_database(dbname)
        self.col = self.db.get_collection(colname)
        return self

    def ls(self):
        """List collection names in the active database."""
        return self.db.list_collection_names()
# ===============
# wxpy quickstart
# ===============
class WxpyPipeline():
    """Bootstrap a wxpy WeChat bot with puid tracking enabled."""
    def __init__(self, cache_path=True, console_qr=True,):
        '''
        'from wxpy import *' is only allowed at module level, so the needed
        names are imported function-locally here.
        '''
        import sys
        import logging
        from wxpy.api.bot import Bot
        from wxpy.api.chats import Chat, Chats, Friend, Group, Groups, MP, Member, User
        from wxpy.api.consts import ATTACHMENT, CARD, FRIENDS, MAP, NOTE, PICTURE, RECORDING, SHARING, SYSTEM, TEXT, VIDEO
        from wxpy.api.consts import FEMALE, MALE
        from wxpy.api.messages import Article, Message, Messages, SentMessage
        from wxpy.exceptions import ResponseError
        from wxpy.ext import Tuling, WeChatLoggingHandler, XiaoI, get_wechat_logger, sync_message_in_groups
        from wxpy.utils import BaseRequest, detect_freq_limit, dont_raise_response_error, embed, ensure_one, mutual_friends
        # Fixed: the constructor arguments were ignored in favour of
        # conf.cache_path / conf.console_qr, attributes conf does not define;
        # honour the parameters instead.
        self.bot = Bot(cache_path=cache_path, console_qr=console_qr)
        self.bot.enable_puid()
# Module-level bootstrap: connect the wxpy profile store.
# NOTE(review): this runs on import and requires a live MongoDB; `.col.wx.puid`
# attribute-chains into a pymongo Collection, which creates sub-collections
# rather than reading a field — confirm the intent.
m = MongoDBPipeline(conf.WxpyDBName, conf.WxpyDBUser, conf.WxpyDBPwd)
puid_col = m.setCol(conf.WxpyDBName, 'profile').col.wx.puid
# get staff list
# ================
# Qiniu quickstart
# ================
class QiniuPipeline():
    """Fetch-to-Qiniu and signed-download helper.

    Credentials are read from MongoDB; pictures are fetched server-side into
    the configured bucket and retrieved via private download URLs.
    """
    # import (class-level: these names become class attributes, e.g. self.Auth)
    from qiniu import Auth, BucketManager, put_file, etag, urlsafe_base64_encode
    import qiniu.config

    def __init__(self, dbname, usr, pwd):
        self.m = MongoDBPipeline(dbname, usr, pwd)
        self.m_auth = self.m.auth
        # NOTE(review): col.find()[0] is a plain dict, so `.keys` is the dict
        # method and `.keys.access_key` raises AttributeError — confirm the
        # stored document shape / any wrapper in use.
        self.keys = self.m.setCol(conf.QiniuDBUser, conf.QiniuProfileCol).col.find()[0].keys
        self.access_key = self.keys.access_key
        self.secret_key = self.keys.secret_key
        # Build the authentication object.
        self.auth = self.Auth(self.access_key, self.secret_key)
        # NOTE(review): bare `BucketManager` is a class attribute, not a local
        # name — this line raises NameError as written; should be
        # self.BucketManager(self.auth).
        self.bucket = BucketManager(self.auth)

    # The bucket to upload into.
    bucket_name = conf.BucketName
    # Filename prefix to use after upload (currently unused).
    #prefix = 'tommy'

    def upload(self, pic_url):
        # Fetch a remote picture into the bucket, keyed by its basename.
        bucket_name = self.bucket_name
        key = pic_url.split('/')[-1]
        token = self.auth.upload_token(bucket_name, key, 0)
        ret = self.bucket.fetch(pic_url, bucket_name, key)
        return ret

    def getFile(self, key):
        # Signed, time-limited download URL for a private-bucket object.
        url = self.auth.private_download_url(conf.QINIU_PRIVATE + key)
        return url

    def ls(self):
        # NOTE(review): BucketManager.list returns (ret, eof, info) with ret a
        # dict — `.items` attribute access looks wrong; verify.
        l = self.bucket.list(self.bucket_name)[0].items
        return l

    def count(self):
        c = len(self.bucket.list(self.bucket_name)[0].items)
        return c
# ==================
# Twitter quickstart
# ==================
class TwitterPipeline():
    """Authenticated tweepy API factory; credentials are pulled from MongoDB."""
    def __init__(self, dbname, username, password):
        import tweepy
        self.m = MongoDBPipeline(dbname, username, password)
        self.m_auth = self.m.auth
        # NOTE(review): col.find()[0] is a plain dict, so attribute-style
        # access below (.consumer_key etc.) only works if a custom wrapper is
        # involved — verify the stored document shape.
        self.keys = self.m.setCol(conf.TWITTER_USERNAME, conf.TWITTER_PROFILE).col.find()[0].keys
        consumer_key = self.keys.consumer_key
        consumer_secret = self.keys.consumer_secret
        access_token = self.keys.access_token
        access_token_secret = self.keys.access_token_secret
        # Fixed: the original read self.consumer_key etc. (never assigned —
        # guaranteed AttributeError) and dropped the tweepy handles in locals.
        # Build from the locals above and keep the handles on the instance.
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        self.auth = auth
        self.api = tweepy.API(self.auth)
1789304 | import numpy as np
import logging
from benchmarker import benchmark
logger = logging.getLogger('exp11_blocked_matrices')
@benchmark
def blocked_solve_naive(A1, A2, B, C):
    """Solve diag(A1, A2) @ X = B by materialising the full block matrix.

    ``C`` is accepted only so the signature matches the blocked variant; the
    solution is returned, not written into it.
    """
    zeros_upper = np.zeros((A1.shape[0], A1.shape[1]), dtype=np.float64)
    zeros_lower = np.zeros((A2.shape[0], A2.shape[1]), dtype=np.float64)
    top = np.concatenate((A1, zeros_upper), axis=1)
    bottom = np.concatenate((zeros_lower, A2), axis=1)
    full = np.concatenate((top, bottom), axis=0)
    return np.linalg.solve(full, B)
@benchmark
def blocked_solve_recommended(A1, A2, B, C):
    """Exploit the block-diagonal structure: solve each block independently.

    Splits B at A1's row count, solves the two smaller systems, and stacks
    the partial solutions. ``C`` is unused (kept for signature parity).
    """
    split = A1.shape[0]
    upper = np.linalg.solve(A1, B[0:split, 0:B.shape[1]])
    lower = np.linalg.solve(A2, B[split:, 0:B.shape[1]])
    return np.concatenate((upper, lower), axis=0)
def exp11_blocked_matrices(b, n):
    """Benchmark naive vs blocked solves of a block-diagonal n x n system.

    ``b`` is the benchmark harness; ``n`` the full system size (each diagonal
    block is n/2 x n/2). Logs whether both strategies agree numerically.
    """
    bm_n = int(n / 2)
    A1 = np.random.randn(bm_n, bm_n)
    A2 = np.random.randn(bm_n, bm_n)
    B = np.random.randn(2 * bm_n, 2 * bm_n)
    C = np.zeros((2 * bm_n, 2 * bm_n), dtype=np.float64)
    res1 = b.benchmark("compact", blocked_solve_naive, A1, A2, B, C)
    res2 = b.benchmark("blocked", blocked_solve_recommended, A1, A2, B, C)
    # Sanity check: both strategies must produce the same solution.
    logger.info('PartitionedMatrices correctness: {}'.format(np.allclose(res1, res2)))
| StarcoderdataPython |
# Print a 150-row grid, each row being 51 ones separated (and followed) by a space.
for _ in range(150):
    print("1 " * 51)
140845 | from django.db import models
class Document(models.Model):
    """An uploaded file with an optional free-text description."""
    description = models.CharField(max_length=255, blank=True)
    document = models.FileField(upload_to='')
    uploaded_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.document.name

    # pylint: disable=arguments-differ
    def delete(self, *args, **kwargs):
        # Remove the stored file before deleting the row, so no orphaned
        # file is left in storage.
        self.document.delete()
        super().delete(*args, **kwargs)

    class Meta:
        # Replace Django's default add/change/delete permissions with a
        # single custom upload permission.
        default_permissions = ()
        permissions = (
            ('upload_documents', 'Can upload documents'),
        )
166734 | <gh_stars>1-10
"""
ゼロから学ぶスパイキングニューラルネットワーク
- Spiking Neural Networks from Scratch
Copyright (c) 2020 HiroshiARAKI. All Rights Reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
class HodgkinHuxley:
    """Single-compartment Hodgkin-Huxley neuron, integrated with forward Euler."""

    def __init__(self, time, dt, rest=-65., Cm=1.0, gNa=120., gK=36., gl=0.3, ENa=50., EK=-77., El=-54.387):
        """
        Initialize Neuron parameters
        :param time: experimental time [ms]
        :param dt: time step [ms]
        :param rest: resting potential [mV]
        :param Cm: membrane capacity [uF/cm^2]
        :param gNa: Na+ channel conductance [mS/cm^2]
        :param gK: K+ channel conductance [mS/cm^2]
        :param gl: other (Cl) channel conductance [mS/cm^2]
        :param ENa: Na+ equilibrium potential [mV]
        :param EK: K+ equilibrium potential [mV]
        :param El: other (Cl) equilibrium potentials [mV]
        """
        self.time = time
        self.dt = dt
        self.rest = rest
        self.Cm = Cm
        self.gNa = gNa
        self.gK = gK
        self.gl = gl
        self.ENa = ENa
        self.EK = EK
        self.El = El

    def calc(self, i):
        """Integrate the membrane potential for input current trace ``i``.

        ``i`` must be indexable with at least time/dt samples [uA/cm^2].
        Returns four lists sampled every dt: (v, n, m, h).
        """
        # initialize state: resting potential and steady-ish gating values
        v = self.rest
        n = 0.32
        m = 0.05
        h = 0.6
        v_monitor = []
        n_monitor = []
        m_monitor = []
        h_monitor = []
        time = int(self.time / self.dt)
        # update time (forward Euler; gating updated before the voltage step)
        for t in range(time):
            # calc channel gating kinetics
            n += self.dn(v, n)
            m += self.dm(v, m)
            h += self.dh(v, h)
            # calc tiny membrane potential change from the three currents
            dv = (i[t] -
                  self.gK * n**4 * (v - self.EK) -  # K+ current
                  self.gNa * m**3 * h * (v - self.ENa) -  # Na+ current
                  self.gl * (v - self.El)) / self.Cm  # leak current
            # calc new membrane potential
            v += dv * self.dt
            # record
            v_monitor.append(v)
            n_monitor.append(n)
            m_monitor.append(m)
            h_monitor.append(h)
        return v_monitor, n_monitor, m_monitor, h_monitor

    # Euler increments of the gating variables over one dt.
    def dn(self, v, n):
        return (self.alpha_n(v) * (1 - n) - self.beta_n(v) * n) * self.dt

    def dm(self, v, m):
        return (self.alpha_m(v) * (1 - m) - self.beta_m(v) * m) * self.dt

    def dh(self, v, h):
        return (self.alpha_h(v) * (1 - h) - self.beta_h(v) * h) * self.dt

    # Rate constants; the classic HH formulas use voltage relative to rest,
    # hence the (v - self.rest) shift throughout.
    def alpha_n(self, v):
        return 0.01 * (10 - (v - self.rest)) / (np.exp((10 - (v - self.rest))/10) - 1)

    def alpha_m(self, v):
        return 0.1 * (25 - (v - self.rest)) / (np.exp((25 - (v - self.rest))/10) - 1)

    def alpha_h(self, v):
        return 0.07 * np.exp(-(v - self.rest) / 20)

    def beta_n(self, v):
        return 0.125 * np.exp(-(v - self.rest) / 80)

    def beta_m(self, v):
        return 4 * np.exp(-(v - self.rest) / 18)

    def beta_h(self, v):
        return 1 / (np.exp((30 - (v - self.rest))/10) + 1)
if __name__ == '__main__':
    # init experimental time and time-step
    time = 300  # experiment (observation) duration [ms]
    dt = 2**-4  # time resolution [ms] (the HH model needs a small step to integrate stably)
    # Hodgkin-Huxley Neuron
    neuron = HodgkinHuxley(time, dt)
    # Input current: an ad-hoc square wave plus noise, with a second square wave mixed in.
    input_data = np.sin(0.5 * np.arange(0, time, dt))
    input_data = np.where(input_data > 0, 20, 0) + 10 * np.random.rand(int(time/dt))
    input_data_2 = np.cos(0.4 * np.arange(0, time, dt) + 0.5)
    input_data_2 = np.where(input_data_2 > 0, 10, 0)
    input_data += input_data_2
    # Compute membrane potential and gating variables.
    # Fixed: calc() returns (v, n, m, h); the original unpacked `v, m, n, h`,
    # swapping the n and m traces and mislabeling them in the plot legend.
    v, n, m, h = neuron.calc(input_data)
    # plot
    plt.figure(figsize=(12, 6))
    x = np.arange(0, time, dt)
    plt.subplot(3, 1, 1)
    plt.plot(x, input_data)
    plt.ylabel('I [μA/cm2]')
    plt.subplot(3, 1, 2)
    plt.plot(x, v)
    plt.ylabel('V [mV]')
    plt.subplot(3, 1, 3)
    plt.plot(x, n, label='n')
    plt.plot(x, m, label='m')
    plt.plot(x, h, label='h')
    plt.xlabel('time [ms]')
    plt.ylabel('Conductance param')
    plt.legend()
    plt.show()
| StarcoderdataPython |
80297 | <filename>Codes/fengxuanmo/2_add_two_nums/add_two_nums.py
#! coding:utf-8
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
def addTwoNumbers(l1, l2):
    """Add two non-negative integers stored as reversed-digit linked lists.

    :type l1: ListNode
    :type l2: ListNode
    :rtype: ListNode  (head of the reversed-digit sum list)
    """
    dummy = ListNode(0)
    tail = dummy
    carry = 0
    while l1 or l2:
        # A missing node contributes digit 0 (test the current node, not the
        # next one, so the very first iteration works).
        left = l1.val if l1 else 0
        right = l2.val if l2 else 0
        carry, digit = divmod(carry + left + right, 10)
        tail.next = ListNode(digit)
        tail = tail.next
        if l1 is not None:
            l1 = l1.next
        if l2 is not None:
            l2 = l2.next
    if carry > 0:
        # The final carry can only ever be 1 (9 + 9 + 1 = 19 at most).
        tail.next = ListNode(1)
    return dummy.next
#
# if __name__ == '__main__':
# addTwoNumbers()
| StarcoderdataPython |
114445 | <reponame>kiteco/kiteco-public<gh_stars>10-100
from datetime import timedelta
import base64
import hashlib
import mixpanel
import gzip
import json
import customerio
from airflow.contrib.operators.s3_list_operator import S3ListOperator
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.contrib.operators.aws_athena_operator import AWSAthenaOperator
from airflow.models import Variable
from airflow.hooks.S3_hook import S3Hook
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
import logging
import datetime
from jinja2 import PackageLoader
import kite_metrics
from kite_airflow.slack_alerts import task_fail_slack_alert
logger = logging.getLogger(__name__)
# Mixpanel-backed data only exists from this date onward.
MP_START_DATE = datetime.datetime(2020, 5, 29)
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime.datetime(2020, 5, 24),
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 0,
    'retry_delay': timedelta(minutes=5),
    # Failures are reported to Slack instead of email.
    'on_failure_callback': task_fail_slack_alert,
}
DATA_LOC = 's3://kite-metrics/firehose/kite_status/'
PROD_RESULT_LOC_PREFIX = 's3://kite-metrics/athena-results'
# Daily DAG (00:10 UTC). NOTE(review): description is a tutorial leftover.
dag = DAG(
    'kite_status_1d',
    default_args=default_args,
    description='A simple tutorial DAG',
    schedule_interval='10 0 * * *',
    # Query templates (athena/...) are resolved from the package's templates dir.
    jinja_environment_kwargs={
        'loader': PackageLoader('kite_airflow', 'templates')
    },
)
kite_status_config = kite_metrics.load_context('kite_status')
kite_status_schema = kite_metrics.load_schema('kite_status')
# Drop-then-create each Athena table. Since `a >> b` returns the downstream
# task, schema_reload_ops ends up holding the create_* tasks.
schema_reload_ops = []
for table_name in ['kite_status', 'kite_status_segment', 'kite_status_normalized']:
    schema_reload_ops.append(AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='drop_{}'.format(table_name),
        query='DROP TABLE {{params.table_name}}',
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        dag=dag,
        params={'table_name': table_name},
    ) >> AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='create_{}'.format(table_name),
        query='athena/tables/{}.tmpl.sql'.format(table_name),
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        dag=dag,
        params={'schema': kite_status_schema, 'table_name': table_name}
    ))
insert_kite_status_normalized = AWSAthenaOperator(
    aws_conn_id='aws_us_east_1',
    task_id='insert_kite_status_normalized',
    query='athena/queries/kite_status_normalized.tmpl.sql',
    output_location='s3://kite-metrics-test/athena-results/ddl',
    database='kite_metrics',
    dag=dag,
    params={'schema': kite_status_schema}
)
# Drops the per-run staging table ({{ds_nodash}} is the run's date stamp).
cleanup_kite_status_normalized_table = AWSAthenaOperator(
    aws_conn_id='aws_us_east_1',
    task_id='cleanup_kite_status_normalized_table',
    query='DROP TABLE kite_status_normalized_{{ds_nodash}}',
    output_location='s3://kite-metrics-test/athena-results/ddl',
    database='kite_metrics',
    dag=dag,
)
# All table rebuilds must finish before the insert, then the cleanup runs.
schema_reload_ops >> insert_kite_status_normalized >> cleanup_kite_status_normalized_table
def read_s3_json_files(bucket, file_list):
    """Yield JSON records from gzipped S3 objects, one record per line.

    Files are processed in sorted key order. Before a record is yielded,
    every None-valued key is pruned from it, recursing into nested dicts
    (values inside lists are left untouched).
    """
    s3 = S3Hook('aws_us_east_1')
    for key in sorted(file_list):
        obj = s3.get_key(key, bucket)
        for raw_line in gzip.open(obj.get()['Body']):
            record = json.loads(raw_line)
            pending = [record]
            while pending:
                node = pending.pop()
                # Snapshot the keys so deletion during iteration is safe.
                for field in list(node.keys()):
                    value = node[field]
                    if isinstance(value, dict):
                        pending.append(value)
                    elif value is None:
                        del node[field]
            yield record
def load_athena_to_elastic(task_instance, execution_date, **context):
    """Bulk-index the day's kite_status records into the Elastic Cloud cluster.

    Record IDs are md5(userid::YYYY/MM/DD), making the load idempotent per
    user per day. Records with zero language events, or whose event is not
    'kite_status', are skipped.
    """
    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import bulk
    es = Elasticsearch(
        cloud_id="metrics:XXXXXXX",
        http_auth=("elastic", Variable.get('elastic_password')),
    )
    # NOTE(review): `iter` shadows the builtin; harmless here but rename if touched.
    def iter():
        iter_records = read_s3_json_files('kite-metrics', task_instance.xcom_pull(task_ids='list_mixpanel_json_files'))
        for i, rec in enumerate(iter_records):
            try:
                # Skip records with no edit activity in any tracked language.
                if sum(rec.get('{}_events'.format(lang), 0) for lang in kite_status_config['languages']) == 0:
                    continue
                if rec['event'] != 'kite_status':
                    continue
                ts = datetime.datetime.fromtimestamp(rec['end_time'])
                rec_id_str = '{}::{}'.format(rec.get('userid', ''), ts.strftime('%Y/%m/%d'))
                rec_id = hashlib.md5(rec_id_str.encode('utf8')).hexdigest()
                rec['timestamp'] = ts
                # Monthly indices. NOTE(review): execution_date is a pendulum
                # datetime whose .format() uses tokens like 'YYYYMM', not
                # strftime '%Y%m' — confirm the index names come out as intended.
                yield {'_index': 'kite_status_1d_{}'.format(execution_date.format('%Y%m')), '_id': rec_id, '_source': rec}
            except Exception:
                logger.exception("Error processing line {}, content={}".format(i, rec))
                raise
    bulk(es, iter())
# Maps the raw event name (as stored in the Athena export) to the event name
# used by the downstream consumers (Elasticsearch index prefix / Mixpanel /
# Customer.io event names). Events not listed here are skipped by the loaders.
event_names = {
    'anon_supported_file_edited': 'anon_supported_file_edited_1d',
    'anon_kite_status': 'anon_kite_status_1d',
    'kite_status': 'kite_status_1d',
}
def load_athena_to_mixpanel(task_instance, execution_date, dag_run, storage_task_name, **context):
    """
    Push the day's records from the S3 JSON export into Mixpanel.

    Resumable: the last processed row index is checkpointed into the xcom of
    ``storage_task_name`` (key ``progress``); on retry, rows up to and
    including that index are skipped. On failure the checkpoint is rewound by
    100 rows (the buffered-consumer batch size) since buffered events may not
    have been flushed yet.
    """
    # Buffer up to 100 events per HTTP request to Mixpanel.
    mp_consumer = mixpanel.BufferedConsumer(max_size=100)
    mp_client = mixpanel.Mixpanel(Variable.get('mixpanel_credentials', deserialize_json=True)['token'], consumer=mp_consumer)
    start_row = task_instance.xcom_pull(task_ids=storage_task_name, key='progress')
    iter_records = read_s3_json_files('kite-metrics', task_instance.xcom_pull(task_ids='list_mixpanel_json_files'))
    for i, rec in enumerate(iter_records):
        if i <= start_row:
            continue
        try:
            # Deterministic per-user-per-day id so Mixpanel dedupes re-sends.
            # NOTE(review): ``str()`` of the b64 *bytes* yields a "b'...'"
            # repr, not the base64 text. Still deterministic/unique, but looks
            # unintended — changing it now would break dedup against already
            # stored $insert_id values; confirm before fixing.
            insert_id = str(base64.b64encode(
                hashlib.md5('{}::{}'.format(
                    rec['userid'],
                    execution_date.strftime('%Y/%m/%d')).encode('utf8')
                ).digest())[:16])
            rec.update({
                'time': rec['end_time'],
                '_group': 'firehose/kite_status/{}/'.format(execution_date.strftime('%Y/%m/%d')),
                '_version': '1.0.0',
                '$insert_id': insert_id,
            })
            user_id = rec['userid']
            name = event_names.get(rec['event'])
            if name is None:
                continue
            # Mixpanel's live track endpoint only accepts recent events;
            # older backfills must go through the import endpoint.
            if datetime.datetime.today() - execution_date < datetime.timedelta(days=4):
                mp_client.track(user_id, name, rec)
            else:
                ts = rec.pop('time')
                mp_client.import_data(Variable.get('mixpanel_credentials', deserialize_json=True)['api_key'], user_id, name, ts, rec)
            if i > 0 and i % 10000 == 0:
                logger.info("Processed line {}".format(i))
                dag_run.get_task_instance(storage_task_name).xcom_push(key='progress', value=i)
        except Exception:
            # Rewind by one consumer-buffer's worth so unflushed events are re-sent.
            dag_run.get_task_instance(storage_task_name).xcom_push(key='progress', value=i-100)
            logger.exception("Error processing line {}, content={}".format(i, rec))
            raise
    mp_consumer.flush()
def load_athena_to_cio(task_instance, execution_date, dag_run, storage_task_name, **context):
    """
    Backfill the day's kite_status records into Customer.io.

    Resumable via the ``progress`` xcom of ``storage_task_name`` (same scheme
    as the Mixpanel loader). API calls are fanned out over a 20-thread pool;
    the checkpoint only advances in 1000-row steps and is written back on
    failure, so at most ~1000 rows are re-sent on retry.
    """
    import concurrent.futures
    cio_creds = Variable.get('cio_credentials', deserialize_json=True)
    start_row = task_instance.xcom_pull(task_ids=storage_task_name, key='progress')
    iter_records = read_s3_json_files('kite-metrics', task_instance.xcom_pull(task_ids='list_cio_json_files'))

    def iter():
        # Yield (row_index, backfill args, event payload) for each sendable row.
        for i, rec in enumerate(iter_records):
            if i <= start_row:
                continue
            if rec['event'] != 'kite_status':
                continue
            rec.update({
                'time': rec['end_time'],
                '_group': 'firehose/kite_status/{}/'.format(execution_date.strftime('%Y/%m/%d')),
                '_version': '1.0.0',
            })
            user_id = rec['userid']
            # Customer.io ids must be non-empty and ASCII-only.
            if not user_id or not all(ord(c) < 128 for c in user_id):
                continue
            name = event_names.get(rec['event'])
            if name is None:
                continue
            yield i, (user_id, name, rec['time']), rec

    def call_cio(item):
        # Runs on a worker thread; a fresh client per call avoids sharing
        # one HTTP session across threads.
        i, args, kwargs = item
        customerio.CustomerIO(cio_creds['site_id'], cio_creds['api_key']).backfill(*args, **kwargs)
        return i

    max_i = 0
    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
        try:
            # executor.map preserves input order, so ``i`` is monotonically
            # increasing here even though calls complete out of order.
            for i in executor.map(call_cio, iter()):
                if max_i > 0 and (i // 1000) > (max_i // 1000):
                    logger.info("Processed line {}".format(i))
                    dag_run.get_task_instance(storage_task_name).xcom_push(key='progress', value=max(max_i, i))
                max_i = max(max_i, i)
        except Exception:
            dag_run.get_task_instance(storage_task_name).xcom_push(key='progress', value=max_i)
            raise
# Build one aggregation pipeline per destination. ``group_by`` is the Athena
# expression used to group per-user daily rows (control characters stripped
# from the grouping id); ``downstreams`` lists (gate_with_skip, loader_fn)
# pairs — when gate_with_skip is true the loader is placed behind the
# skip_older short-circuit so old execution dates are not re-exported.
for key, group_by, downstreams in [
    ('mixpanel', 'regexp_replace(kite_metrics.kite_status_normalized.userId, \'\p{Cntrl}\')', [(False, load_athena_to_elastic), (True, load_athena_to_mixpanel)]),
    ('cio', 'regexp_replace(coalesce(kite_metrics.kite_status_normalized.properties__forgetful_metrics_id, kite_metrics.kite_status_normalized.userId), \'\p{Cntrl}\')', [(True, load_athena_to_cio)])
]:
    # Aggregate the normalized table into a per-run 1d table, then export it
    # as JSON. ``>>`` returns its right operand, so ``operator`` ends up
    # pointing at the last task of the chain.
    operator = insert_kite_status_normalized >> AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='insert_kite_status_1d_{}'.format(key),
        query='athena/queries/kite_status_1d.tmpl.sql',
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        params={
            'key': key,
            'group_by': group_by,
            'languages': kite_status_config['languages'],
            'editors': kite_status_config['editors'],
            'lexical_providers': kite_status_config['lexical_providers'],
            'python_providers': kite_status_config['python_providers']
        },
        dag=dag,
    ) >> AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='generate_{}_json'.format(key),
        query='athena/queries/kite_status_1d_json.tmpl.sql',
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        params={'key': key, 'languages': kite_status_config['languages']},
        dag=dag,
    )
    # Drop both per-run staging tables once the JSON export has run.
    operator >> AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='cleanup_{}_table_json'.format(key),
        query='DROP TABLE kite_status_1d_{{params.key}}_{{ds_nodash}}_json',
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        params={'key': key},
        dag=dag,
    )
    operator >> AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='cleanup_{}_table'.format(key),
        query='DROP TABLE kite_status_1d_{{params.key}}_{{ds_nodash}}',
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        params={'key': key},
        dag=dag,
    )
    # List the exported JSON files; the loaders pull this list via xcom.
    operator = operator >> S3ListOperator(
        aws_conn_id='aws_us_east_1',
        task_id='list_{}_json_files'.format(key),
        bucket='kite-metrics',
        prefix='athena/kite_status_1d_{{params.key}}/json/{{ds}}/',
        delimiter='/',
        params={'key': key},
        dag=dag,
    )

    def skip_older(execution_date, **ctx):
        # Only export recent runs (plus one historical backfill window).
        return execution_date >= MP_START_DATE or (datetime.datetime(2020, 5, 19) < execution_date < datetime.datetime(2020, 5, 26))

    skip_older_operator = ShortCircuitOperator(
        task_id='skip_older_{}'.format(key),
        python_callable=skip_older,
        dag=dag,
        provide_context=True
    )

    # Fixed: the loop variable was previously also named ``skip_older``,
    # shadowing the gate function defined just above.
    for gate_with_skip, downstream in downstreams:
        # Seed the resumable-progress checkpoint (see the loader functions).
        progress_operator = PythonOperator(
            python_callable=lambda ti, **kwargs: ti.xcom_push(key='progress', value=0),
            task_id='progress_storage_{}'.format(downstream.__name__),
            dag=dag,
            provide_context=True,
        )
        ds_operator = PythonOperator(
            python_callable=downstream,
            task_id=downstream.__name__,
            dag=dag,
            retries=4,
            provide_context=True,
            op_kwargs={'storage_task_name': 'progress_storage_{}'.format(downstream.__name__)}
        )
        if gate_with_skip:
            operator >> skip_older_operator >> progress_operator >> ds_operator
        else:
            operator >> progress_operator >> ds_operator

# Roll the day's normalized data into the activations table; depends_on_past
# because each day's insert builds on the previous day's state.
insert_kite_status_normalized >> AWSAthenaOperator(
    aws_conn_id='aws_us_east_1',
    task_id='update_activations_table',
    query='athena/queries/insert_activations.tmpl.sql',
    output_location='s3://kite-metrics-test/athena-results/ddl',
    database='kite_metrics',
    depends_on_past=True,
    dag=dag,
)
# Manually-triggered DAG (schedule_interval=None) to rebuild the Athena table
# definitions without running the daily load.
update_schema_dag = DAG(
    'update_kite_status_schema',
    default_args=default_args,
    description='Update the kite_status and kite_status_normalized schemas.',
    schedule_interval=None,
)

# Same drop/create pairs as the daily DAG, attached to update_schema_dag.
for table_name in ['kite_status', 'kite_status_segment', 'kite_status_normalized']:
    AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='drop_{}'.format(table_name),
        query='DROP TABLE {{params.table_name}}',
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        dag=update_schema_dag,
        params={'table_name': table_name},
    ) >> AWSAthenaOperator(
        aws_conn_id='aws_us_east_1',
        task_id='create_{}'.format(table_name),
        query='athena/tables/{}.tmpl.sql'.format(table_name),
        output_location='s3://kite-metrics-test/athena-results/ddl',
        database='kite_metrics',
        dag=update_schema_dag,
        params={'schema': kite_status_schema, 'table_name': table_name}
    )
| StarcoderdataPython |
165318 | import os
from whitenoise import WhiteNoise
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_backend.settings')
application = get_wsgi_application()
# Wrap with WhiteNoise so the frontend's built static assets are served
# directly from the WSGI layer (no separate static file server needed).
application = WhiteNoise(application, root='/frontend/build/static')
application.add_files('/static', prefix='more-files/')
| StarcoderdataPython |
3318251 | import google.protobuf.json_format as pbjson
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
import LabeledImage_pb2
import argparse
import deepracing.imutils as imutils
import torchvision
import torchvision.utils as tvutils
import torchvision.transforms.functional as tf
import torch
def loadLabelFiles(labeldir, imagefilename, count=60):
    """
    Load a consecutive run of control-label protobuf-JSON files.

    The starting index is parsed from ``imagefilename`` (expected form
    ``image_<N>...``, e.g. ``image_36_sequence_label``). ``count`` labels
    beginning at ``N`` are read from ``labeldir``, each from a file named
    ``image_<i>_control_label.json``.

    Returns a list of ``LabeledImage_pb2.LabeledImage`` messages.
    ``count`` defaults to 60 to preserve the original behavior.
    """
    labelstart = int(imagefilename.split("_")[1])
    labels = []
    for i in range(labelstart, labelstart + count):
        label = LabeledImage_pb2.LabeledImage()
        # e.g. <labeldir>/image_36_control_label.json
        fp = os.path.join(labeldir, "image_%d_control_label.json" % (i,))
        # ``with`` guarantees the file handle is closed (the original leaked
        # the handle until garbage collection).
        with open(fp, 'r') as f:
            pbjson.Parse(f.read(), label)
        labels.append(label)
    return labels
def show(img):
    """Display a CHW image tensor with matplotlib (converted to HWC layout)."""
    hwc = img.numpy().transpose(1, 2, 0)
    plt.imshow(hwc, interpolation='nearest')
#D:\f1_training_data\trent_solo_4\pose_sequence_labels\image_36_sequence_label.json D:\f1_training_data\trent_solo\pose_sequence_labels\image_65_sequence_label.json
# are strong candidates

# Compare the steering-label sequences of two training images side by side:
# plot both 60-step steering curves and show the two (cropped, resized)
# images in a grid.
parser = argparse.ArgumentParser()
# NOTE(review): despite the argument names, these are *image* file paths; the
# label directory is derived from them below.
parser.add_argument("label1", help="First image file", type=str)
parser.add_argument("label2", help="Second image file", type=str)
args = parser.parse_args()
image1path = args.label1
image2path = args.label2
image1 = tf.to_tensor(imutils.readImage(image1path))
image2 = tf.to_tensor(imutils.readImage(image2path))
# Labels live in <dataset_root>/steering_labels next to the image directory.
label1steeringdir = os.path.join(os.path.dirname(os.path.dirname(image1path)),"steering_labels")
label2steeringdir = os.path.join(os.path.dirname(os.path.dirname(image2path)),"steering_labels")
image1name = os.path.splitext(os.path.basename(image1path))[0]
image2name = os.path.splitext(os.path.basename(image2path))[0]
labels1 = loadLabelFiles(label1steeringdir, image1name)
labels2 = loadLabelFiles(label2steeringdir, image2name)
steering1 = [l.label.steering for l in labels1]
steering2 = [l.label.steering for l in labels2]
# Crop the top 32 rows (tensors are CHW), then downscale image1 to half
# height and resize image2 to match image1's new size.
image1 = image1[:,32:,:]
image2 = image2[:,32:,:]
image1 = tf.to_tensor(tf.resize(tf.to_pil_image(image1), int(0.5*image1.shape[1])))
image2 = tf.to_tensor(tf.resize(tf.to_pil_image(image2), image1.shape[1:]))
fig1 = plt.figure()
# 60 label steps spread over 1.42 seconds — TODO confirm this matches the
# capture rate of the dataset.
t = np.linspace(0,1.42,60)
plt.plot(t, steering1)
plt.plot(t, steering2)
plt.xlabel("Time (Seconds)")
plt.ylabel("Normalized Steering Angles [-1, 1]")
fig2 = plt.figure()
imagegrid = tvutils.make_grid([image1, image2], nrow=1)
show(imagegrid)
plt.show()
146457 | <reponame>LivingLogic/LivingApps.Python.LivingAPI
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# cython: language_level=3, always_allow_keywords=True
## Copyright 2016-2020 by LivingLogic AG, Bayreuth/Germany
##
## All Rights Reserved
"""
vSQL is a subset of UL4 expressions retargeted for generating SQL expressions
used in SQL queries. Currently only Oracle is supported.
This module contains classes and functions for generating and compiling
vSQL expressions.
A vSQL expression can be generated in two ways:
* By directly constructing a vSQL expression via the class method :meth:`make`
of the various :class:`AST` subclasses. For example a vSQL expression for
``"foo".lower() + "bar".upper()`` can be constructed like this::
vsql.AddAST.make(
vsql.MethAST.make(
vsql.StrAST.make("foo"),
"lower",
),
vsql.MethAST.make(
vsql.StrAST.make("bar"),
"upper",
),
)
* By compiling the appropriate UL4/vSQL source code into an :class:`AST` object.
So ``"foo".lower() + "bar".upper()`` can be compiled like this::
vsql.AST.fromsource("'foo'.lower() + 'bar'.upper()")
"""
import sys, datetime, itertools, re, pathlib
from ll import color, misc, ul4c, ul4on
# ``orasql`` requires the Oracle client bindings and is optional; code that
# needs it must check for ``orasql is None``.
try:
    from ll import orasql
except ImportError:
    orasql = None
###
### Typing stuff
###
from typing import *
# Shorthand type aliases used throughout this module.
T_AST_Content = Union["AST", str]  # an AST child node or a literal SQL snippet
T_opt_str = Optional[str]
T_opt_int = Optional[int]
T_opt_ast = Optional["AST"]
T_sortdirection = Union[None, Literal["asc", "desc"]]  # "order by" direction
T_sortnulls = Union[None, Literal["first", "last"]]  # "order by ... nulls" placement
def T_gen(item_type):
	"""
	Return the :class:`~typing.Generator` type yielding ``item_type`` items
	(with no send or return values).

	The parameter was renamed from ``type`` to avoid shadowing the builtin
	(all call sites pass it positionally).
	"""
	return Generator[item_type, None, None]
###
### Global configurations
###
# Name of the running script, used when recording who created a rule row.
scriptname = misc.sysinfo.short_script_name


###
### Fields for the table ``VSQLRULE``
###

# Column name -> Python type of the ``VSQLRULE`` table. The alternating
# ``vr_literal*``/``vr_child*`` columns hold a rule's source template:
# literal SQL snippets interleaved with child-node slots.
fields = dict(
	vr_nodetype=str,
	vr_value=T_opt_str,
	vr_result=str,
	vr_signature=T_opt_str,
	vr_arity=int,
	vr_literal1=T_opt_str,
	vr_child2=T_opt_int,
	vr_literal3=T_opt_str,
	vr_child4=T_opt_int,
	vr_literal5=T_opt_str,
	vr_child6=T_opt_int,
	vr_literal7=T_opt_str,
	vr_child8=T_opt_int,
	vr_literal9=T_opt_str,
	vr_child10=T_opt_int,
	vr_literal11=T_opt_str,
	vr_child12=T_opt_int,
	vr_literal13=T_opt_str,
	vr_cname=str,
	vr_cdate=datetime.datetime,
)
###
### Helper functions and classes
###
class sqlliteral(str):
	"""
	Marker class that can be used to specify that its value should be treated
	as literal SQL (i.e. :func:`sql` emits it verbatim instead of quoting it
	as a string literal).
	"""
	pass
def sql(value:Any) -> str:
	"""
	Return an Oracle SQL literal for the Python value ``value``.

	Supported types are ``None``, :class:`sqlliteral` (emitted verbatim),
	:class:`bool`, :class:`int`, :class:`datetime.datetime` and :class:`str`;
	anything else raises :exc:`TypeError`. An empty string maps to ``null``
	(matching Oracle's treatment of ``''``).
	"""
	if value is None:
		return "null"
	elif isinstance(value, sqlliteral):
		return str(value)
	elif isinstance(value, bool):
		# Fixed: must be checked before ``int`` (``bool`` is an ``int``
		# subclass) — ``str(True)`` would have produced invalid SQL.
		return "1" if value else "0"
	elif isinstance(value, int):
		return str(value)
	elif isinstance(value, datetime.datetime):
		return f"to_date('{value:%Y-%m-%d %H:%M:%S}', 'YYYY-MM-DD HH24:MI:SS')"
	elif isinstance(value, str):
		if value:
			value = value.replace("'", "''")
			return f"'{value}'"
		else:
			return "null"
	else:
		raise TypeError(f"unknown type {type(value)!r}")
class Repr:
	"""
	Base class that provides functionality for implementing :meth:`__repr__`
	and :meth:`_repr_pretty_` (used by IPython).

	Subclasses override the ``_ll_repr_*`` hooks; the repr is assembled as
	``<prefix part part ... suffix>``.
	"""

	def _ll_repr_prefix_(self) -> str:
		"""
		Return the initial part of the :meth:`__repr__` and :meth:`_repr_pretty_`
		output (without the initial ``"<"``).
		"""
		return f"{self.__class__.__module__}.{self.__class__.__qualname__}"

	def _ll_repr_suffix_(self) -> str:
		"""
		Return the final part of the :meth:`__repr__` and :meth:`_repr_pretty_`
		output (without the final ``">"``).
		"""
		return f"at {id(self):#x}"

	def __repr__(self) -> str:
		parts = itertools.chain(
			(f"<{self._ll_repr_prefix_()}",),
			self._ll_repr_(),
			(f"{self._ll_repr_suffix_()}>",),
		)
		return " ".join(parts)

	def _ll_repr_(self) -> T_gen(str):
		"""
		Each string produced by :meth:`!_ll_repr_` will be part of the
		:meth:`__repr__` output (joined by spaces).
		"""
		yield from ()

	def _repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter", cycle:bool) -> None:
		# ``cycle`` is true when IPython detects a reference cycle; emit an
		# abbreviated repr to avoid infinite recursion.
		if cycle:
			p.text(f"{self._ll_repr_prefix_()} ... {self._ll_repr_suffix_()}>")
		else:
			with p.group(3, f"<{self._ll_repr_prefix_()}", ">"):
				self._ll_repr_pretty_(p)
				p.breakable()
				p.text(self._ll_repr_suffix_())

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		"""
		Implement the body of the :meth:`_repr_pretty_` method.

		This means that the cycle detection and :meth:`group` call have already
		been done.
		"""
		pass
class DataType(misc.Enum):
	"""
	The datatypes supported in vSQL expressions.
	"""

	NULL = "null"
	BOOL = "bool"
	INT = "int"
	NUMBER = "number"
	STR = "str"
	CLOB = "clob"
	COLOR = "color"
	GEO = "geo"
	DATE = "date"
	DATETIME = "datetime"
	DATEDELTA = "datedelta"
	DATETIMEDELTA = "datetimedelta"
	MONTHDELTA = "monthdelta"
	NULLLIST = "nulllist"
	INTLIST = "intlist"
	NUMBERLIST = "numberlist"
	STRLIST = "strlist"
	CLOBLIST = "cloblist"
	DATELIST = "datelist"
	DATETIMELIST = "datetimelist"
	NULLSET = "nullset"
	INTSET = "intset"
	NUMBERSET = "numberset"
	STRSET = "strset"
	DATESET = "dateset"
	DATETIMESET = "datetimeset"

	@classmethod
	def compatible_to(cls, given:"DataType", required:"DataType") -> Optional["Error"]:
		"""
		Check whether the type ``given`` is compatible to ``required``.

		If ``required`` is ``None`` every ``given`` type is accepted. Otherwise
		the types must be compatible (for example ``DataType.INT`` is compatible
		to ``DataType.NUMBER``, but not the other way around). Every type is
		compatible to itself.

		If ``given`` is not compatible to ``required`` the appropriate error value
		is returned, otherwise ``None`` is returned.
		"""
		# If we have no requirement for the datatype the given one is OK.
		if required is None:
			return None
		# ``NULL`` is compatible with everything
		elif given is DataType.NULL:
			return None
		# perfect match
		elif given is required:
			return None
		# some type of string
		elif required in {DataType.STR, DataType.CLOB} and given in {DataType.STR, DataType.CLOB}:
			return None
		# bool and int can be used for numbers
		elif required is DataType.NUMBER and given in {DataType.BOOL, DataType.INT, DataType.NUMBER}:
			return None
		# bool can be used for ints
		elif required is DataType.INT and given in {DataType.BOOL, DataType.INT}:
			return None
		# intlist can be used for numberlist
		elif required is DataType.NUMBERLIST and given in {DataType.INTLIST, DataType.NUMBERLIST}:
			return None
		# datetimelist can be used for datelist
		# (fixed: the given-set erroneously contained ``INTLIST``, which would
		# have accepted an intlist where a datelist is required)
		elif required is DataType.DATELIST and given in {DataType.DATELIST, DataType.DATETIMELIST}:
			return None
		# intset can be used for numberset
		elif required is DataType.NUMBERSET and given in {DataType.INTSET, DataType.NUMBERSET}:
			return None
		# datetimeset can be used for dateset
		# (fixed: the given-set erroneously contained ``INTSET``)
		elif required is DataType.DATESET and given in {DataType.DATESET, DataType.DATETIMESET}:
			return None
		# nulllist can be used as any list
		elif required in {DataType.INTLIST, DataType.NUMBERLIST, DataType.STRLIST, DataType.CLOBLIST, DataType.DATELIST, DataType.DATETIMELIST} and given is DataType.NULLLIST:
			return None
		# nullset can be used as any set
		# (fixed: ``DataType.NULSET`` was a typo for ``NULLSET`` and would have
		# raised AttributeError whenever this branch was reached)
		elif required in {DataType.INTSET, DataType.NUMBERSET, DataType.STRSET, DataType.DATESET, DataType.DATETIMESET} and given is DataType.NULLSET:
			return None
		else:
			return Error[f"DATATYPE_{required.name}"]
class NodeType(misc.Enum):
	"""
	The different types of vSQL abstract syntax tree nodes.

	This corresponds to the different subclasses of :class:`AST`.
	"""

	# Field references and constant literals
	FIELD = "field"
	CONST_NONE = "const_none"
	CONST_BOOL = "const_bool"
	CONST_INT = "const_int"
	CONST_NUMBER = "const_number"
	CONST_STR = "const_str"
	CONST_CLOB = "const_clob"
	CONST_DATE = "const_date"
	CONST_DATETIME = "const_datetime"
	CONST_TIMESTAMP = "const_timestamp"
	CONST_COLOR = "const_color"
	# Container displays
	LIST = "list"
	SET = "set"
	# Comparison operators
	CMP_EQ = "cmp_eq"
	CMP_NE = "cmp_ne"
	CMP_LT = "cmp_lt"
	CMP_LE = "cmp_le"
	CMP_GT = "cmp_gt"
	CMP_GE = "cmp_ge"
	# Binary operators
	BINOP_ADD = "binop_add"
	BINOP_MUL = "binop_mul"
	BINOP_SUB = "binop_sub"
	BINOP_FLOORDIV = "binop_floordiv"
	BINOP_TRUEDIV = "binop_truediv"
	BINOP_MOD = "binop_mod"
	BINOP_AND = "binop_and"
	BINOP_OR = "binop_or"
	BINOP_CONTAINS = "binop_contains"
	BINOP_NOTCONTAINS = "binop_notcontains"
	BINOP_IS = "binop_is"
	BINOP_ISNOT = "binop_isnot"
	BINOP_ITEM = "binop_item"
	BINOP_SHIFTLEFT = "binop_shiftleft"
	BINOP_SHIFTRIGHT = "binop_shiftright"
	BINOP_BITAND = "binop_bitand"
	BINOP_BITOR = "binop_bitor"
	BINOP_BITXOR = "binop_bitxor"
	# Ternary/unary operators
	TERNOP_SLICE = "ternop_slice"
	UNOP_NOT = "unop_not"
	UNOP_NEG = "unop_neg"
	UNOP_BITNOT = "unop_bitnot"
	TERNOP_IF = "ternop_if"
	# Attribute access, function and method calls
	ATTR = "attr"
	FUNC = "func"
	METH = "meth"
class Error(misc.Enum):
	"""
	The types of errors that can lead to invalid vSQL AST nodes.

	Note that some of those can not be produced by the Python implementation.
	"""

	SUBNODEERROR = "subnodeerror" # Subnodes are invalid
	NODETYPE = "nodetype" # Unknown node type (not any of the ``NODETYPE_...`` values from above
	ARITY = "arity" # Node does not have the required number of children
	SUBNODETYPES = "subnodetypes" # Subnodes have a combination of types that are not supported by the node
	FIELD = "field" # ``NODETYPE_FIELD`` nodes references an unknown field
	CONST_BOOL = "const_bool" # ``NODETYPE_CONST_BOOL`` value is ``null`` or malformed
	CONST_INT = "const_int" # ``NODETYPE_CONST_INT`` value is ``null`` or malformed
	CONST_NUMBER = "const_number" # ``NODETYPE_CONST_NUMBER`` value is ``null`` or malformed
	CONST_DATE = "const_date" # ``NODETYPE_CONST_DATE`` value is ``null`` or malformed
	CONST_DATETIME = "const_datetime" # ``NODETYPE_CONST_DATETIME`` value is ``null`` or malformed
	CONST_TIMESTAMP = "const_timestamp" # ``NODETYPE_CONST_TIMESTAMP`` value is ``null`` or malformed
	CONST_COLOR = "const_color" # ``NODETYPE_CONST_COLOR`` value is ``null`` or malformed
	NAME = "name" # Attribute/Function/Method is unknown
	LISTTYPEUNKNOWN = "listtypeunknown" # List is empty or only has literal ``None``s as items, so the type can't be determined
	LISTMIXEDTYPES = "listmixedtypes" # List items have incompatible types, so the type can't be determined
	LISTUNSUPPORTEDTYPES = "listunsupportedtypes" # List items have unsupported types, so the type can't be determined
	SETTYPEUNKNOWN = "settypeunknown" # Set is empty or only has literal ``None``s as items, so the type can't be determined
	SETMIXEDTYPES = "setmixedtypes" # Set items have incompatible types, so the type can't be determined
	SETUNSUPPORTEDTYPES = "setunsupportedtypes" # Set items have unsupported types, so the type can't be determined
	# One ``DATATYPE_*`` member per :class:`DataType` member; returned by
	# :meth:`DataType.compatible_to` when the given type doesn't match.
	DATATYPE_NULL = "datatype_null" # The datatype of the node should be ``null`` but isn't
	DATATYPE_BOOL = "datatype_bool" # The datatype of the node should be ``bool`` but isn't
	DATATYPE_INT = "datatype_int" # The datatype of the node should be ``int`` but isn't
	DATATYPE_NUMBER = "datatype_number" # The datatype of the node should be ``number`` but isn't
	DATATYPE_STR = "datatype_str" # The datatype of the node should be ``str`` but isn't
	DATATYPE_CLOB = "datatype_clob" # The datatype of the node should be ``clob`` but isn't
	DATATYPE_COLOR = "datatype_color" # The datatype of the node should be ``color`` but isn't
	DATATYPE_DATE = "datatype_date" # The datatype of the node should be ``date`` but isn't
	DATATYPE_DATETIME = "datatype_datetime" # The datatype of the node should be ``datetime`` but isn't
	DATATYPE_DATEDELTA = "datatype_datedelta" # The datatype of the node should be ``datedelta`` but isn't
	DATATYPE_DATETIMEDELTA = "datatype_datetimedelta" # The datatype of the node should be ``datetimedelta`` but isn't
	DATATYPE_MONTHDELTA = "datatype_monthdelta" # The datatype of the node should be ``monthdelta`` but isn't
	DATATYPE_NULLLIST = "datatype_nulllist" # The datatype of the node should be ``nulllist`` but isn't
	DATATYPE_INTLIST = "datatype_intlist" # The datatype of the node should be ``intlist`` but isn't
	DATATYPE_NUMBERLIST = "datatype_numberlist" # The datatype of the node should be ``numberlist`` but isn't
	DATATYPE_STRLIST = "datatype_strlist" # The datatype of the node should be ``strlist`` but isn't
	DATATYPE_CLOBLIST = "datatype_cloblist" # The datatype of the node should be ``cloblist`` but isn't
	DATATYPE_DATELIST = "datatype_datelist" # The datatype of the node should be ``datelist`` but isn't
	DATATYPE_DATETIMELIST = "datatype_datetimelist" # The datatype of the node should be ``datetimelist`` but isn't
	DATATYPE_NULLSET = "datatype_nullset" # The datatype of the node should be ``nullset`` but isn't
	DATATYPE_INTSET = "datatype_intset" # The datatype of the node should be ``intset`` but isn't
	DATATYPE_NUMBERSET = "datatype_numberset" # The datatype of the node should be ``numberset`` but isn't
	DATATYPE_STRSET = "datatype_strset" # The datatype of the node should be ``strset`` but isn't
	DATATYPE_DATESET = "datatype_dateset" # The datatype of the node should be ``dateset`` but isn't
	DATATYPE_DATETIMESET = "datatype_datetimeset" # The datatype of the node should be ``datetimeset`` but isn't
@ul4on.register("de.livinglogic.vsql.field")
class Field(Repr):
"""
A :class:`!Field` object describes a database field.
This field is either in a database table or view or a global package variable.
As a table or view field it belongs to a :class:`Group` object.
"""
def __init__(self, identifier:T_opt_str=None, datatype:DataType=DataType.NULL, fieldsql:T_opt_str=None, joinsql:T_opt_str=None, refgroup:Optional["Group"]=None):
self.identifier = identifier
self.datatype = datatype
self.fieldsql = fieldsql
self.joinsql = joinsql
self.refgroup = refgroup
def _ll_repr_(self) -> T_gen(str):
yield f"identifier={self.identifier!r}"
if self.datatype is not None:
yield f"datatype={self.datatype.name}"
if self.fieldsql is not None:
yield f"fieldsql={self.fieldsql!r}"
if self.joinsql is not None:
yield f"joinsql={self.joinsql!r}"
if self.refgroup is not None:
yield f"refgroup.tablesql={self.refgroup.tablesql!r}"
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
p.breakable()
p.text("identifier=")
p.pretty(self.identifier)
if self.datatype is not None:
p.breakable()
p.text(f"datatype={self.datatype.name}")
if self.fieldsql is not None:
p.breakable()
p.text("fieldsql=")
p.pretty(self.fieldsql)
if self.joinsql is not None:
p.breakable()
p.text("joinsql=")
p.pretty(self.joinsql)
if self.refgroup is not None:
p.breakable()
p.text("refgroup.tablesql=")
p.pretty(self.refgroup.tablesql)
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
encoder.dump(self.identifier)
encoder.dump(self.datatype.value if self.datatype is not None else None)
encoder.dump(self.fieldsql)
encoder.dump(self.joinsql)
encoder.dump(self.refgroup)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
self.identifier = decoder.load()
datatype = decoder.load()
self.datatype = DataType(datatype) if datatype is not None else None
self.fieldsql = decoder.load()
self.joinsql = decoder.load()
self.refgroup = decoder.load()
@ul4on.register("de.livinglogic.vsql.group")
class Group(Repr):
"""
A :class:`!Group` object describes a group of database fields.
These fields are part of a database table or view and are instances of
:class:`Field`.
"""
def __init__(self, tablesql:T_opt_str=None, **fields:Union["Field", Tuple[DataType, str], Tuple[DataType, str, str, "Group"]]):
self.tablesql = tablesql
self.fields = {}
for (fieldname, fielddata) in fields.items():
if not isinstance(fielddata, Field):
fielddata = Field(fieldname, *fielddata)
self.fields[fieldname] = fielddata
def _ll_repr_(self) -> T_gen(str):
yield f"tablesql={self.tablesql!r}"
yield f"with {len(self.fields):,} fields"
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
p.breakable()
p.text("tablesql=")
p.pretty(self.tablesql)
def __getitem__(self, key:str) -> "Field":
if key in self.fields:
return self.fields[key]
elif "*" in self.fields:
return self.fields["*"]
else:
raise KeyError(key)
def add_field(self, identifier:str, datatype:DataType, fieldsql:str, joinsql:T_opt_str=None, refgroup:Optional["Group"]=None) -> None:
field = Field(identifier, datatype, fieldsql, joinsql, refgroup)
self.fields[identifier] = field
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
encoder.dump(self.tablesql)
encoder.dump(self.fields)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
self.tablesql = decoder.load()
self.fields = decoder.load()
class Query(Repr):
	"""
	A :class:`!Query` object can be used to build an SQL query using vSQL expressions.
	"""

	def __init__(self, comment:T_opt_str=None, **vars:"Field"):
		"""
		Create a new empty :class:`!Query` object.

		Arguments are:

		``comment`` : :class:`str` or ``None``
			A comment that will be included in the generated SQL.
			Note that the comment text may not include ``/*`` or ``*/``.

		``vars`` : :class:`Field`
			These are the top level variables that will be available for vSQL
			expressions added to this query. The argument name is the name of
			the variable. The argument value is a :class:`Field` object that
			describes this variable.
		"""
		self.comment = comment
		self.vars = vars
		# The following dicts are keyed by the generated SQL source so that
		# repeated expressions/joins are deduplicated automatically.
		self._fields : Dict[str, "AST"] = {}
		self._from : Dict[str, "AST"] = {}
		self._where : Dict[str, "AST"] = {}
		self._orderby : List[Tuple[str, "AST", T_opt_str, T_opt_str]] = []
		# Maps a field reference's full identifier to its table alias (t1, t2, ...).
		self._identifier_aliases : Dict[str, str] = {}

	def _vsql_register(self, fieldref:"FieldRefAST") -> T_opt_str:
		"""
		Register the field reference ``fieldref``: recursively register its
		parent chain, add the required tables to the "from" list and the join
		conditions to the "where" list. Returns the table alias for the
		reference (or ``None`` for global variables / table-less fields).
		"""
		if fieldref.error is not None:
			return # Don't register broken expressions
		if fieldref.parent is None:
			# No need to register anything as this is a "global variable".
			# Also we don't need a table alias to access this field.
			return None
		identifier = fieldref.parent.full_identifier
		if identifier in self._identifier_aliases:
			alias = self._identifier_aliases[identifier]
			return alias
		alias = self._vsql_register(fieldref.parent)
		newalias = f"t{len(self._from)+1}"
		joincond = fieldref.parent.field.joinsql
		if joincond is not None:
			# Only add to "where" if the join condition is not empty.
			# ``{m}`` is replaced by the master (parent) alias, ``{d}`` by the
			# detail (new) alias.
			if alias is not None:
				joincond = joincond.replace("{m}", alias)
			joincond = joincond.replace("{d}", newalias)
			self._where[joincond] = fieldref.parent
		if fieldref.parent.field.refgroup.tablesql is None:
			# If this field is not part of a table (which can happen e.g. for
			# the request parameters, which we get from function calls),
			# we don't add the table aliases to the list of table aliases
			# and we don't add a table to the "from" list.
			return None
		self._identifier_aliases[identifier] = newalias
		self._from[f"{fieldref.parent.field.refgroup.tablesql} {newalias}"] = fieldref.parent
		return newalias

	def _vsql(self, expr:str) -> "AST":
		"""
		Compile the vSQL source ``expr`` against this query's variables and
		register every field reference it contains. Returns the compiled AST.
		"""
		expr = AST.fromsource(expr, **self.vars)
		for fieldref in expr.fieldrefs():
			self._vsql_register(fieldref)
		return expr

	def select(self, *exprs:str) -> "Query":
		"""Add vSQL expressions to the select list; returns ``self`` for chaining."""
		for expr in exprs:
			expr = self._vsql(expr)
			sqlsource = expr.sqlsource(self)
			if sqlsource not in self._fields:
				self._fields[sqlsource] = expr
		return self

	def where(self, *exprs:str) -> "Query":
		"""
		Add vSQL conditions to the "where" clause (combined with ``and``);
		returns ``self`` for chaining. Non-bool expressions are wrapped in
		``bool()`` and compared against 1 (vSQL booleans are 0/1 in SQL).
		"""
		for expr in exprs:
			expr = self._vsql(expr)
			if expr.datatype is not DataType.BOOL:
				expr = FuncAST.make("bool", expr)
			sqlsource = expr.sqlsource(self)
			sqlsource = f"{sqlsource} = 1"
			if sqlsource not in self._where:
				self._where[sqlsource] = expr
		return self

	def orderby(self, expr:str, direction:T_sortdirection=None, nulls:T_sortnulls=None) -> "Query":
		r"""
		Add an "order by" specification to this query.

		"order by" specifications will be output in the query in the order they
		have been added.

		Arguments are:

		``expr`` : :class:`str`
			vSQL expression to be sorted by

		``direction`` : ``None``, ``"asc"`` or ``"desc"``
			Sort in ascending order (``"asc"``) or descending order (``"desc"``).
			The default ``None`` adds neither ``asc`` nor ``desc`` (which is
			equivalent to ``asc``).

		``nulls`` : ``None``, ``"first"`` or ``"last"``
			Where to sort ``null`` values (emits ``nulls first``/``nulls last``);
			the default ``None`` emits neither.

		Example::

			>>> from ll import la
			>>> from ll.la import vsql
			>>> q = vsql.Query("Example query", user=la.User.vsqlfield())
			>>> q.select("user.email") \
			... .orderby("user.firstname", "asc") \
			... .orderby("user.surname", "desc")
			>>> print(q.sqlsource())
			/* Example query */
			select
				t1.ide_account /* user.email */
			from
				identity t1 /* user */
			where
				livingapi_pkg.global_user = t1.ide_id(+) /* user */
			order by
				t1.ide_firstname /* user.firstname */ asc,
				t1.ide_surname /* user.surname */ desc
		"""
		expr = self._vsql(expr)
		sqlsource = expr.sqlsource(self)
		self._orderby.append((sqlsource, expr, direction, nulls))
		return self

	def sqlsource(self, indent="\t") -> str:
		"""
		Return the SQL source of the query.

		``indent`` is the indentation string per nesting level; when falsy the
		query is emitted on a single line with space separators.
		"""
		# The query is assembled as a flat token stream first:
		# strings are output, ``None`` means "newline", ints adjust the
		# indentation level.
		tokens = []

		def a(*parts):
			tokens.extend(parts)

		def s(sqlsource, expr):
			# Emit an SQL fragment followed by a comment with its vSQL source
			# (unless the fragment already ends with that comment).
			tokens.append(sqlsource)
			vsqlsource = f" /* {expr.source()} */"
			if not sqlsource.endswith(vsqlsource):
				tokens.append(vsqlsource)

		if self.comment:
			a("/* ", self.comment, " */", None)
		a("select", None, +1)
		if self._fields:
			for (i, (field, expr)) in enumerate(self._fields.items()):
				if i:
					a(",", None)
				s(field, expr)
		else:
			# No select list: emit a constant so the query stays valid.
			a("42")
		a(None, -1)
		a("from", None, +1)
		if self._from:
			for (i, (table, expr)) in enumerate(self._from.items()):
				if i:
					a(",", None)
				s(table, expr)
			a(None, -1)
		else:
			a("dual", None, -1)
		if self._where:
			a("where", None, +1)
			for (i, (where, expr)) in enumerate(self._where.items()):
				if i:
					a(" and", None)
				s(where, expr)
			a(None, -1)
		if self._orderby:
			a("order by", None, +1)
			for (i, (sqlsource, expr, direction, nulls)) in enumerate(self._orderby):
				if i:
					a(",", None)
				s(sqlsource, expr)
				if direction:
					a(" ", direction)
				if nulls:
					a(" nulls ", nulls)
			a(None, -1)
		# Render the token stream into the final source string.
		source = []
		first = True
		level = 0
		for part in tokens:
			if part is None:
				if indent:
					source.append("\n")
				first = True
			elif isinstance(part, int):
				level += part
			else:
				if first:
					if indent:
						source.append(level*indent)
				else:
					source.append(" ")
				source.append(part)
				first = False
		return "".join(source)
class Rule(Repr):
	"""
	A :class:`!Rule` describes one allowed operand type combination for a vSQL
	AST node type (e.g. "an ``INT`` can be added to an ``INT``, giving an
	``INT``") together with the template for the SQL source generated for it.
	"""

	# Matches the placeholders ``{s1}``/``{t1}`` etc. in a source template.
	_re_specials = re.compile(r"{([st])(\d)}")
	# Matches the separator runs between the words of a rule spec.
	_re_sep = re.compile(r"\W+")

	# Mappings of datatypes to other datatypes for creating the SQL source
	source_aliases = {
		"bool": "int",
		"date": "datetime",
		"datelist": "datetimelist",
		"datetimelist": "datetimelist", # NOTE(review): identity mapping — appears redundant
		"intset": "intlist",
		"numberset": "numberlist",
		"strset": "strlist",
		"dateset": "datetimelist",
		"datetimeset": "datetimelist",
	}

	def __init__(self, astcls, result, name, key, signature, source):
		# The :class:`AST` subclass this rule belongs to.
		self.astcls = astcls
		# The result :class:`DataType`.
		self.result = result
		# Function/method/attribute name (or ``None``).
		self.name = name
		# Lookup key: operand types plus the name (if any).
		self.key = key
		# The operand :class:`DataType`\\s only (without the name).
		self.signature = signature
		# Parsed source template: tuple of SQL fragments and 1-based operand indexes.
		self.source = self._parse_source(signature, source)

	def _key(self) -> str:
		# Format the lookup key for ``repr`` output.
		key = ", ".join(p.name if isinstance(p, DataType) else repr(p) for p in self.key)
		return f"({key})"

	def _signature(self):
		# Format the signature for ``repr`` output.
		signature = ", ".join(p.name for p in self.signature)
		return f"({signature})"

	def _ll_repr_(self) -> T_gen(str):
		yield f"nodetype={self.astcls.nodetype.name}"
		yield f"result={self.result.name}"
		if self.name is not None:
			yield f"name={self.name!r}"
		yield f"key={self._key()}"
		yield f"signature={self._signature()}"
		yield f"source={self.source}"

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		p.breakable()
		p.text("result=")
		p.text(self.result.name)
		if self.name is not None:
			p.breakable()
			p.text("name=")
			p.pretty(self.name)
		p.breakable()
		p.text("signature=")
		p.text(self._signature())
		p.breakable()
		p.text("key=")
		p.text(self._key())
		p.breakable()
		p.text("source=")
		p.pretty(self.source)

	@classmethod
	def _parse_source(cls, signature:Tuple[DataType, ...], source:str) -> Tuple[Union[int, str], ...]:
		"""
		Parse the source template ``source`` into a tuple of literal SQL
		fragments (:class:`str`) and 1-based operand references (:class:`int`).

		``{sN}`` placeholders become the int ``N``; ``{tN}`` placeholders are
		replaced by the (lowercased, aliased) name of the ``N``-th signature
		type; adjacent literal fragments are merged.
		"""
		final_source = []

		def append(text):
			# Merge adjacent literal text into one fragment.
			if final_source and isinstance(final_source[-1], str):
				final_source[-1] += text
			else:
				final_source.append(text)

		pos = 0
		for match in cls._re_specials.finditer(source):
			if match.start() != pos:
				append(source[pos:match.start()])
			sigpos = int(match.group(2))
			if match.group(1) == "s":
				# ``{sN}``: operand reference.
				final_source.append(sigpos)
			else:
				# ``{tN}``: embed the (aliased) type name.
				type = signature[sigpos-1].name.lower()
				type = cls.source_aliases.get(type, type)
				append(type)
			pos = match.end()
		if pos != len(source):
			append(source[pos:])
		return tuple(final_source)

	def java_source(self) -> str:
		"""
		Return Java source code registering this rule (for the Java implementation).
		"""
		key = ", ".join(
			f"VSQLDataType.{p.name}" if isinstance(p, DataType) else misc.javaexpr(p)
			for p in self.key
		)
		return f"addRule(rules, VSQLDataType.{self.result.name}, {key});"

	def oracle_fields(self) -> Dict[str, Union[int, str, sqlliteral]]:
		"""
		Return the fields for inserting this rule into the ``VSQLRULE`` table
		as a dict mapping column name to value.
		"""
		fields = {}
		fields["vr_nodetype"] = self.astcls.nodetype.value
		fields["vr_value"] = self.name
		fields["vr_result"] = self.result.value
		fields["vr_signature"] = " ".join(p.value for p in self.signature)
		fields["vr_arity"] = len(self.signature)
		# The parsed source alternates between literal SQL fragments and child
		# references; distribute it over the ``vr_literal``/``vr_child`` columns.
		wantlit = True
		index = 1
		for part in self.source:
			if wantlit:
				if isinstance(part, int):
					index += 1 # skip this field
					# NOTE(review): a child encountered where a literal was
					# expected is stored at the *incremented* index — confirm
					# this matches the VSQLRULE column layout.
					fields[f"vr_child{index}"] = part
				else:
					fields[f"vr_literal{index}"] = part
				wantlit = False
			else:
				if isinstance(part, int):
					fields[f"vr_child{index}"] = part
				else:
					# Two literal fragments in a row can't happen (they get
					# merged by :meth:`_parse_source`).
					raise ValueError("two children")
				wantlit = True
				index += 1
		fields["vr_cdate"] = sqlliteral("sysdate")
		fields["vr_cname"] = sqlliteral("c_user")
		return fields

	def oracle_source(self) -> str:
		"""
		Return an SQL ``insert`` statement that stores this rule in ``VSQLRULE``.
		"""
		fieldnames = []
		fieldvalues = []
		for (fieldname, fieldvalue) in self.oracle_fields().items():
			fieldvalue = sql(fieldvalue)
			# Skip ``null`` columns.
			if fieldvalue != "null":
				fieldnames.append(fieldname)
				fieldvalues.append(fieldvalue)
		fieldnames = ", ".join(fieldnames)
		fieldvalues = ", ".join(fieldvalues)
		return f"insert into vsqlrule ({fieldnames}) values ({fieldvalues});"
###
### Classes for all vSQL abstract syntax tree node types
###
class AST(Repr):
	"""
	Base class of all vSQL abstract syntax tree node types.
	"""

	nodetype = None
	"""
	Type of the node. There's a one-to-one correspondence between :class:`AST`
	subclasses and :class:`NodeType` values (except for intermediate classes
	like :class:`BinaryAST`)
	"""

	nodevalue = None
	"""
	The node value is an instance attribute that represents a string that
	isn't represented by any child node. E.g. the values of constants or
	the names of functions, methods and attributes. Will be overwritten by
	properties in subclasses.
	"""

	# Result datatype of this node (set by ``validate()``/subclasses).
	datatype = None
	# Grammar rules for this node type (a dict mapping key to :class:`Rule`,
	# populated per subclass by :meth:`add_rules`).
	rules = None

	def __init__(self, *content: T_AST_Content):
		"""
		Create a new :class:`!AST` node from its content.

		``content`` is a mix of :class:`str` objects containing the UL4 source
		and child :class:`!AST` nodes.

		Normally the user doesn't call :meth:`!__init__` directly, but uses
		:meth:`make` to create the appropriate :class:`!AST` node from child
		nodes.

		For example a function call to the function ``date`` could be created
		like this::

			FuncAST(
				"date",
				"(",
				IntAST("2000", 2000),
				", ",
				IntAST("2", 2),
				", ",
				IntAST("29", 29),
				")",
			)

		but more conveniently like this::

			FuncAST.make(
				"date",
				ConstAST.make(2000),
				ConstAST.make(2),
				ConstAST.make(29),
			)
		"""
		final_content = []
		for item in content:
			if isinstance(item, str):
				if item: # Ignore empty strings
					if final_content and isinstance(final_content[-1], str):
						# Merge string with previous string
						final_content[-1] += item
					else:
						final_content.append(item)
			elif isinstance(item, AST):
				final_content.append(item)
			elif item is not None:
				raise TypeError(item)
		self.error = None
		self.content = final_content

	@classmethod
	@misc.notimplemented
	def make(cls) -> "AST":
		"""
		Create an instance of this AST class from its child AST nodes.

		This method is abstract and is overwritten in each subclass.
		"""

	@classmethod
	def fromul4(cls, node:ul4c.AST, **vars: "Field") -> "AST":
		"""
		Convert the UL4 syntax tree node ``node`` into a vSQL syntax tree.

		``vars`` maps the names of the available top-level variables to their
		:class:`Field` objects.
		"""
		# Most UL4 node types map directly to a vSQL node type.
		try:
			vsqltype = _ul42vsql[type(node)]
		except KeyError:
			pass
		else:
			return vsqltype.fromul4(node, **vars)
		if isinstance(node, ul4c.VarAST):
			# A top-level variable becomes a field reference
			# (``field`` is ``None`` if the variable is unknown).
			field = vars.get(node.name, None)
			return FieldRefAST(None, node.name, field, *cls._make_content_from_ul4(node))
		elif isinstance(node, ul4c.AttrAST):
			obj = cls.fromul4(node.obj, **vars)
			# Attribute access on a field reference yields a nested field
			# reference if the field has a reference group.
			if isinstance(obj, FieldRefAST) and isinstance(obj.field, Field) and obj.field.refgroup:
				try:
					field = obj.field.refgroup[node.attrname]
				except KeyError:
					pass # Fall through to return a generic :class:`AttrAST` node
				else:
					return FieldRefAST(
						obj,
						node.attrname,
						field,
						*cls._make_content_from_ul4(node, node.obj, obj)
					)
			return AttrAST(
				obj,
				node.attrname,
				*cls._make_content_from_ul4(node, node.obj, obj),
			)
		elif isinstance(node, ul4c.CallAST):
			obj = cls.fromul4(node.obj, **vars)
			content = [*obj.content]
			callargs = []
			# Calling a top-level name is a function call, calling an
			# attribute is a method call.
			if isinstance(obj, FieldRefAST):
				if obj.parent is not None:
					asttype = MethAST
					args = (obj.parent, obj.identifier)
				else:
					asttype = FuncAST
					args = (obj.identifier,)
			elif isinstance(obj, AttrAST):
				asttype = MethAST
				args = (obj.obj, obj.attrname)
			# NOTE(review): if ``obj`` is neither a ``FieldRefAST`` nor an
			# ``AttrAST``, ``asttype``/``args`` stay unbound and the call
			# below raises ``NameError`` — confirm this case can't occur.
			for arg in node.args:
				# Only positional arguments are supported in vSQL.
				if not isinstance(arg, ul4c.PositionalArgumentAST):
					raise TypeError(f"Can't compile UL4 expression of type {misc.format_class(arg)}!")
				content.append(arg.value)
				arg = AST.fromul4(arg.value, **vars)
				content.append(arg)
				callargs.append(arg)
			return asttype(
				*args,
				callargs,
				*cls._make_content_from_ul4(node, *content),
			)
		raise TypeError(f"Can't compile UL4 expression of type {misc.format_class(node)}!")

	@classmethod
	def fromsource(cls, source:str, **vars: "Field") -> "AST":
		"""
		Compile the vSQL/UL4 expression ``source`` into a vSQL syntax tree.
		"""
		# Wrap the expression in a template and convert its ``return`` expression.
		template = ul4c.Template(f"<?return {source}?>")
		expr = template.content[-1].obj
		return cls.fromul4(expr, **vars)

	def sqlsource(self, query:"Query") -> str:
		"""
		Return the SQL source code for this node (in the context of ``query``).
		"""
		return "".join(s for s in self._sqlsource(query))

	def fieldrefs(self) -> T_gen("FieldRefAST"):
		"""
		Return all :class:`FieldRefAST` objects in this :class:`!AST`.

		This is a generator.
		"""
		for child in self.children():
			yield from child.fieldrefs()

	@classmethod
	def all_types(cls) -> T_gen(Type["AST"]):
		"""
		Return this class and all subclasses.

		This is a generator.
		"""
		yield cls
		for subcls in cls.__subclasses__():
			yield from subcls.all_types()

	@classmethod
	def all_rules(cls) -> T_gen(Rule):
		"""
		Return all grammar rules of this class and all its subclasses.

		This is a generator.
		"""
		for subcls in cls.all_types():
			if subcls.rules is not None:
				yield from subcls.rules.values()

	@classmethod
	def _add_rule(cls, rule:Rule) -> None:
		# Register ``rule`` under its lookup key.
		cls.rules[rule.key] = rule

	@classmethod
	def typeref(cls, s:str) -> T_opt_int:
		"""
		Return the 1-based index if ``s`` is a type reference (``"T1"`` etc.),
		else ``None``.
		"""
		if s.startswith("T") and s[1:].isdigit():
			return int(s[1:])
		return None

	@classmethod
	def _specs(cls, spec:Tuple[str, ...]) -> T_gen(Tuple[str, Tuple[Union[DataType, str], ...]]):
		"""
		Expand a tokenized rule spec (a sequence of alternative-tuples) into
		all concrete ``(name, types)`` combinations, resolving ``TN`` type
		references.
		"""
		# Find position of potential name in the spec, so we can correct
		# the typeref offsets later.
		for (i, p) in enumerate(spec):
			if len(p) == 1 and not p[0].isupper():
				namepos = i
				name = p[0]
				break
		else:
			namepos = None
			name = None
		# Expand all alternative combinations.
		for spec in itertools.product(*spec):
			newspec = list(spec)
			for (i, type) in enumerate(spec):
				typeref = cls.typeref(type)
				if typeref:
					# Fetch reference type (and correct the offset if there's a name in ``spec``)
					type = spec[typeref+1 if namepos and typeref >= namepos else typeref]
					if cls.typeref(type):
						raise ValueError("typeref to typeref")
				newspec[i] = type
			# Convert type names to ``DataType`` values
			newspec = tuple(DataType[p] if p.isupper() else p for p in newspec)
			yield (name, newspec)

	@classmethod
	def add_rules(cls, spec:str, source:str) -> None:
		"""
		Register new syntax rules for this AST class.

		These rules are used for type checking and type inference and for
		converting the vSQL AST into SQL source code.

		The arguments ``spec`` and ``source`` have the following meaning:

		``spec``
			``spec`` specifies the allowed combinations of operand types and the
			resulting type. It consists of the following:

			Upper case words
				These specify types (e.g. ``INT`` or ``STR``; for a list of allowed
				values see :class:`DataType`). Also allowed are:

				* ``T`` followed by an integer, this is used to refer to another
				  type in the spec and
				* a combination of several types joined with ``_``. This is a union
				  type, i.e. any of the types in the combination are allowed.

			Lower case words
				They specify the names of functions, methods or attributes

			Any sequence of whitespace or other non-word characters
				They are ignored, but can be used to separate types and names and
				to make the rule clearer.

			The first word in the rule always is the result type.

			Examples:

			``INT <- BOOL + BOOL``
				Adding this rule to :class:`AddAST` specifies that the types ``BOOL``
				and ``BOOL`` can be added and the resulting type is ``INT``. Note
				that using ``+`` is only syntactic sugar. This rule could also have
				been written as ``INT BOOL BOOL`` or even as ``INT?????BOOL#$%^&*BOOL``.

			``INT <- BOOL_INT + BOOL_INT``
				This is equivalent to the four rules: ``INT <- BOOL + BOOL``,
				``INT <- INT + BOOL``, ``INT <- BOOL + INT`` and ``INT <- INT + INT``.

			``T1 <- BOOL_INT + T1``
				This is equivalent to the two rules ``BOOL <- BOOL + BOOL`` and
				``INT <- INT + INT``.

			Note that each rule will only be registered once. So the following
			code::

				AddAST.add_rules(
					"INT <- BOOL_INT + BOOL_INT",
					"..."
				)
				AddAST.add_rules(
					"NUMBER <- BOOL_INT_NUMBER + BOOL_INT_NUMBER",
					"..."
				)

			will register the rule ``INT <- BOOL + BOOL``, but not
			``NUMBER <- BOOL + BOOL`` since the first call already registered
			a rule for the signature ``BOOL BOOL``.

		``source``
			``source`` specifies the SQL source that will be generated for this
			expression. Two types of placeholders are supported: ``{s1}`` means
			"embed the source code of the first operand in this spot" (and ``{s2}``
			etc. accordingly) and ``{t1}`` embeds the type name (in lowercase) in
			this spot (and ``{t2}`` etc. accordingly).

			Example 1::

				AttrAST.add_rules(
					f"INT <- DATE.year",
					"extract(year from {s1})"
				)

			This specifies that a ``DATE`` value has an attribute ``year`` and that
			for such a value ``value`` the generated SQL source code will be:

			.. sourcecode:: sql

				extract(year from value)

			Example 2::

				EQAST.add_rules(
					f"BOOL <- STR_CLOB == STR_CLOB",
					"vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})"
				)

			This registers four rules for equality comparison between ``STR`` and
			``CLOB`` objects. The generated SQL source code for comparisons
			between ``STR`` and ``STR`` will be

			.. sourcecode:: sql

				vsqlimpl_pkg.eq_str_str(value1, value2)

			and for ``CLOB``/``CLOB`` comparison it will be

			.. sourcecode:: sql

				vsqlimpl_pkg.eq_clob_clob(value1, value2)
		"""
		# Split on non-names and drop empty parts
		spec = tuple(filter(None, Rule._re_sep.split(spec)))
		# Expand union types into tuples of alternatives.
		spec = [p.split("_") if p.isupper() else (p,) for p in spec]
		for (name, spec) in cls._specs(spec):
			# Drop return type from the lookup key
			key = spec[1:]
			if cls.rules is None:
				# Assigning via ``cls`` creates the dict on the subclass, so
				# each AST class gets its own rules.
				cls.rules = {}
			if key not in cls.rules:
				result = spec[0]
				# Drop name from the signature
				signature = tuple(p for p in key if isinstance(p, DataType))
				cls._add_rule(Rule(cls, result, name, key, signature, source))

	def validate(self) -> None:
		"""
		Validate the content of this AST node.

		If this node turns out to be invalid :meth:`!validate` will set the
		attribute ``datatype`` to ``None`` and ``error`` to the appropriate
		:class:`Error` value.

		If this node turns out to be valid, :meth:`!validate` will set the
		attribute ``error`` to ``None`` and ``datatype`` to the resulting data
		type of this node.
		"""
		pass

	def source(self) -> str:
		"""
		Return the UL4/vSQL source code of the AST.
		"""
		return "".join(s for s in self._source())

	def _source(self) -> T_gen(str):
		# Yield the source fragments of this node (depth first).
		for item in self.content:
			if isinstance(item, str):
				yield item
			else:
				yield from item._source()

	def children(self) -> T_gen("AST"):
		"""
		Return the child AST nodes of this node.
		"""
		yield from ()

	def save(self, handler:"ll.la.handlers.DBHandler") -> str:
		"""
		Save this vSQL expression to the database and return the resulting
		database id ``vs_id``.

		``handler`` must be a :class:`~ll.la.handlers.DBHandler`.
		"""
		return handler.save_vsql_ast(self)[0]

	def __str__(self) -> str:
		parts = [f"{self.__class__.__module__}.{self.__class__.__qualname__}"]
		if self.datatype is not None:
			parts.append(f"(datatype {self.datatype.name})")
		if self.error is not None:
			parts.append(f"(error {self.error.name})")
		parts.append(f": {self.source()}")
		return "".join(parts)

	def _ll_repr_(self) -> T_gen(str):
		if self.datatype is not None:
			yield f"datatype={self.datatype.name}"
		if self.error is not None:
			yield f"error={self.error.name}"
		yield f"source={self.source()!r}"

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		if self.datatype is not None:
			p.breakable()
			p.text(f"datatype={self.datatype.name}")
		if self.error is not None:
			p.breakable()
			p.text(f"error={self.error.name}")
		p.breakable()
		p.text("source=")
		p.pretty(self.source())

	@classmethod
	def _wrap(cls, obj:T_AST_Content, cond:bool) -> T_gen(T_AST_Content):
		# Yield ``obj``, wrapped in parentheses if ``cond`` is true.
		if cond:
			yield "("
		yield obj
		if cond:
			yield ")"

	def ul4ondump(self, encoder:ul4on.Encoder) -> None:
		# NOTE(review): this dumps ``self._source`` — which resolves to the
		# *method* ``_source`` unless an instance attribute shadows it (as
		# ``ul4onload`` below creates) — and ``self.pos``, which no code in
		# this class ever sets. Confirm against the UL4ON dump format.
		encoder.dump(self._source)
		encoder.dump(self.pos)

	def ul4onload(self, decoder:ul4on.Decoder) -> None:
		self._source = decoder.load()
		self.pos = decoder.load()

	@classmethod
	def _make_content_from_ul4(cls, node:ul4c.AST, *args:Union[ul4c.AST, "AST", str, None]) -> Tuple[T_AST_Content, ...]:
		"""
		Build the ``content`` sequence for a node converted from the UL4 node
		``node``, interleaving the converted children in ``args`` with the
		source text between their positions.
		"""
		content = []
		lastpos = node.pos.start
		for subnode in args:
			if isinstance(subnode, AST):
				content.append(subnode)
				lastpos += len(subnode.source())
			elif isinstance(subnode, ul4c.AST):
				# A UL4 node marks a position: emit the gap before it.
				if lastpos != subnode.pos.start:
					content.append(node.fullsource[lastpos:subnode.pos.start])
				lastpos = subnode.pos.start
			elif isinstance(subnode, str):
				content.append(subnode)
				lastpos += len(subnode)
		if lastpos != node.pos.stop:
			content.append(node.fullsource[lastpos:node.pos.stop])
		return content
class ConstAST(AST):
	"""
	Base class for all vSQL expressions that are constants.
	"""

	precedence = 20

	@staticmethod
	def make(value:Any) -> "ConstAST":
		"""
		Create the appropriate vSQL constant node for the Python value ``value``.
		"""
		astcls = _consts.get(type(value))
		if astcls is None:
			raise TypeError(value)
		# ``None`` is a singleton, so :meth:`NoneAST.make` takes no argument.
		return astcls.make() if astcls is NoneAST else astcls.make(value)

	@classmethod
	def fromul4(cls, node, **vars: "Field") -> "AST":
		"""
		Convert the UL4 constant ``node`` into the matching vSQL constant node.
		"""
		try:
			astcls = _consts[type(node.value)]
		except KeyError:
			raise TypeError(f"constant of type {misc.format_class(node.value)} not supported!") from None
		return astcls.fromul4(node, **vars)
@ul4on.register("de.livinglogic.vsql.none")
class NoneAST(ConstAST):
	"""
	The constant ``None``.
	"""

	nodetype = NodeType.CONST_NONE
	datatype = DataType.NULL

	@classmethod
	def make(cls) -> "NoneAST":
		# ``None`` is a singleton, so no value argument is needed.
		return cls("None")

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# ``None`` maps to SQL ``null``.
		yield "null"

	@classmethod
	def fromul4(cls, node:ul4c.ConstAST, **vars: "Field") -> "AST":
		return cls(node.source)
class _ConstWithValueAST(ConstAST):
	"""
	Base class for all vSQL constants that may have different values
	(i.e. anything except ``None``).
	"""

	def __init__(self, value, *content):
		super().__init__(*content)
		# The Python value of the constant.
		self.value = value

	@classmethod
	def make(cls, value:Any) -> "ConstAST":
		"""
		Create a constant node for ``value`` (using its UL4 representation as source).
		"""
		source = ul4c._repr(value)
		return cls(value, source)

	@classmethod
	def fromul4(cls, node:ul4c.ConstAST, **vars: "Field") -> "ConstAST":
		return cls(node.value, node.source)

	@property
	def nodevalue(self) -> str:
		return self.value

	def _ll_repr_(self) -> T_gen(str):
		yield from super()._ll_repr_()
		yield f"value={self.value!r}"

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		super()._ll_repr_pretty_(p)
		p.breakable()
		p.text("value=")
		p.pretty(self.value)

	def ul4ondump(self, encoder:ul4on.Encoder) -> None:
		super().ul4ondump(encoder)
		encoder.dump(self.value)

	def ul4onload(self, decoder:ul4on.Decoder) -> None:
		super().ul4onload(decoder)
		self.value = decoder.load()
@ul4on.register("de.livinglogic.vsql.bool")
class BoolAST(_ConstWithValueAST):
	"""
	A boolean constant (i.e. ``True`` or ``False``).
	"""

	nodetype = NodeType.CONST_BOOL
	datatype = DataType.BOOL

	@classmethod
	def make(cls, value:Any) -> "BoolAST":
		source = "True" if value else "False"
		return cls(value, source)

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Oracle has no boolean type, so booleans map to ``1``/``0``.
		if self.value:
			yield "1"
		else:
			yield "0"

	@property
	def nodevalue(self) -> str:
		if self.value:
			return "True"
		return "False"
@ul4on.register("de.livinglogic.vsql.int")
class IntAST(_ConstWithValueAST):
	"""
	An integer constant.
	"""

	nodetype = NodeType.CONST_INT
	datatype = DataType.INT

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Integers can be embedded into SQL literally.
		yield str(self.value)

	@property
	def nodevalue(self) -> str:
		return str(self.value)
@ul4on.register("de.livinglogic.vsql.number")
class NumberAST(_ConstWithValueAST):
	"""
	A number constant (containing a decimal point).
	"""

	nodetype = NodeType.CONST_NUMBER
	datatype = DataType.NUMBER

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Numbers can be embedded into SQL literally.
		yield str(self.value)

	@property
	def nodevalue(self) -> str:
		return repr(self.value)
@ul4on.register("de.livinglogic.vsql.str")
class StrAST(_ConstWithValueAST):
	"""
	A string constant.
	"""

	nodetype = NodeType.CONST_STR
	datatype = DataType.STR

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Double single quotes for SQL string literal escaping.
		escaped = self.value.replace("'", "''")
		yield "'" + escaped + "'"
@ul4on.register("de.livinglogic.vsql.clob")
class CLOBAST(_ConstWithValueAST):
	"""
	A CLOB constant.

	This normally will not be created by the Python implementation
	"""

	nodetype = NodeType.CONST_CLOB
	datatype = DataType.CLOB

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Double single quotes for SQL string literal escaping.
		escaped = self.value.replace("'", "''")
		yield "'" + escaped + "'"
@ul4on.register("de.livinglogic.vsql.color")
class ColorAST(_ConstWithValueAST):
	"""
	A color constant (e.g. ``#fff``).
	"""

	nodetype = NodeType.CONST_COLOR
	datatype = DataType.COLOR

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Encode the color as the decimal value of the 32-bit int 0xRRGGBBAA.
		color = self.value
		encoded = (color.r() << 24) + (color.g() << 16) + (color.b() << 8) + color.a()
		yield str(encoded)

	@property
	def nodevalue(self) -> str:
		# Eight hex digits: RRGGBBAA.
		color = self.value
		channels = (color.r(), color.g(), color.b(), color.a())
		return "".join(f"{channel:02x}" for channel in channels)
@ul4on.register("de.livinglogic.vsql.date")
class DateAST(_ConstWithValueAST):
	"""
	A date constant (e.g. ``@(2000-02-29)``).
	"""

	nodetype = NodeType.CONST_DATE
	datatype = DataType.DATE

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Emit an Oracle ``to_date`` call with an ISO formatted date.
		isodate = f"{self.value:%Y-%m-%d}"
		yield f"to_date('{isodate}', 'YYYY-MM-DD')"

	@property
	def nodevalue(self) -> str:
		return format(self.value, "%Y-%m-%d")
@ul4on.register("de.livinglogic.vsql.datetime")
class DateTimeAST(_ConstWithValueAST):
	"""
	A datetime constant (e.g. ``@(2000-02-29T12:34:56)``).
	"""

	nodetype = NodeType.CONST_DATETIME
	datatype = DataType.DATETIME

	@classmethod
	def make(cls, value:datetime.datetime) -> "DateTimeAST":
		# vSQL datetimes have second resolution, so drop the microseconds.
		truncated = value.replace(microsecond=0)
		return cls(truncated, ul4c._repr(truncated))

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Emit an Oracle ``to_date`` call with an ISO-like formatted timestamp.
		isodatetime = f"{self.value:%Y-%m-%d %H:%M:%S}"
		yield f"to_date('{isodatetime}', 'YYYY-MM-DD HH24:MI:SS')"

	@property
	def nodevalue(self) -> str:
		return format(self.value, "%Y-%m-%dT%H:%M:%S")
class _SeqAST(AST):
	"""
	Base class of :class:`ListAST` and :class:`SetAST`.
	"""

	def __init__(self, *content:T_AST_Content):
		super().__init__(*content)
		# The item nodes (everything in ``content`` that isn't literal source text).
		self.items = [item for item in content if isinstance(item, AST)]
		self.datatype = None
		self.validate()

	@classmethod
	def fromul4(cls, node:ul4c.AST, **vars: "Field") -> "AST":
		"""
		Convert the UL4 list/set literal ``node`` into a vSQL node.
		"""
		# (Bug fix: removed the dead local ``lastpos`` that was never used.)
		content = []
		for item in node.items:
			# ``*`` unpacking etc. is not supported in vSQL.
			if not isinstance(item, ul4c.SeqItemAST):
				raise TypeError(f"Can't compile UL4 expression of type {misc.format_class(item)}!")
			content.append(item.value)
			content.append(AST.fromul4(item.value, **vars))
		return cls(*cls._make_content_from_ul4(node, *content))

	def _sqlsource(self, query:"Query") -> T_gen(str):
		if self.datatype is self.nulltype:
			# Empty/all-``None`` sequence: the node value (the item count) is emitted.
			yield self.nodevalue
		else:
			# ``sqltypes`` (defined by the subclass) maps the datatype to the
			# SQL constructor wrapped around the comma-separated items.
			(prefix, suffix) = self.sqltypes[self.datatype]
			yield prefix
			for (i, item) in enumerate(self.items):
				if i:
					yield ", "
				yield from item._sqlsource(query)
			yield suffix

	def _ll_repr_(self) -> T_gen(str):
		yield from super()._ll_repr_()
		yield f"with {len(self.items):,} items"

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		super()._ll_repr_pretty_(p)
		for item in self.items:
			p.breakable()
			p.pretty(item)

	def children(self) -> T_gen("AST"):
		yield from self.items

	def ul4ondump(self, encoder:ul4on.Encoder) -> None:
		super().ul4ondump(encoder)
		encoder.dump(self.items)

	def ul4onload(self, decoder:ul4on.Decoder) -> None:
		super().ul4onload(decoder)
		self.items = decoder.load()
@ul4on.register("de.livinglogic.vsql.list")
class ListAST(_SeqAST):
	"""
	A list constant.

	For this to work the list may only contain items of "compatible" types,
	i.e. types that can be converted to a common type without losing
	information.
	"""

	nodetype = NodeType.LIST
	nulltype = DataType.NULLLIST
	precedence = 20

	sqltypes = {
		DataType.INTLIST: ("integers(", ")"),
		DataType.NUMBERLIST: ("numbers(", ")"),
		DataType.STRLIST: ("varchars(", ")"),
		DataType.CLOBLIST: ("clobs(", ")"),
		DataType.DATELIST: ("dates(", ")"),
		DataType.DATETIMELIST: ("dates(", ")"),
	}

	def __init__(self, *content:T_AST_Content):
		super().__init__(*content)
		self.validate()

	@classmethod
	def make(cls, *items:"AST") -> "ListAST":
		"""
		Create a :class:`!ListAST` from the item nodes ``items``.
		"""
		if not items:
			return cls("[]")
		content = []
		for (i, item) in enumerate(items):
			content.append("[" if i == 0 else ", ")
			content.append(item)
		content.append("]")
		return cls(*content)

	def validate(self) -> None:
		# An error in any item makes the whole list invalid.
		if any(item.error for item in self.items):
			self.error = Error.SUBNODEERROR
			self.datatype = None
			return
		# ``None`` items are compatible with every item type.
		itemtypes = {item.datatype for item in self.items} - {DataType.NULL}
		if not itemtypes:
			self.error = None
			self.datatype = DataType.NULLLIST
		elif len(itemtypes) == 1:
			listtypes = {
				DataType.INT: DataType.INTLIST,
				DataType.NUMBER: DataType.NUMBERLIST,
				DataType.STR: DataType.STRLIST,
				DataType.CLOB: DataType.CLOBLIST,
				DataType.DATE: DataType.DATELIST,
				DataType.DATETIME: DataType.DATETIMELIST,
			}
			datatype = listtypes.get(misc.first(itemtypes))
			self.datatype = datatype
			self.error = None if datatype else Error.LISTUNSUPPORTEDTYPES
		else:
			self.error = Error.LISTMIXEDTYPES
			self.datatype = None

	@property
	def nodevalue(self) -> str:
		# For a "null list" the item count is the node value.
		if self.datatype is DataType.NULLLIST:
			return str(len(self.items))
		return None
@ul4on.register("de.livinglogic.vsql.set")
class SetAST(_SeqAST):
	"""
	A set constant.

	For this to work the set may only contain items of "compatible" types,
	i.e. types that can be converted to a common type without losing
	information.
	"""

	nodetype = NodeType.SET
	nulltype = DataType.NULLSET
	precedence = 20

	sqltypes = {
		DataType.INTSET: ("vsqlimpl_pkg.set_intlist(integers(", "))"),
		DataType.NUMBERSET: ("vsqlimpl_pkg.set_numberlist(numbers(", "))"),
		DataType.STRSET: ("vsqlimpl_pkg.set_strlist(varchars(", "))"),
		DataType.DATESET: ("vsqlimpl_pkg.set_datetimelist(dates(", "))"),
		DataType.DATETIMESET: ("vsqlimpl_pkg.set_datetimelist(dates(", "))"),
	}

	def __init__(self, *content:T_AST_Content):
		super().__init__(*content)
		self.validate()

	@classmethod
	def make(cls, *items:"AST") -> "SetAST":
		"""
		Create a :class:`!SetAST` from the item nodes ``items``.
		"""
		if not items:
			# ``{/}`` is the UL4 syntax for an empty set.
			return cls("{/}")
		content = []
		for (i, item) in enumerate(items):
			content.append("{" if i == 0 else ", ")
			content.append(item)
		content.append("}")
		return cls(*content)

	def validate(self) -> None:
		# An error in any item makes the whole set invalid.
		if any(item.error for item in self.items):
			self.error = Error.SUBNODEERROR
			self.datatype = None
			return
		# ``None`` items are compatible with every item type.
		itemtypes = {item.datatype for item in self.items} - {DataType.NULL}
		if not itemtypes:
			self.error = None
			self.datatype = DataType.NULLSET
		elif len(itemtypes) == 1:
			settypes = {
				DataType.INT: DataType.INTSET,
				DataType.NUMBER: DataType.NUMBERSET,
				DataType.STR: DataType.STRSET,
				DataType.DATE: DataType.DATESET,
				DataType.DATETIME: DataType.DATETIMESET,
			}
			datatype = settypes.get(misc.first(itemtypes))
			self.datatype = datatype
			self.error = None if datatype else Error.SETUNSUPPORTEDTYPES
		else:
			self.error = Error.SETMIXEDTYPES
			self.datatype = None

	@property
	def nodevalue(self) -> str:
		# For a "null set" the item count is the node value.
		if self.datatype is DataType.NULLSET:
			return str(len(self.items))
		return None
@ul4on.register("de.livinglogic.vsql.fieldref")
class FieldRefAST(AST):
	"""
	Reference to a field defined in the database.
	"""

	nodetype = NodeType.FIELD
	precedence = 19

	def __init__(self, parent:Optional["FieldRefAST"], identifier:str, field:Optional["Field"], *content:T_AST_Content):
		"""
		Create a :class:`FieldRef` object.

		There are three possible scenarios with respect to ``identifier`` and
		``field``:

		``field is not None and field.identifier == identifier``
			In this case we have a valid :class:`Field` that describes a real
			field.

		``field is not None and field.identifier != identifier and field.identifier == "*"``
			In this case :obj:`field` is the :class:`Field` object for the generic
			typed request parameters. E.g. when the vSQL expression is
			``params.str.foo`` then :obj:`field` references the :class:`Field` for
			``params.str.*``, so ``field.identifier == "*" and
			identifier == "foo"``.

		``field is None``
			In this case the field is unknown.
		"""
		super().__init__(*content)
		self.parent = parent
		# Note that ``identifier`` might be different from ``field.identifier``
		# if ``field.identifier == "*"``.
		self.identifier = identifier
		# Note that ``field`` might be ``None`` when the field can't be found.
		self.field = field
		self.validate()

	@classmethod
	def make_root(cls, field:Union[str, "Field"]) -> "FieldRefAST":
		"""
		Create a top-level field reference (i.e. one without a parent).
		"""
		if isinstance(field, str):
			# This is an invalid field reference
			return FieldRefAST(None, field, None, field)
		else:
			return FieldRefAST(None, field.identifier, field, field.identifier)

	@classmethod
	def make(cls, parent:"FieldRefAST", identifier:str) -> "FieldRefAST":
		"""
		Create the field reference ``parent.identifier``.
		"""
		result_field = None
		parent_field = parent.field
		if parent_field is not None:
			group = parent_field.refgroup
			if group is not None:
				try:
					result_field = group[identifier]
				except KeyError:
					pass # Unknown field: ``result_field`` stays ``None``
		return FieldRefAST(parent, identifier, result_field, parent, ".", identifier)

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Register this reference with the query (so the required tables get
		# joined); ``alias`` is the resulting table alias or ``None``.
		alias = query._vsql_register(self)
		full_identifier = self.full_identifier
		if full_identifier.startswith("params."):
			# If the innermost field is "params" we need special treatment
			yield f"livingapi_pkg.reqparam_{self.parent.identifier}('{self.identifier}') /* {self.source()} */"
		elif alias is None:
			yield f"{self.field.fieldsql} /* {self.source()} */"
		else:
			yield f"{alias}.{self.field.fieldsql} /* {self.source()} */"

	def validate(self) -> None:
		# The only possible error is that the field is unknown.
		self.error = Error.FIELD if self.field is None else None

	@property
	def datatype(self) -> Optional[DataType]:
		return self.field.datatype if self.field is not None else None

	@property
	def nodevalue(self) -> str:
		# The dotted identifier path from the root to this node.
		identifierpath = []
		node = self
		while node is not None:
			identifierpath.insert(0, node.identifier)
			node = node.parent
		return ".".join(identifierpath)

	def fieldrefs(self) -> T_gen("FieldRefAST"):
		yield self
		yield from super().fieldrefs()

	@property
	def full_identifier(self) -> str:
		# Bug fix: the return annotation was ``Tuple[str]``, but this property
		# always returns a string.
		if self.parent is None:
			return self.identifier
		else:
			return f"{self.parent.full_identifier}.{self.identifier}"

	def _ll_repr_(self) -> T_gen(str):
		yield from super()._ll_repr_()
		if self.field is None or self.field.identifier != self.identifier:
			yield f"identifier={self.identifier!r}"
		if self.field is not None:
			yield f"field={self.field!r}"

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		super()._ll_repr_pretty_(p)
		# Bug fix: the identifier was output twice (once unconditionally and
		# once conditionally); output it only under the same condition as in
		# :meth:`_ll_repr_`.
		if self.field is None or self.field.identifier != self.identifier:
			p.breakable()
			p.text("identifier=")
			p.pretty(self.identifier)
		if self.field is not None:
			p.breakable()
			p.text("field=")
			p.pretty(self.field)

	def ul4ondump(self, encoder:ul4on.Encoder) -> None:
		super().ul4ondump(encoder)
		encoder.dump(self.parent)
		encoder.dump(self.identifier)
		encoder.dump(self.field)

	def ul4onload(self, decoder:ul4on.Decoder) -> None:
		super().ul4onload(decoder)
		self.parent = decoder.load()
		self.identifier = decoder.load()
		self.field = decoder.load()
class BinaryAST(AST):
	"""
	Base class of all binary expressions (i.e. expressions with two operands).
	"""

	def __init__(self, obj1:AST, obj2:AST, *content:T_AST_Content):
		super().__init__(*content)
		self.obj1 = obj1
		self.obj2 = obj2
		self.datatype = None
		self.validate()

	@classmethod
	def make(cls, obj1:AST, obj2:AST) -> "BinaryAST":
		"""
		Create a node for ``obj1 <operator> obj2``, parenthesizing the operands
		as required by their precedence.
		"""
		return cls(
			obj1,
			obj2,
			*cls._wrap(obj1, obj1.precedence < cls.precedence),
			f" {cls.operator} ",
			*cls._wrap(obj2, obj2.precedence <= cls.precedence),
		)

	def validate(self) -> None:
		"""
		Check the operand types against this node type's rules and set
		``datatype``/``error`` accordingly.
		"""
		# Bug fix: previously a subnode error was recorded but then
		# immediately overwritten by the rule lookup below; return early
		# instead (consistent with ``_SeqAST.validate``).
		if self.obj1.error or self.obj2.error:
			self.error = Error.SUBNODEERROR
			self.datatype = None
			return
		signature = (self.obj1.datatype, self.obj2.datatype)
		try:
			rule = self.rules[signature]
		except KeyError:
			# No rule for this type combination.
			self.error = Error.SUBNODETYPES
			self.datatype = None
		else:
			self.error = None
			self.datatype = rule.result

	@classmethod
	def fromul4(cls, node:ul4c.BinaryAST, **vars: "Field") -> "AST":
		obj1 = AST.fromul4(node.obj1, **vars)
		obj2 = AST.fromul4(node.obj2, **vars)
		return cls(
			obj1,
			obj2,
			*cls._make_content_from_ul4(node, node.obj1, obj1, node.obj2, obj2),
		)

	def _sqlsource(self, query:"Query") -> T_gen(str):
		# Emit the rule's source template, substituting the operand sources
		# for the int placeholders. (Removed an unused local ``result``.)
		rule = self.rules[(self.obj1.datatype, self.obj2.datatype)]
		for child in rule.source:
			if child == 1:
				yield from self.obj1._sqlsource(query)
			elif child == 2:
				yield from self.obj2._sqlsource(query)
			else:
				yield child

	def children(self) -> T_gen(AST):
		yield self.obj1
		yield self.obj2

	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		super()._ll_repr_pretty_(p)
		p.breakable()
		p.text("obj1=")
		p.pretty(self.obj1)
		p.breakable()
		p.text("obj2=")
		p.pretty(self.obj2)

	def ul4ondump(self, encoder:ul4on.Encoder) -> None:
		super().ul4ondump(encoder)
		encoder.dump(self.obj1)
		encoder.dump(self.obj2)

	def ul4onload(self, decoder:ul4on.Decoder) -> None:
		super().ul4onload(decoder)
		self.obj1 = decoder.load()
		self.obj2 = decoder.load()
@ul4on.register("de.livinglogic.vsql.eq")
class EQAST(BinaryAST):
	"""
	Equality comparison (``A == B``).
	"""

	nodetype = NodeType.CMP_EQ
	precedence = 6 # same level as the other comparison operators
	operator = "=="
@ul4on.register("de.livinglogic.vsql.ne")
class NEAST(BinaryAST):
	"""
	Inequality comparison (``A != B``).
	"""

	nodetype = NodeType.CMP_NE
	precedence = 6 # same level as the other comparison operators
	operator = "!="
@ul4on.register("de.livinglogic.vsql.lt")
class LTAST(BinaryAST):
	"""
	Less-than comparison (``A < B``).
	"""

	nodetype = NodeType.CMP_LT
	precedence = 6 # same level as the other comparison operators
	operator = "<"
@ul4on.register("de.livinglogic.vsql.le")
class LEAST(BinaryAST):
	"""
	Less-than-or-equal comparison (``A <= B``).
	"""

	nodetype = NodeType.CMP_LE
	precedence = 6 # same level as the other comparison operators
	operator = "<="
@ul4on.register("de.livinglogic.vsql.gt")
class GTAST(BinaryAST):
	"""
	Greater-than comparison (``A > B``).
	"""

	nodetype = NodeType.CMP_GT
	precedence = 6 # same level as the other comparison operators
	operator = ">"
@ul4on.register("de.livinglogic.vsql.ge")
class GEAST(BinaryAST):
	"""
	Greater-than-or-equal comparison (``A >= B``).
	"""

	nodetype = NodeType.CMP_GE
	precedence = 6 # same level as the other comparison operators
	operator = ">="
@ul4on.register("de.livinglogic.vsql.add")
class AddAST(BinaryAST):
"""
Addition (``A + B``).
"""
nodetype = NodeType.BINOP_ADD
precedence = 11
operator = "+"
@ul4on.register("de.livinglogic.vsql.sub")
class SubAST(BinaryAST):
"""
Subtraction (``A - B``).
"""
nodetype = NodeType.BINOP_SUB
precedence = 11
operator = "-"
@ul4on.register("de.livinglogic.vsql.mul")
class MulAST(BinaryAST):
"""
Multiplication (``A * B``).
"""
nodetype = NodeType.BINOP_MUL
precedence = 12
operator = "*"
@ul4on.register("de.livinglogic.vsql.truediv")
class TrueDivAST(BinaryAST):
"""
True division (``A / B``).
"""
nodetype = NodeType.BINOP_TRUEDIV
precedence = 12
operator = "/"
@ul4on.register("de.livinglogic.vsql.floordiv")
class FloorDivAST(BinaryAST):
"""
Floor division (``A // B``).
"""
nodetype = NodeType.BINOP_FLOORDIV
precedence = 12
operator = "//"
@ul4on.register("de.livinglogic.vsql.mod")
class ModAST(BinaryAST):
"""
Modulo operator (``A % B``).
"""
nodetype = NodeType.BINOP_MOD
precedence = 12
operator = "%"
@ul4on.register("de.livinglogic.vsql.shiftleft")
class ShiftLeftAST(BinaryAST):
"""
Left shift operator (``A << B``).
"""
nodetype = NodeType.BINOP_SHIFTLEFT
precedence = 10
operator = "<<"
@ul4on.register("de.livinglogic.vsql.shiftright")
class ShiftRightAST(BinaryAST):
"""
Right shift operator (``A >> B``).
"""
nodetype = NodeType.BINOP_SHIFTRIGHT
precedence = 10
operator = ">>"
@ul4on.register("de.livinglogic.vsql.and")
class AndAST(BinaryAST):
"""
Logical "and" (``A and B``).
"""
nodetype = NodeType.BINOP_AND
precedence = 4
operator = "and"
@ul4on.register("de.livinglogic.vsql.or")
class OrAST(BinaryAST):
"""
Logical "or" (``A or B``).
"""
nodetype = NodeType.BINOP_OR
precedence = 4
operator = "or"
@ul4on.register("de.livinglogic.vsql.contains")
class ContainsAST(BinaryAST):
"""
Containment test (``A in B``).
"""
nodetype = NodeType.BINOP_CONTAINS
precedence = 6
operator = "in"
@ul4on.register("de.livinglogic.vsql.notcontains")
class NotContainsAST(BinaryAST):
"""
Inverted containment test (``A not in B``).
"""
nodetype = NodeType.BINOP_NOTCONTAINS
precedence = 6
operator = "not in"
@ul4on.register("de.livinglogic.vsql.is")
class IsAST(BinaryAST):
"""
Identity test (``A is B``).
"""
nodetype = NodeType.BINOP_IS
precedence = 6
operator = "is"
@ul4on.register("de.livinglogic.vsql.isnot")
class IsNotAST(BinaryAST):
"""
Inverted identity test (``A is not B``).
"""
nodetype = NodeType.BINOP_ISNOT
precedence = 6
operator = "is not"
@ul4on.register("de.livinglogic.vsql.item")
class ItemAST(BinaryAST):
"""
Item access operator (``A[B]``).
"""
nodetype = NodeType.BINOP_ITEM
precedence = 16
@classmethod
def make(self, obj1:AST, obj2:AST) -> "ItemAST":
if obj1.precedence >= self.precedence:
return cls(obj1, obj2, obj1, "[", obj2, "]")
else:
return cls(obj1, obj2, "(", obj1, ")[", obj2, "]")
@classmethod
def fromul4(cls, node:ul4c.ItemAST, **vars: "Field") -> "AST":
if isinstance(node.obj2, ul4c.SliceAST):
return SliceAST.fromul4(node, **vars)
return super().fromul4(node, **vars)
@ul4on.register("de.livinglogic.vsql.bitand")
class BitAndAST(BinaryAST):
"""
Bitwise "and" (``A & B``).
"""
nodetype = NodeType.BINOP_BITAND
precedence = 9
operator = "&"
@ul4on.register("de.livinglogic.vsql.bitor")
class BitOrAST(BinaryAST):
"""
Bitwise "or" (``A | B``).
"""
nodetype = NodeType.BINOP_BITOR
precedence = 7
operator = "|"
@ul4on.register("de.livinglogic.vsql.bitxor")
class BitXOrAST(BinaryAST):
"""
Bitwise "exclusive or" (``A ^ B``).
"""
nodetype = NodeType.BINOP_BITXOR
precedence = 8
operator = "^"
class UnaryAST(AST):
	"""
	Base class of all unary expressions (i.e. expressions with one operand).

	Subclasses provide ``nodetype``, ``precedence`` and the prefix
	``operator`` string.
	"""
	def __init__(self, obj:AST, *content:T_AST_Content):
		super().__init__(*content)
		self.obj = obj
		self.datatype = None
		self.validate()
	@classmethod
	def make(cls, obj:AST) -> "UnaryAST":
		"""
		Create an instance, prefixing the operator string and wrapping
		``obj`` in parentheses if its precedence requires it.
		"""
		return cls(
			obj,
			cls.operator,
			*cls._wrap(obj, obj.precedence <= cls.precedence),
		)
	@classmethod
	def fromul4(cls, node:ul4c.UnaryAST, **vars: "Field") -> "AST":
		"""
		Create an instance from the UL4 unary operator ``node``.
		"""
		obj = AST.fromul4(node.obj, **vars)
		return cls(
			obj,
			*cls._make_content_from_ul4(node, node.obj, obj),
		)
	def validate(self) -> None:
		"""
		Determine :attr:`error` and :attr:`datatype` from the operand type.
		"""
		if self.obj.error:
			self.error = Error.SUBNODEERROR
		signature = (self.obj.datatype,)
		try:
			rule = self.rules[signature]
		except KeyError:
			self.error = Error.SUBNODETYPES
			self.datatype = None
		else:
			self.error = None
			self.datatype = rule.result
	def _sqlsource(self, query:"Query") -> T_gen(str):
		"""
		Generate the chunks of SQL source for this unary expression.
		The integer 1 in a rule's source is a placeholder for the operand.
		"""
		# Fix: dropped the unused local ``result = []`` (this is a generator).
		rule = self.rules[(self.obj.datatype, )]
		for child in rule.source:
			if child == 1:
				yield from self.obj._sqlsource(query)
			else:
				yield child
	def children(self) -> T_gen(AST):
		"""Yield the single operand."""
		yield self.obj
	def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
		super()._ll_repr_pretty_(p)
		p.breakable()
		p.text("obj=")
		p.pretty(self.obj)
	def ul4ondump(self, encoder:ul4on.Encoder) -> None:
		super().ul4ondump(encoder)
		encoder.dump(self.obj)
	def ul4onload(self, decoder:ul4on.Decoder) -> None:
		super().ul4onload(decoder)
		self.obj = decoder.load()
@ul4on.register("de.livinglogic.vsql.not")
class NotAST(UnaryAST):
"""
Logical negation (``not A``).
"""
nodetype = NodeType.UNOP_NOT
precedence = 5
operator = "not "
@ul4on.register("de.livinglogic.vsql.neg")
class NegAST(UnaryAST):
"""
Arithmetic negation (``-A``).
"""
nodetype = NodeType.UNOP_NEG
precedence = 14
operator = "-"
@ul4on.register("de.livinglogic.vsql.bitnot")
class BitNotAST(UnaryAST):
"""
Bitwise "not" (``~A``).
"""
nodetype = NodeType.UNOP_BITNOT
precedence = 14
operator = "~"
@ul4on.register("de.livinglogic.vsql.if")
class IfAST(AST):
"""
Ternary "if"/"else" (``A if COND else B``).
"""
nodetype = NodeType.TERNOP_IF
precedence = 3
def __init__(self, objif:AST, objcond:AST, objelse:AST, *content:T_AST_Content):
super().__init__(*content)
self.objif = objif
self.objcond = objcond
self.objelse = objelse
self.datatype = None
self.validate()
@classmethod
def make(cls, objif:AST, objcond:AST, objelse:AST) -> "IfAST":
return cls(
objif,
objcond,
objelse,
*cls._wrap(objif, objif.precedence <= cls.precedence),
" if ",
*cls._wrap(objcond, objcond.precedence <= cls.precedence),
" else ",
*cls._wrap(objelse, objcond.precedence <= cls.precedence),
)
def validate(self) -> None:
if self.objif.error or self.objcond.error or self.objelse.error:
self.error = Error.SUBNODEERROR
signature = (self.objif.datatype, self.objcond.datatype, self.objelse.datatype)
try:
rule = self.rules[signature]
except KeyError:
self.error = Error.SUBNODETYPES
self.datatype = None
else:
self.error = None
self.datatype = rule.result
@classmethod
def fromul4(cls, node:ul4c.IfAST, **vars: "Field") -> "IfAST":
objif = AST.fromul4(node.objif, **vars)
objcond = AST.fromul4(node.objcond, **vars)
objelse = AST.fromul4(node.objelse, **vars)
return cls(
objif,
objcond,
objelse,
*cls._make_content_from_ul4(node, node.objif, objif, node.objcond, objcond, node.objelse, objelse),
)
def _sqlsource(self, query:"Query") -> T_gen(str):
rule = self.rules[(self.objif.datatype, self.objcond.datatype, self.objelse.datatype)]
result = []
for child in rule.source:
if child == 1:
yield from self.objif._sqlsource(query)
elif child == 2:
yield from self.objcond._sqlsource(query)
elif child == 3:
yield from self.objelse._sqlsource(query)
else:
yield child
def children(self) -> T_gen(AST):
yield self.objif
yield self.objcond
yield self.objelse
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
super()._ll_repr_pretty_(p)
p.breakable()
p.text("objif=")
p.pretty(self.objif)
p.breakable()
p.text("objcond=")
p.pretty(self.objcond)
p.breakable()
p.text("objelse=")
p.pretty(self.objelse)
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
super().ul4ondump(encoder)
encoder.dump(self.objif)
encoder.dump(self.objcond)
encoder.dump(self.objelse)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
super().ul4onload(decoder)
self.objif = decoder.load()
self.objcond = decoder.load()
self.objelse = decoder.load()
@ul4on.register("de.livinglogic.vsql.if")
class SliceAST(AST):
"""
Slice operator (``A[B:C]``).
"""
nodetype = NodeType.TERNOP_SLICE
precedence = 16
def __init__(self, obj:AST, index1:T_opt_ast, index2:T_opt_ast, *content:T_AST_Content):
super().__init__(*content)
self.obj = obj
self.index1 = index1
self.index2 = index2
self.datatype = None
self.validate()
@classmethod
def make(cls, obj:AST, index1:T_opt_ast, index2:T_opt_ast) -> "SliceAST":
if index1 is None:
index1 = NoneAST(None)
if index2 is None:
index2 = NoneAST(None)
return cls(
obj,
index1,
index2,
*cls._wrap(obj, obj.precedence < cls.precedence),
"[",
index1,
":",
index2,
"]",
)
def validate(self) -> None:
if self.obj.error or self.index1.error or self.index2.error:
self.error = Error.SUBNODEERROR
signature = (self.obj.datatype, self.index1.datatype, self.index2.datatype)
try:
rule = self.rules[signature]
except KeyError:
self.error = Error.SUBNODETYPES
self.datatype = None
else:
self.error = None
self.datatype = rule.result
@classmethod
def fromul4(cls, node:ul4c.ItemAST, **vars: "Field") -> "AST":
obj = AST.fromul4(node.obj1, **vars)
index1 = AST.fromul4(node.obj2.index1, **vars) if node.obj2.index1 is not None else NoneAST("")
index2 = AST.fromul4(node.obj2.index2, **vars) if node.obj2.index2 is not None else NoneAST("")
return cls(
obj,
index1,
index2,
*cls._make_content_from_ul4(node, node.obj1, obj, node.obj2.index1, index1, node.obj2.index2, index2)
)
def _sqlsource(self, query:"Query") -> T_gen(str):
rule = self.rules[(self.obj.datatype, self.index1.datatype, self.index2.datatype)]
result = []
for child in rule.source:
if child == 1:
yield from self.obj._sqlsource(query)
elif child == 2:
yield from self.index1._sqlsource(query)
elif child == 3:
yield from self.index2._sqlsource(query)
else:
yield child
def children(self) -> T_gen(AST):
yield self.obj
yield self.index1 if self.index1 is None else NoneAST("")
yield self.index2 if self.index2 is None else NoneAST("")
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
super()._ll_repr_pretty_(p)
p.breakable()
p.text("obj=")
p.pretty(self.obj)
if self.index1 is not None:
p.breakable()
p.text("index1=")
p.pretty(self.index1)
if self.index2 is not None:
p.breakable()
p.text("index2=")
p.pretty(self.index2)
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
super().ul4ondump(encoder)
encoder.dump(self.obj)
encoder.dump(self.index1)
encoder.dump(self.index1)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
super().ul4onload(decoder)
self.obj = decoder.load()
self.index1 = decoder.load()
self.index2 = decoder.load()
@ul4on.register("de.livinglogic.vsql.attr")
class AttrAST(AST):
"""
Attribute access (``A.name``).
"""
nodetype = NodeType.ATTR
precedence = 19
def __init__(self, obj:AST, attrname:str, *content:T_AST_Content):
super().__init__(*content)
self.obj = obj
self.attrname = attrname
self.datatype = None
self.validate()
@classmethod
def make(cls, obj:AST, attrname:str) -> "AttrAST":
return cls(
obj,
attrname,
*cls._wrap(obj, obj.precedence < cls.precedence),
".",
attrname,
)
def validate(self) -> None:
if self.obj.error:
self.error = Error.SUBNODEERROR
signature = (self.obj.datatype, self.attrname)
try:
rule = self.rules[signature]
except KeyError:
self.error = Error.SUBNODETYPES
self.datatype = None
else:
self.error = None
self.datatype = rule.result
def _sqlsource(self, query:"Query") -> T_gen(str):
rule = self.rules[(self.obj.datatype, self.attrname)]
for child in rule.source:
if child == 1:
yield from self.obj._sqlsource(query)
else:
yield child
@property
def nodevalue(self) -> str:
return self.attrname
def children(self) -> T_gen(AST):
yield self.obj
def _ll_repr_(self) -> T_gen(str):
yield from super()._ll_repr_()
yield f"attrname={self.attrname!r}"
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
super()._ll_repr_pretty_(p)
p.breakable()
p.text("obj=")
p.pretty(self.obj)
p.breakable()
p.text("attrname=")
p.pretty(self.attrname)
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
super().ul4ondump(encoder)
encoder.dump(self.obj)
encoder.dump(self.attrname)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
super().ul4onload(decoder)
self.obj = decoder.load()
self.attrname = decoder.load()
@ul4on.register("de.livinglogic.vsql.func")
class FuncAST(AST):
"""
Function call (``name(A, ...)``).
"""
nodetype = NodeType.FUNC
precedence = 18
names = {} # Maps function names to set of supported arities
def __init__(self, name:str, args:Tuple[AST, ...], *content:T_AST_Content):
super().__init__(*content)
self.name = name
self.args = args
self.datatype = None
self.validate()
@classmethod
def make(cls, name:str, *args:AST) -> "FuncAST":
content = [name, "("]
for (i, arg) in enumerate(args):
if i:
content.append(", ")
content.append(arg)
content.append(")")
return cls(name, args, *content)
def _sqlsource(self, query:"Query") -> T_gen(str):
rule = self.rules[(self.name,) + tuple(c.datatype for c in self.args)]
result = []
for child in rule.source:
if isinstance(child, int):
yield from self.args[child-1]._sqlsource(query)
else:
yield child
@classmethod
def _add_rule(cls, rule:Rule) -> None:
super()._add_rule(rule)
if rule.name not in cls.names:
cls.names[rule.name] = set()
cls.names[rule.name].add(len(rule.signature))
def validate(self) -> None:
if any(arg.error is not None for arg in self.args):
self.error = Error.SUBNODEERROR
signature = (self.name, *(arg.datatype for arg in self.args))
try:
rule = self.rules[signature]
except KeyError:
if self.name not in self.names:
self.error = Error.NAME
elif len(self.args) not in self.names[self.name]:
self.error = Error.ARITY
else:
self.error = Error.SUBNODETYPES
self.datatype = None
else:
self.error = None
self.datatype = rule.result
@property
def nodevalue(self) -> str:
return self.name
def children(self) -> T_gen(AST):
yield from self.args
def _ll_repr_(self) -> T_gen(str):
yield from super()._ll_repr_()
yield f"name={self.name!r}"
yield f"with {len(self.args):,} arguments"
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
super()._ll_repr_pretty_(p)
for (i, arg) in enumerate(self.args):
p.breakable()
p.text(f"args[{i}]=")
p.pretty(arg)
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
super().ul4ondump(encoder)
encoder.dump(self.name)
encoder.dump(self.args)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
super().ul4onload(decoder)
self.name = decoder.load()
self.args = decoder.load()
@ul4on.register("de.livinglogic.vsql.meth")
class MethAST(AST):
"""
Method call (``A.name(B, ...)``).
"""
nodetype = NodeType.METH
precedence = 17
names = {} # Maps (type, meth name) to set of supported arities
def __init__(self, obj:AST, name:str, args:Tuple[AST, ...], *content:T_AST_Content):
super().__init__(*content)
self.obj = obj
self.name = name
self.args = args or ()
self.datatype = None
self.validate()
@classmethod
def make(cls, obj:AST, name:str, *args:AST) -> "MethAST":
content = [*cls._wrap(obj, obj.precedence < cls.precedence), ".", name, "("]
for (i, arg) in enumerate(args):
if i:
content.append(", ")
content.append(arg)
content.append(")")
return cls(obj, name, args, *content)
def _sqlsource(self, query:"Query") -> T_gen(str):
rule = self.rules[(self.obj.datatype, self.name) + tuple(c.datatype for c in self.args)]
result = []
for child in rule.source:
if isinstance(child, int):
if child == 1:
yield from self.obj._sqlsource(query)
else:
yield from self.args[child-2]._sqlsource(query)
else:
yield child
@classmethod
def _add_rule(cls, rule:Rule) -> None:
super()._add_rule(rule)
key = (rule.signature[0], rule.name)
if key not in cls.names:
cls.names[key] = set()
cls.names[key].add(len(rule.signature)-1)
def validate(self) -> None:
if self.obj.error is not None or any(arg.error is not None for arg in self.args):
self.error = Error.SUBNODEERROR
signature = (self.obj.datatype, self.name, *(arg.datatype for arg in self.args))
try:
rule = self.rules[signature]
except KeyError:
key = (self.obj.datatype, self.name)
if key not in self.names:
self.error = Error.NAME
elif len(self.args) not in self.names[key]:
self.error = Error.ARITY
else:
self.error = Error.SUBNODETYPES
self.datatype = None
else:
self.error = None
self.datatype = rule.result
@property
def nodevalue(self) -> str:
return self.name
def children(self) -> T_gen(AST):
yield self.obj
yield from self.args
def _ll_repr_(self) -> T_gen(str):
yield from super()._ll_repr_()
yield f"name={self.name!r}"
yield f"with {len(self.args):,} arguments"
def _ll_repr_pretty_(self, p:"IPython.lib.pretty.PrettyPrinter") -> None:
super()._ll_repr_pretty_(p)
p.breakable()
p.text("obj=")
p.pretty(self.obj)
p.breakable()
p.text("name=")
p.pretty(self.name)
for (i, arg) in enumerate(self.args):
p.breakable()
p.text(f"args[{i}]=")
p.pretty(arg)
def ul4ondump(self, encoder:ul4on.Encoder) -> None:
super().ul4ondump(encoder)
encoder.dump(self.obj)
encoder.dump(self.name)
encoder.dump(self.args)
def ul4onload(self, decoder:ul4on.Decoder) -> None:
super().ul4onload(decoder)
self.obj = decoder.load()
self.name = decoder.load()
self.args = decoder.load()
# Map Python constant types to the vSQL AST class that represents them.
_consts = {
	type(None): NoneAST,
	bool: BoolAST,
	int: IntAST,
	float: NumberAST,
	str: StrAST,
	color.Color: ColorAST,
	datetime.date: DateAST,
	datetime.datetime: DateTimeAST,
}
# Set of UL4 AST nodes that directly map to their equivalent vSQL version
_ops = {
	ul4c.ConstAST,
	ul4c.NotAST,
	ul4c.NegAST,
	ul4c.BitNotAST,
	*ul4c.BinaryAST.__subclasses__(),
	ul4c.IfAST,
	ul4c.SliceAST,
	ul4c.ListAST,
	ul4c.SetAST
}
# Create the mapping that maps the UL4 AST type to the vSQL AST type
# (each vSQL class is looked up by the same class name as its UL4 counterpart)
v = vars()
_ul42vsql = {cls: v[cls.__name__] for cls in _ops}
# Remove temporary variables
del _ops, v
###
### Create vSQL rules for all AST classes for validating datatypes and type inference
###
# Subsets of datatypes: underscore-separated lists of ``DataType`` member
# names in the format expected by ``add_rules``.
# Fix: removed the pointless ``f`` prefix from string literals without
# placeholders (kept only where interpolation actually happens).
INTLIKE = "BOOL_INT"
NUMBERLIKE = f"{INTLIKE}_NUMBER"
NUMBERSTORED = "BOOL_INT_NUMBER_COLOR_DATEDELTA_DATETIMEDELTA_MONTHDELTA"
TEXT = "STR_CLOB"
LIST = "INTLIST_NUMBERLIST_STRLIST_CLOBLIST_DATELIST_DATETIMELIST"
SET = "INTSET_NUMBERSET_STRSET_DATESET_DATETIMESET"
SEQ = f"{TEXT}_{LIST}_{SET}"
ANY = "_".join(DataType.__members__.keys())
# Field references and constants (will not be used for generating source,
# but for checking that the node type is valid and that they have no child nodes)
FieldRefAST.add_rules(f"NULL", "")
NoneAST.add_rules(f"NULL", "")
BoolAST.add_rules(f"BOOL", "")
IntAST.add_rules(f"INT", "")
NumberAST.add_rules(f"NUMBER", "")
StrAST.add_rules(f"STR", "")
CLOBAST.add_rules(f"CLOB", "")
ColorAST.add_rules(f"COLOR", "")
DateAST.add_rules(f"DATE", "")
DateTimeAST.add_rules(f"DATETIME", "")
# Function ``today()``
FuncAST.add_rules(f"DATE today", "trunc(sysdate)")
# Function ``now()``
FuncAST.add_rules(f"DATETIME now", "sysdate")
# Function ``bool()``
FuncAST.add_rules(f"BOOL <- bool()", "0")
FuncAST.add_rules(f"BOOL <- bool(NULL)", "0")
FuncAST.add_rules(f"BOOL <- bool(BOOL)", "{s1}")
FuncAST.add_rules(f"BOOL <- bool(INT_NUMBER_DATEDELTA_DATETIMEDELTA_MONTHDELTA_NULLLIST_NULLSET)", "(case when nvl({s1}, 0) = 0 then 0 else 1 end)")
FuncAST.add_rules(f"BOOL <- bool(DATE_DATETIME_STR_COLOR_GEO)", "(case when {s1} is null then 0 else 1 end)")
FuncAST.add_rules(f"BOOL <- bool({ANY})", "vsqlimpl_pkg.bool_{t1}({s1})")
# Function ``int()``
FuncAST.add_rules(f"INT <- int()", "0")
FuncAST.add_rules(f"INT <- int({INTLIKE})", "{s1}")
FuncAST.add_rules(f"INT <- int(NUMBER_STR_CLOB)", "vsqlimpl_pkg.int_{t1}({s1})")
# Function ``float()``
FuncAST.add_rules(f"NUMBER <- float()", "0.0")
FuncAST.add_rules(f"NUMBER <- float({NUMBERLIKE})", "{s1}")
FuncAST.add_rules(f"NUMBER <- float({TEXT})", "vsqlimpl_pkg.float_{t1}({s1})")
# Function ``geo()``
FuncAST.add_rules(f"GEO <- geo({NUMBERLIKE}, {NUMBERLIKE})", "vsqlimpl_pkg.geo_number_number_str({s1}, {s2}, null)")
FuncAST.add_rules(f"GEO <- geo({NUMBERLIKE}, {NUMBERLIKE}, STR)", "vsqlimpl_pkg.geo_number_number_str({s1}, {s2}, {s3})")
# Function ``str()``
FuncAST.add_rules(f"STR <- str()", "null")
FuncAST.add_rules(f"STR <- str(NULL)", "null")
FuncAST.add_rules(f"STR <- str(STR)", "{s1}")
FuncAST.add_rules(f"CLOB <- str(CLOB)", "{s1}")
FuncAST.add_rules(f"STR <- str(BOOL)", "(case {s1} when 0 then 'False' when null then 'None' else 'True' end)")
FuncAST.add_rules(f"STR <- str(INT)", "to_char({s1})")
FuncAST.add_rules(f"STR <- str(NUMBER)", "vsqlimpl_pkg.str_number({s1})")
FuncAST.add_rules(f"STR <- str(GEO)", "vsqlimpl_pkg.repr_geo({s1})")
FuncAST.add_rules(f"STR <- str(DATE)", "to_char({s1}, 'YYYY-MM-DD')")
FuncAST.add_rules(f"STR <- str(DATETIME)", "to_char({s1}, 'YYYY-MM-DD HH24:MI:SS')")
FuncAST.add_rules(f"STR <- str(NULLLIST)", "vsqlimpl_pkg.repr_nulllist({s1})")
FuncAST.add_rules(f"STR <- str(DATELIST)", "vsqlimpl_pkg.repr_datelist({s1})")
FuncAST.add_rules(f"STR <- str({LIST})", "vsqlimpl_pkg.repr_{t1}({s1})")
FuncAST.add_rules(f"STR <- str(NULLSET)", "vsqlimpl_pkg.repr_nullset({s1})")
FuncAST.add_rules(f"STR <- str(INTSET)", "vsqlimpl_pkg.repr_intset({s1})")
FuncAST.add_rules(f"STR <- str(NUMBERSET)", "vsqlimpl_pkg.repr_numberset({s1})")
FuncAST.add_rules(f"STR <- str(STRSET)", "vsqlimpl_pkg.repr_strset({s1})")
FuncAST.add_rules(f"STR <- str(DATESET)", "vsqlimpl_pkg.repr_dateset({s1})")
FuncAST.add_rules(f"STR <- str(DATETIMESET)", "vsqlimpl_pkg.repr_datetimeset({s1})")
FuncAST.add_rules(f"STR <- str({ANY})", "vsqlimpl_pkg.str_{t1}({s1})")
# Function ``repr()``
FuncAST.add_rules(f"STR <- repr(NULL)", "'None'")
FuncAST.add_rules(f"STR <- repr(BOOL)", "(case {s1} when 0 then 'False' when null then 'None' else 'True' end)")
FuncAST.add_rules(f"CLOB <- repr(CLOB_CLOBLIST)", "vsqlimpl_pkg.repr_{t1}({s1})")
FuncAST.add_rules(f"STR <- repr(DATE)", "vsqlimpl_pkg.repr_date({s1})")
FuncAST.add_rules(f"STR <- repr(DATELIST)", "vsqlimpl_pkg.repr_datelist({s1})")
FuncAST.add_rules(f"STR <- repr(NULLSET)", "vsqlimpl_pkg.repr_nullset({s1})")
FuncAST.add_rules(f"STR <- repr(INTSET)", "vsqlimpl_pkg.repr_intset({s1})")
FuncAST.add_rules(f"STR <- repr(NUMBERSET)", "vsqlimpl_pkg.repr_numberset({s1})")
FuncAST.add_rules(f"STR <- repr(STRSET)", "vsqlimpl_pkg.repr_strset({s1})")
FuncAST.add_rules(f"STR <- repr(DATESET)", "vsqlimpl_pkg.repr_dateset({s1})")
FuncAST.add_rules(f"STR <- repr(DATETIMESET)", "vsqlimpl_pkg.repr_datetimeset({s1})")
FuncAST.add_rules(f"STR <- repr({ANY})", "vsqlimpl_pkg.repr_{t1}({s1})")
# Function ``date()``
FuncAST.add_rules(f"DATE <- date(INT, INT, INT)", "vsqlimpl_pkg.date_int({s1}, {s2}, {s3})")
FuncAST.add_rules(f"DATE <- date(DATETIME)", "trunc({s1})")
# Function ``datetime()``
FuncAST.add_rules(f"DATETIME <- datetime(INT, INT, INT)", "vsqlimpl_pkg.datetime_int({s1}, {s2}, {s3})")
FuncAST.add_rules(f"DATETIME <- datetime(INT, INT, INT, INT)", "vsqlimpl_pkg.datetime_int({s1}, {s2}, {s3}, {s4})")
FuncAST.add_rules(f"DATETIME <- datetime(INT, INT, INT, INT, INT)", "vsqlimpl_pkg.datetime_int({s1}, {s2}, {s3}, {s4}, {s5})")
FuncAST.add_rules(f"DATETIME <- datetime(INT, INT, INT, INT, INT, INT)", "vsqlimpl_pkg.datetime_int({s1}, {s2}, {s3}, {s4}, {s5}, {s6})")
FuncAST.add_rules(f"DATETIME <- datetime(DATE)", "{s1}")
FuncAST.add_rules(f"DATETIME <- datetime(DATE, INT)", "({s1} + {s2}/24)")
FuncAST.add_rules(f"DATETIME <- datetime(DATE, INT, INT)", "({s1} + {s2}/24 + {s3}/24/60)")
FuncAST.add_rules(f"DATETIME <- datetime(DATE, INT, INT, INT)", "({s1} + {s2}/24 + {s3}/24/60 + {s4}/24/60/60)")
# Function ``len()``
FuncAST.add_rules(f"INT <- len({TEXT})", "nvl(length({s1}), 0)")
FuncAST.add_rules(f"INT <- len(NULLLIST)", "{s1}")
FuncAST.add_rules(f"INT <- len({LIST})", "vsqlimpl_pkg.len_{t1}({s1})")
FuncAST.add_rules(f"INT <- len(NULLSET)", "case when {s1} > 0 then 1 else {s1} end")
FuncAST.add_rules(f"INT <- len({SET})", "vsqlimpl_pkg.len_{t1}({s1})")
# Function ``timedelta()``
FuncAST.add_rules(f"DATEDELTA <- timedelta()", "0")
FuncAST.add_rules(f"DATEDELTA <- timedelta(INT)", "{s1}")
FuncAST.add_rules(f"DATETIMEDELTA <- timedelta(INT, INT)", "({s1} + {s2}/86400)")
# Function ``monthdelta()``
FuncAST.add_rules(f"MONTHDELTA <- monthdelta()", "0")
FuncAST.add_rules(f"MONTHDELTA <- monthdelta(INT)", "{s1}")
# Function ``years()``
FuncAST.add_rules(f"MONTHDELTA <- years(INT)", "(12 * {s1})")
# Function ``months()``
FuncAST.add_rules(f"MONTHDELTA <- months(INT)", "{s1}")
# Function ``weeks()``
FuncAST.add_rules(f"DATEDELTA <- weeks(INT)", "(7 * {s1})")
# Function ``days()``
FuncAST.add_rules(f"DATEDELTA <- days(INT)", "{s1}")
# Function ``hours()``
FuncAST.add_rules(f"DATETIMEDELTA <- hours(INT)", "({s1} / 24)")
# Function ``minutes()``
FuncAST.add_rules(f"DATETIMEDELTA <- minutes(INT)", "({s1} / 1440)")
# Function ``seconds()``
FuncAST.add_rules(f"DATETIMEDELTA <- seconds(INT)", "({s1} / 86400)")
# Function ``md5()``
FuncAST.add_rules(f"STR <- md5(STR)", "lower(rawtohex(dbms_crypto.hash(utl_raw.cast_to_raw({s1}), 2)))")
# Function ``random()``
FuncAST.add_rules(f"NUMBER <- random()", "dbms_random.value")
# Function ``randrange()``
FuncAST.add_rules(f"INT <- randrange(INT, INT)", "floor(dbms_random.value({s1}, {s2}))")
# Function ``seq()``
FuncAST.add_rules(f"INT <- seq()", "livingapi_pkg.seq()")
# Function ``rgb()``
FuncAST.add_rules(f"COLOR <- rgb({NUMBERLIKE}, {NUMBERLIKE}, {NUMBERLIKE})", "vsqlimpl_pkg.rgb({s1}, {s2}, {s3})")
FuncAST.add_rules(f"COLOR <- rgb({NUMBERLIKE}, {NUMBERLIKE}, {NUMBERLIKE}, {NUMBERLIKE})", "vsqlimpl_pkg.rgb({s1}, {s2}, {s3}, {s4})")
# Function ``list()``
FuncAST.add_rules(f"STRLIST <- list({TEXT})", "vsqlimpl_pkg.list_{t1}({s1})")
FuncAST.add_rules(f"T1 <- list(NULLLIST_{LIST})", "{s1}")
FuncAST.add_rules(f"NULLLIST <- list(NULLSET)", "{s1}")
FuncAST.add_rules(f"INTLIST <- list(INTSET)", "{s1}")
FuncAST.add_rules(f"NUMBERLIST <- list(NUMBERSET)", "{s1}")
FuncAST.add_rules(f"STRLIST <- list(STRSET)", "{s1}")
FuncAST.add_rules(f"DATELIST <- list(DATESET)", "{s1}")
FuncAST.add_rules(f"DATETIMELIST <- list(DATETIMESET)", "{s1}")
# Function ``set()``
FuncAST.add_rules(f"STRSET <- set({TEXT})", "vsqlimpl_pkg.set_{t1}({s1})")
FuncAST.add_rules(f"T1 <- set({SET})", "{s1}")
FuncAST.add_rules(f"NULLSET <- set(NULLLIST)", "case when {s1} > 0 then 1 else {s1} end")
FuncAST.add_rules(f"INTSET <- set(INTLIST)", "vsqlimpl_pkg.set_{t1}({s1})")
FuncAST.add_rules(f"NUMBERSET <- set(NUMBERLIST)", "vsqlimpl_pkg.set_{t1}({s1})")
FuncAST.add_rules(f"STRSET <- set(STRLIST)", "vsqlimpl_pkg.set_{t1}({s1})")
FuncAST.add_rules(f"DATESET <- set(DATELIST)", "vsqlimpl_pkg.set_{t1}({s1})")
FuncAST.add_rules(f"DATETIMESET <- set(DATETIMELIST)", "vsqlimpl_pkg.set_{t1}({s1})")
# Function ``dist()``
FuncAST.add_rules(f"NUMBER <- dist(GEO, GEO)", "vsqlimpl_pkg.dist_geo_geo({s1}, {s2})")
# Function ``abs()``
FuncAST.add_rules(f"INT <- abs(BOOL)", "{s1}")
FuncAST.add_rules(f"INT <- abs(INT)", "abs({s1})")
FuncAST.add_rules(f"NUMBER <- abs(NUMBER)", "abs({s1})")
# Function ``cos()``
FuncAST.add_rules(f"NUMBER <- cos({NUMBERLIKE})", "cos({s1})")
# Function ``sin()``
FuncAST.add_rules(f"NUMBER <- sin({NUMBERLIKE})", "sin({s1})")
# Function ``tan()``
FuncAST.add_rules(f"NUMBER <- tan({NUMBERLIKE})", "tan({s1})")
# Function ``sqrt()``
FuncAST.add_rules(f"NUMBER <- sqrt({NUMBERLIKE})", "sqrt(case when {s1} >= 0 then {s1} else null end)")
# Method ``lower()``
MethAST.add_rules(f"T1 <- {TEXT}.lower()", "lower({s1})")
# Method ``upper()``
MethAST.add_rules(f"T1 <- {TEXT}.upper()", "upper({s1})")
# Method ``startswith()``
MethAST.add_rules(f"BOOL <- {TEXT}.startswith(STR_STRLIST)", "vsqlimpl_pkg.startswith_{t1}_{t2}({s1}, {s2})")
# Method ``endswith()``
MethAST.add_rules(f"BOOL <- {TEXT}.endswith(STR_STRLIST)", "vsqlimpl_pkg.endswith_{t1}_{t2}({s1}, {s2})")
# Method ``strip()``
MethAST.add_rules(f"T1 <- {TEXT}.strip()", "vsqlimpl_pkg.strip_{t1}({s1}, null, 1, 1)")
MethAST.add_rules(f"T1 <- {TEXT}.strip(STR) ", "vsqlimpl_pkg.strip_{t1}({s1}, {s2}, 1, 1)")
# Method ``lstrip()``
MethAST.add_rules(f"T1 <- {TEXT}.lstrip()", "vsqlimpl_pkg.strip_{t1}({s1}, null, 1, 0)")
MethAST.add_rules(f"T1 <- {TEXT}.lstrip(STR) ", "vsqlimpl_pkg.strip_{t1}({s1}, {s2}, 1, 0)")
# Method ``rstrip()``
MethAST.add_rules(f"T1 <- {TEXT}.rstrip()", "vsqlimpl_pkg.strip_{t1}({s1}, null, 0, 1)")
MethAST.add_rules(f"T1 <- {TEXT}.rstrip(STR) ", "vsqlimpl_pkg.strip_{t1}({s1}, {s2}, 0, 1)")
# Method ``find()``
MethAST.add_rules(f"INT <- {TEXT}.find({TEXT})", "(instr({s1}, {s2}) - 1)")
MethAST.add_rules(f"INT <- {TEXT}.find({TEXT}, NULL)", "(instr({s1}, {s2}) - 1)")
MethAST.add_rules(f"INT <- {TEXT}.find({TEXT}, NULL, NULL)", "(instr({s1}, {s2}) - 1)")
MethAST.add_rules(f"INT <- {TEXT}.find({TEXT}, NULL_INT)", "vsqlimpl_pkg.find_{t1}_{t2}({s1}, {s2}, {s3}, null)")
MethAST.add_rules(f"INT <- {TEXT}.find({TEXT}, NULL_INT, NULL_INT)", "vsqlimpl_pkg.find_{t1}_{t2}({s1}, {s2}, {s3}, {s4})")
# Method ``replace()``
MethAST.add_rules(f"T1 <- {TEXT}.replace(STR, STR)", "replace({s1}, {s2}, {s3})")
# Method ``split()``
MethAST.add_rules(f"STRLIST <- STR.split()", "vsqlimpl_pkg.split_{t1}_str({s1}, null)")
MethAST.add_rules(f"CLOBLIST <- CLOB.split()", "vsqlimpl_pkg.split_{t1}_str({s1}, null)")
MethAST.add_rules(f"STRLIST <- STR.split(NULL)", "vsqlimpl_pkg.split_{t1}_str(null, null)")
MethAST.add_rules(f"CLOBLIST <- CLOB.split(NULL)", "vsqlimpl_pkg.split_{t1}_str(null, null)")
MethAST.add_rules(f"STRLIST <- STR.split(STR)", "vsqlimpl_pkg.split_{t1}_str({s1}, {s2})")
MethAST.add_rules(f"CLOBLIST <- CLOB.split(STR)", "vsqlimpl_pkg.split_{t1}_str({s1}, {s2})")
MethAST.add_rules(f"STRLIST <- STR.split(STR, NULL)", "vsqlimpl_pkg.split_{t1}_str({s1}, {s2})")
MethAST.add_rules(f"CLOBLIST <- CLOB.split(STR, NULL)", "vsqlimpl_pkg.split_{t1}_str({s1}, {s2})")
MethAST.add_rules(f"STRLIST <- STR.split(NULL, BOOL_INT)", "vsqlimpl_pkg.split_{t1}_str({s1}, null, {s3})")
MethAST.add_rules(f"CLOBLIST <- CLOB.split(NULL, BOOL_INT)", "vsqlimpl_pkg.split_{t1}_str({s1}, null, {s3})")
MethAST.add_rules(f"STRLIST <- STR.split(STR, BOOL_INT)", "vsqlimpl_pkg.split_{t1}_str({s1}, {s2}, {s3})")
MethAST.add_rules(f"CLOBLIST <- CLOB.split(STR, BOOL_INT)", "vsqlimpl_pkg.split_{t1}_str({s1}, {s2}, {s3})")
# Method ``join()``
MethAST.add_rules(f"STR <- STR.join(STR_STRLIST)", "vsqlimpl_pkg.join_str_{t2}({s1}, {s2})")
MethAST.add_rules(f"CLOB <- STR.join(CLOB_CLOBLIST)", "vsqlimpl_pkg.join_str_{t2}({s1}, {s2})")
# Method ``lum()``
MethAST.add_rules(f"NUMBER <- COLOR.lum()", "vsqlimpl_pkg.lum({s1})")
# Method ``week()``
MethAST.add_rules(f"INT <- DATE_DATETIME.week()", "to_number(to_char({s1}, 'IW'))")
# Attributes
AttrAST.add_rules(f"INT <- DATE_DATETIME.year", "extract(year from {s1})")
AttrAST.add_rules(f"INT <- DATE_DATETIME.month", "extract(month from {s1})")
AttrAST.add_rules(f"INT <- DATE_DATETIME.day", "extract(day from {s1})")
AttrAST.add_rules(f"INT <- DATETIME.hour", "to_number(to_char({s1}, 'HH24'))")
AttrAST.add_rules(f"INT <- DATETIME.minute", "to_number(to_char({s1}, 'MI'))")
AttrAST.add_rules(f"INT <- DATETIME.second", "to_number(to_char({s1}, 'SS'))")
AttrAST.add_rules(f"INT <- DATE_DATETIME.weekday", "(to_char({s1}, 'D')-1)")
AttrAST.add_rules(f"INT <- DATE_DATETIME.yearday", "to_number(to_char({s1}, 'DDD'))")
AttrAST.add_rules(f"INT <- DATEDELTA_DATETIMEDELTA.days", "trunc({s1})")
AttrAST.add_rules(f"INT <- DATETIMEDELTA.seconds", "trunc(mod({s1}, 1) * 86400 + 0.5)")
AttrAST.add_rules(f"NUMBER <- DATETIMEDELTA.total_days", "{s1}")
AttrAST.add_rules(f"NUMBER <- DATETIMEDELTA.total_hours", "({s1} * 24)")
AttrAST.add_rules(f"NUMBER <- DATETIMEDELTA.total_minutes", "({s1} * 1440)")
AttrAST.add_rules(f"NUMBER <- DATETIMEDELTA.total_seconds", "({s1} * 86400)")
AttrAST.add_rules(f"INT <- COLOR.r", "vsqlimpl_pkg.attr_color_r({s1})")
AttrAST.add_rules(f"INT <- COLOR.g", "vsqlimpl_pkg.attr_color_g({s1})")
AttrAST.add_rules(f"INT <- COLOR.b", "vsqlimpl_pkg.attr_color_b({s1})")
AttrAST.add_rules(f"INT <- COLOR.a", "vsqlimpl_pkg.attr_color_a({s1})")
AttrAST.add_rules(f"NUMBER <- GEO.lat", "vsqlimpl_pkg.attr_geo_lat({s1})")
AttrAST.add_rules(f"NUMBER <- GEO.long", "vsqlimpl_pkg.attr_geo_long({s1})")
AttrAST.add_rules(f"STR <- GEO.info", "vsqlimpl_pkg.attr_geo_info({s1})")
# Equality comparison (A == B)
# NULL literals are special-cased first; the final catch-all {ANY} == {ANY}
# only yields 1 when both operands are SQL NULL (all non-NULL combinations
# of matching types are handled by the more specific rules before it).
EQAST.add_rules(f"BOOL <- NULL == NULL", "1")
EQAST.add_rules(f"BOOL <- {ANY} == NULL", "(case when {s1} is null then 1 else 0 end)")
EQAST.add_rules(f"BOOL <- NULL == {ANY}", "(case when {s2} is null then 1 else 0 end)")
EQAST.add_rules(f"BOOL <- {INTLIKE} == {INTLIKE}", "vsqlimpl_pkg.eq_int_int({s1}, {s2})")
EQAST.add_rules(f"BOOL <- {NUMBERLIKE} == {NUMBERLIKE}", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
# GEO values compare via their string representation, COLOR via an int.
EQAST.add_rules(f"BOOL <- GEO == GEO", "vsqlimpl_pkg.eq_str_str({s1}, {s2})")
EQAST.add_rules(f"BOOL <- COLOR == COLOR", "vsqlimpl_pkg.eq_int_int({s1}, {s2})")
EQAST.add_rules(f"BOOL <- {TEXT} == {TEXT}", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATE_DATETIME == T1", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATEDELTA_MONTHDELTA_COLOR == T1", "vsqlimpl_pkg.eq_int_int({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATETIMEDELTA == DATETIMEDELTA", "vsqlimpl_pkg.eq_datetimedelta_datetimedelta({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLLIST == NULLLIST_{LIST}", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLLIST_{LIST} == NULLLIST", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- INTLIST_NUMBERLIST == INTLIST_NUMBERLIST", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- STRLIST_CLOBLIST == STRLIST_CLOBLIST", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATELIST_DATETIMELIST == DATELIST_DATETIMELIST", "vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLSET == NULLSET", "vsqlimpl_pkg.eq_nullset_nullset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLSET == INTSET", "vsqlimpl_pkg.eq_nullset_intset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLSET == NUMBERSET", "vsqlimpl_pkg.eq_nullset_numberset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLSET == STRSET", "vsqlimpl_pkg.eq_nullset_strset({s1}, {s2})")
# DATESET rules reuse the datetimeset implementations (presumably date sets
# are stored as datetime sets — TODO confirm in vsqlimpl_pkg).
EQAST.add_rules(f"BOOL <- NULLSET == DATESET", "vsqlimpl_pkg.eq_nullset_datetimeset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NULLSET == DATETIMESET", "vsqlimpl_pkg.eq_nullset_datetimeset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- INTSET == NULLSET", "vsqlimpl_pkg.eq_intset_nullset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NUMBERSET == NULLSET", "vsqlimpl_pkg.eq_numberset_nullset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- STRSET == NULLSET", "vsqlimpl_pkg.eq_strset_nullset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATESET == NULLSET", "vsqlimpl_pkg.eq_datetimeset_nullset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATETIMESET == NULLSET", "vsqlimpl_pkg.eq_datetimeset_nullset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- INTSET == INTSET", "vsqlimpl_pkg.eq_intset_intset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- NUMBERSET == NUMBERSET", "vsqlimpl_pkg.eq_numberset_numberset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- STRSET == STRSET", "vsqlimpl_pkg.eq_strset_strset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- DATESET_DATETIMESET == DATESET_DATETIMESET", "vsqlimpl_pkg.eq_datetimeset_datetimeset({s1}, {s2})")
EQAST.add_rules(f"BOOL <- {ANY} == {ANY}", "(case when {s1} is null and {s2} is null then 1 else 0 end)")
# Inequality comparison (A != B)
# Each rule is the boolean complement of the corresponding equality rule,
# expressed as ``(1 - vsqlimpl_pkg.eq_*(...))`` in SQL.
NEAST.add_rules(f"BOOL <- NULL != NULL", "0")
NEAST.add_rules(f"BOOL <- {ANY} != NULL", "(case when {s1} is null then 0 else 1 end)")
NEAST.add_rules(f"BOOL <- NULL != {ANY}", "(case when {s2} is null then 0 else 1 end)")
NEAST.add_rules(f"BOOL <- {INTLIKE} != {INTLIKE}", "(1 - vsqlimpl_pkg.eq_int_int({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- {NUMBERLIKE} != {NUMBERLIKE}", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- GEO != GEO", "(1 - vsqlimpl_pkg.eq_str_str({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- COLOR != COLOR", "(1 - vsqlimpl_pkg.eq_int_int({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- {TEXT} != {TEXT}", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATE_DATETIME != T1", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATEDELTA_MONTHDELTA_COLOR != T1", "(1 - vsqlimpl_pkg.eq_int_int({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATETIMEDELTA != DATETIMEDELTA", "(1 - vsqlimpl_pkg.eq_datetimedelta_datetimedelta({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLLIST != NULLLIST_{LIST}", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLLIST_{LIST} != NULLLIST", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- INTLIST_NUMBERLIST != INTLIST_NUMBERLIST", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- STRLIST_CLOBLIST != STRLIST_CLOBLIST", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATELIST_DATETIMELIST != DATELIST_DATETIMELIST", "(1 - vsqlimpl_pkg.eq_{t1}_{t2}({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLSET != NULLSET", "(1 - vsqlimpl_pkg.eq_nullset_nullset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLSET != INTSET", "(1 - vsqlimpl_pkg.eq_nullset_intset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLSET != NUMBERSET", "(1 - vsqlimpl_pkg.eq_nullset_numberset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLSET != STRSET", "(1 - vsqlimpl_pkg.eq_nullset_strset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLSET != DATESET", "(1 - vsqlimpl_pkg.eq_nullset_datetimeset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NULLSET != DATETIMESET", "(1 - vsqlimpl_pkg.eq_nullset_datetimeset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- INTSET != NULLSET", "(1 - vsqlimpl_pkg.eq_intset_nullset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NUMBERSET != NULLSET", "(1 - vsqlimpl_pkg.eq_numberset_nullset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- STRSET != NULLSET", "(1 - vsqlimpl_pkg.eq_strset_nullset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATESET != NULLSET", "(1 - vsqlimpl_pkg.eq_datetimeset_nullset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATETIMESET != NULLSET", "(1 - vsqlimpl_pkg.eq_datetimeset_nullset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- INTSET != INTSET", "(1 - vsqlimpl_pkg.eq_intset_intset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- NUMBERSET != NUMBERSET", "(1 - vsqlimpl_pkg.eq_numberset_numberset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- STRSET != STRSET", "(1 - vsqlimpl_pkg.eq_strset_strset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- DATESET_DATETIMESET != DATESET_DATETIMESET", "(1 - vsqlimpl_pkg.eq_datetimeset_datetimeset({s1}, {s2}))")
NEAST.add_rules(f"BOOL <- {ANY} != {ANY}", "(case when {s1} is null and {s2} is null then 0 else 1 end)")
# The following comparisons always treat ``None`` as the smallest value
# They delegate to ``vsqlimpl_pkg.cmp_*`` helpers that return a three-way
# result (<0, 0, >0), which is then compared against 0 in SQL.
# NOTE(review): unlike GT/LE there are no explicit ``NULL > NULL`` style
# rules for ``>=`` and ``<`` — confirm {ANY} covers NULL or that this
# asymmetry is intentional.
# Greater-than comparison (A > B)
GTAST.add_rules(f"BOOL <- NULL > NULL", "0")
GTAST.add_rules(f"BOOL <- {ANY} > NULL", "(case when {s1} is null then 0 else 1 end)")
GTAST.add_rules(f"BOOL <- NULL > {ANY}", "0")
GTAST.add_rules(f"BOOL <- {INTLIKE} > {INTLIKE}", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- {NUMBERLIKE} > {NUMBERLIKE}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- {TEXT} > {TEXT}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- DATE_DATETIME > T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- DATEDELTA > DATEDELTA", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- DATETIMEDELTA > DATETIMEDELTA", "(case when vsqlimpl_pkg.cmp_number_number({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- INTLIST_NUMBERLIST > INTLIST_NUMBERLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- STRLIST_CLOBLIST > STRLIST_CLOBLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- DATELIST_DATETIMELIST > T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- NULLLIST > NULLLIST_{LIST}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
GTAST.add_rules(f"BOOL <- NULLLIST_{LIST} > NULLLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) > 0 then 1 else 0 end)")
# Greater-than-or equal comparison (A >= B)
GEAST.add_rules(f"BOOL <- {ANY} >= NULL", "1")
GEAST.add_rules(f"BOOL <- NULL >= {ANY}", "(case when {s2} is null then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- {INTLIKE} >= {INTLIKE}", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- {NUMBERLIKE} >= {NUMBERLIKE}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- {TEXT} >= {TEXT}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- DATE_DATETIME >= T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- DATEDELTA >= DATEDELTA", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- DATETIMEDELTA >= DATETIMEDELTA", "(case when vsqlimpl_pkg.cmp_number_number({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- INTLIST_NUMBERLIST >= INTLIST_NUMBERLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- STRLIST_CLOBLIST >= STRLIST_CLOBLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- DATELIST_DATETIMELIST >= T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- NULLLIST >= NULLLIST_{LIST}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
GEAST.add_rules(f"BOOL <- NULLLIST_{LIST} >= NULLLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) >= 0 then 1 else 0 end)")
# Less-than comparison (A < B)
LTAST.add_rules(f"BOOL <- {ANY} < NULL", "0")
LTAST.add_rules(f"BOOL <- NULL < {ANY}", "(case when {s2} is null then 0 else 1 end)")
LTAST.add_rules(f"BOOL <- {INTLIKE} < {INTLIKE}", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- {NUMBERLIKE} < {NUMBERLIKE}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- {TEXT} < {TEXT}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- DATE_DATETIME < T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- DATEDELTA < DATEDELTA", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- DATETIMEDELTA < DATETIMEDELTA", "(case when vsqlimpl_pkg.cmp_number_number({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- INTLIST_NUMBERLIST < INTLIST_NUMBERLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- STRLIST_CLOBLIST < STRLIST_CLOBLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- DATELIST_DATETIMELIST < T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- NULLLIST < NULLLIST_{LIST}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
LTAST.add_rules(f"BOOL <- NULLLIST_{LIST} < NULLLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) < 0 then 1 else 0 end)")
# Less-than-or equal comparison (A <= B)
LEAST.add_rules(f"BOOL <- NULL <= NULL", "1")
LEAST.add_rules(f"BOOL <- {ANY} <= NULL", "(case when {s1} is null then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- NULL <= {ANY}", "1")
LEAST.add_rules(f"BOOL <- {INTLIKE} <= {INTLIKE}", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- {NUMBERLIKE} <= {NUMBERLIKE}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- {TEXT} <= {TEXT}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- DATE_DATETIME <= T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- DATEDELTA <= DATEDELTA", "(case when vsqlimpl_pkg.cmp_int_int({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- DATETIMEDELTA <= DATETIMEDELTA", "(case when vsqlimpl_pkg.cmp_number_number({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- INTLIST_NUMBERLIST <= INTLIST_NUMBERLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- STRLIST_CLOBLIST <= STRLIST_CLOBLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- DATELIST_DATETIMELIST <= T1", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- NULLLIST <= NULLLIST_{LIST}", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
LEAST.add_rules(f"BOOL <- NULLLIST_{LIST} <= NULLLIST", "(case when vsqlimpl_pkg.cmp_{t1}_{t2}({s1}, {s2}) <= 0 then 1 else 0 end)")
# Addition (A + B)
# Simple cases map directly to Oracle's ``+``/``||``; list and month
# arithmetic goes through ``vsqlimpl_pkg`` helpers.
AddAST.add_rules(f"INT <- {INTLIKE} + {INTLIKE}", "({s1} + {s2})")
AddAST.add_rules(f"NUMBER <- {NUMBERLIKE} + {NUMBERLIKE}", "({s1} + {s2})")
AddAST.add_rules(f"STR <- STR + STR", "({s1} || {s2})")
AddAST.add_rules(f"CLOB <- {TEXT} + {TEXT}", "({s1} || {s2})")
AddAST.add_rules(f"INTLIST <- INTLIST + INTLIST", "vsqlimpl_pkg.add_intlist_intlist({s1}, {s2})")
AddAST.add_rules(f"NUMBERLIST <- INTLIST_NUMBERLIST + INTLIST_NUMBERLIST", "vsqlimpl_pkg.add_{t1}_{t2}({s1}, {s2})")
AddAST.add_rules(f"STRLIST <- STRLIST + STRLIST", "vsqlimpl_pkg.add_strlist_strlist({s1}, {s2})")
AddAST.add_rules(f"CLOBLIST <- STRLIST_CLOBLIST + STRLIST_CLOBLIST", "vsqlimpl_pkg.add_{t1}_{t2}({s1}, {s2})")
AddAST.add_rules(f"T1 <- DATELIST_DATETIMELIST + T1", "vsqlimpl_pkg.add_{t1}_{t2}({s1}, {s2})")
# NULLLIST + NULLLIST uses SQL ``+`` — NULLLIST seems to be represented by
# its length — TODO confirm (see also the ``*_fromlen`` helpers below).
AddAST.add_rules(f"NULLLIST <- NULLLIST + NULLLIST", "({s1} + {s2})")
AddAST.add_rules(f"T2 <- NULLLIST + NULLLIST_{LIST}", "vsqlimpl_pkg.add_{t1}_{t2}({s1}, {s2})")
AddAST.add_rules(f"T1 <- NULLLIST_{LIST} + NULLLIST", "vsqlimpl_pkg.add_{t1}_{t2}({s1}, {s2})")
AddAST.add_rules(f"DATE <- DATE + DATEDELTA", "({s1} + {s2})")
AddAST.add_rules(f"DATETIME <- DATETIME + DATEDELTA_DATETIMEDELTA", "({s1} + {s2})")
AddAST.add_rules(f"T1 <- DATE_DATETIME + MONTHDELTA", "vsqlimpl_pkg.add_{t1}_months({s1}, {s2})")
AddAST.add_rules(f"T2 <- MONTHDELTA + DATE_DATETIME", "vsqlimpl_pkg.add_months_{t2}({s1}, {s2})")
AddAST.add_rules(f"DATEDELTA <- DATEDELTA + DATEDELTA", "({s1} + {s2})")
AddAST.add_rules(f"DATETIMEDELTA <- DATEDELTA_DATETIMEDELTA + DATEDELTA_DATETIMEDELTA", "({s1} + {s2})")
AddAST.add_rules(f"MONTHDELTA <- MONTHDELTA + MONTHDELTA", "({s1} + {s2})")
# Subtraction (A - B)
SubAST.add_rules(f"INT <- {INTLIKE} - {INTLIKE}", "({s1} - {s2})")
SubAST.add_rules(f"NUMBER <- {NUMBERLIKE} - {NUMBERLIKE}", "({s1} - {s2})")
SubAST.add_rules(f"DATE <- DATE - DATEDELTA", "({s1} - {s2})")
SubAST.add_rules(f"DATEDELTA <- DATE - DATE", "({s1} - {s2})")
SubAST.add_rules(f"DATETIMEDELTA <- DATETIME - DATETIME", "({s1} - {s2})")
# Subtracting months is implemented as adding the negated month count.
SubAST.add_rules(f"T1 <- DATE_DATETIME - MONTHDELTA", "vsqlimpl_pkg.add_{t1}_months({s1}, -{s2})")
SubAST.add_rules(f"DATETIME <- DATETIME - DATEDELTA_DATETIMEDELTA", "({s1} - {s2})")
SubAST.add_rules(f"T1 <- DATEDELTA_MONTHDELTA - T1", "({s1} - {s2})")
SubAST.add_rules(f"DATETIMEDELTA <- DATEDELTA_DATETIMEDELTA - DATEDELTA_DATETIMEDELTA" , "({s1} - {s2})")
# Multiplication (A * B)
MulAST.add_rules(f"INT <- {INTLIKE} * {INTLIKE}", "({s1} * {s2})")
MulAST.add_rules(f"NUMBER <- {NUMBERLIKE} * {NUMBERLIKE}", "({s1} * {s2})")
MulAST.add_rules(f"T2 <- {INTLIKE} * DATEDELTA_DATETIMEDELTA_MONTHDELTA", "({s1} * {s2})")
MulAST.add_rules(f"DATETIMEDELTA <- NUMBER * DATETIMEDELTA", "({s1} * {s2})")
# Sequence repetition (like Python's ``"x" * 3`` / ``[1] * 3``).
MulAST.add_rules(f"T2 <- {INTLIKE} * {TEXT}", "vsqlimpl_pkg.mul_int_{t2}({s1}, {s2})")
MulAST.add_rules(f"T1 <- {TEXT} * {INTLIKE}", "vsqlimpl_pkg.mul_{t1}_int({s1}, {s2})")
MulAST.add_rules(f"T2 <- {INTLIKE} * {LIST}", "vsqlimpl_pkg.mul_int_{t2}({s1}, {s2})")
MulAST.add_rules(f"T1 <- {LIST} * {INTLIKE}", "vsqlimpl_pkg.mul_{t1}_int({s1}, {s2})")
MulAST.add_rules(f"NULLLIST <- {INTLIKE} * NULLLIST", "({s1} * {s2})")
MulAST.add_rules(f"NULLLIST <- NULLLIST * {INTLIKE}", "({s1} * {s2})")
# True division (A / B)
TrueDivAST.add_rules(f"INT <- BOOL / BOOL", "({s1} / {s2})")
TrueDivAST.add_rules(f"NUMBER <- {NUMBERLIKE} / {NUMBERLIKE}", "({s1} / {s2})")
TrueDivAST.add_rules(f"DATETIMEDELTA <- DATETIMEDELTA / {NUMBERLIKE}", "({s1} / {s2})")
# Floor division (A // B)
FloorDivAST.add_rules(f"INT <- {NUMBERLIKE} // {NUMBERLIKE}", "vsqlimpl_pkg.floordiv_{t1}_{t2}({s1}, {s2})")
FloorDivAST.add_rules(f"T1 <- DATEDELTA_MONTHDELTA // {INTLIKE}", "vsqlimpl_pkg.floordiv_int_int({s1}, {s2})")
FloorDivAST.add_rules(f"DATEDELTA <- DATETIMEDELTA // {NUMBERLIKE}", "vsqlimpl_pkg.floordiv_number_int({s1}, {s2})")
# Modulo operator (A % B)
ModAST.add_rules(f"INT <- {INTLIKE} % {INTLIKE}", "vsqlimpl_pkg.mod_int_int({s1}, {s2})")
ModAST.add_rules(f"NUMBER <- {NUMBERLIKE} % {NUMBERLIKE}", "vsqlimpl_pkg.mod_{t1}_{t2}({s1}, {s2})")
ModAST.add_rules(f"COLOR <- COLOR % COLOR", "vsqlimpl_pkg.mod_color_color({s1}, {s2})")
# Left shift operator (A << B); emulated via multiplication by a power of 2.
ShiftLeftAST.add_rules(f"INT <- {INTLIKE} << {INTLIKE}", "trunc({s1} * power(2, {s2}))")
# Right shift operator (A >> B); emulated via division by a power of 2.
ShiftRightAST.add_rules(f"INT <- {INTLIKE} >> {INTLIKE}", "trunc({s1} / power(2, {s2}))")
# Logical "and" (A and B)
# Can't use the real operator ("and") in the spec, so use "?"
# The templates emulate Python's short-circuit semantics in SQL: return the
# first operand if it is falsy, otherwise the second.  Truthiness per type:
# numbers compare against 0, strings/dates against NULL, lists via their
# length (``vsqlimpl_pkg.len_*``).
AndAST.add_rules(f"T1 <- {ANY} ? NULL", "null")
AndAST.add_rules(f"T2 <- NULL ? {ANY}", "null")
AndAST.add_rules(f"BOOL <- BOOL ? BOOL", "(case when {s1} = 1 then {s2} else 0 end)")
AndAST.add_rules(f"INT <- {INTLIKE} ? {INTLIKE}", "(case when nvl({s1}, 0) != 0 then {s2} else {s1} end)")
AndAST.add_rules(f"NUMBER <- {NUMBERLIKE} ? {NUMBERLIKE}", "(case when nvl({s1}, 0) != 0 then {s2} else {s1} end)")
AndAST.add_rules(f"STR <- STR ? STR", "nvl2({s1}, {s2}, {s1})")
AndAST.add_rules(f"CLOB <- CLOB ? CLOB", "(case when {s1} is not null and length({s1}) != 0 then {s2} else {s1} end)")
AndAST.add_rules(f"T1 <- DATE_DATETIME ? T1", "nvl2({s1}, {s2}, {s1})")
AndAST.add_rules(f"T1 <- DATEDELTA_DATETIMEDELTA_MONTHDELTA ? T1", "(case when nvl({s1}, 0) != 0 then {s2} else {s1} end)")
AndAST.add_rules(f"T1 <- {LIST} ? T1", "(case when nvl(vsqlimpl_pkg.len_{t1}({s1}), 0) != 0 then {s2} else {s1} end)")
AndAST.add_rules(f"DATETIMELIST <- DATELIST_DATETIMELIST ? DATELIST_DATETIMELIST", "(case when nvl(vsqlimpl_pkg.len_{t1}({s1}), 0) != 0 then {s2} else {s1} end)")
AndAST.add_rules(f"NULLLIST <- NULLLIST ? NULLLIST", "(case when nvl({s1}, 0) != 0 then {s2} else {s1} end)")
# Mixing NULLLIST with a typed list converts via ``{t}_fromlen`` —
# NULLLIST appears to be represented by its length; TODO confirm.
AndAST.add_rules(f"T2 <- NULLLIST ? {LIST}", "(case when nvl({s1}, 0) != 0 then {s2} else vsqlimpl_pkg.{t2}_fromlen({s1}) end)")
AndAST.add_rules(f"T1 <- {LIST} ? NULLLIST", "(case when nvl(vsqlimpl_pkg.len_{t1}({s1}), 0) != 0 then vsqlimpl_pkg.{t1}_fromlen({s2}) else {s1} end)")
# Logical "or" (A or B)
# Can't use the real operator ("or") in the spec, so use "?"
# Mirror image of "and": return the first operand if truthy, else the second.
OrAST.add_rules(f"T1 <- {ANY} ? NULL", "{s1}")
OrAST.add_rules(f"T2 <- NULL ? {ANY}", "{s2}")
OrAST.add_rules(f"BOOL <- BOOL ? BOOL", "(case when {s1} = 1 then 1 else {s2} end)")
OrAST.add_rules(f"INT <- {INTLIKE} ? {INTLIKE}", "(case when nvl({s1}, 0) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"NUMBER <- {NUMBERLIKE} ? {NUMBERLIKE}", "(case when nvl({s1}, 0) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"STR <- STR ? STR", "nvl({s1}, {s2})")
OrAST.add_rules(f"CLOB <- CLOB ? CLOB", "(case when {s1} is not null and length({s1}) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"T1 <- DATE_DATETIME ? T1", "nvl({s1}, {s2})")
OrAST.add_rules(f"T1 <- DATEDELTA_DATETIMEDELTA_MONTHDELTA ? T1", "(case when nvl({s1}, 0) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"T1 <- {LIST} ? T1", "(case when nvl(vsqlimpl_pkg.len_{t1}({s1}), 0) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"DATETIMELIST <- DATELIST_DATETIMELIST ? DATELIST_DATETIMELIST", "(case when nvl(vsqlimpl_pkg.len_{t1}({s1}), 0) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"NULLLIST <- NULLLIST ? NULLLIST", "(case when nvl({s1}, 0) != 0 then {s1} else {s2} end)")
OrAST.add_rules(f"T2 <- NULLLIST ? {LIST}", "(case when nvl({s1}, 0) != 0 then vsqlimpl_pkg.{t2}_fromlen({s1}) else {s2} end)")
OrAST.add_rules(f"T1 <- {LIST} ? NULLLIST", "(case when nvl(vsqlimpl_pkg.len_{t1}({s1}), 0) != 0 then {s1} else vsqlimpl_pkg.{t1}_fromlen({s2}) end)")
# Containment test (A in B)
# Can't use the real operator ("in") in the spec, so use "?"
ContainsAST.add_rules(f"BOOL <- NULL ? {LIST}_NULLLIST", "vsqlimpl_pkg.contains_null_{t2}({s2})")
ContainsAST.add_rules(f"BOOL <- STR ? STR_CLOB_STRLIST_CLOBLIST_STRSET", "vsqlimpl_pkg.contains_str_{t2}({s1}, {s2})")
ContainsAST.add_rules(f"BOOL <- INT_NUMBER ? INTLIST_NUMBERLIST_INTSET_NUMBERSET", "vsqlimpl_pkg.contains_{t1}_{t2}({s1}, {s2})")
ContainsAST.add_rules(f"BOOL <- DATE ? DATELIST_DATESET", "vsqlimpl_pkg.contains_{t1}_{t2}({s1}, {s2})")
ContainsAST.add_rules(f"BOOL <- DATETIME ? DATETIMELIST_DATETIMESET", "vsqlimpl_pkg.contains_{t1}_{t2}({s1}, {s2})")
# A NULLLIST can only contain NULL, so any non-NULL value yields 0.
ContainsAST.add_rules(f"BOOL <- {ANY} ? NULLLIST", "case when {s1} is null then vsqlimpl_pkg.contains_null_nulllist({s2}) else 0 end")
# Inverted containment test (A not in B)
# Can't use the real operator ("not in") in the spec, so use "?"
# Each rule is the complement (1 - contains_*) of the corresponding
# containment rule.
NotContainsAST.add_rules(f"BOOL <- NULL ? {LIST}_NULLLIST", "(1 - vsqlimpl_pkg.contains_null_{t2}({s2}))")
NotContainsAST.add_rules(f"BOOL <- STR ? STR_CLOB_STRLIST_CLOBLIST_STRSET", "(1 - vsqlimpl_pkg.contains_str_{t2}({s1}, {s2}))")
NotContainsAST.add_rules(f"BOOL <- INT_NUMBER ? INTLIST_NUMBERLIST_INTSET_NUMBERSET", "(1 - vsqlimpl_pkg.contains_{t1}_{t2}({s1}, {s2}))")
NotContainsAST.add_rules(f"BOOL <- DATE ? DATELIST_DATESET", "(1 - vsqlimpl_pkg.contains_{t1}_{t2}({s1}, {s2}))")
NotContainsAST.add_rules(f"BOOL <- DATETIME ? DATETIMELIST_DATETIMESET", "(1 - vsqlimpl_pkg.contains_{t1}_{t2}({s1}, {s2}))")
NotContainsAST.add_rules(f"BOOL <- {ANY} ? NULLLIST", "case when {s1} is null then 1 - vsqlimpl_pkg.contains_null_nulllist({s2}) else 1 end")
# Identity test (A is B)
# Can't use the real operator ("is") in the spec, so use "?"
# Only defined against NULL — identity reduces to an ``is null`` test.
IsAST.add_rules(f"BOOL <- NULL ? NULL", "1")
IsAST.add_rules(f"BOOL <- {ANY} ? NULL", "(case when {s1} is null then 1 else 0 end)")
IsAST.add_rules(f"BOOL <- NULL ? {ANY}", "(case when {s2} is null then 1 else 0 end)")
# Inverted identity test (A is not B)
# Can't use the real operator ("is not") in the spec, so use "?"
IsNotAST.add_rules(f"BOOL <- NULL ? NULL", "0")
IsNotAST.add_rules(f"BOOL <- {ANY} ? NULL", "(case when {s1} is not null then 1 else 0 end)")
IsNotAST.add_rules(f"BOOL <- NULL ? {ANY}", "(case when {s2} is not null then 1 else 0 end)")
# Item access operator (A[B])
ItemAST.add_rules(f"NULL <- NULLLIST[{INTLIKE}]", "null")
ItemAST.add_rules(f"STR <- STR_CLOB_STRLIST[{INTLIKE}]", "vsqlimpl_pkg.item_{t1}({s1}, {s2})")
ItemAST.add_rules(f"CLOB <- CLOBLIST[{INTLIKE}]", "vsqlimpl_pkg.item_{t1}({s1}, {s2})")
ItemAST.add_rules(f"INT <- INTLIST[{INTLIKE}]", "vsqlimpl_pkg.item_{t1}({s1}, {s2})")
ItemAST.add_rules(f"NUMBER <- NUMBERLIST[{INTLIKE}]", "vsqlimpl_pkg.item_{t1}({s1}, {s2})")
ItemAST.add_rules(f"DATE <- DATELIST[{INTLIKE}]", "vsqlimpl_pkg.item_{t1}({s1}, {s2})")
ItemAST.add_rules(f"DATETIME <- DATETIMELIST[{INTLIKE}]", "vsqlimpl_pkg.item_{t1}({s1}, {s2})")
# Bitwise "and" (A & B); for sets this doubles as set intersection.
BitAndAST.add_rules(f"INT <- {INTLIKE} & {INTLIKE}", "bitand({s1}, {s2})")
BitAndAST.add_rules(f"T1 <- INTSET & INTSET", "vsqlimpl_pkg.bitand_intset({s1}, {s2})")
BitAndAST.add_rules(f"T1 <- NUMBERSET & NUMBERSET", "vsqlimpl_pkg.bitand_numberset({s1}, {s2})")
BitAndAST.add_rules(f"T1 <- STRSET & STRSET", "vsqlimpl_pkg.bitand_strset({s1}, {s2})")
BitAndAST.add_rules(f"T1 <- DATESET_DATETIMESET & T1", "vsqlimpl_pkg.bitand_datetimeset({s1}, {s2})")
# Bitwise "or" (A | B); for sets this doubles as set union.
BitOrAST.add_rules(f"INT <- {INTLIKE} | {INTLIKE}", "vsqlimpl_pkg.bitor_int({s1}, {s2})")
BitOrAST.add_rules(f"T1 <- INTSET | INTSET", "vsqlimpl_pkg.bitor_intset({s1}, {s2})")
BitOrAST.add_rules(f"T1 <- NUMBERSET | NUMBERSET", "vsqlimpl_pkg.bitor_numberset({s1}, {s2})")
BitOrAST.add_rules(f"T1 <- STRSET | STRSET", "vsqlimpl_pkg.bitor_strset({s1}, {s2})")
BitOrAST.add_rules(f"T1 <- DATESET_DATETIMESET | T1", "vsqlimpl_pkg.bitor_datetimeset({s1}, {s2})")
# Bitwise "exclusive or" (A ^ B)
BitXOrAST.add_rules(f"INT <- {INTLIKE} ^ {INTLIKE}", "vsqlimpl_pkg.bitxor_int({s1}, {s2})")
# Logical negation (not A)
# Can't use the real operator ("not") in the spec, so use "?"
NotAST.add_rules(f"BOOL <- ? NULL", "1")
NotAST.add_rules(f"BOOL <- ? BOOL", "(case {s1} when 1 then 0 else 1 end)")
NotAST.add_rules(f"BOOL <- ? INT_NUMBER_DATEDELTA_DATETIMEDELTA_MONTHDELTA", "(case nvl({s1}, 0) when 0 then 1 else 0 end)")
NotAST.add_rules(f"BOOL <- ? DATE_DATETIME_STR_COLOR_GEO", "(case when {s1} is null then 1 else 0 end)")
# Fallback: delegate truthiness to ``vsqlimpl_pkg.bool_*`` for all other types.
NotAST.add_rules(f"BOOL <- ? {ANY}", "(1 - vsqlimpl_pkg.bool_{t1}({s1}))")
# Arithmetic negation (-A)
NegAST.add_rules(f"INT <- BOOL", "(-{s1})")
NegAST.add_rules(f"T1 <- INT_NUMBER_DATEDELTA_DATETIMEDELTA_MONTHDELTA", "(-{s1})")
# Bitwise "not" (~A); two's complement identity ``~x == -x - 1``.
BitNotAST.add_rules(f"INT <- {INTLIKE}", "(-{s1} - 1)")
# Ternary "if"/"else" (A if COND else B)
# Can't use the real operator ("if"/"else") in the spec, so use "?"
# NOTE(review): rules appear ordered most-specific-first (NULL condition,
# numeric condition, date/str condition, generic fallback) — confirm that
# ``add_rules`` matching honours registration order.
IfAST.add_rules(f"T1 <- {ANY} ? NULL ? T1", "{s3}")
IfAST.add_rules(f"INT <- {INTLIKE} ? NULL ? {INTLIKE}", "{s3}")
IfAST.add_rules(f"NUMBER <- {NUMBERLIKE} ? NULL ? {NUMBERLIKE}", "{s3}")
IfAST.add_rules(f"T1 <- {ANY} ? NULL ? NULL", "{s3}")
IfAST.add_rules(f"T3 <- NULL ? NULL ? {ANY}", "{s3}")
IfAST.add_rules(f"T1 <- {ANY} ? {NUMBERSTORED} ? T1", "(case when nvl({s2}, 0) != 0 then {s1} else {s3} end)")
IfAST.add_rules(f"INT <- {INTLIKE} ? {NUMBERSTORED} ? {INTLIKE}", "(case when nvl({s2}, 0) != 0 then {s1} else {s3} end)")
IfAST.add_rules(f"NUMBER <- {NUMBERLIKE} ? {NUMBERSTORED} ? {NUMBERLIKE}", "(case when nvl({s2}, 0) != 0 then {s1} else {s3} end)")
IfAST.add_rules(f"T1 <- {ANY} ? {NUMBERSTORED} ? NULL", "(case when nvl({s2}, 0) != 0 then {s1} else {s3} end)")
IfAST.add_rules(f"T3 <- NULL ? {NUMBERSTORED} ? {ANY}", "(case when nvl({s2}, 0) != 0 then {s1} else {s3} end)")
IfAST.add_rules(f"T1 <- {ANY} ? DATE_DATETIME_STR_GEO ? T1", "(case when {s2} is not null then {s1} else {s3} end)")
IfAST.add_rules(f"INT <- {INTLIKE} ? DATE_DATETIME_STR_GEO ? {INTLIKE}", "(case when {s2} is not null then {s1} else {s3} end)")
IfAST.add_rules(f"NUMBER <- {NUMBERLIKE} ? DATE_DATETIME_STR_GEO ? {NUMBERLIKE}", "(case when {s2} is not null then {s1} else {s3} end)")
IfAST.add_rules(f"T1 <- {ANY} ? DATE_DATETIME_STR_GEO ? NULL", "(case when {s2} is not null then {s1} else {s3} end)")
IfAST.add_rules(f"T3 <- NULL ? DATE_DATETIME_STR_GEO ? {ANY}", "(case when {s2} is not null then {s1} else {s3} end)")
IfAST.add_rules(f"T1 <- {ANY} ? {ANY} ? T1", "(case when vsqlimpl_pkg.bool_{t2}({s2}) = 1 then {s1} else {s3} end)")
IfAST.add_rules(f"INT <- {INTLIKE} ? {ANY} ? {INTLIKE}", "(case when vsqlimpl_pkg.bool_{t2}({s2}) = 1 then {s1} else {s3} end)")
IfAST.add_rules(f"NUMBER <- {NUMBERLIKE} ? {ANY} ? {NUMBERLIKE}", "(case when vsqlimpl_pkg.bool_{t2}({s2}) = 1 then {s1} else {s3} end)")
IfAST.add_rules(f"T1 <- {ANY} ? {ANY} ? NULL", "(case when vsqlimpl_pkg.bool_{t2}({s2}) = 1 then {s1} else {s3} end)")
IfAST.add_rules(f"T3 <- NULL ? {ANY} ? {ANY}", "(case when vsqlimpl_pkg.bool_{t2}({s2}) = 1 then {s1} else {s3} end)")
# Slice operator (A[B:C]); start/stop may each independently be NULL.
SliceAST.add_rules(f"T1 <- {TEXT}_{LIST}[NULL_{INTLIKE}:NULL_{INTLIKE}]", "vsqlimpl_pkg.slice_{t1}({s1}, {s2}, {s3})")
SliceAST.add_rules(f"NULLLIST <- NULLLIST[NULL_{INTLIKE}:NULL_{INTLIKE}]", "vsqlimpl_pkg.slice_{t1}({s1}, {s2}, {s3})")
###
### Class for regenerating the Java type information.
###
class JavaSource:
	"""
	A :class:`JavaSource` object combines the source code of a Java class that
	implements a vSQL AST type with the Python class that implements that AST
	type.

	It is used to update the vSQL syntax rules in the Java implementation of vSQL.
	"""

	# Marker lines in the Java source; everything between them is generated
	# code and will be replaced by :meth:`save`.
	_start_line = "//BEGIN RULES (don't remove this comment)"
	_end_line = "//END RULES (don't remove this comment)"

	def __init__(self, astcls:Type[AST], path:pathlib.Path):
		"""
		Create a :class:`!JavaSource` object for the AST class ``astcls`` whose
		Java implementation is in the file ``path``.
		"""
		self.astcls = astcls
		self.path = path
		self.lines = path.read_text(encoding="utf-8").splitlines(False)

	def __repr__(self):
		# Bug fix: this previously referenced the non-existent attribute
		# ``self.cls`` (the constructor stores ``self.astcls``), so calling
		# ``repr()`` raised an ``AttributeError``.
		return f"<{self.__class__.__module__}.{self.__class__.__qualname__} cls={self.astcls!r} path={str(self.path)!r} at {id(self):#x}>"

	def new_lines(self) -> T_gen(str):
		"""
		Return an iterator over the new Java source code lines that should
		replace the static initialization block inside the Java source file.
		"""
		# How many ``addRule()`` calls to pack into one static method.
		# This avoids the ``code too large`` error from the Java compiler.
		bunch = 100
		number = 0
		rules = list(self.astcls.rules.values())
		yield f"\t{self._start_line}"
		for (i, rule) in enumerate(rules):
			if i % bunch == 0:
				# Start a new ``addRulesPart<number>()`` method.
				number += 1
				yield f"\tprivate static void addRulesPart{number}()"
				yield "\t{"
			yield f"\t\t{rule.java_source()}"
			if i % bunch == bunch-1:
				# This method is full: close it.
				yield "\t}"
				yield ""
		if rules and len(rules) % bunch != 0:
			# Close the final, partially filled method.
			# (Bug fix: the old code tested the loop variable ``i`` after the
			# loop, which raised a ``NameError`` when there were no rules.)
			yield "\t}"
			yield ""
		# Static initializer that calls all the ``addRulesPart<n>()`` methods.
		yield f"\tstatic"
		yield "\t{"
		for i in range(1, number+1):
			yield f"\t\taddRulesPart{i}();"
		yield "\t}"
		yield f"\t{self._end_line}"

	def save(self) -> None:
		"""
		Resave the Java source code incorporating the new vSQL type info from the
		Python AST class.
		"""
		inrules = False
		with self.path.open("w", encoding="utf-8") as f:
			for line in self.lines:
				if inrules:
					# Inside the old generated section: drop every line up to
					# and including the end marker (``new_lines()`` emits both
					# marker lines itself).
					if line.strip() == self._end_line:
						inrules = False
				else:
					if line.strip() == self._start_line:
						# Replace the old generated section with fresh output.
						inrules = True
						for new_line in self.new_lines():
							f.write(f"{new_line}\n")
					else:
						f.write(f"{line}\n")

	@classmethod
	def all_java_source_files(cls, path: pathlib.Path) -> T_gen("JavaSource"):
		"""
		Return an iterator over all :class:`!JavaSource` objects that can be found
		in the directory ``path``. ``path`` should point to the directory
		containing the Java vSQL AST classes.
		"""
		# Find all AST classes that have rules.  (Use ``astcls`` rather than
		# ``cls`` for the local variable so the classmethod's own ``cls``
		# parameter isn't shadowed/rebound.)
		classes = {astcls.__name__: astcls for astcls in AST.all_types() if hasattr(astcls, "rules")}
		for filename in path.glob("**/*.java"):
			try:
				# Do we have a Python class for this Java source?
				astcls = classes[filename.stem]
			except KeyError:
				pass
			else:
				yield JavaSource(astcls, filename)

	@classmethod
	def rewrite_all_java_source_files(cls, path:pathlib.Path, verbose:bool=False) -> None:
		"""
		Rewrite all Java source code files implementing Java vSQL AST classes
		in the directory ``path``. ``path`` should point to the directory
		containing the Java vSQL AST classes.
		"""
		if verbose:
			print(f"Rewriting Java source files in {str(path)!r}")
		for javasource in cls.all_java_source_files(path):
			javasource.save()
###
### Functions for regenerating the Oracle type information.
###
def oracle_sql_table() -> str:
	"""
	Return the SQL statement for creating the table ``VSQLRULE``.

	The column list is derived from the module-level ``fields`` mapping
	(column name -> Python field type); ``varchar2`` columns are sized to
	the widest value any rule actually stores in them.
	"""
	recordfields = [rule.oracle_fields() for rule in AST.all_rules()]

	def varchar_width(name):
		# Widest non-empty value stored under ``name`` across all rules.
		return max(len(r[name]) for r in recordfields if name in r and r[name])

	lines = ["create table vsqlrule", "("]
	lastindex = len(fields) - 1
	for (i, (fieldname, fieldtype)) in enumerate(fields.items()):
		term = "," if i < lastindex else ""
		if fieldname == "vr_cname":
			# The creating-user column gets a fixed generous size.
			line = f"\t{fieldname} varchar2(200) not null{term}"
		elif fieldtype is int:
			line = f"\t{fieldname} integer not null{term}"
		elif fieldtype is T_opt_int:
			line = f"\t{fieldname} integer{term}"
		elif fieldtype is datetime.datetime:
			line = f"\t{fieldname} date not null{term}"
		elif fieldtype is str:
			line = f"\t{fieldname} varchar2({varchar_width(fieldname)}) not null{term}"
		elif fieldtype is T_opt_str:
			line = f"\t{fieldname} varchar2({varchar_width(fieldname)}){term}"
		else:
			raise ValueError(f"unknown field type {fieldtype!r}")
		lines.append(line)
	lines.append(")")
	return "\n".join(lines)
def oracle_sql_procedure() -> str:
	"""
	Return the SQL statement for creating the procedure ``VSQLGRAMMAR_MAKE``.

	The procedure empties ``VSQLRULE`` and then reinserts one record per
	syntax rule registered on the AST classes.
	"""
	header = [
		"create or replace procedure vsqlgrammar_make(c_user varchar2)",
		"as",
		"begin",
		"\tdelete from vsqlrule;",
	]
	inserts = [f"\t{rule.oracle_source()}" for rule in AST.all_rules()]
	return "\n".join(header + inserts + ["end;"])
def oracle_sql_index() -> str:
    """
    Return the SQL statement for creating the index ``VSQLRULE_I1``.

    The unique index enforces one rule per
    (node type, value, signature, arity) combination.
    """
    return "create unique index vsqlrule_i1 on vsqlrule(vr_nodetype, vr_value, vr_signature, vr_arity)"
def oracle_sql_tablecomment() -> str:
    """
    Return the SQL statement for creating a comment on the table ``VSQLRULE``.
    """
    return "comment on table vsqlrule is 'Syntax rules for vSQL expressions.'"
def recreate_oracle(connectstring:str, verbose:bool=False) -> None:
    """
    Recreate the vSQL syntax rules in the database.

    This recreate the procedure ``VSQLGRAMMAR_MAKE`` and the table ``VSQLRULE``
    and its content. The table is only dropped and recreated when its
    definition actually changed; the procedure is always recreated and
    executed to refresh the table content.
    """
    # Imported lazily so this module can be used without an Oracle client.
    from ll import orasql
    db = orasql.connect(connectstring, readlobs=True)
    cursor = db.cursor()
    oldtable = orasql.Table("VSQLRULE", connection=db)
    try:
        # Normalize the existing DDL (lower case, drop "byte" length
        # semantics) so it is comparable with the generated DDL.
        oldsql = oldtable.createsql(term=False).strip().lower().replace(" byte)", ")")
    except orasql.SQLObjectNotFoundError:
        # Table does not exist yet.
        oldsql = None
    newsql = oracle_sql_table()
    if oldsql is not None and oldsql != newsql:
        if verbose:
            print(f"Dropping old table VSQLRULE in {db.connectstring()!r}", file=sys.stderr)
        cursor.execute("drop table vsqlrule")
    if oldsql != newsql:
        # Either the table was just dropped or it never existed.
        if verbose:
            print(f"Creating new table VSQLRULE in {db.connectstring()!r}", file=sys.stderr)
        cursor.execute(newsql)
        if verbose:
            print(f"Creating index VSQLRULE_I1 in {db.connectstring()!r}", file=sys.stderr)
        cursor.execute(oracle_sql_index())
        if verbose:
            print(f"Creating table comment for VSQLRULE in {db.connectstring()!r}", file=sys.stderr)
        cursor.execute(oracle_sql_tablecomment())
    if verbose:
        print(f"Creating procedure VSQLGRAMMAR_MAKE in {db.connectstring()!r}", file=sys.stderr)
    cursor.execute(oracle_sql_procedure())
    if verbose:
        print(f"Calling procedure VSQLGRAMMAR_MAKE in {db.connectstring()!r}", file=sys.stderr)
    # NOTE(review): ``scriptname`` is presumably a module-level constant
    # (the creating user recorded in the rules) — confirm; it is
    # interpolated into SQL, which is safe only for a trusted constant.
    cursor.execute(f"begin vsqlgrammar_make('{scriptname}'); end;")
    if verbose:
        print(f"Committing transaction in {db.connectstring()!r}", file=sys.stderr)
    db.commit()
def main(args:Optional[Tuple[str, ...]]=None) -> None:
    """
    Command line entry point.

    Depending on the options given this recreates the Oracle table and
    procedure (``-c``) and/or rewrites the Java source files (``-j``).
    ``args`` defaults to ``sys.argv[1:]`` (argparse behaviour for ``None``).
    """
    import argparse
    p = argparse.ArgumentParser(description="Recreate vSQL type info for the Java and Oracle implementations")
    p.add_argument("-c", "--connectstring", help="Oracle database where the table VSQLRULE and the procedure VSQLGRAMMAR_MAKE will be created")
    p.add_argument("-j", "--javapath", dest="javapath", help="Path to the Java implementation of vSQL?", type=pathlib.Path)
    p.add_argument("-v", "--verbose", dest="verbose", help="Give a progress report? (default %(default)s)", default=False, action="store_true")
    args = p.parse_args(args)
    if args.connectstring:
        recreate_oracle(args.connectstring, verbose=args.verbose)
    if args.javapath:
        JavaSource.rewrite_all_java_source_files(args.javapath, verbose=args.verbose)


if __name__ == "__main__":
    # main() returns None, so the process exits with status 0 on success.
    sys.exit(main())
| StarcoderdataPython |
34462 | <reponame>ceafdc/PythonChallenge<gh_stars>1-10
#!/usr/bin/env python3
# url: http://www.pythonchallenge.com/pc/return/5808.html
import requests
import io
import PIL.Image

# The puzzle image is behind HTTP basic auth; the credentials are the
# answer from the previous challenge level.
url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'
un = 'huge'
pw = 'file'
auth = un, pw
req = requests.get(url, auth=auth)

# Open the downloaded bytes as an image without writing to disk.
img_io = io.BytesIO(req.content)
img = PIL.Image.open(img_io)
pixels = img.load()

# The picture interleaves two half-resolution images in a checkerboard
# pattern; split pixels by the parity of (x + y) into two images.
half = img.size[0] // 2, img.size[1] // 2
img1 = PIL.Image.new('RGB', half, 'black')
img2 = PIL.Image.new('RGB', half, 'black')
pixels1 = img1.load()
pixels2 = img2.load()
for i in range(img.size[0]):
    for j in range(img.size[1]):
        if (i + j) % 2:
            pixels1[i // 2, j // 2] = pixels[i, j]
        else:
            pixels2[i // 2, j // 2] = pixels[i, j]
img1.show()
img2.show()
# next: http://www.pythonchallenge.com/pc/return/evil.html
| StarcoderdataPython |
1747593 | <filename>pydis_site/apps/api/models/bot/role.py
from __future__ import annotations
from django.core.validators import MinValueValidator
from django.db import models
from pydis_site.apps.api.models.mixins import ModelReprMixin
class Role(ModelReprMixin, models.Model):
    """
    A role on our Discord server.

    The comparison operators <, <=, >, >=, ==, != act the same as they do with Role-objects of the
    discord.py library, see https://discordpy.readthedocs.io/en/latest/api.html#discord.Role
    """

    # Discord snowflake ID; used as the primary key instead of an
    # auto-generated one.
    id = models.BigIntegerField(
        primary_key=True,
        validators=(
            MinValueValidator(
                limit_value=0,
                message="Role IDs cannot be negative."
            ),
        ),
        help_text="The role ID, taken from Discord.",
        verbose_name="ID"
    )
    # Display name of the role (Discord caps role names at 100 chars).
    name = models.CharField(
        max_length=100,
        help_text="The role name, taken from Discord."
    )
    # Colour as a single integer (0xRRGGBB), mirroring Discord's format.
    colour = models.IntegerField(
        validators=(
            MinValueValidator(
                limit_value=0,
                message="Colour hex cannot be negative."
            ),
        ),
        help_text="The integer value of the colour of this role from Discord."
    )
    # Permission bitset; BigInteger because Discord permissions exceed 32 bits.
    permissions = models.BigIntegerField(
        validators=(
            MinValueValidator(
                limit_value=0,
                message="Role permissions cannot be negative."
            ),
        ),
        help_text="The integer value of the permission bitset of this role from Discord."
    )
    # Position in the guild's role hierarchy (higher = more senior).
    position = models.IntegerField(
        help_text="The position of the role in the role hierarchy of the Discord Guild."
    )

    def __str__(self) -> str:
        """Returns the name of the current role, for display purposes."""
        return self.name

    def __lt__(self, other: Role) -> bool:
        """Compares the roles based on their position in the role hierarchy of the guild."""
        return self.position < other.position

    def __le__(self, other: Role) -> bool:
        """Compares the roles based on their position in the role hierarchy of the guild."""
        return self.position <= other.position

    class Meta:
        """Set role ordering from highest to lowest position."""

        ordering = ("-position",)
| StarcoderdataPython |
4819139 | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_debugtoolbar import DebugToolbarExtension
from flask_cors import CORS
# Extension singletons created unbound; ``create_app`` attaches them to a
# concrete application instance (Flask application-factory pattern).
db = SQLAlchemy()
toolbar = DebugToolbarExtension()
cors = CORS()
def create_app():
    """
    Flask application factory: build, configure and return the app.
    """
    app = Flask(__name__)
    # NOTE(review): debug mode is forced on unconditionally here — confirm
    # this is intended outside of development.
    app.debug = True
    # APP_SETTINGS names the configuration object to load (dotted path).
    app_settings = os.getenv('APP_SETTINGS')
    app.config['SECRET_KEY'] = os.getenv('SECRET_KEY')
    app.config.from_object(app_settings)
    # Bind the unbound extension singletons to this application instance.
    db.init_app(app)
    toolbar.init_app(app)
    cors.init_app(app)
    # Imported here to avoid a circular import at module load time.
    from project.api.users import users_blueprint
    app.register_blueprint(users_blueprint)

    @app.shell_context_processor
    def ctx():
        # Objects pre-loaded into the "flask shell" namespace.
        return {'app': app, 'db': db}

    return app
| StarcoderdataPython |
1700326 | <gh_stars>1-10
import json
import uuid
import azure.functions as func
from scraper_pipeline.models import page_content
from scraper_pipeline.process import state_processor
def main(msg: func.ServiceBusMessage, notification: func.Out[str]):
    """
    Service-Bus-triggered Azure Function.

    Deserializes the queued message into a ``PageContent``, runs it through
    the state processor, emits an optional notification message on the
    ``notification`` output binding, and returns the content as an Azure
    Table row (JSON string) for the table output binding.
    """
    json_str = msg.get_body().decode('utf-8')
    content = page_content.PageContent(**json.loads(json_str))
    proc = state_processor.StateProcessor()
    notif = proc.process(content)
    if notif:
        notification.set(json.dumps(notif._asdict()))
    # Bug fix: the original line was syntactically broken
    # (``row_key = str(<KEY>``). A random UUID is the intended row key —
    # ``uuid`` is imported at module level and otherwise unused.
    row_key = str(uuid.uuid4())
    table_row = {
        "PartitionKey": content.page_id,
        "RowKey": row_key,
        **content._asdict()
    }
    return json.dumps(table_row)
1614339 | import copy
import json
import base64
from functools import partial
import asyncio
import collections
from .values import DocStringLine, MetricValue
from .registry import Registry
from .task_manager import TaskManager
# Process-wide default registry; metrics register here unless an explicit
# registry is passed to their constructor.
REGISTRY = Registry(task_manager=TaskManager())
# Redis key of the global counter used to hand out unique gauge indexes.
DEFAULT_GAUGE_INDEX_KEY = 'GLOBAL_GAUGE_INDEX'
class WithLabels(object):
    """
    Lightweight proxy that binds a fixed ``labels`` dict to every method
    call on a wrapped metric instance.

    Attribute access returns the underlying callable with ``labels``
    pre-applied as a keyword argument; non-callable attributes are
    rejected because labels only make sense for method calls.
    """

    # Bug fix: the original declared ``__slot__`` (missing trailing "s"),
    # which is just an ordinary class attribute and silently disables the
    # intended per-instance memory optimization. ``__slots__`` is the real
    # special name.
    __slots__ = (
        "instance",
        "labels",
    )

    def __init__(self, instance, labels: dict):
        self.instance = instance
        self.labels = labels

    def __getattr__(self, item):
        # Only reached for attributes not found on the proxy itself.
        attr = getattr(self.instance, item)
        if not callable(attr):
            raise TypeError("Labels work with function only")
        return partial(attr, labels=self.labels)
class Metric(object):
    """
    Proxy object for real work objects called 'minions'.
    Use as global representation on metric.

    Samples are stored in redis under ``<name><suffix>:<packed labels>``
    keys; the set ``<name>_group`` indexes every sample key of the metric.
    """

    minion = None
    type = ''

    def __init__(self, name: str,
                 documentation: str, labelnames: list=None,
                 registry: Registry=REGISTRY):
        self.name = name
        self.documentation = documentation
        self.labelnames = labelnames or []
        self.registry = registry
        # Register immediately so the metric shows up in collection.
        self.registry.add_metric(self)

    def doc_string(self) -> DocStringLine:
        """Build the HELP/TYPE line descriptor for this metric."""
        return DocStringLine(self.name, self.type, self.documentation)

    async def collect(self) -> list:
        """Fetch every stored sample of this metric from redis."""
        redis = self.registry.redis
        group_key = self.get_metric_group_key()
        members = await redis.smembers(group_key)
        samples = []
        for metric_key in members:
            name, packed_labels = self.parse_metric_key(metric_key)
            labels = self.unpack_labels(packed_labels)
            value = await redis.get(metric_key)
            if value is None:
                # Sample expired: drop the stale index entry as well.
                await redis.srem(group_key, metric_key)
                continue
            samples.append(MetricValue(
                name=name,
                labels=labels,
                value=value.decode('utf-8')
            ))
        return samples

    def get_metric_group_key(self):
        """Redis key of the set holding all sample keys of this metric."""
        return self.name + "_group"

    def get_metric_key(self, labels, suffix: str=None):
        """Redis key of one sample: ``<name><suffix>:<packed labels>``."""
        packed = self.pack_labels(labels).decode('utf-8')
        return "".join((self.name, suffix or "", ":", packed))

    def parse_metric_key(self, key) -> (str, dict):
        """Split a redis sample key back into (name, packed labels)."""
        return key.decode('utf-8').split(':', 1)

    def pack_labels(self, labels: dict) -> bytes:
        """Serialize labels deterministically (sorted JSON, base64)."""
        serialized = json.dumps(labels, sort_keys=True)
        return base64.b64encode(serialized.encode('utf-8'))

    def unpack_labels(self, labels: str) -> dict:
        """Inverse of :meth:`pack_labels`."""
        return json.loads(base64.b64decode(labels).decode('utf-8'))

    def _check_labels(self, labels):
        """Require exactly the label names declared for this metric."""
        if set(labels.keys()) != set(self.labelnames):
            raise ValueError("Expect define all labels {}, got only {}".format(
                ", ".join(self.labelnames),
                ", ".join(labels.keys())
            ))

    def labels(self, *args, **kwargs):
        """Return a proxy with the given label values pre-bound."""
        bound = dict(zip(self.labelnames, args))
        bound.update(kwargs)
        return WithLabels(instance=self, labels=bound)

    async def cleanup(self):
        """Hook for subclasses that keep local state; no-op by default."""
        pass
class Counter(Metric):
    """Monotonically increasing counter backed by redis ``INCRBY``."""

    type = 'counter'

    def inc(self, value: int=1, labels=None):
        """Schedule an increment on the task manager without awaiting it."""
        labels = labels or {}
        self._check_labels(labels)
        self.registry.task_manager.add_task(self._a_inc(value, labels))

    async def a_inc(self, value: int = 1, labels=None):
        """Increment and await the new counter value."""
        labels = labels or {}
        self._check_labels(labels)
        return await self._a_inc(value, labels)

    async def _a_inc(self, value: int = 1, labels=None):
        """
        Calculate metric with labels redis key.
        Add this key to set of key for this metric.
        """
        if not isinstance(value, int):
            raise ValueError("Value should be int, got {}".format(type(value)))
        group_key = self.get_metric_group_key()
        metric_key = self.get_metric_key(labels)
        # Index the sample key and bump its value atomically.
        transaction = self.registry.redis.multi_exec()
        transaction.sadd(group_key, metric_key)
        answer = transaction.incrby(metric_key, int(value))
        await transaction.execute()
        return await answer
class Summary(Metric):
    """Summary metric: tracks a running sum and an observation count."""

    type = 'summary'

    async def a_observe(self, value: float, labels=None):
        """Record one observation and await completion."""
        labels = labels or {}
        self._check_labels(labels)
        return await self._a_observe(value, labels)

    def observe(self, value, labels=None):
        """Schedule an observation on the task manager without awaiting it."""
        labels = labels or {}
        self._check_labels(labels)
        self.registry.task_manager.add_task(self._a_observe(value, labels))

    async def _a_observe(self, value: float, labels=None):
        # Update <name>_sum and <name>_count atomically and make sure both
        # sample keys are indexed in the metric's group set.
        group_key = self.get_metric_group_key()
        sum_key = self.get_metric_key(labels, "_sum")
        count_key = self.get_metric_key(labels, "_count")
        transaction = self.registry.redis.multi_exec()
        transaction.sadd(group_key, count_key, sum_key)
        answer = transaction.incrbyfloat(sum_key, float(value))
        transaction.incr(count_key)
        await transaction.execute()
        return await answer
class Gauge(Metric):
    """
    Gauge metric (value can go up and down).

    Values live in redis with a TTL (``expire``); a local mirror
    (``gauge_values``) is kept so a periodic refresher can re-write the
    keys before they expire, keeping values alive while this process is.
    Each process gets a unique ``gauge_index`` label so gauges from
    different processes do not clobber each other.
    """

    type = 'gauge'
    # Default TTL (seconds) for gauge keys in redis.
    DEFAULT_EXPIRE = 60

    def __init__(self, *args,
                 expire=DEFAULT_EXPIRE,
                 refresh_enable=True,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.refresh_enable = refresh_enable
        # True once refresh_values has been registered with the task manager.
        self._refresher_added = False
        # Guards gauge_values and the redis updates derived from it.
        self.lock = asyncio.Lock()
        # Local mirror of this process' gauge values, keyed by redis key.
        self.gauge_values = collections.defaultdict(lambda: 0)
        self.expire = expire
        # Lazily-assigned per-process gauge index (see get_gauge_index).
        self.index = None

    async def add_refresher(self):
        # Register the periodic refresher once, if refreshing is enabled.
        if self.refresh_enable and not self._refresher_added:
            await self.registry.task_manager.add_refresher(
                self.refresh_values
            )
            self._refresher_added = True

    def _set_internal(self, key: str, value: float):
        # Update the local mirror (caller must hold self.lock).
        self.gauge_values[key] = value

    def _inc_internal(self, key: str, value: float):
        # Increment the local mirror (caller must hold self.lock).
        self.gauge_values[key] += value

    def inc(self, value: float, labels=None):
        """Schedule an increment without awaiting it."""
        labels = labels or {}
        self._check_labels(labels)
        self.registry.task_manager.add_task(self._a_inc(value, labels))

    async def a_inc(self, value: float = 1, labels=None):
        """Increment and await the new gauge value."""
        labels = labels or {}
        self._check_labels(labels)
        return await self._a_inc(value, labels)

    def dec(self, value: float, labels=None):
        """Schedule a decrement without awaiting it."""
        labels = labels or {}
        self._check_labels(labels)
        self.registry.task_manager.add_task(self._a_inc(-value, labels))

    async def a_dec(self, value: float = 1, labels=None):
        """Decrement and await the new gauge value."""
        labels = labels or {}
        self._check_labels(labels)
        return await self._a_inc(-value, labels)

    async def _a_inc(self, value: float, labels: dict):
        async with self.lock:
            group_key = self.get_metric_group_key()
            # NOTE(review): this mutates the caller-supplied labels dict by
            # adding 'gauge_index' — confirm callers never reuse the dict.
            labels['gauge_index'] = await self.get_gauge_index()
            metric_key = self.get_metric_key(labels)
            tr = self.registry.redis.multi_exec()
            tr.sadd(group_key, metric_key)
            future_answer = tr.incrbyfloat(metric_key, float(value))
            # TTL keeps stale gauges from dead processes out of collection.
            tr.expire(metric_key, self.expire)
            self._inc_internal(metric_key, float(value))
            await tr.execute()
            await self.add_refresher()
            return await future_answer

    def set(self, value: float, labels=None):
        """Schedule setting the gauge to ``value`` without awaiting it."""
        labels = labels or {}
        self._check_labels(labels)
        self.registry.task_manager.add_task(self._a_set(value, labels))

    async def a_set(self, value: float=1, labels=None):
        """Set the gauge to ``value`` and await completion."""
        labels = labels or {}
        self._check_labels(labels)
        return await self._a_set(value, labels)

    async def _a_set(self, value: float, labels: dict):
        async with self.lock:
            group_key = self.get_metric_group_key()
            # NOTE(review): mutates the caller's labels dict (see _a_inc).
            labels['gauge_index'] = await self.get_gauge_index()
            metric_key = self.get_metric_key(labels)
            tr = self.registry.redis.multi_exec()
            tr.sadd(group_key, metric_key)
            future_answer = tr.set(
                metric_key, float(value),
                expire=self.expire
            )
            self._set_internal(metric_key, float(value))
            await tr.execute()
            await self.add_refresher()
            return await future_answer

    async def get_gauge_index(self):
        # Lazily allocate the per-process index on first use.
        if self.index is None:
            self.index = await self.make_gauge_index()
        return self.index

    async def make_gauge_index(self):
        # Atomically claim a fresh index from the shared redis counter.
        index = await self.registry.redis.incr(
            DEFAULT_GAUGE_INDEX_KEY
        )
        await self.registry.task_manager.add_refresher(
            self.refresh_values
        )
        return index

    async def refresh_values(self):
        """Re-write all locally mirrored values to reset their TTL."""
        async with self.lock:
            for key, value in self.gauge_values.items():
                await self.registry.redis.set(
                    key, value, expire=self.expire
                )

    async def cleanup(self):
        """Remove this process' gauge keys and index entries from redis."""
        async with self.lock:
            group_key = self.get_metric_group_key()
            keys = list(self.gauge_values.keys())
            if len(keys) == 0:
                return
            tr = self.registry.redis.multi_exec()
            tr.srem(group_key, *keys)
            tr.delete(*keys)
            await tr.execute()
class Histogram(Metric):
    """
    Histogram metric with cumulative ("le") buckets.

    ``buckets`` is kept sorted in descending order so an observation can
    stop scanning at the first bucket smaller than its value: every
    remaining bucket is smaller as well.
    """

    type = 'histogram'

    def __init__(self, *args, buckets: list, **kwargs):
        super().__init__(*args, **kwargs)
        self.buckets = sorted(buckets, reverse=True)

    async def a_observe(self, value: float, labels=None):
        """Record one observation and await completion."""
        labels = labels or {}
        self._check_labels(labels)
        return await self._a_observe(value, labels)

    def observe(self, value, labels=None):
        """Schedule an observation on the task manager without awaiting it."""
        labels = labels or {}
        self._check_labels(labels)
        self.registry.task_manager.add_task(
            self._a_observe(value, labels)
        )

    async def _a_observe(self, value: float, labels):
        group_key = self.get_metric_group_key()
        sum_key = self.get_metric_key(labels, '_sum')
        counter_key = self.get_metric_key(labels, '_count')
        # Bug fix: work on a copy so the caller's labels dict is not
        # polluted with the transient 'le' key (the original mutated it
        # in place). The generated redis keys are identical.
        bucket_labels = dict(labels)
        tr = self.registry.redis.multi_exec()
        for bucket in self.buckets:
            # Buckets are descending: once value exceeds a bucket, it
            # exceeds all remaining (smaller) buckets too.
            if value > bucket:
                break
            bucket_labels['le'] = bucket
            bucket_key = self.get_metric_key(bucket_labels, '_bucket')
            tr.sadd(group_key, bucket_key)
            tr.incr(bucket_key)
        tr.sadd(group_key, sum_key, counter_key)
        tr.incr(counter_key)
        tr.incrbyfloat(sum_key, float(value))
        await tr.execute()

    def _get_missing_metric_values(self, redis_metric_values):
        """
        Return ``(missing, sc_flag)``: the JSON-encoded label sets for
        which no bucket sample exists in redis yet, and a flag saying
        whether the empty-label ``*_sum``/``*_count`` pair is also missing.
        """
        missing_metrics_values = set(
            json.dumps({"le": b}) for b in self.buckets
        )
        # Bug fix: the original used ``set("{}")``, which builds the set of
        # *characters* {'{', '}'} instead of the intended singleton {"{}"}.
        # It only worked by coincidence because the bucket keys re-added
        # for the empty label group coincided with the initial entries.
        groups = {"{}"}
        # If the flag stays raised we must emit zero-valued *_sum and
        # *_count samples for the empty label set.
        sc_flag = True
        for mv in redis_metric_values:
            key = json.dumps(mv.labels, sort_keys=True)
            labels = copy.copy(mv.labels)
            if 'le' in labels:
                del labels['le']
            group = json.dumps(labels, sort_keys=True)
            if group == "{}":
                sc_flag = False
            if group not in groups:
                # First time this label combination is seen: expect one
                # bucket sample per configured bucket.
                for b in self.buckets:
                    labels['le'] = b
                    missing_metrics_values.add(
                        json.dumps(labels, sort_keys=True)
                    )
                groups.add(group)
            if key in missing_metrics_values:
                missing_metrics_values.remove(key)
        return missing_metrics_values, sc_flag

    async def collect(self) -> list:
        """Collect stored samples, padding absent buckets with zeroes."""
        redis_metrics = await super().collect()
        missing_metrics_values, sc_flag = \
            self._get_missing_metric_values(
                redis_metrics
            )
        missing_values = [
            MetricValue(
                self.name + "_bucket",
                labels=json.loads(ls),
                value=0
            ) for ls in missing_metrics_values
        ]
        if sc_flag:
            missing_values.append(
                MetricValue(
                    self.name + "_sum",
                    labels={},
                    value=0
                )
            )
            missing_values.append(
                MetricValue(
                    self.name + "_count",
                    labels={},
                    value=0
                )
            )
        return redis_metrics + missing_values
| StarcoderdataPython |
3284349 | <filename>foundation/erpnext_foundation/doctype/service_provider/service_provider.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, EOSSF and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import get_datetime
from frappe.model.document import Document
class ServiceProvider(Document):
    def autoname(self):
        # Frappe naming hook: use the human-readable title as the document
        # name (primary key) instead of an auto-generated series.
        self.name = self.title
def send_alert_to_inactive_service_providers():
    # Presumably run periodically from the scheduler. Equality (== 80)
    # means the reminder is sent exactly once, on the day the inactivity
    # threshold is crossed, rather than on every run afterwards.
    for service_provider in frappe.get_all("Service Provider", fields=['email', 'title']):
        if get_last_login_diff(service_provider.email) == 80:
            send_reminder(service_provider)
def send_reminder(service_provider_details):
    # Render the inactivity reminder template with the provider's details
    # (``email`` and ``title``) and send it to their address.
    message = frappe.render_template("foundation/templates/emails/inactivity_reminder.md", service_provider_details)
    frappe.sendmail(recipients=service_provider_details.email, subject="About account inactivity", message=message)
def get_last_login_diff(user):
    """
    Return the number of days between today and the user's last login.
    """
    last_login = frappe.db.get_value("User", user, ["last_login"])
    # Bug fix: removed a leftover debugging loop that printed the result
    # nine times (and used Python-2-only ``xrange``) before returning.
    # NOTE(review): ``last_login`` may be None for users who never logged
    # in — confirm get_datetime(None) is acceptable upstream.
    return (get_datetime() - get_datetime(last_login)).days
| StarcoderdataPython |
3380946 | <reponame>TachibanaET/CODSUG2<filename>source/model_fine_tune.py<gh_stars>0
import json
import os
import numpy as np
import argparse
from tqdm import tqdm
from utility.encode_bpe import BPEEncoder_ja
import torch
import torch.nn.functional as F
import torch.optim as optim
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import Dataset, DataLoader
import requests
class GPT2FineTune():
    """
    Fine-tune a pretrained Japanese GPT-2 language model.

    Parameters:
        h_params: dict of hyper-parameters; must contain at least
            ``model_path`` (checkpoint directory) and ``device``.
        bpe_encoder: tokenizer used to encode training text.
    """

    def __init__(self, h_params, bpe_encoder):
        self.h_params = h_params
        self.tokenizer = bpe_encoder
        self.pre_trained_model = GPT2LMHeadModel.from_pretrained(self.h_params['model_path'])

    def fine_tune(self):
        """Move the model to the configured device and put it in training mode."""
        torch.backends.cudnn.benchmark = True
        # Bug fix: the original referenced ``self.model``, which is never
        # assigned anywhere (the attribute is ``pre_trained_model``) and
        # would raise AttributeError at runtime.
        self.pre_trained_model = self.pre_trained_model.to(self.h_params['device'])
        if self.h_params['device'] == 'cuda':
            self.pre_trained_model = torch.nn.DataParallel(self.pre_trained_model)
        self.pre_trained_model.train()
if __name__ == '__main__':
    # Hyper-parameters for fine-tuning / generation.
    h_params = {
        'temperature' : 1,
        'top_k' : 40,
        'top_p' : 0.9,
        'batch_size' : 64,
        'epochs' : 50,
        'learning_rate' : 1e-4,
        'warmup_steps' : 5000,
        'max_seq_len' : 256,
        'device' : 'cuda' if torch.cuda.is_available() else 'cpu',
        'model_path' : '/workspace/source/models/gpt2-pytorch-model-medium/',
    }
    # Byte-pair-encoding vocabulary and emoji table for the Japanese
    # tokenizer (expected in the current working directory).
    with open('ja-bpe.txt') as f:
        bpe = f.read().split('\n')
    with open('emoji.json') as f:
        emoji = json.loads(f.read())
    bpe_encoder = BPEEncoder_ja(bpe, emoji)
    gpt2_fine_tune = GPT2FineTune(
        h_params = h_params,
        bpe_encoder = bpe_encoder
    )
| StarcoderdataPython |
3344832 | <gh_stars>10-100
from .target import *
from .enumerator import Enumerator, start_tls
| StarcoderdataPython |
185195 | <reponame>osamuaoki/fun2prog
#!/usr/bin/python
# vi:ts=4:sts=4:et
# == c-list
# 0 context-switches
# 0 cpu-migrations
# 784 page-faults
# 484275890 cycles
# 131868649 stalled-cycles-frontend
# 15810580 stalled-cycles-backend
# 224008635 instructions
# 63839867 branches
# 436925 branch-misses
# 0.236179310 seconds time elapsed
import sys, os, re
def tableline(item):
    # Emit one AsciiDoc table row: the counter name followed by the value
    # measured for each of the four implementations (reads global ``perf``).
    print "|" + item + "|" + perf[0][item] + "|" + perf[1][item] + "|" + perf[2][item] + "|" + perf[3][item]
perf = []
# Parse four consecutive "perf stat" reports from stdin. Each report is
# one header line (e.g. "== c-list") followed by ten lines where the
# value occupies the first 19 columns and the counter name the rest.
for j in range(0,4):
    data = {}
    line = sys.stdin.readline()  # skip the "== ..." header line
    for i in range(0,10):
        line = sys.stdin.readline()
        data[line[19:].strip()] = line[:19].strip()
    perf.append(data)
# Emit the comparison table in AsciiDoc format.
print "|Performance counter stats|C|Vala (Gee)|Python|Vala (GLib)"
tableline("context-switches")
tableline("cpu-migrations")
tableline("page-faults")
tableline("cycles")
tableline("stalled-cycles-frontend")
tableline("stalled-cycles-backend")
tableline("instructions")
tableline("branches")
tableline("branch-misses")
tableline("seconds time elapsed")
| StarcoderdataPython |
# Standard imports
import cv2
import argparse
import numpy as np

ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=False, help='Path to the image')
args = vars(ap.parse_args())

# Read image
image = cv2.imread(args['image'])
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
dim = (320, 240)
image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)

# Thresholds selecting the target colour range.
# NOTE(review): these operate on raw BGR channels (no HSV conversion) —
# confirm that is intended.
lower = np.array([0, 106, 190], dtype = "uint8")
upper = np.array([255, 255, 255], dtype = "uint8")

# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
mask = cv2.GaussianBlur(mask, (3,3), 0)

# NOTE(review): two-value unpacking matches OpenCV 2.x/4.x; OpenCV 3.x
# returns three values from findContours — confirm the target version.
(cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) > 0:
    # Draw a green bounding box around the largest detected contour.
    cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
    x,y,w,h = cv2.boundingRect(cnt)
    image = cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)

# print('mask', mask)
# NOTE(review): ``output`` is computed but never displayed or saved.
output = cv2.bitwise_and(image, image, mask = mask)

# show the images
print("shapes: ", image.shape, mask.shape)
cv2.imshow("images", np.hstack([image]))
cv2.waitKey(0)
135876 | import os
from GenericBase import GenericBase
from STAPLERerror import VirtualIOError
from STAPLERerror import STAPLERerror
import utils
class samtools_index(GenericBase):
    """Class for creating command lines for samtools index.

    Parameters:
    in_cmd: String containing a command line
    in_dir: Directory object containing input files
    out_dir: Directory object containing output files
    NOTICE! Keep the directory objects up to date about file edits!

    Attributes:
    name: Name of the function.
    input_type: Input types accepted by this application.
    output_types: List of output types produced by the application.
    mandatory_args: Args the user be provided in in_cmd when initializing.
    user_mandatory_args: Args the user must provide.
    remove_user_args: Args that will be removed from the final command.
    optional_args: Args that may be part of the command line.
    in_cmd: Command entered by user.
    parsed_cmd: Final output command as option:value dict.
    file_names: Names of output files.
    command_ids: File names of input file(s) with no file extensions.

    Methods:
    get_cmd: Method for getting the final cmd line string for output.
    """

    name = 'stapler_samtools_index'
    #Accept all defined types:
    input_types = {'.bam'}
    # Index files are written next to the input, so no separate output type.
    output_types = []
    require_output_dir = False
    hidden_mandatory_args = ['--!i']
    user_mandatory_args = []
    remove_user_args = user_mandatory_args
    user_optional_args = ['-b', '-c', '-m']
    parallelizable = True
    help_description = '''
Tested with samtools 1.2.

The index files are generated into the input directory.
'''

    def _select_IO(self, out_cmd, in_dir, out_dir):
        """Infers the input and output file paths.

        This method must keep the directory objects up to date of the file
        edits!

        Parameters:
        in_cmd: A dict containing the command line.
        in_dir: Input directory (instance of filetypes.Directory).
        out_dir: Output directory (instance of filetypes.Directory).

        Returns:
        out_cmd: Dict containing the output commands
        command_identifier: Input file name based identifier for the current command

        Raises:
        VirtualIOError: No valid input file can be found.
        """
        IO_files = {}
        # NOTE(review): ``file_names`` is initialized but never used in
        # this method.
        file_names = set()
        for fl in in_dir.files:
            if self.name not in fl.users:
                if utils.splitext(fl.name)[-1] in self.input_types:
                    # Infer input file
                    IO_files['--!i'] = os.path.join(in_dir.path, fl.name)
                    command_ids = [utils.infer_path_id(IO_files['--!i'])]
                    in_dir.use_file(fl.name, self.name)
                    # Add index file to the input directory
                    in_dir.add_file(fl.name + '.bai')
                    break
        if not IO_files:
            raise VirtualIOError('No more unused input files')
        out_cmd.update(IO_files)
        return out_cmd, command_ids

    def get_cmd(self):
        """Returns the final command line.

        Returns:
        final_cmd: List of command line produced by the object (line breaks not allowed within command lines!).
        """
        run_command = utils.parse_config(self.name, 'cmd_name', 'execute')
        final_cmd = [run_command]
        # Options first, positional input file last — matches
        # "samtools index [-bc] [-m INT] <in.bam>".
        for arg, val in self.out_cmd.iteritems():
            if arg in {'--!i', '--!o'}: continue
            final_cmd.append(arg + ' ' + val)
        final_cmd.append(self.out_cmd['--!i'])
        return [' '.join(final_cmd)]
class samtools_rmdup(GenericBase):
    """Class for creating command lines for samtools rmdup.

    Parameters:
    in_cmd: String containing a command line
    in_dir: Directory object containing input files
    out_dir: Directory object containing output files
    NOTICE! Keep the directory objects up to date about file edits!

    Attributes:
    name: Name of the function.
    input_type: Input types accepted by this application.
    output_types: List of output types produced by the application.
    mandatory_args: Args the user be provided in in_cmd when initializing.
    user_mandatory_args: Args the user must provide.
    remove_user_args: Args that will be removed from the final command.
    optional_args: Args that may be part of the command line.
    in_cmd: Command entered by user.
    parsed_cmd: Final output command as option:value dict.
    file_names: Names of output files.
    command_ids: File names of input file(s) with no file extensions.

    Methods:
    get_cmd: Method for getting the final cmd line string for output.
    """

    name = 'stapler_samtools_rmdup'
    #Accept all defined types:
    input_types = {'.bam'}
    output_types = ['.bam']
    hidden_mandatory_args = ['--!i', '--!o']
    user_mandatory_args = []
    remove_user_args = user_mandatory_args
    user_optional_args = ['-s', '-S']
    parallelizable = True
    help_description = '''
Tested with samtools 0.1.19

Notice that this function does not seem to work in the current version of
samtools (1.2), use of older version is therefore recommended. Beware module
conflicts!
'''

    def _select_IO(self, out_cmd, in_dir, out_dir):
        """Infers the input and output file paths.

        This method must keep the directory objects up to date of the file
        edits!

        Parameters:
        in_cmd: A dict containing the command line.
        in_dir: Input directory (instance of filetypes.Directory).
        out_dir: Output directory (instance of filetypes.Directory).

        Returns:
        out_cmd: Dict containing the output commands
        command_identifier: Input file name based identifier for the current command

        Raises:
        VirtualIOError: No valid input file can be found.
        """
        IO_files = {}
        for fl in in_dir.files:
            if self.name not in fl.users:
                if utils.splitext(fl.name)[-1] in self.input_types:
                    IO_files['--!i'] = os.path.join(in_dir.path, fl.name)
                    command_ids = [utils.infer_path_id(IO_files['--!i'])]
                    in_dir.use_file(fl.name, self.name)
                    assert len(self.output_types) == 1, 'Several output ' \
                                                       'types, override ' \
                                                       'this method!'
                    output_name = utils.splitext(fl.name)[0] + \
                                  self.output_types[0]
                    output_path = os.path.join(out_dir.path, output_name)
                    IO_files['--!o'] = output_path
                    out_dir.add_file(output_name)
                    break
        if not IO_files:
            raise VirtualIOError('No more unused input files')
        out_cmd.update(IO_files)
        return out_cmd, command_ids

    def get_cmd(self):
        """Returns the final command line.

        Returns:
        final_cmd: List of command line produced by the object (line breaks not allowed within command lines!).
        """
        run_command = utils.parse_config(self.name, 'cmd_name', 'execute')
        final_cmd = [run_command]
        # Bug fix: "samtools rmdup [-sS] <input.srt.bam> <output.bam>"
        # expects options *before* the positional input/output paths; the
        # original appended -s/-S after them. This now also matches the
        # option-first ordering used by samtools_index.get_cmd.
        for arg, val in self.out_cmd.iteritems():
            if arg in {'--!i', '--!o'}: continue
            final_cmd.append(arg + ' ' + val)
        final_cmd.append(self.out_cmd['--!i'])
        final_cmd.append(self.out_cmd['--!o'])
        return [' '.join(final_cmd)]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.