id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3328883 | <reponame>adrianmoo2/leetcode-workthroughs
def helperFunction(self, root, result):
    """Append node values to *result* via an in-order (left, root, right) walk."""
    if not root:
        return
    self.helperFunction(root.left, result)
    result.append(root.val)
    self.helperFunction(root.right, result)
def inorderTraversal(self, root):
    """
    :type root: TreeNode
    :rtype: List[int]

    Collect node values in in-order sequence by delegating to helperFunction.
    """
    collected = []
    self.helperFunction(root, collected)
    return collected
| StarcoderdataPython |
168604 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mptt.fields
class Migration(migrations.Migration):
    # Adds the FilingCode model: a unique code/label pair organized as an
    # MPTT tree (self-referential parent link) in the 'development' app.

    dependencies = [
        ('development', '0035_developmentproject_misc_textareas'),
    ]

    operations = [
        migrations.CreateModel(
            name='FilingCode',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('code', models.CharField(unique=True, max_length=200)),
                ('label', models.CharField(unique=True, max_length=200)),
                # lft/rght/tree_id/level are bookkeeping columns managed by django-mptt
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                # self-referential tree link; null/blank allowed for root nodes
                ('parent', mptt.fields.TreeForeignKey(null=True, blank=True, related_name='children', to='development.FilingCode')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
3346077 | '''create files contains estimated generalization errors for model
INPUT FILE
WORKING/transactions-subset2.pickle
OUTPUT FILES
WORKING/ege_week/YYYY-MM-DD/MODEL-TD/HP-FOLD.pickle dict all_results
WORKING/ege_month/YYYY-MM-DD/MODEL-TD/HP-FOLD.pickle dict all_results
'''
import collections
import cPickle as pickle
import datetime
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
import sys
import warnings
from Bunch import Bunch
from DataframeAppender import DataframeAppender
from directory import directory
from Logger import Logger
import parse_command_line
def usage(msg=None):
    'Print invocation help (optionally preceded by an error message) and exit with status 1.'
    if msg is not None:
        print 'invocation error: ' + str(msg)
    print 'usage: python ege_week.py YYYY-MM-DD <other options>'
    print ' YYYY-MM-DD mid-point of week; analyze -3 to +3 days'
    print ' --month optional; test on next month, not next week'
    print ' --model {lr|rf} which model to run'
    print ' --td <range> training_days'
    print ' --hpd <range> required iff model is rf; max_depths to model'
    print ' --hpw <range> required iff model is rf; weight functions to model'
    print ' --hpx <form> required iff mode is lr; transformation to x'
    print ' --hpy <form> required iff mode is lr; transformation to y'
    print ' --test optional; if present, program runs in test mode'
    print 'where'
    print ' <form> is {lin|log}+ saying whether the variable is in natural or log units'
    print ' <range> is start [stop [step]], just like Python\'s range(start,stop,step)'
    sys.exit(1)
# Closed interval of dates: both endpoints are included.
DateRange = collections.namedtuple('DateRange', 'first last')


def make_DateRange(mid, half_range):
    """Return a DateRange covering half_range days on each side of mid."""
    delta = datetime.timedelta(half_range)
    return DateRange(first=mid - delta, last=mid + delta)
def make_predictors():
    '''Return tuple of (column name, transformation) pairs, in x-array column order.

    transformation is None (use raw values), 'log', or 'log1p'; it is applied
    by x() only when the x transform mode is 'log'.

    Include only features of the census and tax roll, not the assessment,
    because previous work found that using features derived from the
    assessment degraded estimated generalization errors.

    NOTE: the columns in the x_array objects passed to scikit learn are in
    exactly this order, which is why an ordered tuple (not a dict) is
    returned.  (An earlier version returned a dictionary, which invalidated
    the assumption about column order in x.)
    '''
    result = (  # the columns in the x_arrays are in this order
        ('fraction.owner.occupied', None),
        ('FIREPLACE.NUMBER', 'log1p'),
        ('BEDROOMS', 'log1p'),
        ('BASEMENT.SQUARE.FEET', 'log1p'),
        ('LAND.SQUARE.FOOTAGE', 'log'),
        ('zip5.has.industry', None),
        ('census.tract.has.industry', None),
        ('census.tract.has.park', None),
        ('STORIES.NUMBER', 'log1p'),
        ('census.tract.has.school', None),
        ('TOTAL.BATHS.CALCULATED', 'log1p'),
        ('median.household.income', 'log'),  # not log feature in earlier version
        ('LIVING.SQUARE.FEET', 'log'),
        ('has.pool', None),
        ('zip5.has.retail', None),
        ('census.tract.has.retail', None),
        ('is.new.construction', None),
        ('avg.commute', None),
        ('zip5.has.park', None),
        ('PARKING.SPACES', 'log1p'),
        ('zip5.has.school', None),
        ('TOTAL.ROOMS', 'log1p'),
        ('age', None),
        ('age2', None),
        ('effective.age', None),
        ('effective.age2', None),
    )
    return result
class CensusAdjacencies(object):
    """Adjacency lookup for census tracts, loaded from a pickle in WORKING."""

    def __init__(self):
        pickle_path = directory('working') + 'census_tract_adjacent.pickle'
        with open(pickle_path, 'rb') as f:
            self.adjacent = pickle.load(f)

    def adjacen(self, census_tract):
        'Return the tracts adjacent to census_tract, or None if unknown.'
        return self.adjacent.get(census_tract, None)
def make_control(argv):
    '''Return the Bunch of control parameters parsed from command-line argv.'''
    print 'argv'
    pprint(argv)
    if len(argv) < 3:
        usage('missing invocation options')

    def make_sale_date(s):
        # parse a 'YYYY-MM-DD' string into a datetime.date
        year, month, day = s.split('-')
        return datetime.date(int(year), int(month), int(day))

    pcl = parse_command_line.ParseCommandLine(argv)
    # raw invocation arguments; optional switches default to None/False
    arg = Bunch(
        base_name=argv[0].split('.')[0],
        hpd=pcl.get_range('--hpd') if pcl.has_arg('--hpd') else None,
        hpw=pcl.get_range('--hpw') if pcl.has_arg('--hpw') else None,
        hpx=pcl.get_arg('--hpx') if pcl.has_arg('--hpx') else None,
        hpy=pcl.get_arg('--hpy') if pcl.has_arg('--hpy') else None,
        model=pcl.get_arg('--model'),
        month=pcl.has_arg('--month'),
        sale_date=make_sale_date(argv[1]),
        td=pcl.get_range('--td'),
        test=pcl.has_arg('--test'),
    )
    print 'arg'
    print arg

    # check for missing options
    if arg.model is None:
        usage('missing --model')
    if arg.td is None:
        usage('missing --td')

    # validate combinations of invocation options
    if arg.model == 'lr':
        if arg.hpx is None or arg.hpy is None:
            usage('model lr requires --hpx and --hpy')
    elif arg.model == 'rf':
        if arg.hpd is None or arg.hpw is None:
            usage('model rf requires --hpd and --hpw')
    else:
        usage('bad --model: %s' % str(arg.model))

    random_seed = 123
    now = datetime.datetime.now()
    predictors = make_predictors()
    print 'number of predictors', len(predictors)
    # +/- 15 days for month runs, +/- 3 days for week runs
    sale_date_range = make_DateRange(arg.sale_date, 15 if arg.month else 3)
    log_file_name = arg.base_name + '.' + now.isoformat('T') + '.log'
    # dir_out: WORKING/ege_[month|week]/<sale_date>/
    dir_out = (directory('working') +
               'ege_' +
               ('month' if arg.month else 'week') +
               '/' + argv[1] + '/'
               )
    debug = False
    test = arg.test
    b = Bunch(
        arg=arg,
        census_adjacencies=CensusAdjacencies(),
        date_column='python.sale_date',
        debug=debug,
        dir_out=dir_out,
        n_folds=2 if test else 10,
        n_rf_estimators=100 if test else 1000,  # num trees in a random forest
        path_in_old=directory('working') + 'transactions-subset2.pickle',
        path_in=directory('working') + 'transactions-subset3-subset-train.csv',
        path_log=directory('log') + log_file_name,
        predictors=predictors,
        price_column='SALE.AMOUNT',
        random_seed=random_seed,
        relevant_date_range=DateRange(first=datetime.date(2003, 1, 1), last=datetime.date(2009, 3, 31)),
        sale_date_range=sale_date_range,
        start_time=now,
        test=test,
        use_old_input=True,
    )
    return b
def elapsed_time(start_time):
    """Return the wall-clock time elapsed since start_time as a timedelta."""
    now = datetime.datetime.now()
    return now - start_time
def x(mode, df, predictors):
    '''Return 2D np.array of predictor columns, optionally log-transformed.

    mode: None | 'lin' | 'linear' | 'log'.  Per-column transformations
    ('log' or 'log1p', from the predictors pairs) apply only in 'log' mode.
    Columns appear in the order given by predictors; rows follow df.

    RETURNS np.array 2D of np.float64, shape (len(df), len(predictors))
    '''
    def transformed(values, transformation):
        if mode is None:
            return values
        if mode == 'linear' or mode == 'lin':
            return values
        if mode == 'log':
            if transformation is None:
                return values
            if transformation == 'log':
                return np.log(values)
            if transformation == 'log1p':
                return np.log1p(values)
            raise RuntimeError('bad transformation: ' + str(transformation))
        raise RuntimeError('bad mode:' + str(mode))

    # build one row per predictor, then transpose to samples-by-features
    columns = [transformed(df[name].values, t) for name, t in predictors]
    return np.array(columns, dtype=np.float64).T
def y(mode, df, price_column):
    '''Return 1D np.float64 array of the price column from df.

    When mode == 'log' the prices are natural-log transformed; any other
    mode returns the raw values.  df itself is not modified.
    '''
    df2 = df.copy(deep=True)
    if mode == 'log':
        df2[price_column] = pd.Series(np.log(df[price_column]), index=df.index)
    # .values instead of .as_matrix(): as_matrix() was deprecated in
    # pandas 0.23 and removed in pandas 1.0; .values is equivalent here
    array = np.array(df2[price_column].values, np.float64)
    return array
def mask_in_date_range(df, date_range):
    """Return boolean Series: True where the sale date lies in date_range (inclusive)."""
    sale_dates = df['sale.python_date']
    not_too_early = sale_dates >= date_range.first
    not_too_late = sale_dates <= date_range.last
    return not_too_early & not_too_late
def samples_in_date_range(df, date_range):
    'Return a new DataFrame holding only the rows whose sale date falls in date_range.'
    mask = mask_in_date_range(df, date_range)
    return df[mask]
def add_age(df, sale_date):
    '''Return new df with extra columns for age and effective age.

    Adds 'age', 'age2', 'effective.age', 'effective.age2' computed from the
    'sale.year', 'YEAR.BUILT' and 'EFFECTIVE.YEAR.BUILT' columns.

    NOTE(review): the sale_date parameter is unused; ages come from the
    'sale.year' column -- confirm whether sale_date was meant to be used.
    '''
    column_names = df.columns.tolist()
    if 'age' in column_names:
        # debugging aid: the asserts below are about to fail
        print column_names
        print 'age in column_names'
        pdb.set_trace()
    # the age columns must not already exist, or they would be clobbered
    assert('age' not in column_names)
    assert('age2' not in column_names)
    assert('effective.age' not in column_names)
    assert('effective.age2' not in column_names)
    sale_year = df['sale.year']

    def age(column_name):
        'age from sale_date to specified column'
        age_in_years = sale_year - df[column_name].values
        return pd.Series(age_in_years, index=df.index)

    result = df.copy(deep=True)
    result['age'] = age('YEAR.BUILT')
    result['effective.age'] = age('EFFECTIVE.YEAR.BUILT')
    result['age2'] = result['age'] * result['age']
    result['effective.age2'] = result['effective.age'] * result['effective.age']
    return result
def squeeze(obj, verbose=False):
    '''Return obj with float64 np.arrays converted to float32 to save space.

    Recurses into dict values; every other object is returned unchanged.
    '''
    if isinstance(obj, dict):
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior under Python 2 and keeps the function Python-3 portable
        return {k: squeeze(v) for k, v in obj.items()}
    if isinstance(obj, np.ndarray) and obj.dtype == np.float64:
        return np.array(obj, dtype=np.float32)
    return obj
def make_weights(query, train_df, hpw, control):
    'return numpy.array of weights for each sample, selected by weight scheme hpw'
    if hpw == 1:
        # weight scheme 1: uniform weights over all training samples
        return np.ones(len(train_df))
    else:
        # NOTE(review): falls through and implicitly returns None after
        # printing; the caller then passes None to model.fit -- confirm
        # whether an exception was intended here instead
        print 'bad hpw: %s' % hpw
def sweep_hp_lr(train_df, validate_df, control):
    '''sweep hyperparameters, fitting and predicting for each combination

    Fits one LinearRegression per (hpx, hpy) transform pair and returns a
    dict keyed by (('y_transform', hpy), ('x_transform', hpx)) holding the
    squeezed estimates, actuals, and fitted coefficients.
    '''
    def x_matrix(df, transform):
        # add age columns, then build the feature matrix with transform applied
        augmented = add_age(df, control.arg.sale_date)
        return x(transform, augmented, control.predictors)

    def y_vector(df, transform):
        return y(transform, df, control.price_column)

    verbose = True
    LR = linear_model.LinearRegression
    results = {}
    for hpx in control.arg.hpx:
        for hpy in control.arg.hpy:
            if verbose:
                print 'sweep_hr_lr hpx %s hpy %s' % (hpx, hpy)
            model = LR(fit_intercept=True,
                       normalize=True,
                       copy_X=False,
                       )
            train_x = x_matrix(train_df, hpx)
            train_y = y_vector(train_df, hpy)
            model.fit(train_x, train_y)
            estimates = model.predict(x_matrix(validate_df, hpx))
            actuals = y_vector(validate_df, hpy)
            # keep only the fitted parameters, not the whole model object
            attributes = {
                'coef_': model.coef_,
                'intercept_': model.intercept_
            }
            results[('y_transform', hpy), ('x_transform', hpx)] = squeeze({
                'estimate': estimates,
                'actual': actuals,
                'attributes': attributes
            })
    return results
def sweep_hp_rf(train_df, validate_df, control):
    '''fit a model and validate a model for each hyperparameter

    For each (max_depth hpd, weight scheme hpw) pair a RandomForestRegressor
    is refit per validation row (weights may depend on the query row).
    '''
    def x_matrix(df):
        augmented = add_age(df, control.arg.sale_date)
        return x(None, augmented, control.predictors)

    def y_vector(df):
        return y(None, df, control.price_column)

    verbose = True
    RFR = ensemble.RandomForestRegressor
    train_x = x_matrix(train_df)
    train_y = y_vector(train_df)
    results = {}
    for hpd in control.arg.hpd:
        for hpw in control.arg.hpw:
            for validate_row_index in xrange(len(validate_df)):
                if verbose:
                    print 'sweep_hp_rf hpd %d hpw %d validate_row_index %d of %d' % (
                        hpd, hpw, validate_row_index, len(validate_df))
                # one-row slice keeps the DataFrame type (vs. a Series)
                validate_row = validate_df[validate_row_index: validate_row_index + 1]
                model = RFR(n_estimators=control.n_rf_estimators,  # number of trees
                            random_state=control.random_seed,
                            max_depth=hpd)
                weights = make_weights(validate_row, train_df, hpw, control)
                model.fit(train_x, train_y, weights)
                estimate = squeeze(model.predict(x_matrix(validate_row))[0])
                actual = squeeze(y_vector(validate_row)[0])
                # Don't keep some attributes
                #  oob attributes are not produced because we didn't ask for them
                #  estimators_ contains a fitted model for each estimate
                attributes = {
                    'feature_importances_': model.feature_importances_,
                }
                # NOTE(review): this key omits validate_row_index, so each
                # row's result overwrites the previous one and only the last
                # validation row survives -- confirm this is intended
                results[('max_depth', hpd), ('weight_scheme_index', hpw)] = squeeze({
                    'estimate': estimate,
                    'actual': actual,
                    'attributes': attributes,
                })
    return results
def cross_validate(df, control):
'produce estimated generalization errors'
verbose = True
results = {}
fold_number = -1
sale_dates_mask = mask_in_date_range(df, control.sale_date_range)
skf = cross_validation.StratifiedKFold(sale_dates_mask, control.n_folds)
for train_indices, validate_indices in skf:
fold_number += 1
fold_train_all = df.iloc[train_indices].copy(deep=True)
fold_validate_all = df.iloc[validate_indices].copy(deep=True)
for td in control.arg.td:
if verbose:
print 'cross_validate fold %d of %d training_days %d' % (
fold_number, control.n_folds, td)
fold_train = samples_in_date_range(
fold_train_all,
DateRange(first=control.arg.sale_date - datetime.timedelta(td),
last=control.arg.sale_date - datetime.timedelta(1))
)
fold_validate = samples_in_date_range(
fold_validate_all,
control.sale_date_range
)
if control.arg.model == 'lr':
d = sweep_hp_lr(fold_train, fold_validate, control)
elif control.arg.model == 'rf':
d = sweep_hp_rf(fold_train, fold_validate, control)
# d = cross_validate_rf(fold_train, fold_validate, control)
else:
print 'bad model: %s' % control.model
pdb.set_trace()
results[(('fn', fold_number), ('td', td))] = d
return results
def predict_next(df, control):
    '''fit each model and predict transaction in next period

    Trains on the td days ending at the last sale date, tests on the
    following week (or month when --month was given).

    NOTE(review): the function returns inside the first iteration of the td
    loop, so only the first training_days value is ever used -- confirm
    whether results for every td were intended.
    '''
    verbose = True
    for td in control.arg.td:
        if verbose:
            print 'predict_next training_days %d' % td
        last_sale_date = control.sale_date_range.last
        train_df = samples_in_date_range(
            df,
            DateRange(first=last_sale_date - datetime.timedelta(td),
                      last=last_sale_date)
        )
        # test on the next month (30 days) or the next week (7 days)
        next_days = 30 if control.arg.month else 7
        test_df = samples_in_date_range(
            df,
            DateRange(first=last_sale_date,
                      last=last_sale_date + datetime.timedelta(next_days))
        )
        if control.arg.model == 'lr':
            return sweep_hp_lr(train_df, test_df, control)
        elif control.arg.model == 'rf':
            return sweep_hp_rf(train_df, test_df, control)
        else:
            # falls through with implicit None when the model name is unknown
            print 'bad model: %s' % control.arg.model
def fit_and_test_models(df_all, control):
    '''Return (results_cv, results_next): cross-validation and next-period results.'''
    # throw away irrelevant transactions
    df_relevant = samples_in_date_range(df_all, control.relevant_date_range)
    results_cv = cross_validate(df_relevant, control)
    results_next = predict_next(df_relevant, control)
    # (removed a leftover pdb.set_trace() debugging breakpoint here)
    return results_cv, results_next
def main(argv):
    '''Program entry point: estimate generalization errors, write result pickles.'''
    # warnings.filterwarnings('error')  # convert warnings to errors
    control = make_control(argv)
    sys.stdout = Logger(logfile_path=control.path_log)  # print also write to log file
    print control
    # read input
    if control.use_old_input:
        f = open(control.path_in_old, 'rb')
        df_loaded = pickle.load(f)
        f.close()
    else:
        df_loaded = pd.read_csv(control.path_in, engine='c')
    df_loaded_copy = df_loaded.copy(deep=True)  # make sure df_loaded isn't changed
    results_cv, results_next = fit_and_test_models(df_loaded, control)
    assert(df_loaded.equals(df_loaded_copy))

    # write results
    def file_name(key):
        'model-foldNumber-trainingDays'
        assert len(key) == 2, key
        s = '%s-%s-%s' % (control.arg.model, key[0], key[1])
        return s

    def write(dir_prefix, results):
        # one pickle per result key, under dir_out/<dir_prefix>/
        for k, v in results.iteritems():
            # NOTE: local name shadows the imported directory() helper
            directory = control.dir_out + dir_prefix
            if not os.path.exists(directory):
                os.makedirs(directory)
            f = open(directory + file_name(k), 'wb')
            pickle.dump((k, v), f)
            f.close()

    write('cv/', results_cv)
    write('next/', results_next)
    print 'ok'
if __name__ == "__main__":
    if False:
        # quiet pyflakes warnings about seemingly-unused imports
        pdb.set_trace()
        pprint(None)
        np.all()
        pd.Series()
    main(sys.argv)
| StarcoderdataPython |
177047 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/region_similarity_calculator.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register the generated message classes.
_sym_db = _symbol_database.Default()

# Serialized FileDescriptorProto for
# object_detection/protos/region_similarity_calculator.proto (generated; do not edit).
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n:object_detection/protos/region_similarity_calculator.proto\x12\x17object_detection.protos\"\xde\x02\n\x1aRegionSimilarityCalculator\x12N\n\x16neg_sq_dist_similarity\x18\x01 \x01(\x0b\x32,.object_detection.protos.NegSqDistSimilarityH\x00\x12@\n\x0eiou_similarity\x18\x02 \x01(\x0b\x32&.object_detection.protos.IouSimilarityH\x00\x12@\n\x0eioa_similarity\x18\x03 \x01(\x0b\x32&.object_detection.protos.IoaSimilarityH\x00\x12W\n\x1athresholded_iou_similarity\x18\x04 \x01(\x0b\x32\x31.object_detection.protos.ThresholdedIouSimilarityH\x00\x42\x13\n\x11region_similarity\"\x15\n\x13NegSqDistSimilarity\"\x0f\n\rIouSimilarity\"\x0f\n\rIoaSimilarity\"6\n\x18ThresholdedIouSimilarity\x12\x1a\n\riou_threshold\x18\x01 \x01(\x02:\x03\x30.5')

# Message descriptors extracted from the parsed file descriptor.
_REGIONSIMILARITYCALCULATOR = DESCRIPTOR.message_types_by_name['RegionSimilarityCalculator']
_NEGSQDISTSIMILARITY = DESCRIPTOR.message_types_by_name['NegSqDistSimilarity']
_IOUSIMILARITY = DESCRIPTOR.message_types_by_name['IouSimilarity']
_IOASIMILARITY = DESCRIPTOR.message_types_by_name['IoaSimilarity']
_THRESHOLDEDIOUSIMILARITY = DESCRIPTOR.message_types_by_name['ThresholdedIouSimilarity']

# Concrete message classes generated from the descriptors and registered
# with the default symbol database.
RegionSimilarityCalculator = _reflection.GeneratedProtocolMessageType('RegionSimilarityCalculator', (_message.Message,), {
  'DESCRIPTOR' : _REGIONSIMILARITYCALCULATOR,
  '__module__' : 'object_detection.protos.region_similarity_calculator_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.RegionSimilarityCalculator)
  })
_sym_db.RegisterMessage(RegionSimilarityCalculator)

NegSqDistSimilarity = _reflection.GeneratedProtocolMessageType('NegSqDistSimilarity', (_message.Message,), {
  'DESCRIPTOR' : _NEGSQDISTSIMILARITY,
  '__module__' : 'object_detection.protos.region_similarity_calculator_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.NegSqDistSimilarity)
  })
_sym_db.RegisterMessage(NegSqDistSimilarity)

IouSimilarity = _reflection.GeneratedProtocolMessageType('IouSimilarity', (_message.Message,), {
  'DESCRIPTOR' : _IOUSIMILARITY,
  '__module__' : 'object_detection.protos.region_similarity_calculator_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.IouSimilarity)
  })
_sym_db.RegisterMessage(IouSimilarity)

IoaSimilarity = _reflection.GeneratedProtocolMessageType('IoaSimilarity', (_message.Message,), {
  'DESCRIPTOR' : _IOASIMILARITY,
  '__module__' : 'object_detection.protos.region_similarity_calculator_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.IoaSimilarity)
  })
_sym_db.RegisterMessage(IoaSimilarity)

ThresholdedIouSimilarity = _reflection.GeneratedProtocolMessageType('ThresholdedIouSimilarity', (_message.Message,), {
  'DESCRIPTOR' : _THRESHOLDEDIOUSIMILARITY,
  '__module__' : 'object_detection.protos.region_similarity_calculator_pb2'
  # @@protoc_insertion_point(class_scope:object_detection.protos.ThresholdedIouSimilarity)
  })
_sym_db.RegisterMessage(ThresholdedIouSimilarity)

if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  # Byte offsets of each message within the serialized file descriptor.
  _REGIONSIMILARITYCALCULATOR._serialized_start=88
  _REGIONSIMILARITYCALCULATOR._serialized_end=438
  _NEGSQDISTSIMILARITY._serialized_start=440
  _NEGSQDISTSIMILARITY._serialized_end=461
  _IOUSIMILARITY._serialized_start=463
  _IOUSIMILARITY._serialized_end=478
  _IOASIMILARITY._serialized_start=480
  _IOASIMILARITY._serialized_end=495
  _THRESHOLDEDIOUSIMILARITY._serialized_start=497
  _THRESHOLDEDIOUSIMILARITY._serialized_end=551
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
3223338 |
import os
import math
import sys
import time
from os.path import abspath, basename, join
from seisflows.tools import msg
from seisflows.tools import unix
from seisflows.tools.tools import call, findpath, saveobj
from seisflows.config import ParameterError, custom_import
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
class pbs_lg(custom_import('system', 'base')):
""" An interface through which to submit workflows, run tasks in serial or
parallel, and perform other system functions.
By hiding environment details behind a python interface layer, these
classes provide a consistent command set across different computing
environments.
Intermediate files are written to a global scratch path PATH.SCRATCH,
which must be accessible to all compute nodes.
Optionally, users can provide a local scratch path PATH.LOCAL if each
compute node has its own local filesystem.
For important additional information, please see
http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
"""
def check(self):
""" Checks parameters and paths
"""
print msg.Warning_pbs_sm
# name of job
if 'TITLE' not in PAR:
setattr(PAR, 'TITLE', basename(abspath('.')))
# time allocated for workflow in minutes
if 'WALLTIME' not in PAR:
setattr(PAR, 'WALLTIME', 30.)
# number of tasks
if 'NTASK' not in PAR:
raise ParameterError(PAR, 'NTASK')
# number of cores per task
if 'NPROC' not in PAR:
raise ParameterError(PAR, 'NPROC')
# number of cores per node
if 'NODESIZE' not in PAR:
raise ParameterError(PAR, 'NODESIZE')
# how to invoke executables
if 'MPIEXEC' not in PAR:
setattr(PAR, 'MPIEXEC', '')
# optional additional PBS arguments
if 'PBSARGS' not in PAR:
setattr(PAR, 'PBSARGS', '')
# optional environment variable list VAR1=val1,VAR2=val2,...
if 'ENVIRONS' not in PAR:
setattr(PAR, 'ENVIRONS', '')
# level of detail in output messages
if 'VERBOSE' not in PAR:
setattr(PAR, 'VERBOSE', 1)
# where job was submitted
if 'WORKDIR' not in PATH:
setattr(PATH, 'WORKDIR', abspath('.'))
# where output files are written
if 'OUTPUT' not in PATH:
setattr(PATH, 'OUTPUT', PATH.WORKDIR+'/'+'output')
# where temporary files are written
if 'SCRATCH' not in PATH:
setattr(PATH, 'SCRATCH', PATH.WORKDIR+'/'+'scratch')
# where system files are written
if 'SYSTEM' not in PATH:
setattr(PATH, 'SYSTEM', PATH.SCRATCH+'/'+'system')
# optional local scratch path
if 'LOCAL' not in PATH:
setattr(PATH, 'LOCAL', None)
def submit(self, workflow):
""" Submits workflow
"""
# create scratch directories
unix.mkdir(PATH.SCRATCH)
unix.mkdir(PATH.SYSTEM)
# create output directories
unix.mkdir(PATH.OUTPUT)
workflow.checkpoint()
hours = PAR.WALLTIME/60
minutes = PAR.WALLTIME%60
walltime = 'walltime=%02d:%02d:00 ' % (hours, minutes)
ncpus = PAR.NODESIZE
mpiprocs = PAR.NODESIZE
# prepare qsub arguments
call( 'qsub '
+ '%s ' % PAR.PBSARGS
+ '-l select=1:ncpus=%d:mpiprocs=%d ' % (ncpus, mpiprocs)
+ '-l %s ' % walltime
+ '-N %s ' % PAR.TITLE
+ '-j %s ' %'oe'
+ '-q %s ' %'medium'
+ '-o %s ' % (PATH.SUBMIT+'/'+'output.log')
+ '-V '
+ ' -- ' + findpath('seisflows.system') +'/'+ 'wrappers/submit '
+ PATH.OUTPUT)
def run(self, classname, method, hosts='all', **kwargs):
""" Executes the following task:
classname.method(*args, **kwargs)
"""
self.checkpoint()
if hosts == 'all':
# run all tasks
call(findpath('seisflows.system') +'/'+'wrappers/dsh '
+ ','.join(self.hostlist()) + ' '
+ findpath('seisflows.system') +'/'+'wrappers/run '
+ PATH.OUTPUT + ' '
+ classname + ' '
+ method + ' '
+ 'PYTHONPATH='+findpath('seisflows'),+','
+ PAR.ENVIRONS)
elif hosts == 'head':
# run a single task
call('ssh ' + self.hostlist()[0] + ' '
+ '"'
+ 'export SEISFLOWS_TASK_ID=0; '
+ join(findpath('seisflows.system'), 'wrappers/run ')
+ PATH.OUTPUT + ' '
+ classname + ' '
+ method + ' '
+ 'PYTHONPATH='+findpath('seisflows'),+','
+ PAR.ENVIRONS
+'"')
else:
raise KeyError('Bad keyword argument: system.run: hosts')
def mpiexec(self):
""" Specifies MPI executable used to invoke solver
"""
return PAR.MPIEXEC
def taskid(self):
""" Provides a unique identifier for each running task
"""
try:
return os.getenv('PBS_NODENUM')
except:
raise Exception("PBS_NODENUM environment variable not defined.")
def hostlist(self):
""" Generates list of allocated cores
"""
with open(os.environ['PBS_NODEFILE'], 'r') as f:
return [line.strip() for line in f.readlines()]
def save_kwargs(self, classname, method, kwargs):
kwargspath = join(PATH.OUTPUT, 'kwargs')
kwargsfile = join(kwargspath, classname+'_'+method+'.p')
unix.mkdir(kwargspath)
saveobj(kwargsfile, kwargs)
| StarcoderdataPython |
1774118 | <reponame>cjdsj/RDN-for-SR-by-keras<filename>test.py
import cv2
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras.models import load_model
from data_processing import get_test_data, imgappend, getYimg, psnr
from model import RDN, L1_loss
''' Set parameters '''
scale = 2
size = 32  # Input size
aug_num = 4  # Number of image augmentations, the maximum value is 4
num_G = 32  # Number of convolution kernels
lr = 1e-4  # Learning rate
Imgflag = 2  # The way of reading images. 0: RGB, 1: GRAY, 2: Y channel from Y_CrCb form
# single-channel input for grayscale / Y-channel modes, 3 channels for RGB
if Imgflag == 0:
    channels = 3
else:
    channels = 1
model_save_path = '/content/drive/MyDrive/Colaboratory/跨平台超分辨率/models/RDN.hdf5'
test_file = '/content/drive/MyDrive/Colaboratory/跨平台超分辨率/SR_train&test/SR_testing_datasets/Set5/butterfly.png'
Imglist = ['RGB', 'GRAY', 'YCrCb']
print('Have got parameters, for ' + Imglist[Imgflag] + ' images.')

'''Get test data '''
HR_test, HR_img, HR_res = get_test_data(test_file, size, scale, Rflag=0, Imgflag=Imgflag)
LR_test, LR_img, LR_res = get_test_data(test_file, size, scale, Rflag=1, Imgflag=Imgflag)
# normalize pixel values into [0, 1]
x_test, y_test = LR_test / 255.0, HR_test / 255.0
if Imgflag != 0:  # Have to add another dimension for channel if images only have one channel
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))
    y_test = np.reshape(y_test, (y_test.shape[0], y_test.shape[1], y_test.shape[2], 1))
print(HR_img.shape, LR_img.shape)
if Imgflag == 2:  # Get Cr and Cb channels for Y_CrCb form
    print(HR_res.shape, LR_res.shape)
print(y_test.shape, x_test.shape)

''' Load model and evaluate test image '''
model = RDN(num_G=num_G, channels=channels, scale=scale)
model.build((None, size, size, channels))
model.load_weights(model_save_path)
model.evaluate(x_test, y_test, batch_size=16)

''' Recover test image and show the result '''
y_pred = model.predict(x_test)
# number of prediction patches along each axis of the HR image
length_num = HR_img.shape[0] // (size * scale)
width_num = HR_img.shape[1] // (size * scale)
pred_img = imgappend(y_pred, length_num, width_num)
pred_img = (pred_img * 255).astype(np.uint8)
# crop the originals so their sizes match the stitched prediction
if Imgflag == 0:
    HR_img = HR_img[:pred_img.shape[0], :pred_img.shape[1], :pred_img.shape[2]]
    LR_img = LR_img[:pred_img.shape[0] // scale, :pred_img.shape[1] // scale, :pred_img.shape[2]]
else:
    HR_img = HR_img[:pred_img.shape[0], :pred_img.shape[1]]
    LR_img = LR_img[:pred_img.shape[0] // scale, :pred_img.shape[1] // scale]
    # drop the trailing channel axis for single-channel images
    pred_img = np.reshape(pred_img, (pred_img.shape[0], pred_img.shape[1]))
# bilinear upscaling of the LR image as a comparison baseline
ILR_img = cv2.resize(LR_img, (LR_img.shape[1] * scale, LR_img.shape[0] * scale), interpolation=cv2.INTER_LINEAR)
if Imgflag == 2:
    ILR_img, pred_img, HR_img = getYimg(ILR_img, pred_img, HR_img, LR_res, HR_res)
if Imgflag != 1:  # Grayscale image can't calculate psnr
    cmap = None
    print('PSNR between ILR and HR:', psnr(HR_img, ILR_img))
    print('PSNR between output and HR:', psnr(HR_img, pred_img), '\n')
    # swap OpenCV's BGR channel order to RGB for matplotlib display
    b, g, r = cv2.split(pred_img)
    pred_img = cv2.merge([r, g, b])
    b, g, r = cv2.split(HR_img)
    HR_img = cv2.merge([r, g, b])
    b, g, r = cv2.split(ILR_img)
    ILR_img = cv2.merge([r, g, b])
else:
    cmap = plt.cm.gray

# side-by-side comparison: bilinear baseline, network output, ground truth
plt.figure(figsize=(15, 15))
plt.subplot(1, 3, 1)
plt.imshow(ILR_img, cmap)
plt.title("ILR")
plt.subplot(1, 3, 2)
plt.imshow(pred_img, cmap)
plt.title("Prediction")
plt.subplot(1, 3, 3)
plt.imshow(HR_img, cmap)
plt.title("HR")
plt.show()
# Package version as a tuple, for programmatic comparison.
__version_info__ = (1, 0, 0)
# Human-readable dotted version string derived from the tuple.
__version__ = '.'.join(str(part) for part in __version_info__)
| StarcoderdataPython |
3352379 | from .custom_component_server import setup_view
from .parser import Parser
MODULE = "custom_icons"
DATA_EXTRA_MODULE_URL = 'frontend_extra_module_url'


async def async_setup(hass, config):
    """Serve the custom icon files and register their parsed cache files with the frontend."""
    setup_view(hass, MODULE)

    parser = Parser()
    if parser.checkIfNeeded():
        parser.do()
    parser.cleanUnusedCacheFiles()

    # expose every cached icon module URL to the frontend loader
    extra_urls = hass.data.setdefault(DATA_EXTRA_MODULE_URL, set())
    for cache_file_path in parser.listCacheFiles():
        extra_urls.add(cache_file_path)
    return True
| StarcoderdataPython |
50387 | <filename>SIGNUS/modules/crawler/etc/driver_agent.py<gh_stars>0
from selenium import webdriver
from platform import platform
import os
def chromedriver():
    """Build a headless Chrome WebDriver for server-side crawling.

    The chromedriver binary location is read from the
    SIGNUS_CHROMEDRIVER_PATH environment variable.  Chrome is configured
    for container use (headless, no sandbox, no GPU, shared-memory
    workaround) with relaxed SSL checking and a desktop user agent.

    Returns:
        selenium.webdriver.Chrome: a ready-to-use driver instance.
    """
    # (removed an older, commented-out duplicate of this factory that
    # selected the driver binary by platform)
    path = os.getenv("SIGNUS_CHROMEDRIVER_PATH")
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument("--ignore-ssl-errors=true")
    chrome_options.add_argument("--ssl-protocol=any")
    chrome_options.add_argument('--ignore-certificate-errors')
    chrome_options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)AppleWebKit 537.36 (KHTML, like Gecko) Chrome")
    driver = webdriver.Chrome(executable_path=path, options=chrome_options)
    return driver
| StarcoderdataPython |
3213399 | <gh_stars>1-10
from unittest import TestCase
from neo.Prompt import Utils
from neocore.Fixed8 import Fixed8
from neocore.UInt160 import UInt160
class TestInputParser(TestCase):
def test_utils_1(self):
args = [1, 2, 3]
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [1, 2, 3])
self.assertIsNone(neo)
self.assertIsNone(gas)
def test_utils_2(self):
args = []
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [])
self.assertIsNone(neo)
self.assertIsNone(gas)
def test_utils_3(self):
args = None
with self.assertRaises(Exception):
Utils.get_asset_attachments(args)
def test_utils_4(self):
args = [1, 2, '--attach-neo=100']
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [1, 2])
self.assertEqual(neo, Fixed8.FromDecimal(100))
self.assertIsNone(gas)
def test_utils_5(self):
args = [1, 2, '--attach-gas=100.0003']
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [1, 2])
self.assertEqual(gas, Fixed8.FromDecimal(100.0003))
self.assertIsNone(neo)
def test_utils_6(self):
args = [1, 2, '--attachgas=100.0003']
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [1, 2, '--attachgas=100.0003'])
self.assertIsNone(neo)
self.assertIsNone(gas)
def test_utils_7(self):
args = [1, 2, '--attach-gas=100.0003', '--attach-neo=5.7']
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [1, 2])
self.assertEqual(neo, None)
self.assertEqual(gas, Fixed8.FromDecimal(100.0003))
def test_utils_8(self):
args = [1, 2, '--attach-gas=100.0003', '--attach-neo=6']
args, neo, gas = Utils.get_asset_attachments(args)
self.assertEqual(args, [1, 2])
self.assertEqual(neo, Fixed8.FromDecimal(6))
self.assertEqual(gas, Fixed8.FromDecimal(100.0003))
def test_owner_1(self):
args = [1, 2]
args, owners = Utils.get_owners_from_params(args)
self.assertEqual(args, [1, 2])
self.assertIsNone(owners)
def test_owner_2(self):
args = [1, 2, "--owners=['ABC','DEF',]"]
args, owners = Utils.get_owners_from_params(args)
self.assertEqual(args, [1, 2])
self.assertEqual(owners, set())
def test_owner_3(self):
args = [1, 2, "--owners=['<KEY>','<KEY>',]"]
args, owners = Utils.get_owners_from_params(args)
self.assertEqual(args, [1, 2])
self.assertEqual(len(owners), 2)
self.assertIsInstance(list(owners)[0], UInt160)
def test_owner_and_assets(self):
    """Owners and asset flags can be combined; each parser strips its own flag."""
    args = [1, 2, "--owners=['<KEY>','<KEY>',]", '--attach-neo=10']
    args, owners = Utils.get_owners_from_params(args)
    args, neo, gas = Utils.get_asset_attachments(args)
    self.assertEqual(args, [1, 2])
    self.assertEqual(len(owners), 2)
    self.assertIsInstance(list(owners)[0], UInt160)
    self.assertEqual(neo, Fixed8.FromDecimal(10))
| StarcoderdataPython |
class Solution:
    def findTheDifference(self, s, t):
        """Return the one extra character present in t but not in s.

        t is s (in any order) plus exactly one additional character.
        Generalized from the original scan over string.ascii_lowercase:
        Counter subtraction works for ANY characters (digits, punctuation,
        non-ASCII), not just 'a'-'z', and drops the `string` dependency.

        :type s: str
        :type t: str
        :rtype: str
        """
        # Multiset difference: only the surplus character survives.
        extra = collections.Counter(t) - collections.Counter(s)
        return next(iter(extra))
30420 | import os
import scipy.io.wavfile
import matplotlib.pyplot as plt
import numpy as np
import os
import random
'''
Create a random dataset with three different frequencies that are always in fase.
Frequencies will be octave [440, 880, 1320].
'''
# All source tones share one sample rate (Hz).
fs = 16000
# Pre-recorded single-note wav files spanning two octaves;
# scipy.io.wavfile.read returns (rate, samples) — keep only the samples.
x1 = scipy.io.wavfile.read('corpus/Analysis/a440.wav')[1]
x2 = scipy.io.wavfile.read('corpus/Analysis/c531.wav')[1]
x3 = scipy.io.wavfile.read('corpus/Analysis/e667.wav')[1]
x4 = scipy.io.wavfile.read('corpus/Analysis/a880.wav')[1]
x5 = scipy.io.wavfile.read('corpus/Analysis/c1056.wav')[1]
x6 = scipy.io.wavfile.read('corpus/Analysis/e1320.wav')[1]
x7 = scipy.io.wavfile.read('corpus/Analysis/a1760.wav')[1]
# Categories
# NOTE(review): a/b/c appear unused in the visible code — confirm before removal.
a = [0]
b = [1]
c = [2]
def createRandomSequence():
    """Draw a random note sequence plus a matching list of segment lengths.

    Returns (notes, lengths): 5-10 note indices in 0..6, each paired with a
    duration that is a 1x-10x multiple of 1818 samples.
    """
    minLen = 1818
    count = random.randint(5, 10)
    # Draw each (note, length) pair together so the RNG call order matches
    # the original implementation exactly.
    pairs = [(random.randint(0, 6), minLen * random.randint(1, 10))
             for _ in range(count)]
    notes = [note for note, _ in pairs]
    lengths = [length for _, length in pairs]
    return notes, lengths
def genFile(sequence, sampleSequence, c):
    """Write the per-sample label track for one generated clip.

    Each note index in `sequence` is repeated sampleSequence[i] times,
    one "<label>,\\n" line per sample, into
    corpus/panFluteBigDataset/lc_train<c>.txt.
    """
    fullSequence = []
    for value, length in zip(sequence, sampleSequence):
        fullSequence.extend(int(length) * [value])
    path = os.path.join('corpus', 'panFluteBigDataset', 'lc_train%s.txt' % c)
    # Context manager guarantees the file is closed even if a write fails
    # (the original used explicit open()/close()).
    with open(path, 'w') as label_file:
        for item in fullSequence:
            label_file.write('%i,\n' % item)
def case(x):
    """Map a note index 0..6 to its pre-loaded waveform (KeyError otherwise)."""
    samples = {
        0: x1,
        1: x2,
        2: x3,
        3: x4,
        4: x5,
        5: x6,
        6: x7,
    }
    return samples[x]
def genSignals(sequence, sampleSequence, c):
    """Render one training clip: concatenate note snippets, normalize, add noise.

    sequence: note indices (see case()); sampleSequence: per-note lengths in
    samples; c: index used in the output filename.
    """
    y = []
    for i in range(len(sequence)):
        # convert categories to frequencies
        freq = case(sequence[i])
        #nSamples = np.arange(sampleSequence[i])
        #a = random.randint(25, 100)/100
        a = 1
        #y0 = a*np.sin(2*np.pi*freq*nSamples / fs)
        # Take the first sampleSequence[i] samples of the recorded note.
        y0 = freq[:sampleSequence[i]]
        y = scipy.hstack((y, y0))
    # Peak-normalize the whole clip.
    # NOTE(review): y[np.argmax(y)] is the largest *signed* value; a clip whose
    # biggest excursion is negative would not normalize to +/-1 — confirm.
    y = y / y[np.argmax(y)]
    noise = 0.01*np.random.normal(0, 1, len(y))
    y = np.asarray(y) + noise
    # NOTE(review): writes to 'panFluteBigDataset7freq' while genFile writes
    # labels to 'panFluteBigDataset' — confirm the two paths should differ.
    scipy.io.wavfile.write(os.path.join('corpus', 'panFluteBigDataset7freq', 'lc_train%s.wav' % c), fs, y)
def main():
    """Generate 100 random clips (label-file generation is currently disabled)."""
    for clip_index in range(100):
        notes, lengths = createRandomSequence()
        #print(sequence, sampleSequence)
        #genFile(sequence, sampleSequence, c)
        genSignals(notes, lengths, clip_index)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
30689 | <gh_stars>1-10
import os
import logging
import pdb
import time
import random
from multiprocessing import Process
import numpy as np
from client import MilvusClient
import utils
import parser
from runner import Runner
logger = logging.getLogger("milvus_benchmark.local_runner")
class LocalRunner(Runner):
    """run local mode"""

    def __init__(self, ip, port):
        """Remember the Milvus server address for all clients created in run()."""
        super(LocalRunner, self).__init__()
        self.ip = ip
        self.port = port

    def run(self, definition, run_type=None):
        """Execute a benchmark `definition` in either performance or stability mode.

        definition: mapping of operation name ('insert'/'query') to a dict
        with 'run_count' and 'params' (a list of per-run parameter dicts).
        run_type: 'performance' runs timed insert/query suites; 'stability'
        hammers one table with mixed query/insert/index operations for a
        configured duration. Any other value is silently ignored.
        """
        if run_type == "performance":
            for op_type, op_value in definition.items():
                run_count = op_value["run_count"]
                run_params = op_value["params"]
                if op_type == "insert":
                    for index, param in enumerate(run_params):
                        table_name = param["table_name"]
                        # random_1m_100_512
                        (data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
                        milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
                        # Check has table or not
                        # Drop a pre-existing table so every insert run starts clean.
                        if milvus.exists_table():
                            milvus.delete()
                            time.sleep(10)
                        milvus.create_table(table_name, dimension, index_file_size, metric_type)
                        res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
                        logger.info(res)
                elif op_type == "query":
                    for index, param in enumerate(run_params):
                        logger.info("Definition param: %s" % str(param))
                        table_name = param["dataset"]
                        (data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
                        milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
                        # parse index info
                        index_types = param["index.index_types"]
                        nlists = param["index.nlists"]
                        # parse top-k, nq, nprobe
                        top_ks, nqs, nprobes = parser.search_params_parser(param)
                        # Sweep every (index_type, nlist, nprobe) combination.
                        for index_type in index_types:
                            for nlist in nlists:
                                milvus.create_index(index_type, nlist)
                                # preload index
                                milvus.preload_table()
                                # Run query test
                                for nprobe in nprobes:
                                    logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
                                    res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
                                    headers = [param["dataset"]]
                                    headers.extend([str(top_k) for top_k in top_ks])
                                    utils.print_table(headers, nqs, res)
        elif run_type == "stability":
            for op_type, op_value in definition.items():
                # Stability mode only supports 'query' definitions.
                if op_type != "query":
                    logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
                    break
                run_count = op_value["run_count"]
                run_params = op_value["params"]
                # Batch size for the background inserts issued during the run.
                nq = 10000
                for index, param in enumerate(run_params):
                    logger.info("Definition param: %s" % str(param))
                    table_name = param["dataset"]
                    (data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
                    # set default test time
                    if "during_time" not in param:
                        during_time = 100 # seconds
                    else:
                        during_time = int(param["during_time"]) * 60
                    # set default query process num
                    # NOTE(review): query_process_num is only consumed by the
                    # commented-out multi-process block below — confirm whether
                    # it should still be parsed.
                    if "query_process_num" not in param:
                        query_process_num = 10
                    else:
                        query_process_num = int(param["query_process_num"])
                    milvus = MilvusClient(table_name)
                    # Check has table or not
                    if not milvus.exists_table():
                        logger.warning("Table %s not existed, continue exec next params ..." % table_name)
                        continue
                    start_time = time.time()
                    insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
                    # Mixed query/insert/index churn until the time budget is spent.
                    while time.time() < start_time + during_time:
                        processes = []
                        # # do query
                        # for i in range(query_process_num):
                        #     milvus_instance = MilvusClient(table_name)
                        #     top_k = random.choice([x for x in range(1, 100)])
                        #     nq = random.choice([x for x in range(1, 1000)])
                        #     nprobe = random.choice([x for x in range(1, 500)])
                        #     logger.info(nprobe)
                        #     p = Process(target=self.do_query, args=(milvus_instance, table_name, [top_k], [nq], 64, run_count, ))
                        #     processes.append(p)
                        #     p.start()
                        #     time.sleep(0.1)
                        # for p in processes:
                        #     p.join()
                        milvus_instance = MilvusClient(table_name)
                        top_ks = random.sample([x for x in range(1, 100)], 4)
                        nqs = random.sample([x for x in range(1, 1000)], 3)
                        nprobe = random.choice([x for x in range(1, 500)])
                        # NOTE(review): queries go through `milvus` (created before
                        # the loop) while inserts use the fresh `milvus_instance`
                        # — confirm the mix is intentional.
                        res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
                        # milvus_instance = MilvusClient(table_name)
                        status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
                        if not status.OK():
                            logger.error(status.message)
                        # NOTE(review): a float modulo equalling exactly 0 almost
                        # never happens, so this periodic re-index branch rarely
                        # (if ever) fires — confirm the intended cadence.
                        if (time.time() - start_time) % 300 == 0:
                            status = milvus_instance.drop_index()
                            if not status.OK():
                                logger.error(status.message)
                            index_type = random.choice(["flat", "ivf_flat", "ivf_sq8"])
                            status = milvus_instance.create_index(index_type, 16384)
                            if not status.OK():
                                logger.error(status.message)
| StarcoderdataPython |
1653056 | <reponame>xhchrn/D-LADMM<filename>mu_updater.py
"""
File: mu_updater.py
Created: September 19, 2019
Revised: December 2, 2019
Authors: <NAME>, <NAME>
Purpose: Define a set of mu updaters in Safeguarded KM method.
We implement 5 types of mu updaters, named `Geometric Series`,
`Arithmetic Average`, `Exponential Moving Average`, `Recent Term`,
and `Recent Max` respectively.
"""
import numpy as np
class EMAUpdater(object):
    """Exponential-moving-average safeguard threshold updater."""

    def __init__(self, mu, parameter):
        """Store the initial threshold tensor and the EMA smoothing factor."""
        self.mu = mu
        self.parameter = parameter

    def step(self, Sx_L2O_norm, bool_term):
        """Blend the new norm into mu wherever bool_term flags an update."""
        ema = self.parameter * Sx_L2O_norm + (1 - self.parameter) * self.mu
        keep = 1.0 - bool_term
        # detach() keeps the running threshold out of the autograd graph.
        self.mu = (keep * self.mu + bool_term * ema).detach()
        return self.mu
class GSUpdater(object):
    """Geometric-series safeguard threshold updater."""

    def __init__(self, mu, parameter):
        """Store the initial threshold tensor and the per-step decay rate."""
        self.mu = mu
        self.parameter = parameter

    def step(self, Sx_L2O_norm, bool_term):
        """Decay mu geometrically wherever bool_term flags an update.

        Sx_L2O_norm is accepted for interface parity but does not affect
        the decayed value.
        """
        decayed = (1 - self.parameter) * self.mu
        keep = 1.0 - bool_term
        self.mu = (keep * self.mu + bool_term * decayed).detach()
        return self.mu
class RTUpdater(object):
    """Recent-term safeguard updater: mu tracks the latest norm when triggered."""

    def __init__(self, mu, parameter):
        """Store the initial threshold; `parameter` is kept only for parity."""
        self.mu = mu
        self.parameter = parameter

    def step(self, Sx_L2O_norm, bool_term):
        """Replace mu by the newest norm wherever bool_term flags an update."""
        keep = 1.0 - bool_term
        self.mu = (keep * self.mu + bool_term * Sx_L2O_norm).detach()
        return self.mu
class RMUpdater(object):
    """Recent-max safeguard threshold updater.

    Keeps a circular buffer of the last `parameter` norms and reports the
    elementwise maximum over that window.
    """

    def __init__(self, mu, parameter):
        """Allocate the circular buffer and seed it with the initial mu.

        mu: 1-D tensor of initial thresholds (one per problem instance).
        parameter: window size (number of recent norms to remember).
        """
        self.parameter = int(parameter)
        # One column per remembered step; mu.new_zeros keeps mu's dtype/device.
        # (Use the int-converted value consistently, matching the attribute.)
        self.recent = mu.new_zeros((mu.shape[0], self.parameter))
        self.pointer = 0
        self.step(mu)

    def step(self, Sx_L2O_norm, bool_term=None):
        """Record the newest norm and return the windowed elementwise max.

        bool_term is accepted for interface parity with the other updaters
        but is ignored: the window always advances.
        """
        self.recent[:, self.pointer] = Sx_L2O_norm
        self.pointer = (self.pointer + 1) % self.parameter
        # BUGFIX: Tensor.max(dim=1) returns a (values, indices) namedtuple,
        # while every sibling updater returns a plain tensor that callers
        # assign back to their threshold. Return only the values tensor.
        return self.recent.max(dim=1).values
class BlankUpdater(object):
    """No-op updater: always reports an effectively infinite threshold."""

    def __init__(self, mu, parameter):
        """Arguments are accepted only for interface parity; nothing is kept."""

    def step(self, Sx_L2O_norm, bool_term):
        """Return a huge constant so the safeguard never triggers."""
        return np.power(10.0, 10)
# Registry mapping the configuration string for each safeguard strategy to
# its updater class (see the class definitions above).
mu_updater_dict = {
    'EMA': EMAUpdater,
    'GS': GSUpdater,
    'RT': RTUpdater,
    'RM': RMUpdater,
    'None': BlankUpdater
}
| StarcoderdataPython |
3368277 | <filename>multiinstance/distanceApproaches.py
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_Distribution_Distance_Approaches.ipynb (unless otherwise specified).
__all__ = ['fitKDE', 'KLD', 'JSD', 'getJSDDistMat', 'getKLDMat', 'getWassersteinMat', 'getOptimalAdjacency']
# Cell
from .utils import *
import seaborn as sns
import community as community_louvain
import networkx as nx
from .data.syntheticData import buildDataset,getBag
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import KernelDensity
from scipy.special import logsumexp
import scipy.stats as ss
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
# Cell
def fitKDE(vec):
    """Fit and return a Gaussian kernel-density estimate over `vec` (n x 1)."""
    return KernelDensity(kernel="gaussian").fit(vec)
def KLD(lnDensI, lnDensJ):
    """KL divergence (base 2) between two densities given as log-densities."""
    p = np.exp(lnDensI)
    q = np.exp(lnDensJ)
    return ss.entropy(p, qk=q, base=2)
def JSD(ds, kdeI, i, j):
    """Jensen-Shannon-style divergence between bags i and j's score densities.

    kdeI is the KDE already fitted to bag i's transformed scores (the caller
    fits it once per i and reuses it across all j).

    NOTE(review): the two KL terms are evaluated on the bags' own sample
    sets (a sample-based estimate) and are summed without the usual 1/2
    factor — confirm the scaling is intended.
    """
    _, uI = getTransformScores(ds, i)
    uI = uI.reshape((-1, 1))
    _, uJ = getTransformScores(ds, j)
    uJ = uJ.reshape((-1, 1))
    kdeJ = fitKDE(uJ)
    # Log-densities of both models evaluated at bag i's samples.
    lnDensI0 = kdeI.score_samples(uI)
    lnDensJ0 = kdeJ.score_samples(uI)
    # Log-density of the equal-weight mixture M = (I + J) / 2 at i's samples.
    lnDensM0 = np.array([logsumexp((ldi, ldj),
                                   b=np.array([.5, .5])) for ldi, ldj in zip(lnDensI0, lnDensJ0)])
    # The same three quantities evaluated at bag j's samples.
    lnDensI1 = kdeI.score_samples(uJ)
    lnDensJ1 = kdeJ.score_samples(uJ)
    lnDensM1 = np.array([logsumexp((ldi, ldj),
                                   b=np.array([.5, .5])) for ldi, ldj in zip(lnDensI1, lnDensJ1)])
    x = KLD(lnDensI0, lnDensM0)  # KL(I || M), estimated on i's samples
    y = KLD(lnDensJ1, lnDensM1)  # KL(J || M), estimated on j's samples
    return x + y
def getJSDDistMat(ds):
    """Symmetric N x N matrix of pairwise JSD values between the bags of ds."""
    N = ds.N
    dist = np.zeros((N, N))
    for i in range(N):
        # Fit bag i's KDE once and reuse it for every partner j.
        _, uI = getTransformScores(ds, i)
        kdeI = fitKDE(uI.reshape((-1, 1)))
        for j in range(i + 1, N):
            divergence = JSD(ds, kdeI, i, j)
            dist[i, j] = divergence
            dist[j, i] = divergence
    return dist
def getKLDMat(ds):
    """Pairwise KL-divergence matrix between the bags' transformed-score KDEs.

    dist[i, j] = KL(density_i || density_j), with both densities evaluated
    at bag i's samples, so the matrix is not symmetric in general.

    Performance: the original refit bag j's KDE and re-scored bag i inside
    the inner loop (O(N^2) KDE fits/scorings); here every bag's scores and
    KDE are computed once up front (O(N)), with identical results.
    """
    N = ds.N
    scores = []
    kdes = []
    for i in range(N):
        _, u = getTransformScores(ds, i)
        u = u.reshape((-1, 1))
        scores.append(u)
        kdes.append(fitKDE(u))
    dist = np.zeros((N, N))
    for i in range(N):
        # Log-density of bag i's own model at its own samples (reused across j).
        lnDensI = kdes[i].score_samples(scores[i])
        for j in range(N):
            lnDensJ = kdes[j].score_samples(scores[i])
            dist[i, j] = KLD(lnDensI, lnDensJ)
    return dist
def getWassersteinMat(ds):
    """N x N matrix of pairwise 1-D Wasserstein distances between bag scores.

    Performance: the original called getTransformScores for every (i, j)
    pair (O(N^2) extractions); here each bag's score vector is extracted
    once (O(N)), with identical results.
    """
    N = ds.N
    scores = []
    for i in range(N):
        _, u = getTransformScores(ds, i)
        scores.append(u)
    dist = np.zeros((N, N))
    for i in range(N):
        for j in range(N):
            dist[i, j] = ss.wasserstein_distance(scores[i], scores[j])
    return dist
def getOptimalAdjacency(trueAlphas):
    """Adjacency matrix of pairwise |alpha_i - alpha_j| differences.

    Vectorized replacement for the original O(N^2) Python double loop; the
    diagonal is 0 and the matrix is symmetric, matching the loop's output
    exactly (dtype is forced to float, as np.zeros produced before).
    """
    alphas = np.asarray(trueAlphas, dtype=float).reshape(-1)
    return np.abs(alphas[:, None] - alphas[None, :])
3341645 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
# The wrapper class to test.
from blb import BLB
class TestBLB(unittest.TestCase):
    """Unit tests for the BLB annotation-file wrapper class."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_blb(self):
        """Check the headers and blob count of the sample annotation CSV."""
        ## The BLB annotation CSV file.
        blb = BLB("testdata/BLB/000000_00_00_00.csv")
        # The header row: four named columns, in order.
        expected_headers = ["annotation_id", "x", "y", "r"]
        self.assertEqual(blb.get_number_of_headers(), len(expected_headers))
        for position, header in enumerate(expected_headers):
            self.assertEqual(blb.get_header(position), header)
        # The annotations: the sample file contains 188 blobs.
        self.assertEqual(blb.get_number_of_blobs(), 188)
if __name__ == "__main__":
    # Configure file logging before running the test suite.
    lg.basicConfig(filename='log_test_blb.log', filemode='w', level=lg.DEBUG)
    lg.info(" *")
    lg.info(" *=========================================")
    lg.info(" * Logger output from wrappers/test_blb.py ")
    lg.info(" *=========================================")
    lg.info(" *")
    unittest.main()
| StarcoderdataPython |
11461 | """
Bayes sensor code split out from
https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/binary_sensor/bayesian.py
This module is used to explore the sensor.
"""
from collections import OrderedDict
from const import *
def update_probability(prior, prob_true, prob_false):
    """Update probability using Bayes' rule.

    prob_true/prob_false are P(observation | state) for the true and false
    hypotheses; returns the posterior P(state | observation).
    """
    evidence_true = prob_true * prior
    evidence_false = prob_false * (1 - prior)
    return evidence_true / (evidence_true + evidence_false)
def setup_platform(config):
    """Set up the Bayesian Binary sensor from its configuration dict.

    Modified from async_setup_platform.
    """
    # Keep the original lookup order so a missing key raises identically.
    name = config[CONF_NAME]
    observations = config[CONF_OBSERVATIONS]
    prior = config[CONF_PRIOR]
    threshold = config[CONF_PROBABILITY_THRESHOLD]
    device_class = config[CONF_DEVICE_CLASS]
    return BayesianBinarySensor(name, prior, observations, threshold, device_class)
class BinarySensorDevice():  # Entity
    """Minimal stand-in for Home Assistant's binary-sensor entity base class."""

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return None

    @property
    def state(self):
        """Return the state of the binary sensor."""
        if self.is_on:
            return STATE_ON
        return STATE_OFF

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return None
class BayesianBinarySensor(BinarySensorDevice):
    """Representation of a Bayesian sensor.

    Removed some methods I don't think will be needed for this investigation.
    """

    def __init__(self, name, prior, observations, probability_threshold,
                 device_class):
        """Initialize the Bayesian sensor.

        observations: list of dicts, each with at least 'entity_id' and
        'platform' ('state' or 'numeric_state') keys.
        """
        self._name = name
        self._observations = observations
        self._probability_threshold = probability_threshold
        self._device_class = device_class
        self._deviation = False
        self.prior = prior
        self.probability = prior
        # Currently-triggered observations, keyed by observation id.
        self.current_obs = OrderedDict({})
        # return the entity_id to observ
        to_observe = set(obs['entity_id'] for obs in self._observations)
        # BUGFIX: dict.fromkeys(to_observe, []) gave every entity the SAME
        # shared list, so each append below landed in all entities' buckets
        # and every entity would dispatch all observations. Give each entity
        # its own list instead.
        self.entity_obs = {entity_id: [] for entity_id in to_observe}
        # Append observations
        for ind, obs in enumerate(self._observations):
            obs['id'] = ind
            self.entity_obs[obs['entity_id']].append(obs)
        # Dispatch table: observation platform -> handler method.
        self.watchers = {
            'numeric_state': self._process_numeric_state,
            'state': self._process_state
        }

    # @asyncio.coroutine
    def async_added_to_hass(self):
        """Call when entity about to be added."""
        @callback
        # pylint: disable=invalid-name
        def async_threshold_sensor_state_listener(entity, old_state,
                                                  new_state):
            """Handle sensor state changes."""
            if new_state.state == STATE_UNKNOWN:
                return
            entity_obs_list = self.entity_obs[entity]
            for entity_obs in entity_obs_list:
                platform = entity_obs['platform']
                self.watchers[platform](entity_obs)
            prior = self.prior
            for obs in self.current_obs.values():
                prior = update_probability(
                    prior, obs['prob_true'], obs['prob_false'])
            self.probability = prior  # Updates prior for each observation.
            # self.hass.async_add_job(self.async_update_ha_state, True)
        entities = [obs['entity_id'] for obs in self._observations]
        # async_track_state_change(
        #     self.hass, entities, async_threshold_sensor_state_listener)

    def _update_current_obs(self, entity_observation, should_trigger):
        """Update current observation for single entity."""
        obs_id = entity_observation['id']
        if should_trigger:
            prob_true = entity_observation['prob_given_true']
            prob_false = entity_observation.get(
                'prob_given_false', 1 - prob_true)
            # Update prob_true and prob_false
            self.current_obs[obs_id] = {
                'prob_true': prob_true,
                'prob_false': prob_false
            }
        else:
            self.current_obs.pop(obs_id, None)

    def _process_numeric_state(self, entity_observation):
        """Add entity to current_obs if numeric state conditions are met (regular sensor)."""
        entity = entity_observation['entity_id']
        should_trigger = condition.async_numeric_state(
            self.hass, entity,
            entity_observation.get('below'),
            entity_observation.get('above'), None, entity_observation)
        self._update_current_obs(entity_observation, should_trigger)

    def _process_state(self, entity_observation):
        """Add entity to current observations if state conditions are met (binary sensor)."""
        entity = entity_observation['entity_id']
        should_trigger = condition.state(
            self.hass, entity, entity_observation.get('to_state'))
        self._update_current_obs(entity_observation, should_trigger)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._deviation

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_class(self):
        """Return the sensor class of the sensor."""
        return self._device_class

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_OBSERVATIONS: [val for val in self.current_obs.values()],
            ATTR_PROBABILITY: round(self.probability, 2),
            ATTR_PROBABILITY_THRESHOLD: self._probability_threshold,
        }

    # BUGFIX: this decorator line was mangled in the source to a bare
    # "<EMAIL>" token (a syntax error). Commented out to match the pattern
    # used on async_added_to_hass above; it was likely "@asyncio.coroutine".
    # @asyncio.coroutine
    def async_update(self):
        """Get the latest data and update the states."""
        self._deviation = bool(self.probability > self._probability_threshold)
| StarcoderdataPython |
3214297 | <filename>community/cloud-foundation/templates/gcs_bucket/gcs_bucket.py
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Creates a cloud storage bucket. """
def generate_config(context):
    """Entry point for the deployment resources.

    Builds a GCS bucket resource (plus an optional IAM-policy action when
    bindings are supplied) and the selfLink / gs:// URL outputs.
    """
    project = context.env['project']
    # A falsy 'name' property falls back to the deployment name.
    bucket_name = context.properties.get('name') or context.env['name']

    bucket_resource = {
        'name': bucket_name,
        'type': 'storage.v1.bucket',
        'properties': {
            'project': project,
            'name': bucket_name
        }
    }

    # Copy through any optional bucket settings the caller supplied.
    for prop in ('location', 'versioning', 'storageClass', 'predefinedAcl',
                 'predefinedDefaultObjectAcl', 'logging', 'lifecycle',
                 'labels', 'website'):
        if prop in context.properties:
            bucket_resource['properties'][prop] = context.properties[prop]

    resources = [bucket_resource]

    # If IAM policy bindings are defined then those bindings need to be applied.
    bindings = context.properties.get('bindings', [])
    if bindings:
        resources.append({
            'name': bucket_name + '-iampolicy',
            'action': 'gcp-types/storage-v1:storage.buckets.setIamPolicy',
            'properties': {
                'bucket': '$(ref.' + bucket_name + '.name)',
                'project': project,
                'bindings': bindings
            }
        })

    return {
        'resources': resources,
        'outputs': [
            {
                'name': 'storageBucketSelfLink',
                'value': '$(ref.{}.selfLink)'.format(bucket_name)
            },
            {
                'name': 'storageBucketURL',
                'value': 'gs://' + bucket_name + '/'
            }
        ]
    }
| StarcoderdataPython |
3279114 | import os
import re
def create_results_dir():
    """Create and return the next numbered training results directory.

    Scans ../../results (relative to this file) for existing directories
    containing a number, then creates 'training-NNNNNN' with the next index.
    """
    results_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../results')
    os.makedirs(results_root, exist_ok=True)
    highest = -1
    for _root, dirnames, _files in os.walk(results_root):
        for dirname in dirnames:
            numbers = re.findall('[0-9]+', dirname)
            if numbers and int(numbers[0]) > highest:
                highest = int(numbers[0])
    results_dir = os.path.join(results_root, 'training-' + str(highest + 1).zfill(6))
    os.makedirs(results_dir)
    return results_dir
1742892 | import sqlite3
try:
import tkinter
except ImportError: # python 2
import Tkinter as tkinter
conn = sqlite3.connect('lesson_176_music.sqlite')
class Scrollbox(tkinter.Listbox):
    """A Listbox with an attached vertical scrollbar gridded over its right edge."""

    def __init__(self, window, **kwargs):
        # tkinter.Listbox.__init__(self, window, **kwargs) # Python 2
        super().__init__(window, **kwargs)
        # The scrollbar drives this listbox's vertical view.
        self.scrollbar = tkinter.Scrollbar(window, orient=tkinter.VERTICAL, command=self.yview)

    def grid(self, row, column, sticky='nsw', rowspan=1, columnspan=1, **kwargs):
        """Grid the listbox, then overlay the scrollbar in the same cell (east edge)."""
        # tkinter.Listbox.grid(self, row=row, column=column, sticky=sticky, rowspan=rowspan,
        #                      **kwargs) # Python 2
        super().grid(row=row, column=column, sticky=sticky, rowspan=rowspan, columnspan=columnspan, **kwargs)
        self.scrollbar.grid(row=row, column=column, sticky='nse', rowspan=rowspan)
        # Keep the scrollbar thumb in sync with the listbox view.
        self['yscrollcommand'] = self.scrollbar.set
self['yscrollcommand'] = self.scrollbar.set
class DataListBox(Scrollbox):
    """Scrollbox bound to a database table: displays `field` values from `table`."""

    def __init__(self, window, connection, table, field, sort_order=(), **kwargs):
        # Scrollbox.__init__(self, window, **kwargs) # Python 2
        super().__init__(window, **kwargs)
        self.cursor = connection.cursor()
        self.table = table
        self.field = field
        # Also select _id so on_select can resolve the clicked row's key.
        self.sql_select = "SELECT " + self.field + ", _id" + " FROM " + self.table
        if sort_order:
            self.sql_sort = " ORDER BY " + ','.join(sort_order)
        else:
            self.sql_sort = " ORDER BY " + self.field

    def clear(self):
        """Remove every entry from the listbox."""
        self.delete(0, tkinter.END)

    def requery(self, link_value=None):
        """Reload the listbox contents, optionally filtered by a parent row id.

        NOTE(review): the WHERE column is hard-coded to "artist", so filtered
        requery only makes sense for the albums listbox — confirm intended.
        """
        if link_value:
            sql = self.sql_select + " WHERE " + "artist" + "=?" + self.sql_sort
            print(sql) # TODO delete this line
            self.cursor.execute(sql, (link_value,))
        else:
            print(self.sql_select + self.sql_sort) # TODO delete this line
            self.cursor.execute(self.sql_select + self.sql_sort)
        # clear the listbox contents before re-loading
        self.clear()
        for value in self.cursor:
            self.insert(tkinter.END, value[0])

    def on_select(self, event):
        """When an entry is picked, requery the albums list for that row's id.

        NOTE(review): couples this widget to the module-level `albumList`.
        """
        print(self is event.widget) # TODO delete this line
        index = self.curselection()[0]
        value = self.get(index),
        # get the selected row's _id from the database (second selected column)
        link_id = self.cursor.execute(self.sql_select + " WHERE " + self.field + "=?", value).fetchone()[1]
        albumList.requery(link_id)
        # artist_id = conn.execute("SELECT artists._id FROM artists WHERE artists.name=?", artist_name).fetchone()
        # alist = []
        # for row in conn.execute("SELECT albums.name FROM albums WHERE albums.artist = ? ORDER BY albums.name", artist_id):
        #     alist.append(row[0])
        # albumLV.set(tuple(alist))
        # songLV.set(("Choose an album",))
# songLV.set(("Choose an album",))
def get_songs(event):
    """Populate the songs list when an album is selected in the albums listbox."""
    lb = event.widget
    index = int(lb.curselection()[0])
    album_name = lb.get(index),  # trailing comma: 1-tuple for the SQL parameter
    # get the album ID from the database row
    album_id = conn.execute("SELECT albums._id FROM albums WHERE albums.name=?", album_name).fetchone()
    alist = []
    for x in conn.execute("SELECT songs.title FROM songs WHERE songs.album=? ORDER BY songs.track", album_id):
        alist.append(x[0])
    songLV.set(tuple(alist))
# Build the main window: three weighted list columns plus a spacer.
mainWindow = tkinter.Tk()
mainWindow.title('Music DB Browser')
mainWindow.geometry('1024x768')

mainWindow.columnconfigure(0, weight=2)
mainWindow.columnconfigure(1, weight=2)
mainWindow.columnconfigure(2, weight=2)
mainWindow.columnconfigure(3, weight=1)  # spacer column on right

mainWindow.rowconfigure(0, weight=1)
mainWindow.rowconfigure(1, weight=5)
mainWindow.rowconfigure(2, weight=5)
mainWindow.rowconfigure(3, weight=1)

# ===== labels =====
tkinter.Label(mainWindow, text="Artists").grid(row=0, column=0)
tkinter.Label(mainWindow, text="Albums").grid(row=0, column=1)
tkinter.Label(mainWindow, text="Songs").grid(row=0, column=2)

# ===== Artists Listbox =====
artistList = DataListBox(mainWindow, conn, "artists", "name")
artistList.grid(row=1, column=0, sticky='nsew', rowspan=2, padx=(30, 0))
artistList.config(border=2, relief='sunken')
artistList.requery()
# BUGFIX: the original bound `get_albums`, a name not defined anywhere in
# this file (it was superseded by DataListBox.on_select), so selecting an
# artist raised NameError. Bind the method written to replace it.
artistList.bind('<<ListboxSelect>>', artistList.on_select)

# ===== Albums Listbox =====
albumLV = tkinter.Variable(mainWindow)
albumLV.set(("Choose an artist",))
albumList = DataListBox(mainWindow, conn, "albums", "name", sort_order=("name",))
# NOTE(review): requery(12) pre-filters to artist id 12 — looks like a
# leftover debug call; confirm whether it should be removed.
albumList.requery(12)
albumList.grid(row=1, column=1, sticky='nsew', padx=(30, 0))
albumList.config(border=2, relief='sunken')
albumList.bind('<<ListboxSelect>>', get_songs)

# ===== Songs Listbox =====
songLV = tkinter.Variable(mainWindow)
songLV.set(("Choose an album",))
songList = DataListBox(mainWindow, conn, "songs", "title", ("track", "title"))
songList.requery()
songList.grid(row=1, column=2, sticky='nsew', padx=(30, 0))
songList.config(border=2, relief='sunken')

# ===== Main loop =====
# NOTE(review): these two lines overwrite the "Choose an artist" placeholder
# with 0..99 — almost certainly leftover debug code; confirm before removal.
testList = range(0, 100)
albumLV.set(tuple(testList))

mainWindow.mainloop()
print("closing database connection")
conn.close()
| StarcoderdataPython |
def processing(mode, text, key):
    """Vigenere shift of `text` by `key` over the printable ASCII range 32..126.

    mode 0 encrypts (adds the key); mode 1 decrypts (subtracts it). The key
    repeats cyclically over the text.
    """
    shifts = [ord(k) for k in key]
    out_chars = []
    for idx, ch in enumerate(text):
        shift = shifts[idx % len(shifts)]
        if mode == 1:
            shift = -shift
        # Map into 0..94, rotate, and map back to printable ASCII.
        out_chars.append(chr((ord(ch) - 32 + shift) % 95 + 32))
    return "".join(out_chars)
def assembly(mode):
    """Prompt for text and key, run the cipher, and print the result."""
    text = str(input("[+] Enter your text - "))
    key = str(input("[+] Enter your key - "))
    result = processing(mode, text, key)
    print("\n »» The result by Vigenere algorithm. ««")
    print(result)
def main():
    """Entry point: show the menu, read the mode, and run the cipher flow."""
    print("[x] Vigenere cryptography algorithm. [x]")
    print(" • 0. Encryption mode.\n • 1. Decryption mode.")
    selected_mode = int(input("[?] Select program mode - "))
    assembly(selected_mode)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3216821 | #!/usr/bin/env python
# Package metadata consumed by setup.py and the generated documentation page.
__version__ = '3.3.1'
__author__ = "<NAME> (<EMAIL>)"
__date__ = '2014-February-4'
__url__ = 'https://engineering.purdue.edu/kak/dist/BitVector-3.3.1.html'
__copyright__ = "(C) 2014 Avinash Kak. Python Software Foundation."
__doc__ = '''
BitVector.py
Version: ''' + __version__ + '''
Author: <NAME> (<EMAIL>)
Date: ''' + __date__ + '''
@title
CHANGE LOG:
Version 3.3.1:
This is a minor upgrade to make the syntax of the API method
declarations more uniform. Previously, while most of the method
names used underscores to connect multiple words, some used
camelcasing. Now all use underscores. For backward
compatibility, the old calls will continue to work.
Version 3.3:
This version includes: (1) One additional constructor mode that
allows a bit vector to be constructed directly from the bytes
type objects in the memory. (2) A bugfix in the slice function
for the case when the upper and the lower bounds of the slice
range are identical. (3) A bugfix for the next_set_bit() method.
Version 3.2:
This version includes support for constructing bit vectors
directly from text strings and hex strings. This version also
includes a safety check on the sizes of the two argument bit
vectors when calculating Jaccard similarity between the two.
Version 3.1.1:
This version includes: (1) a fix to the module test code to
account for how string input is handled in the io.StringIO class
in Python 2.7; (2) some improvements to the documentation.
Version 3.1:
This version includes: (1) Correction for a documentation error;
(2) Fix for a bug in slice assignment when one or both of the
slice limits were left unspecified; (3) The non-circular bit
shift methods now return self so that they can be chained; (4) A
method for testing a bitvector for its primality; and (5) A
method that uses Python's 'random.getrandbits()' to generate
a bitvector that can serve as candidate for primes whose bitfield
size is specified.
Version 3.0:
This is a Python 3.x compliant version of the latest incarnation
of the BitVector module. This version should work with both
Python 2.x and Python 3.x.
Version 2.2:
Fixed a couple of bugs, the most important being in the
bitvector initialization code for the cases when the
user-specified value for size conflicts with the user-specified
int value for the vector. Version 2.2 also includes a new
method runs() that returns a list of strings of the consecutive
runs of 1's and 0's in the bitvector. The implementation of
the circular shift operators has also been improved in Version
2.2. This version allows for a chained invocation of these
operators. Additionally, the circular shift operators now
exhibit expected behavior if the user-specified shift value is
negative.
Version 2.1:
Includes enhanced support for folks who use this class for
computer security and cryptography work. You can now call on
the methods of the BitVector class to do Galois Field GF(2^n)
arithmetic on bit arrays. This should save the users of this
class the bother of having to write their own routines for
finding multiplicative inverses in GF(2^n) finite fields.
Version 2.0.1:
Fixed numerous typos and other errors in the documentation page
for the module. The implementation code remains unchanged.
Version 2.0:
To address the needs of the folks who are using the BitVector
class in data mining research, the new version of the class
includes several additional methods. Since the bitvectors used
by these folks can be extremely long, possibly involving
millions of bits, the new version of the class includes a much
faster method for counting the total number of set bits when a
bitvector is sparse. [But note that this new bit counting
method may perform poorly for dense bitvectors. So the old bit
counting method has been retained.] Also for data mining folks,
the new version of the class is provided with similarity and
distance calculation metrics such as the Jaccard similarity
coefficient, the Jaccard distance, and the Hamming distance.
Again for the same folks, the class now also has a
next_set_bit(from_index) method. Other enhancements to the
class include methods for folks who do research in cryptography.
Now you can directly calculate the greatest common divisor of
two bitvectors, or find the multiplicative inverse of one
bitvector modulo another bitvector.
Version 1.5.1:
Removed a bug from the implementation of the right circular
shift operator.
Version 1.5:
This version should prove to be much more efficient for long
bitvectors. Efficiency in BitVector construction when only its
size is specified was achieved by eliminating calls to
_setbit(). The application of logical operators to two
BitVectors of equal length was also made efficient by
eliminating calls to the padding function. Another feature of
this version is the count_bits() method that returns the total
number of bits set in a BitVector instance. Yet another feature
of this version is the setValue() method that alters the bit
pattern associated with a previously constructed BitVector.
Version 1.4.1:
The reset() method now returns 'self' to allow for cascaded
invocation with the slicing operator. Also removed the
discrepancy between the value of the __copyright__ variable in
the module and the value of license variable in setup.py.
Version 1.4:
This version includes the following two upgrades: 1) code for
slice assignment; and 2) A reset function to reinitialize a
previously constructed BitVector. Additionally, the code was
cleaned up with the help of pychecker.
Version 1.3.2:
Fixed a potentially misleading documentation issue for the
Windows users of the BitVector class. If you are writing an
internally generated BitVector to a disk file, you must open the
file in the binary mode. If you don't, the bit patterns that
correspond to line breaks will be misinterpreted. On a Windows
machine in the text mode, the bit pattern 000001010 ('\\n') will
be written out to the disk as 0000110100001010 ('\\r\\n').
Version 1.3.1:
Removed the inconsistency in the internal representation of
bitvectors produced by logical bitwise operations vis-a-vis the
bitvectors created by the constructor. Previously, the logical
bitwise operations resulted in bitvectors that had their bits
packed into lists of ints, as opposed to arrays of unsigned
shorts.
Version 1.3:
(a) One more constructor mode included: When initializing a new
bitvector with an integer value, you can now also specify a size
for the bitvector. The constructor zero-pads the bitvector
from the left with zeros. (b) The BitVector class now supports
'if x in y' syntax to test if the bit pattern 'x' is contained
in the bit pattern 'y'. (c) Improved syntax to conform to
well-established Python idioms. (d) What used to be a comment
before the beginning of each method definition is now a
docstring.
Version 1.2:
(a) One more constructor mode included: You can now construct a
bitvector directly from a string of 1's and 0's. (b) The class
now constructs a shortest possible bit vector from an integer
value. So the bit vector for the integer value 0 is just one
bit of value 0, and so on. (c) All the rich comparison operators
are now overloaded. (d) The class now includes a new method
'intValue()' that returns the unsigned integer value of a bit
vector. This can also be done through '__int__'. (e) The
package now includes a unittest based framework for testing out
an installation. This is in a separate directory called
"TestBitVector".
Version 1.1.1:
The function that does block reads from a disk file now peeks
ahead at the end of each block to see if there is anything
remaining to be read in the file. If nothing remains, the
more_to_read attribute of the BitVector object is set to False.
This simplifies reading loops. This version also allows
BitVectors of size 0 to be constructed
Version 1.1:
I have changed the API significantly to provide more ways for
constructing a bit vector. As a result, it is now necessary to
supply a keyword argument to the constructor.
@title
INSTALLATION:
The BitVector class was packaged using Distutils. For installation,
execute the following command-line in the source directory (this is
the directory that contains the setup.py file after you have
downloaded and uncompressed the tar archive):
python setup.py install
You have to have root privileges for this to work. On Linux
distributions, this will install the module file at a location that
looks like
/usr/lib/python2.7/dist-packages/
If you do not have root access, you have the option of working
directly off the directory in which you downloaded the software by
simply placing the following statements at the top of your scripts
that use the BitVector class
import sys
sys.path.append( "pathname_to_BitVector_directory" )
To uninstall the module, simply delete the source directory, locate
where BitVector was installed with "locate BitVector" and delete
those files. As mentioned above, the full pathname to the installed
version is likely to look like
/usr/lib/python2.7/dist-packages/BitVector*
If you want to carry out a non-standard install of BitVector, look
up the on-line information on Disutils by pointing your browser to
http://docs.python.org/dist/dist.html
@title
INTRODUCTION:
The BitVector class is for a memory-efficient packed representation
of bit arrays and for logical operations on such arrays. The
operations supported on bit vectors are:
__add__ for concatenation
__and__ for bitwise logical AND
__contains__
__eq__, __ne__, __lt__, __le__, __gt__, __ge__
__getitem__ for indexed access
__getslice__ for slice access
__int__ for returning integer value
__invert__ for inverting the 1's and 0's
__iter__ for iterating through
__len__ for len()
__lshift__ for circular shifts to the left
__or__ for bitwise logical OR
__rshift__ for circular shifts to the right
__setitem__ for indexed and slice setting
__str__ for str()
__xor__ for bitwise logical XOR
count_bits
count_bits_sparse faster for sparse bit vectors
deep_copy
divide_into_two
gcd for greatest common divisor
gen_rand_bits_for_prime
get_hex_string_from_bitvector
get_text_from_bitvector
gf_divide for divisions in GF(2^n)
gf_MI for multiplicative inverse in GF(2^n)
gf_multiply for multiplications in GF(2)
gf_multiply_modular for multiplications in GF(2^n)
hamming_distance
int_val for returning the integer value
is_power_of_2
is_power_of_2_sparse faster for sparse bit vectors
jaccard_distance
jaccard_similarity
length
multiplicative_inverse
next_set_bit
pad_from_left
pad_from_right
permute
rank_of_bit_set_at_index
read_bits_from_file
reset
reverse
runs
shift_left for non-circular left shift
shift_right for non-circular right shift
slice assignment
set_value
test_for_primality
unpermute
write_to_file
write_bits_to_fileobject
@title
CONSTRUCTING BIT VECTORS:
You can construct a bit vector in the following different ways:
@tagC0
(C0) You construct an EMPTY bit vector using the following syntax:
bv = BitVector(size = 0)
@tagC1
(C1) You can construct a bit vector directly from either a tuple
or a list of bits, as in
bv = BitVector(bitlist = [1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1])
@tagC2
(C2) You can construct a bit vector from an integer by
bv = BitVector(intVal = 56789)
The bits stored now will correspond to the binary
representation of the integer. The resulting bit vector is
the shortest possible bit vector for the integer value
supplied. For example, when intVal is 0, the bit vector
constructed will consist of just the bit 0.
@tagC3
(C3) When initializing a bit vector with an intVal as shown above,
you can also specify a size for the bit vector:
bv = BitVector(intVal = 0, size = 8)
will return the bit vector consisting of the bit pattern
00000000. The zero padding needed for meeting the size
requirement is always on the left. If the size supplied is
smaller than what it takes to create the shortest possible
bit vector for intVal, an exception is thrown.
@tagC4
(C4) You can create a zero-initialized bit vector of a given size by
bv = BitVector(size = 62)
This bit vector will hold exactly 62 bits, all initialized to
the 0 bit value.
@tagC5
(C5) You can construct a bit vector from a disk file by a two-step
procedure. First you construct an instance of bit vector by
bv = BitVector(filename = 'somefile')
This bit vector itself is incapable of holding the bits. To
now create bit vectors that actually hold the bits, you need
to make the following sort of a call on the above variable
bv:
bv1 = bv.read_bits_from_file(64)
bv1 will be a regular bit vector containing 64 bits from the
disk file. If you want to re-read a file from the beginning
for some reason, you must obviously first close the file
object that was acquired with a call to the BitVector
constructor with a filename argument. This can be
accomplished by
bv.close_file_object()
@tagC6
(C6) You can construct a bit vector from a string of 1's and 0's by
bv = BitVector(bitstring = '110011110000')
@tagC7
(C7) Yet another way to construct a bit vector is to read the bits
directly from a file-like object, as in
import io
x = "111100001111"
fp_read = io.StringIO( x )
bv = BitVector(fp = fp_read)
print(bv) # 111100001111
@tagC8
(C8) You can also construct a bit vector directly from a text string
as shown by the example:
bv3 = BitVector(textstring = "hello")
print(bv3) # 0110100001100101011011000110110001101111
mytext = bv3.get_text_from_bitvector()
print mytext # hello
The bit vector is constructed by using the one-byte ASCII
encoding of the characters in the text string.
@tagC9
(C9) You can also construct a bit vector directly from a string
of hex digits as shown by the example:
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4) # 0110100001100101011011000110110001101111
myhexstring = bv4.get_hex_string_from_bitvector()
          print myhexstring                  # 68656c6c6f
@tagC10
(C10) You can also construct a bit vector directly from a bytes type
object you previously created in your script. This can be
useful when you are trying to recover the integer parameters
stored in public and private keys. A typical usage scenario:
keydata = base64.b64decode(open(sys.argv[1]).read().split(None)[1])
bv = BitVector.BitVector(rawbytes = keydata)
where sys.argv[1] is meant to supply the name of a public key
file (in this case an SSH RSA public key file).
@title
OPERATIONS SUPPORTED BY THE BITVECTOR CLASS:
@title
DISPLAYING BIT VECTORS:
@tag1
(1) Since the BitVector class implements the __str__ method, a bit
vector can be displayed on a terminal by
print(bitvec)
or, for only Python 2.x, by
print bitvec
Basically, you can always obtain the string representation of a
bit vector by
str(bitvec)
and integer value by
int(bitvec)
@title
ACCESSING AND SETTING INDIVIDUAL BITS AND SLICES:
@tag2
(2) Any single bit of a bit vector bv can be set to 1 or 0 by
bv[M] = 1_or_0
print( bv[M] )
or, for just Python 2.x, by
bv[M] = 1_or_0
print bv[M]
for accessing (and setting) the bit at the position that is
indexed M. You can retrieve the bit at position M by bv[M].
Note that the index 0 corresponds to the first bit at the left
end of a bit pattern. This is made possible by the
implementation of the __getitem__ and __setitem__ methods.
@tag3
(3) A slice of a bit vector obtained by
bv[i:j]
is a bit vector constructed from the bits at index positions
from i through j-1. This is made possible by the
implementation of the __getslice__ method.
@tag4
(4) You can also carry out slice assignment:
bv1 = BitVector(size = 25)
bv2 = BitVector(bitstring = '1010001')
bv1[6:9] = bv2[0:3]
bv3 = BitVector(bitstring = '101')
bv1[0:3] = bv3
The first slice assignment will set the 6th, 7th, and the 8th
bits of the bit vector bv1 according to the first three bits of
bv2. The second slice assignment will set the first three bits
of bv1 according to the three bits in bv3. This is made
possible by the slice setting code in the __setitem__ method.
@tag5
(5) You can iterate over a bit vector, as illustrated by
for bit in bitvec:
print(bit)
This is made possible by the override definition for the special
__iter__() method.
@tag6
(6) Negative subscripts for array-like indexing are supported.
Therefore,
bitvec[-i]
is legal assuming that the index range is not violated. A
negative index carries the usual Python interpretation: The
last element of a bit vector is indexed -1 and the first
element -(n+1) if n is the total number of bits in the bit
vector. Negative subscripts are made possible by
special-casing such access in the implementation of the
__getitem__ method (actually it is the _getbit method).
@tag7
(7) You can reset a previously constructed bit vector to either the
all-zeros state or the all-ones state by
bv1 = BitVector(size = 25)
...
...
bv1.reset(1)
...
...
bv1.reset(0)
The first call to reset() will set all the bits of bv1 to 1's
and the second call all the bits to 0's.
@title
LOGICAL OPERATIONS ON BIT VECTORS:
@tag8
(8) Given two bit vectors bv1 and bv2, you can perform bitwise
logical operations on them by
result_bv = bv1 ^ bv2 # for bitwise XOR
result_bv = bv1 & bv2 # for bitwise AND
result_bv = bv1 | bv2 # for bitwise OR
result_bv = ~bv1 # for bitwise negation
These are made possible by implementing the __xor__, __and__,
__or__, and __invert__ methods, respectively.
@title
COMPARING BIT VECTORS:
@tag9
(9) Given two bit vectors bv1 and bv2, you can carry out the
following comparisons that return Boolean values:
bv1 == bv2
bv1 != bv2
bv1 < bv2
bv1 <= bv2
bv1 > bv2
bv1 >= bv2
The equalities and inequalities are determined by the integer
values associated with the bit vectors. These operator
overloadings are made possible by providing implementation code
for __eq__, __ne__, __lt__, __le__, __gt__, and __ge__,
respectively.
@title
OTHER SUPPORTED OPERATIONS:
@tag10
(10) You can permute and unpermute bit vectors:
bv_permuted = bv.permute(permutation_list)
bv_unpermuted = bv.unpermute(permutation_list)
@tag11
(11) Left and right circular rotations can be carried out by
bitvec << N
bitvec >> N
for circular rotations to the left and to the right by N bit
positions. These operator overloadings are made possible by
implementing the __lshift__ and __rshift__ methods,
respectively.
@tag12
(12) If you want to shift a bitvector non-circularly:
bitvec = BitVector(bitstring = '10010000')
bitvec.shift_left(3) # 10000000
bitvec.shift_right(3) # 00010000
Obviously, for a sufficient large left or right non-circular
shift, you will end up with a bitvector that is all zeros.
@tag13
(13) A bit vector containing an even number of bits can be divided
into two equal parts by
[left_half, right_half] = bitvec.divide_into_two()
where left_half and right_half hold references to the two
returned bit vectors.
@tag14
(14) You can find the integer value of a bit array by
bitvec.int_val()
or by
int(bitvec)
@tag15
(15) You can convert a bit vector into its string representation by
str( bitvec )
@tag16
(16) Because __add__ is supplied, you can always join two bit vectors
by
bitvec3 = bitvec1 + bitvec2
bitvec3 is a new bit vector that contains all the bits of
bitvec1 followed by all the bits of bitvec2.
@tag17
(17) You can find the length of a bitvector by
len = bitvec.length()
@tag18
(18) You can make a deep copy of a bitvector by
bitvec_copy = bitvec.deep_copy()
@tag19
(19) You can write a bit vector directly to a file, as illustrated
by the following example that reads one bit vector from a file
and then writes it to another file
bv = BitVector(filename = 'input.txt')
bv1 = bv.read_bits_from_file(64)
print(bv1)
FILEOUT = open('output.bits', 'wb')
bv1.write_to_file( FILEOUT )
FILEOUT.close()
bv = BitVector(filename = 'output.bits')
bv2 = bv.read_bits_from_file(64)
print(bv2)
IMPORTANT: The size of a bit vector must be a multiple of of 8
for this write function to work. If this
condition is not met, the function will throw an
exception.
IMPORTANT FOR WINDOWS USERS: When writing an internally
generated bit vector out to a disk file, it is
important to open the file in the binary mode as
shown. Otherwise, the bit pattern 00001010
('\\n') in your bitstring will be written out as
0000110100001010 ('\\r\\n'), which is the
linebreak on Windows machines.
@tag20
(20) You can also write a bit vector directly to a stream object, as
illustrated by
fp_write = io.StringIO()
bitvec.write_bits_to_fileobject( fp_write )
print( fp_write.getvalue() )
@tag21
(21) You can pad a bit vector from the left or from the right with a
designated number of zeros
bitvec.pad_from_left( n )
bitvec.pad_from_right( n )
In the first case, the new bit vector will be the same as the
old bit vector except for the additional n zeros on the left.
The same thing happens in the second case except that now the
additional n zeros will be on the right.
@tag22
(22) You can test if a bit vector x is contained in another bit
vector y by using the syntax 'if x in y'. This is made
possible by the override definition for the special
__contains__ method.
@tag23
(23) You can change the bit pattern associated with a previously
constructed BitVector instance:
bv = BitVector(intVal = 7, size =16)
print(bv) # 0000000000000111
bv.set_value(intVal = 45)
print(bv) # 101101
@tag24
(24) You can count the number of bits set in a BitVector instance by
bv = BitVector( bitstring = '100111' )
print( bv.count_bits() ) # 4
@tag25
(25) For folks who use bit vectors with millions of bits in them but
with only a few bits set, your bit counting will go much, much
faster if you call count_bits_sparse() instead of count_bits():
# a BitVector with 2 million bits:
bv = BitVector(size = 2000000)
bv[345234] = 1
bv[233]=1
bv[243]=1
bv[18]=1
bv[785] =1
print(bv.count_bits_sparse()) # 5
@tag26
(26) You can calculate the similarity and the distance between two
bit vectors using the Jaccard similarity coefficient and the
Jaccard distance. Also, you can calculate the Hamming distance
between two bit vectors:
bv1 = BitVector(bitstring = '11111111')
bv2 = BitVector(bitstring = '00101011')
print bv1.jaccard_similarity(bv2)
print(str(bv1.jaccard_distance(bv2)))
print(str(bv1.hamming_distance(bv2)))
@tag27
(27) Starting from a given bit position, you can find the position
index of the next set bit:
bv = BitVector(bitstring = '00000000000001')
print(bv.next_set_bit(5)) # 13
since the position index of the SET bit after the bit
whose position index 5 is 13.
@tag28
(28) You can measure the "rank" of a bit that is set at a given
position. Rank is the number of bits that are set up to the
position of the bit you are interested in.
bv = BitVector(bitstring = '01010101011100')
print(bv.rank_of_bit_set_at_index(10)) # 6
@tag29
(29) You can test whether the integer value of a bit vector is a
power of two. The sparse version of this method will work much
faster for very long bit vectors. However, the regular version
may work faster for small bit vectors.
bv = BitVector(bitstring = '10000000001110')
print( bv.is_power_of_2() )
print( bv.is_power_of_2_sparse() )
@tag30
(30) Given a bit vector, you can construct a bit vector with all the
bits reversed, in the sense that what was left to right before
now becomes right to left.
bv = BitVector(bitstring = '0001100000000000001')
print(str(bv.reverse()))
@tag31
(31) You can find the greatest common divisor of two bit vectors:
bv1 = BitVector(bitstring = '01100110') # int val: 102
bv2 = BitVector(bitstring = '011010') # int val: 26
bv = bv1.gcd(bv2)
print(int(bv)) # 2
@tag32
(32) You can find the multiplicative inverse of a bit vector
vis-a-vis a given modulus:
bv_modulus = BitVector(intVal = 32)
bv = BitVector(intVal = 17)
bv_result = bv.multiplicative_inverse( bv_modulus )
if bv_result is not None:
print(str(int(bv_result))) # 17
else: print "No multiplicative inverse in this case"
This multiplicative inverse is calculated using normal integer
arithmetic. For multiplicative inverses in GF(2^n), use the
gf_MI() method described below.
@tag33
(33) To find the multiplicative inverse of a bit vector in
GF(2^n) with respect to a modulus polynomial, you can do
the following:
modulus = BitVector(bitstring = '100011011')
n = 8
a = BitVector(bitstring = '00110011')
multi_inverse = a.gf_MI(modulus, n)
print multi_inverse # 01101100
@tag34
(34) If you just want to multiply two bit patterns in GF(2):
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply(b)
print(c) # 00010100110
@tag35
(35) On the other hand, if you want to carry out modular
multiplications in GF(2^n):
modulus = BitVector( bitstring='100011011' ) # AES modulus
n = 8
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply_modular(b, modulus, n)
print( c ) # 10100110
@tag36
(36) To divide by a modulus bitvector in GF(2^n):
mod = BitVector(bitstring='100011011') # AES modulus
n = 8
bitvec = BitVector(bitstring='11100010110001')
quotient, remainder = bitvec.gf_divide(mod, n)
print(quotient) # 00000000111010
print(remainder) # 10001111
@tag37
(37) You can extract from a bit vector the runs of 1's and 0's
in the vector
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print( str(bv.runs()) ) # ['111', '00', '1']
@tag38
(38) You can generate a bit vector with random bits that span in
full the specified width. For example, if you wanted the
random bit vector to fully span 32 bits, you would say
bv = BitVector(intVal = 0)
bv = bv.gen_rand_bits_for_prime(32)
print(bv) # 11011010001111011010011111000101
@tag39
(39) You can test whether a randomly generated bit vector is a prime
number using the probabilistic Miller-Rabin test
bv = BitVector(intVal = 0)
bv = bv.gen_rand_bits_for_prime(32)
check = bv.test_for_primality()
print(check)
@tag40
(40) You can call get_text_from_bitvector() to directly convert a bit
vector into a text string (this is a useful thing to do only if
the length of the vector is an integral multiple of 8 and every
byte in your bitvector has a print representation):
bv = BitVector(textstring = "hello")
print(bv) # 0110100001100101011011000110110001101111
         mytext = bv.get_text_from_bitvector()
print mytext # hello
@tag41
(41) You can directly convert a bit vector into a hex string (this
is a useful thing to do only if the length of the vector is an
integral multiple of 4):
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4) # 0110100001100101011011000110110001101111
myhexstring = bv4.get_hex_string_from_bitvector()
         print myhexstring                  # 68656c6c6f
@title
HOW THE BIT VECTORS ARE STORED:
The bits of a bit vector are stored in 16-bit unsigned ints
following <NAME>'s recommendation to that effect on the
Pyrex mailing list. As you can see in the code for `__init__()',
after resolving the argument with which the constructor is called,
the very first thing the constructor does is to figure out how many
of those 2-byte ints it needs for the bits (see how the value is
assigned to the variable `two_byte_ints_needed' toward the end of
`__init__()'). For example, if you wanted to store a 64-bit array,
the variable 'two_byte_ints_needed' would be set to 4. (This does
not mean that the size of a bit vector must be a multiple of 16.
Any sized bit vectors can be constructed --- the constructor will
choose the minimum number of two-byte ints needed.) Subsequently,
the constructor acquires an array of zero-initialized 2-byte ints.
The last thing that is done in the code for `__init__()' is to
shift the bits into the array of two-byte ints.
As mentioned above, note that it is not necessary for the size of a
bit vector to be a multiple of 16 even though we are using C's
    unsigned short as a basic unit for storing the bit arrays.  The
class BitVector keeps track of the actual number of bits in the bit
vector through the "size" instance variable.
Note that, except for one case, the constructor must be called with
a single keyword argument, which determines how the bit vector will
be constructed. The single exception to this rule is for the
keyword argument `intVal' which can be used along with the `size'
keyword argument. When `intVal' is used without the `size' option,
the bit vector constructed for the integer is the shortest possible
bit vector. On the other hand, when `size' is also specified, the
bit vector is padded with zeroes from the left so that it has the
specified size. The code for `__init__()' begins by making sure
your constructor call only uses the acceptable keywords. The
constraints on how many keywords can be used together in a
constructor call are enforced when we process each keyword option
separately in the rest of the code for `__init__()'.
The first keyword option processed by `__init__()' is for
`filename'. When the constructor is called with the `filename'
keyword, as in
bv = BitVector(filename = 'myfilename')
the call returns a bit vector on which you must subsequently invoke
the `read_bits_from_file()' method to actually obtain a bit vector
consisting of the bits that constitute the information stored in
the file.
The next keyword option considered in `__init__()' is for `fp',
which is for constructing a bit vector by reading off the bits from
a file-like object, as in
x = "111100001111"
fileobj = StringIO.StringIO( x )
bv = BitVector( fp = fileobj )
The keyword option `intVal' considered next is for converting an
integer into a bit vector through a constructor call like
bv = BitVector(intVal = 123456)
The bits stored in the bit vector thus created correspond to the
big-endian binary representation of the integer argument provided
through `intVal' (meaning that the most significant bit will be at
the leftmost position in the bit vector.) THE BIT VECTOR
CONSTRUCTED WITH THE ABOVE CALL IS THE SHORTEST POSSIBLE BIT VECTOR
FOR THE INTEGER SUPPLIED. As a case in point, when `intVal' is set
    to 0, the bit vector consists of a single bit whose value is also
    0.  When
constructing a bit vector with the `intVal' option, if you also
want to impose a size condition on the bit vector, you can make a
call like
bv = BitVector(intVal = 46, size = 16)
which returns a bit vector of the indicated size by padding the
shortest possible vector for the `intVal' option with zeros from
the left.
    The next option processed by `__init__()' is for the `size' keyword
when this keyword is used all by itself. If you want a bit vector
of just 0's of whatever size, you make a call like
bv = BitVector(size = 61)
This returns a bit vector that will hold exactly 61 bits, all
initialized to the zero value.
The next constructor keyword processed by `__init__()' is
`bitstring'. This is to allow a bit vector to be constructed
directly from a bit string as in
bv = BitVector(bitstring = '00110011111')
The keyword considered next is `bitlist' which allows a bit vector
to be constructed from a list or a tuple of individual bits, as in
bv = BitVector(bitlist = (1, 0, 1, 1, 0, 0, 1))
The last two keyword options considered in `__init__()' are for
keywords `textstring' and `hexstring'. If you want to construct a
bitvector directly from a text string, you call
bv = BitVector(textstring = "hello")
The bit vector created corresponds to the ASCII encodings of the
individual characters in the text string.
And if you want to do the same with a hex string, you call
bv = BitVector(hexstring = "68656c6c6f")
Now, as you would expect, the bits in the bit vector will
correspond directly to the hex digits in your hex string.
@title
ACKNOWLEDGMENTS:
The author is grateful to <NAME> for suggesting many
improvements that were incorporated in Version 1.1 of this package.
The author would like to thank <NAME> whose email resulted in
the creation of Version 1.2. Kurt also caught an error in my
earlier version of 'setup.py' and suggested a unittest based
approach to the testing of the package. Kurt also supplied the
Makefile that is included in this distribution. The author would
also like to thank all (<NAME>, <NAME>, and <NAME>) for their responses to my comp.lang.python query
concerning how to make a Python input stream peekable. This
feature was included in Version 1.1.1.
With regard to the changes incorporated in Version 1.3, thanks are
owed to <NAME> and <NAME> for bringing to my
attention the bug related to the intVal method of initializing a
bit vector when the value of intVal exceeded sys.maxint. This
problem is fixed in Version 1.3. Version 1.3 also includes many
other improvements that make the syntax better conform to the
standard idioms of Python. These changes and the addition of the
new constructor mode (that allows a bit vector of a given size to
be constructed from an integer value) are also owing to Kurt's
suggestions.
With regard to the changes incorporated in Version 1.3.1, I would
like to thank <NAME> for noticing that the bitwise
logical operators resulted in bit vectors that had their bits
packed into lists of ints, as opposed to arrays of unsigned shorts.
This inconsistency in representation has been removed in version
1.3.1. Michael has also suggested that since BitVector is mutable,
I should be overloading __iand__(), __ior__(), etc., for in-place
modifications of bit vectors. Michael certainly makes a good
point. But I am afraid that this change will break the code for the
existing users of the BitVector class.
I thank <NAME> for bringing to my attention the problem with
writing bitstrings out to a disk files on Windows machines. This
turned out to be a problem more with the documentation than with
the BitVector class itself. On a Windows machine, it is
particularly important that a file you are writing a bitstring into
be opened in binary mode since otherwise the bit pattern 00001010
('\\n') will be written out as 0000110100001010 ('\\r\\n'). This
documentation fix resulted in Version 1.3.2.
With regard to Version 1.4, the suggestions/bug reports made by
<NAME>, <NAME>, and <NAME> contributed to this
version. I wish to thank all three. John wanted me to equip the
class with a reset() method so that a previously constructed class
could be reset to either all 0's or all 1's. Bob spotted loose
local variables in the implementation --- presumably left over from
a debugging phase of the code. Bob recommended that I clean up the
code with pychecker. That has been done. Steve noticed that slice
assignment was not working. It should work now.
Version 1.4.1 was prompted by <NAME> suggesting that if
reset() returned self, then the slice operation could be combined
with the reset operation. Thanks John! Another reason for 1.4.1
was to remove the discrepancy between the value of the
__copyright__ variable in the module and the value of license
variable in setup.py. This discrepancy was brought to my attention
by <NAME>. Thanks David!
Version 1.5 has benefited greatly by the suggestions made by Ryan
Cox. By examining the BitVector execution with cProfile, Ryan
observed that my implementation was making unnecessary method calls
to _setbit() when just the size option is used for constructing a
BitVector instance. Since Python allocates cleaned up memory, it
is unnecessary to set the individual bits of a vector if it is
known in advance that they are all zero. Ryan made a similar
observation for the logical operations applied to two BitVector
instances of equal length. He noticed that I was making
unnecessary calls to _resize_pad_from_left() for the case of equal
arguments to logical operations. Ryan also recommended that I
include a method that returns the total number of bits set in a
BitVector instance. The new method count_bits() does exactly
that. Thanks Ryan for all your suggestions. Version 1.5 also
includes the method setValue() that allows the internally stored
bit pattern associated with a previously constructed BitVector to
be changed. A need for this method was expressed by Aleix
Conchillo. Thanks Aleix.
Version 1.5.1 is a quick release to fix a bug in the right circular
shift operator. This bug was discovered by <NAME>. Thanks
very much Jasper.
Version 2.0 was prompted mostly by the needs of the folks who play
with very long bit vectors that may contain millions of bits. I
believe such bit vectors are encountered in data mining research
and development. Towards that end, among the new methods in
Version 2.0, the count_bits_sparse() was provided by <NAME>.
She says when a bit vector contains over 2 million bits and only,
say, five bits are set, her method is faster than the older
count_bits() method by a factor of roughly 18. Thanks
Rhiannon. [The logic of the new implementation works best for very
sparse bit vectors. For very dense vectors, it may perform more
slowly than the regular count_bits() method. For that reason, I
have retained the original method.] Rhiannon's implementation is
based on what has been called the Kernighan way at the web site
http://graphics.stanford.edu/~seander/bithacks.html. Version 2.0
also includes a few additional functions posted at this web site
for extracting information from bit fields. Also included in this
new version is the next_set_bit() method supplied by <NAME>.
I believe this method is also useful for data mining folks. Thanks
Jason. Additional methods in Version 2.0 include the similarity and
the distance metrics for comparing two bit vectors, method for
finding the greatest common divisor of two bit vectors, and a
method that determines the multiplicative inverse of a bit vector
vis-a-vis a modulus. The last two methods should prove useful to
folks in cryptography.
With regard to Version 2.2, I would like to thank <NAME> for
bringing to my attention a bug in the BitVector initialization code
for the case when both the int value and the size are user-
specified and the two values happen to be inconsistent. Ethan also
discovered that the circular shift operators did not respond to
negative values for the shift. These and some other shortcomings
discovered by Ethan have been fixed in Version 2.2. Thanks Ethan!
For two of the changes included in Version 3.1, I'd like to thank
<NAME> and <NAME>. Libor discovered a documentation
error in the listing of the 'count_bits_sparse()' method and David
discovered a bug in slice assignment when one or both of the slice
limits are left unspecified. These errors in Version 3.0 have been
fixed in Version 3.1.
Version 3.1.1 was triggered by two emails, one from <NAME> and the other from <NAME>, both related to the
issue of compilation of the module. John-Mark mentioned that since
this module did not work with Python 2.4.3, the statement that the
module was appropriate for all Python 2.x was not correct, and
Nessim reported that he had run into a problem with the compilation
of the test portion of the code with Python 2.7 where a string of
1's and 0's is supplied to io.StringIO() for the construction of a
memory file. Both these issues have been resolved in 3.1.1.
Version 3.2 was triggered by my own desire to include additional
functionality in the module to make it more useful for
experimenting with hashing functions. While I was at it, I also
included in it a couple of safety checks on the lengths of the two
arguments bit vectors when computing their Jaccard similarity. I
could see the need for these checks after receiving an email from
<NAME> about the error messages he was receiving during
Jaccard similarity calculations. Thanks Patrick!
Version 3.3 includes a correction by <NAME> for a bug in the
next_set_bit() method. Thanks, John!
@title
ABOUT THE AUTHOR:
<NAME> is the author of "Programming with Objects: A Comparative
Presentation of Object-Oriented Programming with C++ and Java",
published by John-Wiley in 2003. This book presents a new approach
to the combined learning of two large object-oriented languages,
C++ and Java. It is being used as a text in a number of
educational programs around the world. This book has also been
translated into Chinese. <NAME> is also the author of "Scripting
with Objects: A Comparative Presentation of Object-Oriented
Scripting with Perl and Python," published in 2008 by John-Wiley.
@title
SOME EXAMPLE CODE:
#!/usr/bin/env python
import BitVector
# Construct a bit vector from a list or tuple of bits:
bv = BitVector.BitVector( bitlist = (1, 0, 0, 1) )
print(bv) # 1001
# Construct a bit vector from an integer:
bv = BitVector.BitVector( intVal = 5678 )
print(bv) # 0001011000101110
# Construct a bit vector of a given size from a given
# integer:
bv = BitVector( intVal = 45, size = 16 )
print(bv) # 0000000000101101
# Construct a zero-initialized bit vector of a given size:
bv = BitVector.BitVector( size = 5 )
print(bv) # 00000
# Construct a bit vector from a bit string:
bv = BitVector.BitVector( bitstring = '110001' )
print(bv[0], bv[1], bv[2], bv[3], bv[4], bv[5]) # 1 1 0 0 0 1
print(bv[-1], bv[-2], bv[-3], bv[-4], bv[-5], bv[-6]) # 1 0 0 0 1 1
# Construct a bit vector from a file like object:
import io
x = "111100001111"
fp_read = io.StringIO( x )
bv = BitVector( fp = fp_read )
print(bv) # 111100001111
# Experiments with bitwise logical operations:
bv3 = bv1 | bv2
bv3 = bv1 & bv2
bv3 = bv1 ^ bv2
bv6 = ~bv5
# Find the length of a bit vector
print( str(len( bitvec ) ) )
# Find the integer value of a bit vector
print( bitvec.intValue() )
# Open a file for reading bit vectors from
bv = BitVector.BitVector( filename = 'TestBitVector/testinput1.txt' )
print( bv ) # nothing yet
bv1 = bv.read_bits_from_file(64)
print( bv1 ) # first 64 bits from the file
# Divide a bit vector into two equal sub-vectors:
[bv1, bv2] = bitvec.divide_into_two()
# Permute and Un-Permute a bit vector:
bv2 = bitvec.permute( permutation_list )
bv2 = bitvec.unpermute( permutation_list )
# Try circular shifts to the left and to the right
bitvec << 7
bitvec >> 7
# Try 'if x in y' syntax for bit vectors:
bv1 = BitVector( bitstring = '0011001100' )
bv2 = BitVector( bitstring = '110011' )
if bv2 in bv1:
print( "%s is in %s" % (bv2, bv1) )
else:
print( "%s is not in %s" % (bv2, bv1) )
.....
.....
(For a more complete working example, see the
example code in the BitVectorDemo.py file in the
Examples sub-directory.)
@endofdocs
'''
import array
import operator
import sys
_hexdict = { '0' : '0000', '1' : '0001', '2' : '0010', '3' : '0011',
'4' : '0100', '5' : '0101', '6' : '0110', '7' : '0111',
'8' : '1000', '9' : '1001', 'a' : '1010', 'b' : '1011',
'c' : '1100', 'd' : '1101', 'e' : '1110', 'f' : '1111' }
def _readblock( blocksize, bitvector ):
    '''
    Read up to blocksize bits from bitvector.FILEIN and return them as a
    string of '1' and '0' characters.  On a short read (end of stream)
    the bitvector's more_to_read flag is cleared.  After a full block, a
    tell-read-seek peek at the next byte decides whether more data
    remains; this only works for seekable streams such as disk files (a
    socket would need recv() with MSG_PEEK instead).
    '''
    global _hexdict
    bitstring = ''
    # blocksize is guaranteed to be a multiple of 8 by the caller.
    for _ in range(int(blocksize // 8)):
        byte = bitvector.FILEIN.read(1)
        if byte == b'':
            # Short read: flag EOF if the block could not be filled.
            if len(bitstring) < blocksize:
                bitvector.more_to_read = False
            return bitstring
        if sys.version_info[0] == 3:
            hexvalue = '%02x' % byte[0]
        else:
            hexvalue = hex( ord( byte ) )[2:]
            if len( hexvalue ) == 1:
                hexvalue = '0' + hexvalue
        bitstring += _hexdict[ hexvalue[0] ] + _hexdict[ hexvalue[1] ]
    # Full block read; peek one byte ahead to see whether the stream is
    # exhausted, restoring the file position if it is not.
    saved_pos = bitvector.FILEIN.tell()
    peeked = bitvector.FILEIN.read(1)
    if peeked:
        bitvector.FILEIN.seek( saved_pos )
    else:
        bitvector.more_to_read = False
    return bitstring
#-------------------- BitVector Class Definition ----------------------
class BitVector( object ):
    def __init__( self, *args, **kwargs ):
        '''
        Construct a bit vector from exactly one of the keyword arguments:
        filename, fp, size, intVal (optionally combined with size),
        bitlist, bitstring, hexstring, textstring, or rawbytes.
        Positional arguments are rejected.  Bits are packed 16 per element
        into an array.array('H') stored in self.vector; bit 0 of the
        vector is the LSB of the first array element.
        '''
        if args:
            raise ValueError(
                  '''BitVector constructor can only be called with
                     keyword arguments for the following keywords:
                     filename, fp, size, intVal, bitlist, bitstring,
                     hexstring, textstring, and rawbytes)''')
        allowed_keys = 'bitlist','bitstring','filename','fp','intVal',\
                       'size','textstring','hexstring','rawbytes'
        keywords_used = kwargs.keys()
        for keyword in keywords_used:
            if keyword not in allowed_keys:
                raise ValueError("Wrong keyword used --- check spelling")
        filename=fp=intVal=size=bitlist=bitstring=textstring=hexstring=rawbytes=None
        if 'filename' in kwargs : filename=kwargs.pop('filename')
        if 'fp' in kwargs : fp = kwargs.pop('fp')
        if 'size' in kwargs : size = kwargs.pop('size')
        if 'intVal' in kwargs : intVal = kwargs.pop('intVal')
        if 'bitlist' in kwargs : bitlist = kwargs.pop('bitlist')
        if 'bitstring' in kwargs : bitstring = kwargs.pop('bitstring')
        if 'hexstring' in kwargs : hexstring = kwargs.pop('hexstring')
        if 'textstring' in kwargs : textstring = kwargs.pop('textstring')
        if 'rawbytes' in kwargs : rawbytes = kwargs.pop('rawbytes')
        self.filename = None
        self.size = 0
        self.FILEIN = None
        self.FILEOUT = None
        if filename:
            # Disk-file mode: nothing is read yet; the caller pulls bits
            # in with read_bits_from_file().
            if fp or size or intVal or bitlist or bitstring or hexstring or textstring or rawbytes:
                raise ValueError('''When filename is specified, you cannot give values
                                    to any other constructor args''')
            self.filename = filename
            self.FILEIN = open(filename, 'rb')
            self.more_to_read = True
            return
        elif fp:
            # File-like object containing '0'/'1' characters.
            if filename or size or intVal or bitlist or bitstring or hexstring or \
               textstring or rawbytes:
                raise ValueError('''When fileobject is specified, you cannot give
                                    values to any other constructor args''')
            bits = self.read_bits_from_fileobject(fp)
            bitlist = list(map(int, bits))
            self.size = len( bitlist )
        elif intVal or intVal == 0:
            if filename or fp or bitlist or bitstring or hexstring or textstring or rawbytes:
                raise ValueError('''When intVal is specified, you can only give a
                                    value to the 'size' constructor arg''')
            if intVal == 0:
                # Zero is represented by a single 0 bit unless the caller
                # asks for a wider (left-zero-padded) vector.
                bitlist = [0]
                if size is None:
                    self.size = 1
                elif size == 0:
                    raise ValueError('''The value specified for size must be at least
                                        as large as for the smallest bit vector possible
                                        for intVal''')
                else:
                    if size < len(bitlist):
                        raise ValueError('''The value specified for size must be at least
                                            as large as for the smallest bit vector
                                            possible for intVal''')
                    n = size - len(bitlist)
                    bitlist = [0]*n + bitlist
                    self.size = len(bitlist)
            else:
                # Convert through hex, then strip the leading zeros so
                # the vector is exactly as wide as the value needs.
                hexVal = hex(intVal).lower().rstrip('l')
                hexVal = hexVal[2:]
                if len(hexVal) == 1:
                    hexVal = '0' + hexVal
                bitlist = ''.join(map(lambda x: _hexdict[x],hexVal))
                bitlist = list(map( int, bitlist))
                i = 0
                while (i < len(bitlist)):
                    if bitlist[i] == 1: break
                    i += 1
                del bitlist[0:i]
                if size is None:
                    self.size = len(bitlist)
                elif size == 0:
                    # size == 0 is always too small for a nonzero intVal.
                    if size < len(bitlist):
                        raise ValueError('''The value specified for size must be at least
                                            as large as for the smallest bit vector possible
                                            for intVal''')
                else:
                    if size < len(bitlist):
                        raise ValueError('''The value specified for size must be at least
                                            as large as for the smallest bit vector possible
                                            for intVal''')
                    n = size - len(bitlist)
                    bitlist = [0]*n + bitlist
                    self.size = len( bitlist )
        elif size is not None and size >= 0:
            # Zero-initialized vector of the requested width.
            if filename or fp or intVal or bitlist or bitstring or hexstring or \
               textstring or rawbytes:
                raise ValueError('''When size is specified (without an intVal), you cannot
                                    give values to any other constructor args''')
            self.size = size
            two_byte_ints_needed = (size + 15) // 16
            self.vector = array.array('H', [0]*two_byte_ints_needed)
            return
        elif bitstring or bitstring == '':
            if filename or fp or size or intVal or bitlist or hexstring or textstring or rawbytes:
                raise ValueError('''When a bitstring is specified, you cannot give
                                    values to any other constructor args''')
            bitlist = list(map(int, list(bitstring)))
            self.size = len(bitlist)
        elif bitlist:
            if filename or fp or size or intVal or bitstring or hexstring or textstring or rawbytes:
                raise ValueError('''When bits are specified, you cannot give values
                                    to any other constructor args''')
            self.size = len(bitlist)
        elif textstring or textstring == '':
            # One byte (8 bits) per character, via its ordinal value.
            if filename or fp or size or intVal or bitlist or bitstring or hexstring or rawbytes:
                raise ValueError('''When bits are specified through textstring, you
                                    cannot give values to any other constructor args''')
            hexlist = ''.join(map(lambda x: x[2:], map(hex, map(ord, list(textstring))) ))
            bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], list(hexlist))))))
            self.size = len(bitlist)
        elif hexstring or hexstring == '':
            # Four bits per hex digit.
            if filename or fp or size or intVal or bitlist or bitstring or textstring or rawbytes:
                raise ValueError('''When bits are specified through hexstring, you
                                    cannot give values to any other constructor args''')
            bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], list(hexstring))))))
            self.size = len(bitlist)
        elif rawbytes:
            if filename or fp or size or intVal or bitlist or bitstring or textstring or hexstring:
                raise ValueError('''When bits are specified through rawbytes, you
                                    cannot give values to any other constructor args''')
            import binascii
            hexlist = binascii.hexlify(rawbytes)
            # hexlify returns bytes on Python 3, str on Python 2.
            if sys.version_info[0] == 3:
                bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], \
                                                 list(map(chr,list(hexlist))))))))
            else:
                bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], list(hexlist))))))
            self.size = len(bitlist)
        else:
            raise ValueError("wrong arg(s) for constructor")
        # Pack the collected bits 16 per unsigned-short array element.
        two_byte_ints_needed = (len(bitlist) + 15) // 16
        self.vector = array.array( 'H', [0]*two_byte_ints_needed )
        list( map( self._setbit, range(len(bitlist)), bitlist) )
def _setbit(self, posn, val):
'Set the bit at the designated position to the value shown'
if val not in (0, 1):
raise ValueError( "incorrect value for a bit" )
if isinstance( posn, (tuple) ):
posn = posn[0]
if posn >= self.size or posn < -self.size:
raise ValueError( "index range error" )
if posn < 0: posn = self.size + posn
block_index = posn // 16
shift = posn & 15
cv = self.vector[block_index]
if ( cv >> shift ) & 1 != val:
self.vector[block_index] = cv ^ (1 << shift)
def _getbit(self, pos):
'Get the bit from the designated position'
if not isinstance( pos, slice ):
if pos >= self.size or pos < -self.size:
raise ValueError( "index range error" )
if pos < 0: pos = self.size + pos
return ( self.vector[pos//16] >> (pos&15) ) & 1
else:
bitstring = ''
if pos.start is None:
start = 0
else:
start = pos.start
if pos.stop is None:
stop = self.size
else:
stop = pos.stop
for i in range( start, stop ):
bitstring += str(self[i])
return BitVector( bitstring = bitstring )
def __xor__(self, other):
'''
Take a bitwise 'XOR' of the bit vector on which the method is
invoked with the argument bit vector. Return the result as a new
bit vector. If the two bit vectors are not of the same size, pad
the shorter one with zeros from the left.
'''
if self.size < other.size:
bv1 = self._resize_pad_from_left(other.size - self.size)
bv2 = other
elif self.size > other.size:
bv1 = self
bv2 = other._resize_pad_from_left(self.size - other.size)
else:
bv1 = self
bv2 = other
res = BitVector( size = bv1.size )
lpb = map(operator.__xor__, bv1.vector, bv2.vector)
res.vector = array.array( 'H', lpb )
return res
def __and__(self, other):
'''
Take a bitwise 'AND' of the bit vector on which the method is
invoked with the argument bit vector. Return the result as a new
bit vector. If the two bit vectors are not of the same size, pad
the shorter one with zeros from the left.
'''
if self.size < other.size:
bv1 = self._resize_pad_from_left(other.size - self.size)
bv2 = other
elif self.size > other.size:
bv1 = self
bv2 = other._resize_pad_from_left(self.size - other.size)
else:
bv1 = self
bv2 = other
res = BitVector( size = bv1.size )
lpb = map(operator.__and__, bv1.vector, bv2.vector)
res.vector = array.array( 'H', lpb )
return res
def __or__(self, other):
'''
Take a bitwise 'OR' of the bit vector on which the method is
invoked with the argument bit vector. Return the result as a new
bit vector. If the two bit vectors are not of the same size, pad
the shorter one with zero's from the left.
'''
if self.size < other.size:
bv1 = self._resize_pad_from_left(other.size - self.size)
bv2 = other
elif self.size > other.size:
bv1 = self
bv2 = other._resize_pad_from_left(self.size - other.size)
else:
bv1 = self
bv2 = other
res = BitVector( size = bv1.size )
lpb = map(operator.__or__, bv1.vector, bv2.vector)
res.vector = array.array( 'H', lpb )
return res
def __invert__(self):
'''
Invert the bits in the bit vector on which the method is invoked
and return the result as a new bit vector.
'''
res = BitVector( size = self.size )
lpb = list(map( operator.__inv__, self.vector ))
res.vector = array.array( 'H' )
for i in range(len(lpb)):
res.vector.append( lpb[i] & 0x0000FFFF )
return res
def __add__(self, other):
'''
Concatenate the argument bit vector with the bit vector on which
the method is invoked. Return the concatenated bit vector as a new
BitVector object.
'''
i = 0
outlist = []
while ( i < self.size ):
outlist.append( self[i] )
i += 1
i = 0
while ( i < other.size ):
outlist.append( other[i] )
i += 1
return BitVector( bitlist = outlist )
    def _getsize(self):
        'Return the number of bits in a bit vector.'
        # Exposed as __len__ via a class-level alias.
        return self.size
def read_bits_from_file(self, blocksize):
'''
Read blocksize bits from a disk file and return a BitVector object
containing the bits. If the file contains fewer bits than
blocksize, construct the BitVector object from however many bits
there are in the file. If the file contains zero bits, return a
BitVector object of size attribute set to 0.
'''
error_str = '''You need to first construct a BitVector
object with a filename as argument'''
if not self.filename:
raise SyntaxError( error_str )
if blocksize % 8 != 0:
raise ValueError( "block size must be a multiple of 8" )
bitstr = _readblock( blocksize, self )
if len( bitstr ) == 0:
return BitVector( size = 0 )
else:
return BitVector( bitstring = bitstr )
def read_bits_from_fileobject( self, fp ):
'''
This function is meant to read a bit string from a file like
object.
'''
bitlist = []
while 1:
bit = fp.read()
if bit == '': return bitlist
bitlist += bit
def write_bits_to_fileobject( self, fp ):
'''
This function is meant to write a bit vector directly to a file
like object. Note that whereas 'write_to_file' method creates a
memory footprint that corresponds exactly to the bit vector, the
'write_bits_to_fileobject' actually writes out the 1's and 0's as
individual items to the file object. That makes this method
convenient for creating a string representation of a bit vector,
especially if you use the StringIO class, as shown in the test
code.
'''
for bit_index in range(self.size):
# For Python 3.x:
if sys.version_info[0] == 3:
if self[bit_index] == 0:
fp.write( str('0') )
else:
fp.write( str('1') )
# For Python 2.x:
else:
if self[bit_index] == 0:
fp.write( unicode('0') )
else:
fp.write( unicode('1') )
def divide_into_two(self):
'''
Divides an even-sized bit vector into two and returns the two
halves as a list of two bit vectors.
'''
if self.size % 2 != 0:
raise ValueError( "must have even num bits" )
i = 0
outlist1 = []
while ( i < self.size /2 ):
outlist1.append( self[i] )
i += 1
outlist2 = []
while ( i < self.size ):
outlist2.append( self[i] )
i += 1
return [ BitVector( bitlist = outlist1 ),
BitVector( bitlist = outlist2 ) ]
def permute(self, permute_list):
'''
Permute a bit vector according to the indices shown in the second
argument list. Return the permuted bit vector as a new bit vector.
'''
if max(permute_list) > self.size -1:
raise ValueError( "Bad permutation index" )
outlist = []
i = 0
while ( i < len( permute_list ) ):
outlist.append( self[ permute_list[i] ] )
i += 1
return BitVector( bitlist = outlist )
def unpermute(self, permute_list):
'''
Unpermute the bit vector according to the permutation list supplied
as the second argument. If you first permute a bit vector by using
permute() and then unpermute() it using the same permutation list,
you will get back the original bit vector.
'''
if max(permute_list) > self.size -1:
raise ValueError( "Bad permutation index" )
if self.size != len( permute_list ):
raise ValueError( "Bad size for permute list" )
out_bv = BitVector( size = self.size )
i = 0
while ( i < len(permute_list) ):
out_bv[ permute_list[i] ] = self[i]
i += 1
return out_bv
def write_to_file(self, file_out):
'''
Write the bitvector to the file object file_out. (A file object is
returned by a call to open()). Since all file I/O is byte oriented,
the bitvector must be multiple of 8 bits. Each byte treated as MSB
first (0th index).
'''
err_str = '''Only a bit vector whose length is a multiple of 8 can
be written to a file. Use the padding functions to satisfy
this constraint.'''
if not self.FILEOUT:
self.FILEOUT = file_out
if self.size % 8:
raise ValueError( err_str )
for byte in range( int(self.size/8) ):
value = 0
for bit in range(8):
value += (self._getbit( byte*8+(7 - bit) ) << bit )
if sys.version_info[0] == 3:
file_out.write( bytes(chr(value), 'utf-8') )
else:
file_out.write( chr(value) )
    def close_file_object(self):
        '''
        For closing a file object that was used for reading the bits into
        one or more BitVector objects.
        '''
        # Only meaningful after construction with the filename keyword;
        # FILEIN is None otherwise.
        if not self.FILEIN:
            raise SyntaxError( "No associated open file" )
        self.FILEIN.close()
def int_val(self):
'Return the integer value of a bitvector'
intVal = 0
for i in range(self.size):
intVal += self[i] * (2 ** (self.size - i - 1))
return intVal
intValue = int_val
def get_text_from_bitvector(self):
'''
Return the text string formed by dividing the bitvector into bytes
from the left and replacing each byte by its ASCII character (this
is a useful thing to do only if the length of the vector is an
integral multiple of 8 and every byte in your bitvector has a print
representation)
'''
if self.size % 8:
raise ValueError('''\nThe bitvector for get_text_from_bitvector()
must be an integral multiple of 8 bits''')
return ''.join(map(chr, map(int,[self[i:i+8] for i in range(0,self.size,8)])))
getTextFromBitVector = get_text_from_bitvector
def get_hex_string_from_bitvector(self):
'''
Return a string of hex digits by scanning the bits from the left
and replacing each sequence of 4 bits by its corresponding hex
digit (this is a useful thing to do only if the length of the
vector is an integral multiple of 4)
'''
if self.size % 4:
raise ValueError('''\nThe bitvector for get_hex_string_from_bitvector()
must be an integral multiple of 4 bits''')
return ''.join(map(lambda x: x.replace('0x',''), \
map(hex,map(int,[self[i:i+4] for i in range(0,self.size,4)]))))
getHexStringFromBitVector = get_hex_string_from_bitvector
def __lshift__( self, n ):
'For an in-place left circular shift by n bit positions'
if self.size == 0:
raise ValueError('''Circular shift of an empty vector
makes no sense''')
if n < 0:
return self >> abs(n)
for i in range(n):
self.circular_rotate_left_by_one()
return self
def __rshift__( self, n ):
'For an in-place right circular shift by n bit positions.'
if self.size == 0:
raise ValueError('''Circular shift of an empty vector
makes no sense''')
if n < 0:
return self << abs(n)
for i in range(n):
self.circular_rotate_right_by_one()
return self
def circular_rotate_left_by_one(self):
'For a one-bit in-place left circular shift'
size = len(self.vector)
bitstring_leftmost_bit = self.vector[0] & 1
left_most_bits = list(map(operator.__and__, self.vector, [1]*size))
left_most_bits.append(left_most_bits[0])
del(left_most_bits[0])
self.vector = list(map(operator.__rshift__, self.vector, [1]*size))
self.vector = list(map( operator.__or__, self.vector, \
list( map(operator.__lshift__, left_most_bits, [15]*size) )))
self._setbit(self.size -1, bitstring_leftmost_bit)
def circular_rotate_right_by_one(self):
'For a one-bit in-place right circular shift'
size = len(self.vector)
bitstring_rightmost_bit = self[self.size - 1]
right_most_bits = list(map( operator.__and__,
self.vector, [0x8000]*size ))
self.vector = list(map( operator.__and__, self.vector, [~0x8000]*size ))
right_most_bits.insert(0, bitstring_rightmost_bit)
right_most_bits.pop()
self.vector = list(map(operator.__lshift__, self.vector, [1]*size))
self.vector = list(map( operator.__or__, self.vector, \
list(map(operator.__rshift__, right_most_bits, [15]*size))))
self._setbit(0, bitstring_rightmost_bit)
def circular_rot_left(self):
'''
This is merely another implementation of the method
circular_rotate_left_by_one() shown above. This one does NOT use
map functions. This method carries out a one-bit left circular
shift of a bit vector.
'''
max_index = (self.size -1) // 16
left_most_bit = self.vector[0] & 1
self.vector[0] = self.vector[0] >> 1
for i in range(1, max_index + 1):
left_bit = self.vector[i] & 1
self.vector[i] = self.vector[i] >> 1
self.vector[i-1] |= left_bit << 15
self._setbit(self.size -1, left_most_bit)
def circular_rot_right(self):
'''
This is merely another implementation of the method
circular_rotate_right_by_one() shown above. This one does NOT use
map functions. This method does a one-bit right circular shift of
a bit vector.
'''
max_index = (self.size -1) // 16
right_most_bit = self[self.size - 1]
self.vector[max_index] &= ~0x8000
self.vector[max_index] = self.vector[max_index] << 1
for i in range(max_index-1, -1, -1):
right_bit = self.vector[i] & 0x8000
self.vector[i] &= ~0x8000
self.vector[i] = self.vector[i] << 1
self.vector[i+1] |= right_bit >> 15
self._setbit(0, right_most_bit)
def shift_left_by_one(self):
'''
For a one-bit in-place left non-circular shift. Note that
bitvector size does not change. The leftmost bit that moves
past the first element of the bitvector is discarded and
rightmost bit of the returned vector is set to zero.
'''
size = len(self.vector)
left_most_bits = list(map(operator.__and__, self.vector, [1]*size))
left_most_bits.append(left_most_bits[0])
del(left_most_bits[0])
self.vector = list(map(operator.__rshift__, self.vector, [1]*size))
self.vector = list(map( operator.__or__, self.vector, \
list(map(operator.__lshift__, left_most_bits, [15]*size))))
self._setbit(self.size -1, 0)
def shift_right_by_one(self):
'''
For a one-bit in-place right non-circular shift. Note that
bitvector size does not change. The rightmost bit that moves
past the last element of the bitvector is discarded and
leftmost bit of the returned vector is set to zero.
'''
size = len(self.vector)
right_most_bits = list(map( operator.__and__, self.vector, [0x8000]*size ))
self.vector = list(map( operator.__and__, self.vector, [~0x8000]*size ))
right_most_bits.insert(0, 0)
right_most_bits.pop()
self.vector = list(map(operator.__lshift__, self.vector, [1]*size))
self.vector = list(map( operator.__or__, self.vector, \
list(map(operator.__rshift__,right_most_bits, [15]*size))))
self._setbit(0, 0)
    def shift_left( self, n ):
        'For an in-place left non-circular shift by n bit positions'
        # Implemented as n single-bit shifts; returns self for chaining.
        for i in range(n):
            self.shift_left_by_one()
        return self
    def shift_right( self, n ):
        'For an in-place right non-circular shift by n bit positions.'
        # Implemented as n single-bit shifts; returns self for chaining.
        for i in range(n):
            self.shift_right_by_one()
        return self
# Allow array like subscripting for getting and setting:
__getitem__ = _getbit
def __setitem__(self, pos, item):
'''
This is needed for both slice assignments and for index
assignments. It checks the types of pos and item to see if the
call is for slice assignment. For slice assignment, pos must be of
type 'slice' and item of type BitVector. For index assignment, the
argument types are checked in the _setbit() method.
'''
# The following section is for slice assignment:
if isinstance(pos, slice):
if (not isinstance( item, BitVector )):
raise TypeError('''For slice assignment,
the right hand side must be a BitVector''')
if (not pos.start and not pos.stop):
return item.deep_copy()
elif not pos.start:
if (pos.stop != len(item)):
raise ValueError('incompatible lengths for slice assignment')
for i in range(pos.stop):
self[i] = item[ i ]
return
elif not pos.stop:
if ((len(self) - pos.start) != len(item)):
raise ValueError('incompatible lengths for slice assignment')
for i in range(len(item)-1):
self[pos.start + i] = item[ i ]
return
else:
if ( (pos.stop - pos.start) != len(item) ):
raise ValueError('incompatible lengths for slice assignment')
for i in range( pos.start, pos.stop ):
self[i] = item[ i - pos.start ]
return
# For index assignment use _setbit()
self._setbit(pos, item)
def __getslice__(self, i, j):
'Fetch slices with [i:j], [:], etc.'
if self.size == 0:
return BitVector( bitstring = '' )
if i == j:
return BitVector( bitstring = '' )
slicebits = []
if j > self.size: j = self.size
for x in range(i,j):
slicebits.append( self[x] )
return BitVector( bitlist = slicebits )
# Allow len() to work:
__len__ = _getsize
# Allow int() to work:
__int__ = intValue
    def __iter__(self):
        '''
        To allow iterations over a bit vector by supporting the 'for bit in
        bit_vector' syntax:
        '''
        # BitVectorIterator is defined elsewhere in this module.
        return BitVectorIterator(self)
    def __str__(self):
        'To create a print representation'
        # Iterating over self yields the individual bits (via __iter__).
        if self.size == 0:
            return ''
        return ''.join(map(str, self))
# Compare two bit vectors:
def __eq__(self, other):
if self.size != other.size:
return False
i = 0
while ( i < self.size ):
if (self[i] != other[i]): return False
i += 1
return True
    def __ne__(self, other):
        # Negation of __eq__.
        return not self == other
    def __lt__(self, other):
        # Ordering comparisons go by integer value, not lexicographically.
        return self.intValue() < other.intValue()
    def __le__(self, other):
        return self.intValue() <= other.intValue()
    def __gt__(self, other):
        return self.intValue() > other.intValue()
    def __ge__(self, other):
        return self.intValue() >= other.intValue()
    def deep_copy( self ):
        'Make a deep copy of a bit vector'
        # Round-trips through the string form; O(n) but simple.
        copy = str( self )
        return BitVector( bitstring = copy )
    _make_deep_copy = deep_copy
    def _resize_pad_from_left( self, n ):
        '''
        Resize a bit vector by padding with n 0's from the left. Return the
        result as a new bit vector.
        '''
        # Non-mutating counterpart of pad_from_left().
        new_str = '0'*n + str( self )
        return BitVector( bitstring = new_str )
    def _resize_pad_from_right( self, n ):
        '''
        Resize a bit vector by padding with n 0's from the right. Return
        the result as a new bit vector.
        '''
        # Non-mutating counterpart of pad_from_right().
        new_str = str( self ) + '0'*n
        return BitVector( bitstring = new_str )
    def pad_from_left( self, n ):
        'Pad a bit vector with n zeros from the left'
        # In-place: rebuilds the backing array at the new size.
        new_str = '0'*n + str( self )
        bitlist = list(map( int, list(new_str) ))
        self.size = len( bitlist )
        two_byte_ints_needed = (len(bitlist) + 15) // 16
        self.vector = array.array( 'H', [0]*two_byte_ints_needed )
        # _setbit accepts the (index, bit) tuples produced by enumerate
        # and uses only the index component.
        list(map( self._setbit, enumerate(bitlist), bitlist))
    def pad_from_right( self, n ):
        'Pad a bit vector with n zeros from the right'
        # In-place: rebuilds the backing array at the new size.
        new_str = str( self ) + '0'*n
        bitlist = list(map( int, list(new_str) ))
        self.size = len( bitlist )
        two_byte_ints_needed = (len(bitlist) + 15) // 16
        self.vector = array.array( 'H', [0]*two_byte_ints_needed )
        # _setbit accepts the (index, bit) tuples produced by enumerate
        # and uses only the index component.
        list(map( self._setbit, enumerate(bitlist), bitlist))
def __contains__( self, otherBitVec ):
'''
This supports 'if x in y' and 'if x not in y' syntax for bit
vectors.
'''
if self.size == 0:
raise ValueError("First arg bitvec has no bits")
elif self.size < otherBitVec.size:
raise ValueError("First arg bitvec too short")
max_index = self.size - otherBitVec.size + 1
for i in range(max_index):
if self[i:i+otherBitVec.size] == otherBitVec:
return True
return False
def reset( self, val ):
'''
Resets a previously created BitVector to either all zeros or all
ones depending on the argument val. Returns self to allow for
syntax like
bv = bv1[3:6].reset(1)
or
bv = bv1[:].reset(1)
'''
if val not in (0,1):
raise ValueError( "Incorrect reset argument" )
bitlist = [val for i in range( self.size )]
list(map( self._setbit, enumerate(bitlist), bitlist ))
return self
    def count_bits( self ):
        '''
        Return the number of bits set in a BitVector instance.
        '''
        # NOTE: reduce() over the bit iterator with no initializer, so a
        # zero-length vector raises TypeError.  count_bits_sparse() is
        # faster for sparse vectors.
        from functools import reduce
        return reduce( lambda x, y: int(x)+int(y), self )
    def set_value(self, *args, **kwargs):
        '''
        Changes the bit pattern associated with a previously constructed
        BitVector instance. The allowable modes for changing the internally
        stored bit pattern are the same as for the constructor.
        '''
        # Simply re-runs the constructor on this instance.
        self.__init__( *args, **kwargs )
    setValue = set_value
def count_bits_sparse(self):
'''
For sparse bit vectors, this method, contributed by Rhiannon, will
be much faster. She estimates that if a bit vector with over 2
millions bits has only five bits set, this will return the answer
in 1/18 of the time taken by the count_bits() method. Note
however, that count_bits() may work much faster for dense-packed
bit vectors. Rhianon's implementation is based on an algorithm
generally known as the <NAME>an's way, although its
antecedents predate its mention by Kernighan and Ritchie.
'''
num = 0
for intval in self.vector:
if intval == 0: continue
c = 0; iv = intval
while iv > 0:
iv = iv & (iv -1)
c = c + 1
num = num + c
return num
def jaccard_similarity(self, other):
'''
Computes the Jaccard similarity coefficient between two bit vectors
'''
assert self.intValue() > 0 or other.intValue() > 0, 'Jaccard called on two zero vectors --- NOT ALLOWED'
assert self.size == other.size, 'vectors of unequal length'
intersect = self & other
union = self | other
return ( intersect.count_bits_sparse() / float( union.count_bits_sparse() ) )
def jaccard_distance( self, other ):
'''
Computes the Jaccard distance between two bit vectors
'''
assert self.size == other.size, 'vectors of unequal length'
return 1 - self.jaccard_similarity( other )
def hamming_distance( self, other ):
'''
Computes the Hamming distance between two bit vectors
'''
assert self.size == other.size, 'vectors of unequal length'
diff = self ^ other
return diff.count_bits_sparse()
def next_set_bit(self, from_index=0):
'''
This method, contributed originally by <NAME> and updated
subsequently by <NAME>, calculates the position of the next
set bit at or after the current position index. It returns -1 if
there is no next set bit.
'''
assert from_index >= 0, 'from_index must be nonnegative'
i = from_index
v = self.vector
l = len(v)
o = i >> 4
s = i & 0x0F
i = o << 4
while o < l:
h = v[o]
if h:
i += s
m = 1 << s
while m != (1 << 0x10):
if h & m: return i
m <<= 1
i += 1
else:
i += 0x10
s = 0
o += 1
return -1
    def rank_of_bit_set_at_index(self, position):
        '''
        For a bit that is set at the argument 'position', this method
        returns how many bits are set to the left of that bit. For
        example, in the bit pattern 000101100100, a call to this method
        with position set to 9 will return 4.
        '''
        # Precondition: the bit at 'position' must itself be set.
        assert self[position] == 1, 'the arg bit not set'
        # Count the set bits in the prefix ending at 'position' inclusive.
        bv = self[0:position+1]
        return bv.count_bits()
def is_power_of_2( self ):
'''
Determines whether the integer value of a bit vector is a power of
2.
'''
if self.intValue() == 0: return False
bv = self & BitVector( intVal = self.intValue() - 1 )
if bv.intValue() == 0: return True
return False
isPowerOf2 = is_power_of_2
def is_power_of_2_sparse(self):
'''
Faster version of is_power_of2() for sparse bit vectors
'''
if self.count_bits_sparse() == 1: return True
return False
isPowerOf2_sparse = is_power_of_2_sparse
def reverse(self):
    '''
    Returns a new bit vector whose bits appear in the opposite order
    from those in the bit vector on which the method is invoked.
    '''
    # self[-(i+1)] for i = 0..size-1 walks the bits from last to first,
    # exactly as the original 1-based negative-index loop did.
    flipped = [ self[ -(i + 1) ] for i in range( self.size ) ]
    return BitVector( bitlist = flipped )
def gcd(self, other):
    '''
    Using Euclid's Algorithm, returns the greatest common divisor of
    the integer values of the two bit vectors, packaged as a new
    BitVector.
    '''
    u, v = self.intValue(), other.intValue()
    if u < v:
        u, v = v, u
    while v:
        u, v = v, u % v
    return BitVector( intVal = u )
def multiplicative_inverse(self, modulus):
    '''
    Calculates the multiplicative inverse of a bit vector modulo the
    bit vector that is supplied as the argument. Code based on the
    Extended Euclid's Algorithm. Returns None when the two integer
    values are not coprime (no inverse exists).
    '''
    MOD = mod = modulus.intValue(); num = self.intValue()
    # x / x_old track the Bezout coefficient of the original num
    x, x_old = 0, 1
    y, y_old = 1, 0
    while mod:
        quotient = num // mod
        num, mod = mod, num % mod
        x, x_old = x_old - x * quotient, x
        y, y_old = y_old - y * quotient, y
    if num != 1:
        # gcd != 1 means no multiplicative inverse exists
        return None
    else:
        # normalize the coefficient into the range [0, MOD)
        MI = (x_old + MOD) % MOD
        return BitVector( intVal = MI )
def length(self):
    # Returns the number of bits in this bit vector.
    return self.size
def gf_multiply(self, b):
    '''
    In the set of polynomials defined over GF(2), multiplies
    the bitvector on which the method is invoked with the
    bitvector b. Returns the product bitvector, whose size is the
    sum of the sizes of the two operands.

    Improvement: the two locals a_highest_power / b_highest_power in
    the previous version were computed and never used; they are
    removed. Behavior is otherwise unchanged.
    '''
    a = self.deep_copy()
    b_copy = b.deep_copy()
    # the result must be wide enough for any product of the operands
    result = BitVector( size = a.length()+b_copy.length() )
    a.pad_from_left( result.length() - a.length() )
    b_copy.pad_from_left( result.length() - b_copy.length() )
    # schoolbook polynomial multiplication: for every set bit of b,
    # XOR in a copy of a shifted left by that bit's power
    for i,bit in enumerate(b_copy):
        if bit == 1:
            power = b_copy.length() - i - 1
            a_copy = a.deep_copy()
            a_copy.shift_left( power )
            result ^= a_copy
    return result
def gf_divide(self, mod, n):
    '''
    Carries out modular division of a bitvector by the
    modulus bitvector mod in GF(2^n) finite field.
    Returns both the quotient and the remainder.
    '''
    num = self
    if mod.length() > n+1:
        raise ValueError("Modulus bit pattern too long")
    quotient = BitVector( intVal = 0, size = num.length() )
    remainder = num.deep_copy()
    i = 0
    while 1:
        i = i+1
        # safety valve: never iterate more times than there are bits
        if (i==num.length()): break
        mod_highest_power = mod.length()-mod.next_set_bit(0)-1
        if remainder.next_set_bit(0) == -1:
            remainder_highest_power = 0
        else:
            remainder_highest_power = remainder.length() - remainder.next_set_bit(0) - 1
        # stop once the remainder's degree drops below the modulus degree
        if (remainder_highest_power < mod_highest_power) or int(remainder)==0:
            break
        else:
            exponent_shift = remainder_highest_power - mod_highest_power
            # record this power in the quotient
            quotient[quotient.length()-exponent_shift-1] = 1
            quotient_mod_product = mod.deep_copy();
            quotient_mod_product.pad_from_left(remainder.length() - mod.length())
            quotient_mod_product.shift_left(exponent_shift)
            # polynomial subtraction over GF(2) is XOR
            remainder = remainder ^ quotient_mod_product
    # truncate the remainder to at most n bits
    if remainder.length() > n:
        remainder = remainder[remainder.length()-n:]
    return quotient, remainder
def gf_multiply_modular(self, b, mod, n):
    '''
    Multiplies this bitvector with the bitvector b in the GF(2^n)
    finite field whose modulus bit pattern is mod, returning the
    remainder of the product modulo mod.
    '''
    product = self.deep_copy().gf_multiply( b.deep_copy() )
    _, remainder = product.gf_divide(mod, n)
    return remainder
def gf_MI(self, mod, n):
    '''
    Returns the multiplicative inverse of a vector in the GF(2^n)
    finite field with the modulus polynomial set to mod, computed via
    the extended Euclidean algorithm over GF(2) polynomials.
    '''
    num = self
    NUM = num.deep_copy(); MOD = mod.deep_copy()
    # x / x_old track the Bezout coefficient of num; y / y_old of mod
    x = BitVector( size=mod.length() )
    x_old = BitVector( intVal=1, size=mod.length() )
    y = BitVector( intVal=1, size=mod.length() )
    y_old = BitVector( size=mod.length() )
    while int(mod):
        quotient, remainder = num.gf_divide(mod, n)
        num, mod = mod, remainder
        # addition/subtraction in GF(2) is XOR
        x, x_old = x_old ^ quotient.gf_multiply(x), x
        y, y_old = y_old ^ quotient.gf_multiply(y), y
    if int(num) != 1:
        # NOTE(review): this failure path returns a tuple of strings,
        # not one concatenated string -- confirm callers expect that.
        return "NO MI. However, the GCD of ", str(NUM), " and ", \
               str(MOD), " is ", str(num)
    else:
        # reduce the coefficient modulo MOD and return the remainder
        z = x_old ^ MOD
        quotient, remainder = z.gf_divide(MOD, n)
        return remainder
def runs(self):
    '''
    Returns a list of the consecutive runs of 1's and 0's in
    the bit vector. Each run is either a string of all 1's or
    a string of all 0's.
    '''
    if self.size == 0:
        raise ValueError('''An empty vector has no runs''')
    allruns = []
    current = str( self[0] )
    for bit in list(self)[1:]:
        if str(bit) == current[-1]:
            # same value as the run in progress: extend it
            current += str(bit)
        else:
            # value changed: close the current run, start a new one
            allruns.append( current )
            current = str(bit)
    allruns.append( current )
    return allruns
def test_for_primality(self):
    '''
    Check if the integer value of the bitvector is a prime through the
    Miller-Rabin probabilistic test of primality. If not found to be a
    composite, estimate the probability of the bitvector being a prime
    using this test. Returns 0 for a composite, 1 when the value is one
    of the probe primes, otherwise a float probability just below 1.

    NOTE(review): values below 2 are not handled -- p == 1 would loop
    forever in the q-halving step below; confirm callers pass p >= 2.
    '''
    p = int(self)
    probes = [2,3,5,7,11,13,17]
    for a in probes:
        if a == p: return 1          # the probe primes are themselves prime
    if any([p % a == 0 for a in probes]): return 0   # divisible by a small prime
    # write p - 1 as q * 2^k with q odd
    k, q = 0, p-1
    while not q&1:
        q >>= 1
        k += 1
    for a in probes:
        a_raised_to_q = pow(a, q, p)
        if a_raised_to_q == 1 or a_raised_to_q == p-1: continue
        a_raised_to_jq = a_raised_to_q
        primeflag = 0
        for j in range(k-1):
            # successive squarings must hit p-1 for a probable prime
            a_raised_to_jq = pow(a_raised_to_jq, 2, p)
            if a_raised_to_jq == p-1:
                primeflag = 1
                break
        if not primeflag: return 0
    # each probe that fails to witness compositeness quarters the error
    probability_of_prime = 1 - 1.0/(4 ** len(probes))
    return probability_of_prime
def gen_rand_bits_for_prime(self, width):
    '''
    Returns a BitVector for a randomly generated 'width'-bit integer
    suitable as a primality-test candidate: the two most significant
    bits and the least significant bit are forced to 1 so the value is
    odd and spans the full requested width.
    '''
    import random
    candidate = random.getrandbits( width )
    # force oddness plus the two top bits in a single OR
    candidate |= 1 | (1 << (width - 1)) | (2 << (width - 3))
    return BitVector( intVal = candidate )
#----------------------- BitVectorIterator Class -----------------------
class BitVectorIterator:
    '''
    Iterator over the individual bits of a BitVector, yielding them in
    order from index 0 upward.
    '''
    def __init__( self, bitvec ):
        # snapshot every bit up front via _getbit
        self.items = [ bitvec._getbit(i) for i in range( bitvec.size ) ]
        self.index = -1
    def __iter__( self ):
        return self
    def next( self ):
        self.index += 1
        if self.index >= len( self.items ):
            raise StopIteration
        return self.items[ self.index ]
    __next__ = next
#------------------------ Test Code Follows -----------------------
if __name__ == '__main__':
# Construct an EMPTY bit vector (a bit vector of size 0):
print("\nConstructing an EMPTY bit vector (a bit vector of size 0):")
bv1 = BitVector( size = 0 )
print(bv1) # no output
# Construct a bit vector of size 2:
print("\nConstructing a bit vector of size 2:")
bv2 = BitVector( size = 2 )
print(bv2) # 00
# Joining two bit vectors:
print("\nOutput concatenation of two previous bit vectors:")
result = bv1 + bv2
print(result) # 00
# Construct a bit vector with a tuple of bits:
print("\nThis is a bit vector from a tuple of bits:")
bv = BitVector(bitlist=(1, 0, 0, 1))
print(bv) # 1001
# Construct a bit vector with a list of bits:
print("\nThis is a bit vector from a list of bits:")
bv = BitVector(bitlist=[1, 1, 0, 1])
print(bv) # 1101
# Construct a bit vector from an integer
bv = BitVector(intVal=5678)
print("\nBit vector constructed from integer 5678:")
print(bv) # 1011000101110
print("\nBit vector constructed from integer 0:")
bv = BitVector(intVal=0)
print(bv) # 0
print("\nBit vector constructed from integer 2:")
bv = BitVector(intVal=2)
print(bv) # 10
print("\nBit vector constructed from integer 3:")
bv = BitVector(intVal=3)
print(bv) # 11
print("\nBit vector constructed from integer 123456:")
bv = BitVector(intVal=123456)
print(bv) # 11110001001000000
print("\nInt value of the previous bit vector as computed by int_val():")
print(bv.int_val()) # 123456
print("\nInt value of the previous bit vector as computed by int():")
print(int(bv)) # 123456
# Construct a bit vector from a very large integer:
x = 12345678901234567890123456789012345678901234567890123456789012345678901234567890
bv = BitVector(intVal=x)
print("\nHere is a bit vector constructed from a very large integer:")
print(bv)
print("The integer value of the above bit vector is:%d" % int(bv))
# Construct a bit vector directly from a file-like object:
import io
x = "111100001111"
x = ""
if sys.version_info[0] == 3:
x = "111100001111"
else:
x = unicode("111100001111")
fp_read = io.StringIO(x)
bv = BitVector( fp = fp_read )
print("\nBit vector constructed directed from a file like object:")
print(bv) # 111100001111
# Construct a bit vector directly from a bit string:
bv = BitVector( bitstring = '00110011' )
print("\nBit Vector constructed directly from a bit string:")
print(bv) # 00110011
bv = BitVector(bitstring = '')
print("\nBit Vector constructed directly from an empty bit string:")
print(bv) # nothing
print("\nInteger value of the previous bit vector:")
print(bv.int_val()) # 0
print("\nConstructing a bit vector from the textstring 'hello':")
bv3 = BitVector(textstring = "hello")
print(bv3)
mytext = bv3.get_text_from_bitvector()
print("Text recovered from the previous bitvector: ")
print(mytext) # hello
print("\nConstructing a bit vector from the hexstring '68656c6c6f':")
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4)
myhexstring = bv4.get_hex_string_from_bitvector()
print("Hex string recovered from the previous bitvector: ")
print(myhexstring) # 68656c6c6f
print("\nDemonstrating the raw bytes mode of constructing a bit vector (useful for reading public and private keys):")
mypubkey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5amriY96HQS8Y/nKc8zu3zOylvpOn3vzMmWwrtyDy+aBvns4UC1RXoaD9rDKqNNMCBAQwWDsYwCAFsrBzbxRQONHePX8lRWgM87MseWGlu6WPzWGiJMclTAO9CTknplG9wlNzLQBj3dP1M895iLF6jvJ7GR+V3CRU6UUbMmRvgPcsfv6ec9RRPm/B8ftUuQICL0jt4tKdPG45PBJUylHs71FuE9FJNp01hrj1EMFObNTcsy9zuis0YPyzArTYSOUsGglleExAQYi7iLh17pAa+y6fZrGLsptgqryuftN9Q4NqPuTiFjlqRowCDU7sSxKDgU7bzhshyVx3+pzXO4D2Q== kak@pixie'
import base64
if sys.version_info[0] == 3:
import binascii
keydata = base64.b64decode(bytes(mypubkey.split(None)[1], 'utf-8'))
else:
keydata = base64.b64decode(mypubkey.split(None)[1])
bv = BitVector( rawbytes = keydata )
print(bv)
# Test array-like indexing for a bit vector:
bv = BitVector( bitstring = '110001' )
print("\nPrints out bits individually from bitstring 110001:")
print(bv[0], bv[1], bv[2], bv[3], bv[4], bv[5]) # 1 1 0 0 0 1
print("\nSame as above but using negative array indexing:")
print(bv[-1], bv[-2], bv[-3], bv[-4], bv[-5], bv[-6]) # 1 0 0 0 1 1
# Test setting bit values with positive and negative
# accessors:
bv = BitVector( bitstring = '1111' )
print("\nBitstring for 1111:")
print(bv) # 1111
print("\nReset individual bits of above vector:")
bv[0]=0;bv[1]=0;bv[2]=0;bv[3]=0
print(bv) # 0000
print("\nDo the same as above with negative indices:")
bv[-1]=1;bv[-2]=1;bv[-4]=1
print(bv) # 1011
print("\nCheck equality and inequality ops:")
bv1 = BitVector( bitstring = '00110011' )
bv2 = BitVector( bitlist = [0,0,1,1,0,0,1,1] )
print(bv1 == bv2) # True
print(bv1 != bv2) # False
print(bv1 < bv2) # False
print(bv1 <= bv2) # True
bv3 = BitVector( intVal = 5678 )
print(bv3.int_val()) # 5678
print(bv3) # 10110000101110
print(bv1 == bv3) # False
print(bv3 > bv1) # True
print(bv3 >= bv1) # True
# Write a bit vector to a file like object
fp_write = io.StringIO()
bv.write_bits_to_fileobject( fp_write )
print("\nGet bit vector written out to a file-like object:")
print(fp_write.getvalue()) # 1011
print("\nExperiments with bitwise logical operations:")
bv3 = bv1 | bv2
print(bv3) # 00110011
bv3 = bv1 & bv2
print(bv3) # 00110011
bv3 = bv1 + bv2
print(bv3) # 0011001100110011
bv4 = BitVector( size = 3 )
print(bv4) # 000
bv5 = bv3 + bv4
print(bv5) # 0011001100110011000
bv6 = ~bv5
print(bv6) # 1100110011001100111
bv7 = bv5 & bv6
print(bv7) # 0000000000000000000
bv7 = bv5 | bv6
print(bv7) # 1111111111111111111
print("\nTry logical operations on bit vectors of different sizes:")
print(BitVector( intVal = 6 ) ^ BitVector( intVal = 13 )) # 1011
print(BitVector( intVal = 6 ) & BitVector( intVal = 13 )) # 0100
print(BitVector( intVal = 6 ) | BitVector( intVal = 13 )) # 1111
print(BitVector( intVal = 1 ) ^ BitVector( intVal = 13 )) # 1100
print(BitVector( intVal = 1 ) & BitVector( intVal = 13 )) # 0001
print(BitVector( intVal = 1 ) | BitVector( intVal = 13 )) # 1101
print("\nExperiments with setbit() and len():")
bv7[7] = 0
print(bv7) # 1111111011111111111
print(len( bv7 )) # 19
bv8 = (bv5 & bv6) ^ bv7
print(bv8) # 1111111011111111111
print("\nConstruct a bit vector from what is in the file testinput1.txt:")
bv = BitVector( filename = 'TestBitVector/testinput1.txt' )
#print bv # nothing to show
bv1 = bv.read_bits_from_file(64)
print("\nPrint out the first 64 bits read from the file:")
print(bv1)
# 0100000100100000011010000111010101101110011001110111001001111001
print("\nRead the next 64 bits from the same file:")
bv2 = bv.read_bits_from_file(64)
print(bv2)
# 0010000001100010011100100110111101110111011011100010000001100110
print("\nTake xor of the previous two bit vectors:")
bv3 = bv1 ^ (bv2)
print(bv3)
# 0110000101000010000110100001101000011001000010010101001000011111
print("\nExperiment with dividing an even-sized vector into two:")
[bv4, bv5] = bv3.divide_into_two()
print(bv4) # 01100001010000100001101000011010
print(bv5) # 00011001000010010101001000011111
# Permute a bit vector:
print("\nWe will use this bit vector for experiments with permute()")
bv1 = BitVector( bitlist = [1, 0, 0, 1, 1, 0, 1] )
print(bv1) # 1001101
bv2 = bv1.permute( [6, 2, 0, 1] )
print("\nPermuted and contracted form of the previous bit vector:")
print(bv2) # 1010
print("\nExperiment with writing an internally generated bit vector out to a disk file:")
bv1 = BitVector( bitstring = '00001010' )
FILEOUT = open( 'TestBitVector/test.txt', 'wb' )
bv1.write_to_file( FILEOUT )
FILEOUT.close()
bv2 = BitVector( filename = 'TestBitVector/test.txt' )
bv3 = bv2.read_bits_from_file( 32 )
print("\nDisplay bit vectors written out to file and read back from the file and their respective lengths:")
print( str(bv1) + " " + str(bv3))
print(str(len(bv1)) + " " + str(len(bv3)))
print("\nExperiments with reading a file from the beginning to end:")
bv = BitVector( filename = 'TestBitVector/testinput4.txt' )
print("\nHere are all the bits read from the file:")
while (bv.more_to_read):
bv_read = bv.read_bits_from_file( 64 )
print(bv_read)
print("\n")
print("\nExperiment with closing a file object and start extracting bit vectors from the file from the beginning again:")
bv.close_file_object()
bv = BitVector( filename = 'TestBitVector/testinput4.txt' )
bv1 = bv.read_bits_from_file(64)
print("\nHere are all the first 64 bits read from the file again after the file object was closed and opened again:")
print(bv1)
FILEOUT = open( 'TestBitVector/testinput5.txt', 'wb' )
bv1.write_to_file( FILEOUT )
FILEOUT.close()
print("\nExperiment in 64-bit permutation and unpermutation of the previous 64-bit bitvector:")
print("The permutation array was generated separately by the Fisher-Yates shuffle algorithm:")
bv2 = bv1.permute( [22, 47, 33, 36, 18, 6, 32, 29, 54, 62, 4,
9, 42, 39, 45, 59, 8, 50, 35, 20, 25, 49,
15, 61, 55, 60, 0, 14, 38, 40, 23, 17, 41,
10, 57, 12, 30, 3, 52, 11, 26, 43, 21, 13,
58, 37, 48, 28, 1, 63, 2, 31, 53, 56, 44, 24,
51, 19, 7, 5, 34, 27, 16, 46] )
print("Permuted bit vector:")
print(bv2)
bv3 = bv2.unpermute( [22, 47, 33, 36, 18, 6, 32, 29, 54, 62, 4,
9, 42, 39, 45, 59, 8, 50, 35, 20, 25, 49,
15, 61, 55, 60, 0, 14, 38, 40, 23, 17, 41,
10, 57, 12, 30, 3, 52, 11, 26, 43, 21, 13,
58, 37, 48, 28, 1, 63, 2, 31, 53, 56, 44, 24,
51, 19, 7, 5, 34, 27, 16, 46] )
print("Unpurmute the bit vector:")
print(bv3)
print("\nTry circular shifts to the left and to the right for the following bit vector:")
print(bv3) # 0100000100100000011010000111010101101110011001110111001001111001
print("\nCircular shift to the left by 7 positions:")
bv3 << 7
print(bv3) # 1001000000110100001110101011011100110011101110010011110010100000
print("\nCircular shift to the right by 7 positions:")
bv3 >> 7
print(bv3) # 0100000100100000011010000111010101101110011001110111001001111001
print("Test len() on the above bit vector:")
print(len( bv3 )) # 64
print("\nTest forming a [5:22] slice of the above bit vector:")
bv4 = bv3[5:22]
print(bv4) # 00100100000011010
print("\nTest the iterator:")
for bit in bv4:
print(bit) # 0 0 1 0 0 1 0 0 0 0 0 0 1 1 0 1 0
print("\nDemonstrate padding a bit vector from left:")
bv = BitVector(bitstring = '101010')
bv.pad_from_left(4)
print(bv) # 0000101010
print("\nDemonstrate padding a bit vector from right:")
bv.pad_from_right(4)
print(bv) # 00001010100000
print("\nTest the syntax 'if bit_vector_1 in bit_vector_2' syntax:")
try:
bv1 = BitVector(bitstring = '0011001100')
bv2 = BitVector(bitstring = '110011')
if bv2 in bv1:
print("%s is in %s" % (bv2, bv1))
else:
print("%s is not in %s" % (bv2, bv1))
except ValueError as arg:
print("Error Message: " + str(arg))
print("\nTest the size modifier when a bit vector is initialized with the intVal method:")
bv = BitVector(intVal = 45, size = 16)
print(bv) # 0000000000101101
bv = BitVector(intVal = 0, size = 8)
print(bv) # 00000000
bv = BitVector(intVal = 1, size = 8)
print(bv) # 00000001
print("\nTesting slice assignment:")
bv1 = BitVector( size = 25 )
print("bv1= " + str(bv1)) # 0000000000000000000000000
bv2 = BitVector( bitstring = '1010001' )
print("bv2= " + str(bv2)) # 1010001
bv1[6:9] = bv2[0:3]
print("bv1= " + str(bv1)) # 0000001010000000000000000
bv1[:5] = bv1[5:10]
print("bv1= " + str(bv1)) # 0101001010000000000000000
bv1[20:] = bv1[5:10]
print("bv1= " + str(bv1)) # 0101001010000000000001010
bv1[:] = bv1[:]
print("bv1= " + str(bv1)) # 0101001010000000000001010
bv3 = bv1[:]
print("bv3= " + str(bv3)) # 0101001010000000000001010
print("\nTesting reset function:")
bv1.reset(1)
print("bv1= " + str(bv1)) # 1111111111111111111111111
print(bv1[3:9].reset(0)) # 000000
print(bv1[:].reset(0)) # 0000000000000000000000000
print("\nTesting count_bit():")
bv = BitVector(intVal = 45, size = 16)
y = bv.count_bits()
print(y) # 4
bv = BitVector(bitstring = '100111')
print(bv.count_bits()) # 4
bv = BitVector(bitstring = '00111000')
print(bv.count_bits()) # 3
bv = BitVector(bitstring = '001')
print(bv.count_bits()) # 1
bv = BitVector(bitstring = '00000000000000')
print(bv.count_bits()) # 0
print("\nTest set_value idea:")
bv = BitVector(intVal = 7, size =16)
print(bv) # 0000000000000111
bv.set_value(intVal = 45)
print(bv) # 101101
print("\nTesting count_bits_sparse():")
bv = BitVector(size = 2000000)
bv[345234] = 1
bv[233]=1
bv[243]=1
bv[18]=1
bv[785] =1
print("The number of bits set: " + str(bv.count_bits_sparse())) # 5
print("\nTesting Jaccard similarity and distance and Hamming distance:")
bv1 = BitVector(bitstring = '11111111')
bv2 = BitVector(bitstring = '00101011')
print("Jaccard similarity: " + str(bv1.jaccard_similarity(bv2))) # 0.5
print("Jaccard distance: " + str(bv1.jaccard_distance(bv2))) # 0.5
print("Hamming distance: " + str(bv1.hamming_distance(bv2))) # 4
print("\nTesting next_set_bit():")
bv = BitVector(bitstring = '00000000000001')
print(bv.next_set_bit(5)) # 13
bv = BitVector(bitstring = '000000000000001')
print(bv.next_set_bit(5)) # 14
bv = BitVector(bitstring = '0000000000000001')
print(bv.next_set_bit(5)) # 15
bv = BitVector(bitstring = '00000000000000001')
print(bv.next_set_bit(5)) # 16
print("\nTesting rank_of_bit_set_at_index():")
bv = BitVector(bitstring = '01010101011100')
print(bv.rank_of_bit_set_at_index( 10 )) # 6
print("\nTesting is_power_of_2():")
bv = BitVector(bitstring = '10000000001110')
print("int value: " + str(int(bv))) # 826
print(bv.is_power_of_2()) # False
print("\nTesting is_power_of_2_sparse():")
print(bv.is_power_of_2_sparse()) # False
print("\nTesting reverse():")
bv = BitVector(bitstring = '0001100000000000001')
print("original bv: " + str(bv)) # 0001100000000000001
print("reversed bv: " + str(bv.reverse())) # 1000000000000011000
print("\nTesting Greatest Common Divisor (gcd):")
bv1 = BitVector(bitstring = '01100110')
print("first arg bv: " + str(bv1) + " of int value: " + str(int(bv1))) #102
bv2 = BitVector(bitstring = '011010')
print("second arg bv: " + str(bv2) + " of int value: " + str(int(bv2)))# 26
bv = bv1.gcd(bv2)
print("gcd bitvec is: " + str(bv) + " of int value: " + str(int(bv))) # 2
print("\nTesting multiplicative_inverse:")
bv_modulus = BitVector(intVal = 32)
print("modulus is bitvec: " + str(bv_modulus) + " of int value: " + str(int(bv_modulus)))
bv = BitVector(intVal = 17)
print("bv: " + str(bv) + " of int value: " + str(int(bv)))
result = bv.multiplicative_inverse(bv_modulus)
if result is not None:
print("MI bitvec is: " + str(result) + " of int value: " + str(int(result)))
else: print("No multiplicative inverse in this case")
# 17
print("\nTest multiplication in GF(2):")
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply(b)
print("Product of a=" + str(a) + " b=" + str(b) + " is " + str(c))
# 00010100110
print("\nTest division in GF(2^n):")
mod = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='11100010110001')
quotient, remainder = a.gf_divide(mod, n)
print("Dividing a=" + str(a) + " by mod=" + str(mod) + " in GF(2^8) returns the quotient " \
+ str(quotient) + " and the remainder " + str(remainder))
# 10001111
print("\nTest modular multiplication in GF(2^n):")
modulus = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply_modular(b, modulus, n)
print("Modular product of a=" + str(a) + " b=" + str(b) + " in GF(2^8) is " + str(c))
# 10100110
print("\nTest multiplicative inverses in GF(2^3) with " + \
"modulus polynomial = x^3 + x + 1:")
print("Find multiplicative inverse of a single bit array")
modulus = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='00110011')
mi = a.gf_MI(modulus,n)
print("Multiplicative inverse of " + str(a) + " in GF(2^8) is " + str(mi))
print("\nIn the following three rows shown, the first row shows the " +\
"\nbinary code words, the second the multiplicative inverses," +\
"\nand the third the product of a binary word with its" +\
"\nmultiplicative inverse:\n")
mod = BitVector(bitstring = '1011')
n = 3
bitarrays = [BitVector(intVal=x, size=n) for x in range(1,2**3)]
mi_list = [x.gf_MI(mod,n) for x in bitarrays]
mi_str_list = [str(x.gf_MI(mod,n)) for x in bitarrays]
print("bit arrays in GF(2^3): " + str([str(x) for x in bitarrays]))
print("multiplicati_inverses: " + str(mi_str_list))
products = [ str(bitarrays[i].gf_multiply_modular(mi_list[i], mod, n)) \
for i in range(len(bitarrays)) ]
print("bit_array * multi_inv: " + str(products))
# UNCOMMENT THE FOLLOWING LINES FOR
# DISPLAYING ALL OF THE MULTIPLICATIVE
# INVERSES IN GF(2^8) WITH THE AES MODULUS:
# print("\nMultiplicative inverses in GF(2^8) with " + \
# "modulus polynomial x^8 + x^4 + x^3 + x + 1:")
# print("\n(This may take a few seconds)\n")
# mod = BitVector(bitstring = '100011011')
# n = 8
# bitarrays = [BitVector(intVal=x, size=n) for x in range(1,2**8)]
# mi_list = [x.gf_MI(mod,n) for x in bitarrays]
# mi_str_list = [str(x.gf_MI(mod,n)) for x in bitarrays]
# print("\nMultiplicative Inverses:\n\n" + str(mi_str_list))
# products = [ str(bitarrays[i].gf_multiply_modular(mi_list[i], mod, n)) \
# for i in range(len(bitarrays)) ]
# print("\nShown below is the product of each binary code word " +\
# "in GF(2^3) and its multiplicative inverse:\n\n")
# print(products)
print("\nExperimenting with runs():")
bv = BitVector(bitlist = (1, 0, 0, 1))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (1, 0))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (0, 1))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (0, 0, 0, 1))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (0, 1, 1, 0))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
print("\nExperiments with chained invocations of circular shifts:")
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv >> 1
print(bv)
bv >> 1 >> 1
print(bv)
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv << 1
print(bv)
bv << 1 << 1
print(bv)
print("\nExperiments with chained invocations of NON-circular shifts:")
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv.shift_right(1)
print(bv)
bv.shift_right(1).shift_right(1)
print(bv)
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv.shift_left(1)
print(bv)
bv.shift_left(1).shift_left(1)
print(bv)
# UNCOMMENT THE FOLLOWING LINES TO TEST THE
# PRIMALITY TESTING METHOD. IT SHOULD SHOW
# THAT ALL OF THE FOLLOWING NUMBERS ARE PRIME:
# print("\nExperiments with primality testing. If a number is not prime, its primality " +
# "test output must be zero. Otherwise, it should a number very close to 1.0.")
# primes = [179, 233, 283, 353, 419, 467, 547, 607, 661, 739, 811, 877, \
# 947, 1019, 1087, 1153, 1229, 1297, 1381, 1453, 1523, 1597, \
# 1663, 1741, 1823, 1901, 7001, 7109, 7211, 7307, 7417, 7507, \
# 7573, 7649, 7727, 7841]
# for p in primes:
# bv = BitVector(intVal = p)
# check = bv.test_for_primality()
# print("The primality test for " + str(p) + ": " + str(check))
print("\nGenerate 32-bit wide candidate for primality testing:")
bv = BitVector(intVal = 0)
bv = bv.gen_rand_bits_for_prime(32)
print(bv)
check = bv.test_for_primality()
print("The primality test for " + str(int(bv)) + ": " + str(check))
| StarcoderdataPython |
1673007 | import argparse
from datetime import timedelta
import json
import requests
from colorama import Fore, Style
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
import yfinance as yf
from gamestonk_terminal.helper_funcs import (
check_positive,
get_user_agent,
patch_pandas_text_adjustment,
parse_known_args_and_warn,
)
from gamestonk_terminal.config_terminal import USE_COLOR
def direction_color_red_green(val: str) -> str:
    """Colorize an order direction: 'Buy' green, 'Sell' red, anything
    else returned unchanged."""
    colors = {"Buy": Fore.GREEN, "Sell": Fore.RED}
    if val in colors:
        return colors[val] + val + Style.RESET_ALL
    return val
def get_ark_orders() -> DataFrame:
    """Scrape the combined ARK funds trade list from cathiesark.com.

    Returns a DataFrame of trades with the ``date`` column parsed to
    ``datetime.date``; several unused metadata columns are dropped.
    """
    url_orders = "https://cathiesark.com/ark-funds-combined/trades"
    raw_page = requests.get(url_orders, headers={"User-Agent": get_user_agent()}).text
    # the trade data is embedded as JSON inside the page's __NEXT_DATA__ script tag
    parsed_script = BeautifulSoup(raw_page, "lxml").find(
        "script", {"id": "__NEXT_DATA__"}
    )
    parsed_json = json.loads(parsed_script.string)
    df_orders = pd.json_normalize(parsed_json["props"]["pageProps"]["arkTrades"])
    # drop columns the terminal display does not use
    df_orders.drop(
        [
            "everything",
            "everything.profile.customThumbnail",
            "hidden",
            "images.thumbnail",
        ],
        axis=1,
        inplace=True,
    )
    df_orders["date"] = pd.to_datetime(df_orders["date"], format="%Y-%m-%dZ").dt.date
    return df_orders
def add_order_total(df_orders: DataFrame) -> DataFrame:
    """Augment ARK orders in place with OHLCV columns and a per-order
    total value (close * shares), using daily Yahoo Finance prices.

    Rows whose price data is missing (NaN open) get zeros in all six
    new columns.
    """
    # presumably the last row holds the oldest order, so start the price
    # download one day before it -- confirm ordering against get_ark_orders
    start_date = df_orders["date"].iloc[-1] - timedelta(days=1)
    tickers = " ".join(df_orders["ticker"].unique())
    print("")
    prices = yf.download(tickers, start=start_date, progress=False)
    # insert the six new columns right after the first three existing ones
    for i, candle in enumerate(["Volume", "Open", "Close", "High", "Low", "Total"]):
        df_orders.insert(i + 3, candle.lower(), 0)
    for i, _ in df_orders.iterrows():
        # missing price data for this ticker/date: zero out all six columns
        if np.isnan(
            prices["Open"][df_orders.loc[i, "ticker"]][
                df_orders.loc[i, "date"].strftime("%Y-%m-%d")
            ]
        ):
            for candle in ["Volume", "Open", "Close", "High", "Low", "Total"]:
                df_orders.loc[i, candle.lower()] = 0
            continue
        for candle in ["Volume", "Open", "Close", "High", "Low"]:
            df_orders.loc[i, candle.lower()] = prices[candle][
                df_orders.loc[i, "ticker"]
            ][df_orders.loc[i, "date"].strftime("%Y-%m-%d")]
        df_orders.loc[i, "total"] = (
            df_orders.loc[i, "close"] * df_orders.loc[i, "shares"]
        )
    return df_orders
def ark_orders(l_args):
    """CLI command: print the last N orders by ARK Investment Management.

    Parses ``-n/--num`` from ``l_args``, scrapes the orders, enriches
    them with price data, optionally colorizes the direction column,
    and prints the resulting table. Returns None.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="ARK Orders",
        description="""
            Orders by ARK Investment Management LLC - https://ark-funds.com/. [Source: https://cathiesark.com]
        """,
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=20,
        help="Last N orders.",
    )
    ns_parser = parse_known_args_and_warn(parser, l_args)
    if not ns_parser:
        return
    df_orders = get_ark_orders()
    # silence SettingWithCopyWarning for the in-place enrichment below
    pd.set_option("mode.chained_assignment", None)
    df_orders = add_order_total(df_orders.head(ns_parser.n_num))
    if USE_COLOR:
        df_orders["direction"] = df_orders["direction"].apply(direction_color_red_green)
        # needed so pandas width math accounts for ANSI color codes
        patch_pandas_text_adjustment()
    df_orders["link"] = "https://finviz.com/quote.ashx?t=" + df_orders["ticker"]
    pd.set_option("display.max_colwidth", None)
    pd.set_option("display.max_rows", None)
    pd.set_option("display.float_format", "{:,.2f}".format)
    print("")
    print("Orders by ARK Investment Management LLC")
    print("")
    print(df_orders.to_string(index=False))
    print("")
| StarcoderdataPython |
62997 | import torch
import torch.nn as nn
from .segmentation import deeplabv3_resnet50, deeplabv3_resnet101
__ALL__ = ["get_model"]
BatchNorm2d = nn.BatchNorm2d
BN_MOMENTUM = 0.01
class Transform(nn.Module):
    # Rescales 8-bit image data: maps values in [0, 255] to [-1, 1].
    def forward(self, input):
        return 2 * input / 255 - 1
def load_pretrain_model(model, pretrain: str, city):
    """Load weights from the checkpoint at ``pretrain`` into ``model``.

    An empty path is a no-op. A ``{city}`` placeholder in the path is
    substituted with ``city``. Only parameters whose name and shape
    match the model's own state dict are copied; everything else keeps
    the model's current values.
    """
    if pretrain == "":
        return
    if "{city}" in pretrain:
        pretrain = pretrain.format(city=city)
    checkpoint = torch.load(pretrain, map_location="cpu")
    state = model.state_dict()
    for name, tensor in checkpoint.items():
        if name in state and tensor.shape == state[name].shape:
            state[name] = tensor
    model.load_state_dict(state)
def get_hrnet(
    input_channels, num_classes, model_version, pos_weight=None, cfg_path="", **kwargs
):
    """Build an HRNet segmentation model with a custom input stem and a
    transposed-convolution upsampling head.

    Args:
        input_channels: number of channels of the network input.
        num_classes: number of output segmentation channels.
        model_version: HRNet variant name passed to ``get_seg_model``.
        pos_weight: optional positive-class weight forwarded to the model.
        cfg_path: optional config file merged into the default HRNet config.
        **kwargs: expects ``cfg`` (full experiment config) and optionally
            ``frozen_layers`` (freeze all pretrained parameters).
    """
    from .hrnet import get_cfg, get_seg_model
    model_cfg = get_cfg()
    if cfg_path != "":
        model_cfg.merge_from_file(cfg_path)
    model_cfg.NUM_CLASSES = num_classes
    model = get_seg_model(
        model_cfg,
        model_version=model_version,
        pos_weight=pos_weight,
        all_cfg=kwargs["cfg"],
    )
    # GaussRank models take raw inputs; all others get the [0,255] -> [-1,1]
    # rescaling performed by Transform
    if model_version in ["HighResolutionNetGaussRank"]:
        input_transform = nn.Identity()
    else:
        input_transform = Transform()
    # freezing happens BEFORE the stem/head below are replaced, so the
    # newly created conv1 and last_layer remain trainable
    if "frozen_layers" in kwargs:
        if kwargs["frozen_layers"]:
            for param in model.parameters():
                param.requires_grad = False
    # replace the stem so the network accepts input_channels channels
    model.conv1 = nn.Sequential(
        input_transform,
        nn.Conv2d(input_channels, 64, kernel_size=3, stride=2, padding=1, bias=False),
    )
    last_inp_channels = model.last_layer[0].in_channels
    # redefine last layer: two transposed convs upsample the features
    # before the final 1x1 classification conv
    model.last_layer = nn.Sequential(
        nn.ConvTranspose2d(
            in_channels=last_inp_channels,
            out_channels=last_inp_channels // 2,
            kernel_size=3,
            stride=2,
            padding=1,
        ),
        BatchNorm2d(last_inp_channels // 2, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=True),
        nn.ConvTranspose2d(
            in_channels=last_inp_channels // 2,
            out_channels=last_inp_channels // 2 // 2,  # config.NUM_CLASSES,
            kernel_size=3,
            stride=2,
            padding=0,
            output_padding=(0, 1),
        ),
        BatchNorm2d(last_inp_channels // 2 // 2, momentum=BN_MOMENTUM),
        nn.ReLU(inplace=True),
        nn.Conv2d(
            in_channels=last_inp_channels // 2 // 2,
            out_channels=num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )
    return model
def get_unet(input_channels, num_classes, model_version, **kwargs):
    """Build a U-Net model with the requested input/output channel counts."""
    from .unet import get_unet
    return get_unet(
        model_version=model_version, in_channels=input_channels, classes=num_classes
    )
# useless
def get_deeplabv3(
    input_channels, num_classes, model_version="deeplabv3_resnet50", **kwargs
):
    """Build a DeepLabV3 model and widen its stem conv to accept
    ``input_channels`` channels."""
    builders = {
        "deeplabv3_resnet50": deeplabv3_resnet50,
        "deeplabv3_resnet101": deeplabv3_resnet101,
    }
    model = builders[model_version](
        pretrained=False, progress=False, num_classes=num_classes, aux_loss=None
    )
    # replace the first conv so the backbone accepts input_channels channels
    model.backbone.conv1 = nn.Conv2d(
        input_channels,
        64,
        kernel_size=(7, 7),
        stride=(2, 2),
        padding=(3, 3),
        bias=False,
    )
    return model
def replace_relu(model, activation="ReLU"):
    """Recursively replace every ``nn.ReLU`` in *model* (in place) with the
    activation named by *activation* -- an attribute of ``torch.nn`` such as
    ``"GELU"``, ``"CELU"`` or ``"LeakyReLU"``.

    Activations constructed here without an ``inplace`` flag (GELU, CELU)
    are instantiated with no arguments; all others get ``inplace=True``.
    """
    for child_name, child in model.named_children():
        if isinstance(child, nn.ReLU):
            # getattr is safer and clearer than the previous eval() on a
            # format string for looking up the activation class by name.
            act_cls = getattr(nn, activation)
            if activation in ("GELU", "CELU"):
                setattr(model, child_name, act_cls())
            else:
                setattr(model, child_name, act_cls(inplace=True))
        else:
            replace_relu(child, activation)
# Registry of model-family name -> factory function; consumed by get_model().
MODEL_MAPS = {"hrnet": get_hrnet, "deeplabv3": get_deeplabv3, "unet": get_unet}
def get_model(
    cfg, city=None,
):
    """Build a segmentation model from a config node.

    :param cfg: config object with MODEL/DATASET/DIST sections (yacs-style).
    :param city: optional tag forwarded to the pretrained-weight loader.
    :returns: the constructed model, with pretrained weights loaded and
        (optionally) its hidden ReLU activations swapped.
    """
    model_name = cfg.MODEL.NAME.lower()
    model_version = cfg.MODEL.MODEL_VERSION
    num_classes = cfg.DATASET.OUTPUT_CHANNELS  # .lower()
    input_channels = cfg.DATASET.INPUT_CHANNELS
    assert model_name in MODEL_MAPS, "model is not allowed"
    # pos_weight only applies when explicitly enabled in the config.
    pos_weight = cfg.MODEL.POS_WEIGHT if cfg.MODEL.USE_POS_WEIGHT else None
    model = MODEL_MAPS[model_name](
        input_channels,
        num_classes,
        model_version,
        pos_weight=pos_weight,
        cfg_path=cfg.MODEL.MODEL_CONFIG_FILE,
        cfg=cfg,
        frozen_layers=cfg.MODEL.FROZEN_LAYERS,
    )
    # load_pretrain_model is defined elsewhere in this module; presumably it
    # mutates `model` in place with weights from cfg.DIST.PRETRAIN_MODEL.
    load_pretrain_model(model, cfg.DIST.PRETRAIN_MODEL, city=city)
    if cfg.MODEL.HIDDEN_ACTIVATION != "default":
        replace_relu(model, cfg.MODEL.HIDDEN_ACTIVATION)
    return model
| StarcoderdataPython |
4833869 | <gh_stars>0
class Methods:
    """String constants naming remote API methods (dotted VK-API-style
    names, presumably passed to an API client -- confirm against callers)."""
    USER_GET = 'users.get'
    FRIENDS_GET = 'friends.get'
| StarcoderdataPython |
1737623 | # Combo helpers independent of GUI framework - these operate on
# SelectionCallbackProperty objects.
from __future__ import absolute_import, division, print_function
import weakref
from glue.core import Data, Subset
from glue.core.hub import HubListener
from glue.core.message import (DataReorderComponentMessage,
ComponentsChangedMessage,
DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataUpdateMessage,
DataRenameComponentMessage)
from glue.external.echo import delay_callback, ChoiceSeparator
from glue.external.six import string_types
__all__ = ['ComponentIDComboHelper', 'ManualDataComboHelper',
'DataCollectionComboHelper', 'ComboHelper', 'BaseDataComboHelper']
def unique_data_iter(datasets):
    """
    Return a list of the parent Data objects for *datasets*, with duplicates
    removed but original order preserved. Non-Data items (e.g. Subsets) are
    replaced by their parent ``.data``.
    """
    unique = []
    for item in datasets:
        data = item if isinstance(item, Data) else item.data
        if data not in unique:
            unique.append(data)
    return unique
class ComboHelper(HubListener):
    """
    Base class for any combo helper represented by a SelectionCallbackProperty.

    This stores the state and selection property and exposes the ``state``,
    ``selection`` and ``choices`` properties.

    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    """

    def __init__(self, state, selection_property):
        # Only a weak reference, so the helper never keeps the state alive.
        self._state = weakref.ref(state)
        self.selection_property = selection_property

    @property
    def state(self):
        """
        The state to which the selection property belongs.
        """
        return self._state()

    @property
    def selection(self):
        """
        The current selected value.
        """
        return getattr(self.state, self.selection_property)

    @selection.setter
    def selection(self, selection):
        return setattr(self.state, self.selection_property, selection)

    @property
    def choices(self):
        """
        The current valid choices for the combo.
        """
        # The SelectionCallbackProperty descriptor lives on the class, so it
        # must be looked up on type(self.state), not the instance.
        prop = getattr(type(self.state), self.selection_property)
        return prop.get_choices(self.state)

    @choices.setter
    def choices(self, choices):
        # Delay callbacks until the new choices are fully swapped in.
        with delay_callback(self.state, self.selection_property):
            prop = getattr(type(self.state), self.selection_property)
            prop.set_choices(self.state, choices)

    @property
    def display(self):
        """
        The current display function for the combo (the function that relates
        the Python objects to the display label)
        """
        prop = getattr(type(self.state), self.selection_property)
        return prop.get_display_func(self.state)

    @display.setter
    def display(self, display):
        prop = getattr(type(self.state), self.selection_property)
        return prop.set_display_func(self.state, display)

    def _on_rename(self, msg):
        # If a component ID is renamed, we don't need to refresh because the
        # list of actual component IDs is the same as before. However, we do
        # need to trigger a refresh of any GUI combos that use this, so we
        # make the property notify a change. However, if we are inside a
        # delay_callback block, the property will not be enabled, and notify()
        # won't have any effect, in which case we set the 'force_next_sync'
        # option which means that when exiting from the delay_callback block,
        # this property will show up as having changed
        prop = getattr(type(self.state), self.selection_property)
        if prop.enabled(self.state):
            prop.notify(self.state, self.selection, self.selection)
        else:
            prop.force_next_sync(self.state)
class ComponentIDComboHelper(ComboHelper):
    """
    The purpose of this class is to set up a combo (represented by a
    SelectionCallbackProperty) showing componentIDs for one or more datasets, and to
    update these componentIDs if needed, for example if new components are added
    to a dataset, or if componentIDs are renamed. This is a GUI
    framework-independent implementation.

    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`, optional
        The data collection to which the datasets belong - if specified,
        this is used to remove datasets from the combo when they are removed
        from the data collection.
    data : :class:`~glue.core.data.Data`, optional
        If specified, set up the combo for this dataset only and don't allow
        datasets to be added/removed
    numeric : bool, optional
        Show numeric components
    categorical : bool, optional
        Show categorical components
    pixel_coord : bool, optional
        Show pixel coordinate components
    world_coord : bool, optional
        Show world coordinate components
    derived : bool, optional
        Show derived components
    none : bool or str, optional
        Add an entry that means `None`. If a string, this is the display string
        that will be shown for the `None` entry, otherwise an empty string is
        shown.
    """

    def __init__(self, state, selection_property,
                 data_collection=None, data=None,
                 numeric=True, categorical=True,
                 pixel_coord=False, world_coord=False, derived=True, none=False):

        super(ComponentIDComboHelper, self).__init__(state, selection_property)

        # `none` may be either a bool or the label to display for the None entry.
        if isinstance(none, string_types):
            self._none = True
            self._none_label = none
        else:
            self._none = none
            self._none_label = ''

        def display_func_label(cid):
            if cid is None:
                return self._none_label
            else:
                return cid.label

        self.display = display_func_label

        self._numeric = numeric
        self._categorical = categorical
        self._pixel_coord = pixel_coord
        self._world_coord = world_coord
        self._derived = derived

        # When `data` is given, the helper is pinned to that single dataset
        # and append_data/remove_data/set_multiple_data are disallowed.
        if data is None:
            self._manual_data = False
            self._data = []
        else:
            self._manual_data = True
            self._data = [data]

        self._data_collection = data_collection
        if data_collection is None:
            self.hub = None
        else:
            if data_collection.hub is None:
                raise ValueError("Hub on data collection is not set")
            else:
                self.hub = data_collection.hub

        if data is not None:
            self.refresh()

    def clear(self):
        # Remove all datasets and empty the combo.
        self._data.clear()
        self.refresh()

    @property
    def numeric(self):
        # Whether numeric components are listed.
        return self._numeric

    @numeric.setter
    def numeric(self, value):
        self._numeric = value
        self.refresh()

    @property
    def categorical(self):
        # Whether categorical components are listed.
        return self._categorical

    @categorical.setter
    def categorical(self, value):
        self._categorical = value
        self.refresh()

    @property
    def pixel_coord(self):
        # Whether pixel coordinate components are listed.
        return self._pixel_coord

    @pixel_coord.setter
    def pixel_coord(self, value):
        self._pixel_coord = value
        self.refresh()

    @property
    def world_coord(self):
        # Whether world coordinate components are listed.
        return self._world_coord

    @world_coord.setter
    def world_coord(self, value):
        self._world_coord = value
        self.refresh()

    @property
    def derived(self):
        # Whether derived components are listed.
        return self._derived

    @derived.setter
    def derived(self, value):
        self._derived = value
        self.refresh()

    @property
    def none(self):
        # Whether a `None` entry is included at the top of the combo.
        return self._none

    @none.setter
    def none(self, value):
        if isinstance(value, string_types):
            self._none = True
            self._none_label = value
        else:
            self._none = value
            self._none_label = ''
        self.refresh()

    def append_data(self, data, refresh=True):

        if self._manual_data:
            raise Exception("Cannot change data in ComponentIDComboHelper "
                            "initialized from a single dataset")

        # Subsets contribute their parent dataset's components.
        if isinstance(data, Subset):
            data = data.data

        if self.hub is None:
            if data.hub is not None:
                self.hub = data.hub
        elif data.hub is not self.hub:
            raise ValueError("Data Hub is different from current hub")

        if data not in self._data:
            self._data.append(data)
            if refresh:
                self.refresh()

    def remove_data(self, data):

        if self._manual_data:
            raise Exception("Cannot change data in ComponentIDComboHelper "
                            "initialized from a single dataset")

        if data in self._data:
            self._data.remove(data)
            self.refresh()

    def set_multiple_data(self, datasets):
        """
        Add multiple datasets to the combo in one go (and clear any previous datasets).

        Parameters
        ----------
        datasets : list
            The list of :class:`~glue.core.data.Data` objects to add
        """

        if self._manual_data:
            raise Exception("Cannot change data in ComponentIDComboHelper "
                            "initialized from a single dataset")

        try:
            self._data.clear()
        except AttributeError:  # PY2
            self._data[:] = []

        for data in unique_data_iter(datasets):
            self.append_data(data, refresh=False)

        self.refresh()

    @property
    def hub(self):
        # The hub this helper is subscribed to (None until a dataset with a
        # hub is added or a data collection is supplied).
        return self._hub

    @hub.setter
    def hub(self, value):
        self._hub = value
        if value is not None:
            self.register_to_hub(value)

    def refresh(self, *args):
        # Rebuild the full list of choices from scratch.

        choices = []

        if self._none:
            choices.append(None)

        for data in self._data:

            # Only derived components owned by this dataset are shown.
            derived_components = [cid for cid in data.derived_components if cid.parent is data]

            # With several datasets, group each one's components under a
            # separator labelled with the dataset name.
            if len(self._data) > 1:
                if data.label is None or data.label == '':
                    choices.append(ChoiceSeparator('Untitled Data'))
                else:
                    choices.append(ChoiceSeparator(data.label))

            cids = [ChoiceSeparator('Main components')]
            for cid in data.primary_components:
                if cid not in data.coordinate_components:
                    comp = data.get_component(cid)
                    if ((comp.numeric and self.numeric) or
                            (comp.categorical and self.categorical)):
                        cids.append(cid)
            if len(cids) > 1:
                # Drop the 'Main components' separator unless other component
                # groups will follow it.
                if self.pixel_coord or self.world_coord or (self.derived and len(derived_components) > 0):
                    choices += cids
                else:
                    choices += cids[1:]

            if self.numeric and self.derived:
                cids = [ChoiceSeparator('Derived components')]
                for cid in derived_components:
                    cids.append(cid)
                if len(cids) > 1:
                    choices += cids

            if self.pixel_coord or self.world_coord:
                cids = [ChoiceSeparator('Coordinate components')]
                if self.pixel_coord:
                    cids += data.pixel_component_ids
                if self.world_coord:
                    cids += data.world_component_ids
                if len(cids) > 1:
                    choices += cids

        self.choices = choices

    def _filter_msg(self, msg):
        return msg.data in self._data or msg.sender in self._data_collection

    def register_to_hub(self, hub):
        # Renames only require a re-sync of the GUI, not a rebuild.
        hub.subscribe(self, DataRenameComponentMessage,
                      handler=self._on_rename,
                      filter=lambda msg: msg.sender in self._data)
        hub.subscribe(self, DataReorderComponentMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender in self._data)
        hub.subscribe(self, ComponentsChangedMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender in self._data)
        if self._data_collection is not None:
            hub.subscribe(self, DataCollectionDeleteMessage,
                          handler=self._remove_data)

    def _remove_data(self, msg):
        self.remove_data(msg.data)

    def unregister(self, hub):
        hub.unsubscribe_all(self)
class BaseDataComboHelper(ComboHelper):
    """
    This is a base class for helpers for combo boxes that need to show a list
    of data objects.

    Subclasses are expected to provide a ``_datasets`` iterable.

    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`
        The data collection to which the datasets belong - this is needed
        because if a dataset is removed from the data collection, we want to
        remove it here.
    """

    def __init__(self, state, selection_property, data_collection=None):

        super(BaseDataComboHelper, self).__init__(state, selection_property)

        def display_func_label(cid):
            return cid.label

        self.display = display_func_label

        self._component_id_helpers = []
        # Keep any dependent component-ID combos in sync with the selection.
        self.state.add_callback(self.selection_property, self.refresh_component_ids)

        self._data_collection = data_collection
        if data_collection is not None:
            if data_collection.hub is None:
                raise ValueError("Hub on data collection is not set")
            else:
                self.hub = data_collection.hub
        else:
            self.hub = None

    def refresh(self, *args):
        # `_datasets` is supplied by subclasses.
        self.choices = [data for data in self._datasets]
        self.refresh_component_ids()

    def refresh_component_ids(self, *args):
        data = getattr(self.state, self.selection_property)
        for helper in self._component_id_helpers:
            helper.clear()
            if data is not None:
                helper.append_data(data)
            helper.refresh()

    def add_component_id_combo(self, combo):
        # NOTE(review): this method appears broken/dead: ComponentIDComboHelper
        # requires (state, selection_property), `_component_id_helpers` is a
        # plain list (which has no `append_data` method), and `self._data` is
        # never set on this class. Calling it would raise -- confirm whether
        # it should be fixed or removed.
        helper = ComponentIDComboHelper(combo)
        self._component_id_helpers.append_data(helper)
        if self._data is not None:
            helper.append_data(self._data)

    @property
    def hub(self):
        # The hub this helper is subscribed to (may be None).
        return self._hub

    @hub.setter
    def hub(self, value):
        self._hub = value
        if value is not None:
            self.register_to_hub(value)

    def register_to_hub(self, hub):
        # Subclasses register their message subscriptions here.
        pass

    def _on_data_update(self, msg):
        # A label change is a rename (re-sync GUI); anything else rebuilds.
        if msg.attribute == 'label':
            self._on_rename(msg)
        else:
            self.refresh()
class ManualDataComboHelper(BaseDataComboHelper):
    """
    This is a helper for combo boxes that need to show a list of data objects
    that is manually curated.

    Datasets are added and removed using the
    :meth:`~ManualDataComboHelper.append_data` and
    :meth:`~ManualDataComboHelper.remove_data` methods.

    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`
        The data collection to which the datasets belong - this is needed
        because if a dataset is removed from the data collection, we want to
        remove it here.
    """

    def __init__(self, state, selection_property, data_collection=None):

        super(ManualDataComboHelper, self).__init__(state, selection_property,
                                                    data_collection=data_collection)

        # The manually curated list of datasets backing the combo.
        self._datasets = []

    def set_multiple_data(self, datasets):
        """
        Add multiple datasets to the combo in one go (and clear any previous datasets).

        Parameters
        ----------
        datasets : list
            The list of :class:`~glue.core.data.Data` objects to add
        """
        try:
            self._datasets.clear()
        except AttributeError:  # PY2
            self._datasets[:] = []
        for data in unique_data_iter(datasets):
            self.append_data(data, refresh=False)
        self.refresh()

    def append_data(self, data, refresh=True):
        if data in self._datasets:
            return
        # Adopt the dataset's hub on first use so subscriptions get set up.
        if self.hub is None and data.hub is not None:
            self.hub = data.hub
        self._datasets.append(data)
        if refresh:
            self.refresh()

    def remove_data(self, data):
        if data not in self._datasets:
            return
        self._datasets.remove(data)
        self.refresh()

    def register_to_hub(self, hub):

        super(ManualDataComboHelper, self).register_to_hub(hub)

        hub.subscribe(self, DataUpdateMessage,
                      handler=self._on_data_update,
                      filter=lambda msg: msg.sender in self._datasets)
        hub.subscribe(self, DataCollectionDeleteMessage,
                      handler=lambda msg: self.remove_data(msg.data),
                      filter=lambda msg: msg.sender is self._data_collection)
class DataCollectionComboHelper(BaseDataComboHelper):
    """
    This is a helper for combo boxes that need to show a list of data objects
    that is always in sync with a :class:`~glue.core.data_collection.DataCollection`.

    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`
        The data collection with which to stay in sync
    """

    def __init__(self, state, selection_property, data_collection):

        super(DataCollectionComboHelper, self).__init__(state, selection_property,
                                                        data_collection=data_collection)

        # The data collection itself acts as the dataset list.
        self._datasets = data_collection

        self.refresh()

    def register_to_hub(self, hub):
        super(DataCollectionComboHelper, self).register_to_hub(hub)
        hub.subscribe(self, DataUpdateMessage,
                      handler=self._on_data_update,
                      filter=lambda msg: msg.sender in self._datasets)
        # Additions/removals from the collection trigger a full rebuild.
        hub.subscribe(self, DataCollectionAddMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender is self._datasets)
        hub.subscribe(self, DataCollectionDeleteMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender is self._datasets)
| StarcoderdataPython |
3296824 | from django.shortcuts import get_object_or_404, render_to_response
from django.shortcuts import render
from registration.models import *
from events.models import EventNew
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.template import Context
from django.core.mail import send_mail, EmailMessage
from django.contrib.auth import logout
from django.contrib import auth
from django.db.models import F
# Create your views here.
@login_required
def index(request, pagename):
    """Render the named pcradmin page with all user profiles by college."""
    # NOTE(review): dead code -- a second `index` view defined further down
    # this module replaces this one at import time. Confirm which version
    # urls.py is meant to use.
    u_list = UserProfile.objects.order_by('college')[0:]
    return render(request, 'pcradmin/'+pagename+'.html', {'u_list':u_list})
@login_required
def change_team_limit_list(request):
    """List all user profiles (ordered by college) for team-limit editing."""
    u_list = UserProfile.objects.order_by('college')[0:]
    return render(request, 'pcradmin/change_team_limit_list.html', {'u_list':u_list})
@login_required
def change_team_limits(request):
    """Show the limit-change form for the user id posted from the list page.

    NOTE(review): non-POST requests fall through and return None, which
    Django rejects -- confirm this view is only ever reached via POST.
    """
    if request.method == 'POST':
        # Defaults to False (not None) when 'uid' is missing.
        uid = request.POST.get('uid', False)
        #fna = request.POST['fna']
        #lna = request.POST['lna']
        e_list = EventNew.objects.order_by('name')[0:]
        message = ""
        return render(request, 'pcradmin/changelimit.html', {'uid':uid, 'e_list':e_list, 'message':message})
@login_required
def change_limits(request):
    """Persist a new per-leader event limit from the posted form values.

    NOTE(review): non-POST requests return None (Django error) -- confirm
    POST-only routing.
    """
    if request.method == 'POST':
        userid = request.POST['userid']
        climit = request.POST['limit']
        eventid = request.POST['eventid']
        p = EventLimits()
        p.event = EventNew.objects.get(id=int(eventid))
        p.leader = UserProfile.objects.get(id=int(userid))
        p.limit = climit
        p.save()
        return render(request, 'pcradmin/limit_changed.html')
@login_required
def change_sports_limits(request):
    """Show the form listing all events so a per-event cap can be edited."""
    a_list = EventNew.objects.order_by('name')[0:]
    return render(request, 'pcradmin/changesportslimit.html', {'a_list':a_list})
@login_required
def save_sports_limits(request):
    """Save the posted max-participant limit onto the chosen event.

    NOTE(review): non-POST requests return None (Django error) -- confirm
    POST-only routing.
    """
    if request.method == 'POST':
        slimit = request.POST['limit']
        eventid = request.POST['eventid']
        p = EventNew.objects.get(id=int(eventid))
        p.max_limit = slimit
        p.save()
        return render(request, 'pcradmin/sportslimitchanged.html')
@login_required
def index(request, pagename):
    """Render the named pcradmin page with the full user list.

    NOTE(review): this redefinition silently replaces the earlier `index`
    view above -- confirm which one urls.py is meant to use.
    """
    user_list = User.objects.all()
    return render(request, 'pcradmin/'+pagename+'.html',{'users' : user_list})
@login_required
def set_status(request):
    """Show the activate/deactivate form for the posted username."""
    if request.method == 'POST':
        user_name = request.POST['username']
        #test1= Participant.objects.filter( gleader__contains = "test")
        #k= test1.events.all()
        #p = test1.name
        #return render(request, 'pcradmin/setstatus.html',{'uname': user_name,'event' : k, 'xname': p })
        return render(request, 'pcradmin/setstatus.html',{'uname': user_name})
@login_required
def save_status(request):
    """Activate ('1') or deactivate ('0') the user named in the POST data.

    Renders showstatus.html with ``tstat``: 0 = deactivated, 1 = activated,
    2 = no change (unknown status value or no matching user).
    """
    if request.method == 'POST':
        stat = request.POST['status']
        user_name = request.POST['uname']
        tstat = 2  # sentinel: nothing was changed
        desired = None
        if stat == '0':
            desired = False
        elif stat == '1':
            desired = True
        if desired is not None:
            # Filter in the database instead of scanning every user in Python.
            for obj in User.objects.filter(username=user_name):
                obj.is_active = desired
                obj.save()
                if obj.is_active == desired:
                    tstat = 1 if desired else 0
        return render(request, 'pcradmin/showstatus.html', {'tstat': tstat})
@login_required
def send_mail(request):
    """Send an email built from the compose form's subject/body/recipient.

    NOTE(review): this view shadows `django.core.mail.send_mail` imported at
    the top of the module; any later call intending the Django helper will
    hit this view instead. Renaming it requires a matching urls.py change.
    """
    if request.method == 'POST':
        sub=request.POST['sub']
        body= request.POST['body']
        send_to= request.POST['mailadd']
        # '<EMAIL>' is a scrubbed placeholder -- restore a real from-address.
        email = EmailMessage(sub, body, '<EMAIL>', [send_to])
        email.send()
        return render(request, "pcradmin/sent.html")
@login_required
def compose(request):
    """Show the mail-compose form pre-filled with the posted email address."""
    if request.method == 'POST':
        emailadd = request.POST['email']
        return render(request, 'pcradmin/compose.html', {'emailadd' : emailadd})
# def pcr_login(request):
# context = RequestContext(request)
# if request.method == 'POST':
# #return render(request, 'pcradmin/changelimit.html')
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(username=username, password=password)
# if user:
# if user.is_active:
# if user.is_staff:
# login(request, user)
# return HttpResponseRedirect('../dashboard/')
# else:
# context = {'error_heading' : "Access Denied", 'error_message' : 'You are not a PCr Member. <br> Return back <a href="/">home</a>'}
# return render(request, 'pcradmin/error.html', context)
# else:
# context = {'error_heading' : "Account Frozen", 'error_message' : 'No changes can be made now <br> Return back <a href="/">home</a>'}
# return render(request, 'pcradmin/error.html', context)
# else:
# context = {'error_heading' : "Invalid Login Credentials", 'error_message' : 'Please <a href=".">try again</a>'}
# return render(request, 'pcradmin/error.html', context)
# else:
# return render(request, 'pcradmin/login.html') | StarcoderdataPython |
3215924 | from enum import Enum, auto
from xzero import ZeroBase
class EventType(Enum):
    """Kinds of events that flow through the trading infrastructure.

    Values are auto-assigned integers; only the names are meaningful.
    """
    MARKET = auto()
    EXECUTION = auto()
    SIGNAL = auto()
    ORDER = auto()
    MKT_ORDER = auto()
    LMT_ORDER = auto()
    FILL = auto()
    TRANSACTION = auto()
class Event(ZeroBase):
    """
    Base class for events that flow through the trading infrastructure

    Examples:
        >>> event = Event.from_dict(event_type='mkt')
        >>> assert str(event) == 'Event()'
    """
    # ``from_dict`` and ``__str__`` used in the doctest are presumably
    # provided by ZeroBase -- confirm in xzero's base module.
    def __init_subclass__(cls, event_type=None, **kwargs):
        # Subclasses declare their kind as a class keyword, e.g.
        # ``class MarketEvent(Event, event_type=EventType.MARKET)``; the
        # value is stored as a class attribute.
        cls.event_type = event_type
        super().__init_subclass__(**kwargs)
if __name__ == '__main__':
    """
    CommandLine:
        python -m xzero.events all
    """
    # Run this module's doctests via xdoctest when executed as a script.
    import xdoctest
    xdoctest.doctest_module(__file__)
| StarcoderdataPython |
3379057 | <reponame>mikekwright/py-type-registry<gh_stars>0
"""
This is a simple function to allow us to easily load yaml
and have it construct an object is specified
"""
import logging
import yaml
import os
from pprint import pprint
from string import Template
from .registrar import find_type
__all__ = ['load_yaml', 'load_yaml_str']
logger = logging.getLogger(__name__)
_yaml_initialized = False
def construct_yaml_stream(in_stream):
    """
    Load YAML from *in_stream*, constructing ``!obj``-tagged mappings into
    Python objects via their ``__factory__`` entry (a callable, or the
    string alias of a type registered with the registrar).

    :param in_stream: (io.Stream | str) the stream to load the contents from
    :returns: (dict|obj) The constructed yaml results
    """
    global _yaml_initialized

    logger.info('Request to construct yaml')
    if not _yaml_initialized:
        def _object_creator(loader, node, deep=True):
            # Build a plain mapping first so __factory__ can be inspected.
            mapping = {}
            for key_node, value_node in node.value:
                key = loader.construct_object(key_node, deep=deep)
                value = loader.construct_object(value_node, deep=deep)
                mapping[key] = value

            if '__factory__' in mapping:
                try:
                    _cls = mapping.pop('__factory__')
                    logger.debug('__factory__ found in yaml, attempting to construct %s', _cls)

                    # A string factory is an alias registered with the registrar.
                    if isinstance(_cls, str):
                        registrar_values = find_type(_cls)
                        _cls = registrar_values['factory_method']
                        default_args = registrar_values['default_values']
                        # Explicit yaml keys win over registered defaults.
                        mapping = {**default_args, **mapping}

                    return _cls(**mapping)
                except Exception as e:
                    logger.error('Failed to construct yaml object %s, %s', e, str(mapping))
                    # Bare raise preserves the original traceback.
                    raise

            return loader.construct_mapping(node, deep)

        logger.info('Registering yaml constructor for python !obj types')
        yaml.add_constructor('!obj', _object_creator, yaml.Loader)
        _yaml_initialized = True

    # Pass the Loader explicitly: the '!obj' constructor above is registered
    # on yaml.Loader, and yaml.load() without a Loader is deprecated/unsafe.
    return yaml.load(in_stream, Loader=yaml.Loader)
def construct_types_in_dict(config: dict) -> dict:
    """
    Depth-first walk of *config* that instantiates any mapping carrying a
    ``__factory__`` key. The factory may be a callable or the string alias
    of a registered type (resolved via ``find_type``); the mapping's other
    keys are passed to it as keyword arguments.
    """
    # Recurse into child dicts first so inner objects are constructed
    # before their parents consume them.
    resolved = {
        key: construct_types_in_dict(value) if type(value) == dict else value
        for key, value in config.items()
    }

    # Standard return when no factory is requested at this level.
    if '__factory__' not in resolved:
        return resolved

    factory = resolved.pop('__factory__')
    if type(factory) == str:
        registered = find_type(factory)
        factory = registered['factory_method']
        # Explicit config keys win over the registered defaults.
        resolved = {**registered['default_values'], **resolved}
    return factory(**resolved)
def load_yaml_str(yaml_str, template_params=None, return_config_str=False):
    """
    Substitute ``$VAR`` template placeholders (environment variables plus
    *template_params*, the latter taking precedence) into *yaml_str*, parse
    it, and construct any ``__factory__``-tagged objects.

    Example:
        with open(my_file, 'r') as f:
            yaml_str = f.read()
        params = {'SOURCE_PATH': '/tmp/source', 'AGE': 30}
        object = load_yaml_str(yaml_str, params)

    :param yaml_str: (string) yaml string to construct
    :param template_params: (optional: dict) key value pairs where the key
                            represents a value in the config to update.
    :param return_config_str: (optional: bool) whether to return the entire
                              substituted config string with the result
    :returns: the constructed object, or a ``(object, config_str)`` tuple
              when *return_config_str* is True
    """
    substitutions = dict(os.environ)
    substitutions.update(template_params or {})
    config_str = Template(yaml_str).substitute(substitutions)

    constructed = construct_types_in_dict(construct_yaml_stream(config_str))
    return (constructed, config_str) if return_config_str else constructed
def load_yaml(yaml_file, template_params=None, return_config_str=False):
    """
    Read *yaml_file* and construct its contents via :func:`load_yaml_str`.

    Example:
        params = {'SOURCE_PATH': '/tmp/source', 'AGE': 30}
        object = load_yaml('my_file.yaml', params)

    :param yaml_file: (string) yaml filename to load
    :param template_params: (optional: dict) key value pairs where the key
                            represents a value in the config to update.
    :returns: The constructed type from the yaml config
    """
    with open(yaml_file, 'r') as handle:
        contents = handle.read()
    return load_yaml_str(contents, template_params, return_config_str)
| StarcoderdataPython |
1766686 | import maya.cmds as mc
import maya.api.OpenMaya as om
from dcc.abstract import afnskin
from dcc.maya import fnnode
from dcc.maya.libs import dagutils, skinutils
from dcc.maya.decorators import undo
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class FnSkin(afnskin.AFnSkin, fnnode.FnNode):
    """
    Overload of AFnSkin that outlines function set behaviour for skin weighting in Maya.
    This class also inherits from FnNode since skin clusters are node objects.
    """

    __slots__ = ('_transform', '_shape', '_intermediateObject')
    # Whether Maya's TransferPaintWeightsCmd plugin is loaded (queried once at class definition).
    __loaded__ = mc.pluginInfo('TransferPaintWeightsCmd', query=True, loaded=True)
    # Name of the scratch color set used for weight-painting feedback.
    __colorsetname__ = 'paintWeightsColorSet1'
    # Color ramp spec passed to transferPaintWeights -- format per Maya's
    # colorRamp flag (verify ordering against Maya docs).
    __colorramp__ = '1,0,0,1,1,1,0.5,0,0.8,1,1,1,0,0.6,1,0,1,0,0.4,1,0,0,1,0,1'
    def __init__(self, *args, **kwargs):
        """
        Private method called after a new instance is created.
        """
        # Declare class variables
        # Handles remain safe to hold across scene changes, unlike raw MObjects.
        self._transform = om.MObjectHandle()
        self._shape = om.MObjectHandle()
        self._intermediateObject = om.MObjectHandle()
        # Call parent method
        #
        super(FnSkin, self).__init__(*args, **kwargs)
    def setObject(self, obj):
        """
        Assigns an object to this function set for manipulation.
        Whatever is passed in is resolved to its skin cluster deformer first.

        :type obj: Union[str, om.MObject, om.MDagPath]
        :rtype: None
        """
        # Call parent method
        #
        skinCluster = dagutils.findDeformerByType(obj, om.MFn.kSkinClusterFilter)
        super(FnSkin, self).setObject(skinCluster)
        # Store references to skin cluster components
        # (kept as handles -- see __init__).
        transform, shape, intermediateObject = dagutils.decomposeDeformer(skinCluster)
        self._transform = om.MObjectHandle(transform)
        self._shape = om.MObjectHandle(shape)
        self._intermediateObject = om.MObjectHandle(intermediateObject)
    def transform(self):
        """
        Returns the transform component of this deformer.

        :rtype: om.MObject
        """
        # Returns a null MObject if the handle is no longer valid.
        return self._transform.object()
    def shape(self):
        """
        Returns the shape component of this deformer.

        :rtype: om.MObject
        """
        # Returns a null MObject if the handle is no longer valid.
        return self._shape.object()
    def intermediateObject(self):
        """
        Returns the intermediate object of this deformer.

        :rtype: om.MObject
        """
        # Returns a null MObject if the handle is no longer valid.
        return self._intermediateObject.object()
    def iterVertices(self):
        """
        Returns an iterable over all vertex indices (a ``range`` object,
        sized by the base class's ``numControlPoints``).

        :rtype: iter
        """
        return range(self.numControlPoints())
    def componentSelection(self):
        """
        Returns the component selection for the associated shape.
        A null MObject is returned unless exactly one component of the
        active selection belongs to this skin's shape.

        :rtype: om.MObject
        """
        # Collect components
        #
        shape = self.shape()
        components = [component for (dagPath, component) in self.iterActiveComponentSelection() if dagPath.node() == shape]
        numComponents = len(components)
        if numComponents == 1:
            return components[0]
        else:
            return om.MObject.kNullObj
    def isPartiallySelected(self):
        """
        Evaluates if this node is partially selected.
        Useful for things like deformers or modifiers.
        True when either the deformer itself or its shape is selected.

        :rtype: bool
        """
        return self.isSelected() or self.shape() in self.getActiveSelection()
    def iterSelection(self, includeWeight=False):
        """
        Returns a generator that yields the selected vertex elements.

        NOTE(review): ``includeWeight`` is currently ignored -- soft weights
        are only yielded by ``iterSoftSelection``. Confirm intent.

        :type includeWeight: bool
        :rtype: iter
        """
        # Inspect component selection
        #
        component = self.componentSelection()
        if not component.hasFn(om.MFn.kMeshVertComponent):
            return
        # Iterate through component
        #
        fnComponent = om.MFnSingleIndexedComponent(component)
        for i in range(fnComponent.elementCount):
            yield fnComponent.element(i)
    def setSelection(self, vertices):
        """
        Updates the active selection with the supplied vertex elements.

        :type vertices: list[int]
        :rtype: None
        """
        # Get dag path to object
        #
        dagPath = om.MDagPath.getAPathTo(self.shape())
        # Create mesh component
        #
        fnComponent = om.MFnSingleIndexedComponent()
        component = fnComponent.create(om.MFn.kMeshVertComponent)
        fnComponent.addElements(vertices)
        # Update selection list
        # (replaces the current active selection outright).
        selection = om.MSelectionList()
        selection.add((dagPath, component))
        om.MGlobal.setActiveSelectionList(selection)
    def iterSoftSelection(self):
        """
        Returns a generator that yields selected vertex and soft value pairs.
        Vertices without soft-selection weights yield a weight of 1.0.

        :rtype: iter
        """
        # Inspect component selection
        #
        component = self.componentSelection()
        if not component.hasFn(om.MFn.kMeshVertComponent):
            return
        # Iterate through component
        #
        fnComponent = om.MFnSingleIndexedComponent(component)
        for i in range(fnComponent.elementCount):
            element = fnComponent.element(i)
            if fnComponent.hasWeights:
                yield element, fnComponent.weight(i)
            else:
                yield element, 1.0
    @classmethod
    def isPluginLoaded(cls):
        """
        Evaluates if the plugin for color display is loaded.
        Cached at class-definition time in ``__loaded__``.

        :rtype: bool
        """
        return cls.__loaded__
    def showColors(self):
        """
        Enables color feedback for the associated shape.
        No-op (with a debug log) when the required plugin is missing or the
        shape is not a mesh.

        :rtype: None
        """
        # Check if plugin is loaded
        #
        if not self.isPluginLoaded():
            log.debug('showColors() requires the TransferPaintWeightsCmd.mll plugin!')
            return
        # Check if this instance supports vertex colours
        #
        shape = self.shape()
        if not shape.hasFn(om.MFn.kMesh):
            log.debug('showColors() expects a mesh (%s given)!' % shape.apiTypeStr)
            return
        # Check if intermediate object has colour set
        # (the scratch set lives on the intermediate object and becomes current).
        intermediateObject = self.intermediateObject()
        fnMesh = om.MFnMesh(intermediateObject)
        colorSetNames = fnMesh.getColorSetNames()
        if self.__colorsetname__ not in colorSetNames:
            fnMesh.createColorSet(self.__colorsetname__, False)
            fnMesh.setCurrentColorSetName(self.__colorsetname__)
        # Set shape attributes
        # Turn off every other display overlay so only the weight colors show.
        fnMesh.setObject(shape)
        fullPathName = fnMesh.fullPathName()
        mc.setAttr('%s.displayImmediate' % fullPathName, 0)
        mc.setAttr('%s.displayVertices' % fullPathName, 0)
        mc.setAttr('%s.displayEdges' % fullPathName, 0)
        mc.setAttr('%s.displayBorders' % fullPathName, 0)
        mc.setAttr('%s.displayCenter' % fullPathName, 0)
        mc.setAttr('%s.displayTriangles' % fullPathName, 0)
        mc.setAttr('%s.displayUVs' % fullPathName, 0)
        mc.setAttr('%s.displayNonPlanar' % fullPathName, 0)
        mc.setAttr('%s.displayInvisibleFaces' % fullPathName, 0)
        mc.setAttr('%s.displayColors' % fullPathName, 1)
        mc.setAttr('%s.vertexColorSource' % fullPathName, 1)
        mc.setAttr('%s.materialBlend' % fullPathName, 0)
        mc.setAttr('%s.displayNormal' % fullPathName, 0)
        mc.setAttr('%s.displayTangent' % fullPathName, 0)
        mc.setAttr('%s.currentColorSet' % fullPathName, '', type='string')
def hideColors(self):
    """
    Disables vertex-colour weight feedback for the associated shape and
    removes the dedicated colour set from the intermediate object.

    :rtype: None
    """

    # Check if plugin is loaded
    #
    if not self.isPluginLoaded():

        log.debug('hideColors() requires the TransferPaintWeightsCmd.mll plugin!')
        return

    # Check if this instance supports vertex colours
    #
    shape = self.shape()

    if not shape.hasFn(om.MFn.kMesh):

        log.debug('hideColors() expects a mesh (%s given)!' % shape.apiTypeStr)
        return

    # Reset shape attributes
    #
    fnMesh = om.MFnMesh(shape)
    fullPathName = fnMesh.fullPathName()

    mc.setAttr('%s.displayColors' % fullPathName, 0)
    mc.setAttr('%s.vertexColorSource' % fullPathName, 1)

    # Delete color set from the intermediate object (where showColors()
    # created it)
    #
    intermediateObject = self.intermediateObject()
    fnMesh.setObject(intermediateObject)

    colorSetNames = fnMesh.getColorSetNames()

    if self.__colorsetname__ in colorSetNames:

        fnMesh.deleteColorSet(self.__colorsetname__)
def invalidateColors(self):
    """
    Forces the vertex colour display to redraw by re-transferring the paint
    weights into the active colour set.

    :rtype: None
    """

    # Check if plugin is loaded
    #
    if not self.isPluginLoaded():

        log.debug('invalidateColors() requires the TransferPaintWeightsCmd.mll plugin!')
        return

    # Check if this instance belongs to a mesh
    #
    intermediateObject = self.intermediateObject()

    if not intermediateObject.hasFn(om.MFn.kMesh):

        log.debug('invalidateColors() expects a mesh (%s given)!' % intermediateObject.apiTypeStr)
        return

    # Check if colour set is active; only redraw when our set is current so
    # we don't clobber a user-authored colour set
    #
    fnMesh = om.MFnMesh(intermediateObject)

    if fnMesh.currentColorSetName() == self.__colorsetname__:

        # Dirty the paint plug first so transferPaintWeights sees fresh data
        mc.dgdirty('%s.paintTrans' % self.name())
        mc.transferPaintWeights(
            '%s.paintWeights' % self.name(),
            fnMesh.fullPathName(),
            colorRamp=self.__colorramp__
        )
def iterInfluences(self):
    """
    Returns a generator that yields all of the influence objects from this skin.

    :rtype: iter
    """

    skinCluster = self.object()
    return skinutils.iterInfluences(skinCluster)
def numInfluences(self):
    """
    Returns the number of influences in use, counted as the connected
    elements on this deformer's ``matrix`` plug.

    :rtype: int
    """

    fnDependNode = om.MFnDependencyNode(self.object())
    matrixPlug = fnDependNode.findPlug('matrix', False)
    return matrixPlug.numConnectedElements()
def maxInfluences(self):
    """
    Getter method that returns the max number of influences for this skin.

    NOTE(review): the plug is read with ``asFloat()``, so callers receive a
    float even though influence counts are conceptually integral.

    :rtype: float
    """

    return om.MFnDependencyNode(self.object()).findPlug('maxInfluences', False).asFloat()
def selectInfluence(self, influenceId):
    """
    Changes the color display to the specified influence id.

    :type influenceId: int
    :rtype: None
    """

    skinCluster = self.object()
    skinutils.selectInfluence(skinCluster, influenceId)
def addInfluence(self, influence):
    """
    Adds an influence to this deformer and records it in the local
    influence map under the index assigned by the skin cluster.

    :type influence: om.MObject
    :rtype: None
    """

    index = skinutils.addInfluence(self.object(), influence)
    # presumably influences() returns a mutable dict keyed by logical
    # influence index — TODO confirm against the base class
    self.influences()[index] = influence
def removeInfluence(self, influenceId):
    """
    Removes an influence from this deformer and drops it from the local
    influence map.

    :type influenceId: int
    :rtype: None
    """

    skinutils.removeInfluence(self.object(), influenceId)
    del self.influences()[influenceId]
def iterVertexWeights(self, *args):
    """
    Returns a generator that yields weights for the supplied vertex indices.
    If no vertex indices are supplied then all weights are yielded instead.

    :rtype: iter
    """

    vertexIndices = args
    return skinutils.iterWeightList(self.object(), vertexIndices=vertexIndices)
@undo.undo(name='Apply Vertex Weights')
def applyVertexWeights(self, vertices):
    """
    Assigns the supplied vertex weights to this deformer inside an undo chunk.

    :type vertices: dict[int:dict[int:float]]
    :rtype: None
    """

    skinCluster = self.object()
    skinutils.setWeightList(skinCluster, vertices)
@undo.undo(name='Reset Pre-Bind Matrices')
def resetPreBindMatrices(self):
    """
    Resets the pre-bind matrices on the associated joints inside an undo chunk.

    :rtype: None
    """

    skinCluster = self.object()
    skinutils.resetPreBindMatrices(skinCluster)
@undo.undo(name='Reset Intermediate Object')
def resetIntermediateObject(self):
    """
    Resets the control points on the associated intermediate object inside
    an undo chunk.

    :rtype: None
    """

    skinCluster = self.object()
    skinutils.resetIntermediateObject(skinCluster)
| StarcoderdataPython |
26620 | """
You work for a retail store that wants to increase sales on Tuesday and
Wednesday, which are the store's slowest sales days. On Tuesday and
Wednesday, if a customer's subtotal is greater than $50, the store will
discount the customer's purchase by 10%.
"""
# Import the datatime module so that
# it can be used in this program.
from datetime import datetime
# The discount rate is 10% and the sales tax rate is 6%.
DISC_RATE = 0.10
SALES_TAX_RATE = 0.06
subtotal = 0
done = False
while not done:
# Get the price from the user.
text = input("Please enter the price: ")
if text.lower() == "done":
done = True
else:
price = float(text)
# Get the quantity from the user.
quantity = int(input("Plesae enter the quantity: "))
subtotal += price * quantity
# Print a blank line.
print()
# Round the subtotal to two digits after
# the decimal and print the subtotal.
subtotal = round(subtotal, 2)
print(f"Subtotal: {subtotal}")
print()
# Call the now() method to get the current date and
# time as a datetime object from the computer's clock.
current_date_and_time = datetime.now()
# Call the isoweekday() method to get the day of
# the week from the current_date_and_time object.
weekday = current_date_and_time.isoweekday()
# if the subtotal is greater than 50 and
# today is Tuesday or Wednesday, compute the discount.
if weekday == 2 or weekday == 3:
if subtotal < 50:
insufficient = 50 - subtotal
print(f"To receive the discount, add {insufficient} to your order.")
else:
discount = round(subtotal * DISC_RATE, 2)
print(f"Discount amount: {discount}")
subtotal -= discount
# Compute the sales tax. Notice that we compute the sales tax
# after computing the discount because the customer does not
# pay sales tax on the full price but on the discounted price.
sales_tax = round(subtotal * SALES_TAX_RATE, 2)
print(f"Sales tax amount: {sales_tax}")
# Compute the total by adding the subtotal and the sales tax.
total = subtotal + sales_tax
# Display the total for the user to see.
print(f"Total: {total:.2f}")
| StarcoderdataPython |
1627216 | <gh_stars>0
import pickle
import sys
import tensorflow as tf
from tqdm import tqdm
def get_labels():
    """Return a list of our trained labels so we can
    test our training accuracy. The file is in the
    format of one label per line, in the same order
    as the predictions are made. The order can change
    between training runs."""
    labels = []
    with open("./inception/retrained_labels.txt", 'r') as fin:
        for line in fin:
            labels.append(line.rstrip('\n'))
    return labels
def predict_on_frames(frames, batch):
    """Given a list of frames, predict all their classes.

    Args:
        frames: list of (filename, label) pairs.
        batch: batch name used to locate images under ./data/images/<batch>/.

    Returns:
        list of [softmax_scores, label] pairs, one entry per frame that was
        predicted successfully (failed frames are skipped with a message).
    """
    # Unpersists graph from file
    with tf.gfile.FastGFile("./inception/retrained_graph.pb", 'rb') as fin:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fin.read())
        _ = tf.import_graph_def(graph_def, name='')

    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

        frame_predictions = []
        image_path = './data/images/' + batch + '/'
        pbar = tqdm(total=len(frames))
        for i, frame in enumerate(frames):
            filename = frame[0]
            label = frame[1]

            # Get the image path.
            image = image_path + filename + '.jpg'

            # Read in the image_data
            image_data = tf.gfile.FastGFile(image, 'rb').read()

            try:
                predictions = sess.run(
                    softmax_tensor,
                    {'DecodeJpeg/contents:0': image_data}
                )
                prediction = predictions[0]
            except KeyboardInterrupt:
                print("You quit with ctrl+c")
                sys.exit()
            except Exception:
                # Was a bare ``except:``, which also swallowed SystemExit;
                # narrowed so deliberate exits still propagate.
                print("Error making prediction, continuing.")
                continue

            # Save the probability that it's each of our classes.
            frame_predictions.append([prediction, label])

            if i > 0 and i % 10 == 0:
                pbar.update(10)

        pbar.close()

    return frame_predictions
def get_accuracy(predictions, labels):
    """After predicting on each batch, check that batch's
    accuracy to make sure things are good to go. This is
    a simple accuracy metric, and so doesn't take confidence
    into account, which would be a better metric to use to
    compare changes in the model.

    Args:
        predictions: list of (scores, label) pairs, where ``scores`` is an
            array-like exposing ``tolist()`` (e.g. a numpy array).
        labels: class names, in the same order as the score columns.

    Returns:
        Fraction of predictions whose argmax class matches the label;
        0.0 for an empty list (previously raised ZeroDivisionError).
    """
    if not predictions:
        return 0.0

    correct = 0
    for frame in predictions:
        # Get the highest confidence class.
        this_prediction = frame[0].tolist()
        this_label = frame[1]

        max_index = this_prediction.index(max(this_prediction))
        predicted_label = labels[max_index]

        # Now see if it matches.
        if predicted_label == this_label:
            correct += 1

    return correct / len(predictions)
def main():
    """Predict on every batch of labeled frames, report accuracy and
    persist the predictions alongside the input pickles."""
    batches = ['1']
    labels = get_labels()

    for batch in batches:
        print("Doing batch %s" % batch)
        with open('data/labeled-frames-' + batch + '.pkl', 'rb') as fin:
            frames = pickle.load(fin)

        # Predict on this batch and get the accuracy.
        batch_predictions = predict_on_frames(frames, batch)
        accuracy = get_accuracy(batch_predictions, labels)
        print("Batch accuracy: %.5f" % accuracy)

        # Save it.
        with open('data/predicted-frames-' + batch + '.pkl', 'wb') as fout:
            pickle.dump(batch_predictions, fout)

    print("Done.")
1652501 | <reponame>althayr/pyomr
import cv2
import numpy as np
import matplotlib.pyplot as plt
def to_rgb(img):
    """Convert a BGR image (OpenCV's default channel order) to RGB."""
    converted = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return converted
def to_bgr(img):
    """Convert an RGB image to OpenCV's BGR channel order."""
    converted = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return converted
def plot_rgb(img):
    """Display an RGB image in a fresh 9x6-inch matplotlib figure."""
    figure_size = (9, 6)
    plt.figure(figsize=figure_size)
    return plt.imshow(img)
def plot_bgr(img):
    """Display a BGR image (converted to RGB first) in a fresh 9x6-inch figure."""
    figure_size = (9, 6)
    plt.figure(figsize=figure_size)
    return plt.imshow(to_rgb(img))
def plot_gray(img):
    """Display a single-channel image with a reversed-greys colormap."""
    figure_size = (9, 6)
    plt.figure(figsize=figure_size)
    return plt.imshow(img, cmap="Greys_r")
| StarcoderdataPython |
4820203 | <gh_stars>0
import pytest
import rumps
from src.app_functions.exceptions.login_failed import LoginFailed
from src.app_functions.menu.change_credentials import change_credentials
@pytest.fixture(name="basic_app")
def create_app():
"""Creates a basic app object with some variables to pass to functions
Returns:
rumps.App: Basic app
"""
app = rumps.App("TestApp")
return app
def test_functions_called_correctly_succes(mocker, basic_app):
    """Check functionality if login succeeds"""
    mock_input = mocker.patch("src.app_functions.menu.change_credentials.input_credentials")
    mock_login = mocker.patch("src.app_functions.menu.change_credentials.duolingo_login")
    mock_update = mocker.patch("src.app_functions.menu.change_credentials.update_menu")

    change_credentials(basic_app)

    mock_input.assert_called_once_with()
    mock_login.assert_called_once_with(basic_app)
    mock_update.assert_called_once_with(basic_app)
    assert basic_app.logged_in is True
def test_functions_called_correctly_failure(mocker, basic_app):
    """Check functionality if login fails"""
    mock_input = mocker.patch("src.app_functions.menu.change_credentials.input_credentials")
    mock_login = mocker.patch(
        "src.app_functions.menu.change_credentials.duolingo_login", side_effect=LoginFailed
    )
    mock_update = mocker.patch("src.app_functions.menu.change_credentials.update_menu")

    change_credentials(basic_app)

    mock_input.assert_called_once_with()
    mock_login.assert_called_once_with(basic_app)
    mock_update.assert_called_once_with(basic_app)
    assert basic_app.logged_in is False
| StarcoderdataPython |
3332732 | # -*- coding: utf-8 -*-
from smartPeak.core.SequenceHandler import SequenceHandler
from smartPeak.core.SequenceProcessor import SequenceProcessor
from smartPeak.io.SequenceWriter import SequenceWriter
# NOTE(review): naming this class ``__main__`` shadows the conventional
# module name and is unusual — confirm external callers before renaming.
class __main__():
    """Example smartPeak LC-MS/MRM processing workflows.

    Each method wires up a SequenceHandler/SequenceProcessor/SequenceWriter
    trio against a single directory, runs a named list of raw-data
    processing steps, and writes CSV summaries into the same directory.
    """

    def example_LCMS_MRM_Unknowns(
        self,
        dir_I,
        delimiter_I=",",
        verbose_I=False,
        *Args,
        **Kwargs
    ):
        """Run the AbsoluteQuantitation python pipeline

        Args:
            dir_I (str): name of the directory (filenames will be created dynamically)
            verbose (bool): print command line statements to stdout
        """
        sequenceHandler = SequenceHandler()
        sequenceProcessor = SequenceProcessor()
        sequenceWriter = SequenceWriter()

        # set the directory for all files and data
        sequenceHandler.setDirStatic(dir_I)
        sequenceHandler.setDirDynamic(dir_I)

        sequenceProcessor.createSequence(
            sequenceHandler,
            delimiter=","
        )

        # process all files
        # NOTE(review): "filter_features" is listed twice — presumably a
        # deliberate two-pass filter, but verify it is not a copy/paste slip.
        raw_data_processing_methods = [
            "load_raw_data",
            # "load_features",
            "pick_features",
            "filter_features",
            "filter_features",
            "select_features",
            # "validate_features",
            "quantify_features",
            "check_features",
            "store_features",
            # "plot_features"
        ]
        # # load and plot all files
        # raw_data_processing_methods = [
        #     "load_raw_data",
        #     "load_features",
        #     "plot_features"
        # ]
        sequenceProcessor.processSequence(
            sequenceHandler,
            raw_data_processing_methods_I=raw_data_processing_methods,
            verbose_I=True)

        # write out a summary of all files
        sequenceSummary_csv_i = '''%s/SequenceSummary.csv''' % (dir_I)
        sequenceWriter.write_dataMatrixFromMetaValue(
            sequenceHandler,
            filename=sequenceSummary_csv_i,
            meta_data=['calculated_concentration'],
            sample_types=['Unknown']
        )
        featureSummary_csv_i = '''%s/FeatureSummary.csv''' % (dir_I)
        sequenceWriter.write_dataTableFromMetaValue(
            sequenceHandler,
            filename=featureSummary_csv_i,
            meta_data=[
                "peak_apex_int", "total_width", "width_at_50",
                "tailing_factor", "asymmetry_factor", "baseline_delta_2_height",
                "points_across_baseline", "points_across_half_height", "logSN",
                "calculated_concentration",
                "QC_transition_message", "QC_transition_pass", "QC_transition_score",
                "QC_transition_group_message", "QC_transition_group_score"],
            sample_types=['Unknown']
        )

    def example_LCMS_MRM_Standards(
        self,
        dir_I,
        delimiter_I=",",
        verbose_I=False,
        *Args,
        **Kwargs
    ):
        """Run the AbsoluteQuantitation python pipeline

        Args:
            dir_I (str): name of the directory (filenames will be created dynamically)
            verbose (bool): print command line statements to stdout
        """
        sequenceHandler = SequenceHandler()
        sequenceProcessor = SequenceProcessor()
        sequenceWriter = SequenceWriter()

        # set the directory for all files and data
        sequenceHandler.setDirStatic(dir_I)
        sequenceHandler.setDirDynamic(dir_I)

        sequenceProcessor.createSequence(
            sequenceHandler,
            delimiter=","
        )

        # 1. process all files (feature picking/filtering only; quantification
        # happens in step 3 once calibration curves exist)
        raw_data_processing_methods = [
            "load_raw_data",
            # "load_features",
            "pick_features",
            "filter_features",
            "filter_features",
            "select_features",
            "check_features",
            "store_features",
            # "plot_features"
        ]
        sequenceProcessor.processSequence(
            sequenceHandler,
            raw_data_processing_methods_I=raw_data_processing_methods,
            verbose_I=True)

        # 2. process optimize calibrators
        sequence_segment_processing_methods = [
            "calculate_calibration",
            "plot_calibrators",
            "store_quantitation_methods",
            # "load_quantitation_methods",
            # "store_components_to_concentrations"
        ]
        sequenceProcessor.processSequenceSegments(
            sequenceHandler,
            sequence_segment_processing_methods_I=sequence_segment_processing_methods,
            verbose_I=True)

        # 3. quantify standards for QC
        raw_data_processing_methods = [
            "quantify_features",
            "check_features",
            "store_features",
            # "plot_features"
        ]
        # # load and plot all files
        # raw_data_processing_methods = [
        #     "load_raw_data",
        #     "load_features",
        #     "plot_features"
        # ]
        sequenceProcessor.processSequence(
            sequenceHandler,
            raw_data_processing_methods_I=raw_data_processing_methods,
            verbose_I=True)

        # write out a summary of all files
        sequenceSummary_csv_i = '''%s/SequenceSummary.csv''' % (dir_I)
        sequenceWriter.write_dataMatrixFromMetaValue(
            sequenceHandler,
            filename=sequenceSummary_csv_i,
            meta_data=['calculated_concentration'],
            sample_types=['Standard']
        )

    def example_LCMS_MRM_Validation(
        self,
        dir_I,
        delimiter_I=",",
        verbose_I=False,
        *Args,
        **Kwargs
    ):
        """Run the AbsoluteQuantitation python pipeline

        Args:
            dir_I (str): name of the directory (filenames will be created dynamically)
            verbose (bool): print command line statements to stdout
        """
        sequenceHandler = SequenceHandler()
        sequenceProcessor = SequenceProcessor()
        sequenceWriter = SequenceWriter()

        # set the directory for all files and data
        sequenceHandler.setDirStatic(dir_I)
        sequenceHandler.setDirDynamic(dir_I)

        sequenceProcessor.createSequence(
            sequenceHandler,
            delimiter=","
        )

        # process all files; features are validated but neither quantified
        # nor stored in this workflow
        raw_data_processing_methods = [
            "load_raw_data",
            # "load_features",
            "pick_features",
            "filter_features",
            "select_features",
            "validate_features",
            # "quantify_features",
            # "check_features",
            # "store_features",
            # "plot_features"
        ]
        # # process all files
        # raw_data_processing_methods = [
        #     "load_raw_data",
        #     "load_features",
        # ]
        sequenceProcessor.processSequence(
            sequenceHandler,
            raw_data_processing_methods_I=raw_data_processing_methods,
            verbose_I=True)

        # write out a summary of all files
        sequenceSummary_csv_i = '''%s/SequenceSummary.csv''' % (dir_I)
        sequenceWriter.write_dataMatrixFromMetaValue(
            sequenceHandler,
            filename=sequenceSummary_csv_i,
            meta_data=['calculated_concentration'],
            sample_types=['Unknown']
        )
        featureSummary_csv_i = '''%s/FeatureSummary.csv''' % (dir_I)
        sequenceWriter.write_dataTableFromMetaValue(
            sequenceHandler,
            filename=featureSummary_csv_i,
            meta_data=[
                "peak_apex_int", "total_width", "width_at_50",
                "tailing_factor", "asymmetry_factor", "baseline_delta_2_height",
                "points_across_baseline", "points_across_half_height", "logSN",
                "calculated_concentration",
                "QC_transition_message", "QC_transition_pass", "QC_transition_score",
                "QC_transition_group_message", "QC_transition_group_score"],
            sample_types=['Unknown']
        )
3343150 | <reponame>cnm06/Competitive-Programming
def min_flips(pancakes, size):
    """Return the minimum number of ``size``-wide sign flips needed to turn
    every character of ``pancakes`` into '+', or None when it is impossible.

    Greedy left-to-right scan: the leftmost '-' can only ever be fixed by a
    flip starting exactly at its position, so flip there and count it.
    (The original code attempted item assignment into a str — a TypeError —
    tested the wrong index in the inner loop, and wrote the raw token list
    instead of a result.)
    """
    cakes = list(pancakes)
    flips = 0
    for j in range(len(cakes) - size + 1):
        if cakes[j] == '-':
            for k in range(j, j + size):
                cakes[k] = '+' if cakes[k] == '-' else '-'
            flips += 1
    # Any '-' left in the final window cannot be fixed.
    if '-' in cakes:
        return None
    return flips


def main():
    """Solve every case in sample-input.in, writing answers to sample-output.out."""
    with open('sample-input.in') as f, open('sample-output.out', 'w') as o:
        t = int(f.readline().strip())
        for i in range(1, t + 1):
            pancakes, size = f.readline().strip().split(" ")
            answer = min_flips(pancakes, int(size))
            result = 'IMPOSSIBLE' if answer is None else answer
            o.write("Case #{}: {}\n".format(i, result))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3332184 | import operator
from OOSML import SmlObject, SmlPredicate
import Python_sml_ClientInterface as sml
def iterator_is_empty(iterator):
    """Return True if ``iterator`` yields no more items.

    NOTE: this consumes one item when the iterator is non-empty.
    Fixes the py2-only ``iter.next()`` call (py3 uses the ``next()``
    builtin) and stops shadowing the ``iter`` builtin.
    """
    try:
        next(iterator)
    except StopIteration:
        return True
    return False
def output_event_handler(id, userData, kernel, runFlags):
    """sml update callback; ``userData`` is the PddlBaseEnv instance that
    registered itself for smlEVENT_AFTER_ALL_OUTPUT_PHASES."""
    userData.update()
def init_event_handler(id, userData, agent):
    """sml agent callback; re-initialises the PddlBaseEnv after the agent
    is reinitialised (smlEVENT_AFTER_AGENT_REINITIALIZED)."""
    userData.init()
class PddlBaseEnv:
    """Base environment that mirrors a PDDL-style world state onto a Soar
    agent's input-link and applies the agent's output-link actions.

    Subclasses are expected to provide ``add_predicate_<name>`` methods for
    every legal predicate and ``handle_action_<name>`` handlers for every
    output-link command.
    """

    def __init__(self, agent):
        self.agent = agent
        kernel = agent.GetKernel()
        self.__update_event_id = kernel.RegisterForUpdateEvent(sml.smlEVENT_AFTER_ALL_OUTPUT_PHASES, output_event_handler, self)
        self.__reinit_event_id = kernel.RegisterForAgentEvent(sml.smlEVENT_AFTER_AGENT_REINITIALIZED, init_event_handler, self)

        # A dict from predicate names to dicts, where each value dict is a map
        # from tuples of object refs (representing the parameters of a
        # particular instance of the predicate) to SmlPredicate references
        self.predicates = {}
        # dict from object ID strings to their SmlObject references
        self.objects = {}
        # dict from object ID strings to their types
        self.types = {}

        # if we initialize from a static state representation, then we should
        # be able to reinit to it. So assign this variable to the static rep.
        self.init_state = None

        # We can't add and delete predicates immediately while handling an
        # action, since effects listed earlier could interfere with conditional
        # effects listed later. So maintain add and delete buffers
        self.predicate_add_buffer = []
        self.predicate_del_buffer = []

        self.state_wme = self.agent.CreateIdWME(self.agent.GetInputLink(), 'state')
        self.entity_count_wme = self.agent.CreateIntWME(self.agent.GetInputLink(), 'entity-count', 0)
        self.num_updates_wme = self.agent.CreateIntWME(self.agent.GetInputLink(), 'num-updates', 0)
        self.agent.Commit()

        self.num_updates = 0

    def init(self):
        """Reset the environment to the last static state it was built from."""
        if not self.init_state:
            # Was py2-only ``raise NotImplementedError, "..."`` syntax.
            raise NotImplementedError("No initial state defined")
        self.num_updates = 0
        self.from_static(self.init_state)

    def destroy_objs_preds(self):
        """Destroy every SmlPredicate and SmlObject and clear the lookups."""
        for name, pmap in self.predicates.items():
            for pred in pmap.values():
                pred.destroy()
            self.predicates[name] = {}
        for obj in self.objects.values():
            obj.destroy()
        self.objects = {}
        self.types = {}

    def destroy(self):
        """Unregister sml callbacks and tear down all WMEs created in __init__."""
        self.agent.GetKernel().UnregisterForUpdateEvent(self.__update_event_id)
        self.agent.GetKernel().UnregisterForAgentEvent(self.__reinit_event_id)
        self.destroy_objs_preds()
        self.agent.DestroyWME(self.state_wme)
        self.agent.DestroyWME(self.entity_count_wme)
        # Was leaked: num_updates_wme is created in __init__ but was never
        # destroyed alongside its siblings.
        self.agent.DestroyWME(self.num_updates_wme)

    def update(self):
        """Dispatch every pending output-link command to its handler."""
        for i in range(self.agent.GetNumberCommands()):
            cmd = self.agent.GetCommand(i)
            if cmd.GetParameterValue('status') == None:
                # Was getattr() without a default, which raises AttributeError
                # before the ``if handler`` guard could ever see a miss.
                handler = getattr(self, 'handle_action_%s' % cmd.GetCommandName().strip('|'), None)
                if handler:
                    err_msg = handler(cmd)
                    if err_msg:
                        self.agent.CreateStringWME(cmd, 'status', 'error')
                        self.agent.CreateStringWME(cmd, 'message', err_msg)
                    else:
                        cmd.AddStatusComplete()
        self.agent.Commit()

    def buffer_predicate_add(self, pred_name, *params):
        """Queue a predicate to become true at the next do_buffered_actions()."""
        self.predicate_add_buffer.append((pred_name, tuple(params)))

    def buffer_predicate_delete(self, pred_name, *params):
        """Queue a predicate to become false at the next do_buffered_actions()."""
        self.predicate_del_buffer.append((pred_name, tuple(params)))

    def do_buffered_actions(self):
        """Apply all buffered predicate adds/deletes and refresh counters."""
        for pred_name, params in self.predicate_add_buffer:
            # Default of None makes the illegal-predicate check reachable
            # (getattr without a default raised AttributeError first).
            add_method = getattr(self, 'add_predicate_%s' % pred_name, None)
            if not add_method:
                raise Exception('Illegal predicate %s' % pred_name)
            add_method(*params)

        for pred_name, params in self.predicate_del_buffer:
            pred = self.predicates[pred_name].pop(params, None)
            # I used to assume that the predicate being falsified must be
            # currently true, but that doesn't seem to be the case with
            # D1S1.pddl. So if a predicate is already not true, I'm just going
            # to do nothing.
            if pred:
                pred.destroy()

        self.predicate_add_buffer = []
        self.predicate_del_buffer = []

        self.agent.Update(self.entity_count_wme, sum(len(x) for x in self.predicates.values()) + len(self.objects))
        self.num_updates += 1
        self.agent.Update(self.num_updates_wme, self.num_updates)
        self.agent.Commit()

    # Creates a static, recoverable, and comparable representation of the
    # current state, suitable for pickling.
    def get_static(self):
        # map from object refs to id strings
        obj_lookup = dict(reversed(x) for x in self.objects.items())

        # The predicates dict is transformed into a list of pairs
        # (predicate name, frozensets of tuples of ID strings)
        # the list is sorted by predicate name
        predicate_table = []
        for predname in sorted(self.predicates.keys()):
            relmap = self.predicates[predname]
            true_set = frozenset(tuple(obj_lookup[y] for y in x) for x in relmap.keys())
            predicate_table.append((predname, true_set))

        # the type dict is changed into a list of pairs (object id, type),
        # sorted by object id
        type_table = tuple(sorted(self.types.items(), key=operator.itemgetter(0)))

        return (type_table, tuple(predicate_table))

    # Reconstruct the state from the static representation
    def from_static(self, static_rep):
        self.init_state = static_rep
        self.destroy_objs_preds()

        types_table, predicate_table = static_rep
        self.types = dict(types_table)
        for obj_id, obj_type in types_table:
            self.objects[obj_id] = SmlObject(self.agent, obj_type, obj_id)

        for predname, true_set in predicate_table:
            # These methods should be defined by the domain-specific subclass.
            # Default of None keeps the error message below reachable.
            predicate_add_method = getattr(self, 'add_predicate_%s' % predname, None)
            if not predicate_add_method:
                raise Exception('Static representation contains an illegal predicate "%s"' % predname)
            for s in true_set:
                obj_params = [self.objects[i] for i in s]
                predicate_add_method(*obj_params)
| StarcoderdataPython |
159242 | <reponame>factioninc/snmp-unity-agent<filename>snmpagent_unity/unity_impl/HostInitiators.py
class HostInitiators(object):
    """SNMP scalar implementation: resolves a host's initiators via the
    Unity client."""

    def read_get(self, name, idx_name, unity_client):
        """Return the initiators for the host identified by ``idx_name``."""
        initiators = unity_client.get_host_initiators(idx_name)
        return initiators
class HostInitiatorsColumn(object):
    """SNMP column implementation: enumerates the host indices used to
    expand the initiators column."""

    def get_idx(self, name, idx, unity_client):
        """Return the list of hosts from the Unity client."""
        hosts = unity_client.get_hosts()
        return hosts
| StarcoderdataPython |
3287467 | <gh_stars>10-100
# --------------------------------------------------------
# (c) Copyright 2014 by <NAME>.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
from pymonad.Reader import *
# Curried unary negation, used as a fixture by the Reader tests below.
@curry
def neg(x): return -x
# Curried subtraction fixture; sub(a)(b) == a - b.
@curry
def sub(x, y): return x - y
# Curried addition fixture; add(a)(b) == a + b.
@curry
def add(x, y): return x + y
# Curried multiplication fixture; mul(a)(b) == a * b.
@curry
def mul(x, y): return x * y
class ReaderTests(unittest.TestCase):
    """Exercises pymonad's Reader: currying plus the functor (<<),
    applicative (&) and monadic bind (>>) operators."""

    def testCurry(self):
        @curry
        def add(x, y, z): return x + y + z

        @curry
        def sub(x, y, z): return x - y - z

        # Every way of splitting the arguments must yield the same result.
        self.assertEqual(add(1, 2, 3), add(1)(2, 3))
        self.assertEqual(add(1, 2, 3), add(1, 2)(3))
        self.assertEqual(add(1, 2, 3), add(1)(2)(3))
        self.assertEqual(add(1, 2, 3), 6)
        self.assertEqual(sub(3, 2, 1), sub(3)(2, 1))
        self.assertEqual(sub(3, 2, 1), sub(3, 2)(1))
        self.assertEqual(sub(3, 2, 1), sub(3)(2)(1))
        self.assertEqual(sub(3, 2, 1), 0)

    def testReaderFunctor(self):
        # ``f << g`` composes: (f << g)(x) == f(g(x)).
        comp1 = neg << sub(4)
        comp2 = sub(4) << neg
        comp3 = neg << sub(4) << neg

        self.assertEqual(comp1(3), -1)
        self.assertEqual(comp2(3), 7)
        self.assertEqual(comp3(3), -7)

    def testReaderApplicative(self):
        # add applied to both mul(5)(5) and mul(6)(5): 25 + 30 == 55.
        x = add << mul(5) & mul(6)
        self.assertEqual(x(5), 55)

    def testReaderMonad(self):
        # Bind threads the environment (3) through each step: 2*3 + (10+3).
        x = (mul(2) >> (lambda a: add(10) >> (lambda b: Reader(a+b))))
        self.assertEqual(x(3), 19)
class TestReaderUnit(unittest.TestCase):
    """Reader.unit must produce a constant function that ignores its
    environment argument."""

    def testUnitOnReader(self):
        self.assertEqual(Reader.unit(8)("dummy value not used"), 8)
        self.assertEqual(unit(Reader, 8)("dummy value not used"), 8)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3323960 | <filename>dnv_rp_c205_functions.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Éd<NAME>
Ceci est un script temporaire.
"""
import numpy as np
from scipy import interpolate
def am_3d_square_prism(a, b):
    """Added-mass coefficient and reference volume for a 3D square prism.

    Interpolates a tabulated coefficient against the aspect ratio b/a;
    ratios outside the table are clamped to the end-point values.
    Table values presumably tabulated from DNV-RP-C205 — TODO confirm.

    :param a: side length of the square cross section
    :param b: prism length
    :return: tuple ``(Ca, Vr)`` — added-mass coefficient and reference
        volume ``a**2 * b``
    """
    xi = b / a

    data = np.array([[1.00, 0.68],
                     [2.00, 0.36],
                     [3.00, 0.24],
                     [4.00, 0.19],
                     [5.00, 0.15],
                     [6.00, 0.13],
                     [7.00, 0.11],
                     [10.0, 0.08]])
    x = data[:, 0]
    y = data[:, 1]

    # Quadratic interpolation; outside the tabulated range the first/last
    # value is returned instead of raising.
    interpolation_function = interpolate.interp1d(x, y,
                                                  kind='quadratic',
                                                  bounds_error=False,
                                                  fill_value=(y[0], y[-1]))

    Ca = interpolation_function(xi)
    Vr = a**2 * b
    return Ca, Vr
def am_3d_circular_cylinder(a, b):
    """Added-mass coefficient and reference volume for a 3D circular cylinder.

    Interpolates a tabulated coefficient against the slenderness b/(2a);
    ratios outside the table are clamped to the end-point values.
    Table values presumably tabulated from DNV-RP-C205 — TODO confirm.

    :param a: cylinder radius
    :param b: cylinder length
    :return: tuple ``(Ca, Vr)`` — added-mass coefficient and reference
        volume ``pi * a**2 * b``
    """
    xi = b / (2 * a)

    data = np.array([[1.20, 0.62],
                     [2.50, 0.78],
                     [5.00, 0.90],
                     [9.00, 0.96],
                     [99.00, 1.00]])
    x = data[:, 0]
    y = data[:, 1]

    # Quadratic interpolation, clamped to the first/last tabulated value
    # outside the tabulated range.
    interpolation_function = interpolate.interp1d(x, y,
                                                  kind='quadratic',
                                                  bounds_error=False,
                                                  fill_value=(y[0], y[-1]))

    Ca = interpolation_function(xi)
    Vr = np.pi * a**2 * b
    return Ca, Vr
def am_3d_rectangular_plates(a, b):
    """Added-mass coefficient and reference volume for a 3D rectangular plate.

    Interpolates a tabulated coefficient against the aspect ratio b/a;
    ratios outside the table are clamped to the end-point values.
    Table values presumably tabulated from DNV-RP-C205 — TODO confirm.

    :param a: plate width
    :param b: plate length
    :return: tuple ``(Ca, Vr)`` — added-mass coefficient and reference
        volume ``pi * a**2 * b / 4``
    """
    xi = b / a

    data = np.array([[1.00, 0.579],
                     [1.25, 0.642],
                     [1.50, 0.690],
                     [1.59, 0.704],
                     [2.00, 0.757],
                     [2.50, 0.801],
                     [3.00, 0.830],
                     [3.17, 0.840],
                     [4.00, 0.872],
                     [5.00, 0.897],
                     [6.25, 0.917],
                     [8.00, 0.934],
                     [10.0, 0.947],
                     [99.0, 1.00]])
    x = data[:, 0]
    y = data[:, 1]

    # Quadratic interpolation, clamped to the first/last tabulated value
    # outside the tabulated range.
    interpolation_function = interpolate.interp1d(x, y,
                                                  kind='quadratic',
                                                  bounds_error=False,
                                                  fill_value=(y[0], y[-1]))

    Ca = interpolation_function(xi)
    Vr = np.pi * a**2 * b / 4.
    return Ca, Vr
def am_2d_I_beam(a, b, c):
    """
    NOTE(review): this function looks unfinished — ``zi`` is computed but
    never used, the coefficient table is identical to the rectangular-plate
    table above rather than I-beam data, and the returned Vr is the plate
    formula. Verify against the intended reference before relying on it.
    It is also shadowed by a later redefinition of ``am_2d_I_beam``.
    """
    xi = b/a
    zi = c/a

    data = np.array([[1.00,0.579],
                     [1.25,0.642],
                     [1.50,0.690],
                     [1.59,0.704],
                     [2.00,0.757],
                     [2.50,0.801],
                     [3.00,0.830],
                     [3.17,0.840],
                     [4.00,0.872],
                     [5.00,0.897],
                     [6.25,0.917],
                     [8.00,0.934],
                     [10.0,0.947],
                     [99.0,1.00]])
    x = data[:,0]
    y = data[:,1]

    # Define an interpolation function
    interpolation_function = interpolate.interp1d(x,y,
                                                  kind='quadratic',
                                                  bounds_error=False,
                                                  fill_value=(y[0], y[-1]))

    Ca = interpolation_function(xi)
    Vr = np.pi*a**2*b/4.
    return Ca,Vr
"""
test functions
"""
print(am_3d_square_prism(4,2))
xnew = np.linspace(0, 50, num=1000, endpoint=True)
import matplotlib.pyplot as plt
plt.plot(xnew, am_3d_circular_cylinder(1,xnew)[0], '-')
plt.plot(xnew, am_3d_rectangular_plates(1,xnew)[0], '--')
def am_2d_I_beam(H, B, tp, L):
    """
    NOTE(review): silently redefines the earlier ``am_2d_I_beam`` with a
    different signature. The geometry locals and the 2-D table (with NaN
    holes) are built but nothing is interpolated or returned — this appears
    to be work in progress.
    """
    #am_2d_I_beam
    c = H-tp
    a = B
    b = tp
    x_1 = np.array([0.1,0.2,0.4,1.0])
    y_1 = np.array([0.5,1.0,1.5,2.0,3.0,4.0])
    z_1 = np.array([[4.7, 2.6 , 1.3 ,np.NAN],
                    [5.2, 3.2 , 1.7 ,0.6],
                    [5.8, 3.7 , 2.0 ,0.7],
                    [6.4, 4.0 , 2.3 ,0.9],
                    [7.2, 4.6 , 2.5 ,1.1],
                    [np.NAN, 4.8 , np.NAN ,np.NAN]])
| StarcoderdataPython |
3318261 | <gh_stars>0
from application import app#, login_manager
from flask import render_template, request, redirect, url_for, flash, make_response, session, abort, json, jsonify
# from sqlalchemy import or_
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_required, login_user,current_user, logout_user
from .models import User, UserInfo, UserSocialPages, Student, Teacher, EducationalGroup, EducationalCourse, CourseMaterial, db, db_add_objects, CourseResponsiblePerson, CourseHometask, StudentHometask
from .forms import LoginForm, RegisterForm, PersonalCabinetForm, ChangePassword, CourseAddMaterialForm, CourseHometaskForm, StudentHometaskForm
from datetime import datetime
@app.errorhandler(404)
def error404(error):
    """Render a minimal Russian-language 404 page.

    Fixes the typo "существет" -> "существует" in the user-facing text.
    """
    return '<h1>Ошибка 404</h1><p>К сожалению, такой страницы не существует, либо у вас недостаточно прав для ее просмотра</p>'
@app.route('/', methods=['GET'])
@login_required
def index():
    """Dashboard: shows the current user's group, groupmates and courses.

    Students see their group name, groupmates and the group's courses;
    teachers see the courses they teach. Both queries use parameterized raw
    SQL (quoted table names presumably match the PostgreSQL schema — TODO
    confirm).
    """
    student = db.session.query(Student).filter(Student.user_id == current_user.id).first()
    teacher = db.session.query(Teacher).filter(Teacher.user_id == current_user.id).first()
    groupmates = []
    group_name = ''
    courses = []

    if student:
        educational_group = db.session.query(EducationalGroup).filter(EducationalGroup.id == student.educational_group_id).first()
        group_name = educational_group.group_name

        # Rows come back positionally: (user_id, surname, name, second_name).
        query_groupmates = db.session.execute('SELECT t2.id as user_id, t2.surname, t2.name, t2.second_name FROM students t1 INNER JOIN users t2 ON (t1.user_id = t2.id) WHERE t1.educational_group_id = :group_id', {'group_id': educational_group.id})
        groupmates = []
        for r in query_groupmates:
            groupmate = {'id':r[0],'surname':r[1],'name':r[2],'second_name':r[3]}
            groupmates.append(groupmate)

        query_courses = db.session.execute('SELECT t1.id as course_id, t1.course_name FROM "EducationalСourse" t1 INNER JOIN "Course_X_Group" t2 ON (t1.id = t2.course_id) WHERE t2.group_id = :group_id', {'group_id': educational_group.id})
        for r in query_courses:
            course = {'id':r[0],'course_name':r[1]}
            courses.append(course)

    if teacher:
        query_courses = db.session.execute(
            """SELECT t1.id as course_id, t1.course_name
            FROM "EducationalСourse" t1
            INNER JOIN teachers t2 ON (t1.teacher_id = t2.id)
            WHERE t2.user_id = :user_id""",
            {'user_id': current_user.id})
        for r in query_courses:
            course = {'id':r[0],'course_name':r[1]}
            courses.append(course)

    return render_template('index.html', group_name=group_name, groupmates=groupmates, courses=courses )
@app.route('/admin/')
@login_required
def admin():
    """Render the admin page for authenticated users."""
    return render_template('admin.html')
@app.route('/register/', methods=['post', 'get'])
def register():
    """Registration: claims a pre-created user account by matching the
    submitted verification code against stored registration hashes."""
    form = RegisterForm()
    if form.validate_on_submit():
        # Reject e-mail addresses that are already taken.
        existing = db.session.query(User).filter(User.email == form.email.data).first()
        if existing is not None:
            flash("This E-mail is already reserved", 'danger')
            return redirect(url_for('register'))

        # Accounts awaiting registration: no e-mail yet, but a hash to match.
        pending = db.session.query(User).filter(User.email == None).filter(User.registration_password_hash != None).all()
        for candidate in pending:
            if check_password_hash(candidate.registration_password_hash, form.registration_password.data):
                candidate.set_email(form.email.data)
                candidate.set_password(form.password.data)
                db_add_objects(candidate)
                login_user(candidate)
                return redirect(url_for('index'))

        flash("Invalid verification code", 'danger')
        return redirect(url_for('register'))

    return render_template('register.html', form=form)
@app.route('/login/', methods=['post', 'get'])
def login():
    """Log a user in by e-mail and password.

    Removed the leftover debug ``print`` statements and commented-out
    flash examples; the login flow itself is unchanged.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))

    form = LoginForm()
    if form.validate_on_submit():
        user = db.session.query(User).filter(User.email == form.email.data).first()
        if user and user.check_password(form.password.data):
            login_user(user, remember=form.remember.data)
            flash("Вы вошли в систему", 'success')
            return redirect(url_for('index'))

        flash("Invalid email/password", 'danger')
        return redirect(url_for('login'))

    return render_template('login.html', form=form)
@app.route('/logout/')
@login_required
def logout():
    """Log the current user out and return to the login page."""
    logout_user()
    flash("You have been logged out.", 'warning')
    return redirect(url_for('login'))
# PUT /personal_cabinet/user_id/<user_id> — AJAX endpoint for profile updates.
@app.route('/personal_cabinet/user_id/<user_id>', methods=['PUT'])
def edit_user_info(user_id):
    """Update the contact info and social pages of the user's own profile.

    Returns JSON:
      * 200 with the submitted form data on success,
      * 400 with ``form.errors`` when validation fails,
      * 403 when the requester is not the owner of the profile.
    """
    user_id = int(user_id)
    # 404 early if the target user does not exist.
    db.session.query(User).filter(User.id == user_id).first_or_404()
    # BUG FIX: previously a non-owner request fell through with `response`
    # unbound (UnboundLocalError -> 500); answer 403 explicitly instead.
    # NOTE(review): current_user.id requires an authenticated session; this
    # route has no @login_required — confirm whether that is intentional.
    if current_user.id != user_id:
        response_data = {'message': 'forbidden', 'code': 'ERROR'}
        return make_response(jsonify(response_data), 403)
    form = PersonalCabinetForm(request.form)
    if form.validate():
        user_info = db.session.query(UserInfo).filter(UserInfo.user_id == user_id).first()
        user_social_pages = db.session.query(UserSocialPages).filter(UserSocialPages.user_id == user_id).first()
        user_info.set_phone(form.phone.data)
        user_info.set_home_region(form.home_region.data)
        user_info.set_detailed_description(form.detailed_description.data)
        user_social_pages.set_vk_page(form.vk.data)
        user_social_pages.set_facebook_page(form.facebook.data)
        user_social_pages.set_linked_in_page(form.linked_in.data)
        user_social_pages.set_instagram_page(form.instagram.data)
        db_add_objects(user_info, user_social_pages)
        response_data = {'message': 'null', 'code': 'SUCCESS', 'form_data': form.data}
        response = make_response(jsonify(response_data), 200)
        flash("Данные обновлены!", 'success')
    else:
        response_data = {'message': 'validation error', 'code': 'ERROR', 'form_errors': form.errors}
        response = make_response(jsonify(response_data), 400)
    return response
@app.route('/personal_cabinet/user_id:<user_id>', methods=['POST', 'GET'])
@login_required
def personal_cabinet(user_id):
    """Show the personal-cabinet page for *user_id*.

    Any logged-in user may view any cabinet; editing controls are enabled
    (``watch_only=False``) only when the viewer owns the profile.
    Students additionally get their educational group name on the page.
    """
    user = db.session.query(User).filter(User.id == user_id).first_or_404()
    user_info = db.session.query(UserInfo).filter(UserInfo.user_id == user_id).first()
    user_social_pages = db.session.query(UserSocialPages).filter(UserSocialPages.user_id == user_id).first()
    student = db.session.query(Student).filter(Student.user_id == user_id).first()
    educational_group_name = ''
    if student:
        educational_group = db.session.query(EducationalGroup).filter(EducationalGroup.id == student.educational_group_id).first()
        educational_group_name = educational_group.group_name
    watch_only = True
    if user.id == current_user.id:
        watch_only = False
    # Pre-populate the form with the stored profile values.
    form = PersonalCabinetForm(
        email=user.email,
        phone=user_info.phone,
        home_region=user_info.home_region,
        detailed_description=user_info.detailed_description,
        vk=user_social_pages.vk,
        facebook=user_social_pages.facebook,
        linked_in=user_social_pages.linked_in,
        instagram=user_social_pages.instagram
    )
    return render_template('personal_cabinet.html', form=form, user=user, user_info=user_info, user_social_pages=user_social_pages, student=student, educational_group_name=educational_group_name, watch_only=watch_only)
@app.route('/change_password/', methods=['POST', 'GET'])
@login_required
def change_password():
    """Let the logged-in user change their password.

    The old password must match before the new one is stored; the user is
    re-logged-in afterwards so the session stays valid.
    """
    user = db.session.query(User).filter(User.id == current_user.id).first_or_404()
    form = ChangePassword()
    if form.validate_on_submit():
        if user.check_password(form.old_password.data):
            user.set_password(form.new_password.data)
            db_add_objects(user)
            login_user(user)
            flash("Пароль успешно изменен", 'success')
            return redirect(url_for('personal_cabinet', user_id=user.id))
        flash("Неправильный пароль", 'danger')
    # Reached both on GET and on a failed POST (duplicate return removed).
    return render_template('change_password.html', form=form, user=user)
@app.route('/course/<course_id>/', methods=['POST', 'GET'])
@app.route('/course/<course_id>/description', methods=['POST', 'GET'])
@app.route('/course/<course_id>/description/added_responsible_person:<added_person_id>', methods=['POST', 'GET'])
@app.route('/course/<course_id>/description/deleted_responsible_person:<deleted_person_id>', methods=['POST', 'GET'])
@login_required
def course(course_id, added_person_id=None, deleted_person_id=None):
    """Course description page.

    Shows the course teacher and the course's "responsible persons"
    (course monitors). The course teacher may add or remove responsible
    persons via the extra ``added_responsible_person`` /
    ``deleted_responsible_person`` routes.
    """
    active_page = 'course_description'
    course = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).first_or_404()
    # Resolve the teaching user (id + full name) for display.
    query_teacher = db.session.execute('SELECT t1.id as teacher_id, t1.surname, t1.name, t1.second_name FROM users t1 INNER JOIN teachers t2 ON (t2.user_id=t1.id) INNER JOIN "EducationalСourse" t3 ON (t3.teacher_id = t2.id) WHERE t3.id = :course_id LIMIT 1', {'course_id': course_id})
    teacher_role = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(EducationalCourse.teacher_id == Teacher.id).filter(Teacher.user_id == current_user.id).first()
    # Only the course teacher may edit the responsible-persons list.
    can_edit_responsible_persons = False
    if (teacher_role):
        can_edit_responsible_persons = True
    if can_edit_responsible_persons==True and added_person_id:
        added_person = course.add_responsible_person(added_person_id)
        db_add_objects(added_person)
        flash("Староста курса добавлен!", 'success')
    if can_edit_responsible_persons==True and deleted_person_id:
        deleted_person = course.remove_responsible_person(deleted_person_id)
        db_add_objects(deleted_person)
        flash("Староста курса удален!", 'success')
    teacher = {}
    for r in query_teacher:
        teacher = {'teacher_id':r[0], 'surname':r[1], 'name':r[2], 'second_name':r[3]}
    # TODO (translated from Russian): switch this join from users over to students.
    query_responsible_persons = db.session.execute(
        """SELECT t3.id as user_id, t3.surname, t3.name, t3.second_name, t4.id as student_id
        FROM "EducationalСourse" t1
        INNER JOIN "Course_Responsible_Person" t2 ON (t1.id=t2.course_id)
        INNER JOIN students t4 ON (t4.id=t2.person_id)
        INNER JOIN users t3 ON (t3.id=t4.user_id)
        WHERE t2.is_active = 1
        and t1.id = :course_id
        ORDER BY t2.updated_on""",
        {'course_id': course_id})
    responsible_persons = []
    for r in query_responsible_persons:
        person = {'user_id':r[0], 'surname':r[1], 'name':r[2], 'second_name':r[3], 'student_id':r[4]}
        responsible_persons.append(person)
    # Students of groups actively linked to the course who are NOT already
    # active responsible persons (the LEFT JOIN + IS NULL filter).
    query_possible_responsible_persons = db.session.execute(
        """SELECT t4.id as student_id, t5.surname, t5.name, t5.second_name
        FROM "EducationalСourse" t1
        INNER JOIN "Course_X_Group" t2 ON (t1.id=t2.course_id)
        INNER JOIN "EducationalGroup" t3 ON (t3.id=t2.group_id)
        INNER JOIN students t4 ON (t4.educational_group_id=t3.id)
        INNER JOIN users t5 ON (t5.id=t4.user_id)
        LEFT JOIN "Course_Responsible_Person" t6 ON ( (t6.course_id=t1.id) and (t6.person_id=t4.id) and (t6.is_active=1) )
        WHERE t1.id = :course_id
        and t2.is_active = 1
        and t6.id IS NULL
        ORDER BY t5.surname, t5.name, t5.second_name""",
        {'course_id': course_id})
    possible_responsible_persons = []
    for r in query_possible_responsible_persons:
        person = {'student_id':r[0], 'surname':r[1], 'name':r[2], 'second_name':r[3]}
        possible_responsible_persons.append(person)
    return render_template('course.html', active_page=active_page, course=course, teacher=teacher, responsible_persons=responsible_persons, can_edit_responsible_persons=can_edit_responsible_persons, possible_responsible_persons=possible_responsible_persons)
@app.route('/course/<course_id>/program', methods=['POST', 'GET'])
@app.route('/course/<course_id>/program/deleted_material_id:<material_id>', methods=['POST', 'GET'])
@login_required
def course_program(course_id, material_id=None):
    """Course materials ("program") page.

    The course teacher and the course's active responsible persons may add
    and delete materials; passing *material_id* via the second route
    deletes that material.
    """
    active_page = 'course_program'
    course = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).first_or_404()
    can_add_material = False
    # Editing rights: the course teacher ...
    teacher = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(EducationalCourse.teacher_id == Teacher.id).filter(Teacher.user_id == current_user.id).first()
    # ... or an active responsible person (course monitor).
    responsible_person = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(CourseResponsiblePerson.course_id == course_id).filter(CourseResponsiblePerson.person_id == Student.id).filter(current_user.id == Student.user_id).filter(CourseResponsiblePerson.is_active == 1).first()
    if (teacher or responsible_person):
        can_add_material = True
    if can_add_material==True and material_id:
        # Soft delete — the SELECT below filters on the `deleted` flag, so
        # delete() presumably marks the row rather than removing it.
        deleted_material = db.session.query(CourseMaterial).filter(CourseMaterial.id == material_id).first()
        deleted_material.delete()
        db_add_objects(deleted_material)
        flash("Материал удален!", 'success')
    query_course_materials = db.session.execute(
        """SELECT t1.id as course_material_id, t1.name, t1.content, date_trunc('second', t1.created_on)
        FROM "Course_Material" t1
        INNER JOIN "EducationalСourse" t2
        ON (t2.id=t1.course_id)
        WHERE t2.id = :course_id and (t1.deleted !=1 or t1.deleted is null)
        ORDER BY t1.created_on""",
        {'course_id': course_id})
    course_materials = []
    for r in query_course_materials:
        course_material = {'course_material_id':r[0], 'name':r[1], 'content':r[2], 'created_dttm':r[3]}
        course_materials.append(course_material)
    return render_template('course_program.html', active_page=active_page, course=course, course_materials=course_materials, can_add_material=can_add_material)
@app.route('/course/<course_id>/hometasks', methods=['POST', 'GET', 'DELETE'])
@app.route('/course/<course_id>/hometasks/deleted_hometask_id:<hometask_id>', methods=['GET', 'POST', 'DELETE'])
@login_required
def course_hometasks(course_id, hometask_id=None):
    """List the hometasks of a course.

    The course teacher may delete a hometask via the second route;
    students of groups actively linked to the course see the list with
    deadline information.
    """
    active_page = 'course_hometasks'
    course = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).first_or_404()
    can_edit_hometask = False
    # Is the current user the teacher of this course?
    teacher = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(EducationalCourse.teacher_id == Teacher.id).filter(Teacher.user_id == current_user.id).first()
    if (teacher):
        can_edit_hometask = True
    if can_edit_hometask==True and hometask_id:
        # Soft delete — the list query below filters on the `deleted` flag.
        deleted_hometask = db.session.query(CourseHometask).filter(CourseHometask.id == hometask_id).first()
        deleted_hometask.delete()
        db_add_objects(deleted_hometask)
        flash("Домашнее задание удалено!", 'success')
    is_student = False
    # Is the current user a student of a group actively linked to the course?
    student = db.session.execute(
        """SELECT t4.id as student_id, t5.surname, t5.name, t5.second_name
        FROM "EducationalСourse" t1
        INNER JOIN "Course_X_Group" t2 ON (t1.id=t2.course_id)
        INNER JOIN "EducationalGroup" t3 ON (t3.id=t2.group_id)
        INNER JOIN students t4 ON (t4.educational_group_id=t3.id)
        INNER JOIN users t5 ON (t5.id=t4.user_id)
        WHERE t1.id = :course_id
        and t2.is_active = 1
        and t5.id = :current_user_id
        LIMIT 1
        """,
        {'course_id': course_id, 'current_user_id':current_user.id}).first()
    # NOTE(review): naive UTC timestamp, compared against hometask deadlines
    # in the template — assumes deadlines are stored in UTC; confirm.
    current_dttm = datetime.utcnow()
    if student:
        is_student = True
    query_course_hometasks = db.session.execute(
        """SELECT t1.id as course_hometask_id, t1.name, t1.content, t1.start_dttm, t1.end_dttm, to_char(t1.end_dttm, 'dd-Mon-YYYY,HH24:MM') as trunced_end_dttm
        FROM course_hometask t1
        INNER JOIN "EducationalСourse" t2
        ON (t2.id=t1.course_id)
        WHERE t2.id = :course_id and (t1.deleted !=1 or t1.deleted is null)
        ORDER BY t1.created_on""",
        {'course_id': course_id})
    course_hometasks = []
    for r in query_course_hometasks:
        course_hometask = {'course_hometask_id':r[0], 'name':r[1], 'content':r[2], 'start_dttm':r[3], 'end_dttm':r[4], 'trunced_end_dttm':r[5]}
        course_hometasks.append(course_hometask)
    return render_template('course_hometasks.html', active_page=active_page, course=course, can_edit_hometask=can_edit_hometask, course_hometasks=course_hometasks, is_student=is_student, current_dttm=current_dttm)
@app.route('/course/<course_id>/add_hometasks/', methods=['POST', 'GET'])
@login_required
def course_add_hometasks(course_id):
    """Teacher-only page for creating a new hometask on a course."""
    active_page = 'course_hometasks'
    course = (db.session.query(EducationalCourse)
              .filter(EducationalCourse.id == course_id)
              .first_or_404())
    # 403 unless the current user teaches this course.
    teacher_role = (db.session.query(EducationalCourse)
                    .filter(EducationalCourse.id == course_id)
                    .filter(EducationalCourse.teacher_id == Teacher.id)
                    .filter(Teacher.user_id == current_user.id)
                    .first())
    if teacher_role is None:
        abort(403)
    form = CourseHometaskForm()
    if not form.validate_on_submit():
        # GET, or POST with validation errors: re-render the form.
        return render_template('course_add_hometask.html', course=course, active_page=active_page, form=form)
    created = course.add_hometask(form.name.data, form.content.data, form.start_dttm.data, form.end_dttm.data)
    db_add_objects(created)
    flash("Домашнее задание успешно добавлено", 'success')
    return redirect(url_for('course_hometasks', course_id=course_id))
@app.route('/course/<course_id>/edit_hometask/<hometask_id>', methods=['POST', 'GET'])
@login_required
def course_edit_hometask(course_id, hometask_id):
    """Teacher-only form for editing an existing hometask."""
    active_page = 'course_hometasks'
    course = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).first_or_404()
    hometask = db.session.query(CourseHometask).filter(CourseHometask.id == hometask_id).first_or_404()
    teacher = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(EducationalCourse.teacher_id == Teacher.id).filter(Teacher.user_id == current_user.id).first()
    if not (teacher):
        abort(403)
    # (translated) The TextArea widget needs its initial value passed to the
    # form constructor — a placeholder attribute does not work for it.
    # NOTE(review): `name` is not pre-filled here even though it is saved
    # below; presumably the template supplies it — confirm.
    form = CourseHometaskForm(content = hometask.content, start_dttm=hometask.start_dttm, end_dttm=hometask.end_dttm)
    if form.validate_on_submit():
        hometask.name = form.name.data
        hometask.content = form.content.data
        hometask.start_dttm = form.start_dttm.data
        hometask.end_dttm = form.end_dttm.data
        db_add_objects(hometask)
        flash("Задание успешно обновлено", 'success')
        return redirect(url_for('course_hometasks', course_id=course_id))
    return render_template('course_edit_hometask.html', course=course, active_page=active_page, form=form, hometask=hometask)
@app.route('/course/<course_id>/hometasks/<hometask_id>', methods=['POST', 'GET'])
@login_required
def course_hometask(course_id, hometask_id):
    """Single hometask page.

    Teachers see every enrolled student's submission status; students see
    (and may submit/update) their own solution. Students are blocked
    (403) before the hometask's start time.
    """
    course = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).first_or_404()
    hometask = db.session.query(CourseHometask).filter(CourseHometask.id == hometask_id).first_or_404()
    deadline = hometask.end_dttm.strftime('%d-%m-%Y %H:%M')
    can_edit_hometask = False
    groups = []
    student_hometasks = []
    teacher = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(EducationalCourse.teacher_id == Teacher.id).filter(Teacher.user_id == current_user.id).first()
    if (teacher):
        can_edit_hometask = True
        # For the teacher: per-student pass status for this hometask
        # (LEFT JOIN on student_hometask; missing row = not submitted).
        query_students_hometasks = db.session.execute(
            """SELECT t4.id as student_id, t5.surname, t5.name, t5.second_name, t3.group_name,
            case when t7.id IS NOT NULL THEN 1 ELSE 0 end as is_hometask_passed,
            to_char(t7.updated_on, 'dd-Mon-YYYY, HH24:mi') as hometask_pass_dttm,
            t7.id as student_hometask_id
            FROM "EducationalСourse" t1
            INNER JOIN "Course_X_Group" t2 ON (t1.id=t2.course_id)
            INNER JOIN "EducationalGroup" t3 ON (t3.id=t2.group_id)
            INNER JOIN students t4 ON (t4.educational_group_id=t3.id)
            INNER JOIN users t5 ON (t5.id=t4.user_id)
            INNER JOIN course_hometask t6 ON (t6.course_id = t1.id)
            LEFT JOIN student_hometask t7 ON ((t7.course_hometask_id = t6.id) AND (t7.student_id = t4.id))
            WHERE t1.id = :course_id
            and t2.is_active = 1
            and t6.id = :hometask_id
            ORDER BY t3.group_name, t5.surname, t5.name, t5.second_name
            """,
            {'course_id': course_id, 'hometask_id':hometask_id })
        for r in query_students_hometasks:
            student_hometask = {'student_id':r[0], 'surname':r[1], 'name':r[2], 'second_name':r[3], 'group_name':r[4], 'is_hometask_passed':r[5], 'hometask_pass_dttm':r[6], 'student_hometask_id':r[7] }
            student_hometasks.append(student_hometask)
            # (translated) Distinct group names of students on the course.
            if student_hometask['group_name'] not in groups:
                groups.append(student_hometask['group_name'])
    is_student = False
    # Is the current user an enrolled student of this course?
    query_student = db.session.execute(
        """SELECT t4.id as student_id, t5.surname, t5.name, t5.second_name
        FROM "EducationalСourse" t1
        INNER JOIN "Course_X_Group" t2 ON (t1.id=t2.course_id)
        INNER JOIN "EducationalGroup" t3 ON (t3.id=t2.group_id)
        INNER JOIN students t4 ON (t4.educational_group_id=t3.id)
        INNER JOIN users t5 ON (t5.id=t4.user_id)
        WHERE t1.id = :course_id
        and t2.is_active = 1
        and t5.id = :current_user_id
        LIMIT 1
        """,
        {'course_id': course_id, 'current_user_id':current_user.id}).first()
    current_dttm = datetime.utcnow()
    student_hometask = None
    form = StudentHometaskForm()
    if query_student:
        student_id = query_student[0]
        student = db.session.query(Student).filter(Student.id == student_id).first()
        is_student = True
        # Existing solution (if any) pre-fills the submission form.
        student_hometask = db.session.query(StudentHometask).filter(StudentHometask.course_hometask_id == hometask_id).filter(StudentHometask.student_id == Student.id).filter(Student.user_id == current_user.id).first()
        if student_hometask:
            form = StudentHometaskForm(content = student_hometask.content)
    # Students may not open the hometask before its start time.
    if is_student and current_dttm < hometask.start_dttm:
        abort(403)
    if form.validate_on_submit():
        # Create on first submission, update content afterwards.
        if not student_hometask:
            student_hometask = StudentHometask(course_hometask_id=hometask.id, student_id=student.id, content=form.content.data)
        else:
            student_hometask.content = form.content.data
        db_add_objects(student_hometask)
        flash("Решение задания сохранено", 'success')
        return redirect(url_for('course_hometasks', course_id=course_id))
    return render_template('course_hometask.html', course=course, hometask=hometask, form=form, can_edit_hometask=can_edit_hometask,
        student_hometask=student_hometask, is_student=is_student, deadline=deadline, current_dttm=current_dttm, groups=groups, student_hometasks=student_hometasks)
@app.route('/course/<course_id>/hometask/<hometask_id>/student_hometask/<student_hometask_id>', methods=['POST', 'GET'])
@login_required
def course_student_hometask(course_id, hometask_id, student_hometask_id):
    """Teacher-only view of a single student's hometask submission."""
    # Access guard: 404 unless the current user teaches this course.
    db.session.query(EducationalCourse) \
        .filter(EducationalCourse.id == course_id) \
        .filter(EducationalCourse.teacher_id == Teacher.id) \
        .filter(Teacher.user_id == current_user.id) \
        .first_or_404()
    course = db.session.query(EducationalCourse) \
        .filter(EducationalCourse.id == course_id).first_or_404()
    hometask = db.session.query(CourseHometask) \
        .filter(CourseHometask.id == hometask_id).first_or_404()
    student_hometask = db.session.query(StudentHometask) \
        .filter(StudentHometask.id == student_hometask_id).first_or_404()
    # Resolve the submitting student, then the user record behind them.
    student = db.session.query(Student) \
        .filter(Student.id == StudentHometask.student_id) \
        .filter(StudentHometask.id == student_hometask_id).first_or_404()
    user_student = db.session.query(User) \
        .filter(Student.id == student.id) \
        .filter(User.id == Student.user_id).first_or_404()
    deadline = hometask.end_dttm.strftime('%d-%m-%Y %H:%M')
    return render_template(
        'course_student_hometask.html',
        course=course,
        hometask=hometask,
        student_hometask=student_hometask,
        deadline=deadline,
        user_student=user_student,
    )
@app.route('/course/<course_id>/add_material/', methods=['POST', 'GET'])
@login_required
def course_add_material(course_id):
    """Form for adding a material to a course (teacher or responsible person)."""
    active_page = 'course_program'
    course = (db.session.query(EducationalCourse)
              .filter(EducationalCourse.id == course_id)
              .first_or_404())
    is_teacher = (db.session.query(EducationalCourse)
                  .filter(EducationalCourse.id == course_id)
                  .filter(EducationalCourse.teacher_id == Teacher.id)
                  .filter(Teacher.user_id == current_user.id)
                  .first())
    is_responsible = (db.session.query(EducationalCourse)
                      .filter(EducationalCourse.id == course_id)
                      .filter(CourseResponsiblePerson.course_id == course_id)
                      .filter(CourseResponsiblePerson.person_id == Student.id)
                      .filter(current_user.id == Student.user_id)
                      .filter(CourseResponsiblePerson.is_active == 1)
                      .first())
    if not (is_teacher or is_responsible):
        abort(403)
    form = CourseAddMaterialForm()
    if not form.validate_on_submit():
        # GET, or POST with validation errors: re-render the form.
        return render_template('course_add_material.html', course=course, active_page=active_page, form=form)
    material = course.add_course_material(form.name.data, form.content.data)
    db_add_objects(material)
    flash("Материал успешно добавлен", 'success')
    return redirect(url_for('course_program', course_id=course_id))
@app.route('/course/<course_id>/edit_material/<material_id>', methods=['POST', 'GET'])
@login_required
def course_edit_material(course_id, material_id):
    """Edit an existing course material (teacher or responsible person only)."""
    active_page = 'course_program'
    course = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).first_or_404()
    material = db.session.query(CourseMaterial).filter(CourseMaterial.id == material_id).first_or_404()
    teacher = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(EducationalCourse.teacher_id == Teacher.id).filter(Teacher.user_id == current_user.id).first()
    responsible_person = db.session.query(EducationalCourse).filter(EducationalCourse.id == course_id).filter(CourseResponsiblePerson.course_id == course_id).filter(CourseResponsiblePerson.person_id == Student.id).filter(current_user.id == Student.user_id).filter(CourseResponsiblePerson.is_active == 1).first()
    if not (teacher or responsible_person):
        abort(403)
    # (translated) The TextArea widget needs its initial value passed to the
    # form constructor — a placeholder attribute does not work for it.
    form = CourseAddMaterialForm(content = material.content)
    if form.validate_on_submit():
        material.name = form.name.data
        material.content = form.content.data
        db_add_objects(material)
        flash("Материал успешно обновлен", 'success')
        return redirect(url_for('course_program', course_id=course_id))
    return render_template('course_edit_material.html', course=course, active_page=active_page, form=form, material=material)
import typing
from functools import partial
import six
from dagster import check
from dagster.core.storage.type_storage import TypeStoragePlugin
from .builtin_config_schemas import BuiltinSchemas
from .builtin_enum import BuiltinEnum
from .config import List as ConfigList
from .config import Nullable as ConfigNullable
from .config_schema import InputHydrationConfig, OutputMaterializationConfig
from .dagster_type import check_dagster_type_param
from .field_utils import Dict
from .marshal import PickleSerializationStrategy, SerializationStrategy
from .typing_api import (
is_closed_python_dict_type,
is_closed_python_set_type,
is_closed_python_tuple_type,
)
from .wrapping import WrappingListType, WrappingNullableType
class RuntimeType(object):
    '''
    The class backing DagsterTypes as they are used during execution.

    Instances are singletons: obtain them via the ``inst()`` classmethod,
    which caches one instance per subclass. Constructing a subclass
    directly once it is cached is a programming error.
    '''

    def __init__(
        self,
        key,
        name,
        is_builtin=False,
        description=None,
        input_hydration_config=None,
        output_materialization_config=None,
        serialization_strategy=None,
        auto_plugins=None,
    ):
        # Enforce the singleton protocol: reject direct construction of an
        # already-cached subclass.
        type_obj = type(self)
        if type_obj in RuntimeType.__cache:
            check.failed(
                (
                    '{type_obj} already in cache. You **must** use the inst() class method '
                    'to construct RuntimeType and not the ctor'.format(type_obj=type_obj)
                )
            )
        self.key = check.str_param(key, 'key')
        self.name = check.opt_str_param(name, 'name')
        self.description = check.opt_str_param(description, 'description')
        self.input_hydration_config = check.opt_inst_param(
            input_hydration_config, 'input_hydration_config', InputHydrationConfig
        )
        self.output_materialization_config = check.opt_inst_param(
            output_materialization_config,
            'output_materialization_config',
            OutputMaterializationConfig,
        )
        # Falls back to pickle-based serialization when none is supplied.
        self.serialization_strategy = check.opt_inst_param(
            serialization_strategy,
            'serialization_strategy',
            SerializationStrategy,
            PickleSerializationStrategy(),
        )
        # Every auto plugin must be a TypeStoragePlugin subclass (class, not
        # instance).
        auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
        check.param_invariant(
            all(
                issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins
            ),
            'auto_plugins',
        )
        self.auto_plugins = auto_plugins
        self.is_builtin = check.bool_param(is_builtin, 'is_builtin')

    # Per-subclass singleton cache used by inst(); name-mangled to
    # _RuntimeType__cache so all subclasses share this one dict.
    __cache = {}

    @classmethod
    def inst(cls):
        """Return (lazily creating) the cached singleton instance of *cls*."""
        if cls not in RuntimeType.__cache:
            RuntimeType.__cache[cls] = cls()  # pylint: disable=E1120
        return RuntimeType.__cache[cls]

    @staticmethod
    def from_builtin_enum(builtin_enum):
        """Map a BuiltinEnum member to its RuntimeType instance."""
        check.invariant(BuiltinEnum.contains(builtin_enum), 'must be member of BuiltinEnum')
        return _RUNTIME_MAP[builtin_enum]

    @property
    def display_name(self):
        return self.name

    def type_check(self, value):
        """Subclass hook: raise Failure on mismatch. Default accepts anything."""
        pass

    # The is_* flags below are overridden by the corresponding subclasses.
    @property
    def is_any(self):
        return False

    @property
    def is_scalar(self):
        return False

    @property
    def is_list(self):
        return False

    @property
    def is_nullable(self):
        return False

    @property
    def inner_types(self):
        return []

    @property
    def is_nothing(self):
        return False
class BuiltinScalarRuntimeType(RuntimeType):
    """Base for builtin scalar types; key and name default to the subclass name."""

    def __init__(self, *args, **kwargs):
        # Derive key/name from the concrete subclass (Int, String, ...).
        name = type(self).__name__
        super(BuiltinScalarRuntimeType, self).__init__(
            key=name, name=name, is_builtin=True, *args, **kwargs
        )

    @property
    def is_scalar(self):
        return True
class Int(BuiltinScalarRuntimeType):
    """Builtin scalar runtime type for integers."""

    def __init__(self):
        super(Int, self).__init__(
            input_hydration_config=BuiltinSchemas.INT_INPUT,
            output_materialization_config=BuiltinSchemas.INT_OUTPUT,
        )

    def type_check(self, value):
        # Local import, presumably to avoid an import cycle with events.
        from dagster.core.definitions.events import Failure

        # six.integer_types covers int (and long on Python 2).
        # NOTE(review): bool is a subclass of int, so True/False pass this
        # check — confirm that is intended.
        if not isinstance(value, six.integer_types):
            raise Failure(_typemismatch_error_str(value, 'int'))
def _typemismatch_error_str(value, expected_type_desc):
return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(
value=value, python_type=type(value).__name__, type_desc=expected_type_desc
)
def _throw_if_not_string(value):
    """Raise Failure unless *value* is a string (``six.string_types``)."""
    from dagster.core.definitions.events import Failure

    if isinstance(value, six.string_types):
        return
    raise Failure(_typemismatch_error_str(value, 'string'))
class String(BuiltinScalarRuntimeType):
    """Builtin scalar runtime type for strings (``six.string_types``)."""

    def __init__(self):
        super(String, self).__init__(
            input_hydration_config=BuiltinSchemas.STRING_INPUT,
            output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
        )

    def type_check(self, value):
        # Raises Failure for non-string values.
        _throw_if_not_string(value)
class Path(BuiltinScalarRuntimeType):
    """Builtin scalar runtime type for filesystem paths.

    Values are plain strings; no filesystem-level validation is performed —
    the type check only requires a string.
    """

    def __init__(self):
        super(Path, self).__init__(
            input_hydration_config=BuiltinSchemas.PATH_INPUT,
            output_materialization_config=BuiltinSchemas.PATH_OUTPUT,
        )

    def type_check(self, value):
        _throw_if_not_string(value)
class Float(BuiltinScalarRuntimeType):
    """Builtin scalar runtime type for floats."""

    def __init__(self):
        super(Float, self).__init__(
            input_hydration_config=BuiltinSchemas.FLOAT_INPUT,
            output_materialization_config=BuiltinSchemas.FLOAT_OUTPUT,
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # Strict float check: ints are rejected, not coerced.
        if not isinstance(value, float):
            raise Failure(_typemismatch_error_str(value, 'float'))
class Bool(BuiltinScalarRuntimeType):
    """Builtin scalar runtime type for booleans."""

    def __init__(self):
        super(Bool, self).__init__(
            input_hydration_config=BuiltinSchemas.BOOL_INPUT,
            output_materialization_config=BuiltinSchemas.BOOL_OUTPUT,
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # Only actual bools pass; truthy ints/strings are rejected.
        if not isinstance(value, bool):
            raise Failure(_typemismatch_error_str(value, 'bool'))
class Anyish(RuntimeType):
    """Base for 'Any'-like types.

    ``is_any`` is True and the inherited ``type_check`` is a no-op, so every
    value passes.
    """

    def __init__(
        self,
        key,
        name,
        input_hydration_config=None,
        output_materialization_config=None,
        is_builtin=False,
        description=None,
    ):
        super(Anyish, self).__init__(
            key=key,
            name=name,
            input_hydration_config=input_hydration_config,
            output_materialization_config=output_materialization_config,
            is_builtin=is_builtin,
            description=description,
        )

    @property
    def is_any(self):
        return True
class Any(Anyish):
    """The builtin ``Any`` type: accepts every value."""

    def __init__(self):
        super(Any, self).__init__(
            key='Any',
            name='Any',
            input_hydration_config=BuiltinSchemas.ANY_INPUT,
            output_materialization_config=BuiltinSchemas.ANY_OUTPUT,
            is_builtin=True,
        )
def define_any_type(name, description=None):
    """Return a new ``Anyish`` subclass keyed and named *name*.

    Note this returns the class itself, not an instance; the singleton
    instance is obtained through the usual ``inst()`` machinery.
    """
    class NamedAnyType(Anyish):
        def __init__(self):
            super(NamedAnyType, self).__init__(key=name, name=name, description=description)

    return NamedAnyType
class Nothing(RuntimeType):
    """Builtin type whose only valid value is ``None``."""

    def __init__(self):
        super(Nothing, self).__init__(
            key='Nothing',
            name='Nothing',
            input_hydration_config=None,
            output_materialization_config=None,
            is_builtin=True,
        )

    @property
    def is_nothing(self):
        return True

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        if value is not None:
            # BUG FIX: the message previously contained a literal "{value}"
            # placeholder that was never formatted; interpolate the value.
            raise Failure('Value {value} must be None.'.format(value=value))
class PythonObjectType(RuntimeType):
    """RuntimeType wrapping a plain Python class.

    ``type_check`` defaults to an ``isinstance`` check against
    ``python_type``; a user-supplied ``type_check`` callable replaces that
    check entirely, and an optional ``typecheck_metadata_fn`` is invoked on
    success with its result returned.
    """

    def __init__(
        self,
        python_type=None,
        key=None,
        name=None,
        typecheck_metadata_fn=None,
        type_check=None,
        **kwargs
    ):
        # name defaults to the subclass name; key defaults to the name.
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(PythonObjectType, self).__init__(key=key, name=name, **kwargs)
        self.python_type = check.type_param(python_type, 'python_type')
        self.typecheck_metadata_fn = check.opt_callable_param(
            typecheck_metadata_fn, 'typecheck_metadata_fn'
        )
        self._user_type_check = check.opt_callable_param(type_check, 'type_check')

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # A user-provided check fully replaces the isinstance check.
        if self._user_type_check is not None:
            self._user_type_check(value)
        elif not isinstance(value, self.python_type):
            raise Failure(
                'Value of type {value_type} failed type check for Dagster type {dagster_type}, '
                'expected value to be of Python type {expected_type}.'.format(
                    value_type=type(value),
                    dagster_type=self.name,
                    expected_type=self.python_type.__name__,
                )
            )
        if self.typecheck_metadata_fn:
            return self.typecheck_metadata_fn(value)
# Shared "Args:" section spliced into the docstrings of the python-type
# wrapping helpers. Typo fix: "Optiona[" -> "Optional[".
PYTHON_DAGSTER_TYPE_ARGS_DOCSTRING = '''Args:
    python_type (cls): The python type to wrap as a Dagster type.
    name (Optional[str]): Name of the new Dagster type. If None, the name (__name__) of the
        python_type will be used. Default: None
    description (Optional[str]): A user-readable description of the type. Default: None.
    input_hydration_config (Optional[InputHydrationConfig]): An instance of a class that inherits
        from :py:class:`InputHydrationConfig <dagster.InputHydrationConfig>` and can map config
        data to a value of this type. Specify this argument if you will need to shim values of this
        type using the config machinery. As a rule, you should use the
        :py:func:`@input_hydration_config <dagster.InputHydrationConfig>` decorator to construct
        these arguments. Default: None
    output_materialization_config (Optional[OutputMaterializationConfig]): An instance of a class
        that inherits from
        :py:class:`OutputMaterializationConfig <dagster.OutputMaterializationConfig>` that can
        persist values of this type. As a rule, you should use the
        :py:func:`@output_materialization_config <dagster.output_materialization_config>` decorator
        to construct these arguments. Default: None
    serialization_strategy (Optional[SerializationStrategy]): An instance of a class that inherits
        from :py:class:`SerializationStrategy <dagster.SerializationStrategy>`. The default strategy
        for serializing this value when automatically persisting it between execution steps. You
        should set this value if the ordinary serialization machinery (e.g., pickle) will not be
        adequate for this type. Default: None.
    auto_plugins (Optional[List[TypeStoragePlugin]]): If types must be serialized differently
        depending on the storage being used for intermediates, they should specify this argument.
        In these cases the serialization_strategy argument is not sufficient because serialization
        requires specialized API calls, e.g. to call an s3 API directly instead of using a generic
        file object. See dagster_pyspark.DataFrame for an example using auto_plugins. Default: None.
    typecheck_metadata_fn (Optional[Callable[[Any], TypeCheck]]): If specified, this function will
        be called to emit metadata when you successfully check a type. The typecheck_metadata_fn
        will be passed the value being type-checked and should return an instance of
        :py:class:`TypeCheck <dagster.TypeCheck>`. See dagster_pandas.DataFrame for an example.
        Default: None.
    type_check (Optional[Callable[[Any], Any]]): If specified, this function will be called in
        place of the default isinstance type check. This function should raise Failure if the
        type check fails, and otherwise pass. Its return value will be ignored.'''
def define_python_dagster_type(
    python_type,
    name=None,
    description=None,
    input_hydration_config=None,
    output_materialization_config=None,
    serialization_strategy=None,
    auto_plugins=None,
    typecheck_metadata_fn=None,
    type_check=None,
):
    '''Define a dagster type corresponding to an existing python type.
    It's very common to want to generate a dagster type corresponding to an existing Python type.
    Typically this is done using the @dagster_type decorator or using as_dagster_type, each of
    which defer to this function as a workhorse.
    Usage:
        DateTime = define_python_dagster_type(datetime.datetime, name='DateTime')
    {args_docstring}
    '''.format(
        args_docstring=PYTHON_DAGSTER_TYPE_ARGS_DOCSTRING
    )
    # NOTE: because .format() is applied, the string above is an expression
    # statement rather than a true docstring; __doc__ is None at runtime.
    check.type_param(python_type, 'python_type')
    # Bug fix: the results of these opt_*_param checks were previously
    # discarded, so the declared defaults (python_type.__name__ for name,
    # PickleSerializationStrategy() for serialization_strategy) never took
    # effect. Capture the returns so the defaults apply.
    name = check.opt_str_param(name, 'name', python_type.__name__)
    check.opt_str_param(description, 'description')
    check.opt_inst_param(input_hydration_config, 'input_hydration_config', InputHydrationConfig)
    check.opt_inst_param(
        output_materialization_config, 'output_materialization_config', OutputMaterializationConfig
    )
    serialization_strategy = check.opt_inst_param(
        serialization_strategy,
        'serialization_strategy',
        SerializationStrategy,
        default=PickleSerializationStrategy(),
    )
    auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
    # Every auto plugin must be a TypeStoragePlugin subclass.
    check.param_invariant(
        all(issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins),
        'auto_plugins',
    )
    check.opt_callable_param(typecheck_metadata_fn, 'typecheck_metadata_fn')
    check.opt_callable_param(type_check, 'type_check')

    # Close over the validated arguments in a dedicated PythonObjectType
    # subclass; callers receive the class itself, not an instance.
    class _ObjectType(PythonObjectType):
        def __init__(self):
            super(_ObjectType, self).__init__(
                python_type=python_type,
                name=name,
                description=description,
                input_hydration_config=input_hydration_config,
                output_materialization_config=output_materialization_config,
                serialization_strategy=serialization_strategy,
                auto_plugins=auto_plugins,
                typecheck_metadata_fn=typecheck_metadata_fn,
                type_check=type_check,
            )

    return _ObjectType
def _create_nullable_input_schema(inner_type):
    """Build an InputHydrationConfig for Optional[inner_type], or return None
    when the inner type itself has no input hydration config."""
    hydration = inner_type.input_hydration_config
    if not hydration:
        return None

    wrapped_schema_type = ConfigNullable(hydration.schema_type).inst()

    class _NullableSchema(InputHydrationConfig):
        @property
        def schema_type(self):
            return wrapped_schema_type

        def construct_from_config_value(self, context, config_value):
            # A null config value hydrates to a literal None; anything else is
            # delegated to the inner type's own hydration logic.
            if config_value is None:
                return None
            return hydration.construct_from_config_value(context, config_value)

    return _NullableSchema()
class NullableType(RuntimeType):
    """Runtime representation of Optional[T]: accepts None or an inner-type value."""

    def __init__(self, inner_type):
        super(NullableType, self).__init__(
            key='Optional.' + inner_type.key,
            name=None,
            input_hydration_config=_create_nullable_input_schema(inner_type),
        )
        self.inner_type = inner_type

    @property
    def display_name(self):
        # Rendered with a trailing '?', e.g. "Int?".
        return '{}?'.format(self.inner_type.display_name)

    def type_check(self, value):
        # None always passes; any other value must satisfy the inner type.
        if value is None:
            return None
        return self.inner_type.type_check(value)

    @property
    def is_nullable(self):
        return True

    @property
    def inner_types(self):
        return [self.inner_type] + self.inner_type.inner_types
def _create_list_input_schema(inner_type):
    """Build an InputHydrationConfig for List[inner_type], or return None when
    the inner type itself has no input hydration config."""
    hydration = inner_type.input_hydration_config
    if not hydration:
        return None

    wrapped_schema_type = ConfigList(hydration.schema_type).inst()

    class _ListSchema(InputHydrationConfig):
        @property
        def schema_type(self):
            return wrapped_schema_type

        def construct_from_config_value(self, context, config_value):
            # Hydrate each element via the inner type's own hydration logic.
            return [
                hydration.construct_from_config_value(context, item)
                for item in config_value
            ]

    return _ListSchema()
class ListType(RuntimeType):
    """Runtime representation of List[T]: a python list whose items all satisfy T."""

    def __init__(self, inner_type):
        super(ListType, self).__init__(
            key='List.' + inner_type.key,
            name=None,
            input_hydration_config=_create_list_input_schema(inner_type),
        )
        self.inner_type = inner_type

    @property
    def display_name(self):
        # Rendered in brackets, e.g. "[Int]".
        return '[{}]'.format(self.inner_type.display_name)

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        # The container itself must be a python list; each element is then
        # checked against the inner type (which raises Failure on mismatch).
        if not isinstance(value, list):
            raise Failure('Value must be a list, got {value}'.format(value=value))
        for item in value:
            self.inner_type.type_check(item)

    @property
    def is_list(self):
        return True

    @property
    def inner_types(self):
        return [self.inner_type] + self.inner_type.inner_types
def Optional(inner_type):
    """Return the (cached via .inst()) NullableType wrapping *inner_type*."""
    check.inst_param(inner_type, 'inner_type', RuntimeType)
    # A dedicated subclass per call so .inst() caches one singleton per wrap.
    class _Nullable(NullableType):
        def __init__(self):
            super(_Nullable, self).__init__(inner_type)
    return _Nullable.inst()
def List(inner_type):
    """Return the (cached via .inst()) ListType wrapping *inner_type*."""
    check.inst_param(inner_type, 'inner_type', RuntimeType)
    # A dedicated subclass per call so .inst() caches one singleton per wrap.
    class _List(ListType):
        def __init__(self):
            super(_List, self).__init__(inner_type)
    return _List.inst()
class Stringish(RuntimeType):
    """Base class for scalar runtime types whose values are python strings."""
    def __init__(self, key=None, name=None, **kwargs):
        # Name defaults to the concrete subclass name; key defaults to the name.
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(Stringish, self).__init__(
            key=key,
            name=name,
            input_hydration_config=BuiltinSchemas.STRING_INPUT,
            output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
            **kwargs
        )
    @property
    def is_scalar(self):
        return True
    def type_check(self, value):
        # Delegates: raises when value is not a string, passes otherwise.
        return _throw_if_not_string(value)
# Maps each BuiltinEnum member to its singleton runtime type instance.
_RUNTIME_MAP = {
    BuiltinEnum.ANY: Any.inst(),
    BuiltinEnum.BOOL: Bool.inst(),
    BuiltinEnum.FLOAT: Float.inst(),
    BuiltinEnum.INT: Int.inst(),
    BuiltinEnum.PATH: Path.inst(),
    BuiltinEnum.STRING: String.inst(),
    BuiltinEnum.NOTHING: Nothing.inst(),
}
def resolve_to_runtime_type(dagster_type):
    """Resolve any user-facing type annotation (None, builtin enum, typing
    generic, wrapping List/Optional, or RuntimeType subclass) to a RuntimeType
    instance.

    NOTE: the order of checks is significant -- closed typing generics must be
    handled before check_dagster_type_param runs its generic validation.
    """
    # circular dep
    from .mapping import remap_python_type
    from .python_dict import PythonDict, create_typed_runtime_dict
    from .python_set import PythonSet, create_typed_runtime_set
    from .python_tuple import PythonTuple, create_typed_tuple
    dagster_type = remap_python_type(dagster_type)
    # do not do in remap because this is runtime system only.
    if is_closed_python_dict_type(dagster_type):
        return create_typed_runtime_dict(dagster_type.__args__[0], dagster_type.__args__[1]).inst()
    if is_closed_python_tuple_type(dagster_type):
        return create_typed_tuple(*dagster_type.__args__).inst()
    if is_closed_python_set_type(dagster_type):
        return create_typed_runtime_set(dagster_type.__args__[0]).inst()
    check_dagster_type_param(dagster_type, 'dagster_type', RuntimeType)
    # None is treated as "anything".
    if dagster_type is None:
        return Any.inst()
    # Open (unparameterized) typing generics map to permissive python types.
    if dagster_type is Dict or dagster_type is typing.Dict:
        return PythonDict.inst()
    if dagster_type is typing.Tuple:
        return PythonTuple.inst()
    if dagster_type is typing.Set:
        return PythonSet.inst()
    if BuiltinEnum.contains(dagster_type):
        return RuntimeType.from_builtin_enum(dagster_type)
    # Wrapping types recurse on their inner type.
    if isinstance(dagster_type, WrappingListType):
        return resolve_to_runtime_list(dagster_type)
    if isinstance(dagster_type, WrappingNullableType):
        return resolve_to_runtime_nullable(dagster_type)
    if issubclass(dagster_type, RuntimeType):
        return dagster_type.inst()
    check.failed('should not reach')
def resolve_to_runtime_list(list_type):
    """Resolve the wrapped inner type, then wrap it in a runtime List."""
    check.inst_param(list_type, 'list_type', WrappingListType)
    resolved_inner = resolve_to_runtime_type(list_type.inner_type)
    return List(resolved_inner)
def resolve_to_runtime_nullable(nullable_type):
    """Resolve the wrapped inner type, then wrap it in a runtime Optional."""
    check.inst_param(nullable_type, 'nullable_type', WrappingNullableType)
    resolved_inner = resolve_to_runtime_type(nullable_type.inner_type)
    return Optional(resolved_inner)
# Every builtin runtime type singleton; seeds the type dictionary below.
ALL_RUNTIME_BUILTINS = set(_RUNTIME_MAP.values())
def construct_runtime_type_dictionary(solid_defs):
    """Map type name -> RuntimeType for all builtins plus every runtime type
    referenced by *solid_defs* (later entries win on name collisions)."""
    type_dict = {t.name: t for t in ALL_RUNTIME_BUILTINS}
    for solid_def in solid_defs:
        for runtime_type in solid_def.all_runtime_types():
            type_dict[runtime_type.name] = runtime_type
    return type_dict
| StarcoderdataPython |
196997 | #!/usr/bin/env python
'''
napalm-logs client, without authentication.
Listens to the napalm-logs server started using the following settings:
napalm-logs --publish-address 127.0.0.0.1
--publish-port 49017
--transport zmq
--disable-security
This client example listens to messages published via ZeroMQ (default transport).
'''
import zmq
import napalm_logs.utils

server_address = '127.0.0.1'  # --publish-address
server_port = 49017  # --publish-port

# Subscribe to the napalm-logs publisher over ZeroMQ (the default transport).
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://{address}:{port}'.format(address=server_address,
                                               port=server_port))
# Bug fix: on Python 3 pyzmq requires subscription topics to be bytes; passing
# a str raised TypeError. b'' subscribes to all messages.
socket.setsockopt(zmq.SUBSCRIBE, b'')

# Block forever, printing each decoded structured message as it arrives.
while True:
    raw_object = socket.recv()
    print(napalm_logs.utils.unserialize(raw_object))
| StarcoderdataPython |
73386 | import setuptools
# Packaging configuration for ngboost (probabilistic prediction via natural
# gradient boosting). NOTE(review): the <NAME>/<EMAIL> placeholders are
# anonymization artifacts of the extracted sample, not literal values.
setuptools.setup(
    name="ngboost",
    version="0.1.3",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Library for probabilistic predictions via gradient boosting.",
    long_description="Please see Github for full description.",
    long_description_content_type="text/markdown",
    url="https://github.com/stanfordmlgroup/ngboost",
    license='Apache License 2.0',
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    # Minimum runtime dependencies; lifelines supplies survival-analysis tools.
    install_requires=[
        "numpy>=1.17.2",
        "scipy>=1.3.1",
        "scikit-learn>=0.21.3",
        "tqdm>=4.36.1",
        "lifelines>=0.22.8",
    ]
)
| StarcoderdataPython |
1781032 | <reponame>t-persson/jsontas
# Copyright 2020 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Condition datastructure."""
from .datastructure import DataStructure
from .operator import Operator
# pylint:disable=too-few-public-methods
class Condition(DataStructure):
    """Condition datastructure.
    Example::
        {
            "$condition": {
                "if": {
                    "key": "Key to match",
                    "operator": "$eq",
                    "value": "something",
                },
                "then": "returnvalue",
                "else": "Default value"
            }
        }
    Example::
        {
            "$condition": {
                "if": [
                    {
                        "key": "Key to match",
                        "operator": "$eq",
                        "value": "something",
                    },
                    {
                        "key": "Another key to match",
                        "operator": "$in",
                        "value": "somethingelse"
                    }
                ],
                "then": "returnvalue",
                "else": "Default value"
            }
        }
    Supported operators defined here: :obj:`jsontas.data_structures.operator.Operator`
    .. document private functions
    .. automethod:: _if
    .. automethod:: _else
    """

    def _if(self, operator):
        """If operator.
        A list of operator dictionaries must all match (logical AND); a single
        dictionary is evaluated on its own.
        :param operator: Data to check "if" on.
        :type operator: dict or list
        :return: Operator result.
        :rtype: bool
        """
        if isinstance(operator, (list, set, tuple)):
            # all() short-circuits on the first failing sub-operator, exactly
            # like the original early "return False", and replaces the
            # for/else construct that pylint flagged as useless-else-on-loop.
            return all(
                Operator(self.jsonkey, self.datasubset, self.dataset,
                         **sub_operator).execute()[1]
                for sub_operator in operator
            )
        return Operator(self.jsonkey, self.datasubset, self.dataset, **operator).execute()[1]

    @staticmethod
    def _else(data):
        """Else operator. Just return the data.
        Note that this method is added for readability in :obj:`execute` only.
        :param data: Data to just return.
        :type data: any
        :return: Condition.
        :rtype: str
        """
        return data

    def execute(self):
        """Execute data.
        :return: None and value from either 'else' or 'then'.
        :rtype: tuple
        """
        _if = self.data.get("if")
        _else = self.data.get("else")
        _then = self.data.get("then")
        if self._if(_if):
            return None, _then
        return None, self._else(_else)
| StarcoderdataPython |
135670 | <reponame>joshuaroot/chaostoolkit-lib<gh_stars>10-100
# just keep this as-is
def not_an_activity():
print("boom")
| StarcoderdataPython |
113913 | <reponame>wbthomason/minigrade
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from minigrade import minigrade, PORT_NUMBER
import logging

# Log to a file so the grader can run unattended.
logging.basicConfig(filename='grader.log', level=logging.DEBUG)
# Idiom fix: use lazy %-style logging args instead of eager string concat.
logging.debug('Started logging on port: %s', PORT_NUMBER)

# Wrap the WSGI app in Tornado's HTTP server and run its event loop.
http_server = HTTPServer(WSGIContainer(minigrade))
http_server.listen(PORT_NUMBER)
IOLoop.instance().start()

# Only reached once the IOLoop has been stopped. (Stray trailing semicolon
# removed from the original line.)
logging.debug('Finished tornados!')
| StarcoderdataPython |
32006 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
  """A structure to store batch time stamp."""

  def __init__(self, batch_index, timestamp):
    self.batch_index = batch_index
    self.timestamp = timestamp

  def __repr__(self):
    # NOTE: the surrounding single quotes are a literal part of the output.
    return "'BatchTimestamp<batch_index: %s, timestamp: %s>'" % (
        self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
  """Callback for Keras models."""

  def __init__(self, batch_size, log_steps):
    """Callback for logging performance (# examples/second).

    Args:
      batch_size: Total batch size.
      log_steps: Interval of time history logs.
    """
    self.batch_size = batch_size
    super(TimeHistory, self).__init__()
    self.log_steps = log_steps

    # Logs start of step 0 then end of each step based on log_steps interval.
    self.timestamp_log = []

  def on_train_begin(self, logs=None):
    # Arms on_batch_begin to take a fresh start timestamp.
    self.record_batch = True

  def on_train_end(self, logs=None):
    self.train_finish_time = time.time()

  def on_batch_begin(self, batch, logs=None):
    # Only stamp when armed (at train start or after a logged interval end).
    if self.record_batch:
      timestamp = time.time()
      self.start_time = timestamp
      self.record_batch = False
      if batch == 0:
        self.timestamp_log.append(BatchTimestamp(batch, timestamp))

  def on_batch_end(self, batch, logs=None):
    if batch % self.log_steps == 0:
      timestamp = time.time()
      # Elapsed time since the last armed on_batch_begin stamp.
      elapsed_time = timestamp - self.start_time
      examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
      # Batch 0 is skipped: it only covers one step, not a full interval.
      if batch != 0:
        self.record_batch = True
        self.timestamp_log.append(BatchTimestamp(batch, timestamp))
        tf.compat.v1.logging.info(
            "BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
            "'examples_per_second': %f}" %
            (batch, elapsed_time, examples_per_second))
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard):
  """Validate profile_steps flag value and return profiler callback.

  Args:
    model_dir: Directory where the collected profiles will be saved.
    profile_steps: Comma separated "start,stop" pair of step indices.
    enable_tensorboard: Whether the TensorBoard callback is also in use
      (triggers a warning about overlapping profiled steps).

  Returns:
    A ProfilerCallback covering steps [start, stop].

  Raises:
    ValueError: If profile_steps is not a valid "start,stop" pair.
  """
  error_message = (
      'profile_steps must be a comma separated pair of positive integers, '
      'specifying the first and last steps to be profiled.'
  )
  try:
    # Tuple unpacking enforces exactly two fields; both a non-integer field
    # and a wrong field count raise ValueError, caught below.
    start_step, stop_step = [int(field) for field in profile_steps.split(',')]
  except ValueError:
    raise ValueError(error_message)
  if start_step < 0 or start_step > stop_step:
    raise ValueError(error_message)
  if enable_tensorboard:
    tf.compat.v1.logging.warn(
        'Both TensorBoard and profiler callbacks are used. Note that the '
        'TensorBoard callback profiles the 2nd step (unless otherwise '
        'specified). Please make sure the steps profiled by the two callbacks '
        'do not overlap.')
  return ProfilerCallback(model_dir, start_step, stop_step)
class ProfilerCallback(tf.keras.callbacks.Callback):
  """Save profiles in specified step range to log directory."""

  def __init__(self, log_dir, start_step, stop_step):
    """Args:
      log_dir: Directory the collected profile is saved to.
      start_step: First batch index to profile (inclusive).
      stop_step: Last batch index to profile (inclusive).
    """
    super(ProfilerCallback, self).__init__()
    self.log_dir = log_dir
    self.start_step = start_step
    self.stop_step = stop_step

  def on_batch_begin(self, batch, logs=None):
    # Start collecting just before the first profiled step runs.
    if batch == self.start_step:
      profiler.start()
      tf.compat.v1.logging.info('Profiler started at Step %s', self.start_step)

  def on_batch_end(self, batch, logs=None):
    # Stop after the last profiled step and persist the results.
    if batch == self.stop_step:
      results = profiler.stop()
      profiler.save(self.log_dir, results)
      tf.compat.v1.logging.info(
          'Profiler saved profiles for steps between %s and %s to %s',
          self.start_step, self.stop_step, self.log_dir)
| StarcoderdataPython |
4802923 | <reponame>hassanakbar4/ietfdb<gh_stars>10-100
# Copyright The IETF Trust 2015, All Rights Reserved
from inspect import getsourcelines
from django.shortcuts import render, get_object_or_404
from ietf.mailtrigger.models import MailTrigger, Recipient
def show_triggers(request, mailtrigger_slug=None):
    """Render all mail triggers, or only the one named by *mailtrigger_slug*.

    An unknown slug raises Http404 via get_object_or_404.
    """
    mailtriggers = MailTrigger.objects.all()
    if mailtrigger_slug:
        # 404 on unknown slugs before narrowing the queryset.
        get_object_or_404(MailTrigger, slug=mailtrigger_slug)
        mailtriggers = mailtriggers.filter(slug=mailtrigger_slug)
    context = {
        'mailtrigger_slug': mailtrigger_slug,
        'mailtriggers': mailtriggers,
    }
    return render(request, 'mailtrigger/trigger.html', context)
def show_recipients(request, recipient_slug=None):
    """Render all recipients, or only the one named by *recipient_slug*.

    Recipients backed by a gather_<slug> method get that method's source code
    attached as .code so the template can display it.
    """
    recipients = Recipient.objects.all()
    if recipient_slug:
        # 404 on unknown slugs before narrowing the queryset.
        get_object_or_404(Recipient, slug=recipient_slug)
        recipients = recipients.filter(slug=recipient_slug)
    for recipient in recipients:
        gather_name = 'gather_%s' % recipient.slug
        if hasattr(recipient, gather_name):
            source_lines, _ = getsourcelines(getattr(recipient, gather_name))
            recipient.code = ''.join(source_lines)
    context = {
        'recipient_slug': recipient_slug,
        'recipients': recipients,
    }
    return render(request, 'mailtrigger/recipient.html', context)
| StarcoderdataPython |
129959 | <gh_stars>0
from django.urls import path, re_path
app_name = 'mainapp'
# NOTE(review): import placed after app_name (PEP 8 prefers imports at top).
import mainapp.views as mainapp
# URL routes for the mainapp views; <int:pk> captures the product primary key,
# and the re_path regex captures an optional-'-'-prefixed ordering key.
urlpatterns = [
    path('', mainapp.index, name='index'),
    path('about/', mainapp.about, name='about'),
    path('find/', mainapp.find, name='find'),
    path('select-products/<int:pk>/', mainapp.select_product, name='select_product'),
    re_path(r'^order/set/(-*\w+)/$', mainapp.places_order_set, name='places_order_set')
    # re_path('^places/order/set/(\-*\w+)/$', mainapp.places_order_set, name='places_order_set'),
    # re_path('^places/order/set/()/', mainapp.places_order_set, name='places_order_set'),
]
| StarcoderdataPython |
138013 | import cmocean
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from scipy import interpolate
# Shared colormap used by make_item to turn scalars in [0, 1] into RGBA colors.
cmap = cm.ScalarMappable(cmap=cmocean.cm.phase)
# NOTE(review): result discarded -- presumably this call exists to autoscale
# the mappable's norm over [0, 1] before later to_rgba calls; confirm.
cmap.to_rgba([0., 0.5, 1.])
def make_item(c, f, n=None):
    """Draw one random closed blob colored by scalar *c* and save it as *f*.

    c: scalar in [0, 1] mapped through the shared `cmap` to a fill color.
    f: output filename (appended to the hard-coded stimuli directory below).
    n: number of control vertices; random in [3, 9) when falsy.
    """
    theta = [0]
    clr = cmap.to_rgba(c)
    if not n:
        n = np.random.randint(3, 9)
    # One random angle per sector so vertices are spread around the circle.
    m = 2 * np.pi / n
    for j in range(n):
        a = (j * m) + (np.random.rand() * m)
        theta.append(a)
    theta.append(2 * np.pi)
    # Random global rotation; matching first/last radius closes the curve.
    theta = np.array(theta) + np.random.uniform(0, 2 * np.pi)
    r = np.random.uniform(0.2, 1, len(theta))
    r[-1] = r[0]
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    # NOTE(review): splprep's `t` parameter is the knot vector (used with
    # task=-1); passing t=1 here looks suspect -- confirm intended behavior.
    tck, u = interpolate.splprep([x, y], s=0, t=1)
    unew = np.arange(0, 1.01, 0.01)
    out = interpolate.splev(unew, tck)
    # The circle below is only used by the commented-out plotting branch.
    theta = np.linspace(0, 2 * np.pi, 100)
    # the radius of the circle
    r = np.sqrt(0.6)
    # compute x1 and x2
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    fig, ax = plt.subplots(1, figsize=(1, 1))
    ax.plot(out[0], out[1], "k", lw=2)
    ax.fill(out[0], out[1], c=clr)
    # ax.plot(x, y, "k", lw=2)
    # ax.fill(x, y, c=clr)
    plt.tick_params(
        left=False, labelleft=False, right=False, bottom=False, labelbottom=False
    )
    plt.box(False)
    # NOTE(review): positional pad argument to tight_layout is deprecated in
    # newer matplotlib; pad=0 was presumably intended.
    plt.tight_layout(0)
    # Hard-coded absolute output directory (machine-specific).
    p = "/Users/sm2286/Documents/Projects/Charlie2/charlie2/stimuli/visual/visualmemory/"
    f = p + f
    plt.savefig(f, transparent=True)
    plt.close(fig)
if __name__ == "__main__":
    # Generate 30 trials of `load` stimuli each; item 0 also gets a color-
    # rotated variant (hue shifted by 0.5, i.e. the opposite colormap phase).
    for load in [4]:
        for trial in range(30):
            # n_ = list(range(5, 12))
            # np.random.shuffle(n_)
            # NOTE(review): clrs is only used by the commented-out
            # minimum-color-distance logic below and is otherwise dead.
            clrs = []
            for item in range(load):
                # if item == 0:
                #     clrs.append(np.random.uniform(0, 1))
                #     c = clrs[0]
                # else:
                #     while any(abs(c - c_) <= 0.01 for c_ in clrs):
                c = np.random.uniform(0, 1)
                # clrs.append(c)
                f = "l%i_t%i_i%i.png" % (load, trial, item)
                make_item(c, f)
                if item == 0:
                    # Opposite phase color for the "rotated" reference item.
                    if c > 0.5:
                        c -= 0.5
                    else:
                        c += 0.5
                    f = "l%i_t%i_i%i_r.png" % (load, trial, item)
                    make_item(c, f)
3222522 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 25 16:32:24 2021
@authors: moritz, marlin
"""
import pysam as ps
import argparse
#for deduplicating lists, will be used to calculate number of distinct clusters and output files
def deduplicateList(listWithDuplicates):
    """Return a copy of *listWithDuplicates* with duplicates removed, keeping
    the first occurrence of each entry (order preserved).

    Performance fix: uses dict.fromkeys (O(n); dicts preserve insertion order
    on Python 3.7+) instead of the original O(n^2) list-membership loop, with
    a fallback that keeps the old behavior for unhashable entries.
    """
    try:
        return list(dict.fromkeys(listWithDuplicates))
    except TypeError:
        # Entries are unhashable (e.g. lists): fall back to the original scan.
        deduplicated = []
        for entry in listWithDuplicates:
            if entry not in deduplicated:
                deduplicated.append(entry)
        return deduplicated
#for getting a list of all cell Barcodes, that belong to the provided cluster.
#no longer in use may be removed
def getSingleClusterBarcodeList(clusterID, combinedIDs):
    """Return the cell barcodes (first element of each pair in *combinedIDs*)
    whose cluster ID (second element) equals *clusterID*.

    No longer in use; may be removed.
    """
    return [barcode for barcode, cluster in combinedIDs if cluster == clusterID]
#Method recives a single cluster ID and a list of CellIDs for this cluster. It the writes a singular BAM file
#containing alls lines that have matching cellIDs
#No longer in use may be removed
def writeClusterBam (clusterID,cellIDsForCluster,sourceFilePath,outputDir):
    """Write one BAM containing all reads whose 'CB' tag is in *cellIDsForCluster*.

    Deprecated in favor of writeClusters(), which handles all clusters in a
    single pass over the source file instead of one pass per cluster.
    """
    #debug print
    print("working on cluster: "+str(clusterID))
    #print("Debug, cellbacodes for this cluster: "+str(cellIDsForCluster))
    sourceFile = ps.AlignmentFile(sourceFilePath,"rb")
    clusterFile = ps.AlignmentFile(outputDir+"cluster"+str(clusterID)+".bam","wb",template=sourceFile)
    # Copy every read whose cell barcode belongs to this cluster.
    for read in sourceFile.fetch():
        if read.has_tag('CB'):
            if read.get_tag('CB') in cellIDsForCluster:
                #print("Debug, matching read for tag: "+str(read.get_tag('CB')))
                clusterFile.write(read)
    sourceFile.close()
    clusterFile.close()
#faster way to write single cluster bam files
def writeClusters(deduplicatedClusterIDs, cellClusterDict, sourceFilePath, outputDir):
    """Split the source BAM into one output BAM per cluster in a single pass.

    :param deduplicatedClusterIDs: unique cluster IDs (one output file each)
    :param cellClusterDict: {cell barcode: cluster ID} assignment
    :param sourceFilePath: path to the indexed input .bam
    :param outputDir: directory (including trailing separator) for the
        cluster<ID>.bam output files
    """
    #creating file dictionary and opening source
    sourceFile = ps.AlignmentFile(sourceFilePath, "rb")
    clusterFileDict = {}
    #creating new files
    for cluster in deduplicatedClusterIDs:
        singleClusterFile = ps.AlignmentFile(outputDir+"cluster"+str(cluster)+".bam", "wb", template=sourceFile)
        clusterFileDict.update({cluster: singleClusterFile})
    #scanning source for lines and writing to single cluster files
    for read in sourceFile.fetch():
        # Bug fix: pysam tag names are the bare two-character SAM tag ('CB');
        # the ':Z' type suffix is not part of the name, so has_tag('CB:Z')
        # never matched and no reads were ever written. This also makes the
        # lookup consistent with writeClusterBam above.
        if read.has_tag('CB'):
            try:
                clusterID = cellClusterDict[read.get_tag('CB')]
            except KeyError:
                # Narrowed from a bare 'except:': only a missing cluster
                # assignment should be skipped silently.
                #uncomment next line if you wish to be made aware of Reads that don't have a cluster assigned to them.
                #print("Line with CB: "+read.get_tag('CB')+" does not have a cluster assignment and will be ignored.")
                continue
            clusterFileDict[clusterID].write(read)
    for file in clusterFileDict.values():
        file.close()
    sourceFile.close()
#uses a tsv file path as input and returns four values: three lists and a dict
#cellIDs and clusterIDs contain all IDs in the order they appear, combinedIDs is a list of two-element
#lists pairing the IDs that belong together, and cellClusterDict maps each cell ID to its cluster ID
#No longer in use, may be removed
def listifyTSV(tsvPath):
    """Parse a '<cell ID>\\t<cluster ID>' TSV into parallel lists plus a dict.

    Deprecated: main() now uses tsvToDict() instead.

    Returns (cellIDs, clusterIDs, combinedIDs, cellClusterDict); lines whose
    cluster field does not start with a digit are skipped entirely.
    """
    cellIDs = []
    clusterIDs = []
    combinedIDs = []
    cellClusterDict = {} #{CellID:ClusterID}
    IDs = open(tsvPath, "r")
    for line in IDs:
        left, right = line.split('\t')
        # Strip a trailing newline from the cluster field, if present.
        if right.endswith('\n'):
            right, garbage = right.split('\n')
        # Numeric cluster IDs are converted to int; anything else skips the line.
        if right[0].isdigit():
            right = int(right)
        else:
            continue
        cellIDs.append(left)
        clusterIDs.append(right)
        combinedIDs.append([left,right])
        cellClusterDict.update({left:right})
    IDs.close()
    return cellIDs,clusterIDs,combinedIDs,cellClusterDict
#returns a dictionary that contains {Cellbarcode:ClusterID}
def tsvToDict(tsvPath):
    """Parse a two-column TSV of '<cell barcode>\\t<cluster ID>' lines.

    :param tsvPath: path to the cluster-assignment .tsv file
    :return: (assignmentDict, clusterList) where assignmentDict maps each
        stripped barcode to its stripped cluster ID and clusterList holds the
        cluster IDs in file order (including duplicates).
    """
    assignmentDict = {}
    clusterList = []
    # Robustness fix: 'with' guarantees the file is closed even if a
    # malformed line makes split() raise (the original leaked the handle).
    with open(tsvPath, "r") as tsv:
        for line in tsv:
            barcode, clusterID = line.split("\t")
            clusterID = clusterID.strip()
            assignmentDict[barcode.strip()] = clusterID
            clusterList.append(clusterID)
    return assignmentDict, clusterList
#generates a text file that can be pasted into the TOBIAS snakemake pipline config file
def generateSnakemakeInput(outputDir, clusterIDs):
    """Append a TOBIAS-snakemake config snippet listing each cluster BAM file.

    NOTE: the file is opened in append mode ('a'), so re-running adds
    duplicate entries; delete snakemakeIn.txt first if a clean file is needed.

    Robustness fix: 'with' guarantees the handle is closed on any error
    (the original leaked it if a write failed).
    """
    with open(outputDir + "snakemakeIn.txt", "a") as f:
        for ID in clusterIDs:
            f.write(" cluster"+str(ID)+": ["+outputDir+"cluster"+str(ID)+".bam"+"]\n")
def main():
    """Command-line entry point: parse args, split the BAM per cluster, and
    emit the TOBIAS snakemake config snippet."""
    #Argparser
    aParser = argparse.ArgumentParser(description='This is a tool for splitting a .bam file into multiple .bam files.')
    aParser.add_argument('-Bam', '-b',dest='bam', nargs='?', help='Use this to set the input Path for the .bam file.',
                         default="inputWP3/testdata.bam")
    aParser.add_argument('-Tsv', '-t',dest='tsv', nargs='?', help='Use this to set the input Path for the .tsv file,'+
                         'containing the cluster Assingnments and cell barcodes.',
                         default="inputWP3/clusterIDs.tsv")
    aParser.add_argument('-Out', '-o',dest='outputDir', nargs='?', help='Use this to set the Path to a directory,'+
                         'that should be used to save this programs output.',
                         default="outputWP3/")
    args=aParser.parse_args()
    #read tsv file and convert it into lists (Deprecated)
    #cellIDs,clusterIDs,combinedIDs,cellClusterDict = listifyTSV(args.tsv)
    # Map barcodes to clusters, then split the BAM in a single pass.
    clusterDict, clusterIDs = tsvToDict(args.tsv)
    deduplicatedClusterIDs = deduplicateList(clusterIDs)
    writeClusters(deduplicatedClusterIDs, clusterDict, args.bam, args.outputDir)
    #generating file for TOBIAS Snakemake pipeline
    generateSnakemakeInput(args.outputDir, deduplicatedClusterIDs)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1653327 | <reponame>Haiiliin/PyAbaqus<filename>src/abaqus/PlotOptions/MdbDataInstance.py
class MdbDataInstance:
    """The MdbDataInstance object instance data. It corresponds to same named part instance
    with a mesh in the cae model.
    Attributes
    ----------
    name: str
        A String specifying the instance name. This attribute is read-only.
    Notes
    -----
    This object can be accessed by:
    .. code-block:: python
        import visualization
        session.mdbData[name].instances[i]
    """
    # A String specifying the instance name. This attribute is read-only.
    # Defaults to the empty string until populated by the session.
    name: str = ''
| StarcoderdataPython |
1648087 | <reponame>lanl/MPRAD
import numpy as np
from numba import jit
from matplotlib import pyplot as plt
import yt
from mpl_toolkits.axes_grid1 import AxesGrid
# Numba-jitted elementwise/reduction helpers used throughout this module.
# NOTE(review): cal_sum and cal_average omit nopython=True, unlike the other
# helpers -- presumably to allow numba's object-mode fallback for the axis/
# weights signatures; confirm this inconsistency is intentional.
@jit(nopython=True, nogil=True, cache=True, parallel=True)
def cal_div(x, y):
    # Elementwise division.
    return x / y
@jit(cache=True, parallel=True, nogil=True)
def cal_sum(x, axis):
    # Sum of x over the given axis (or axes tuple).
    return np.sum(x, axis)
@jit(cache=True, parallel=True, nogil=True)
def cal_average(x, weights):
    # Weighted average of x.
    return np.average(x, weights=weights)
@jit(nopython=True, nogil=True, cache=True, parallel=True)
def cal_square(x):
    # Elementwise square.
    return x**2
@jit(nopython=True, nogil=True, cache=True, parallel=True)
def cal_sqrt(x):
    # Elementwise square root.
    return np.sqrt(x)
@jit(nopython=True, nogil=True, cache=True, parallel=True)
def cal_minus(x, y):
    # Elementwise subtraction.
    return x - y
class data_prad():
    """Loads a proton-radiography histogram (.npz, key 'data', shape
    (xbin, ybin, n_energy_bins)) and exposes windowed views and moments."""
    def __init__(self, fn, xylim, erange, mag):
        self.data = np.load(fn)['data']
        xmin, xmax = xylim[0]
        ymin, ymax = xylim[1]
        xbin, ybin = self.data.shape[0:2]
        # NOTE(review): dy is computed from the x extent (xmax - xmin); this
        # looks like it should be (ymax - ymin) and is only harmless when the
        # two extents match. Same for the dx used in yrange below. Confirm.
        dx, dy = (xmax - xmin) / xbin, (xmax - xmin) / ybin
        if self.data.shape[2] != len(erange)-1:
            raise KeyError('Dimension for energy bin not consistent')
        # Bin-center coordinates, demagnified to object-plane microns.
        self.xrange = cal_div(np.linspace(xmin+dx*0.5, xmax-dx*0.5, xbin, endpoint=True), mag/1.0e4)
        self.yrange = cal_div(np.linspace(ymin+dx*0.5, ymax-dx*0.5, ybin, endpoint=True), mag/1.0e4)
        self.erange = np.array(erange)
    def get_data(self, xylim=[[-np.inf,np.inf],[-np.inf,np.inf]], elim=[-np.inf,np.inf], norm=1.0, \
                 vmax_list=None):
        """Return the data windowed to *xylim*/*elim* and divided by *norm*,
        plus the matching coordinate/energy grids (and vmax slice if given)."""
        i0_x, i1_x = np.searchsorted(self.xrange, xylim[0][0]), \
                     np.searchsorted(self.xrange, xylim[0][1])
        # NOTE(review): the y indices are searched in self.xrange, not
        # self.yrange -- correct only when both grids coincide. Confirm.
        i0_y, i1_y = np.searchsorted(self.xrange, xylim[1][0]), \
                     np.searchsorted(self.xrange, xylim[1][1])
        i0_e, i1_e = np.searchsorted(self.erange, elim[0]), \
                     np.searchsorted(self.erange, elim[1])
        data_img = cal_div(self.data[i0_x:i1_x, i0_y:i1_y, i0_e:i1_e], norm)
        data_xrange = self.xrange[i0_x:i1_x]
        data_yrange = self.yrange[i0_y:i1_y]
        data_erange = self.erange[i0_e:i1_e+1]
        if vmax_list is None:
            return data_img, data_xrange, data_yrange, data_erange
        else:
            return data_img, data_xrange, data_yrange, data_erange, vmax_list[i0_e:i1_e]
    def get_mo(self, xylim=[[-np.inf,np.inf],[-np.inf,np.inf]], elim=[-np.inf,np.inf], composite=False):
        """Return the count-weighted first moment <y> per energy bin (or a
        single composite moment over all bins) within the given window."""
        img, x_grid, y_grid, e_grid = self.get_data(xylim=xylim, elim=elim)
        if composite:
            # NOTE(review): leftover debug print -- consider removing.
            print(y_grid, cal_sum(img[:,:,:], axis=(0,2)))
            mo_y1 = cal_average(y_grid, cal_sum(img[:,:,:], axis=(0,2)))
            e_grid = np.array([e_grid[0], e_grid[-1]])
        else: mo_y1 = np.array([cal_average(\
            y_grid, cal_sum(img[:,:,i], axis=0)) for i in range(len(e_grid)-1)])
        return np.array(mo_y1), e_grid
def plot_prad(fn_in_base_list, fn_in_range, mag, xylim_in, erange_in, \
        xylim_out, elim_out, count_norm, vmax_list, fn_out, \
        title_doc, runs_doc, nrows, ncols, show=False,
        Limg=3.0, Lpadx=0.8, Lpady=0.2, Lfont=12, Lmargin=2.0, LfontTitle=20):
    """For each time step, draw one AxesGrid panel per run (stacked
    vertically) with one image per energy bin, then show or save the figure
    as <fn_out>_<time>.png."""
    n_panel = len(fn_in_base_list)
    for n_time in fn_in_range:
        # File index is in units of 0.1 ns.
        t = n_time/10.0
        plt.clf()
        fig = plt.figure(figsize=((Limg+Lpadx)*ncols+2.0*Lmargin,\
                (Limg+Lpady)*nrows*n_panel+2.0*Lmargin))
        plt.subplots_adjust(left=Lmargin/((Limg+Lpadx)*ncols+2.0*Lmargin), \
                right=1.0-Lmargin/((Limg+Lpadx)*ncols+2.0*Lmargin), \
                bottom=Lmargin/((Limg+Lpady)*nrows*n_panel+2.0*Lmargin), \
                top=1.0-Lmargin/((Limg+Lpady)*nrows*n_panel+2.0*Lmargin))
        #plt.rc('font', size=Lfont, family="Times New Roman")
        fig.text(0.1, 1.0 - 0.5*Lmargin/((Limg+Lpady)*nrows*2+2.0*Lmargin), \
                title_doc + '\n' + 'From top to Bottom: ' + ', '.join(runs_doc) + \
                "\n At t={0}ns".format(t), size=LfontTitle)
        for i_panel, fn_in_base in enumerate(fn_in_base_list):
            # subplot code n_panel-rows x 1-col, position i_panel+1.
            grid = AxesGrid(fig, int(n_panel*100+10+i_panel+1),
                    nrows_ncols=(nrows, ncols), axes_pad=(Lpadx, Lpady), \
                    label_mode="L", share_all=False, \
                    cbar_location="right", cbar_mode="each", \
                    cbar_size="10%", cbar_pad="0%",
                    cbar_set_cax=True,)
            ds = data_prad('{0}_{1:04d}.npz'.format(fn_in_base, n_time), xylim_in, erange_in, mag)
            data_img, x_grid, y_grid, erange, vmax_list1 = ds.get_data(xylim_out, elim_out, \
                    count_norm, vmax_list)
            # One image + colorbar per energy bin.
            for i in range(len(erange)-1):
                im = grid[i].imshow(data_img[:,:,i], vmax=vmax_list1[i], vmin=0.0, origin='low', \
                        cmap='bds_highcontrast', extent=(\
                        y_grid[0]*1.5-y_grid[1]*0.5, \
                        y_grid[-1]*1.5-y_grid[-2]*0.5, \
                        x_grid[0]*1.5-x_grid[1]*0.5, \
                        x_grid[-1]*1.5-x_grid[-2]*0.5),)
                cax = grid.cbar_axes[i]
                cax.colorbar(im)
                cax.toggle_label(True)
                cax.axis[cax.orientation].set_label("{0:.1f}MeV to {1:.1f}MeV".format(erange[i], erange[i+1]))
        if show: plt.show()
        else:
            fn_out_fig = "{0}_{1:04d}.png".format(fn_out, n_time)
            plt.savefig(fn_out_fig)
            print(fn_out_fig)
def plot_shift(fn_in_base_list, fn_in_range, mag, xylim_in, erange_in, xylim_out, elim_out, \
        fn_out, title_doc, runs_doc, nrows, ncols, plot_mark, y1_min, y1_max, tmin, tmax, \
        show=False, Limg=3.0, Lpadx=0.3, Lpady=0.5, Lfont=12, Lmargin=2.0, LfontTitle=20):
    """Plot the centroid shift of each run versus time: a legend panel plus
    one panel per energy bin, saved as <fn_out>.png unless *show* is set.

    (The final print statement was corrupted in the extracted sample and has
    been restored to plain code.)
    """
    # Collect <y> first moments for every time step and every run.
    data_t = []
    data_y1 = []
    for n_time in fn_in_range:
        data_t.append(n_time/10.0)
        data_y1.append([])
        for fn_in_base in fn_in_base_list:
            ds = data_prad('{0}_{1:04d}.npz'.format(fn_in_base, n_time), xylim_in, erange_in, mag)
            mo_y1, erange = ds.get_mo(xylim=xylim_out, elim=elim_out)
            data_y1[-1].append(mo_y1)
    data_t = np.array(data_t)
    # Shape: (time, run, energy bin). NOTE: erange from the last loop
    # iteration is reused below (all files share the same binning).
    data_y1 = np.array(data_y1)
    plt.clf()
    fig = plt.figure(figsize=((Limg+Lpadx)*ncols+2.0*Lmargin,\
            (Limg+Lpady)*nrows+2.0*Lmargin))
    plt.subplots_adjust(left=Lmargin/((Limg+Lpadx)*ncols+2.0*Lmargin), \
            right=1.0-Lmargin/((Limg+Lpadx)*ncols+2.0*Lmargin), \
            bottom=Lmargin/((Limg+Lpady)*nrows+2.0*Lmargin), \
            top=1.0-Lmargin/((Limg+Lpady)*nrows+2.0*Lmargin))
    #plt.rc('font', size=Lfont, family="Times New Roman")
    #fig.text(0.1, 1.0 - 0.5*Lmargin/((Limg+Lpady)*nrows*2+2.0*Lmargin), \
    #        title_doc, size=LfontTitle)
    grid = AxesGrid(fig, 111, # similar to subplot(144)
            nrows_ncols=(nrows, ncols),
            axes_pad=(Lpadx, Lpady),
            label_mode="L",
            share_all=False)
    # Panel 0 only carries the legend (one dummy line per run).
    for i_run in range(len(fn_in_base_list)):
        grid[0].plot(data_t, np.full_like(data_t, i_run*10.0), plot_mark[i_run], label=runs_doc[i_run])
    grid[0].legend()
    grid[0].set_title(r'${\langle}x{\rangle}$')
    # One panel per energy bin, all runs overplotted.
    for i in range(len(erange)-1):
        for i_run in range(len(fn_in_base_list)):
            grid[i+1].plot(data_t, data_y1[:,i_run,i], plot_mark[i_run])
        grid[i+1].set_title("{0:.1f}MeV to {1:.1f}MeV".format(erange[i], erange[i+1]))
    for img in grid:
        img.set_xlim(tmin, tmax)
        img.set_ylim(y1_min, y1_max)
        img.set_aspect((tmax-tmin)/(y1_max-y1_min))
        img.set_xlabel('t(ns)')
        img.set_ylabel(r'shift(${\mu}$m)')
    if show: plt.show()
    else:
        fn_out_fig = "{0}.png".format(fn_out)
        plt.savefig(fn_out_fig)
        print(fn_out_fig)
30645 | #!/usr/bin/env python
import sys
# NOTE(review): the string module is imported but never used.
import string
import re
# Hadoop-streaming mapper: for each CSV row on stdin, emit
# "<licence_type>\t<amount_due>".
for line in sys.stdin:
    # Rows containing double quotes need a quote-aware split: this regex
    # splits on commas only when they fall outside single/double quotes.
    if '"' in line:
        entry = re.split(''',(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', line)
    else:
        entry = line.split(",")
    # Assumes column 3 holds the licence type and the amount due sits six
    # columns from the end -- TODO confirm against the input schema.
    licence_type = entry[2]
    amount_due = entry[-6]
    print("%s\t%s" % (licence_type, amount_due))
| StarcoderdataPython |
3307086 | <reponame>theroyakash/AKDPRFramework<gh_stars>1-10
import av
class Extractor(object):
    """Extract still frames from a video file using PyAV."""

    def __init__(self):
        # Stateless; nothing to initialise.
        pass
    def extract_frames(self, path):
        '''
        Extract frames from a video from a given path.

        Lazily yields one ``PIL.Image`` per decoded frame of the first
        stream (``decode(0)`` — presumably the video stream; confirm for
        containers whose stream 0 is not video).

        Args:
            - path: path to the video
        Usage:
            - Import like this: ``from AKDPRFramework.video import process as p``
            - Then use the exractor class like: ``extractor = p.Extractor()``
            - Now mention the path in the ``extract_path`` method like: ``extractor.extract_frames(path='/path')``
        '''
        video = av.open(path)
        for frame in video.decode(0):
            yield frame.to_image()
4808562 | # Title: ex_real_data_errors.py
# Description: Testing online PSP algorithm population and batch error on artificially generated data
# Author: <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# Notes: Adapted from code by <NAME>
# Reference: None
# imports
from online_psp.online_psp_simulations import run_simulation
import os
import pylab as plt
import numpy as np
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# general parameters
# Which error curves run_simulation() should track and how often (every
# n_skip samples).
error_options = {
    'n_skip': 50,
    'compute_batch_error': True,
    'compute_population_error': True,
    'compute_reconstruction_error': False
}
# Data source: real datasets loaded from .mat files (filename is filled in
# per run below), scaled and shuffled.
generator_options = {
    'method': 'real_data',
    'scale_data': True,
    'shuffle' : True
}
# Simulation defaults; D (ambient dimension) comes from the data, K is the
# subspace dimension and is overridden per run.
simulation_options = {
    'D': None,
    'K': 16,
    'N': 'auto', # can set a number here, will select frames multiple times
    'N0': 0,
    'error_options': error_options,
    'pca_init': False,
    'init_ortho': True,
}
# Algorithm selection ('FSM', 'IPCA', 'CCIPCA'); filled in per run below.
algorithm_options = {
    'pca_algorithm': None,
}
def run_test(simulation_options=None, algorithm_options=None, generator_options=None):
    """Run a single PSP simulation and return its error dictionary."""
    return run_simulation(simulation_options, generator_options, algorithm_options)
def run_test_wrapper(params):
    """Repeat one simulation configuration and persist its batch errors.

    ``params`` is the 5-tuple (generator_options, simulation_options,
    algorithm_options, data_fold, n_repetitions).  Returns the
    (n_repetitions x n_checkpoints) array of batch subspace errors.
    """
    generator_options, simulation_options, algorithm_options, data_fold, n_repetitions = params
    # One row of batch errors per repetition.
    errs_batch = np.array([
        run_test(simulation_options, algorithm_options, generator_options)['batch_err']
        for _ in range(n_repetitions)
    ])
    K = simulation_options['K']
    filename = generator_options['filename']
    algo = algorithm_options['pca_algorithm']
    # Bundle the options with the results so a run can be identified from
    # the saved file alone.
    output_dict = {
        'generator_options': generator_options,
        'simulation_options': simulation_options,
        'algorithm_options': algorithm_options,
        'D': simulation_options['D'],
        'K': K,
        'N': simulation_options['N'],
        'filename': filename,
        'n_epoch': simulation_options['n_epoch'],
        'N0': 0,
        'batch_err': errs_batch,
    }
    save_name = os.path.join(data_fold,
                             '__'.join(['fname', filename.split('/')[-1], 'K', str(K), 'algo', algo]) + '.npz')
    print('Saving in:' + save_name)
    np.savez(save_name, **output_dict)
    return errs_batch
#####################
# parameters figure generation
rhos = np.logspace(-4, -0.5, 10) # controls SNR
rerun_simulation = True  # whether to rerun from scratch or just show the results
all_pars = []
gammas = [0.6, 1.5, 2]
# One figure row per K, one column per dataset; each gamma adds its curves
# on top of the same subplot grid.
for gamma_ in gammas:
    algorithm_options['gamma'] = gamma_
    data_fold = os.path.abspath('./real_data_learning_curves_gamma_' + str(gamma_))
    os.makedirs(data_fold, exist_ok=True)
    names = ['YaleB_32x32.mat','ATT_faces_112_92.mat', 'MNIST.mat']
    n_epochs = [10, 30, 1]
    Ks = [16, 64, 128]
    # Only the first gamma compares all three algorithms; the rest run FSM only.
    if gamma_ == gammas[0]:
        algos = ['FSM', 'IPCA', 'CCIPCA']
    else:
        algos = ['FSM']
    colors = ['b', 'r', 'g']
    n_repetitions = 10
    simulation_options['N'] = 'auto'
    for counter_K, K in enumerate(Ks):
        counter_name = 0
        for n_ep, name in zip(n_epochs, names):
            simulation_options['n_epoch'] = n_ep
            counter_name += 1
            generator_options['filename'] = '../datasets/' + name
            ax = plt.subplot(len(Ks), len(names), len(names) * counter_K + counter_name)
            for algo, algor in enumerate(algos):
                print(name)
                algorithm_options['pca_algorithm'] = algor
                print((name, K))
                simulation_options['K'] = K
                all_pars.append(
                    [generator_options.copy(), simulation_options.copy(), algorithm_options.copy(), data_fold[:],
                     n_repetitions])
                # Either recompute from scratch or reload a previous run's .npz.
                if rerun_simulation:
                    errs_batch = run_test_wrapper(all_pars[-1])
                    batch_err_avg = np.median(errs_batch, 0)
                    batch_err_avg = batch_err_avg[batch_err_avg > 0]
                else:
                    fname = os.path.join(data_fold, '__'.join(
                        ['fname', name, 'K', str(K), 'algo', algor]) + '.npz')
                    with np.load(fname) as ld:
                        batch_err_avg = np.median(ld['batch_err'][()], 0)
                        batch_err_avg = batch_err_avg[batch_err_avg > 0]
                if batch_err_avg is not None:
                    plt.title('k=' + str(K) + ',' + name)
                    line_bat, = ax.loglog(batch_err_avg.T)
                    line_bat.set_label('gamma_' + str(gamma_) + ',' + algos[algo])
                    plt.ylabel('subspace error')
            if batch_err_avg is not None:
                ax.legend()
plt.xlabel('sample (x n_skip)')
plt.savefig('./real_data.png')
plt.show()
| StarcoderdataPython |
168874 | import numpy as np
from scipy.spatial import Voronoi
from scipy.spatial import Delaunay
from ..graph import Graph
from ...core.utils import as_id_array
class VoronoiGraph(Graph):
    """Graph of a voronoi grid.

    Examples
    --------
    >>> from landlab.graph import VoronoiGraph
    """

    def __init__(self, nodes, **kwds):
        """Create a voronoi grid.

        Parameters
        ----------
        nodes : tuple of array_like
            Coordinates of every node. First *y*, then *x*.

        Examples
        --------
        >>> from landlab.graph import VoronoiGraph
        >>> node_x = [0, 1, 2,
        ...           1, 2, 3]
        >>> node_y = [0, 0, 0,
        ...           2, 2, 2]
        >>> graph = VoronoiGraph((node_y, node_x))
        >>> graph.x_of_node
        array([ 0.,  1.,  2.,  1.,  2.,  3.])
        >>> graph.y_of_node
        array([ 0.,  0.,  0.,  2.,  2.,  2.])
        >>> graph.nodes_at_link # doctest: +NORMALIZE_WHITESPACE
        array([[0, 1], [1, 2],
               [0, 3], [1, 3], [1, 4], [2, 4], [2, 5],
               [3, 4], [4, 5]])
        >>> graph.links_at_node # doctest: +NORMALIZE_WHITESPACE
        array([[ 0,  2, -1, -1], [ 1,  4,  3,  0], [ 6,  5,  1, -1],
               [ 7,  2,  3, -1], [ 8,  7,  4,  5], [ 8,  6, -1, -1]])
        >>> graph.links_at_patch # doctest: +NORMALIZE_WHITESPACE
        array([[3, 2, 0], [5, 4, 1], [4, 7, 3], [6, 8, 5]])
        >>> graph.nodes_at_patch # doctest: +NORMALIZE_WHITESPACE
        array([[3, 0, 1], [4, 1, 2], [4, 3, 1], [5, 4, 2]])
        """
        # xy_sort = kwds.pop('xy_sort', True)
        # rot_sort = kwds.pop('rot_sort', True)
        # Remaining entries of **kwds are ignored after this pop.
        max_node_spacing = kwds.pop('max_node_spacing', None)
        from .ext.delaunay import _setup_links_at_patch, remove_tris
        node_y, node_x = (np.asarray(nodes[0], dtype=float),
                          np.asarray(nodes[1], dtype=float))
        # Triangulate the point cloud; each simplex becomes a patch.
        delaunay = Delaunay(list(zip(node_x, node_y)))
        # nodes_at_patch = delaunay.simplices
        nodes_at_patch = np.array(delaunay.simplices, dtype=int)
        neighbors_at_patch = np.array(delaunay.neighbors, dtype=int)
        if max_node_spacing is not None:
            # NOTE(review): np.ptp over delaunay.simplices spans node *ids*,
            # not coordinates — this filters triangles whose node-id spread
            # exceeds max_node_spacing; confirm that is the intended measure.
            max_node_dist = np.ptp(delaunay.simplices, axis=1)
            bad_patches = as_id_array(np.where(max_node_dist >
                                               max_node_spacing)[0])
            if len(bad_patches) > 0:
                # Compact the kept triangles to the front, then truncate.
                remove_tris(nodes_at_patch, neighbors_at_patch, bad_patches)
                nodes_at_patch = nodes_at_patch[:-len(bad_patches), :]
                neighbors_at_patch = neighbors_at_patch[:-len(bad_patches), :]
        # Each triangle has 3 links; a link shared by two triangles is
        # counted once.
        n_patches = len(nodes_at_patch)
        n_shared_links = np.count_nonzero(neighbors_at_patch > -1)
        n_links = 3 * n_patches - n_shared_links // 2
        links_at_patch = np.empty((n_patches, 3), dtype=int)
        nodes_at_link = np.empty((n_links, 2), dtype=int)
        _setup_links_at_patch(nodes_at_patch,
                              neighbors_at_patch,
                              nodes_at_link, links_at_patch)
        super(VoronoiGraph, self).__init__((node_y.flat, node_x.flat),
                                           links=nodes_at_link,
                                           patches=links_at_patch)
| StarcoderdataPython |
160855 | from __future__ import division
import pywt
import numpy as np
import itertools as itt
from scipy.interpolate import interp1d
from functools import partial
from .common import *
class SimpleWaveletDensityEstimator(object):
    """Multivariate density estimator based on a tensor-product wavelet basis.

    Scaling (phi) coefficients are computed at level ``j0`` and detail (psi)
    coefficients at levels ``j0`` .. ``j1``; ``fit`` exposes the resulting
    density as the callable ``self.pdf``.
    """

    def __init__(self, wave_name, j0=1, j1=None, thresholding=None):
        """wave_name: pywt wavelet name; j0/j1: coarsest/finest resolution
        levels; thresholding: optional (n, j, num, coeff) -> coeff shrinker
        applied to detail coefficients (identity by default)."""
        self.wave = pywt.Wavelet(wave_name)
        self.j0 = j0
        self.j1 = j1 if j1 is not None else (j0 - 1)
        # Support intervals of the base and dual wavelet functions.  This
        # assignment was commented out, but set_wavefuns() (called from
        # fit()) dereferences self.multi_supports unconditionally, so fit()
        # raised AttributeError — restore it.
        self.multi_supports = wave_support_info(self.wave)
        self.pdf = None
        if thresholding is None:
            # Identity thresholding: keep every coefficient as-is.
            self.thresholding = lambda n, j, dn, c: c
        else:
            self.thresholding = thresholding

    def fit(self, xs):
        "Fit estimator to data. xs is a numpy array of dimension n x d, n = samples, d = dimensions"
        self.dim = xs.shape[1]
        self.dimpow = 2 ** self.dim
        self.set_wavefuns(self.dim)
        self.minx = np.amin(xs, axis=0)
        self.maxx = np.amax(xs, axis=0)
        self.n = xs.shape[0]
        self.calc_coefficients(xs)
        self.pdf = self.calc_pdf()
        return True

    def set_wavefuns(self, dim):
        """Build the tensor-product wavelet functions for the base and dual bases."""
        self.wave_funs = self.calc_wavefuns(dim, self.multi_supports['base'], self.wave)
        self.dual_wave_funs = self.calc_wavefuns(dim, self.multi_supports['dual'], self.wave)

    @staticmethod
    def calc_wavefuns(dim, supports, wave):
        """Return {qx-tuple: tensor wavelet}, one entry per phi/psi combination.

        Each entry carries its qx signature, its support box and a support
        indicator (``suppf``) as attributes.
        """
        resp = {}
        phi_support, psi_support = supports
        # Sample phi/psi finely and interpolate; zero outside the support.
        phi, psi, _ = wave.wavefun(level=12)
        phi = interp1d(np.linspace(*phi_support, num=len(phi)), phi, fill_value=0.0, bounds_error=False)
        psi = interp1d(np.linspace(*psi_support, num=len(psi)), psi, fill_value=0.0, bounds_error=False)
        for wave_x, qx in all_qx(dim):
            f = partial(wave_tensor, qx, phi, psi)
            f.qx = qx
            f.support = support_tensor(qx, phi_support, psi_support)
            f.suppf = partial(suppf_tensor, qx, phi_support, psi_support)
            resp[tuple(qx)] = f
        return resp

    def calc_coefficients(self, xs):
        """Compute alpha (level j0, pure-phi qx) and beta (detail) coefficients."""
        self.coeffs = {}
        self.nums = {}
        qxs = list(all_qx(self.dim))
        # qxs[0] is the all-phi combination -> scaling coefficients at j0.
        self.do_calculate_j(self.j0, qxs[0:1], xs)
        # Remaining combinations -> detail coefficients at each level.
        for j in range(self.j0, self.j1 + 1):
            self.do_calculate_j(j, qxs[1:], xs)

    def do_calculate_j(self, j, qxs, xs):
        """Fill self.coeffs/self.nums for level j and the given qx combinations."""
        jpow2 = 2 ** j
        if j not in self.coeffs:
            self.coeffs[j] = {}
            self.nums[j] = {}
        for ix, qx in qxs:
            wavef = self.wave_funs[qx]
            # Translation range covering the data's bounding box at level j.
            zs_min, zs_max = zs_range(wavef, self.minx, self.maxx, j)
            self.coeffs[j][qx] = {}
            self.nums[j][qx] = {}
            for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
                self.coeffs[j][qx][zs] = calc_coeff_simple(wavef, jpow2, zs, xs)
                # Number of samples falling inside this wavelet's support;
                # consumed by the thresholding rule.
                self.nums[j][qx][zs] = calc_num(wavef.suppf, jpow2, zs, xs)

    def get_betas(self, j):
        """All detail coefficients at level j (every non-scaling qx)."""
        return [coeff for ix, qx in list(all_qx(self.dim))[1:] for coeff in self.coeffs[j][qx].values()]

    def get_nums(self):
        """Support counts of every detail coefficient across all levels."""
        return [coeff
                for j in self.nums
                for ix, qx in list(all_qx(self.dim))[1:]
                for coeff in self.nums[j][qx].values()]

    def calc_pdf(self):
        """Build the density function from the fitted coefficients."""
        def pdffun_j(coords, xs_sum, j, qxs, threshold):
            jpow2 = 2 ** j
            for ix, qx in qxs:
                wavef = self.dual_wave_funs[qx]
                # .iteritems() is Python 2 only and raised AttributeError on
                # Python 3; .items() behaves identically here.
                for zs, coeff in self.coeffs[j][qx].items():
                    num = self.nums[j][qx][zs]
                    # Threshold detail coefficients only, not the scaling ones.
                    coeff_t = self.thresholding(self.n, j - self.j0, num, coeff) if threshold else coeff
                    vals = coeff_t * wavef(jpow2, zs, coords)
                    xs_sum += vals
        def pdffun(coords):
            xs_sum = np.zeros(coords[0].shape, dtype=np.float64)
            qxs = list(all_qx(self.dim))
            pdffun_j(coords, xs_sum, self.j0, qxs[0:1], False)
            for j in range(self.j0, self.j1 + 1):
                pdffun_j(coords, xs_sum, j, qxs[1:], True)
            return xs_sum
        return pdffun
| StarcoderdataPython |
3352179 | <reponame>sofiavegaz/Bringing-Old-Photos-Back-to-Life
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os.path
import io
import zipfile
from data.base_dataset import BaseDataset, get_params, get_transform, normalize
from data.image_folder import make_dataset
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
from data.Load_Bigfile import BigFileMemoryLoader
import random
import cv2
from io import BytesIO
def pil_to_np(img_PIL):
    '''Converts image in PIL format to np.array.

    From W x H x C [0...255] to C x W x H [0..1]
    '''
    arr = np.array(img_PIL)
    # Channels-last -> channels-first; grayscale gets a singleton channel.
    arr = arr.transpose(2, 0, 1) if arr.ndim == 3 else arr[None, ...]
    return arr.astype(np.float32) / 255.
def np_to_pil(img_np):
    '''Converts image in np.array format to PIL image.

    From C x W x H [0..1] to W x H x C [0...255]
    '''
    arr = np.clip(img_np * 255, 0, 255).astype(np.uint8)
    # Single channel -> 2-D grayscale; otherwise channels-first -> channels-last.
    arr = arr[0] if arr.shape[0] == 1 else arr.transpose(1, 2, 0)
    return Image.fromarray(arr)
def synthesize_salt_pepper(image, amount, salt_vs_pepper):
    ## Give PIL, return the noisy PIL
    arr = pil_to_np(image)
    noisy = arr.copy()
    # `flip_mask` selects corrupted pixels (probability `amount`); among
    # those, `salt_mask` chooses salt (white) over pepper (black).
    flip_mask = np.random.choice([True, False], size=arr.shape,
                                 p=[amount, 1 - amount])
    salt_mask = np.random.choice([True, False], size=arr.shape,
                                 p=[salt_vs_pepper, 1 - salt_vs_pepper])
    noisy[flip_mask & salt_mask] = 1
    noisy[flip_mask & ~salt_mask] = 0.
    return np_to_pil(np.clip(noisy, 0, 1).astype(np.float32))
def synthesize_gaussian(image, std_l, std_r):
    ## Give PIL, return the noisy PIL
    arr = pil_to_np(image)
    # Std is drawn uniformly from [std_l, std_r], expressed in 8-bit levels.
    sigma = random.uniform(std_l / 255., std_r / 255.)
    noise = np.random.normal(loc=0, scale=sigma, size=arr.shape)
    return np_to_pil(np.clip(arr + noise, 0, 1).astype(np.float32))
def synthesize_speckle(image, std_l, std_r):
    ## Give PIL, return the noisy PIL
    arr = pil_to_np(image)
    # Multiplicative (speckle) noise: each pixel is perturbed in proportion
    # to its own intensity.
    sigma = random.uniform(std_l / 255., std_r / 255.)
    noise = np.random.normal(loc=0, scale=sigma, size=arr.shape)
    return np_to_pil(np.clip(arr + noise * arr, 0, 1).astype(np.float32))
def synthesize_low_resolution(img):
    # Downscale to a random fraction of the original size, then upscale
    # back with a randomly chosen (cheap) filter — simulates a
    # low-resolution source.
    w, h = img.size
    small_w = random.randint(int(w / 2), w)
    small_h = random.randint(int(h / 2), h)
    shrunk = img.resize((small_w, small_h), Image.BICUBIC)
    up_filter = Image.NEAREST if random.uniform(0, 1) < 0.5 else Image.BILINEAR
    return shrunk.resize((w, h), up_filter)
def convertToJpeg(im, quality):
    # Encode to JPEG in memory and decode again, introducing real JPEG
    # compression artifacts at the given quality.
    with BytesIO() as buffer:
        im.save(buffer, format='JPEG', quality=quality)
        buffer.seek(0)
        return Image.open(buffer).convert('RGB')
def blur_image_v2(img):
    # Gaussian blur with a randomly chosen kernel size (3/5/7) and std in [1, 5].
    arr = np.array(img)
    ksize = random.sample([(3, 3), (5, 5), (7, 7)], 1)[0]
    sigma = random.uniform(1., 5.)
    #print("The gaussian kernel size: (%d,%d) std: %.2f"%(ksize[0],ksize[1],sigma))
    blurred = cv2.GaussianBlur(arr, ksize, sigma)
    return Image.fromarray(blurred.astype(np.uint8))
def online_add_degradation_v2(img):
    # Apply up to four degradations in a random order; each is applied
    # independently with probability 0.7.  The exact sequence of RNG calls
    # defines reproducibility under a fixed seed — do not reorder.
    task_id=np.random.permutation(4)
    for x in task_id:
        # 0: Gaussian blur
        if x==0 and random.uniform(0,1)<0.7:
            img = blur_image_v2(img)
        # 1: one of gaussian / speckle / salt-and-pepper noise
        if x==1 and random.uniform(0,1)<0.7:
            flag = random.choice([1, 2, 3])
            if flag == 1:
                img = synthesize_gaussian(img, 5, 50)
            if flag == 2:
                img = synthesize_speckle(img, 5, 50)
            if flag == 3:
                img = synthesize_salt_pepper(img, random.uniform(0, 0.01), random.uniform(0.3, 0.8))
        # 2: down/upscale resolution loss
        if x==2 and random.uniform(0,1)<0.7:
            img=synthesize_low_resolution(img)
        # 3: JPEG compression artifacts (quality 40-100)
        if x==3 and random.uniform(0,1)<0.7:
            img=convertToJpeg(img,random.randint(40,100))
    return img
def irregular_hole_synthesize(img, mask):
    # Paint the masked region of `img` white (255); returns the holed image
    # and the mask converted to single-channel ("L").
    img_arr = np.array(img).astype('uint8')
    hole = np.array(mask).astype('uint8') / 255
    composed = img_arr * (1 - hole) + hole * 255
    hole_img = Image.fromarray(composed.astype('uint8')).convert("RGB")
    return hole_img, mask.convert("L")
def zero_mask(size):
    # An all-black (no hole) square RGB mask of the given side length.
    blank = np.zeros((size, size, 3)).astype('uint8')
    return Image.fromarray(blank).convert("RGB")
class UnPairOldPhotos_SR(BaseDataset): ## Synthetic + Real Old
    """Unpaired dataset mixing real old photos (L and RGB) with
    synthetically degraded clean VOC images.

    For 'domainA' runs, __getitem__ round-robins over the three sources via
    self.switch (1: real L, 2: real RGB, 3/4: degraded clean); otherwise it
    serves only degraded clean images.
    """

    def initialize(self, opt):
        self.opt = opt
        self.isImage = 'domainA' in opt.name
        self.task = 'old_photo_restoration_training_vae'
        self.dir_AB = opt.dataroot
        # Round-robin state for source selection in __getitem__.
        self.switch = 1
        if self.isImage:
            self.load_img_dir_L_old=os.path.join(self.dir_AB,"Real_L_old.bigfile")
            self.load_img_dir_RGB_old=os.path.join(self.dir_AB,"Real_RGB_old.bigfile")
            self.load_img_dir_clean=os.path.join(self.dir_AB,"VOC_RGB_JPEGImages.bigfile")
            self.loaded_imgs_L_old=BigFileMemoryLoader(self.load_img_dir_L_old)
            self.loaded_imgs_RGB_old=BigFileMemoryLoader(self.load_img_dir_RGB_old)
            self.loaded_imgs_clean=BigFileMemoryLoader(self.load_img_dir_clean)
        else:
            # self.load_img_dir_clean=os.path.join(self.dir_AB,self.opt.test_dataset)
            self.load_img_dir_clean=os.path.join(self.dir_AB,"VOC_RGB_JPEGImages.bigfile")
            self.loaded_imgs_clean=BigFileMemoryLoader(self.load_img_dir_clean)
        ####
        print("-------------Filter the imgs whose size <256 in VOC-------------")
        self.filtered_imgs_clean=[]
        for i in range(len(self.loaded_imgs_clean)):
            img_name,img=self.loaded_imgs_clean[i]
            # NOTE(review): PIL's Image.size is (width, height); the names
            # here are swapped, but the symmetric <256 test is unaffected.
            h,w=img.size
            if h<256 or w<256:
                continue
            self.filtered_imgs_clean.append((img_name,img))
        print("--------Origin image num is [%d], filtered result is [%d]--------" % (
        len(self.loaded_imgs_clean), len(self.filtered_imgs_clean)))
        ## Filter these images whose size is less than 256
        # self.img_list=os.listdir(load_img_dir)
        self.pid = os.getpid()
    def __getitem__(self, index):
        is_real_old=0
        sampled_dataset=None
        degradation=None
        if self.isImage: ## domain A , contains 2 kinds of data: synthetic + real_old
            # NOTE(review): P is never used below — leftover variable.
            P=random.uniform(0,2)
            if self.switch == 1:
                sampled_dataset=self.loaded_imgs_L_old
                self.load_img_dir=self.load_img_dir_L_old
                self.switch = 2
                is_real_old=1
            elif self.switch == 2:
                sampled_dataset=self.loaded_imgs_RGB_old
                self.load_img_dir=self.load_img_dir_RGB_old
                self.switch = 3
                is_real_old=1
            else:
                # switch 3 and 4 both serve degraded clean images, so clean
                # data appears twice per 4-step cycle.
                sampled_dataset=self.filtered_imgs_clean
                self.load_img_dir=self.load_img_dir_clean
                degradation=1
                if self.switch == 3:
                    self.switch = 4
                else:
                    self.switch = 1
        else:
            sampled_dataset=self.filtered_imgs_clean
            self.load_img_dir=self.load_img_dir_clean
        # The incoming index is discarded; sampling is uniformly random.
        sampled_dataset_len=len(sampled_dataset)
        index=random.randint(0,sampled_dataset_len-1)
        img_name,img = sampled_dataset[index]
        if degradation is not None:
            img=online_add_degradation_v2(img)
        path=os.path.join(self.load_img_dir,img_name)
        # AB = Image.open(path).convert('RGB')
        # split AB image into A and B
        # apply the same transform to both A and B
        # With probability 0.1, collapse to grayscale (kept as 3-channel RGB).
        if random.uniform(0,1) <0.1:
            img=img.convert("L")
            img=img.convert("RGB")
        ## Give a probability P, we convert the RGB image into L
        A=img
        w,h=A.size
        if w<256 or h<256:
            A=transforms.Scale(256,Image.BICUBIC)(A)
        ## Since we want to only crop the images (256*256), for those old photos whose size is smaller than 256, we first resize them.
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params)
        B_tensor = inst_tensor = feat_tensor = 0
        A_tensor = A_transform(A)
        # 'inst' carries the real-vs-synthetic flag for this sample.
        input_dict = {'label': A_tensor, 'inst': is_real_old, 'image': A_tensor,
                      'feat': feat_tensor, 'path': path}
        return input_dict
    def __len__(self):
        return len(self.loaded_imgs_clean) ## actually, this is useless, since the selected index is just a random number
    def name(self):
        return 'UnPairOldPhotos_SR'
class PairOldPhotos(BaseDataset):
    """Paired dataset: degraded input A and clean ground truth B.

    Training pairs are produced by degrading clean VOC images on the fly;
    test pairs come from a prepared bigfile (optionally re-degraded when
    opt.test_on_synthetic is set).
    """

    def initialize(self, opt):
        self.opt = opt
        self.isImage = 'imagegan' in opt.name
        self.task = 'old_photo_restoration_training_mapping'
        self.dir_AB = opt.dataroot
        if opt.isTrain:
            self.load_img_dir_clean= os.path.join(self.dir_AB, "VOC_RGB_JPEGImages.bigfile")
            self.loaded_imgs_clean = BigFileMemoryLoader(self.load_img_dir_clean)
            print("-------------Filter the imgs whose size <256 in VOC-------------")
            self.filtered_imgs_clean = []
            for i in range(len(self.loaded_imgs_clean)):
                img_name, img = self.loaded_imgs_clean[i]
                # NOTE(review): PIL size is (width, height); names swapped
                # but the symmetric <256 filter is unaffected.
                h, w = img.size
                if h < 256 or w < 256:
                    continue
                self.filtered_imgs_clean.append((img_name, img))
            print("--------Origin image num is [%d], filtered result is [%d]--------" % (
                len(self.loaded_imgs_clean), len(self.filtered_imgs_clean)))
        else:
            self.load_img_dir=os.path.join(self.dir_AB,opt.test_dataset)
            self.loaded_imgs=BigFileMemoryLoader(self.load_img_dir)
        self.pid = os.getpid()
    def __getitem__(self, index):
        if self.opt.isTrain:
            img_name_clean,B = self.filtered_imgs_clean[index]
            path = os.path.join(self.load_img_dir_clean, img_name_clean)
            # NOTE(review): when use_v2_degradation is False, A is never
            # assigned on this branch and the code below raises NameError —
            # presumably the option is always True in training; confirm.
            if self.opt.use_v2_degradation:
                A=online_add_degradation_v2(B)
            ### Remind: A is the input and B is corresponding GT
        else:
            if self.opt.test_on_synthetic:
                img_name_B,B=self.loaded_imgs[index]
                A=online_add_degradation_v2(B)
                img_name_A=img_name_B
                path = os.path.join(self.load_img_dir, img_name_A)
            else:
                img_name_A,A=self.loaded_imgs[index]
                img_name_B,B=self.loaded_imgs[index]
                path = os.path.join(self.load_img_dir, img_name_A)
        # With probability 0.1 (training only), collapse both images to
        # grayscale, kept as 3-channel RGB.
        if random.uniform(0,1)<0.1 and self.opt.isTrain:
            A=A.convert("L")
            B=B.convert("L")
            A=A.convert("RGB")
            B=B.convert("RGB")
            ## In P, we convert the RGB into L
        ##test on L
        # split AB image into A and B
        # w, h = img.size
        # w2 = int(w / 2)
        # A = img.crop((0, 0, w2, h))
        # B = img.crop((w2, 0, w, h))
        # Upscale small images so the later 256x256 crop always fits.
        w,h=A.size
        if w<256 or h<256:
            A=transforms.Scale(256,Image.BICUBIC)(A)
            B=transforms.Scale(256, Image.BICUBIC)(B)
        # apply the same transform to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params)
        B_transform = get_transform(self.opt, transform_params)
        B_tensor = inst_tensor = feat_tensor = 0
        A_tensor = A_transform(A)
        B_tensor = B_transform(B)
        input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                      'feat': feat_tensor, 'path': path}
        return input_dict
    def __len__(self):
        if self.opt.isTrain:
            return len(self.filtered_imgs_clean)
        else:
            return len(self.loaded_imgs)
    def name(self):
        return 'PairOldPhotos'
class PairOldPhotos_with_hole(BaseDataset):
    """Paired dataset like PairOldPhotos, but additionally punches an
    irregular hole (white region + mask) into the degraded input A for
    inpainting training."""

    def initialize(self, opt):
        self.opt = opt
        self.isImage = 'imagegan' in opt.name
        self.task = 'old_photo_restoration_training_mapping'
        self.dir_AB = opt.dataroot
        if opt.isTrain:
            self.load_img_dir_clean= os.path.join(self.dir_AB, "VOC_RGB_JPEGImages.bigfile")
            self.loaded_imgs_clean = BigFileMemoryLoader(self.load_img_dir_clean)
            print("-------------Filter the imgs whose size <256 in VOC-------------")
            self.filtered_imgs_clean = []
            for i in range(len(self.loaded_imgs_clean)):
                img_name, img = self.loaded_imgs_clean[i]
                # NOTE(review): PIL size is (width, height); names swapped
                # but the symmetric <256 filter is unaffected.
                h, w = img.size
                if h < 256 or w < 256:
                    continue
                self.filtered_imgs_clean.append((img_name, img))
            print("--------Origin image num is [%d], filtered result is [%d]--------" % (
                len(self.loaded_imgs_clean), len(self.filtered_imgs_clean)))
        else:
            self.load_img_dir=os.path.join(self.dir_AB,opt.test_dataset)
            self.loaded_imgs=BigFileMemoryLoader(self.load_img_dir)
        # Pool of irregular hole masks, sampled per item below.
        self.loaded_masks = BigFileMemoryLoader(opt.irregular_mask)
        self.pid = os.getpid()
    def __getitem__(self, index):
        if self.opt.isTrain:
            img_name_clean,B = self.filtered_imgs_clean[index]
            path = os.path.join(self.load_img_dir_clean, img_name_clean)
            B=transforms.RandomCrop(256)(B)
            A=online_add_degradation_v2(B)
            ### Remind: A is the input and B is corresponding GT
        else:
            img_name_A,A=self.loaded_imgs[index]
            img_name_B,B=self.loaded_imgs[index]
            path = os.path.join(self.load_img_dir, img_name_A)
            #A=A.resize((256,256))
            A=transforms.CenterCrop(256)(A)
            B=A
        # With probability 0.1 (training only), collapse both images to
        # grayscale, kept as 3-channel RGB.
        if random.uniform(0,1)<0.1 and self.opt.isTrain:
            A=A.convert("L")
            B=B.convert("L")
            A=A.convert("RGB")
            B=B.convert("RGB")
            ## In P, we convert the RGB into L
        # Random mask when training; deterministic (first 100 masks) at test.
        if self.opt.isTrain:
            mask_name,mask=self.loaded_masks[random.randint(0,len(self.loaded_masks)-1)]
        else:
            mask_name, mask = self.loaded_masks[index%100]
        mask = mask.resize((self.opt.loadSize, self.opt.loadSize), Image.NEAREST)
        # Optionally replace the mask with an empty (no-hole) one.
        if self.opt.random_hole and random.uniform(0,1)>0.5 and self.opt.isTrain:
            mask=zero_mask(256)
        if self.opt.no_hole:
            mask=zero_mask(256)
        A,_=irregular_hole_synthesize(A,mask)
        if not self.opt.isTrain and self.opt.hole_image_no_mask:
            mask=zero_mask(256)
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params)
        B_transform = get_transform(self.opt, transform_params)
        # Keep the mask aligned with a horizontally flipped image pair.
        if transform_params['flip'] and self.opt.isTrain:
            mask=mask.transpose(Image.FLIP_LEFT_RIGHT)
        mask_tensor = transforms.ToTensor()(mask)
        B_tensor = inst_tensor = feat_tensor = 0
        A_tensor = A_transform(A)
        B_tensor = B_transform(B)
        # 'inst' carries the single-channel hole mask.
        input_dict = {'label': A_tensor, 'inst': mask_tensor[:1], 'image': B_tensor,
                      'feat': feat_tensor, 'path': path}
        return input_dict
    def __len__(self):
        if self.opt.isTrain:
            return len(self.filtered_imgs_clean)
        else:
            return len(self.loaded_imgs)
    def name(self):
        return 'PairOldPhotos_with_hole'
| StarcoderdataPython |
1723428 | """
Double-entry Bookkeeping System
Copyright (c) 2021 <NAME>
MIT License
"""
from collections import namedtuple
from urllib.parse import parse_qs
from datetime import date
from html import escape
import os
try:
from pysqlcipher3 import dbapi2 as sqlite3
except ImportError:
import sqlite3
from math import ceil
# Currency formatting used by cur2int()/int2cur().
THOUSAND_SEP=" "
DECIMAL_SEP=","
# Transactions shown per page on an account statement.
LIMIT=100
# Account types: (code stored in accts.type, human-readable label).
ATYPES=[("E","Equity"),("A","Assets"),("L","Liabilities"),("i","Income"),("e","Expenses")]
STYLE="""
body { background-color: #fff1e5; }
div.center { text-align: center; }
div.indent { margin-left: 5ch; }
form.inline { display: inline; }
input.comm { width: 75%; }
input.w2 { width: 2ch; }
input.w4 { width: 4ch; }
input.w12 { width: 12ch; }
span.atype { color: #b0b0b0; }
table { border-spacing: 0; }
table td { padding: 2px; }
table.center { margin: auto; }
table.full { width: 100%; }
td.r { text-align: right; }
th.date,td.date { width: 15%; text-align: left; }
th.dr,td.dr { width: 10%; text-align: right; }
th.cr,td.cr { width: 10%; text-align: right; }
th.bal,td.bal { width: 15%; text-align: right; }
th.opp,td.opp { width: 20%; text-align: right; }
th.comm,td.comm { text-align: center; }
tr.line { white-space: nowrap; }
tr.sep td { border-top: 2px solid #e0e0e0; }
tr.sep_month td { border-top: 2px solid #b0b0b0; }
tr.sep_year td { border-top: 2px solid #808080; }
tr.sep_tot td { border-top: 2px solid #b0b0b0; }
"""
# A named tuple for storing HTML response components: status line
# ("200 OK"), list of (header, value) pairs, and the body string.
HTMLResponse=namedtuple("HTMLResponse",["status","headers","body"])
class BadInput(Exception):
    """Invalid user input; reported as 400 Bad Request by application()."""
def application(environ,start_response):
    """WSGI entry point: open the database, dispatch on PATH_INFO, and
    translate errors into plain-text HTTP error responses."""
    try:
        # connect to the database
        cnx=None
        if "DB" in os.environ:
            # try OS environment
            db=os.environ["DB"]
        elif "DB" in environ:
            # try request environment
            db=environ["DB"]
        else:
            raise sqlite3.Error("No file given")
        if not os.path.exists(db):
            raise sqlite3.Error("File does not exist")
        cnx=sqlite3.connect(db)
        cnx.isolation_level=None # we manage transactions explicitly
        crs=cnx.cursor()
        crs.execute("BEGIN") # execute each request in a transaction
        # main selector
        p=environ["PATH_INFO"]
        qs=environ.get("QUERY_STRING")
        # `with cnx` commits on success and rolls back on exception.
        with cnx:
            if p=="/ask_dbkey":
                r=ask_dbkey()
            elif p=="/set_dbkey":
                application.dbkey=get_dbkey(environ)
                r=HTMLResponse("303 See Other",[("Location",".")],"")
            elif p=="/clr_dbkey":
                application.dbkey=None
                r=HTMLResponse("303 See Other",[("Location","ask_dbkey")],"")
            # Any other page requires a working key first.
            elif not valid_dbkey(crs,application.dbkey):
                r=HTMLResponse("303 See Other",[("Location","ask_dbkey")],"")
            elif p=="/":
                r=main(crs)
            elif p=="/acct":
                r=acct(crs,qs)
            elif p=="/ins_xact":
                r=ins_xact(crs,environ)
            elif p=="/del_xact":
                r=del_xact(crs,environ)
            elif p=="/creat_acct":
                r=creat_acct(crs,environ)
            elif p=="/close_acct":
                r=close_acct(crs,environ)
            else:
                raise ValueError("Wrong access")
    except sqlite3.Error as e:
        r=HTMLResponse("500 Internal Server Error",[("Content-type","text/plain")],"Database error: {}".format(e))
    except ValueError as e:
        r=HTMLResponse("400 Bad Request",[("Content-type","text/plain")],"{}".format(e))
    except KeyError as e:
        r=HTMLResponse("400 Bad Request",[("Content-type","text/plain")],"Parameter expected: {}".format(e))
    except BadInput as e:
        r=HTMLResponse("400 Bad Request",[("Content-type","text/plain")],"Error: {}".format(e))
    if cnx:
        cnx.close()
    start_response(r.status,r.headers+[("Cache-Control","max-age=0")])
    return [r.body.encode()]
# database key, shared across requests as a function attribute
application.dbkey=None
def ask_dbkey():
    """Render the form asking for a database key (posts to /set_dbkey)."""
    b="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Double-entry Bookkeeping System</title>
</head>
<body>
<form action=set_dbkey method=post>
Key: <input type=password name=dbkey> <input type=submit>
</form>
</body>
</html>
"""
    # return success
    return HTMLResponse("200 OK",[("Content-type","text/html")],b)
def get_dbkey(environ):
    """get a database key submitted in a POST query"""
    form = parse_qs(environ["wsgi.input"].readline().decode())
    if "dbkey" not in form:
        return None
    # sanitize: keep hexadecimal digits only
    return "".join(c for c in form["dbkey"][0] if c in "0123456789abcdefABCDEF")
def valid_dbkey(crs,key):
    """Return True when the cursor can read the schema (after applying
    the SQLCipher key, if one is given)."""
    statements = []
    if key is not None:
        statements.append("PRAGMA key=\"x'{}'\"".format(key))
    # A schema read fails on a wrong key or an unusable connection.
    statements.append("SELECT COUNT(*) FROM sqlite_master")
    try:
        for stmt in statements:
            crs.execute(stmt)
    except sqlite3.Error:
        return False
    return True
def cur2int(s):
    """Convert a currency string to an integer amount of cents.

    Spaces (thousand separators) are ignored; both "," and "." are accepted
    as the decimal separator; at most two fractional digits are kept and a
    missing fraction counts as ".00".
    """
    s=s.replace(" ","") # drop spaces
    s=s.replace(",",DECIMAL_SEP) # always accept "," as a decimal separator
    s=s.replace(".",DECIMAL_SEP) # always accept "." as a decimal separator
    r=""
    i=0
    for i,c in enumerate(s):
        # Compare with == rather than "is": "is" on one-character strings
        # only works because of CPython string interning, which is an
        # implementation detail and must not be relied on.
        if c==DECIMAL_SEP:
            break
        r=r+c
    # Take up to two digits after the separator, right-padded with zeros.
    return int(r+s[i+1:i+3].ljust(2,"0"))
def int2cur(v):
    """convert integer to currency string"""
    # Walk the digits least-significant first; position 3 is where the
    # decimal separator goes, every later multiple of 3 a thousands group.
    digits = str(abs(v))[::-1].ljust(3, "0")
    out = ""
    for pos, ch in enumerate(digits, 1):
        if pos == 3:
            out = DECIMAL_SEP + out
        elif pos % 3 == 0:
            out = THOUSAND_SEP + out
        out = ch + out
    if v < 0:
        out = "−" + out
    return out
def res(crs):
    """return the only result of a transaction"""
    row = crs.fetchone()
    return row[0]
def balance(crs, aid):
    """return the current balance of account aid"""
    # The latest transaction row carries the running balance.
    crs.execute("SELECT max(xid) FROM xacts WHERE aid=?", [aid])
    latest = res(crs)
    if latest is None:
        # no transactions yet
        return 0
    crs.execute("SELECT bal FROM xacts WHERE xid=? and aid=?", [latest, aid])
    return int(res(crs))
def new_balance(atype, bal, dr, cr):
    """compute the new balance after transaction"""
    # Assets/expenses grow on debit; Equity/Liabilities/income on credit.
    if atype in ("A", "e"):
        return bal + dr - cr
    if atype in ("E", "L", "i"):
        return bal + cr - dr
    raise ValueError("Bad account type")
def main(crs):
    """Render the main page: per-type account balances, the new-account
    form and the list of closed accounts."""
    # header
    b="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="format-detection" content="telephone=no">
<style>
{}
</style>
<title>Double-entry Bookkeeping System</title>
</head>
""".format(STYLE)
    # body
    b+="""
<body>
"""
    # accounts: one table per account type with a running total
    totals={}
    for atc,atn in ATYPES:
        b+="""
<strong>{}</strong>
<div class=indent>
<table>
""".format(atn)
        totals[atc]=0
        # cdt=0 selects open (not-yet-closed) accounts.
        crs.execute("SELECT aid,name FROM accts WHERE type=? AND cdt=0 ORDER BY name",[atc])
        for (aid,name) in crs.fetchall():
            bal=balance(crs,aid)
            totals[atc]+=bal
            b+="""
<tr>
<td><a href="acct?aid={}">{}</a></td>
<td class=r> {}</td>
</tr>
""".format(aid,name,int2cur(bal))
        b+="""
<tr class=sep_tot>
<td>Total</td>
<td class=r> {}</td>
</tr>
<tr><td colspan=2> </td></tr>
</table>
</div>
""".format(int2cur(totals[atc]))
    # verify accounting equation: E + L + i must equal A + e, otherwise the
    # books are corrupt and we abort with a database error.
    d=0
    for atc in ("E","L","i"):
        d+=totals[atc]
    for atc in ("A","e"):
        d-=totals[atc]
    if d!=0:
        raise sqlite3.Error("Accounting equation doesn't hold")
    # new account form (posts to /creat_acct)
    b+="""
<hr>
<form action=creat_acct method=post>
New account
<select name=atype>
<option value=""> </option>
"""
    for atc,atn in ATYPES:
        b+="""
<option value="{}">{}</option>
""".format(atc,atn)
    b+="""
</select>
<input type=text name=aname>
<input type=submit value=Create>
</form>
"""
    # closed accounts (cdt<>0), grouped by type
    b+="""
<hr>
<h3>Closed accounts</h3>
"""
    for atc,atn in ATYPES:
        b+="""
<strong>{}</strong>
<div class=indent>
""".format(atn)
        crs.execute("SELECT aid,name FROM accts WHERE type=? AND cdt<>0 ORDER BY name",[atc])
        for aid,name in crs:
            b+="""
<a href="acct?aid={}">{}</a><br>
""".format(aid,name)
        b+="""
</div>
"""
    # show clear key link only when a session key is set
    if application.dbkey is not None:
        b+="""
<hr>
<a href="clr_dbkey">Close session</a>
"""
    # cellar
    b+="""
</body>
</html>
"""
    # return success
    return HTMLResponse("200 OK",[("Content-type","text/html")],b)
def acct(crs,qs):
    """show account statement page

    Renders a single account's statement: a paginated transaction table
    (newest first), a new-transaction form for open accounts, page links,
    and a close-account button when the balance is zero.

    :param crs: open sqlite3 cursor
    :param qs: raw query string carrying 'aid' and optional 'page'
    :return: HTMLResponse with status 200 and the rendered page
    :raises ValueError: on a missing or unknown 'aid'
    """
    # get arguments
    q=parse_qs(qs,keep_blank_values=True)
    # get and check aid
    try:
        aid=q["aid"][0]
    except KeyError as e:
        raise ValueError("Wrong access") from e
    crs.execute("SELECT COUNT(*) FROM accts WHERE aid=?",[aid])
    if res(crs)==0:
        raise ValueError("Bad aid")
    # get and check the page number
    crs.execute("SELECT COUNT(*) FROM xacts WHERE aid=?",[aid])
    lastpage=ceil(res(crs)/LIMIT)
    if lastpage==0:
        lastpage=1
    try:
        page=int(q["page"][0])
        if page<1 or page>lastpage:
            raise ValueError
    except (KeyError,ValueError):
        # Any absent or invalid page number silently falls back to page 1.
        page=1
    # get commonly used account properties
    crs.execute("SELECT name,cdt FROM accts WHERE aid=?",[aid])
    aname,cdt=crs.fetchone()
    bal=balance(crs,aid)
    crs.execute("SELECT MAX(dt) FROM xacts WHERE aid=?",[aid])
    maxdt=res(crs)
    if maxdt is None:
        maxdt=0
    # NOTE(review): maxdt is computed but never used below — candidate for removal.
    crs.execute("SELECT MAX(xid) FROM xacts WHERE aid=?",[aid])
    maxxid=res(crs)
    if maxxid is None:
        maxxid=0
    # header
    b="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="format-detection" content="telephone=no">
<style>
{}
</style>
<title>Double-entry Bookkeeping System</title>
</head>
""".format(STYLE)
    # body
    b+="""
<body>
<div class=center>
<h2>{}</h2>
</div>
<a href=".">Back to list</a>
<hr>
""".format(aname)
    # transactions
    b+="""
<table class=full>
<tr class=line>
<th class=date>Date</th>
<th class=dr>Dr</th>
<th class=cr>Cr</th>
<th class=bal>Balance</th>
<th class=opp>Opposing account</th>
<th class=comm>Comment</th>
</tr>
"""
    # new transaction
    # The entry form is only offered for open accounts (cdt==0),
    # pre-filled with today's date.
    if cdt==0:
        d=date.today()
        yyyy=d.year
        mm=d.month
        dd=d.day
        b+="""
<tr class=line><td colspan=6>
<form action=ins_xact method=post>
<table class=full>
<tr class=line>
<td class=date>
<input type=text name=yyyy size=4 maxlength=4 class=w4 value="{}">
<input type=text name=mm size=2 maxlength=2 class=w2 value="{}">
<input type=text name=dd size=2 maxlength=2 class=w2 value="{}">
</td>
<td class=dr><input type=text size=12 class=w12 name=dr></td>
<td class=cr><input type=text size=12 class=w12 name=cr></td>
<td class=bal><input type=text size=12 class=w12 name=newbal></td>
<td class=opp>
<input type=hidden name=aid value="{}">
<select name=oaid>
<option value="-1"> </option>
""".format(yyyy,mm,dd,aid)
        for atc,atn in ATYPES:
            b+="""
<optgroup label="{}">
""".format(atn)
            crs.execute("SELECT aid,name FROM accts WHERE type=? AND cdt=0 ORDER BY name",[atc])
            opts=crs.fetchall()
            for oaid,oaname in opts:
                b+="""
<option value="{}">{}</option>
""".format(oaid,oaname)
            if len(opts)==0:
                b+="""
<option> </option>
"""
            b+="""
</optgroup>
"""
        b+="""
</select>
</td>
<td class=comm>
<input type=text name=comment size=20 class=comm maxlength=255>
<input type=submit value=Insert>
</td>
</tr>
</table>
</form>
</td></tr>
"""
    # past transactions
    # prev_year/prev_month drive the CSS separator class between rows.
    prev_year=None
    prev_month=None
    crs.execute("SELECT * FROM xacts WHERE aid=? ORDER BY xid DESC LIMIT ? OFFSET ?",
                [aid,LIMIT,(page-1)*LIMIT])
    for (xid,dt,aid,oaid,dr,cr,x_bal,comment) in crs.fetchall():
        dt_d=date.fromordinal(dt)
        dr=int2cur(int(dr)) if dr!="0" else ""
        cr=int2cur(int(cr)) if cr!="0" else ""
        x_bal=int2cur(int(x_bal))
        x_year=dt_d.year
        x_month=dt_d.month
        if prev_year is None and prev_month is None:
            sep_class=""
        elif x_year!=prev_year:
            sep_class="sep_year"
        elif x_month!=prev_month:
            sep_class="sep_month"
        else:
            sep_class="sep"
        prev_year=x_year
        prev_month=x_month
        crs.execute("SELECT type,name FROM accts WHERE aid=?",[oaid])
        oatype,oaname=crs.fetchone()
        b+="""
<tr class="line {}">
<td class=date>{}</td>
<td class=dr>{}</td>
<td class=cr>{}</td>
<td class=bal>{}</td>
<td class=opp><span class=atype>{}</span> {}</td>
<td class=comm> <small>{}</small>
""".format(sep_class,dt_d,dr,cr,x_bal,oatype,oaname,comment)
        # we can delete the transaction if it is the last one for both aid and oaid
        if xid==maxxid:
            crs.execute("SELECT MAX(xid) FROM xacts WHERE aid=?",[oaid])
            if xid==res(crs):
                b+="""
<form class=inline action=del_xact method=post>
<input type=hidden name=xid value="{}">
<input type=hidden name=aid value="{}">
<input type=submit value="Delete">
</form>
""".format(xid,aid)
        b+="""
</td>
</tr>
"""
    b+="""
</table>
"""
    # links to pages
    b+="""
<hr>
Page
"""
    for p in range(1,lastpage+1):
        if p!=page:
            b+="""
<a href="acct?aid={0}&page={1}">{1}</a>
""".format(aid,p)
        else:
            b+="""
{}
""".format(p)
    # close the account
    # Closing is only possible for an open account with a zero balance.
    if bal==0 and cdt==0:
        b+="""
<hr>
<div class="center form">
<form action=close_acct method=post>
<input type=hidden name=aid value="{}">
<input type=submit value="Close account">
</form>
</div>
""".format(aid)
    # cellar
    b+="""
</body>
</html>
"""
    # return success
    return HTMLResponse("200 OK",[("Content-type","text/html")],b)
def ins_xact(crs,environ):
    """insert a new transaction

    Validates the posted form (date, Dr/Cr amounts or a target balance,
    account and opposing account), then inserts two mirrored rows into
    xacts — one per account, with Dr and Cr swapped — keeping the double
    entry consistent.

    :param crs: open sqlite3 cursor
    :param environ: WSGI environ; the POST body is read from wsgi.input
    :return: HTMLResponse 303 redirect back to the account page
    :raises ValueError: on malformed/forged requests
    :raises BadInput: on user-input errors that should be reported nicely
    """
    # get arguments
    try:
        qs=environ["wsgi.input"].readline().decode()
        q=parse_qs(qs,keep_blank_values=True)
        yyyy=q["yyyy"][0]
        mm=q["mm"][0]
        dd=q["dd"][0]
        dr=q["dr"][0]
        cr=q["cr"][0]
        newbal=q["newbal"][0]
        aid=q["aid"][0]
        oaid=q["oaid"][0]
        # escape() guards against HTML injection via the free-text comment.
        comment=escape(q["comment"][0])
    except KeyError as e:
        raise ValueError("Wrong access") from e
    # check accounts
    try:
        aid=int(aid)
    except ValueError as e:
        raise ValueError("Bad aid") from e
    crs.execute("SELECT COUNT(aid) FROM accts WHERE aid=? AND cdt=0",[aid])
    if res(crs)==0:
        raise ValueError("Non-existent aid")
    try:
        oaid=int(oaid)
    except ValueError as e:
        raise ValueError("Bad oaid") from e
    crs.execute("SELECT COUNT(aid) FROM accts WHERE aid=? AND cdt=0",[oaid])
    if res(crs)==0 and oaid!=-1:
        raise ValueError("Non-existent oaid")
    # -1 is the placeholder option in the opposing-account <select>.
    if oaid==-1:
        raise BadInput("Please select the opposing account")
    if aid==oaid:
        raise BadInput("Transaction with the same account")
    # check date
    try:
        dt=date(int(yyyy),int(mm),int(dd)).toordinal()
    except ValueError as e:
        raise BadInput("Bad date") from e
    # check transaction values
    if dr=="":
        dr="0"
    try:
        dr=cur2int(dr)
    except ValueError as e:
        raise BadInput("Bad Dr") from e
    if cr=="":
        cr="0"
    try:
        cr=cur2int(cr)
    except ValueError as e:
        raise BadInput("Bad Cr") from e
    if dr<0:
        raise BadInput("Dr cannot be negative")
    if cr<0:
        raise BadInput("Cr cannot be negative")
    # Exactly one of Dr, Cr or the target Balance may be supplied.
    if dr!=0 and cr!=0:
        raise BadInput("Dr and Cr cannot both be set")
    if (dr!=0 or cr!=0) and newbal!="":
        raise BadInput("Dr or Cr and Balance cannot all be set")
    if dr==0 and cr==0:
        if newbal=="":
            raise BadInput("Set one of Dr, Cr, or Balance")
        try:
            newbal=cur2int(newbal)
        except ValueError as e:
            raise BadInput("Bad Balance") from e
    # check dates
    # Transactions must be appended in chronological order for both accounts.
    if dt>date.today().toordinal():
        raise BadInput("Date cannot be in the future")
    crs.execute("SELECT odt FROM accts WHERE aid=?",[aid])
    if dt<res(crs):
        raise BadInput("Date before the account's opening date")
    crs.execute("SELECT odt FROM accts WHERE aid=?",[oaid])
    if dt<res(crs):
        raise BadInput("Date before the opposing account's opening date")
    crs.execute("SELECT COUNT(*) FROM xacts WHERE aid=? AND dt>?",[aid,dt])
    if res(crs)!=0:
        raise BadInput("Current account has newer transactions")
    crs.execute("SELECT COUNT(*) FROM xacts WHERE aid=? AND dt>?",[oaid,dt])
    if res(crs)!=0:
        raise BadInput("Opposing account has newer transactions")
    # input data OK, prepare to insert transaction
    # get account types
    crs.execute("SELECT type FROM accts WHERE aid=?",[aid])
    atype=res(crs)
    crs.execute("SELECT type FROM accts WHERE aid=?",[oaid])
    oatype=res(crs)
    # get account balances
    bal=balance(crs,aid)
    obal=balance(crs,oaid)
    if dr==0 and cr==0:
        # derive dr and cr from new and old balances
        if atype in ("E","L","i"):
            if newbal>bal:
                cr=newbal-bal
            else:
                dr=bal-newbal
        elif atype in ("A","e"):
            if newbal>bal:
                dr=newbal-bal
            else:
                cr=bal-newbal
        else:
            raise ValueError("Bad account type")
    else:
        newbal=new_balance(atype,bal,dr,cr)
    # compute new balance of the opposing account, with dr and cr exchanged
    onewbal=new_balance(oatype,obal,cr,dr)
    # insert transaction
    # Both rows share one xid; monetary values are stored as strings.
    crs.execute("SELECT MAX(xid) FROM xacts")
    maxxid=res(crs)
    if maxxid is None:
        xid=0
    else:
        xid=maxxid+1
    crs.execute("INSERT INTO xacts VALUES(?,?,?,?,?,?,?,?)",
                [xid,dt,aid,oaid,str(dr),str(cr),str(newbal),comment])
    crs.execute("INSERT INTO xacts VALUES(?,?,?,?,?,?,?,?)",
                [xid,dt,oaid,aid,str(cr),str(dr),str(onewbal),comment])
    # return redirect
    return HTMLResponse("303 See Other",[("Location","acct?aid={}".format(aid))],"")
def del_xact(crs,environ):
    """delete transaction

    Deletes both mirrored rows of the given transaction, but only when it
    is the newest transaction on both accounts involved, so the stored
    running balances of older rows stay valid.

    :param crs: open sqlite3 cursor
    :param environ: WSGI environ; the POST body is read from wsgi.input
    :return: HTMLResponse 303 redirect back to the account page
    :raises ValueError: on malformed/forged requests
    """
    # get arguments
    qs=environ["wsgi.input"].readline().decode()
    q=parse_qs(qs)
    try:
        xid=q["xid"][0]
        aid=q["aid"][0]
    except KeyError as e:
        raise ValueError("Wrong access") from e
    # check accounts
    crs.execute("SELECT COUNT(aid) FROM accts WHERE aid=? AND cdt=0",[aid])
    if res(crs)==0:
        raise ValueError("Bad aid")
    crs.execute("SELECT COUNT(*) FROM xacts WHERE xid=? AND aid=?",[xid,aid])
    if res(crs)==0:
        raise ValueError("Bad xid")
    crs.execute("SELECT oaid FROM xacts WHERE xid=? AND aid=?",[xid,aid])
    oaid=res(crs)
    crs.execute("SELECT COUNT(aid) FROM accts WHERE aid=? AND cdt=0",[oaid])
    if res(crs)==0:
        raise ValueError("Bad oaid")
    # Refuse to delete anything but the newest transaction on either side.
    crs.execute("SELECT COUNT(*) FROM xacts WHERE xid>? AND aid=?",[xid,aid])
    if res(crs)!=0:
        raise ValueError("Current account has newer transactions")
    crs.execute("SELECT COUNT(*) FROM xacts WHERE xid>? AND aid=?",[xid,oaid])
    if res(crs)!=0:
        raise ValueError("Opposing account has newer transactions")
    # delete transaction
    # Removes both mirrored rows, since they share the same xid.
    crs.execute("DELETE FROM xacts WHERE xid=?",[xid])
    # return redirect
    return HTMLResponse("303 See Other",[("Location","acct?aid={}".format(aid))],"")
def creat_acct(crs,environ):
    """create a new account

    Validates the posted account type and (unique) name, then inserts a
    new open account with today's date as its opening date.

    :param crs: open sqlite3 cursor
    :param environ: WSGI environ; the POST body is read from wsgi.input
    :return: HTMLResponse 303 redirect back to the main page
    :raises ValueError: on malformed/forged requests
    :raises BadInput: on user-input errors that should be reported nicely
    """
    # get arguments
    qs=environ["wsgi.input"].readline().decode()
    q=parse_qs(qs,keep_blank_values=True)
    try:
        atype=q["atype"][0]
        # escape() guards against HTML injection via the account name.
        aname=escape(q["aname"][0])
    except KeyError as e:
        raise ValueError("Wrong access") from e
    # check argument
    if not atype in [x for x,_ in ATYPES]+[""]:
        raise ValueError("Wrong account type")
    # validate user input
    if atype=="":
        raise BadInput("Please select the account type")
    if aname=="":
        raise BadInput("Please set the account name")
    crs.execute("SELECT COUNT(*) FROM accts WHERE name=?",[aname])
    if res(crs)!=0:
        raise BadInput("Account with the same name already exists")
    # create account
    # NULL aid lets sqlite assign the rowid; cdt=0 marks the account open.
    odt=date.today().toordinal()
    crs.execute("INSERT INTO accts VALUES (NULL,?,?,?,0)",[atype,aname,odt])
    # return redirect
    return HTMLResponse("303 See Other",[("Location",".")],"")
def close_acct(crs,environ):
    """close account

    Marks an open, zero-balance account as closed by writing today's
    ordinal date into its cdt column.

    :param crs: open sqlite3 cursor
    :param environ: WSGI environ; the POST body is read from wsgi.input
    :return: HTMLResponse 303 redirect back to the account page
    :raises ValueError: on malformed requests, an already-closed account,
        or a non-zero balance
    """
    # get argument
    qs=environ["wsgi.input"].readline().decode()
    q=parse_qs(qs)
    try:
        aid=q["aid"][0]
    except KeyError as e:
        raise ValueError("Wrong access") from e
    # check argument
    crs.execute("SELECT COUNT(*) FROM accts WHERE aid=?",[aid])
    if res(crs)==0:
        raise ValueError("Wrong aid")
    crs.execute("SELECT cdt FROM accts WHERE aid=?",[aid])
    if res(crs)!=0:
        raise ValueError("Account already closed")
    if balance(crs,aid)!=0:
        raise ValueError("Non-zero balance")
    # close account
    # A non-zero cdt (the closing date) is what marks the account closed.
    now=date.today().toordinal()
    crs.execute("UPDATE accts SET cdt=? WHERE aid=?",[now,aid])
    # return redirect
    return HTMLResponse("303 See Other",[("Location","acct?aid={}".format(aid))],"")
| StarcoderdataPython |
3212780 | #!/usr/bin/python
import argparse
import plistlib
import subprocess
import fileinput
PLIST_PATH = "MVVMKit/Info.plist"
def increment_version(version):
    """Return *version* with its last dot-separated component incremented.

    E.g. "1.2.3" -> "1.2.4"; a single component "7" becomes "8".
    """
    parts = str(version).split('.')
    parts[-1] = str(int(parts[-1]) + 1)
    return ".".join(parts)
def get_version():
    """Return the CFBundleVersion string read from the project's Info.plist.

    NOTE(review): plistlib.readPlist was deprecated in Python 3.4 and removed
    in 3.9; this script otherwise targets Python 2 (see the print statements
    below), where readPlist is the available API — confirm the intended
    interpreter before modernizing.
    """
    global PLIST_PATH
    plist = plistlib.readPlist(PLIST_PATH)
    return plist["CFBundleVersion"]
def set_plist_version(version):
    """Write *version* into all targets via Xcode's agvtool (new-version -all)."""
    subprocess.check_output(["agvtool", "new-version", "-all", version])
def set_version(version):
    """Set the project version to *version* and report the old -> new change."""
    # Read the old version before agvtool overwrites the plist.
    old_version = get_version()
    set_plist_version(version)
    # Python 2 print statement (this module targets Python 2).
    print "Updated version from [%s] to [%s]." \
        % (old_version, version)
def set_version_automatically():
    """Bump the project version by incrementing the current one's last component."""
    set_version(increment_version(get_version()))
def git_add_all():
    """Stage every change (including deletions) in the working tree."""
    subprocess.check_output(["git", "add", "-A", "."])
def git_commit(message):
    """Create a commit from the staged changes with the given message."""
    subprocess.check_output(["git", "commit", "-m", message])
def git_push():
    """Push the current branch and then all tags to the default remote."""
    subprocess.check_output(["git", "push"])
    subprocess.check_output(["git", "push", "--tags"])
def git_tag(name, message):
    """Create an annotated tag *name* carrying *message*."""
    subprocess.check_output(["git", "tag", "-a", name, "-m", message])
def push_version():
    """Commit the version bump, tag it with the version string, and push both."""
    version = get_version()
    git_add_all()
    git_commit("Bumped version %s;" % version)
    git_tag(version, "%s tag;" % version)
    git_push()
def main():
    """CLI entry point: bump automatically (-a), set explicitly (-n), or push (-p)."""
    parser = argparse.ArgumentParser(description='Version bumper')
    parser.add_argument("-n", "--new", help="set new version number")
    parser.add_argument("-a", "--auto", help="autoincrement version number",
                        action="store_true")
    parser.add_argument("-p", "--push",
                        help="commit and push changes to repositories",
                        action="store_true")
    args = parser.parse_args()
    # The flags are mutually exclusive in practice: first match wins.
    if args.auto:
        set_version_automatically()
    elif args.new:
        set_version(args.new)
    elif args.push:
        push_version()
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1714470 | import ada as ada
from datetime import datetime
import time
from random import randint
from instamanager import InstaManager as SocialGuard
import toml
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='It is a console interface for InstaManager')
    parser.add_argument('-c', dest='config_file', default="black_list_merge.toml",
                        help='Config file')
    args = parser.parse_args()
    print("{}-------------------------{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
    print("{}Social guard 0.3 starting{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
    print("{}-------------------------{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
    print("")
    print("config file: {}".format(args.config_file))
    config = toml.load(args.config_file.strip())
    _sg = SocialGuard()
    # Connect every account marked active in the TOML config.
    for connection in config["connection"]:
        if connection["active"]:
            # end='\r' keeps the status on one line until OK/Failed is printed.
            print("{} connecting:".format(connection["name"]), end='\r')
            if connection["active"] is True:
                user_name = connection["name"]
                password = connection["password"]
                result = _sg.connect(user_name, password)
                if result:
                    print("{} connecting: {}OK{}".format(connection["name"],
                                                         ada.bcolors.OKGREEN, ada.bcolors.ENDC))
                else:
                    print("{} connecting: {}Failed{}".format(connection["name"],
                                                             ada.bcolors.FAIL, ada.bcolors.ENDC))
    print("blocked user control list: {}".format(str(len(_sg.get_blocked_users_control_list()))))
    print("")
    print("{}### - conection to Instagram:{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
    print("")
    print("{}### - blocked users lists check:{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
    # NOTE(review): control_user_name is never assigned anywhere in this
    # script, so the lines below raise NameError at runtime. It presumably
    # should be the name of a designated "control" connection from the
    # config — confirm against the original repository.
    blocked_user = _sg.diff_user_lists(_sg.get_blocked_users_control_list(), _sg.get_blocked_users_list(control_user_name))
    print("number of new blocked users: " + str(len(blocked_user)))
    unblocked_user = _sg.diff_user_lists(_sg.get_blocked_users_list(
        control_user_name), _sg.get_blocked_users_control_list())
    print("number of unblocked users: " + str(len(unblocked_user)))
    print("writing user lists diff to the files")
    _sg.save_user_list(unblocked_user, "unblocked_" + str(datetime.timestamp(datetime.now())) + ".usr")
    _sg.save_user_list(blocked_user, "blocked_" + str(datetime.timestamp(datetime.now())) + ".usr")
    print("")
    print("{}### - blocked users lists syncing:{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
    # gather all block lists into one merged list of blocked users
    for connection_name in _sg.get_connections():
        print(connection_name + ' block list len: ' + str(len(_sg.get_blocked_users_list(connection_name))))
        _sg.add_users_to_blocked_users_control_list(
            _sg.get_blocked_users_list(connection_name))
    print("merged blocked user control list: " + str(len(_sg.get_blocked_users_control_list())))
    # write the merged block list to its file
    print("blocked users control list file updating")
    _sg.save_blocked_users_control_list()
    to_block = _sg.diff_user_lists(
        _sg.get_blocked_users_list(control_user_name), _sg.get_blocked_users_control_list())
    print("number of users to block: " + str(len(to_block)))
    i = 0
    new_user_blocked = 0
    # Block every user on the merged list that isn't blocked yet,
    # showing a progress bar while iterating.
    for user in to_block:
        i += 1
        ada.printProgressBar(i, len(
            to_block), prefix='Progress:', suffix='Complete', decimals=2, length=50)
        result = _sg.get_blocked_status(control_user_name, user)
        if result is False:
            if _sg.block_user(control_user_name, user):
                new_user_blocked += 1
    print("block list updating done, new users blocked: " + str(new_user_blocked))
    print("")
    print("{}All done{}".format(ada.bcolors.HEADER, ada.bcolors.ENDC))
| StarcoderdataPython |
3278845 | <filename>pytorch_get_started/1_install/verification.py<gh_stars>0
import torch
def verify():
    """Sanity-check the PyTorch installation by creating and printing a random tensor."""
    x = torch.rand(5, 3)
    print(type(x))
    print(x)
# Script entry point.
if __name__ == '__main__':
    verify()
class Response:
    """Value object describing a single HTTP response.

    Stores the URL, status code, headers, elapsed time, content length and
    raw HTML, exposing accessor/mutator pairs plus a dict exporter.
    """
    __url: str
    __status_code: int
    __header: dict
    __time_elapsed: str
    __content_length: int
    __html: str

    def __init__(self, url: str, status_code: int, header: dict, time_elapsed: str, content_length: int, html: str):
        """Capture all response fields at construction time."""
        self.__url = url
        self.__status_code = status_code
        self.__header = header
        self.__time_elapsed = time_elapsed
        self.__content_length = content_length
        self.__html = html

    def get_url(self):
        """Return the requested URL."""
        return self.__url

    def set_url(self, url: str):
        """Replace the stored URL."""
        self.__url = url

    def get_status_code(self):
        """Return the HTTP status code."""
        return self.__status_code

    def set_status_code(self, status_code: int):
        """Replace the stored status code."""
        self.__status_code = status_code

    def get_header(self):
        """Return the response headers."""
        return self.__header

    def set_header(self, header: dict):
        """Replace the stored headers."""
        self.__header = header

    def get_time_elapsed(self):
        """Return the elapsed-time string."""
        return self.__time_elapsed

    def set_time_elapsed(self, time_elapsed: str):
        """Replace the stored elapsed time."""
        self.__time_elapsed = time_elapsed

    def get_content_length(self):
        """Return the content length in bytes."""
        return self.__content_length

    def set_content_length(self, content_length: int):
        """Replace the stored content length."""
        self.__content_length = content_length

    def get_html(self):
        """Return the raw HTML body."""
        return self.__html

    def set_html(self, html: str):
        """Replace the stored HTML body."""
        self.__html = html

    def build_dict(self, verbose: int):
        """Export the response as a dict; verbose == 1 also includes header and html."""
        if verbose == 1:
            return {
                "url": self.__url,
                "status_code": self.__status_code,
                "header": self.__header,
                "time_elapsed": self.__time_elapsed,
                "content_length": self.__content_length,
                "html": self.__html,
            }
        return {
            "url": self.__url,
            "status_code": self.__status_code,
            "time_elapsed": self.__time_elapsed,
            "content_length": self.__content_length,
        }
| StarcoderdataPython |
class Solution(object):
    def reverseVowels(self, s):
        """
        Reverse only the vowels of s, leaving every other character in place.

        Fixes: the original used Python-2-only xrange (NameError on Python 3),
        did O(10) list membership checks per character, and built three
        intermediate lists. This version uses a set for O(1) vowel tests and
        a single two-pointer pass; it runs on both Python 2 and 3.

        :type s: str
        :rtype: str
        """
        vowels = set('aeiouAEIOU')
        chars = list(s)
        lo, hi = 0, len(chars) - 1
        while lo < hi:
            if chars[lo] not in vowels:
                lo += 1
            elif chars[hi] not in vowels:
                hi -= 1
            else:
                # Both pointers sit on vowels: swap and advance both.
                chars[lo], chars[hi] = chars[hi], chars[lo]
                lo += 1
                hi -= 1
        return ''.join(chars)
def main():
    """Smoke test: vowels of "leetcode" reversed should print "leotcede"."""
    s = "leetcode"
    solution = Solution()
    # Python 2 print statement (this module targets Python 2).
    print solution.reverseVowels(s)
if __name__ == '__main__':
    main()
3357723 | #!/usr/bin/python -i
import numpy as np
from NuclearNormMinimization import NuclearNormMinimization
from sklearn.metrics import mean_squared_error
# Build a random 10x20 matrix of rank at most 3: S is diagonal-like with
# exactly three nonzero entries, so rank(U @ S @ V) <= 3.
U = np.random.random((10,10))
S = np.zeros((10,10))
S[0,0] = 500
S[1,1] = 100
S[2,2] = 50
V = np.random.random((10,20))
matrix = np.matmul(U, np.matmul(S, V))
# Hide ~30% of the entries (where the uniform draw is >= 0.7) as NaN.
incomplete_matrix = matrix.copy()
blah = np.random.random(incomplete_matrix.shape)
hidden_entries = (blah >= 0.7)
sampled_entries = np.logical_not(hidden_entries)
incomplete_matrix[ hidden_entries ] = np.nan
# Recover the hidden entries via nuclear-norm minimization and report the
# MSE of the completed matrix against the ground truth.
solver = NuclearNormMinimization()
completed_matrix = solver.complete(incomplete_matrix)
mse = mean_squared_error(matrix, completed_matrix)
print('mean_squared_error = {0:.6f}'.format(mse))
| StarcoderdataPython |
176071 | # CloudShell L1 resource autoload XML helper
#
# It should not be necessary to edit this file.
#
# - Generates the autoload XML resource format to return to CloudShell
# - Subresources are also represented with nested instances of this class
# - See example usage in <project>_l1_handler.py
class L1DriverResourceInfo:
    # NOTE(review): this class uses dict.iteritems() in to_string(), so it
    # targets Python 2; iteritems() does not exist on Python 3 dicts.
    def __init__(self, name, full_address, family, model, map_path=None, serial='-1'):
        """
        :param name: str
        :param full_address: str
        :param family: str
        :param model: str
        :param map_path: str
        :param serial: str
        """
        self.name = name
        self.address = full_address
        self.family = family
        self.model = model
        self.serial = serial
        self.map_path = map_path
        # Nested L1DriverResourceInfo instances representing child resources.
        self.subresources = []
        # Maps attribute name -> (type name, value) for the autoload XML.
        self.attrname2typevaluetuple = {}
        # Dead branch — presumably an IDE type-inference hint for the list
        # element type; never executed. TODO confirm and consider removing.
        if False:
            self.subresources.append(L1DriverResourceInfo(None, None, None, None))
    def add_subresource(self, subresource):
        """
        :param subresource: L1DriverResourceInfo
        :return: None
        """
        self.subresources.append(subresource)
    def set_attribute(self, name, value, typename='String'):
        """
        :param name: str
        :param value: str
        :param typename: str: String, Lookup,
        :return: None
        """
        self.attrname2typevaluetuple[name] = (typename, value)
    def get_attribute(self, name):
        """
        :param name: str
        :return: str
        """
        # Returns only the value half of the (type, value) tuple.
        return self.attrname2typevaluetuple[name][1]
    def to_string(self, tabs=''):
        """
        :param tabs: str
        :return: str
        """
        # Re-indent a multi-line string by prefixing every line with t.
        def indent(t, s):
            return t + (('\n' + t).join(s.split('\n'))).strip() + '\n'
        # Child resources are rendered recursively with one extra indent level.
        return indent(tabs,
                      '''<ResourceInfo Name="{name}" Address="{address}" ResourceFamilyName="{family}" ResourceModelName="{model}" SerialNumber="{serial}">
<ChildResources>
{children} </ChildResources>
<ResourceAttributes>
{attributes} </ResourceAttributes>
{mapping}</ResourceInfo>'''.format(
                          name=self.name,
                          address=self.address,
                          family=self.family,
                          model=self.model,
                          serial=self.serial,
                          children=''.join([x.to_string(tabs=tabs + ' ') for x in self.subresources]),
                          attributes=''.join([''' <Attribute Name="{name}" Type="{type}" Value="{value}" />\n'''.format(name=attrname, type=type_value[0], value=type_value[1])
                                              for attrname, type_value in self.attrname2typevaluetuple.iteritems()
                                              ]),
                          mapping=''' <ResourceMapping><IncomingMapping>''' + self.map_path + '''</IncomingMapping></ResourceMapping>\n''' if self.map_path else ''
                      ))
| StarcoderdataPython |
3381808 | <reponame>jsaied99/sentimint
from flask import Flask, jsonify, escape, g,request
from flask_cors import CORS
import db_conn
from time import time
from twitter_api import get_tweets
app = Flask(__name__)
# CORS is open to every origin on every route.
CORS(app, resources={r"/*": {"origins": "*"}})
app.config["CORS_HEADER"] = "Content-Type"
@app.before_request
def initialize_firebase():
    """Open the Firebase connection before each request and stash it on Flask's `g`."""
    print("Initializing Firebase")
    g.db = db_conn.initialize_db()
@app.route('/all_queries', methods=['GET', 'POST'])
def uui_request():
    """Return every stored search query for the user identified by 'uid'.

    Expects a JSON body of the form {"uid": ...}. Previously a missing body
    or missing 'uid' fell through to an implicit `return None`, which Flask
    surfaces as a 500; now that case answers with an explicit 400 error.
    """
    # silent=True returns None instead of raising on a missing/invalid JSON body.
    body = request.get_json(silent=True) or {}
    if 'uid' not in body:
        return jsonify({"error": "missing 'uid' in request body", "status": 0}), 400
    data = db_conn.get_all_searched_text(g.db, 'users', body['uid'])
    return jsonify({
        "data": data,
        "status": 1
    })
@app.route('/sentiment_data/<uid>', methods=['GET'])
def get_sentiment_data(uid):
    """Return the stored sentiment records for *uid*, or a placeholder/error body."""
    if not hasattr(g, 'db'):
        return jsonify({"error": "No database connection"})
    records = db_conn.get_data_by_uid(g.db, 'sentiment_data', uid)
    if not records:
        return jsonify({"No Data": []})
    return jsonify({"data": records})
@app.route('/sentiment_analysis', methods=['POST'])
def analyze_data():
    """Run sentiment analysis on the posted text for the given user.

    Expects a JSON body with 'text' and 'uid'; a missing key raises
    KeyError, which Flask surfaces as a 500.
    """
    body = request.get_json()
    text = body['text']
    uid = body['uid']
    # Timed so the response can report server-side execution time.
    start = time()
    data = db_conn.analyze_text(g.db,u'users', uid, text)
    return jsonify({
        "data": data,
        "execution_time": time() - start,
        "success": 1})
@app.route('/twitter_api', methods=['GET','POST'])
def analyze_tweet_topic():
    """Fetch recent tweets about 'topic' and run sentiment analysis for 'uid'.

    Accepts 'uid', 'topic' and 'limit' either as a JSON body or as form
    data. Previously request.get_json() was evaluated twice per request and
    missing keys fell through to an implicit `return None` (a Flask 500);
    now the body is parsed once and missing keys answer with a 400 error.
    """
    body = request.get_json(silent=True) or request.form
    if not all(key in body for key in ('uid', 'topic', 'limit')):
        return jsonify({"error": "missing one of 'uid', 'topic', 'limit'", "success": 0}), 400
    uid = body['uid']
    topic = body['topic']
    limit = body['limit']
    start = time()
    text_array = get_tweets(topic, limit)
    print(text_array)
    data = db_conn.analyze_text_twitter(g.db, u'users', uid, text_array)
    return jsonify({
        "data": data,
        "execution_time": time() - start,
        "success": 1})
if __name__ == '__main__':
    app.run(host='0.0.0.0',port=5001, debug=True)
| StarcoderdataPython |
3225355 | from flask import Flask, request, render_template
import pandas as pd
import pickle
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import re
app = Flask(__name__, template_folder="templates")
# Load the model
# Pre-trained estimator and the StandardScaler it was fitted with,
# deserialized once at import time.
model = joblib.load('./models/model.p')
scaler = joblib.load('./models/scaler.p')
@app.route('/', methods=['GET','POST'])
def home():
    """Render the prediction form; on POST, score the submitted loan features.

    Builds a one-row DataFrame from the form fields, derives LTV, WLTV and
    down-payment ratio, scales with the pre-fitted scaler and predicts with
    the loaded model. NOTE(review): the float() conversions are unguarded —
    an empty or non-numeric form field raises and becomes a 500. The local
    name `input` shadows the builtin.
    """
    if request.method == 'POST':
        principal_balance = request.form.get('principal_balance')
        principal_ptd = request.form.get('principal_ptd')
        down = request.form.get('down')
        NADA = request.form.get('NADA')
        finance_charge = request.form.get('finance_charge')
        term = request.form.get('term')
        seasoning = request.form.get('seasoning')
        DPD = request.form.get('DPD')
        text = request.form.get('text')
        data = [principal_balance,principal_ptd,down,NADA,finance_charge,term,seasoning,DPD,text]
        input = pd.DataFrame([data],
                             columns=['principal_balance', 'principal_ptd', 'down', 'NADA', 'finance_charge','term','seasoning', 'DPD', 'content'])
        # Derived features: loan-to-value, balance-weighted LTV and down-payment
        # ratio — presumably matching the features the model was trained on;
        # TODO confirm against the training pipeline.
        input['LTV'] = float(input.principal_balance)/float(input.NADA)
        input['WLTV'] = input.LTV*float(input.principal_balance)
        input['down_%'] = float(input.down)/(float(input.principal_balance)+float(input.principal_ptd))
        print(input)
        # Column order must match what the scaler/model expect.
        df = input[['DPD', 'term', 'seasoning', 'finance_charge', 'principal_ptd', 'down_%','LTV', 'WLTV']]
        # df = pd.DataFrame(input.content.str.split().tolist(), columns=['DPD', 'term', 'seasoning', 'finance_charge', 'principal_ptd', 'down_%','LTV', 'WLTV'])
        # Make prediction
        print(df)
        rescaled_df = scaler.transform(df)
        pred = model.predict(rescaled_df)
        pred = np.round(pred, decimals=2)
        #print(preds)
        #pred = pred*100
        print(pred)
        return render_template('index.html', value=pred[0])
    return render_template('index.html', value='')
if __name__ == '__main__':
    app.run(port=3000, debug=True)
1623979 | import logging
import re
from netmiko import ConnectHandler
from time import sleep
log = logging.getLogger(__name__)
class SSHSession(object):
"""
Generic SSHSession which can be used to run commands
"""
def __init__(self, host, username, password, timeout=60):
"""
Establish SSH Connection using given hostname, username and
password which can be used to run commands.
"""
self._host = host
_cisco_device = {
'device_type': 'cisco_nxos',
'host': self._host,
'username': username,
'password': password,
'timeout': 60
}
self._connection = ConnectHandler(**_cisco_device)
self.prompt = self._connection.find_prompt()
def __repr__(self):
"""
Return a representation string
"""
return "<%s (%s)>" % (self.__class__.__name__, self._host)
def __del__(self):
"""Try to close connection if possible"""
try:
sleep(2)
self._connection.disconnect()
except Exception:
pass
def _check_error(self, output):
for eachline in output.strip().splitlines():
eachline = eachline.strip()
eachline = eachline.replace("at '^' marker.", "")
eachline = eachline.replace("^", "")
if "Invalid command" in eachline:
return True
return False
def show(self, cmd):
output = self._connection.send_command(cmd)
if self._check_error(output):
return output.splitlines(), output # There is error - invalid command
return output.splitlines(), None # There is no error
def config(self, cmd):
retout = []
output = self._connection.send_config_set(cmd,
strip_prompt=True,
strip_command=True,
config_mode_command="configure terminal",
exit_config_mode=True)
start = False
for eachline in output.strip().splitlines():
eachline = eachline.strip()
eachline = eachline.replace("at '^' marker.", "")
eachline = eachline.replace("^", "")
if re.match(r'^\s*$', eachline):
continue
if cmd in eachline:
start = True
continue
if "# end" in eachline:
break
if start:
retout.append(eachline)
if retout:
return retout, ''.join(retout) # There is some error
return retout, None # there is no error
| StarcoderdataPython |
73947 | <filename>tasks.py
#!/usr/bin/env python3
"""
Task execution tool & library
"""
import os
import re
import sys
from datetime import datetime
from logging import basicConfig, getLogger
from pathlib import Path
import docker
import git
from bumpversion.cli import main as bumpversion
from easy_infra import __project_name__, __version__, constants, utils
from invoke import task
from tests import test as run_test
# Module-level state shared by all tasks: repo metadata, docker client,
# parsed config, and the image tag map per build target.
CWD = Path(".").absolute()
REPO = git.Repo(CWD)
COMMIT_HASH = REPO.head.object.hexsha
TESTS_PATH = CWD.joinpath("tests")
LOG = getLogger(__project_name__)
CLIENT = docker.from_env()
CONFIG = utils.parse_config(config_file=constants.CONFIG_FILE)
TARGETS: dict[str, dict[str, list[str]]] = {}
for target in constants.TARGETS:
    TARGETS[target] = {}
    # The "final" target gets the bare version/latest tags; every other
    # target gets the target name suffixed onto both tags.
    if target == "final":
        TARGETS[target]["tags"] = [
            constants.IMAGE + ":" + __version__,
            constants.IMAGE + ":latest",
        ]
    else:
        TARGETS[target]["tags"] = [
            constants.IMAGE + ":" + __version__ + "-" + target,
            constants.IMAGE + ":" + "latest" + "-" + target,
        ]
basicConfig(level=constants.LOG_DEFAULT, format=constants.LOG_FORMAT)
def process_container(*, container: docker.models.containers.Container) -> None:
    """Process a provided container

    Waits for the container to stop, captures and flattens its logs,
    removes it, and exits the whole process with the container's status
    code when it is non-zero.
    """
    response = container.wait(condition="not-running")
    decoded_response = container.logs().decode("utf-8")
    # Flatten the log into a single line for compact logging.
    response["logs"] = decoded_response.strip().replace("\n", " ")
    container.remove()
    if not response["StatusCode"] == 0:
        LOG.error(
            "Received a non-zero status code from docker (%s); additional details: %s",
            response["StatusCode"],
            response["logs"],
        )
        # Propagate the container's failure as this process's exit code.
        sys.exit(response["StatusCode"])
    else:
        LOG.info("%s", response["logs"])
def log_build_log(*, build_err: docker.errors.BuildError) -> None:
    """Log each entry of a failed docker build's log at ERROR level.

    Entries carrying a "stream" key are build output (pure-newline entries
    are skipped), entries carrying "errorDetail" describe the failure, and
    anything else is logged verbatim.
    """
    # A plain for loop replaces the original's manual
    # iter()/next()/StopIteration bookkeeping — identical behavior.
    for item in build_err.build_log:
        if "stream" in item:
            if item["stream"] != "\n":
                LOG.error("%s", item["stream"].strip())
        elif "errorDetail" in item:
            LOG.error("%s", item["errorDetail"])
        else:
            LOG.error("%s", item)
# Tasks
@task
def update(_c, debug=False):
    """Update the core components of easy_infra

    Refreshes the pinned version of every tracked dependency (apt packages,
    GitHub releases/tags, HashiCorp projects, PyPI packages and the trivy
    scanner image) in the config file, then updates the CI's Pipenv lock
    inside a throwaway python:3.9 container.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    for package in constants.APT_PACKAGES:
        version = utils.get_latest_release_from_apt(package=package)
        utils.update_config_file(thing=package, version=version)
    for repo in constants.GITHUB_REPOS_RELEASES:
        version = utils.get_latest_release_from_github(repo=repo)
        utils.update_config_file(thing=repo, version=version)
    for repo in constants.GITHUB_REPOS_TAGS:
        version = utils.get_latest_tag_from_github(repo=repo)
        utils.update_config_file(thing=repo, version=version)
    for project in constants.HASHICORP_PROJECTS:
        version = utils.get_latest_release_from_hashicorp(project=project)
        utils.update_config_file(thing=project, version=version)
    for package in constants.PYTHON_PACKAGES:
        version = utils.get_latest_release_from_pypi(package=package)
        utils.update_config_file(thing=package, version=version)
    # On github they use aquasecurity but on docker hub it's aquasec, and the
    # github releases are prefaced with v but not on docker hub
    version = utils.get_latest_release_from_github(repo="aquasecurity/trivy").lstrip(
        "v"
    )
    utils.update_container_security_scanner(image="aquasec/trivy", tag=version)
    # Update the CI dependencies
    image = "python:3.9"
    working_dir = "/usr/src/app/"
    volumes = {CWD: {"bind": working_dir, "mode": "rw"}}
    CLIENT.images.pull(repository=image)
    command = '/bin/bash -c "python3 -m pip install --upgrade pipenv &>/dev/null && pipenv update"'
    utils.opinionated_docker_run(
        image=image,
        volumes=volumes,
        working_dir=working_dir,
        auto_remove=True,
        detach=False,
        command=command,
    )
@task
def reformat(_c, debug=False):
    """Reformat easy_infra

    Runs isort and then black over the repository inside the seiso/goat
    container, mounting the working tree read-write so the formatters can
    rewrite files in place.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    # (entrypoint, command) pairs run sequentially in the same image.
    entrypoint_and_command = [
        ("isort", ". --settings-file /action/lib/.automation/.isort.cfg"),
        ("black", "."),
    ]
    image = "seiso/goat:latest"
    working_dir = "/goat/"
    volumes = {CWD: {"bind": working_dir, "mode": "rw"}}
    LOG.info("Pulling %s...", image)
    CLIENT.images.pull(image)
    LOG.info("Reformatting the project...")
    for entrypoint, command in entrypoint_and_command:
        container = CLIENT.containers.run(
            auto_remove=False,
            command=command,
            detach=True,
            entrypoint=entrypoint,
            image=image,
            volumes=volumes,
            working_dir=working_dir,
        )
        # process_container waits, logs, removes, and exits on failure.
        process_container(container=container)
@task
def lint(_c, debug=False):
    """Lint easy_infra

    Runs the seiso/goat linter container against the repository. Requires a
    clean git working tree; host environment variables prefixed with INPUT_
    are forwarded into the container to configure the linters.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    environment = {}
    # Default to disabling the goat built-in terrascan
    environment["INPUT_DISABLE_TERRASCAN"] = "true"
    environment["INPUT_DISABLE_MYPY"] = "true"
    if REPO.is_dirty(untracked_files=True):
        LOG.error("Linting requires a clean git directory to function properly")
        sys.exit(1)
    # Pass in all of the host environment variables starting with INPUT_
    for element in dict(os.environ):
        if element.startswith("INPUT_"):
            environment[element] = os.environ.get(element)
    image = "seiso/goat:latest"
    environment["RUN_LOCAL"] = True
    working_dir = "/goat/"
    volumes = {CWD: {"bind": working_dir, "mode": "rw"}}
    LOG.info("Pulling %s...", image)
    CLIENT.images.pull(image)
    LOG.info("Running %s...", image)
    container = CLIENT.containers.run(
        auto_remove=False,
        detach=True,
        environment=environment,
        image=image,
        volumes=volumes,
        working_dir=working_dir,
    )
    # process_container waits, logs, removes, and exits on failure.
    process_container(container=container)
    LOG.info("Linting completed successfully")
@task
def build(_c, debug=False):
    """Build the easy_infra Docker images for every configured target.

    Renders the Dockerfile from its Jinja2 template, forwards the tool
    versions from CONFIG as build args, and applies every configured tag
    to each built image. Exits non-zero on a build failure.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    utils.render_jinja2(
        template_file=constants.JINJA2_FILE,
        config=CONFIG,
        output_file=constants.OUTPUT_FILE,
    )
    buildargs = {
        "VERSION": __version__,
        "COMMIT_HASH": COMMIT_HASH,
    }
    for command in CONFIG["commands"]:
        if "version" in CONFIG["commands"][command]:
            # Normalize the build args
            # (e.g. "checkov" -> CHECKOV_VERSION, "tfsec" -> TFSEC_VERSION).
            arg = command.upper().replace("-", "_") + "_VERSION"
            buildargs[arg] = CONFIG["commands"][command]["version"]
    # pylint: disable=redefined-outer-name
    for target in constants.TARGETS:
        # Build once under the first tag, then alias the remaining tags onto it.
        first_image = TARGETS[target]["tags"][0]
        LOG.info("Building %s...", first_image)
        try:
            image = CLIENT.images.build(
                path=str(CWD),
                target=target,
                rm=True,
                tag=first_image,
                buildargs=buildargs,
            )[0]
        except docker.errors.BuildError as build_err:
            LOG.exception(
                "Failed to build target %s, retrieving and logging the more detailed build error...",
                target,
            )
            log_build_log(build_err=build_err)
            sys.exit(1)
        for tag in TARGETS[target]["tags"][1:]:
            LOG.info("Tagging %s...", tag)
            image.tag(constants.IMAGE, tag=tag.split(":")[-1])
@task(pre=[lint, build])
def test(_c, debug=False):
    """Run the per-target test suites against the freshly built images.

    Depends on lint and build (invoke pre-tasks). Each target is exercised
    using the last tag configured for it; unrecognized targets are logged
    as errors.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    default_working_dir = "/iac/"
    # The tests only read the fixtures, so mount them read-only.
    default_volumes = {TESTS_PATH: {"bind": default_working_dir, "mode": "ro"}}
    # pylint: disable=redefined-outer-name
    for target in constants.TARGETS:
        # Only test using the last tag for each target
        image = TARGETS[target]["tags"][-1]
        LOG.info("Testing %s...", image)
        if target == "minimal":
            run_test.run_terraform(image=image)
            run_test.run_ansible(image=image)
            run_test.run_security(image=image)
        elif target == "az":
            run_test.run_az_stage(image=image)
            run_test.run_security(image=image)
        elif target == "aws":
            run_test.run_aws_stage(image=image)
            run_test.run_security(image=image)
        elif target == "final":
            # The final image gets the full suite, including PATH checks and
            # the CLI/version-argument tests.
            run_test.run_path_check(image=image)
            run_test.version_arguments(
                image=image, volumes=default_volumes, working_dir=default_working_dir
            )
            run_test.run_terraform(image=image, final=True)
            run_test.run_ansible(image=image)
            run_test.run_cli(image=image)
            run_test.run_security(image=image)
        else:
            LOG.error("Untested stage of %s", target)
@task
def release(_c, debug=False):
    """Cut a new CalVer release of easy_infra.

    Versions follow vYYYY.MM.NN: NN restarts at 01 for the first release of
    a month and otherwise increments from the latest matching tag. Refuses
    to run from a detached HEAD.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    if REPO.head.is_detached:
        LOG.error("In detached HEAD state, refusing to release")
        sys.exit(1)
    # Get the current date info
    date_info = datetime.now().strftime("%Y.%m")
    # Matches release tags such as v2021.05.03.
    pattern = re.compile(r"v2[0-1][0-9]{2}.(0[0-9]|1[0-2]).[0-9]{2}")
    # Identify and set the increment
    # NOTE(review): assumes reversed(REPO.tags) yields the most recent
    # release tag first — confirm tag ordering if release numbers skip.
    for tag in reversed(REPO.tags):
        if pattern.fullmatch(tag.name):
            latest_release = tag.name
            break
    else:
        latest_release = None
    if latest_release and date_info == latest_release[1:8]:
        # Same year/month as the latest release: bump the NN suffix.
        increment = str(int(latest_release[9:]) + 1).zfill(2)
    else:
        # First release of this month.
        increment = "01"
    new_version = date_info + "." + increment
    bumpversion(["--new-version", new_version, "unusedpart"])
@task
def publish(_c, tag, debug=False):
    """Push the easy_infra images for *tag* to Docker Hub.

    Args:
        _c: invoke context (unused).
        tag: either "latest" or "release"; "release" resolves to the
            current package version.
        debug: when True, raise the log level to DEBUG.
    """
    if debug:
        getLogger().setLevel("DEBUG")
    if tag not in ["latest", "release"]:
        LOG.error("Please provide a tag of either latest or release")
        sys.exit(1)
    elif tag == "release":
        tag = __version__
    # pylint: disable=redefined-outer-name
    for target in constants.TARGETS:
        for repository in TARGETS[target]["tags"]:
            # Skip tags which don't start with the tag we are publishing
            if not repository.startswith(f"{constants.IMAGE}:{tag}"):
                continue
            LOG.info("Pushing %s to docker hub...", repository)
            CLIENT.images.push(repository=repository)
            LOG.info("Done publishing the %s Docker image", repository)
    LOG.info("Done publishing all of the %s easy_infra Docker images", tag)
@task
def clean(_c, debug=False):
    """Clean up local easy_infra artifacts (leftover test tarballs)."""
    if debug:
        getLogger().setLevel("DEBUG")
    # Remove any tarballs left behind in the tests tmp directory.
    scratch_dir = TESTS_PATH.joinpath("tmp")
    for stale_tarball in scratch_dir.glob("*.tar"):
        stale_tarball.unlink()
| StarcoderdataPython |
3300095 | from typing import Any
import aws_cdk as cdk
from constructs import Construct
from api.infrastructure import Api
class UuidGeneratorBackend(cdk.Stack):
    """CDK stack for the UUID-generator backend: wires up the HTTP API
    construct and exports its invoke URL as a CloudFormation output."""
    def __init__(self, scope: Construct, id_: str, **kwargs: Any) -> None:
        super().__init__(scope, id_, **kwargs)
        api = Api(self, "Api")
        cdk.CfnOutput(
            self,
            "ApiEndpoint",
            # Api doesn't disable create_default_stage, hence URL will be defined
            value=api.api_gateway_http_api.url,  # type: ignore
        )
| StarcoderdataPython |
1744888 | <reponame>robert-anderson/pyscf<gh_stars>1-10
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import unittest
import numpy
from pyscf import gto, lib
from pyscf.symm import Dmatrix, geom
class KnownValues(unittest.TestCase):
    """Regression tests pinning known values for pyscf.symm.Dmatrix."""
    def test_Dmatrix(self):
        """Wigner D-matrix fingerprints for l = 0..4 at fixed Euler angles."""
        self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(0, -.7, .5, .2)), 1, 12)
        self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(1, -.7, .5, .2)), 0.7014811805222106, 12)
        self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(2, -.7, .5, .2)), 1.247436140965072 , 12)
        self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(3, -.7, .5, .2)), 0.9226598665854279, 12)
        self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(4, -.7, .5, .2)), -0.425143083298510, 12)
    def test_real_sph_vec(self):
        """Overlap integrals of a rotated molecule must match the D-matrix
        transform of the unrotated overlaps, for each l shell."""
        c0 = c = numpy.random.random(3)
        mol1 = gto.M(atom=['H1 0 0 0', ['H2', c]],
                     basis = {'H1': [[0, (1, 1)]],
                              'H2': [[l, (1, 1)] for l in range(1,6)]})
        alpha = .2
        beta = .4
        gamma = -.3
        # Rotate the second atom by the z-y-z Euler angles (alpha, beta, gamma).
        c1 = numpy.dot(geom.rotation_mat((0,0,1), gamma), c0)
        c1 = numpy.dot(geom.rotation_mat((0,1,0), beta), c1)
        c1 = numpy.dot(geom.rotation_mat((0,0,1), alpha), c1)
        mol2 = gto.M(atom=['H1 0 0 0', ['H2', c1]],
                     basis = {'H1': [[0, (1, 1)]],
                              'H2': [[l, (1, 1)] for l in range(1,6)]})
        for l in range(1, 6):
            s1 = mol1.intor('int1e_ovlp', shls_slice=(0,1,l,l+1))
            s2 = mol2.intor('int1e_ovlp', shls_slice=(0,1,l,l+1))
            # Rotating a basis is equivalent to an inversed rotation over the axes.
            # The Eular angles that rotates molecule to a new geometry (axes
            # transformation) corresponds to the inversed rotation over basis.
            #r = small_dmatrix(l, -beta, reorder_p=True)
            r = Dmatrix.Dmatrix(l, -gamma, -beta, -alpha, reorder_p=True)
            self.assertAlmostEqual(abs(numpy.dot(s1, r) - s2).max(), 0, 12)
    def test_euler_angles(self):
        """get_euler_angles must recover angles that rotate c0 onto c2, and
        must reject non-normalized inputs."""
        c0 = numpy.random.random(3)
        c2 = numpy.random.random(3)
        # Non-unit vectors are rejected.
        self.assertRaises(AssertionError, Dmatrix.get_euler_angles, c0, c2)
        c0 /= numpy.linalg.norm(c0)
        c2 /= numpy.linalg.norm(c2)
        alpha, beta, gamma = Dmatrix.get_euler_angles(c0, c2)
        # Applying the recovered z-y-z rotation to c0 must reproduce c2.
        c1 = numpy.dot(geom.rotation_mat((0,0,1), gamma), c0)
        c1 = numpy.dot(geom.rotation_mat((0,1,0), beta), c1)
        c1 = numpy.dot(geom.rotation_mat((0,0,1), alpha), c1)
        self.assertAlmostEqual(abs(c2 - c1).max(), 0, 12)
if __name__ == "__main__":
    # Allow running this test module directly.
    print("Full Tests for Dmatrix")
    unittest.main()
| StarcoderdataPython |
1671207 | <filename>gui/notes-to-text.py
#!/usr/bin/env python3
# GUI automation. For Mac only.
# Exports Apple Notes ".notesairdropdocument" files to plain text by opening
# each note, select-all + copy via simulated keystrokes, and writing the
# clipboard to a .txt file. Each processed note is then deleted from the app.
import os, sys
FILE_EXT_IN, FILE_EXT_OUT = '.notesairdropdocument', '.txt'
USAGE = '''%s source-dir dest-dir
This uses GUI automation to open note files (using the Mac `open` command) from
source-dir and copy their text. It then puts the copied text in a file in
dest-dir with the same name as the original file, but with the extension "%s".
The script will guide you on how to set it up so it copies properly.
Running the script takes more than a second per note file, and since it takes
control of the mouse and keyboard during that time, it's probably best to run
it while you don't need the computer.''' % (sys.argv[0], FILE_EXT_OUT)
try:
    source_dir = sys.argv[1]
    if source_dir == 'help':
        print(USAGE)
        sys.exit()
    dest_dir = sys.argv[2]
except IndexError:
    sys.exit(USAGE)
# import stuff after checking args because pyautogui is so slow to import
import pyautogui, pyperclip, subprocess, time
if not os.path.isdir(dest_dir):
    os.mkdir(dest_dir)
# Calibration step 1: record where the note text area is on screen.
print('Open the notes app and move the mouse to the section containing editable text.')
print('The notes app does not need focus now, but it needs to be clickable there.')
print('The script needs this to select the text.')
if input('Type "y" when done. ') != 'y':
    sys.exit()
x_text, y_text = pyautogui.position()
# Calibration step 2: record where the note title is in the sidebar.
print('Move the mouse to the menu at the left to click on the note\'s title.')
print('The notes app does not need focus now, but it needs to be clickable there.')
print('The script needs this to delete the note.')
if input('Type "y" when done. The script will begin right after. ') != 'y':
    sys.exit()
x_del, y_del = pyautogui.position()
pyautogui.moveTo(x_text, y_text)
pyautogui.click()
num_note_files_copied = 0
for file_name in os.listdir(source_dir):
    # Only process files ending with FILE_EXT_IN; rfind returns -1 for
    # non-matching names, which also fails this position check.
    index_ext = file_name.rfind(FILE_EXT_IN)
    if index_ext != len(file_name) - len(FILE_EXT_IN):
        continue
    subprocess.run(['open', os.path.join(source_dir, file_name)])
    # Give the Notes app a moment to open the note before interacting.
    time.sleep(1)
    pyautogui.moveTo(x_text, y_text)
    pyautogui.click()
    pyautogui.hotkey('command', 'a')
    pyautogui.hotkey('command', 'c')
    content = pyperclip.paste()
    new_file_name = os.path.join(dest_dir, file_name[:index_ext] + FILE_EXT_OUT)
    with open(new_file_name, 'w') as f:
        f.write(content)
    print('File %s copied to %s' % (file_name, new_file_name))
    # Delete the now-exported note via the sidebar title.
    pyautogui.moveTo(x_del, y_del)
    pyautogui.click()
    pyautogui.press('delete')
    pyautogui.press('enter')
    num_note_files_copied += 1
pyautogui.hotkey('command', 'tab')
print('--------------------\n%s note files copied' % num_note_files_copied)
| StarcoderdataPython |
1747752 | <reponame>AwesomeGitHubRepos/adventofcode
import os.path
import re
from collections import deque
HERE = os.path.dirname(os.path.abspath(__file__))
def create_bot(source, low, high):
    """Return a resolver for one balance bot.

    The returned callable inspects *source*'s chips in the namespace; once
    the bot holds two or more chips it returns a transfer mapping of the
    *high* target to the max chip and the *low* target to the min chip,
    otherwise it returns None.
    """
    def bot(namespace):
        held = namespace.get(source)
        if held is None or len(held) < 2:
            return None
        return {high: max(held), low: min(held)}
    bot.__qualname__ = source
    return bot
_input = re.compile('value (\d+) goes to ((?:bot|output) \d+)')
_bot = re.compile('(bot \d+) gives low to ((?:bot|output) \d+) '
'and high to ((?:bot|output) \d+)')
def solve(instructions):
    """Execute all balance-bot instructions and return the final namespace.

    The namespace maps each "bot N" / "output N" name to the list of chip
    values it has received.
    """
    namespace = {}
    pending = deque()
    for line in instructions:
        seeded = _input.search(line)
        if seeded:
            value, target = seeded.groups()
            namespace.setdefault(target, []).append(int(value))
        else:
            source, low, high = _bot.search(line).groups()
            pending.append(create_bot(source, low, high))
    # Round-robin scheduling: a bot that cannot act yet (fewer than two
    # chips) is requeued until its inputs have arrived.
    while pending:
        current = pending.popleft()
        transfers = current(namespace)
        if not transfers:
            pending.append(current)
            continue
        for target, value in transfers.items():
            namespace.setdefault(target, []).append(value)
    return namespace
def test():
    """Self-test using the worked example from the puzzle statement."""
    test_instructions = '''\
value 5 goes to bot 2
bot 2 gives low to bot 1 and high to bot 0
value 3 goes to bot 1
bot 1 gives low to output 1 and high to bot 0
bot 0 gives low to output 2 and high to output 0
value 2 goes to bot 2
'''.splitlines(True)
    namespace = solve(test_instructions)
    # Expected final output bins from the puzzle's example.
    assert namespace['output 0'] == [5]
    assert namespace['output 1'] == [2]
    assert namespace['output 2'] == [3]
if __name__ == '__main__':
    import sys
    if '-t' in sys.argv:
        # -t runs the built-in self-test instead of solving the real input.
        test()
        sys.exit(0)
    with open(os.path.join(HERE, 'puzzle10_input.txt'), 'r') as instructions:
        namespace = solve(instructions)
    # Star 1: the bot that ends up comparing chips 17 and 61.
    botname = next(target for target, chips in namespace.items()
                   if set(chips) == {17, 61})
    print('Star 1:', botname)
    # Star 2: product of the single chips in outputs 0, 1 and 2.
    out1, out2, out3 = (namespace['output {}'.format(i)][0] for i in range(3))
    print('Star 2:', out1 * out2 * out3)
| StarcoderdataPython |
1649883 | <filename>test/end_to_end_test/test_delete_operation.py
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
from mapr.ojai.ojai.OJAIDocument import OJAIDocument
from mapr.ojai.storage.ConnectionFactory import ConnectionFactory
from test.test_utils.constants import CONNECTION_STR, CONNECTION_OPTIONS, DICT_STREAM
try:
import unittest2 as unittest
except ImportError:
import unittest
class DeleteTest(unittest.TestCase):
    """End-to-end tests for OJAI DocumentStore delete operations.

    All tests share one store path so documents inserted by one test are
    available to the next (the methods rely on prior inserts).
    """
    # Shared store path; extracted so the three tests stay in sync.
    STORE_PATH = '/delete-test-store1'

    def _open_store(self):
        """Return (connection, store), creating the test store if absent."""
        connection = ConnectionFactory.get_connection(connection_str=CONNECTION_STR,
                                                      options=CONNECTION_OPTIONS)
        if connection.is_store_exists(store_path=self.STORE_PATH):
            document_store = connection.get_store(store_path=self.STORE_PATH)
        else:
            document_store = connection.create_store(store_path=self.STORE_PATH)
        return connection, document_store

    def test_delete_document(self):
        """Deleting by document object removes it from the store."""
        connection, document_store = self._open_store()
        document = None
        for doc in DICT_STREAM:
            document = connection.new_document(dictionary=doc)
            document_store.insert_or_replace(doc=document)
        # Delete the last inserted document; the lookup assumes its _id is
        # 'id10' — verify against DICT_STREAM in test_utils.constants.
        document_store.delete(doc=document)
        self.assertEqual(document_store.find_by_id('id10'), {})

    def test_delete_id(self):
        """Deleting by _id removes the matching document."""
        _, document_store = self._open_store()
        document_store.delete(_id='id09')
        self.assertEqual(document_store.find_by_id('id09'), {})

    def test_delete_document_stream(self):
        """Deleting a stream of documents removes them all."""
        _, document_store = self._open_store()
        doc_stream = []
        for i in range(1, 5):
            doc_stream.append(OJAIDocument().from_dict(DICT_STREAM[i]))
        document_store.delete(doc_stream=doc_stream)
if __name__ == '__main__':
    # Build one combined suite from all test classes and run it with the
    # text runner (mirrors the other end-to-end test modules).
    test_classes_to_run = [DeleteTest]
    loader = unittest.TestLoader()
    suites_list = []
    for test_class in test_classes_to_run:
        suite = loader.loadTestsFromTestCase(test_class)
        suites_list.append(suite)
    big_suite = unittest.TestSuite(suites_list)
    runner = unittest.TextTestRunner()
    results = runner.run(big_suite)
| StarcoderdataPython |
3208639 | <reponame>iubica/wx-portfolio<filename>agw/PyProgress.py
#!/usr/bin/env python
import wx
import wx.lib.colourselect as csel
import os
import sys
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import pyprogress as PP
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.pyprogress as PP
class PyProgressDemo(wx.Panel):
    """Demo panel: lets the user configure PyProgress options (elapsed time,
    cancel button, gauge proportion/steps/colours) and launch the dialog."""
    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent)
        self.panel = wx.Panel(self, -1)
        self.log = log
        self.LayoutItems()
    def LayoutItems(self):
        """Create and lay out all option widgets and the start button."""
        mainsizer = wx.BoxSizer(wx.HORIZONTAL)
        rightsizer = wx.FlexGridSizer(7, 2, 5, 5)
        startbutton = wx.Button(self.panel, -1, "Start PyProgress!")
        self.elapsedchoice = wx.CheckBox(self.panel, -1, "Show Elapsed Time")
        self.elapsedchoice.SetValue(1)
        self.cancelchoice = wx.CheckBox(self.panel, -1, "Enable Cancel Button")
        self.cancelchoice.SetValue(1)
        # Slider 1: gauge proportion in percent (1-99, default 20).
        static1 = wx.StaticText(self.panel, -1, "Gauge Proportion (%): ")
        self.slider1 = wx.Slider(self.panel, -1, 20, 1, 99, style=wx.SL_HORIZONTAL|
                                 wx.SL_AUTOTICKS|wx.SL_LABELS)
        self.slider1.SetTickFreq(10)
        self.slider1.SetValue(20)
        # Slider 2: number of gauge steps (2-100, default 50).
        static2 = wx.StaticText(self.panel, -1, "Gauge Steps: ")
        self.slider2 = wx.Slider(self.panel, -1, 50, 2, 100, style=wx.SL_HORIZONTAL|
                                 wx.SL_AUTOTICKS|wx.SL_LABELS)
        self.slider2.SetTickFreq(10)
        self.slider2.SetValue(50)
        # Colour pickers for gauge background and the two gradient stops.
        static3 = wx.StaticText(self.panel, -1, "Gauge Background Colour: ")
        self.csel3 = csel.ColourSelect(self.panel, -1, "Choose...", wx.WHITE)
        static4 = wx.StaticText(self.panel, -1, "Gauge First Gradient Colour: ")
        self.csel4 = csel.ColourSelect(self.panel, -1, "Choose...", wx.WHITE)
        static5 = wx.StaticText(self.panel, -1, "Gauge Second Gradient Colour: ")
        self.csel5 = csel.ColourSelect(self.panel, -1, "Choose...", wx.BLUE)
        rightsizer.Add(self.elapsedchoice, 0, wx.EXPAND|wx.TOP, 10)
        rightsizer.Add((10, 0))
        rightsizer.Add(self.cancelchoice, 0, wx.EXPAND|wx.TOP, 3)
        rightsizer.Add((10, 0))
        rightsizer.Add(static1, 0, wx.ALIGN_CENTER_VERTICAL, 10)
        rightsizer.Add(self.slider1, 0, wx.EXPAND|wx.TOP, 10)
        rightsizer.Add(static2, 0, wx.ALIGN_CENTER_VERTICAL, 10)
        rightsizer.Add(self.slider2, 0, wx.EXPAND|wx.TOP|wx.BOTTOM, 10)
        rightsizer.Add(static3, 0, wx.ALIGN_CENTER_VERTICAL)
        rightsizer.Add(self.csel3, 0)
        rightsizer.Add(static4, 0, wx.ALIGN_CENTER_VERTICAL)
        rightsizer.Add(self.csel4, 0)
        rightsizer.Add(static5, 0, wx.ALIGN_CENTER_VERTICAL)
        rightsizer.Add(self.csel5, 0)
        mainsizer.Add(startbutton, 0, wx.ALL, 20)
        mainsizer.Add(rightsizer, 1, wx.EXPAND|wx.ALL, 10)
        self.panel.SetSizer(mainsizer)
        mainsizer.Layout()
        framesizer = wx.BoxSizer(wx.VERTICAL)
        framesizer.Add(self.panel, 1, wx.EXPAND)
        self.SetSizer(framesizer)
        framesizer.Layout()
        startbutton.Bind(wx.EVT_BUTTON, self.OnStartProgress)
    def OnStartProgress(self, event):
        """Launch a PyProgress dialog configured from the current UI state."""
        event.Skip()
        style = wx.PD_APP_MODAL
        if self.elapsedchoice.GetValue():
            style |= wx.PD_ELAPSED_TIME
        if self.cancelchoice.GetValue():
            style |= wx.PD_CAN_ABORT
        dlg = PP.PyProgress(None, -1, "PyProgress Example",
                            "An Informative Message",
                            agwStyle=style)
        proportion = self.slider1.GetValue()
        steps = self.slider2.GetValue()
        backcol = self.csel3.GetColour()
        firstcol = self.csel4.GetColour()
        secondcol = self.csel5.GetColour()
        dlg.SetGaugeProportion(proportion/100.0)
        dlg.SetGaugeSteps(steps)
        dlg.SetGaugeBackground(backcol)
        dlg.SetFirstGradientColour(firstcol)
        dlg.SetSecondGradientColour(secondcol)
        # Pulse the indeterminate gauge for 400 ticks of ~30 ms each, or
        # until the user cancels (UpdatePulse returns False).
        max = 400
        keepGoing = True
        count = 0
        while keepGoing and count < max:
            count += 1
            wx.MilliSleep(30)
            if count >= max / 2:
                keepGoing = dlg.UpdatePulse("Half-time!")
            else:
                keepGoing = dlg.UpdatePulse()
        dlg.Destroy()
        wx.SafeYield()
        wx.GetApp().GetTopWindow().Raise()
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Entry point used by the wxPython demo framework to build the panel."""
    win = PyProgressDemo(nb, log)
    return win
#----------------------------------------------------------------------
overview = PP.__doc__
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| StarcoderdataPython |
100158 | # -*- coding: utf-8 -*-
"""
Display the battery level.
Configuration parameters:
- ac_info : path to adapter info (default: '/sys/class/power_supply/ADP0')
- battery_info : path to battery info (default: '/sys/class/power_supply/BAT0')
- cache_timeout : seconds between battery checks (default: 5)
- capacity_degraded : percent below which colored as degraded (default: 50)
- capacity_bad : percent below which colored as bad (default: 15)
- format : display format (default: '{capacity}% {icon}')
"""
from __future__ import division # python2 compatibility
from time import time
import codecs
import math
BLOCKS = [' ','_','▁','▂','▃','▄','▅','▆','▇','█']
class Py3status:
    """py3status module displaying the battery charge level and an icon.

    Reads capacity and AC status from sysfs. The ``py3`` helper attribute
    is injected by the py3status framework at runtime.
    """
    # available configuration parameters
    # path to ac adapter info
    ac_info = '/sys/class/power_supply/ADP0'
    # path to battery info
    battery_info = '/sys/class/power_supply/BAT0'
    # check for updates every 5 seconds
    cache_timeout = 5
    # display as degraded below 50% capacity
    capacity_degraded = 50
    # display as bad below 15% capacity
    capacity_bad = 15
    # format as 66% ⌁
    format = '{capacity}% {icon}'
    def _read_info(self, path, name):
        """Return the first line of the sysfs attribute ``path/name``."""
        # Context manager guarantees the file handle is closed even if the
        # read raises (the original leaked the handle on error).
        with codecs.open(path + '/' + name, encoding='utf-8') as f:
            return f.readline()
    def j3_battery(self, i3s_output_list, i3s_config):
        """Build the status-bar response dict for the current battery state."""
        capacity = int(self._read_info(self.battery_info, 'capacity'))
        ac_online = int(self._read_info(self.ac_info, 'online'))
        icon = '⌁'  # shown while on AC power  # ⚡
        if ac_online < 1:
            # On battery: pick a block glyph proportional to the charge.
            icon = BLOCKS[int(math.ceil(capacity/100*(len(BLOCKS)-1)))]
        text = self.py3.safe_format(self.format, {
            'capacity': capacity,
            'icon': icon,
        })
        # Check the more severe threshold first: the original tested
        # "degraded" (50) before "bad" (15), so the bad branch was
        # unreachable and a nearly-empty battery showed as degraded.
        color = i3s_config['color_good']
        if capacity < self.capacity_bad:
            color = i3s_config['color_bad']
        elif capacity < self.capacity_degraded:
            color = i3s_config['color_degraded']
        return {
            'full_text': text,
            'color': color,
            'cached_until': time() + self.cache_timeout,
        }
if __name__ == "__main__":
    # Manual test harness: print the module's output once a second forever.
    from time import sleep
    x = Py3status()
    config = {
        'color_good': '#00FF00',
        'color_degraded': '#FFFF00',
        'color_bad': '#FF0000',
    }
    while True:
        print(x.j3_battery([], config))
        sleep(1)
| StarcoderdataPython |
def get_string_count_to_by(to, by):
    """Return the counting sequence ending at *to* with step *by* as a string.

    The sequence steps backwards from ``to`` by ``by`` until the value would
    drop to ``by`` or below, then is rendered in ascending order, e.g.
    (17, 3) -> "2, 5, 8, 11, 14, 17".

    This iterative form replaces the original recursion: it avoids hitting
    the interpreter recursion limit for large ``to``/small ``by`` and the
    O(n^2) cost of repeated string concatenation.

    Raises:
        ValueError: if ``to`` or ``by`` is not positive.
    """
    if by < 1:
        raise ValueError("'by' must be > 0")
    if to < 1:
        raise ValueError("'to' must be > 0")
    values = []
    while to > by:
        values.append(to)
        to -= by
    values.append(to)  # final value is the smallest term (<= by)
    return ", ".join(str(v) for v in reversed(values))
def count_to_by(to, by):
    """Print the counting sequence ending at *to* with step *by*."""
    print(get_string_count_to_by(to, by))
def main():
    """Demonstrate a few counting sequences."""
    count_to_by(10,1)
    count_to_by(34,5)
    count_to_by(17,3)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3314540 | import json
import numpy as np
import matplotlib.pyplot as plt
with open('zip.json') as data_file:
mapping = json.load(data_file)
data = eval("""[['43537', '91344', '05201', '01002', '90703', '06405', '55106',
'10309', '02138', '94533', '55107', '55369', '55436', '10003', '27510',
'42141', '93117', '55105', '54467', '01040', '17870', '20854', '50233',
'46538', '55105', '66315', '46260', '84010', '08403', '06472', '48118',
'60402', '73034', 'T8H1N', '44133', '55369', '89503', '11701', '68106',
'78155', '01913', '75206', '98006', '63129', '55423', '60613', '95032',
'16125', '90210', '67401', '22206', '22202', '20015', '73439', '60115',
'80236', '97365', '84408', '53211', '08904', '32250', '36117', '08832',
'V3N4P', '83814', '02143', '02139', '48103', '68767', '70808', '27606',
'55346', '80123', '55113', 'L9G2B', '80127', '53705', '30067', '78750',
'22306', '52302', '21911', '49512', '97403', '00000', '29440', '95014',
'55105', '75094', '93402', '01720', '03060', '55345', '58202', '14476',
'01080', '98682', '63146', '31404', '84103', '95110', '85032', '07733',
'22903', '39042', '77005', '77801', '48823', '89801', '85202', '55346',
'90064', '84601', '83716', '63119', '53706', '55414', '30033', '85251',
'22903', '55305', '53713', '15217', '31211', '44106', '94703', '92110',
'50325', '16803', '01581', '55106', '55439', '70802', '73071', '02110',
'08043', '13210', '06518', '22030', '24060', '50613', '02176', '33884',
'40504', 'V0R2M', '33775', '10522', '37901', '80123', '44405', '30093',
'90210', '45660', '97301', '49938', '60135', '98133', '61801', '85233',
'01810', '50670', '37411', '91335', '99206', '66046', '55116', '78746',
'37777', '18015', '98117', '94608', '75204', '45218', '43221', '36106',
'59801', '83686', '96819', '44092', '94551', '95129', '06811', '15222',
'38115', '85711', '92626', '43215', '55422', '85258', '55414', '92629',
'50311', '49705', '17345', '43204', '20817', '48076', '55013', '53202',
'11758', '30011', '29201', '02918', '94583', '02341', '78628', '77459',
'87544', '93711', '02125', '37771', '40256', '21208', '92121', '45218',
'60090', '03052', '10025', '23092', '92115', '03869', '28450', '21250',
'20090', '26241', '99709', '55320', '12603', '55443', '04102', '55104',
'44224', '94040', '92705', '05464', '80302', '30078', '80303', '91201',
'84302', '08052', '22911', '95468', '45680', '95453', '55414', '62901',
'62901', '23227', '30606', '11217', '63132', '32707', '55422', '53188',
'98281', '77845', '17961', '29631', '98501', '93063', '90034', '68767',
'77073', '84116', '43085', 'R3T5K', '02320', '34656', '47905', '11787',
'63044', '77008', '79070', '50613', '81648', '60402', '30067', '94306',
'44224', 'T8H1N', '48043', '61801', '95521', '63645', '45810', '39762',
'78739', '98121', '10016', '21114', '91919', '22906', '55337', '32712',
'99835', '90405', '97208', '26506', '60476', '45439', '60089', '06365',
'77042', '56321', '49512', '54494', '55454', 'N4T1A', '54901', '55343',
'44265', '84105', '64118', '16506', '11238', '17331', '91711', '94306',
'98405', '73162', '93612', '80919', '90034', '53706', '68503', '14211',
'62903', '00000', '14216', '15232', '27105', '01810', 'K7L5J', '94560',
'77081', '91040', '01754', '20064', '85281', '57197', '33755', '64131',
'55337', '92154', '34105', '61820', '93555', '90016', '80526', '73013',
'28806', '20755', '60152', '40205', '77845', '50322', '05452', '77048',
'64153', '11577', '01375', '55406', '47401', '93055', '47130', '02146',
'25652', '78390', '29646', '94086', '06492', '14085', '13820', '63021',
'92507', '74078', '35802', '77504', '55337', '40503', '27249', '17036',
'03062', '45243', '95823', '33484', 'L1V3W', '20850', '61073', '80526',
'53171', 'E2E3R', '55428', '55408', '33556', '06437', '22902', '66221',
'32789', '33319', '97229', '78209'], ['85711', '94043', '15213', '30329',
'29206', '37212', '30068', '40206', '48197', '30030', '94043', '78741',
'42459', '80525', '46260', '07102', '12550', '52245', '16509', '55414',
'01331', '52246', '97214', '43202', '80521', '55337', '60067', '41850',
'08816', '29379', '61801', '03755', '52241', '21218', '22902', '20003',
'46005', '80525', '23112', '71457', '10707', '90254', '05146', '55108',
'94043', '55125', '60466', '63130', '77840', '75013', '17110', '60615',
'20009', '15237', '38401', '48118', '20910', '97006', '53703', '66215',
'15610', '53715', '07030', '19104', '20755', '60202', '21218', '33884',
'76013', '16801', '95938', '95161', '90840', '93555', '32301', '06371',
'61401', '11231', '63033', '02215', '06513', '43212', '78205', '20685',
'27502', '47906', '92103', '99709', '22973', '44124', '95628', '20784',
'60201', '80525', '55109', '28734', '20770', '37235', '42647', '07029',
'78756', '22932', '95064', '55406', '06059', '20057', '92629', '23226',
'94619', '93550', '98103', '63108', '77904', '14853', '71701', '55454',
'95076', '91711', '60035', '18301', '19149', '06779', '91344', '30002',
'59717', '98006', '94117', '94143', '76059', '61455', '55105', '48197',
'92688', '10022', '98027', '44074', '87501', '20009', '92113', '10010',
'02859', '37412', '83702', '85016', '84604', '27514', '60008', '92374',
'78213', '55108', '93109', '03261', '61755', '98225', '44691', '78212',
'48103', '02140', '55105', '94533', '91606', '58644', '01602', '29205',
'11211', '60007', '85282', '33308', '55113', '55021', '48446', '28018',
'06333', '83709', '31820', 'Y1A6B', '90804', '91201', '10021', '94708',
'75230', '60440', '10019', '55409', '95821', '53711', '49428', '55414',
'50112', '75006', '94305', '27514', '20657', '19382', '98038', '20707',
'49508', '10021', '55454', '02146', '02159', '19711', '12180', '97408',
'02324', '21010', '60515', '95123', '14534', '68147', '10022', '10003',
'20879', '94591', '14627', '10003', '91903', '48103', '93003', '27511',
'79508', '14216', '92093', '97520', '31909', '33716', '02154', '29678',
'80227', '27705', '44134', '14850', '60187', '20723', '08034', '55408',
'38866', '55454', '23237', '01940', '60626', '55122', '53211', '91351',
'22911', '92626', '54248', '19102', '19341', '94115', '55412', '61820',
'01970', '90095', '61462', '54302', '55128', '55414', '63304', '38115',
'94920', '96754', '19146', '96349', '92020', '15203', '91206', '78741',
'56567', '32114', '98072', '95403', '22206', '63108', '92660', '47024',
'19047', '94720', '32303', '21201', '62901', '97007', '55104', '97302',
'02113', '10960', '06927', '90036', '51157', '48825', '23322', '05779',
'55420', '80913', '12205', '78212', '20009', '11217', '30803', '76234',
'02136', '27514', '37725', '80228', '80209', '53066', '77042', '10018',
'90814', '95662', '97215', 'V1G4L', '48322', '60089', '44648', '37076',
'20902', '83702', '43017', '50266', '55337', '95316', '61820', '78704',
'97301', '74075', '32301', '61755', '55116', '30350', '70124', '53210',
'N2L5N', '20006', '14216', '98801', '21114', '11753', '01701', '55038',
'77841'], ['32067', '98101', '97301', '06355', '95660', '21044', '27514',
'76111', '30040', '75240', '22904', '98034', '02215', '90291', '30220',
'55108', '90630', '97232', '06260', '99603', '20008', '07039', '94612',
'78602', '17325', '60641', '11217', '08360', '55104', '97212', '22207',
'27708', '49931', '02154', '55414', '17604', 'E2A4H', '60201', '10960',
'53115', '92037', '85710', '32605', '11727', '43512', '60659', '22003',
'22903', '94702', '53214', '20001', '78264', '19422', '43201', '10016',
'92064', '60804', '94086', '73132', '77009', '55413', '02139', '15235',
'11101', '01720', '42101', '28480', '08534', '55117', '01824', '10003',
'84107', '10019', '94025', '21206', '98199', '20009', '92653', '60201',
'10021', '97330', '60630', '98102', '75218', '05001', '98257', '43212',
'93101', '21012', 'V5A2B', '94618', '55408', '10011', '97124', '22902',
'29464', '60005', '01915', '14627', '01945', '20003', '48911', '46032',
'M7A1A', '94131', '82435', 'M4J2K', '99687', '10003', '55106', '21227',
'11201', '44212', '19807', '55414', '74101', '12065', '51250', '83814',
'02903', '55105', '60657', '10314', '78704', '77380', '20009', '28814',
'23509', '55409', '27713', '18053', '85210', '06906', '76309', '89104',
'91105', '07204', 'V0R2H', '94403', '40243', '80538', '70403', '60630',
'63108', '85719', '94618', '29210', '55113', '91206', '90247', '95050',
'33066', '12866', '55414', '80027', '01960', '33205', '98620', '08610',
'62522', '19716', '12345', '53144', '15017', '85282', '33765', '90019',
'55409', '44212', '97405', '55417', '40515', '55408', '04988', '09645',
'11231', '60302', '55303', '10025', '65203', '33763', '98027', '91505',
'18505', '97203', '68504', '29301', '06512', '76201', '08105', '60614',
'70116', '90008', '49036', '53711', '07310', '48105', '98072', '02215']]""")
# For each of the three clusters, tally zip codes by state (via the
# zip->state mapping loaded from zip.json) and draw a pie chart.
for x in range(3):
    d = {}
    for o in data[x]:
        if o in mapping:
            if mapping[o] in d:
                d[mapping[o]] += 1
            else:
                d[mapping[o]] = 1
        else:
            # Codes missing from the mapping (e.g. the alphanumeric
            # Canadian-style entries in the data) are reported, not counted.
            print("Couldn't find " + o)
    plt.figure(x+1)
    plt.title("State Distribution for Cluster " + str(x))
    plt.pie(d.values(), labels=d.keys())
plt.show()
| StarcoderdataPython |
3247745 | """
Make html galleries from media directories. Organize by dates, by subdirs or by
the content of a diary file. The diary file is a markdown file organized by
dates, each day described by a text and some medias (photos and movies).
The diary file can be exported to:
* an html file with the text and subset of medias associated with each day,
* the previous html file extended with all medias in the media directory,
* an html file ready to import into Blogger.
"""
import sys
import os
import argparse
import glob
import shutil
import re
import io
import bisect
import locale
import textwrap
import base64
import datetime
import urllib
from configparser import ConfigParser
from collections import defaultdict
from subprocess import check_output, CalledProcessError, STDOUT
from urllib.request import urlopen
import colorama
import clipboard
import PIL
from PIL import Image, ImageChops
from lxml import objectify
import markdown
USAGE = """
galerie --gallery <root-dir> [--sourcedir <media-dir>]
[--bydir true|false*]
[--bydate true|false*]
[--diary true|false*]
[--recursive true|false*]
[--dates source*|diary|<yyyymmdd-yyyymmdd>]
[--github_pages true|false]
[--dest <directory>]
[--forcethumb]
galerie --update <root-dir>
galerie --create <root-dir> --sourcedir <media-dir>
[--recursive true|false*]
[--dates source*|<yyyymmdd-yyyymmdd>]
galerie --blogger <root-dir> --url <url>
[--check]
[--full]
[--dest <filename>]
Notes:
- * gives default
- all options can be abbreviated if there is no conflict with other options (--gallery --> --gal)
"""
# -- Post objects -------------------------------------------------------------
# Css snippets embedded in the generated page header.
CAPTION_IMAGE_STYLE = '''\
<style type="text/css">
span { display:inline-table; }
</style>\
'''
# '%%' below survives the '%' formatting of START as a literal '%'.
STYLE = '''\
<style type="text/css">
p { margin-top:0px; margin-bottom:0px; }
h3 { font-size: 100%%; font-weight: bold; margin-top:0px; margin-bottom:0px; }
</style>
'''
# Page header; '%s' is the page title (START is an f-string at definition
# time, then used with the '%' operator).
START = f'''\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>%s</title>
<link rel="icon" href="favicon.ico" />
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="photobox/photobox.css">
<script src="photobox/jquery.min.js"></script>
<script src="photobox/jquery.photobox.js"></script>
{CAPTION_IMAGE_STYLE}
{STYLE}
</head>
<body>\
'''
# Fixed-position buttons toggling full/diary/text views (diary mode only).
BUTTONS = '''\
<button id="btn_full" type="button" style="position: fixed; width: 50px; top: 20px; right: 20px; background-color:white">Full</button>
<button id="btn_blog" type="button" style="position: fixed; width: 50px; top: 40px; right: 20px; background-color:white">Diary</button>
<button id="btn_text" type="button" style="position: fixed; width: 50px; top: 60px; right: 20px; background-color:white">Text</button>
<script>
$('#btn_full').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").show();
$("div.extra").show();
});
$('#btn_text').click(function() {
$("[id^=gallery-blog]").hide();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
$('#btn_blog').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
</script>
'''
SUBDIR_BACKCOL = '#eee'
END = '</body>\n</html>'
SEP = '<hr color="#C0C0C0" size="1" />'
# Media templates: %s url, %s thumb, %d width, %d height, %s title[, %s caption].
IMGPOST = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDPOST = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
IMGPOSTCAPTION = '''\
<span>
<a href="%s"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
VIDPOSTCAPTION = '''\
<span>
<a href="%s" rel="video"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
IMGDCIM = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDDCIM = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
# To reduce the spacing between images we use:
# "display: block;", "margin-bottom: 0em;" and "font-size: 0;"
# "display: block;" on img: correct spacing on desktop but not centered on phone
# "display: block;" on a: ok
DIRPOST = '<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>'
DIRPOSTCAPTION = f'''
<span style="background-color:{SUBDIR_BACKCOL}; margin-bottom: 8px; border: 1px solid #C0C0C0;">
<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>
<p style="margin-left:2px;">%s</p>
</span>
'''
# Blogger export templates (fixed 640px width).
BIMGPAT = '''\
<div class="separator" style="clear: both; text-align: center;">
<a href="%s" style="clear: left; margin-bottom: 0em; margin-right: 1em; font-size: 0; display: block;">
<img border="0" src="%s" width="640" />
</a></div>
'''
CAPTION_PAT = '''\
<div class="separator" style="clear: both; text-align: center;">
%s
</div>
'''
class Post:
    """
    One diary entry: a date, a markdown text and its attached medias.

    Attributes set after construction:
    - dcim: gallery items found in the media directory for that date
    - daterank: 1-based rank among posts sharing the same date
    - extra: True when the post was generated for a date absent from the diary
    """
    def __init__(self, date, text, medias):
        # date: yyyymmdd
        self.date = date
        self.text = text
        self.medias = medias
        self.dcim = []
        self.daterank = 0
        self.extra = False

    def __lt__(self, other):
        # posts are ordered by date (used by bisect.insort)
        return self.date < other.date

    @classmethod
    def from_markdown(cls, post):
        """
        Build a Post from a list of markdown lines: a [yyyy/mm/dd] date,
        free text, then media links ('![](uri)' image, '[](uri)' video),
        each optionally followed by a one-line caption.
        The input list is consumed in place.
        """
        m = re.match(r'\[(\d\d\d\d/\d\d/\d\d)\]\n*', post[0])
        if m:
            date = m.group(1).replace('/', '')
            if not validate_date(date):
                error('Incorrect date value:', date)
            del post[0]
        else:
            error('No date in post', ' '.join(post))
        while post and not post[0].strip():
            del post[0]
        text = ''
        while post and not re.match(r'!?\[\]', post[0]):
            text += post[0]
            del post[0]
        # remove empty lines at end
        text = re.sub(r'\n\n$', '\n', text)
        medias = list()
        while post and (match := re.match(r'!?\[\]\((.*)\)', post[0])):
            media = match.group(1)
            caption = None
            del post[0]
            # a non-link line right after a media is its caption
            if post and not re.match(r'!?\[\]', post[0]):
                caption = post[0].strip()
                del post[0]
            # leading '!' distinguishes images from videos
            if match.group(0)[0] == '!':
                medias.append(PostImage(caption, media))
            else:
                medias.append(PostVideo(caption, media))
        return cls(date, text, medias)

    @classmethod
    def from_date(cls, date):
        """Make an empty post whose text is the localized, formatted date."""
        dt = datetime.datetime.strptime(date, '%Y%m%d')
        datetext = dt.strftime("%A %d %B %Y").capitalize()
        post = cls(date, text=datetext, medias=[])
        post.daterank = 1
        return post

    def to_html(self, args, target='regular'):
        """Render the post as a list of html strings for the given target."""
        if target == 'regular':
            if args.diary:
                return self.to_html_diary(args)
            else:
                return self.to_html_regular(args)
        if target == 'blogger':
            return self.to_html_blogger()

    def to_html_regular(self, args):
        """Html for gallery mode: text (bydate) plus dcim items."""
        html = list()
        if self.text:
            # possible with --bydate
            html.append(markdown.markdown(self.text))
        subdirs, dcim = dispatch_post_items(self.dcim)
        if self.dcim:
            html.append(SEP)
        for media in subdirs:
            html.append(media.to_html_dcim(args))
        if dcim:
            html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
            for media in dcim:
                html.append(media.to_html_dcim(args))
            html.append('</div>')
        html.append(SEP)
        return html

    def to_html_diary(self, args):
        """Html for diary mode: text, diary medias, then dcim medias."""
        html = list()
        if self.extra:
            # wrapped in a div so the Full/Diary/Text buttons can toggle it
            html.append('<div class="extra">')
        if self.text:
            html.append(markdown.markdown(self.text))
        if self.medias:
            html.append(f'<div id="gallery-blog-{self.date}-{self.daterank}">')
            for media in self.medias:
                html.append(media.to_html_post(args))
            html.append('</div>')
        _, dcim = dispatch_post_items(self.dcim)
        if dcim:
            html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
            html.append(SEP)
            for media in dcim:
                html.append(media.to_html_dcim(args))
            html.append('</div>')
        html.append(SEP)
        if self.extra:
            html.append('</div>')
        return html

    def to_html_blogger(self):
        """Html for blogger export: text then medias."""
        html = list()
        html.append(markdown.markdown(self.text))
        for image in self.medias:
            html.append(image.to_html_blogger())
        html.append(SEP)
        return html
class PostItem:
    """
    Base class for items attached to a post: images, videos, subdirectories.
    """
    def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):
        self.caption = caption                    # optional one-line caption
        self.uri = uri                            # full name or url of the media
        self.basename = os.path.basename(uri)
        self.thumb = thumb                        # relative url of the thumbnail
        self.thumbsize = thumbsize                # (width, height) of the thumbnail
        self.descr = descr                        # formatted media information
        self.resized_url = None                   # set externally for blogger export — TODO confirm
class PostImage(PostItem):
    """An image item of a post."""

    def to_markdown(self):
        """Markdown image link ('![](uri)'), as parsed by Post.from_markdown."""
        # Bug fix: the '![](...)' image marker was missing from the format
        # strings, which made the '%' formatting raise TypeError (arguments
        # without placeholders) and produced output the parser cannot read.
        if not self.caption:
            return '![](%s)' % (self.uri,)
        else:
            return '![](%s)\n%s' % (self.uri, self.caption)

    def to_html_post(self, args):
        """Html for the diary part of the page."""
        descr = self.descr if args.thumbnails.media_description else ''
        if not self.caption:
            return IMGPOST % (self.uri, self.thumb, *self.thumbsize, descr)
        else:
            return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)

    def to_html_dcim(self, args):
        """Html for the gallery part of the page (url relative to root)."""
        descr = self.descr if args.thumbnails.media_description else ''
        return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)

    def to_html_blogger(self):
        """Html for blogger export (uses the url of the resized image)."""
        if not self.caption:
            return BIMGPAT % (self.uri, self.resized_url)
        else:
            return f'{BIMGPAT}\n{CAPTION_PAT}' % (self.uri, self.resized_url, self.caption)
class PostVideo(PostItem):
    """A video item of a post."""

    def to_markdown(self):
        """Markdown video link ('[](uri)'), as parsed by Post.from_markdown."""
        if not self.caption:
            return '[](%s)' % (self.uri,)
        else:
            return '[](%s)\n%s' % (self.uri, self.caption)

    def to_html_post(self, args):
        """Html for the diary part of the page."""
        descr = self.descr if args.thumbnails.media_description else ''
        if not self.caption:
            return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)
        else:
            return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)

    def to_html_dcim(self, args):
        """Html for the gallery part of the page (url relative to root)."""
        descr = self.descr if args.thumbnails.media_description else ''
        return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)

    def to_html_blogger(self):
        """Html for blogger export."""
        # NOTE(review): self.iframe is not set anywhere in this class or in
        # PostItem — presumably assigned during blogger preparation; confirm.
        x = f'<p style="text-align: center;">{self.iframe}</p>'
        if not self.caption:
            return x
        else:
            return f'%s\n{CAPTION_PAT}' % (x, self.caption)
class PostSubdir(PostItem):
    """
    A subdirectory item of a post. The attributes htmname, posts, sublist
    and caption are assigned by create_item_subdir.
    """
    def to_html_dcim(self, args):
        """Generate the subdirectory page and return the html link to it."""
        basename = os.path.basename(self.htmname)
        posts = self.posts
        title = self.caption
        print_html(args, posts, title, self.htmname)
        if not self.caption:
            return DIRPOST % (basename, self.thumb, *self.thumbsize)
        else:
            return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize, self.caption)
def relative_url(path, root):
    """
    Return a normalized, percent-encoded url to path, relative to root.
    """
    try:
        url = os.path.relpath(path, root)
    except ValueError:
        # os.path.relpath raises ValueError (e.g. paths on different drives
        # on Windows). Bug fix: report the original arguments; the previous
        # code referenced the still-unbound result in the error message and
        # used a bare except.
        error('Unable to make a relative url:', path, root)
    url = url.replace('\\', '/') if os.sep == '\\' else url
    return urllib.parse.quote(url)
# -- Markdown parser ----------------------------------------------------------
def parse_markdown(filename):
    """
    Generate Post objects from markdown. Date must be present in each post and
    posts must be ordered by date. Posts are separated by lines starting with
    '___'. Returns (title, posts); title is the '# ' heading or None.
    """
    if not os.path.exists(filename):
        error('File not found', filename)
    posts = list()
    with open(filename, encoding='utf-8') as f:
        line = next(f)
        if line.startswith('# '):
            title = line[2:].strip()
            record = []
            # skip the line after the title (assumed blank — TODO confirm)
            next(f)
        else:
            title = None
            record = [line]
        for line in f:
            if not line.startswith('___'):
                record.append(line)
            else:
                posts.append(Post.from_markdown(record))
                record = []
    # set rank of posts in date
    daterank = defaultdict(int)
    for post in posts:
        daterank[post.date] += 1
        post.daterank = daterank[post.date]
    # check post order
    for post1, post2 in zip(posts[:-1], posts[1:]):
        if post1.date > post2.date:
            error('Posts are not ordered', f'{post1.date} > {post2.date}')
    return title, posts
# -- Markdown printer ---------------------------------------------------------
def print_markdown(posts, title, fullname):
    """
    Write posts to fullname as a markdown diary file (the inverse of
    parse_markdown): title heading, then one [yyyy/mm/dd] record per post,
    text wrapped at 78 columns, media links, '______' separators.
    """
    with open(fullname, 'wt', encoding='utf-8') as fdst:
        print(f'# {title}\n', file=fdst)
        for post in posts:
            date = f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]'
            print(date, file=fdst)
            if post.text:
                print(file=fdst)
                for line in post.text.splitlines():
                    if not line:
                        print(file=fdst)
                    else:
                        for chunk in textwrap.wrap(line, width=78):
                            print(chunk, file=fdst)
            if post.medias:
                print(file=fdst)
                for media in post.medias:
                    print(media.to_markdown(), file=fdst)
            print('______', file=fdst)
# -- html printer -------------------------------------------------------------
def compose_html_reduced(args, posts, title, target):
    """Compose the reduced html page (no gallery scripts) as a list of lines."""
    html = [START % title]
    for post in posts:
        html.extend(line.strip() for line in post.to_html(args, target))
        html.append('')
    html.append(END)
    return html
def compose_html_full(args, posts, title, target):
    """Compose the complete html page, with photobox gallery calls, as a list of lines."""
    html = [START % title]
    if args.diary:
        html.append(BUTTONS)
    for post in posts:
        html.extend(line.strip() for line in post.to_html(args, target))
        html.append('')
    html.append('<script>')
    for post in posts:
        if post.medias:
            html.append(gallery_call(args, f'gallery-blog-{post.date}-{post.daterank}'))
        if post.dcim:
            html.append(gallery_call(args, f'gallery-dcim-{post.date}-{post.daterank}'))
    html.append('</script>')
    html.append(END)
    return html
def print_html_to_stream(args, posts, title, stream, target):
    """Print the html page to stream: full for 'regular', reduced otherwise."""
    compose = compose_html_full if target == 'regular' else compose_html_reduced
    for line in compose(args, posts, title, target):
        print(line, file=stream)
def print_html(args, posts, title, html_name, target='regular'):
    """
    Generate the html page. When html_name is given, write the file only if
    its content changed and return None; otherwise return the html string.
    """
    assert target in ('regular', 'blogger')
    with io.StringIO() as buffer:
        print_html_to_stream(args, posts, title, buffer, target)
        html = buffer.getvalue()
    if not html_name:
        return html
    if os.path.exists(html_name):
        with open(html_name, 'rt', encoding='utf-8') as f:
            if f.read() == html:
                # identical to the version on disk, avoid rewriting
                return None
    with open(html_name, 'wt', encoding='utf-8') as f:
        f.write(html)
    return None
# Template of the javascript call configuring a photobox gallery
# (filled in by gallery_call with args.photobox settings).
GALLERYCALL = """
$('#%s').photobox('a', {
loop:%s,
thumbs:%s,
autoplay:%s,
time:%d,
zoomable:%s ,
rotatable:%s,
wheelNextPrev:%s
});
"""
def gallery_call(args, gallery_id):
    """Return the one-line javascript photobox call for gallery_id."""
    def as_js_bool(value):
        # javascript booleans are lowercase
        return str(value).lower()
    pb = args.photobox
    return GALLERYCALL.replace('\n', '') % (
        gallery_id,
        as_js_bool(pb.loop),
        as_js_bool(pb.thumbs),
        as_js_bool(pb.autoplay),
        pb.time,
        as_js_bool(pb.zoomable),
        as_js_bool(pb.rotatable),
        as_js_bool(pb.wheelNextPrev),
    )
# -- Media description --------------------------------------------------------
def is_image_file(name):
    """Return True if name has a still-image file extension."""
    image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tif'}
    _, extension = os.path.splitext(name)
    return extension.lower() in image_extensions
def is_video_file(name):
    """Return True if name has a video file extension."""
    video_extensions = {
        '.mp4', '.webm', '.mkv', '.flv', '.m4v', '.avi', '.wmv', '.mts', '.vob', '.divx'
    }
    _, extension = os.path.splitext(name)
    return extension.lower() in video_extensions
def is_media(name):
    # True if name is an image or a video (by extension)
    return is_image_file(name) or is_video_file(name)
def validate_date(datestr):
    """Return True if datestr is a valid 'yyyymmdd' date."""
    try:
        datetime.datetime.strptime(datestr, '%Y%m%d')
    except ValueError:
        return False
    return True
def date_from_name(name):
    """Heuristic: extract a valid yyyymmdd date embedded in name, or None."""
    found = re.search(r'(?:\D|^)(\d{8})(?:\D|$)', name, re.ASCII)
    if found:
        candidate = found.group(1)
        if validate_date(candidate):
            return candidate
    return None
def date_from_item(filename):
    """Date yyyymmdd taken from the file name if present, else from the mtime."""
    named_date = date_from_name(filename)
    if named_date:
        return named_date
    mtime = os.path.getmtime(filename)
    return datetime.datetime.fromtimestamp(mtime).strftime('%Y%m%d')
def time_from_name(name):
    """Heuristic: extract a valid hhmmss time from a yyyymmdd?hhmmss pattern, or None."""
    found = re.search(r'(?:\D|^)(\d{8})\D(\d{6})(?:\D|$)', name, re.ASCII)
    if found is None:
        return None
    digits = found.group(2)
    hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits[4:6])
    if hour < 24 and minute < 60 and second < 60:
        return digits
    return None
def time_from_item(filename):
    """Time hhmmss taken from the file name if present, else from the mtime."""
    named_time = time_from_name(filename)
    if named_time:
        return named_time
    mtime = os.path.getmtime(filename)
    return datetime.datetime.fromtimestamp(mtime).strftime('%H%M%S')
FFPROBE_CMD = '''\
ffprobe -v error
-select_streams v:0
-show_entries stream=width,height,avg_frame_rate,r_frame_rate:format=duration
-of csv=p=0
'''
def get_image_info(filename):
    """
    Return ((date, time, width, height, size), formatted_string) for an
    image file; size is in megabytes.
    """
    date = date_from_item(filename)
    time = time_from_item(filename)
    img = Image.open(filename)
    width, height = img.size
    size = round(os.path.getsize(filename) / 1e6, 1)
    return (date, time, width, height, size), f'{date} {time}, dim={width}x{height}, {size} MB'
def get_video_info(filename, info_fullname):
    """
    Return ((date, time, width, height, size, duration, fps), formatted_string)
    for a video file, caching the result in the info_fullname sidecar file to
    avoid running ffprobe again.
    """
    if os.path.exists(info_fullname):
        # cached: one line of space-separated values written below
        with open(info_fullname) as f:
            info = f.readline().split()
        date, time, width, height, size, duration, fps = info[0], info[1], int(info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6])
        formatted_info = format_video_info(date, time, width, height, size, duration, fps)
        return (date, time, width, height, size, duration, fps), formatted_info
    else:
        info, formatted_info = make_video_info(filename, info_fullname)
        with open(info_fullname, 'wt') as f:
            print(' '.join([str(_) for _ in info]), file=f)
        return info, formatted_info
def make_video_info(filename, info_fullname):
    """
    Probe a video file with ffprobe and return
    ((date, time, width, height, size, duration, fps), formatted_string).
    Raises CalledProcessError when ffprobe fails (e.g. corrupted file).
    """
    # ffmpeg must be in path
    date = date_from_item(filename)
    time = time_from_item(filename)
    command = [*FFPROBE_CMD.split(), filename]
    try:
        output = check_output(command, stderr=STDOUT).decode()
        width, height, fps, duration = parse_ffprobe_output(output)
        size = round(os.path.getsize(filename) / 1e6, 1)  # megabytes
        output = format_video_info(date, time, width, height, size, duration, fps)
    except CalledProcessError as e:
        output = e.output.decode()
        warning(output)
        raise
    return (date, time, width, height, size, duration, fps), output
def parse_ffprobe_output(ffprobe_output):
    """
    Parse ffprobe CSV output (width, height, avg/raw frame rates on the first
    line, duration on the last) into (width, height, fps, duration).
    """
    pattern = r'(\d+),(\d+),(\d+)/(\d+),(\d+/\d+).*\s(\d+\.\d+)'
    groups = re.match(pattern, ffprobe_output, re.DOTALL).groups()
    width = int(groups[0])
    height = int(groups[1])
    fps = round(int(groups[2]) / int(groups[3]), 1)
    duration = round(float(groups[5]))
    return width, height, fps, duration
def format_video_info(date, time, width, height, size, duration, fps):
    # one-line human readable summary, also stored in the .info sidecar file
    return f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'
def format_duration(duration):
    """Format a duration in seconds as 'm:s=MM:SS', or 'h:m:s=HH:MM:SS' above an hour."""
    minutes, seconds = divmod(duration, 60)
    if minutes <= 59:
        return f'm:s={minutes:02}:{seconds:02}'
    hours, minutes = divmod(minutes, 60)
    return f'h:m:s={hours:02}:{minutes:02}:{seconds:02}'
# -- Thumbnails (image and video) ---------------------------------------------
def thumbname(name, key):
    """Thumbnail file name for media name, prefixed with key ('post'/'dcim')."""
    return f'{key}-{name}.jpg'
def size_thumbnail(width, height, maxdim):
    """Thumbnail size with largest dimension maxdim, preserving aspect ratio."""
    if width >= height:
        scaled = int(round(maxdim * height / width))
        return maxdim, scaled
    scaled = int(round(maxdim * width / height))
    return scaled, maxdim
def make_thumbnail_image(args, image_name, thumb_name, size):
    """Create the image thumbnail, unless it already exists and --forcethumb is off."""
    if os.path.exists(thumb_name) and args.forcethumb is False:
        pass
    else:
        print('Making thumbnail:', thumb_name)
        create_thumbnail_image(image_name, thumb_name, size)
def create_thumbnail_image(image_name, thumb_name, size):
    """Resize image_name into the jpeg thumbnail thumb_name with PIL."""
    imgobj = Image.open(image_name)
    # NOTE(review): the '.gif' clause can never matter here, since the name
    # must already end with '.jpg' for the condition to hold; the intent
    # (skip transparent gifs?) should be confirmed.
    if (imgobj.mode != 'RGBA'
        and image_name.endswith('.jpg')
        and not (image_name.endswith('.gif') and imgobj.info.get('transparency'))
    ):
        imgobj = imgobj.convert('RGBA')
    imgobj.thumbnail(size, Image.LANCZOS)
    # jpeg output cannot keep an alpha channel
    imgobj = imgobj.convert('RGB')
    imgobj.save(thumb_name)
def make_thumbnail_video(args, video_name, thumb_name, size, duration):
    """Create the video thumbnail, unless it already exists and --forcethumb is off."""
    if os.path.exists(thumb_name) and args.forcethumb is False:
        pass
    else:
        print('Making thumbnail:', thumb_name)
        create_thumbnail_video(args, video_name, thumb_name, size, duration)
# Small movie icon (png, base64) pasted on video thumbnails to identify them.
VIDEO_ICON = '''\
iVBORw0KGgoAAAANSUhEUgAAABgAAAAUCAAAAACy3qJfAAAA4UlEQVR4
2m1QoRbCMAy88SaK69xscfuEWiS4SZBIcCCRfAL8An8AcnJzTOJSWdxwzJXSPUoHRPQlueYuucigxm
9kDGaMf8AjopGcYn8LmmyLoihBWBiThb+5MTuUsc3aL56upneZ9sByAIg8Z8BEn96EeZ65iU7DvmbP
PxqDcH6p1swXBC4l6yZskACkTN1WrQr2SlIFhTtgqeZa+zsOogLXegvEocZ5c/W5BcoVNNCg3hSudV
/hEh4ofw6cEb00Km8i0dpRDUXfKiaQOEAdrUDo4dFp9C33jjaRac9/gDF/AlplVYtfWGCjAAAAAElF
TkSuQmCC'''
def create_thumbnail_video(args, filename, thumbname, size, duration):
    """
    Create a video thumbnail by grabbing one frame with ffmpeg (must be in
    path) and pasting a small movie icon on it to identify videos.
    """
    # grab a frame a few seconds in, but not past the end of the video
    delay = min(duration - 1, args.thumbnails.thumbdelay)
    sizearg = '%dx%d' % size
    # Robustness fix: build the command as an argument list instead of an
    # os.system string, so file names containing spaces or quotes cannot
    # break (or inject into) the shell command.
    command = ['ffmpeg', '-y', '-v', 'error',
               '-itsoffset', str(-delay), '-i', filename,
               '-vcodec', 'mjpeg', '-vframes', '1', '-an',
               '-f', 'rawvideo', '-s', sizearg, thumbname]
    try:
        check_output(command, stderr=STDOUT)
    except CalledProcessError:
        # failure is detected below when opening the thumbnail
        pass
    # add a movie icon to the thumbnail to identify videos
    try:
        img1 = Image.open(thumbname)
    except OSError:
        # ffmpeg was unable to save thumbnail
        warning('Unable to save thumbnail for', filename)
        return
    img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))
    width, height = img1.size
    img1.paste(img2, (6, height - 20 - 6), None)
    img1.save(thumbname)
def make_thumbnail_subdir(args, subdir_name, thumb_name, size, items, thumbdir):
    """Create the mosaic thumbnail of a subdirectory."""
    # subdir thumbnails are always created as they depend on the content of the
    # directory
    print('Making thumbnail:', thumb_name)
    create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir)
def create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):
    """
    Paste the thumbnails of items into a mosaic of dimension size and save it
    as thumb_name, unless an identical mosaic is already on disk.
    """
    # local helper: scale (width, height) to cover at least (xmax, ymax);
    # note it intentionally shadows the module-level size_thumbnail
    def size_thumbnail(width, height, xmax, ymax):
        width2 = xmax
        height2 = int(round(xmax * height / width))
        if height2 < ymax:
            width2 = int(round(ymax * width / height))
            height2 = ymax
        return width2, height2
    thumblist = [os.path.basename(item.thumb) for item in items]
    widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size, thumblist)
    thumbnum = widthnum * heightnum
    img = Image.new('RGB', size, SUBDIR_BACKCOL)
    for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):
        row = ind // widthnum
        col = ind % widthnum
        img2 = Image.open(os.path.join(thumbdir, thumb))
        # scale to cover the cell, then center-crop to the exact cell size
        w, h = size_thumbnail(*img2.size, width[col], height[row])
        cropdim = ((w - width[col]) // 2, (h - height[row]) // 2,
                   (w - width[col]) // 2 + width[col], (h - height[row]) // 2 + height[row])
        img2 = img2.resize((w, h), Image.LANCZOS)
        img2 = img2.crop(cropdim)
        img.paste(img2, (offsetx[col], offsety[row]))
    if os.path.exists(thumb_name):
        # test if the generated thumbnail is identical to the one already on disk
        imgref = Image.open(thumb_name)
        # must save and reload before comparing
        byteio = io.BytesIO()
        img.save(byteio, "JPEG")
        byteio.seek(0)
        imgnew = Image.open(byteio)
        diff = ImageChops.difference(imgnew, imgref)
        if diff.getbbox() is None:
            return
    img.save(thumb_name)
def mosaic_geometry(size, thumblist):
    """
    Compute the grid geometry of a subdir mosaic of total dimension size
    (width, height) for len(thumblist) thumbnails: 1x1, 1x2, 2x2 or 3x3 cells.

    Returns (widthnum, heightnum, width, height, offsetx, offsety) where
    width/height are the per-column/per-row cell sizes and offsetx/offsety
    the pixel offsets of each column/row; cells are separated by 2px and the
    whole mosaic keeps a 1px margin (the last cell absorbs the remainder).
    """
    if len(thumblist) == 1:
        widthnum = 1
        heightnum = 1
    elif len(thumblist) <= 3:
        widthnum = 1
        heightnum = 2
    elif len(thumblist) <= 8:
        widthnum = 2
        heightnum = 2
    else:
        widthnum = 3
        heightnum = 3
    if widthnum == 1:
        width = [size[0] - 2]
    else:
        width = [size[0] // widthnum - 2] * (widthnum - 1)
        width.append(size[0] - (1 + sum(width) + 2 * len(width) + 1))
    if heightnum == 1:
        height = [size[1] - 2]
    else:
        height = [size[1] // heightnum - 2] * (heightnum - 1)
        height.append(size[1] - (1 + sum(height) + 2 * len(height) + 1))
    offsetx = [1]
    for w in width[:-1]:
        offsetx.append(offsetx[-1] + w + 2)
    offsety = [1]
    for h in height[:-1]:
        offsety.append(offsety[-1] + h + 2)
    return widthnum, heightnum, width, height, offsetx, offsety
def list_of_htmlfiles(args, posts):
    """All html file names of the gallery: root page plus subdir pages."""
    htmlist = [os.path.join(args.dest, args.rootname)]
    for post in posts:
        htmlist.extend(list_of_htmlfiles_in_items(post.dcim))
    return htmlist
def list_of_htmlfiles_in_items(itemlist):
    """Recursively collect the html file names of subdir items."""
    htmlist = list()
    for item in itemlist:
        if type(item) is PostSubdir:
            htmlist.append(item.htmname)
            htmlist.extend(list_of_htmlfiles_in_items(item.sublist))
    return htmlist
def list_of_thumbnails(posts, diary=False):
    """All thumbnail base names referenced by posts (dcim items excluded in diary mode)."""
    thumblist = list()
    for post in posts:
        thumblist.extend(list_of_thumbnails_in_items(post.medias))
        if diary is False:
            thumblist.extend(list_of_thumbnails_in_items(post.dcim))
    return thumblist
def list_of_thumbnails_in_items(itemlist):
    """Recursively collect thumbnail base names, descending into subdir items."""
    thumblist = list()
    for item in itemlist:
        thumblist.append(os.path.basename(item.thumb))
        if type(item) is PostSubdir:
            thumblist.extend(list_of_thumbnails_in_items(item.sublist))
    return thumblist
def purge_htmlfiles(args, posts):
    """
    Purge root dir from irrelevant html files; asks for confirmation above
    args.thumbnails.threshold_htmlfiles removals.
    """
    # NOTE(review): the expected list is built from args.dest while the glob
    # scans args.root — confirm both always point to the same directory.
    htmlist = list_of_htmlfiles(args, posts)
    html_to_remove = list()
    for fullname in glob.glob(os.path.join(args.root, '*.htm*')):
        if fullname not in htmlist:
            html_to_remove.append(fullname)
    if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:
        inpt = 'x'
        while inpt not in 'yn':
            inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()
        if inpt == 'n':
            return
    for name in html_to_remove:
        print('Removing html files', name)
        os.remove(name)
def purge_thumbnails(args, thumbdir, posts, diary=False):
    """
    Purge thumbnail dir from irrelevant thumbnails (and their .info sidecar
    files); asks for confirmation above args.thumbnails.threshold_thumbs
    removals.
    """
    thumblist = list_of_thumbnails(posts, diary)
    thumbs_to_remove = list()
    for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):
        if os.path.basename(fullname) not in thumblist:
            thumbs_to_remove.append(fullname)
    if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:
        inpt = 'x'
        while inpt not in 'yn':
            inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()
        if inpt == 'n':
            return
    for name in thumbs_to_remove:
        print('Removing thumbnail', name)
        os.remove(name)
        # also drop the cached video information, if any
        info_fullname = os.path.splitext(name)[0] + '.info'
        if os.path.exists(info_fullname):
            os.remove(info_fullname)
# -- List of medias helpers ---------------------------------------------------
def is_media_within_dates(fullname, dates):
    """
    True if fullname is a media file and, when dates is a (date1, date2)
    tuple, its date falls within the bounds (inclusive).
    """
    if not is_media(fullname):
        return False
    if type(dates) == tuple:
        return dates[0] <= date_from_item(fullname) <= dates[1]
    return True
def sorted_listdir(filelist):
    """
    Sort file names like Windows Explorer: case-insensitively, on the stem
    right-padded with spaces to a common length, then the extension.
    """
    if not filelist:
        return filelist
    maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)
    def explorer_key(name):
        stem, ext = os.path.splitext(name.lower())
        return stem.ljust(maxlen) + ext
    return sorted(filelist, key=explorer_key)
def list_of_files(sourcedir, recursive):
    """
    Return the list of full paths for files in source directory; a directory
    containing a '.nomedia' file is ignored.
    """
    result = list()
    if recursive is False:
        names = sorted_listdir(os.listdir(sourcedir))
        if '.nomedia' not in names:
            result.extend(os.path.join(sourcedir, name) for name in names)
    else:
        for root, dirs, files in os.walk(sourcedir):
            if '.nomedia' in files:
                continue
            result.extend(os.path.join(root, name) for name in sorted_listdir(files))
    return result
def list_of_medias(args, sourcedir, recursive):
    """
    Return the list of full paths for pictures and movies in source directory
    matching the date constraints (args.dates).
    """
    return [name for name in list_of_files(sourcedir, recursive)
            if is_media_within_dates(name, args.dates)]
def list_of_medias_ext(args, sourcedir):
    """
    Return the list of full paths for pictures and movies in source directory
    plus subdirectories containing media. Honours '.nomedia' marker files and
    skips the Windows '$RECYCLE.BIN' directory.
    """
    result = list()
    listdir = sorted_listdir(os.listdir(sourcedir))
    if '.nomedia' not in listdir:
        for basename in listdir:
            fullname = os.path.join(sourcedir, basename)
            if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):
                result.append(fullname)
            else:
                if is_media_within_dates(fullname, args.dates):
                    result.append(fullname)
    return result
def contains_media(args, dirname):
    """
    Return True if dirname (recursively) contains at least one media file
    within the date constraints (args.dates).

    Directories holding a '.nomedia' file are ignored but the walk continues,
    consistently with list_of_files. Bug fix: the previous version returned
    False as soon as it met any directory containing '.nomedia', aborting the
    whole walk, and returned None (instead of False) when the walk ended.
    """
    for root, dirs, files in os.walk(dirname):
        if '.nomedia' in files:
            # skip this directory's files, keep walking the rest of the tree
            continue
        for basename in files:
            if is_media_within_dates(os.path.join(root, basename), args.dates):
                return True
    return False
def dispatch_post_items(list_of_post_items):
    """Split post items into (subdirectory items, plain media items)."""
    subdirs = []
    medias = []
    for item in list_of_post_items:
        target = subdirs if type(item) is PostSubdir else medias
        target.append(item)
    return subdirs, medias
# -- Creation of gallery element ----------------------------------------------
def create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
    """Create the gallery item (image, video or subdirectory) for media_fullname."""
    if not os.path.isfile(media_fullname):
        return create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
    if is_image_file(media_fullname):
        return create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
    return create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
def create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
    """
    Make the thumbnail of an image and return the PostImage item, or None if
    the image cannot be read.
    """
    media_basename = os.path.basename(media_fullname)
    media_relname = relative_name(media_fullname, sourcedir)
    thumb_basename = thumbname(media_relname, key)
    thumb_fullname = os.path.join(thumbdir, thumb_basename)
    try:
        info, infofmt = get_image_info(media_fullname)
        infofmt = media_basename + ': ' + infofmt
        thumbsize = size_thumbnail(info[2], info[3], thumbmax)
        make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)
        return PostImage(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
                         thumbsize, infofmt)
    except PIL.UnidentifiedImageError:
        # corrupted image
        warning('Unable to read image', media_fullname)
        return None
def create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
    """
    Make the thumbnail of a video and return the PostVideo item, or None if
    the video cannot be probed by ffprobe.
    """
    media_basename = os.path.basename(media_fullname)
    media_relname = relative_name(media_fullname, sourcedir)
    thumb_basename = thumbname(media_relname, key)
    thumb_fullname = os.path.join(thumbdir, thumb_basename)
    # sidecar file caching the ffprobe information
    info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'
    try:
        info, infofmt = get_video_info(media_fullname, info_fullname)
        infofmt = media_basename + ': ' + infofmt
        thumbsize = size_thumbnail(info[2], info[3], thumbmax)
        make_thumbnail_video(args, media_fullname, thumb_fullname, thumbsize, duration=info[5])
        return PostVideo(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
                         thumbsize, infofmt)
    except CalledProcessError:
        # corrupted video
        warning('Unable to read video', media_fullname)
        return None
def create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
    """
    Build the PostSubdir item for a media subdirectory: recursively make its
    posts, its own html page name and its mosaic thumbnail. Returns None when
    the subdirectory contains no media.
    """
    media_basename = os.path.basename(media_fullname)
    media_relname = relative_name(media_fullname, sourcedir)
    thumb_basename = thumbname(media_relname, key)
    thumb_fullname = os.path.join(thumbdir, thumb_basename)
    info, infofmt = None, None
    # fixed 4:3 mosaic dimension
    thumbsize = (thumbmax, int(round(thumbmax / 640 * 480)))
    medias_ext = list_of_medias_ext(args, media_fullname)
    if not medias_ext:
        return None
    item = PostSubdir(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
                      thumbsize, infofmt)
    item.htmname = os.path.join(os.path.dirname(thumbdir), media_relname + args.html_suffix)
    if args.thumbnails.subdir_caption:
        item.caption = media_basename
    else:
        item.caption = ''
    _, posts = make_posts(args, media_fullname)
    item.posts = posts
    items = [item for post in posts for item in post.dcim]
    item.sublist = items
    make_thumbnail_subdir(args, media_fullname, thumb_fullname, thumbsize, items, thumbdir)
    return item
def relative_name(media_fullname, sourcedir):
    """
    Flatten the path of media_fullname relative to sourcedir into a single
    file name: '/', '\\' and '#' become '_'. For instance
    /Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg
    relative to /Gilles/Dev/journal/tests/subdir gives
    deeper2_deepest_OCT_20000112_000004.jpg
    """
    flat = os.path.relpath(media_fullname, sourcedir)
    for char in ('\\', '/', '#'):
        flat = flat.replace(char, '_')
    return flat
# -- Creation of posts --------------------------------------------------------
def make_posts(args, dirname):
    """Dispatch post creation according to the --diary/--bydate modes; returns (title, posts)."""
    if args.diary is True:
        if args.sourcedir:
            return make_posts_from_diary_and_dir(args)
        return make_posts_from_diary(args)
    if args.bydate is False:
        return make_posts_from_subdir(args, dirname)
    return make_posts_from_subdir_and_date(args, dirname)
def make_posts_from_diary(args):
    """
    Parse the diary file (index.md in root) and complete each diary media
    with its thumbnail data. Returns (title, posts).
    """
    md_filename = os.path.join(args.root, 'index.md')
    if os.path.exists(md_filename):
        title, posts = parse_markdown(md_filename)
    else:
        error('File not found', md_filename)
    for post in posts:
        for media in post.medias:
            media_fullname = os.path.join(args.root, media.uri)
            item = create_item(args, media_fullname, args.root, args.thumbdir, 'post', 400)
            media.thumb = item.thumb
            media.thumbsize = item.thumbsize
            media.descr = item.descr
    return title, posts
def create_items_by_date(args, medias, posts):
    """
    Group gallery items by date: return a dict {yyyymmdd: [items]} for the
    medias whose date is required (diary dates or args.dates interval), each
    list sorted by time of day.
    """
    # list of required dates
    if args.dates == 'diary':
        required_dates = {post.date for post in posts}
    else:
        required_dates = {date_from_item(media) for media in medias}
        if type(args.dates) == tuple:
            date1, date2 = args.dates
            required_dates = {date for date in required_dates if date1 <= date <= date2}
    bydate = defaultdict(list)
    for media_fullname in medias:
        date = date_from_item(media_fullname)
        if date in required_dates:
            item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)
            if item:
                bydate[date].append(item)
    # keep items in chronological order within each day
    for date, liste in bydate.items():
        liste.sort(key=lambda item: time_from_item(item.uri))
    return bydate
def make_posts_from_diary_and_dir(args):
    """
    Make posts from the diary file, extended with all medias of the source
    directory; dates absent from the diary get an extra generated post.
    Returns (title, posts).
    """
    title, posts = make_posts_from_diary(args)
    # list of all pictures and movies
    medias = list_of_medias(args, args.sourcedir, args.recursive)
    bydate = create_items_by_date(args, medias, posts)
    # make list of extra dates (not in posts)
    extradates = set(bydate) - {post.date for post in posts}
    # complete posts with extra dates
    for date in extradates:
        post = Post.from_date(date)
        post.extra = True
        bisect.insort(posts, post)
    # several posts can have the same date, only the first one is completed with dcim medias
    for post in posts:
        if post.date in bydate and post.daterank == 1:
            post.dcim = bydate[post.date]
    return title, posts
def make_posts_from_subdir(args, dirname):
    """
    Make a single post containing all medias (and, with --bydir, media
    subdirectories) of dirname. Returns (title, posts).
    """
    # list of pictures and movies plus subdirectories
    if args.bydir is False:
        medias_ext = list_of_medias(args, dirname, args.recursive)
    else:
        medias_ext = list_of_medias_ext(args, dirname)
    # complete posts
    postmedias = list()
    for item in medias_ext:
        postmedia = create_item(args, item, args.sourcedir, args.thumbdir, 'dcim', 300)
        if postmedia is not None:
            postmedias.append(postmedia)
    # a single dummy-dated post carries the whole gallery
    post = Post(date='00000000', text='', medias=[])
    post.dcim = postmedias
    posts = [post]
    # fall back on the drive name when sourcedir is a drive root
    title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]
    return title, posts
def make_posts_from_subdir_and_date(args, dirname):
    """Build posts for *dirname*: one optional post holding the
    subdirectories (bydir mode), then one post per media date."""
    # split the directory content into medias and subdirectories
    if args.bydir is False:
        medias = list_of_medias(args, dirname, args.recursive)
        subdirs = []
    else:
        medias_ext = list_of_medias_ext(args, dirname)
        medias = [entry for entry in medias_ext if is_media(entry)]
        subdirs = [entry for entry in medias_ext if not is_media(entry)]
    # a single leading post gathers all subdirectory items, if any
    posts = list()
    subdir_items = []
    for fullname in subdirs:
        subdir_item = create_item(args, fullname, args.sourcedir, args.thumbdir, 'dcim', 300)
        if subdir_item:
            subdir_items.append(subdir_item)
    if subdir_items:
        subdir_post = Post(date='00000000', text='', medias=[])
        subdir_post.dcim = subdir_items
        posts.append(subdir_post)
    # one post per date, in chronological order
    bydate = create_items_by_date(args, medias, posts)
    for date in sorted(bydate):
        dated_post = Post.from_date(date)
        dated_post.dcim = bydate[dated_post.date]
        posts.append(dated_post)
    # fall back to the drive name when sourcedir is a bare drive root
    title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]
    return title, posts
# -- Creation of html page from directory tree --------------------------------
def create_gallery(args):
    """Generate the html gallery, then remove stale html files and
    thumbnails left over from previous runs."""
    title, posts = make_posts(args, args.sourcedir)
    output_name = os.path.join(args.dest, args.rootname)
    print_html(args, posts, title, output_name, 'regular')
    purge_htmlfiles(args, posts)
    # a pure diary gallery (no media directory) purges in diary mode
    if args.diary and not args.sourcedir:
        purge_thumbnails(args, args.thumbdir, posts, diary=True)
    else:
        purge_thumbnails(args, args.thumbdir, posts)
# -- Creation of diary from medias --------------------------------------------
def create_diary(args):
    """Create a skeleton diary file (index.md) in args.root with one empty
    post per date found in the medias of args.sourcedir."""
    # list of all pictures and movies
    medias = list_of_medias(args, args.sourcedir, args.recursive)
    # --dates=diary is rejected earlier for --create (setup_part2); keep a
    # real guard here instead of `assert 0`, which vanishes under python -O
    if args.dates == 'diary':
        error('Incorrect date format', args.dates)
    required_dates = {date_from_item(media) for media in medias}
    # a (date1, date2) tuple restricts the dates to that interval
    if isinstance(args.dates, tuple):
        date1, date2 = args.dates
        required_dates = {date for date in required_dates if date1 <= date <= date2}
    title = args.sourcedir
    posts = [Post.from_date(date) for date in sorted(required_dates)]
    os.makedirs(args.root, exist_ok=True)
    print_markdown(posts, title, os.path.join(args.root, 'index.md'))
# -- Export to blogger---------------------------------------------------------
def online_images_url(args):
    """Scrape the blogger page (http url or local file given by --url) and
    return (online_images, online_videos):

    - online_images: dict basename -> (image url, thumbnail url)
    - online_videos: list of video iframe snippets, in order of appearance
      (video matching relies on order only)
    """
    try:
        if args.urlblogger.startswith(('http:', 'https:')):
            with urlopen(args.urlblogger) as u:
                buffer = u.read()
        else:
            with open(args.urlblogger, 'rb') as f:
                buffer = f.read()
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit escape
        error('Unable to read url', args.urlblogger)
    buffer = buffer.decode('utf-8')
    online_images = dict()
    for match in re.finditer('<div class="separator"((?!<div).)*?</div>', buffer, flags=re.DOTALL):
        div_separator = match.group(0)
        # NOTE(review): the replaced character may be a non-breaking space in
        # the original source — confirm before changing this literal
        div_separator = div_separator.replace(' ', '')
        elem_div = objectify.fromstring(div_separator)
        for elem_a in elem_div.iterchildren(tag='a'):
            href = elem_a.get("href")
            thumb = elem_a.img.get("src")
            online_images[os.path.basename(href)] = (href, thumb)
    # video insertion relies only on video order
    online_videos = list()
    for match in re.finditer('<iframe allowfullscreen="allowfullscreen".*?</iframe>', buffer, flags=re.DOTALL):
        iframe = match.group(0)
        online_videos.append(iframe)
    return online_images, online_videos
def compare_image_buffers(imgbuf1, imgbuf2):
    """Return True when the two encoded image buffers decode to
    pixel-identical images, False otherwise."""
    with io.BytesIO(imgbuf1) as stream1, io.BytesIO(imgbuf2) as stream2:
        first = Image.open(stream1)
        second = Image.open(stream2)
        # getbbox() is None when the difference image is entirely black,
        # i.e. the two images have identical pixels
        return ImageChops.difference(first, second).getbbox() is None
def check_images(args, posts, online_images):
    """Compare local post images with their online blogger counterparts.

    Prints a diagnostic per image and returns False when at least one local
    image is absent online, True otherwise. Videos are not checked.
    """
    result = True
    for post in posts:
        for media in post.medias:
            if type(media) is PostImage:
                if media.basename in online_images:
                    with open(os.path.join(args.root, media.uri), 'rb') as f:
                        imgbuf1 = f.read()
                    try:
                        with urlopen(online_images[media.basename][0]) as u:
                            imgbuf2 = u.read()
                    except FileNotFoundError:
                        print('File not found', online_images[media.basename][0])
                        # bug fix: was a bare `next` expression (a no-op),
                        # which fell through to compare an undefined imgbuf2
                        continue
                    if compare_image_buffers(imgbuf1, imgbuf2) is False:
                        print('Files are different, upload', media.basename)
                    else:
                        # dead `if 1:` wrapper removed
                        print('File already online', media.basename)
                else:
                    print('File is absent, upload', media.basename)
                    result = False
            elif type(media) is PostVideo:
                # no check for the moment
                print('Video not checked', media.basename)
            else:
                assert False
    return result
def compose_blogger_html(args, title, posts, imgdata, online_videos):
    """Compose html for blogger: local image uris are replaced with their
    blogger urls and video iframes are attached in order of appearance."""
    for post in posts:
        for media in post.medias:
            if type(media) is PostImage:
                if media.uri in imgdata:
                    img_url, resized_url = imgdata[media.uri]
                    media.uri = img_url
                    media.resized_url = resized_url
                else:
                    print('Image missing: ', media.uri)
            elif type(media) is PostVideo:
                if not online_videos:
                    print('Video missing: ', media.uri)
                else:
                    # consume the online iframes in order
                    media.iframe = online_videos.pop(0)
            else:
                assert False
    return print_html(args, posts, title, '', target='blogger')
def prepare_for_blogger(args):
    """
    Export blogger html to clipboard (or to --dest when given).
    If --full, export complete html, otherwise export an html extract ready
    to paste into blogger edit mode.
    """
    title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
    online_images, online_videos = online_images_url(args)
    if args.check_images:
        # result deliberately ignored: checking only prints diagnostics
        check_images(args, posts, online_images)
    html = compose_blogger_html(args, title, posts, online_images, online_videos)
    if args.full is False:
        # keep only the body, drop scripts, prepend the inline style
        body = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)
        body = re.sub('<script>.*?</script>', '', body, flags=re.DOTALL)
        html = STYLE.replace('%%', '%') + body
    if args.dest:
        with open(args.dest, 'wt', encoding='utf-8') as f:
            f.write(html)
    else:
        clipboard.copy(html)
# -- Other commands -----------------------------------------------------------
def idempotence(args):
    """
    Read a diary file and print it back; used by the test suite to verify
    that parsing then printing is the identity.
    """
    source_md = os.path.join(args.root, 'index.md')
    target_md = os.path.join(args.dest, 'index.md')
    title, posts = parse_markdown(source_md)
    print_markdown(posts, title, target_md)
# -- Configuration file ------------------------------------------------------
# The following docstring is used to create the configuration file.
CONFIG_DEFAULTS = """\
[source]
; source directory
; value: valid path
sourcedir = .
; one web page per directory
; value: true or false
bydir = false
; dispatch medias by dates
; value: true or false
bydate = false
; include text and medias from diary file
; value: true or false
diary = false
; include subdirectories recursively (used when bydir is false)
; value: true or false
recursive = false
; interval of dates to include
; value: source|diary|yyyymmdd-yyyymmdd or empty (= source)
dates =
; github Pages compatibility (.htlml extension and no dot in directory names)
; value: true or false
github_pages = false
[thumbnails]
; specifies whether or not the gallery displays media description (size, dimension, etc)
; value: true or false
media_description = true
; specifies whether subdir captions are empty or the name of the subdir
; value: true or false
subdir_caption = true
; timestamp of thumbnail in video
; value: number of seconds
thumbdelay = 5
; maximum number of thumbnails to remove without user confirmation
; value: integer
threshold_thumbs = 10
[photobox]
; Allows to navigate between first and last images
; value: true or false
loop = false
; Show gallery thumbnails below the presented photo
; value: true or false
thumbs = true
; Should autoplay on first time or not
; value: true or false
autoplay = false
; Autoplay interval (less than 1000 will hide the autoplay button)
; value: milliseconds
time = 3000
; Disable/enable mousewheel image zooming
; value: true or false
zoomable = true
; Allow rotation of the image
; value: true or false
rotatable = true
; Change image using mousewheel left/right
; value: true or false
wheelNextPrev = true
"""
class MyConfigParser(ConfigParser):
    """ConfigParser variant that accepts ';' inline comments and aborts the
    program with a readable message on a missing or malformed value."""

    def __init__(self):
        super().__init__(inline_comment_prefixes=(';',))

    def error(self, section, entry):
        # error() exits the process with the matching error code
        error('Missing or incorrect config value:', '[%s]%s' % (section, entry))

    def getint(self, section, entry, default=None):
        """Read an int entry; with *default* the entry may be absent."""
        try:
            if default is None:
                return super().getint(section, entry)
            return super().getint(section, entry, raw=True, vars=None, fallback=default)
        except Exception as exc:
            print(exc)
            self.error(section, entry)

    def getboolean(self, section, entry, default=None):
        """Read a boolean entry; with *default* the entry may be absent."""
        try:
            if default is None:
                return super().getboolean(section, entry)
            return super().getboolean(section, entry, raw=True, vars=None, fallback=default)
        except Exception as exc:
            print(exc)
            self.error(section, entry)
def configfilename(params):
    """Return the path of the hidden configuration file inside the gallery
    root directory (params.root)."""
    return os.path.join(params.root, '.config.ini')
def createconfig(config_filename):
    """Write the default configuration template to *config_filename*."""
    with open(config_filename, 'wt') as f:
        # write(), not writelines(): CONFIG_DEFAULTS is a single string and
        # writelines() would iterate it character by character
        f.write(CONFIG_DEFAULTS)
def read_config(params):
    """Create the config file when missing (or when --resetcfg), then load
    it into *params*; any failure aborts with an explicit error message."""
    config_filename = configfilename(params)
    try:
        if not os.path.exists(config_filename) or params.resetcfg:
            createconfig(config_filename)
    except Exception:
        # narrowed from a bare except: which also swallowed
        # KeyboardInterrupt/SystemExit
        error('Error creating configuration file')
    try:
        getconfig(params, config_filename)
    except Exception as e:
        error('Error reading configuration file.', str(e), 'Use --resetcfg')
def getconfig(options, config_filename):
    """Load *config_filename* and attach its values to *options* as three
    namespace attributes: options.source, options.thumbnails and
    options.photobox (one per config section)."""
    class Section:
        # bare namespace grouping the entries of one configuration section
        pass
    options.source = Section()
    options.thumbnails = Section()
    options.photobox = Section()
    config = MyConfigParser()
    config.read(config_filename)
    # [source]
    options.source.sourcedir = config.get('source', 'sourcedir')
    options.source.bydir = config.getboolean('source', 'bydir')
    options.source.bydate = config.getboolean('source', 'bydate')
    options.source.diary = config.getboolean('source', 'diary')
    options.source.recursive = config.getboolean('source', 'recursive')
    options.source.dates = config.get('source', 'dates')
    # entries read with default= may be absent from config files created by
    # older versions of the program
    options.source.github_pages = config.getboolean('source', 'github_pages', default=False)
    # [thumbnails]
    options.thumbnails.media_description = config.getboolean('thumbnails', 'media_description')
    options.thumbnails.subdir_caption = config.getboolean('thumbnails', 'subdir_caption')
    options.thumbnails.thumbdelay = config.getint('thumbnails', 'thumbdelay')
    options.thumbnails.threshold_thumbs = config.getint('thumbnails', 'threshold_thumbs')
    options.thumbnails.threshold_htmlfiles = config.getint('thumbnails', 'threshold_htmlfiles', default=3)
    # [photobox]
    options.photobox.loop = config.getboolean('photobox', 'loop')
    options.photobox.thumbs = config.getboolean('photobox', 'thumbs')
    options.photobox.autoplay = config.getboolean('photobox', 'autoplay')
    options.photobox.time = config.getint('photobox', 'time')
    options.photobox.zoomable = config.getboolean('photobox', 'zoomable')
    options.photobox.rotatable = config.getboolean('photobox', 'rotatable')
    options.photobox.wheelNextPrev = config.getboolean('photobox', 'wheelNextPrev')
def setconfig(cfgname, section, key, value):
    """Rewrite the config file *cfgname* with entry [section]key set to
    *value* (note: ConfigParser.write drops the template comments)."""
    parser = MyConfigParser()
    parser.read(cfgname)
    parser.set(section, key, value)
    with open(cfgname, 'wt') as out:
        parser.write(out)
def setconfig_cmd(args):
    """Handle the hidden --setcfg command; args.setcfg holds
    (section, key, value) after parse_command_line stripped the root."""
    setconfig(configfilename(args), *args.setcfg)
def update_config(args):
    """Write back into the config file the entries that can be set from the
    command line (the [source] section), editing lines in place so the
    template comments are preserved."""
    updates = (
        ('sourcedir', args.sourcedir),
        ('bydir', BOOL[args.bydir]),
        ('bydate', BOOL[args.bydate]),
        ('diary', BOOL[args.diary]),
        ('recursive', BOOL[args.recursive]),
        ('dates', args.dates),
        ('github_pages', BOOL[args.github_pages]),
    )
    # manual update to keep comments
    cfgname = configfilename(args)
    with open(cfgname) as f:
        cfglines = [_.strip() for _ in f.readlines()]
    for key, value in updates:
        for iline, line in enumerate(cfglines):
            # match the exact entry name (text before '='), not just a
            # common prefix: startswith(key) could hit the wrong entry
            if line.split('=')[0].strip() == key:
                cfglines[iline] = f'{key} = {value}'
                break
    with open(cfgname, 'wt') as f:
        for line in cfglines:
            print(line, file=f)
# -- Error handling -----------------------------------------------------------
def warning(*msg):
    """Print the message parts, space-joined, in bright yellow."""
    text = ' '.join(msg)
    print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + text,
          colorama.Style.RESET_ALL)
# Every error message error must be declared here to give a return code to the error
# Order matters: errorcode() maps each message to its 1-based line number,
# which error() uses as the process exit code.
ERRORS = '''\
File not found
Directory not found
No date in post
Incorrect date value:
Posts are not ordered
Unable to read url
No image source (--sourcedir)
No blogger url (--url)
Missing or incorrect config value:
Error creating configuration file
Error reading configuration file.
Incorrect date format
Incorrect parameters:
'''
def errorcode(msg):
    """Return the exit code for *msg*: its 1-based line index in the ERRORS
    table (raises ValueError for an unknown message)."""
    known_messages = ERRORS.splitlines()
    return known_messages.index(msg) + 1
def error(*msg):
    """Print the message in bright red and exit the process with the code
    that errorcode() maps to msg[0]."""
    text = ' '.join(msg)
    print(colorama.Fore.RED + colorama.Style.BRIGHT + text,
          colorama.Style.RESET_ALL)
    sys.exit(errorcode(msg[0]))
# -- Main ---------------------------------------------------------------------
BOOL = ('false', 'true')
def parse_command_line(argstring):
    """Parse *argstring* (the full command line joined into one string) and
    return the argparse namespace.

    After parsing, the BOOL-choice options are normalized to real booleans,
    args.dates defaults to 'source', and args.root is set to the root
    directory taken from whichever command option was given.
    """
    parser = argparse.ArgumentParser(description=None, usage=USAGE)
    agroup = parser.add_argument_group('Commands')
    xgroup = agroup.add_mutually_exclusive_group()
    xgroup.add_argument('--gallery', help='source in --sourcedir',
                        action='store', metavar='<root-dir>')
    agroup.add_argument('--update', help='updates gallery with parameters in config file',
                        action='store', metavar='<root-dir>')
    xgroup.add_argument('--create', help='create journal from medias in --sourcedir',
                        action='store', metavar='<root-dir>')
    # testing
    xgroup.add_argument('--resetcfg', help='reset config file to defaults',
                        action='store', metavar='<root-dir>')
    xgroup.add_argument('--setcfg', help=argparse.SUPPRESS,
                        action='store', nargs=4, metavar='<root-dir>')
    xgroup.add_argument('--idem', help=argparse.SUPPRESS,
                        action='store', metavar='<root-dir>')
    # blogger
    xgroup.add_argument('--blogger',
                        help='input md, html blogger ready in clipboard',
                        action='store', metavar='<root-dir>')
    agroup = parser.add_argument_group('Parameters')
    agroup.add_argument('--bydir', help='organize gallery by subdirectory',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--bydate', help='organize gallery by date',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--diary', help='organize gallery using markdown file diary',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--recursive', help='--sourcedir scans recursively',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--dates', help='dates interval',
                        action='store', default=None)
    agroup.add_argument('--sourcedir', help='media directory',
                        action='store', default=None)
    agroup.add_argument('--github_pages', help='github Pages compatibility',
                        action='store', default=None, choices=BOOL)
    agroup.add_argument('--dest', help='output directory',
                        action='store')
    agroup.add_argument('--forcethumb', help='force calculation of thumbnails',
                        action='store_true', default=False)
    agroup.add_argument('--full', help='full html (versus blogger ready html)',
                        action='store_true', default=False)
    agroup.add_argument('--check', dest='check_images', help='check availability of medias on blogger',
                        action='store_true')
    agroup.add_argument('--url', dest='urlblogger', help='blogger post url',
                        action='store')
    if not argstring:
        parser.print_help()
        sys.exit(1)
    else:
        args = parser.parse_args(argstring.split())
    # --update re-reads the creation parameters from the config file, so it
    # cannot be combined with them on the command line
    if args.update and (args.bydir or args.bydate or args.diary or args.sourcedir or
                        args.recursive or args.dates or args.github_pages):
        error('Incorrect parameters:',
              '--update cannot be used with creation parameters, use explicit command')
    # normalize the 'true'/'false' string choices to booleans
    args.bydir = args.bydir == 'true'
    args.bydate = args.bydate == 'true'
    args.diary = args.diary == 'true'
    args.recursive = args.recursive == 'true'
    args.dates = 'source' if (args.dates is None) else args.dates
    args.github_pages = args.github_pages == 'true'
    # args.root is the value of whichever command option was given
    args.root = (
        args.create or args.gallery or args.update
        or args.blogger or args.idem or args.resetcfg
    )
    if args.setcfg:
        # --setcfg takes 4 values: root-dir, section, key, value
        args.root = args.setcfg[0]
        args.setcfg = args.setcfg[1:]
    return args
def setup_part1(args):
    """
    Runs before the config file is read (the config file lives in args.root).

    Keeps the raw argument in args.rootarg, then normalizes args.root:
    strips a trailing file name when the argument has an extension, makes
    the path absolute, and creates the directory for --gallery (other
    commands abort when it is missing).
    """
    args.rootarg = args.root
    has_extension = os.path.splitext(args.rootarg)[1] != ''
    if has_extension:
        # the argument names an html file: the gallery root is its directory
        args.root = os.path.dirname(args.root)
    if args.root:
        args.root = os.path.abspath(args.root)
        if not os.path.isdir(args.root):
            if args.gallery:
                os.mkdir(args.root)
            else:
                error('Directory not found', args.root)
def setup_part2(args):
    """
    Made after reading config file.
    Check for ffmpeg in path.
    Create .thumbnails dir if necessary and create .nomedia in it.
    Copy photobox file to destination dir.
    Handle priority between command line and config file.
    """
    # --update: the config file wins; --gallery: the command line wins and
    # is saved back to the config file
    if args.update:
        args.sourcedir = args.source.sourcedir
        args.bydir = args.source.bydir
        args.bydate = args.source.bydate
        args.diary = args.source.diary
        args.recursive = args.source.recursive
        args.dates = args.source.dates
        args.github_pages = args.source.github_pages
    elif args.gallery:
        args.source.sourcedir = args.sourcedir
        args.source.bydir = args.bydir
        args.source.bydate = args.bydate
        args.source.diary = args.diary
        args.source.recursive = args.recursive
        args.source.dates = args.dates
        args.source.github_pages = args.github_pages
        update_config(args)
    # github pages requires the .html extension
    if args.github_pages:
        args.html_suffix = '.html'
    else:
        args.html_suffix = '.htm'
    # rootname: explicit html file name, or index.<suffix> by default
    rootext = os.path.splitext(args.rootarg)[1]
    if rootext:
        args.rootname = os.path.basename(args.rootarg)
    else:
        args.rootname = 'index' + args.html_suffix
    if args.sourcedir:
        args.sourcedir = os.path.abspath(args.sourcedir)
        # normalize the Windows drive letter to upper case
        if os.path.splitdrive(args.sourcedir)[0]:
            drive, rest = os.path.splitdrive(args.sourcedir)
            args.sourcedir = drive.upper() + rest
        if not os.path.isdir(args.sourcedir):
            error('Directory not found', args.sourcedir)
    else:
        if args.gallery and args.diary is False and args.update is None:
            error('Directory not found', 'Use --sourcedir')
    if args.dest:
        args.dest = os.path.abspath(args.dest)
    if args.dest is None:
        args.dest = args.root
    if args.blogger and args.urlblogger is None:
        error('No blogger url (--url)')
    if args.gallery or args.update:
        # check for ffmpeg and ffprobe in path
        for exe in ('ffmpeg', 'ffprobe'):
            try:
                check_output([exe, '-version'])
            except FileNotFoundError:
                error('File not found', exe)
        # github pages does not serve directories whose name starts with a dot
        if args.github_pages:
            args.thumbrep = 'thumbnails'
        else:
            args.thumbrep = '.thumbnails'
        args.thumbdir = os.path.join(args.dest, args.thumbrep)
        if not os.path.exists(args.thumbdir):
            os.mkdir(args.thumbdir)
            # .nomedia keeps media indexers out of the thumbnail directory
            open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()
        favicondst = os.path.join(args.dest, 'favicon.ico')
        if not os.path.isfile(favicondst):
            faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')
            shutil.copyfile(faviconsrc, favicondst)
        # ship the photobox viewer alongside the generated pages
        photoboxdir = os.path.join(args.dest, 'photobox')
        if not os.path.exists(photoboxdir):
            photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')
            shutil.copytree(photoboxsrc, photoboxdir)
    if args.dates:
        if not(args.gallery or args.create):
            # silently ignored for the moment, otherwise all other commands will
            # launch a warning or an error on the default --dates value
            pass
        if args.dates == 'source':
            pass
        elif args.dates == 'diary':
            if args.create:
                error('Incorrect date format', args.dates)
        elif re.match(r'\d+-\d+', args.dates):
            # yyyymmdd-yyyymmdd interval, validated then stored as a tuple
            date1, date2 = args.dates.split('-')
            if validate_date(date1) and validate_date(date2):
                args.dates = date1, date2
            else:
                error('Incorrect date format', args.dates)
        else:
            error('Incorrect date format', args.dates)
def main(argstring=None):
    """Entry point: parse the command line, load the configuration and run
    the requested command; Ctrl-C only prints a warning."""
    colorama.init()
    args = parse_command_line(argstring)
    setup_part1(args)
    read_config(args)
    setup_part2(args)
    try:
        _run_command(args)
    except KeyboardInterrupt:
        warning('Interrupted by user.')


def _run_command(args):
    # dispatch on whichever command option was given
    if args.gallery or args.update:
        create_gallery(args)
    elif args.create:
        create_diary(args)
    elif args.blogger:
        prepare_for_blogger(args)
    elif args.idem:
        idempotence(args)
    elif args.setcfg:
        setconfig_cmd(args)
def main_entry_point():
    """Console-script entry point: apply the user locale then run main()."""
    locale.setlocale(locale.LC_ALL, '')
    argstring = ' '.join(sys.argv[1:])
    main(argstring)
if __name__ == '__main__':
    # direct invocation: same bootstrap as main_entry_point()
    locale.setlocale(locale.LC_ALL, '')
    main(' '.join(sys.argv[1:]))
| StarcoderdataPython |
191335 | <gh_stars>1-10
import pytest
# see also issue https://github.com/amsico/pyqtschema/issues/12
from pydantic import BaseModel
from pyqtschema.utils import build_example_widget
class Simple(BaseModel):
    """Minimal two-field model whose generated JSON schema drives the
    widget tests below."""
    string: str
    integer: int
# JSON schema dict shared by all tests in this module
schema = Simple.schema()
def test_hide_widget(qtbot):
    """A field flagged ui:hidden is invisible while both fields stay enabled."""
    widget = build_example_widget(schema, ui_schema={'string': {'ui:hidden': True}})
    widget.show()
    qtbot.addWidget(widget)
    fields = widget.widget.widgets
    assert not fields['string'].isVisible()
    assert fields['integer'].isVisible()
    assert fields['string'].isEnabled()
    assert fields['integer'].isEnabled()
def test_disable_widget(qtbot):
    """A field flagged ui:disabled is disabled while both fields stay visible."""
    widget = build_example_widget(schema, ui_schema={'integer': {'ui:disabled': True}})
    widget.show()
    qtbot.addWidget(widget)
    fields = widget.widget.widgets
    assert fields['string'].isVisible()
    assert fields['integer'].isVisible()
    assert fields['string'].isEnabled()
    assert not fields['integer'].isEnabled()
| StarcoderdataPython |
70398 | # -*- coding: utf-8 -*-
import os
import sublime
import sublime_plugin
class CopyPythonPathCommand(sublime_plugin.TextCommand):
    """Sublime Text command that copies the dotted python path of the symbol
    under the caret (package.module[.Class][.function]) to the clipboard."""

    def run(self, edit):
        """Build the dotted path from the file location and the caret scope."""
        python_path_items = []
        # module name from the file name, omitting a bare __init__
        head, tail = os.path.split(self.view.file_name())
        module = tail.rsplit('.', 1)[0]
        if module != '__init__':
            python_path_items.append(module)
        # walk up the directory tree as long as directories are packages
        # (contain an __init__.py), prepending each package name
        head, tail = os.path.split(head)
        while tail:
            if '__init__.py' in os.listdir(os.path.join(head, tail)):
                python_path_items.insert(0, tail)
            else:
                break
            head, tail = os.path.split(head)
        caret_point = self.view.sel()[0].begin()
        # caret directly on a class name: append it
        if 'entity.name.class.python' in self.view.scope_name(caret_point):
            python_path_items.append(self.view.substr(self.view.word(caret_point)))
        # caret on a function name: if it is indented, find the nearest
        # preceding top-level class name and insert it before the function
        if 'entity.name.function.python' in self.view.scope_name(caret_point):
            method_name = self.view.substr(self.view.word(caret_point))
            if self.view.indentation_level(caret_point) > 0:
                regions = self.view.find_by_selector('entity.name.class.python')
                possible_class_point = 0
                regions = list(filter(lambda reg: reg.b < caret_point, regions))
                for region in reversed(regions):
                    if self.view.indentation_level(region.a) == 0:
                        possible_class_point = region.a
                        break
                class_name = self.view.substr(self.view.word(possible_class_point))
                python_path_items.append(class_name)
            python_path_items.append(method_name)
        python_path = '.'.join(python_path_items)
        sublime.set_clipboard(python_path)
        sublime.status_message('"%s" copied to clipboard' % python_path)

    def is_enabled(self):
        # only offer the command inside python source files
        matcher = 'source.python'
        return self.view.match_selector(self.view.sel()[0].begin(), matcher)
| StarcoderdataPython |
3349944 | <gh_stars>10-100
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Add scan results from a third-party vulnerability scanner"
class Input:
OPERATION = "operation"
SCAN_RESULTS = "scan_results"
class Output:
COMMANDS_PROCESSED = "commands_processed"
ERRORS = "errors"
class BulkAddScanResultInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"operation": {
"type": "string",
"title": "Operation",
"description": "The operation to be performed when adding scan results. ScanFlush to remove existing scan results or ScanUpdate to keep existing scan results",
"enum": [
"ScanUpdate",
"ScanFlush"
],
"order": 2
},
"scan_results": {
"type": "array",
"title": "Scan Results",
"description": "Scan results to add",
"items": {
"$ref": "#/definitions/scan_result"
},
"order": 1
}
},
"required": [
"operation"
],
"definitions": {
"host": {
"type": "object",
"title": "host",
"properties": {
"ip_address": {
"type": "string",
"title": "IP Address",
"default": "",
"order": 1
},
"operating_system": {
"$ref": "#/definitions/os",
"title": "Host Operating System",
"order": 2
}
},
"required": [
"ip_address"
],
"definitions": {
"os": {
"type": "object",
"title": "os",
"properties": {
"name": {
"type": "string",
"title": "Name",
"order": 2
},
"vendor": {
"type": "string",
"title": "Vendor",
"order": 1
},
"version": {
"type": "string",
"title": "Version",
"order": 3
}
}
}
}
},
"os": {
"type": "object",
"title": "os",
"properties": {
"name": {
"type": "string",
"title": "Name",
"order": 2
},
"vendor": {
"type": "string",
"title": "Vendor",
"order": 1
},
"version": {
"type": "string",
"title": "Version",
"order": 3
}
}
},
"result_details": {
"type": "object",
"title": "result_details",
"properties": {
"bugtraq_ids": {
"type": "array",
"title": "Bugtraq IDs",
"description": "The identification numbers associated with the vulnerability in the Bugtraq database (http://www.securityfocus.com/bid)",
"items": {
"type": "string"
},
"order": 9
},
"cve_ids": {
"type": "array",
"title": "CVE IDs",
"description": "The identification number associated with the vulnerability in MITRE’s Common Vulnerabilities and Exposures (CVE) database (http://www.cve.mitre.org/)",
"items": {
"type": "string"
},
"order": 8
},
"description": {
"type": "string",
"title": "Description",
"order": 7
},
"port": {
"type": "string",
"title": "Port",
"order": 5
},
"protocol_id": {
"type": "string",
"title": "Protocol ID",
"order": 6
},
"scanner_id": {
"type": "string",
"title": "Scanner ID",
"description": "Scanner ID for the scanner that obtained the scan results",
"order": 2
},
"source_id": {
"type": "string",
"title": "Source ID",
"description": "Application or source ID",
"order": 1
},
"vulnerability_id": {
"type": "string",
"title": "Vulnerability ID",
"order": 3
},
"vulnerability_title": {
"type": "string",
"title": "Vulnerability Title",
"description": "Title of the vulnerability",
"order": 4
}
},
"required": [
"scanner_id",
"source_id",
"vulnerability_id",
"vulnerability_title"
]
},
"scan_result": {
"type": "object",
"title": "scan_result",
"properties": {
"host": {
"$ref": "#/definitions/host",
"title": "Host",
"description": "Add an untracked host to the network map",
"order": 1
},
"scan_result_details": {
"$ref": "#/definitions/result_details",
"title": "Scan Result Details",
"description": "Scan result for the host",
"order": 2
}
},
"definitions": {
"host": {
"type": "object",
"title": "host",
"properties": {
"ip_address": {
"type": "string",
"title": "IP Address",
"default": "",
"order": 1
},
"operating_system": {
"$ref": "#/definitions/os",
"title": "Host Operating System",
"order": 2
}
},
"required": [
"ip_address"
],
"definitions": {
"os": {
"type": "object",
"title": "os",
"properties": {
"name": {
"type": "string",
"title": "Name",
"order": 2
},
"vendor": {
"type": "string",
"title": "Vendor",
"order": 1
},
"version": {
"type": "string",
"title": "Version",
"order": 3
}
}
}
}
},
"os": {
"type": "object",
"title": "os",
"properties": {
"name": {
"type": "string",
"title": "Name",
"order": 2
},
"vendor": {
"type": "string",
"title": "Vendor",
"order": 1
},
"version": {
"type": "string",
"title": "Version",
"order": 3
}
}
},
"result_details": {
"type": "object",
"title": "result_details",
"properties": {
"bugtraq_ids": {
"type": "array",
"title": "Bugtraq IDs",
"description": "The identification numbers associated with the vulnerability in the Bugtraq database (http://www.securityfocus.com/bid)",
"items": {
"type": "string"
},
"order": 9
},
"cve_ids": {
"type": "array",
"title": "CVE IDs",
"description": "The identification number associated with the vulnerability in MITRE’s Common Vulnerabilities and Exposures (CVE) database (http://www.cve.mitre.org/)",
"items": {
"type": "string"
},
"order": 8
},
"description": {
"type": "string",
"title": "Description",
"order": 7
},
"port": {
"type": "string",
"title": "Port",
"order": 5
},
"protocol_id": {
"type": "string",
"title": "Protocol ID",
"order": 6
},
"scanner_id": {
"type": "string",
"title": "Scanner ID",
"description": "Scanner ID for the scanner that obtained the scan results",
"order": 2
},
"source_id": {
"type": "string",
"title": "Source ID",
"description": "Application or source ID",
"order": 1
},
"vulnerability_id": {
"type": "string",
"title": "Vulnerability ID",
"order": 3
},
"vulnerability_title": {
"type": "string",
"title": "Vulnerability Title",
"description": "Title of the vulnerability",
"order": 4
}
},
"required": [
"scanner_id",
"source_id",
"vulnerability_id",
"vulnerability_title"
]
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class BulkAddScanResultOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"commands_processed": {
"type": "number",
"title": "Commands Processed",
"description": "Number of commands processed",
"order": 2
},
"errors": {
"type": "number",
"title": "Errors",
"description": "Number of errors",
"order": 1
}
},
"required": [
"commands_processed",
"errors"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| StarcoderdataPython |
158876 | <filename>src/units/_cluster_multiple.py
import logging
import numpy as np
from joblib import Parallel, delayed
from ..log import setup_logger
from ..units._evaluation import Eval_Silhouette
from ..utils.validation import _validate_n_jobs
def cluster_multiple(x, obj_def, k_list=np.array([2, 4, 8, 16]),
                     attribute_name='n_clusters', eval_obj=None,
                     method_name='fit_predict', n_jobs=None, **kwargs):
    """
    Runs clustering for multiple n_clusters.

    Parameters
    __________
    x: array, shape (n_samples, n_features)
        The data array.

    obj_def: object name
        Object to be instantiated in this function.

    k_list: array, shape (n_trials,), dtype int, default [2, 4, 8, 16]
        Array containing the different values of clusters to try.

    attribute_name: string, default 'n_clusters'
        Name of the obj.attribute_name that corresponds to n_clusters.

    eval_obj: Eval or None, default None
        Evaluation object to compare performance of different trials.
        If set to None, it will be initialized to SilhouetteScore by default.

    method_name: string, default 'fit_predict'
        Name of the method obj.method_name(x) that returns labels given x.

    n_jobs: int or None, default None
        Number of jobs to use if multithreading. See
        https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html.
        If None or 1, will not run multithreading.

    **kwargs: dictionary
        Dictionary of parameters that will get passed to obj_def
        when instantiating it.

    Returns
    _______
    top_y: array, dtype=obj.method_name(x).dtype, shape (n_samples,)
        List of labels that correspond to the best clustering k, as
        evaluated by eval_obj.

    score_list: list of float, shape (n_trials,)
        Evaluation score obtained for each k in k_list, in the same order.
    """
    logger = setup_logger('Cluster.Multiple')
    # Default evaluation object
    if eval_obj is None:
        eval_obj = Eval_Silhouette()
    # If n_jobs = -1, run all threads
    n_jobs = _validate_n_jobs(n_jobs)

    if n_jobs == 1:
        top_y, top_score, top_k = None, -np.Inf, -1
        score_list = []
        for i, k in enumerate(k_list):
            kwargs[attribute_name] = k
            # Cluster
            y = getattr(obj_def(**kwargs), method_name)(x)
            # Evaluate
            score = eval_obj.get(x, y)
            score_list.append(score)
            # Update if better (higher) score (saves memory)
            if score > top_score:
                top_y, top_score, top_k = y, score, k
            logger.info(
                "Finished clustering with k={0}. Score={1:.2f}.".format(k, score))
    else:
        logger.info("Running multiple threads with n_jobs={0}.".format(n_jobs))
        # one kwargs copy per k so the parallel workers do not share state
        kwargs_list = []
        for i, k in enumerate(k_list):
            kcopy = kwargs.copy()
            kcopy[attribute_name] = k
            kwargs_list.append(kcopy)
        # Run clustering in parallel
        y_list = Parallel(n_jobs=n_jobs)(
            delayed(getattr(obj_def(**kwargs_list[i]), method_name))(x)
            for i in range(len(k_list)))
        # Run evaluation in parallel
        score_list = Parallel(n_jobs=n_jobs)(
            delayed(eval_obj.get)(x, y)
            for y in y_list)
        # Find the best score
        top_index = np.argmax(score_list)
        top_y, top_k = y_list[top_index], k_list[top_index]
        # Log scores
        for k, score in zip(k_list, score_list):
            logger.info(
                "Finished clustering with k={0}. Score={1:.2f}.".format(k, score))

    logger.info(
        "Finished clustering. Best score achieved for k={0}.".format(top_k))
    return top_y, score_list
1796500 | from m5stack import *
# from m5stack_ui import M5Screen, M5Label, M5Dropdown, M5Switch
from m5stack_ui import *
from uiflow import *
from menu_screen import MenuScreen
def default_protocol():
    """Return a fresh protocol dict holding the factory settings."""
    return {
        'n_animals': 4,
        'n_stims': 30,
        'paw_left': True,
        'paw_right': True,
    }
class ProtocolScreen(MenuScreen):
    """Menu screen for editing the stimulation protocol: number of
    stimulations (dropdown) and left/right paw toggles."""

    def __init__(self, screen, actions=['', '', 'next']):
        super().__init__(screen, actions)
        # UI widget handles, created lazily in build_ui()
        self.ui_stim_label = None
        self.ui_stim_count = None
        self.ui_paw_left_lable = None
        self.ui_paw_left_switch = None
        self.ui_paw_right_lable = None
        self.ui_paw_right_switch = None
        # protocol dict being edited; replaced via set_protocol()
        self.protocol = default_protocol()
        # upper bound (exclusive) for the stimulation-count dropdown
        self.max_stims = 100
        # layout and theming constants
        self._align_x_left = 10
        self._align_x_right = 180
        self._y_spacing = 40
        self._color_bg = 0x333333
        self._color_font = 0xFFFFFF
        self._font = FONT_MONT_20

    def set_protocol(self, protocol):
        """Replace the protocol shown/edited by this screen."""
        self.protocol = protocol

    def build_ui(self):
        """Create the widgets reflecting the current protocol values."""
        # NOTE(review): `screen` here is a module-level name, not the screen
        # passed to __init__ — confirm a global `screen` exists at runtime
        screen.clean_screen()
        screen.set_screen_bg_color(self._color_bg)
        y = 20
        # UI: number of stimulations
        self.ui_stim_label = M5Label(
            'stimulations',
            x=self._align_x_left,
            y=y,
            color=self._color_font,
            font=self._font,
        )
        self.ui_stim_count = M5Dropdown(
            x=self._align_x_right,
            y=y,
        )
        # set_options(list...) seems not to be working correctly
        for i in range(self.max_stims):
            self.ui_stim_count.add_option(str(i), i)
        stim_index = self.protocol['n_stims']
        self.ui_stim_count.set_sel_index(stim_index)
        # UI: Paw toggles
        y += self._y_spacing
        self.ui_paw_left_lable = M5Label(
            text='left paw',
            x=self._align_x_left,
            y=y,
            color=self._color_font,
            font=self._font,
        )
        self.ui_paw_left_switch = M5Switch(
            x=self._align_x_right,
            y=y,
        )
        if self.protocol['paw_left']:
            self.ui_paw_left_switch.set_on()
        else:
            self.ui_paw_left_switch.set_off()
        y += self._y_spacing
        self.ui_paw_right_lable = M5Label(
            text='right paw',
            x=self._align_x_left,
            y=y,
            color=self._color_font,
            font=self._font,
        )
        self.ui_paw_right_switch = M5Switch(
            x=self._align_x_right,
            y=y,
        )
        if self.protocol['paw_right']:
            self.ui_paw_right_switch.set_on()
        else:
            self.ui_paw_right_switch.set_off()

    def build_response(self):
        """Read the widget states back into the protocol dict and return it."""
        index = self.ui_stim_count.get_sel_index()
        self.protocol['n_stims'] = index
        self.protocol['paw_left'] = self.ui_paw_left_switch.get_state()
        self.protocol['paw_right'] = self.ui_paw_right_switch.get_state()
        return self.protocol
class DebugScreen(MenuScreen):
    """Read-only screen that displays the current protocol for debugging."""
    def __init__(self, screen, actions=['prev', '', 'next']):
        super().__init__(screen, actions)
        # Label handles; populated by build_ui().
        self.ui_stim_label = None
        self.ui_left_paw_label = None
        self.ui_right_paw_label = None
        self.protocol = default_protocol()
    def set_protocol(self, protocol):
        # Replace the protocol displayed by this screen.
        self.protocol = protocol
    def build_ui(self):
        # NOTE(review): uses the module-level global `screen`, not the one
        # passed to __init__ — confirm this is intentional.
        screen.clean_screen()
        screen.set_screen_bg_color(0xAA33AA)
        self.ui_stim_label = M5Label('# of stimulations:' + str(self.protocol['n_stims']), x=10, y=20)
        self.ui_left_paw_label = M5Label('left paw:' + str(self.protocol['paw_left']), x=10, y=60)
        self.ui_right_paw_label = M5Label('right paw:' + str(self.protocol['paw_right']), x=10, y=100)
    def build_response(self):
        # Nothing editable here; return the protocol unchanged.
        return self.protocol
# Main screen-navigation loop: switch between the protocol editor and the
# debug view until a screen returns an empty action.
screen = M5Screen()
proto_screen = ProtocolScreen(screen, actions=['', 'stim', 'debug'])
# proto_screen.set_nav_color(0xFF00FF)
dbg_screen = DebugScreen(screen, actions=['stim', '', ''])
protocol = default_protocol()
action = 'stim'
while action:
    wait_ms(5)
    if action == 'stim':
        # NOTE(review): this branch updates dbg_screen but activates
        # proto_screen — looks copy-pasted; confirm intended behavior.
        dbg_screen.set_protocol(protocol)
        action, protocol = proto_screen.activate()
    elif action == 'debug':
        dbg_screen.set_protocol(protocol)
        action, protocol = dbg_screen.activate()
    else:
        break
# Final screen: show the chosen action and stimulation count.
screen.clean_screen()
screen.set_screen_bg_color(0xFF3300)
ui_stim_label = M5Label('# STIMS:' + action + ' -- ' + str(protocol['n_stims']), x=10, y=30)
| StarcoderdataPython |
1767693 | <reponame>muyuuuu/PyQt-learn
"""
.. moduleauthor:: <NAME> and <NAME> (active)
.. default-domain:: py
.. highlight:: python
Version |release|
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import str
from builtins import int
from future import standard_library
standard_library.install_aliases()
import sys
# Interpreter feature flags (direct boolean assignment instead of if/else).
runningPython26 = sys.hexversion >= 0x020600F0
runningPython3 = sys.hexversion >= 0x030000F0
# Try to import the Python Image Library. If it doesn't exist, only .gif
# images are supported.  Narrowed from a bare `except:` so unrelated
# errors are not silently swallowed.
try:
    from PIL import Image as PILImage
    from PIL import ImageTk as PILImageTk
except ImportError:
    pass
if runningPython3:
    from tkinter import *
    import tkinter.filedialog as tk_FileDialog
    from io import StringIO
else:
    # Bug fix: the Python 2 branch previously imported the Python 3 module
    # names (`tkinter`, `tkinter.filedialog`, `io.StringIO`), which do not
    # exist under Python 2.  Use the Python 2 names instead.
    from Tkinter import *
    import tkFileDialog as tk_FileDialog
    from StringIO import StringIO
# Set up basestring appropriately
if runningPython3:
    str = str
# EasyGui requires Tk 8.0 or later; abort early with a banner otherwise.
if TkVersion < 8.0:
    stars = "*" * 75
    print(("""\n\n\n""" + stars + """
You are running Tk version: """ + str(TkVersion) + """
You must be using Tk version 8.0 or greater to use EasyGui.
Terminating.
""" + stars + """\n\n\n"""))
    sys.exit(0)
# Default geometry string for the root window ("+x+y" screen offset).
rootWindowPosition = "+300+200"
PROPORTIONAL_FONT_FAMILY = ("MS", "Sans", "Serif")
MONOSPACE_FONT_FAMILY = ("Courier")
PROPORTIONAL_FONT_SIZE = 10
# a little smaller, because it is more legible at a smaller size
MONOSPACE_FONT_SIZE = 9
TEXT_ENTRY_FONT_SIZE = 12  # a little larger makes it easier to see
# Events that should activate the focused widget in choice dialogs.
STANDARD_SELECTION_EVENTS = ["Return", "Button-1", "space"]
# Initialize some global variables that will be reset later
__choiceboxMultipleSelect = None
__replyButtonText = None
__choiceboxResults = None
__firstWidget = None
__enterboxText = None
__enterboxDefaultText = ""
__multenterboxText = ""
choiceboxChoices = None
choiceboxWidget = None
entryWidget = None
boxRoot = None
#-------------------------------------------------------------------
# textbox
#-------------------------------------------------------------------
def textbox(msg="", title=" ", text="", codebox=0, get_updated_text=None):
    """
    Display some text in a proportional font with line wrapping at word breaks.
    This function is suitable for displaying general written text.
    The text parameter should be a string, or a list or tuple of lines to be
    displayed in the textbox.
    :param str msg: the msg to be displayed
    :param str title: the window title
    :param str text: what to display in the textbox
    :param str codebox: if 1, act as a codebox (monospace font, no word wrap)
    :param get_updated_text: zero-argument callable returning replacement text;
        invoked whenever the "Update" button is activated
    :return: the final contents of the text area when the window is closed
    """
    if msg is None:
        msg = ""
    if title is None:
        title = ""
    global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
    global rootWindowPosition
    choices = ["OK"]
    __replyButtonText = choices[0]
    boxRoot = Tk()
    # Quit when x button pressed
    boxRoot.protocol('WM_DELETE_WINDOW', boxRoot.quit)
    # Size the window relative to the screen: 80% wide, 50% tall,
    # offset 10%/5% from the top-left corner.
    screen_width = boxRoot.winfo_screenwidth()
    screen_height = boxRoot.winfo_screenheight()
    root_width = int((screen_width * 0.8))
    root_height = int((screen_height * 0.5))
    root_xpos = int((screen_width * 0.1))
    root_ypos = int((screen_height * 0.05))
    boxRoot.title(title)
    boxRoot.iconname('Dialog')
    rootWindowPosition = "+0+0"
    boxRoot.geometry(rootWindowPosition)
    boxRoot.expand = NO
    boxRoot.minsize(root_width, root_height)
    rootWindowPosition = '+{0}+{1}'.format(root_xpos, root_ypos)
    boxRoot.geometry(rootWindowPosition)
    mainframe = Frame(master=boxRoot)
    mainframe.pack(side=TOP, fill=BOTH, expand=YES)
    # ---- put frames in the window -----------------------------------
    # we pack the textboxFrame first, so it will expand first
    textboxFrame = Frame(mainframe, borderwidth=3)
    textboxFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
    message_and_buttonsFrame = Frame(mainframe)
    message_and_buttonsFrame.pack(side=TOP, fill=X, expand=NO)
    messageFrame = Frame(message_and_buttonsFrame)
    messageFrame.pack(side=LEFT, fill=X, expand=YES)
    buttonsFrame = Frame(message_and_buttonsFrame)
    buttonsFrame.pack(side=RIGHT, expand=NO)
    # -------------------- put widgets in the frames --------------------
    # put a textArea in the top frame
    if codebox:
        character_width = int((root_width * 0.6) / MONOSPACE_FONT_SIZE)
        textArea = Text(
            textboxFrame, height=25, width=character_width, padx="2m", pady="1m")
        textArea.configure(wrap=NONE)
        textArea.configure(font=(MONOSPACE_FONT_FAMILY, MONOSPACE_FONT_SIZE))
    else:
        # NOTE(review): this branch also divides by MONOSPACE_FONT_SIZE even
        # though it uses the proportional font — looks like a copy-paste;
        # kept as-is to preserve the existing layout.
        character_width = int((root_width * 0.6) / MONOSPACE_FONT_SIZE)
        textArea = Text(
            textboxFrame, height=25, width=character_width, padx="2m", pady="1m"
        )
        textArea.configure(wrap=WORD)
        textArea.configure(
            font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
    # some simple keybindings for scrolling.
    # Bug fix: bind() requires a callable.  The previous code *called* the
    # scroll methods once at setup time and bound their return value (None),
    # so the keys did nothing.  Wrap each scroll call in a lambda instead.
    mainframe.bind("<Next>", lambda event: textArea.yview_scroll(1, PAGES))
    mainframe.bind("<Prior>", lambda event: textArea.yview_scroll(-1, PAGES))
    mainframe.bind("<Right>", lambda event: textArea.xview_scroll(1, PAGES))
    mainframe.bind("<Left>", lambda event: textArea.xview_scroll(-1, PAGES))
    mainframe.bind("<Down>", lambda event: textArea.yview_scroll(1, UNITS))
    mainframe.bind("<Up>", lambda event: textArea.yview_scroll(-1, UNITS))
    # add a vertical scrollbar to the frame
    rightScrollbar = Scrollbar(
        textboxFrame, orient=VERTICAL, command=textArea.yview)
    textArea.configure(yscrollcommand=rightScrollbar.set)
    # add a horizontal scrollbar to the frame
    bottomScrollbar = Scrollbar(
        textboxFrame, orient=HORIZONTAL, command=textArea.xview)
    textArea.configure(xscrollcommand=bottomScrollbar.set)
    # pack the textArea and the scrollbars. Note that although we must define
    # the textArea first, we must pack it last, so that the bottomScrollbar will
    # be located properly.
    # Note that we need a bottom scrollbar only for code.
    # Text will be displayed with wordwrap, so we don't need to have a horizontal
    # scroll for it.
    if codebox:
        bottomScrollbar.pack(side=BOTTOM, fill=X)
    rightScrollbar.pack(side=RIGHT, fill=Y)
    textArea.pack(side=LEFT, fill=BOTH, expand=YES)
    # ---------- put a msg widget in the msg frame-------------------
    messageWidget = Message(
        messageFrame, anchor=NW, text=msg, width=int(root_width * 0.9))
    messageWidget.configure(
        font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
    messageWidget.pack(side=LEFT, expand=YES, fill=BOTH, padx='1m', pady='1m')
    # put the buttons in the buttonsFrame
    okButton = Button(
        buttonsFrame, takefocus=YES, text="Update", height=1, width=6)
    okButton.pack(
        expand=NO, side=TOP, padx='2m', pady='1m', ipady="1m", ipadx="2m")
    def __update_myself(event):
        # Pull fresh content from the caller-supplied callback and replace
        # the entire contents of the text area with it.
        # NOTE(review): assumes get_updated_text is not None when the button
        # is pressed — confirm callers always pass it.
        new_text = get_updated_text()
        textArea.delete(1.0, END)
        textArea.insert('end', new_text, "normal")
    # for the commandButton, bind activation events to the activation event
    # handler.  (A dead `handler = __textboxOK` assignment that was
    # immediately overwritten has been removed.)
    commandButton = okButton
    handler = __update_myself
    for selectionEvent in ["Return", "Button-1", "Escape"]:
        commandButton.bind("<%s>" % selectionEvent, handler)
    # ----------------- the action begins ------------------------------------
    try:
        # load the text into the textArea
        if isinstance(text, str):
            pass
        else:
            try:
                text = "".join(text)  # convert a list or a tuple to a string
            except:
                # NOTE(review): `msgbox` is not defined in this module's
                # visible code — confirm it is provided elsewhere.
                msgbox(
                    "Exception when trying to convert {} to text in textArea".format(type(text)))
                sys.exit(16)
        textArea.insert('end', text, "normal")
    except:
        msgbox("Exception when trying to load the textArea.")
        sys.exit(16)
    try:
        okButton.focus_force()
    except:
        msgbox("Exception when trying to put focus on okButton.")
        sys.exit(16)
    boxRoot.mainloop()
    # this line MUST go before the line that destroys boxRoot
    areaText = textArea.get(0.0, 'end-1c')
    boxRoot.destroy()
    return areaText  # return __replyButtonText
def __textboxOK(event):
    """Event handler: stop the textbox's Tk main loop (dismiss the dialog)."""
    global boxRoot
    boxRoot.quit()
def update(reply=None):
    """Demo updater callback: ignore *reply* and return a static hint."""
    message = "To close, use the x button"
    return message
def _demo_textbox():
    """Interactive demo: open an updatable textbox and print the reply."""
    title = "Demo of updatable textbox"
    msg = "Push update button to update. " * 16
    text_snippet = ("Update button!!!. " * 5 + "\n\n") * 10
    reply = textbox(msg, title, text_snippet, get_updated_text=update)
    print("Reply was: {!s}".format(reply))
3379145 | <filename>pydm/widgets/embedded_display.py
from qtpy.QtWidgets import QFrame, QApplication, QLabel, QVBoxLayout, QWidget
from qtpy.QtCore import Qt, QSize
from qtpy.QtCore import Property
import json
import os.path
import logging
from .base import PyDMPrimitiveWidget
from ..utilities import (is_pydm_app, establish_widget_connections,
close_widget_connections, macro, is_qt_designer)
from ..utilities.display_loading import (load_ui_file, load_py_file)
logger = logging.getLogger(__name__)
class PyDMEmbeddedDisplay(QFrame, PyDMPrimitiveWidget):
    """
    A QFrame capable of rendering a PyDM Display

    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    """

    def __init__(self, parent=None):
        QFrame.__init__(self, parent)
        PyDMPrimitiveWidget.__init__(self)
        self.app = QApplication.instance()
        self._filename = None
        self._macros = None
        self._embedded_widget = None
        self._disconnect_when_hidden = True
        self._is_connected = False
        self._only_load_when_shown = True
        # True whenever filename/macros changed and the display must be
        # (re)loaded on the next opportunity.
        self._needs_load = True
        self.base_path = ""
        self.base_macros = {}
        if is_pydm_app():
            self.base_path = self.app.directory_stack[-1]
            self.base_macros = self.app.macro_stack[-1]
        # NOTE: this attribute shadows QWidget.layout(); kept for backward
        # compatibility with existing users of this widget.
        self.layout = QVBoxLayout(self)
        self.err_label = QLabel(self)
        self.err_label.setAlignment(Qt.AlignHCenter)
        self.layout.addWidget(self.err_label)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.err_label.hide()
        # Show a visible border in Designer/non-PyDM hosts so the frame can
        # be located even when empty.
        if not is_pydm_app():
            self.setFrameShape(QFrame.Box)
        else:
            self.setFrameShape(QFrame.NoFrame)

    def minimumSizeHint(self):
        """
        This property holds the recommended minimum size for the widget.

        Returns
        -------
        QSize
        """
        # This is totally arbitrary, I just want *some* visible nonzero size
        return QSize(100, 100)

    @Property(str)
    def macros(self):
        """
        JSON-formatted string containing macro variables to pass to the embedded file.

        Returns
        -------
        str
        """
        if self._macros is None:
            return ""
        return self._macros

    @macros.setter
    def macros(self, new_macros):
        """
        JSON-formatted string containing macro variables to pass to the embedded file.

        .. warning::
            If the macros property is not defined before the filename property,
            The widget will not have any macros defined when it loads the embedded file.
            This behavior will be fixed soon.

        Parameters
        ----------
        new_macros : str
        """
        new_macros = str(new_macros)
        if new_macros != self._macros:
            self._macros = new_macros
            self._needs_load = True
            self.load_if_needed()

    @Property(str)
    def filename(self):
        """
        Filename of the display to embed.

        Returns
        -------
        str
        """
        if self._filename is None:
            return ""
        return self._filename

    @filename.setter
    def filename(self, filename):
        """
        Filename of the display to embed.

        Parameters
        ----------
        filename : str
        """
        filename = str(filename)
        if filename != self._filename:
            self._filename = filename
            self._needs_load = True
            self.load_if_needed()

    def parsed_macros(self):
        """
        Dictionary containing the key value pair for each macro specified.

        Returns
        --------
        dict
        """
        # Local macros override any inherited base macros with the same key.
        m = macro.find_base_macros(self)
        m.update(macro.parse_macro_string(self.macros))
        return m

    def load_if_needed(self):
        # In Designer we always load so the user sees the embedded display.
        if (not self._only_load_when_shown) or self.isVisible() or is_qt_designer():
            self.embedded_widget = self.open_file()

    def open_file(self, force=False):
        """
        Opens the widget specified in the widget's filename property.

        Returns
        -------
        display : QWidget
        """
        if (not force) and (not self._needs_load):
            return
        if not self.filename:
            return
        # Expand user (~ or ~user) and environment variables.
        fname = os.path.expanduser(os.path.expandvars(self.filename))
        if self.base_path:
            full_fname = os.path.join(self.base_path, fname)
        else:
            full_fname = fname
        if not is_pydm_app():
            (filename, extension) = os.path.splitext(full_fname)
            # NOTE: an extension other than .ui/.py leaves loadfunc unbound;
            # the resulting NameError is caught below and shown to the user.
            if extension == ".ui":
                loadfunc = load_ui_file
            elif extension == ".py":
                loadfunc = load_py_file
            try:
                w = loadfunc(full_fname, macros=self.parsed_macros())
                self._needs_load = False
                self.clear_error_text()
                return w
            except Exception as e:
                logger.exception("Exception while opening embedded display file.")
                self.display_error_text(e)
            return None
        # If you get this far, you are running inside a PyDMApplication, load
        # using that system.
        try:
            if os.path.isabs(full_fname) and os.path.exists(full_fname):
                w = self.app.open_file(full_fname, macros=self.parsed_macros())
            else:
                w = self.app.open_relative(fname, self,
                                           macros=self.parsed_macros())
            self._needs_load = False
            self.clear_error_text()
            return w
        except (ValueError, IOError) as e:
            self.display_error_text(e)

    def clear_error_text(self):
        """Hide and empty the error label."""
        self.err_label.clear()
        self.err_label.hide()

    def display_error_text(self, e):
        """Show the load error (and the offending filename) in the error label.

        Parameters
        ----------
        e : Exception
            The exception raised while opening the display file.
        """
        # Bug fix: the previous format string said a literal "(unknown)" and
        # never used the filename keyword it was given.
        self.err_label.setText(
            "Could not open {filename}.\nError: {err}".format(
                filename=self._filename, err=e))
        self.err_label.show()

    @property
    def embedded_widget(self):
        """
        The embedded widget being displayed.

        Returns
        -------
        QWidget
        """
        return self._embedded_widget

    @embedded_widget.setter
    def embedded_widget(self, new_widget):
        """
        Defines the embedded widget to display inside the QFrame

        Parameters
        ----------
        new_widget : QWidget
        """
        if new_widget is self._embedded_widget:
            return
        # Tear down the previous widget before installing the new one.
        if self._embedded_widget is not None:
            self.layout.removeWidget(self._embedded_widget)
            self._embedded_widget.deleteLater()
            self._embedded_widget = None
        if new_widget is not None:
            self._embedded_widget = new_widget
            self._embedded_widget.setParent(self)
            self.layout.addWidget(self._embedded_widget)
            self.err_label.hide()
            self._embedded_widget.show()
            # A freshly loaded widget starts with its channels connected.
            self._is_connected = True

    def connect(self):
        """
        Establish the connection between the embedded widget and
        the channels associated with it.
        """
        if self._is_connected or self.embedded_widget is None:
            return
        establish_widget_connections(self.embedded_widget)

    def disconnect(self):
        """
        Disconnects the embedded widget from the channels
        associated with it.
        """
        if not self._is_connected or self.embedded_widget is None:
            return
        close_widget_connections(self.embedded_widget)

    @Property(bool)
    def loadWhenShown(self):
        """
        If True, only load and display the file once the
        PyDMEmbeddedDisplayWidget is visible on screen.  This is very useful
        if you have many different PyDMEmbeddedWidgets in different tabs of a
        QTabBar or PyDMTabBar: only the tab that the user is looking at will
        be loaded, which can greatly speed up the launch time of a display.

        If this property is changed from 'True' to 'False', and the file has
        not been loaded yet, it will be loaded immediately.

        Returns
        -------
        bool
        """
        return self._only_load_when_shown

    @loadWhenShown.setter
    def loadWhenShown(self, val):
        self._only_load_when_shown = val
        self.load_if_needed()

    @Property(bool)
    def disconnectWhenHidden(self):
        """
        Disconnect from PVs when this widget is not visible.

        Returns
        -------
        bool
        """
        return self._disconnect_when_hidden

    @disconnectWhenHidden.setter
    def disconnectWhenHidden(self, disconnect_when_hidden):
        """
        Disconnect from PVs when this widget is not visible.

        Parameters
        ----------
        disconnect_when_hidden : bool
        """
        self._disconnect_when_hidden = disconnect_when_hidden

    def showEvent(self, e):
        """
        Show events are sent to widgets that become visible on the screen.

        Parameters
        ----------
        event : QShowEvent
        """
        if self._only_load_when_shown:
            w = self.open_file()
            if w:
                self.embedded_widget = w
        # Re-establish channel connections that were dropped on hide.
        if self.disconnectWhenHidden:
            self.connect()

    def hideEvent(self, e):
        """
        Hide events are sent to widgets that become invisible on the screen.

        Parameters
        ----------
        event : QHideEvent
        """
        if self.disconnectWhenHidden:
            self.disconnect()
| StarcoderdataPython |
80786 | <filename>gb.py
import numpy as np
from math import floor
import h5py
# Analysis parameters (NOTE(review): duration/dt appear unused in this
# visible portion of the script — confirm before removing).
duration = 300.0
dt = 1e-3
# Load session info (epochs, animal position, spikes) from the CRCNS
# hc-11 style .mat file.
with h5py.File('data_gb/Achilles_10252013_sessInfo.mat', 'r') as f:
    # epoch info
    pre_epoch = f['sessInfo']['Epochs']['PREEpoch'][:].flatten()
    maze_epoch = f['sessInfo']['Epochs']['MazeEpoch'][:].flatten()
    post_epoch = f['sessInfo']['Epochs']['POSTEpoch'][:].flatten()
    sessDuration = float(f['sessInfo']['Epochs']['sessDuration'][0])
    # position info
    TwoDLocation = np.array(f['sessInfo']['Position']['TwoDLocation'])
    TimeStamps = f['sessInfo']['Position']['TimeStamps'][:].flatten()
    MazeType = f['sessInfo']['Position']['MazeType']
    # Keep only samples where both coordinates are valid, then shift
    # coordinates so the minimum of each axis is zero.
    valididx = ~np.isnan(TwoDLocation[0]) & ~np.isnan(TwoDLocation[1])
    TwoDLocation = TwoDLocation[:,valididx]
    TwoDLocation[0] = TwoDLocation[0] - np.min(TwoDLocation[0])
    TwoDLocation[1] = TwoDLocation[1] - np.min(TwoDLocation[1])
    TimeStamps = TimeStamps[valididx]
    # spike info
    IntIDs = f['sessInfo']['Spikes']['IntIDs'][0].astype(int) # (putative) interneuron IDs
    PyrIDs = f['sessInfo']['Spikes']['PyrIDs'][0].astype(int) # (putative) pyramidal cell IDs
    SpikeTimes = f['sessInfo']['Spikes']['SpikeTimes'][0] # spike times
    SpikeIDs = f['sessInfo']['Spikes']['SpikeIDs'][0].astype(int) # cluster IDs of corresponding spike
    # (multiple) spike info
    #ShankIDs = list(sorted(set(np.floor(np.concatenate([IntIDs, PyrIDs])/100))))
    #MspikeIDs = np.floor(SpikeIDs/100)
    # rename neurons
    # NOTE(review): renaming mutates IntIDs/PyrIDs in place while iterating
    # over their concatenation; correctness relies on original IDs being
    # distinct from the new 0..N-1 range — confirm for this dataset.
    for i, ni in enumerate(np.concatenate([IntIDs, PyrIDs])): # rename neuron IDs to be 0,1,2,...
    #for i, ni in enumerate(ShankIDs): # rename neuron IDs to be 0,1,2,...
        IntIDs[IntIDs == ni] = i
        PyrIDs[PyrIDs == ni] = i
        SpikeIDs[SpikeIDs == ni] = i
    # Total number of renamed neurons (last index + 1).
    N = i + 1
    # pos: (time, x, y) rows; spk: per-pyramidal-cell spike time arrays.
    pos = np.array([TimeStamps,TwoDLocation[0],TwoDLocation[1]]).T
    spk = np.array([SpikeTimes[SpikeIDs==i] for i in PyrIDs])
    #spk = np.array([SpikeTimes[SpikeIDs==i] for i in np.concatenate([IntIDs, PyrIDs])])
# Side length of the spatial bins (meters, presumably — TODO confirm units).
spatial_bin_length = 0.04
| StarcoderdataPython |
3258337 | <reponame>LucasLaibly/Intrusion
from flask import jsonify
from app.src.server import create
from app.src.profanity.profanity_check import ProfanityCheck
from browser_history.browsers import Firefox, Chrome, Safari
app = create('development')  # Flask app configured for the 'development' environment
profanity_checker = ProfanityCheck()  # shared checker instance used by all routes
@app.route('/user', methods=['GET'])
def base():
    """Check a fixed sample set of URLs for profanity and return the result as JSON."""
    urls = {
        "0": "https://www.uranus.com",
        "1": "https://www.intrusion.io",
        "2": "https://www.myanus.com",
        "3": "https://www.uranus.com",
        "4": "https://www.chess.com"
    }
    response = profanity_checker.is_dirty(urls)
    # Bug fix: the previous if/else returned jsonify(response) on both
    # branches — collapsed to a single return.
    return jsonify(response)
@app.route('/censor/<string:word>')
def add_word(word):
    """Register *word* with the shared profanity checker's blocklist."""
    profanity_checker.add_word(word)
    return 'Added word!'
if __name__ == "__main__":
    # Run the development server locally.
    app.run(host='127.0.0.1', port=5000)
| StarcoderdataPython |
1744582 | <filename>wagtail/wagtailredirects/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial wagtailredirects migration: creates the Redirect model.

    NOTE: applied migrations must not be edited; code is intentionally
    left byte-identical.
    """
    dependencies = [
        ('wagtailcore', '0002_initial_data'),
    ]
    operations = [
        migrations.CreateModel(
            name='Redirect',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('old_path', models.CharField(
                    verbose_name='Redirect from', max_length=255, unique=True, db_index=True
                )),
                ('is_permanent', models.BooleanField(
                    verbose_name='Permanent', default=True, help_text="""Recommended. Permanent redirects \
ensure search engines forget the old page (the 'Redirect from') and index the new page instead."""
                )),
                ('redirect_link', models.URLField(blank=True, verbose_name='Redirect to any URL')),
                ('redirect_page', models.ForeignKey(
                    on_delete=models.CASCADE,
                    blank=True, null=True, verbose_name='Redirect to a page', to='wagtailcore.Page'
                )),
                ('site', models.ForeignKey(
                    on_delete=models.CASCADE,
                    blank=True, to='wagtailcore.Site', editable=False, null=True, related_name='redirects'
                )),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
3387780 | import os
import discord
# Discord user ID — presumably the bot owner's account; currently unused
# in this module (TODO confirm before removing).
ENTILZHA_ID = 200801932728729600
class BD1(discord.Client):
    """Minimal Discord client that logs readiness and every incoming message."""

    async def on_ready(self):
        # Fired once the gateway connection is fully established.
        print(f"Logged on as {self.user}!")

    async def on_message(self, message):
        # Log each message along with the numeric ID of its author.
        print(f"Message from {message.author}: {message.content}")
        print(f"{message.author.id}")
def main():
    """Create the BD1 client and run it with the token from the environment.

    Raises
    ------
    ValueError
        If the DISCORD_BD1_TOKEN environment variable is not set.
    """
    bot = BD1()
    token = os.environ.get("DISCORD_BD1_TOKEN")
    if token is None:
        raise ValueError("DISCORD_BD1_TOKEN not set")
    bot.run(token)
if __name__ == "__main__":
    # Script entry point: start the bot.
    main()
| StarcoderdataPython |
148471 | <reponame>hluk/product-definition-center<filename>pdc/apps/releaseschedule/tests.py<gh_stars>10-100
#
# Copyright (c) 2017 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from datetime import date, datetime, timedelta
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from pdc.apps.release.models import Release
from pdc.apps.componentbranch.models import SLA
from pdc.apps.releaseschedule.models import ReleaseSchedule
def backend_url(viewname, *args):
    """Return an absolute test-server URL for the given view name and args."""
    path = reverse(viewname, args=args)
    return 'http://testserver' + path
class ReleaseScheduleAPITestCase(APITestCase):
    """API tests for the release-schedule endpoint (CRUD, filtering,
    and the documented use cases)."""
    fixtures = [
        'pdc/apps/releaseschedule/fixtures/tests/release.json',
        'pdc/apps/releaseschedule/fixtures/tests/sla.json',
        'pdc/apps/releaseschedule/fixtures/tests/releaseschedule.json',
    ]

    def test_create(self):
        """POST creates a new schedule and echoes the full representation."""
        url = reverse('releaseschedule-list')
        data = {
            'release': 'test-release-0.1',
            'sla': 'bug_fixes',
            'date': '2017-01-01',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        expected = {
            'active': False,
            'date': '2017-01-01',
            'release': 'test-release-0.1',
            'sla': 'bug_fixes',
            'release_url': backend_url('release-detail', 'test-release-0.1'),
            'sla_url': backend_url('sla-detail', 2),
            'id': 2,
        }
        self.assertEqual(response.data, expected)

    def test_create_duplicate(self):
        """POSTing an already-existing (release, sla) pair is rejected."""
        # This release schedule already exists.
        url = reverse('releaseschedule-list')
        data = {
            'release': 'test-release-0.1',
            'sla': 'development',
            'date': '2017-01-01',
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_patch(self):
        """PATCH can update date, release and sla individually."""
        url = reverse('releaseschedule-detail', args=[1])
        changes = [
            ('date', '2018-01-01'),
            ('release', 'test-release-0.2'),
            ('sla', 'bug_fixes'),
        ]
        for change in changes:
            response = self.client.patch(url, dict([change]), format='json')
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(response.data['id'], 1)
            self.assertEqual(response.data[change[0]], change[1])

    def test_get(self):
        """GET lists the single fixture schedule with full details."""
        url = reverse('releaseschedule-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(response.data['results'][0], {
            'active': False,
            'date': '2017-01-01',
            'release': 'test-release-0.1',
            'sla': 'development',
            'release_url': backend_url('release-detail', 'test-release-0.1'),
            'sla_url': backend_url('sla-detail', 1),
            'id': 1,
        })

    def test_get_filter(self):
        """GET supports filtering by release, sla, active state and date."""
        url = reverse('releaseschedule-list')
        # Define some dates
        today = datetime.utcnow().date()
        tomorrow = today + timedelta(days=1)
        day_after = today + timedelta(days=2)
        yesterday = today - timedelta(days=1)
        # Create test data
        release_1 = Release.objects.get(pk=1)
        release_2 = Release.objects.get(pk=2)
        sla_dev = SLA.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        sla_sec = SLA.objects.get(pk=3)
        expired_schedule_1 = ReleaseSchedule.objects.get(pk=1)
        active_schedule_1 = ReleaseSchedule.objects.create(
            release=release_1, sla=sla_bug, date=tomorrow)
        future_schedule_1 = ReleaseSchedule.objects.create(
            release=release_1, sla=sla_sec, date=day_after)
        expired_schedule_2 = ReleaseSchedule.objects.create(
            release=release_2, sla=sla_dev, date=yesterday)
        active_schedule_2 = ReleaseSchedule.objects.create(
            release=release_2, sla=sla_bug, date=tomorrow)
        future_schedule_2 = ReleaseSchedule.objects.create(
            release=release_2, sla=sla_sec, date=day_after)
        # Assert that we get all release schedules by default.
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 6)
        # Filter on release
        response = self.client.get("{}?ordering=id&release=test-release-0.1".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 3)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [expired_schedule_1.id, active_schedule_1.id, future_schedule_1.id]
        )
        # Filter on sla
        response = self.client.get("{}?ordering=id&sla=bug_fixes".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [active_schedule_1.id, active_schedule_2.id])
        # Filter on active state
        response = self.client.get("{}?ordering=id&active=1".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [
                active_schedule_1.id, future_schedule_1.id,
                active_schedule_2.id, future_schedule_2.id,
            ]
        )
        # Filter on date
        response = self.client.get(
            "{}?ordering=id&date_after={}".format(url, tomorrow.isoformat()))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [result["id"] for result in response.data['results']],
            [active_schedule_1.id, future_schedule_1.id,
             active_schedule_2.id, future_schedule_2.id]
        )
        response = self.client.get(
            "{}?ordering=id&date_before={}".format(url, tomorrow.isoformat()))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [
                result["id"] for result in response.data['results']
            ],
            [
                expired_schedule_1.id, active_schedule_1.id,
                expired_schedule_2.id, active_schedule_2.id,
            ]
        )

    def test_use_case_current_releases(self):
        """Use case: find current releases via an active bug_fixes SLA."""
        # Get the current releases, defined by "releases that have an active
        # `bug_fixes` SLA".
        url = reverse('releaseschedule-list')
        # Define some dates
        today = datetime.utcnow().date()
        tomorrow = today + timedelta(days=1)
        yesterday = today - timedelta(days=1)
        # Create test data
        release_1 = Release.objects.get(pk=1)
        release_2 = Release.objects.get(pk=2)
        release_3 = Release.objects.get(pk=3)
        sla_dev = SLA.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        # Release 1 is old
        # NOTE(review): the result of this lookup is discarded — looks like
        # dead code; confirm before removing.
        ReleaseSchedule.objects.get(pk=1)
        ReleaseSchedule.objects.create(
            release=release_1, sla=sla_bug, date=yesterday)
        # Release 2 is current
        ReleaseSchedule.objects.create(
            release=release_2, sla=sla_dev, date=yesterday)
        ReleaseSchedule.objects.create(
            release=release_2, sla=sla_bug, date=tomorrow)
        # Release 3 is still in dev
        ReleaseSchedule.objects.create(
            release=release_3, sla=sla_dev, date=tomorrow)
        # Assert that we get all release schedules by default.
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 5)
        # Filter current releases
        response = self.client.get("{}?active=1&sla=bug_fixes".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]["release"], "test-release-0.2")

    def test_use_case_default_slas(self):
        """Use case: list a release's SLAs ordered by their end dates."""
        # Get the default SLAs for a release.
        url = reverse('releaseschedule-list')
        # Define some dates
        day1 = date(2018, 1, 1)
        day2 = date(2019, 1, 1)
        day3 = date(2020, 1, 1)
        # Create test data
        release = Release.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        sla_sec = SLA.objects.get(pk=3)
        sla_api = SLA.objects.create(name="stable_api")
        ReleaseSchedule.objects.create(
            release=release, sla=sla_bug, date=day1)
        ReleaseSchedule.objects.create(
            release=release, sla=sla_sec, date=day2)
        ReleaseSchedule.objects.create(
            release=release, sla=sla_api, date=day3)
        # Assert that we get all release schedules by default.
        response = self.client.get(
            "{}?ordering=date&release=test-release-0.1".format(url))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 4)
        self.assertEqual(
            [
                (result["sla"], result["date"])
                for result in response.data['results']
            ], [
                ("development", "2017-01-01"),
                ("bug_fixes", "2018-01-01"),
                ("security_fixes", "2019-01-01"),
                ("stable_api", "2020-01-01")
            ])
class ReleaseScheduleModelTestCase(APITestCase):
    """Model-level tests for ReleaseSchedule (the `active` property)."""
    fixtures = [
        'pdc/apps/releaseschedule/fixtures/tests/release.json',
        'pdc/apps/releaseschedule/fixtures/tests/sla.json',
    ]

    def test_active(self):
        """A schedule is active iff its date lies in the future."""
        today = datetime.utcnow().date()
        release = Release.objects.get(pk=1)
        sla_dev = SLA.objects.get(pk=1)
        sla_bug = SLA.objects.get(pk=2)
        expired_schedule = ReleaseSchedule.objects.create(
            release=release, sla=sla_dev, date=(today - timedelta(days=1))
        )
        active_schedule = ReleaseSchedule.objects.create(
            release=release, sla=sla_bug, date=(today + timedelta(days=1))
        )
        self.assertFalse(expired_schedule.active)
        self.assertTrue(active_schedule.active)
| StarcoderdataPython |
293 | <gh_stars>0
import discord
client = discord.Client()  # object used for the Discord gateway connection
# On startup
@client.event
async def on_ready():
    # Log successful login (message text is user-facing Japanese output).
    print('ログイン成功')
# Watch incoming messages
@client.event
async def on_message(message):
    # If the message starts with "/box", forward its body anonymously.
    if message.content.startswith('/box'):
        # Strip the "/box" prefix from the text.
        question = message.content[len('/box'):].strip()
        # ID of the channel the question should be posted to.
        target_channel_id = getTargetChannelId()
        # If the id is 0, DM an error report back to the asker;
        # otherwise post the question anonymously.
        if target_channel_id == 0:
            dm = await message.author.create_dm()  # open a DM with the asker
            await dm.send(
                'Sorry, メッセージを送信できませんでした.'
                'もう1度試してみてください.\n'
                '【質問文】' + question)
        else:
            # Channel that receives the anonymous question.
            target_channel = client.get_channel(target_channel_id)
            # Post the question text to the channel.
            await target_channel.send(question)
# Return the id of the channel used for anonymous questions.
# The first Text channel (smallest position) inside the configured category is
# treated as the question channel; 0 is returned when the category has none.
def getTargetChannelId() -> int:
    # ***********************************************************
    # Category that contains the target channel (configured by id).
    category_id = 711238137598181396
    target_category_name = client.get_channel(category_id).name
    # ***********************************************************
    # Every Text channel on the configured guild.
    all_channels = client.get_guild(602423784946925568).text_channels
    # Candidates: channels that belong to the configured category.
    # NOTE: str(channel.category) compares by category *name*, as before.
    candidates = [
        channel for channel in all_channels
        if str(channel.category) == target_category_name
    ]
    if not candidates:
        return 0
    # The lowest `position` is the first channel shown in the category;
    # min() keeps the first-seen channel on ties, matching the old strict
    # "only replace when strictly smaller" loop.
    first = min(candidates, key=lambda channel: int(channel.position))
    return int(first.id)
# Connect to Discord as the bot.
# SECURITY: a bot token should never be hard-coded in source. Prefer the
# DISCORD_BOT_TOKEN environment variable; the original literal is kept only
# as a fallback so existing deployments keep working.
import os
client.run(os.environ.get('DISCORD_BOT_TOKEN', '605042341715378176'))
| StarcoderdataPython |
190045 | <reponame>Aditya-aot/ION
from django import forms
from django.forms import ModelForm
from .models import stock_port , crypto_port
class stock_port_form(ModelForm):
    """ModelForm for a single stock portfolio entry (name, price, quantity).

    Bug fixed: the widget attrs key was misspelled "placholder", so the
    placeholder text was emitted as an unknown HTML attribute and never
    displayed; corrected to the real "placeholder" attribute.
    """
    name = forms.CharField(label='', widget=forms.TextInput(attrs={"placeholder": "write here"}))
    price = forms.CharField(label='', widget=forms.TextInput(attrs={"placeholder": "write here"}))
    quantity = forms.CharField(label='', widget=forms.TextInput(attrs={"placeholder": "write here"}))

    class Meta:
        model = stock_port
        fields = [
            'name',
            'price',
            'quantity',
        ]
# NOTE(review): this scrape runs at *import time* of the forms module, so any
# network failure or HTML change on coinmarketcap.com breaks Django startup.
# The CSS class names below are framework-generated and brittle — confirm they
# still match the live page. `pd`, `re` and `tables` are imported/computed but
# never used in this file.
from bs4 import BeautifulSoup
import pandas as pd
import requests
import re
url = 'https://coinmarketcap.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text,'html.parser')
name_list=[]
# Coin-name <div>s; each becomes a (value, label) choices pair for the form.
names = soup.find_all("div", {"class": "sc-16r8icm-0 sc-1teo54s-1 dNOTPP"})
for name in names :
    name_list.append((name.text , name.text))
# Keep only entries 9..18 — presumably skips header/decoration matches and
# caps the dropdown at ten coins; verify against the current page layout.
name_list= (name_list[9:19])
tables = soup.find_all("div", {"class": "sc-131di3y-0 cLgOOr"})
class crypto_port_form(ModelForm):
    """ModelForm for a crypto portfolio entry; the name field is a dropdown of
    coin names scraped from coinmarketcap when this module was imported.

    Bug fixed: the widget attrs key was misspelled "placholder", so the
    placeholder text never rendered; corrected to "placeholder".
    """
    name = forms.CharField(label='Select Crypto ', widget=(forms.Select(choices=name_list)))
    price = forms.CharField(label='Price', widget=forms.TextInput(attrs={"placeholder": "write here"}))
    quantity = forms.CharField(label='Quantity', widget=forms.TextInput(attrs={"placeholder": "write here"}))

    class Meta:
        model = crypto_port
        fields = [
            'name',
            'price',
            'quantity',
        ]
| StarcoderdataPython |
3351116 | <reponame>Yat-o/Aoi
from dataclasses import dataclass
from sqlite3 import Row
from typing import List, Optional
class GuildSettingModel:
    """Per-guild bot settings: embed colors, currency-drop tuning and flags.

    Bug fixed: `currency_gen_channels` previously defaulted to a shared
    mutable list (`= []`), so every instance built without that argument
    referenced the *same* list object; the default is now None and a fresh
    list is created per instance. Passing None is equivalent to the old
    default, so the change is backward compatible.
    """

    def __init__(self, ok_color: int = 0x00aa00,
                 error_color: int = 0xaa0000,
                 info_color: int = 0x0000aa,
                 perm_errors: bool = True,
                 currency_img: str = "",
                 currency_chance: int = 4,
                 currency_max: int = 8,
                 currency_min: int = 10,
                 currency_gen_channels: Optional[List[int]] = None,
                 delete_on_ban: bool = False,
                 reply_embeds: bool = True):
        self.colors = _GuildSettingColorModel(ok_color, error_color, info_color)
        self.perm_errors = perm_errors
        self.currency_img = currency_img
        self.currency_chance = currency_chance
        self.currency_max = currency_max
        self.currency_min = currency_min
        # Fresh list per instance (see class docstring).
        self.currency_gen_channels = currency_gen_channels if currency_gen_channels is not None else []
        self.delete_on_ban = delete_on_ban
        self.reply_embeds = reply_embeds

    @property
    def ok_color(self):
        # Convenience passthrough to the color sub-model.
        return self.colors.ok

    @property
    def error_color(self):
        return self.colors.error

    @property
    def info_color(self):
        return self.colors.info

    @classmethod
    def from_row(cls, row: Row):
        """Build settings from a sqlite row; colors are stored as hex strings
        and the channel list as a comma-separated string.

        NOTE(review): row[0] and row[4] are skipped — presumably the guild id
        and an unused column; confirm against the table schema.
        """
        return cls(
            int(row[1], 16),
            int(row[2], 16),
            int(row[3], 16),
            row[5], row[6],
            int(row[7]),
            int(row[8]),
            int(row[9]),
            [int(x) for x in row[10].split(",")] if row[10] else [],
            row[11] == 1, row[12] == 1
        )
@dataclass
class _GuildSettingColorModel:
    # Embed color triple held by GuildSettingModel.colors; values are RGB
    # integers (defaults upstream look like 0x00aa00-style 24-bit values).
    ok: int
    error: int
    info: int
@dataclass
class _GuildSettingCurrencyModel:
    # NOTE(review): not referenced anywhere in this file — GuildSettingModel
    # stores the currency fields directly. Possibly dead code, or consumed by
    # another module; confirm before removing.
    img: str
    chance: int
| StarcoderdataPython |
3373968 | import threading
import time
from random import randint
from sense_hat import SenseHat
sense = SenseHat()  # single shared handle to the Sense HAT board
# Colour constants as (R, G, B) tuples for the LED matrix.
red = (255, 0, 0)
green = (0, 255, 0)
black = (0,0,0)
orange = (255, 255, 0)  # NOTE(review): (255, 255, 0) is yellow, not orange
white = (255,255,255)
blue = (0, 0, 255)
exitFlag = 0  # checked by the worker loops; non-zero requests early exit
class TestThread(threading.Thread):
    """Demo worker: sleeps `counter` seconds, then runs the job selected by
    `threadID` (1 = sensor readout, 2 = clock printer, 3 = cycle counter)."""

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID  # selects which job run() performs
        self.name = name  # label used in printed output
        self.counter = counter  # startup delay in seconds, reused as job delay
    def run(self):
        # Stagger startup, then dispatch on the thread id.
        time.sleep(self.counter)
        print("Starting " + self.name)
        if self.threadID == 1:
            acq_sensori(self.name, self.counter, 2)
        if self.threadID == 2:
            print_time(self.name, self.counter, 50)
        if self.threadID == 3:
            print_counter(self.name, self.counter, 50)
# THREAD ID = 1
# Read the Sense HAT sensors and scroll the readings across the LED matrix.
def acq_sensori(threadName, delay, counter):
    # NOTE(review): threadName, delay and counter are accepted but unused.
    # background colour
    bg = red
    # text colour
    tx = white
    # Read temperature, pressure and humidity from the Sense HAT sensors.
    t = sense.get_temperature()
    p = sense.get_pressure()
    h = sense.get_humidity()
    # Round each reading to one decimal place.
    t = round(t, 1)
    p = round(p, 1)
    h = round(h, 1)
    # str() converts the numeric values so they can be concatenated.
    # NOTE(review): there is no separator before "Pressure:"/"Humidity:",
    # so the scrolled text runs the values together.
    message = "Temperature: " + str(t) + "Pressure: " + str(p) + "Humidity: " + str(h)
    # Scroll the message across the Sense HAT LED matrix.
    sense.show_message(message, text_colour=tx, scroll_speed=0.50, back_colour=bg)
# THREAD ID = 2
# Print the current time `counter` times, sleeping `delay` seconds between prints.
def print_time(threadName, delay, counter):
    while counter:
        # BUG FIX: the original called threadName.exit(), but threadName is a
        # plain string with no exit() method, so a set exitFlag would have
        # raised AttributeError. Returning stops this worker cleanly.
        if exitFlag:
            return
        time.sleep(delay)
        print("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1
# THREAD ID = 3
# Print the thread name and remaining cycle count `counter` times.
def print_counter(threadName, delay, counter):
    while counter:
        # BUG FIX: the original called threadName.exit(); threadName is a
        # string, so that would raise AttributeError — return instead.
        if exitFlag:
            return
        time.sleep(delay)
        print(threadName, "ciclo", str(counter))
        counter -= 1
# Create the three demo threads; the third constructor argument staggers
# their start (1 s, 2 s, 3 s) and doubles as each job's per-iteration delay.
thread1 = TestThread(1, "Thread 1", 1)
thread2 = TestThread(2, "Thread 2", 2)
thread3 = TestThread(3, "Thread 3", 3)
# Start all workers.
thread1.start()
thread2.start()
thread3.start()
# Block until every worker finishes (threads 2 and 3 run 50 iterations each).
thread1.join()
thread2.join()
thread3.join()
print("Fine del main thread")
| StarcoderdataPython |
1715377 | <reponame>Consolatis/wl_framework<filename>examples/wlctrl.py<gh_stars>1-10
#!/usr/bin/env python3
from wl_framework.network.connection import (
WaylandConnection,
WaylandDisconnected
)
from wl_framework.protocols.foreign_toplevel import ForeignTopLevel
class WlCtrl(WaylandConnection):
    """One-shot Wayland control client.

    Lists toplevel windows, or applies a single action (activate, close,
    (un)maximize / (un)minimize / (un)fullscreen) to one window selected by
    handle (#), app_id (@), exact title (=) or title substring (:), using the
    foreign-toplevel management protocol.
    """

    def __init__(self, sys_args, *args, **kwargs):
        # NOTE(review): `usage` (raised below) is a module-level name defined
        # only under this file's `if __name__ == '__main__':` guard; importing
        # this class elsewhere and hitting the error path raises NameError.
        if not sys_args:
            self.action = 'list'
        else:
            # CLI verb -> (window method name, positional args or None).
            actions = {
                'focus': ('activate', None),
                'activate': ('activate', None),
                'maximize': ('set_maximize', (True,)),
                'minimize': ('set_minimize', (True,)),
                'fullscreen': ('set_fullscreen', (True,)),
                'unmaximize': ('set_maximize', (False,)),
                'unminimize': ('set_minimize', (False,)),
                'unfullscreen': ('set_fullscreen', (False,)),
                'close': ('close', tuple())
            }
            self.target = sys_args[0]
            self.action = actions.get(sys_args[1])
            # Reject unknown actions and malformed targets up front.
            if (
                not self.action
                or not self.target
                or self.target[0] not in '#@=:'
                or len(self.target) < 2
            ):
                raise RuntimeError(usage)
        super().__init__(*args, **kwargs)

    def quit(self, data=None):
        """Tear down the connection; usable directly or as a sync callback."""
        self.shutdown()

    def on_initial_sync(self, data):
        """After the initial roundtrip: bind toplevel management and queue
        info_done() to run once all initial window states have arrived."""
        super().on_initial_sync(data)
        if self.action[0] == 'activate':
            # As we are subclassing the connection itself
            # we have to patch the action here because
            # the seat is only set in this very function.
            self.action = ('activate', (self.display.seat,))
        self.toplevels = ForeignTopLevel(self)
        # ForeignTopLevel will .bind() in its constructor
        # which will then cause the server to send all of
        # the initial toplevel states. Thus we just wait
        # for that to happen by queueing a callback and
        # then looping over the results.
        self.sync(self.info_done)

    def info_done(self, data):
        """Resolve the target window and run the requested action (or list)."""
        if self.action == 'list':
            self._print_list()
            self.quit()
            return
        # Pick the matcher keyed by the target's one-character prefix.
        target_matches = {
            '#': lambda window, target : int(target[1:]) == window.obj_id,
            '@': lambda window, target : target[1:].lower() == window.app_id.lower(),
            '=': lambda window, target : target[1:].lower() == window.title.lower(),
            ':': lambda window, target : target[1:].lower() in window.title.lower()
        }.get(self.target[0])
        windows_found = list()
        for window in self.toplevels.windows.values():
            if target_matches(window, self.target):
                windows_found.append(window)
        if not windows_found:
            print(f"No window matches target {self.target}")
            self.quit()
            return
        if len(windows_found) > 1:
            # Ambiguous match: show the candidates instead of acting.
            print(f"Found multiple windows. Not doing anything.")
            self._print_list(windows_found)
            self.quit()
            return
        func_name, func_args = self.action
        func = getattr(windows_found[0], func_name)
        func(*func_args)
        # Wait for roundtrip to return before closing the connection
        self.sync(self.quit)

    def _print_list(self, windows=None):
        """Print an aligned (handle, app_id, title) table of the given
        windows, defaulting to every known toplevel."""
        if windows is None:
            windows = self.toplevels.windows.values()
        if not windows:
            print("No windows opened")
            self.quit()
            return
        # Dummy row supplying the column headers (and their minimum widths).
        class d:
            obj_id = ' Handle'
            app_id = ' AppID'
            title = ' Title'
        handle_max = max(len(d.obj_id), max(len(str(x.obj_id)) for x in windows))
        app_id_max = max(len(d.app_id), max(len(x.app_id) for x in windows))
        title_max = max(len(d.title), max(len(x.title) for x in windows))
        fmt = " {0.obj_id:{1}} {0.app_id:{2}} {0.title:{3}}"
        print()
        print(fmt.format(d, handle_max, app_id_max, title_max))
        print(" {:-<{}} {:-^{}} {:->{}}".format('', handle_max, '', app_id_max, '', title_max))
        for window in windows:
            print(fmt.format(window, handle_max, app_id_max, title_max))
        print()
print()
if __name__ == '__main__':
    import sys
    from wl_framework.loop_integrations import PollIntegration
    # Help text; also raised (wrapped in RuntimeError) by WlCtrl on bad args.
    usage = \
f"""
Usage: {sys.argv[0]} [<window-handle> <action>]
Without arguments: List windows
<window-handle> should be one of:
    #handle match handle
    @app_id match app_id
    =title match title
    :title match part of title
Warning: #window-handle might be reused or differ completely on the next call!
<action> should be one of:
    activate | focus
    close
    maximize
    minimize
    fullscreen
    unmaximize
    unminimize
    unfullscreen
"""
    # Either no extra args (list mode) or exactly target + action.
    if len(sys.argv) not in (1, 3):
        print(usage)
        sys.exit(1)
    loop = PollIntegration()
    try:
        app = WlCtrl(sys.argv[1:], eventloop_integration=loop)
    except RuntimeError as e:
        # Bad CLI arguments: WlCtrl raised with the usage text.
        print(e)
        sys.exit(1)
    try:
        loop.run()
    except WaylandDisconnected:
        # Normal termination once quit() shuts the connection down.
        pass
| StarcoderdataPython |
3316333 | #coding:utf-8
import os
import configparser
# Name of the INI file this module manages (relative to the working directory).
configName = 'config.conf'

def init():
    """Create the config file if it is missing and load it into the module
    global `conf` (a configparser.ConfigParser read as UTF-8).

    Defect fixed: the original `open(configName, 'w')` leaked the file
    handle; a `with` block now creates the file and closes it immediately.
    """
    print('init config')
    if not os.path.isfile(configName):
        print('creat config')  # (sic) original message kept for compatibility
        with open(configName, 'w'):
            pass  # just create an empty file
    global conf
    conf = configparser.ConfigParser()
    conf.read(configName, 'utf-8')
def write():
    """Persist the module-global `conf` to configName.

    Defect fixed: the original opened the file and never closed it; `with`
    guarantees the handle is flushed and closed even if conf.write() raises.
    """
    with open(configName, 'w') as configFile:
        conf.write(configFile)
# Running as a script just initialises (and, if needed, creates) the config.
if __name__=='__main__':
    init()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.