id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
209792 | <gh_stars>0
class Solution:
    """LeetCode 95: generate all structurally unique BSTs holding values 1..n."""

    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return the roots of every structurally unique BST over 1..n.

        Classic divide and conquer: for each candidate root ``i`` in
        ``[mini, maxi]``, pair every left subtree built from ``[mini, i-1]``
        with every right subtree built from ``[i+1, maxi]``.
        """
        if n == 0:
            return []

        def build(mini: int, maxi: int) -> List[Optional[TreeNode]]:
            # An empty range contributes a single "no subtree" placeholder so
            # the cartesian product below still produces one combination.
            if mini > maxi:
                return [None]
            ans = []
            for i in range(mini, maxi + 1):
                for left in build(mini, i - 1):
                    for right in build(i + 1, maxi):
                        node = TreeNode(i)
                        node.left = left
                        node.right = right
                        ans.append(node)
            return ans

        return build(1, n)
| StarcoderdataPython |
#!/usr/bin/env python3
from __future__ import print_function
import os
import platform
import time
import matplotlib
matplotlib.use('TkAgg')  # to get rid of runtime error
import matplotlib.pyplot as plt
import numpy as np

# Check if the code runs on Mac (which almost all modern ones have AMD GPUs)
# NOTE(review): both branches currently assign False, so the platform check is
# a no-op; flip the Darwin branch to True to actually enable the PlaidML path.
if platform.system() == 'Darwin':
    USE_AMD_GPU = False
else:
    USE_AMD_GPU = False

if USE_AMD_GPU:
    # Switch the backend
    # Be sure to install 'plaidml-keras'
    # and run the 'plaidml-setup'
    #
    # https://www.varokas.com/keras-with-gpu-on-plaidml/
    os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
    import keras
    from keras.models import Sequential
    from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Reshape, Dropout, BatchNormalization, ReLU
else:
    # Default path: use the Keras bundled with TensorFlow.
    import tensorflow.keras as keras
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Reshape, Dropout, BatchNormalization, ReLU
"""
Tensorflow example for creating a MNIST image classification model with Keras
See: https://www.tensorflow.org/tutorials/images/classification
See: https://www.tensorflow.org/guide/keras/save_and_serialize
ToDo: Extract the weights and store them in a non-binary format
"""
IMG_HEIGHT = 28
IMG_WIDTH = 28

# Load MNIST and scale pixel values from [0, 255] down to [0, 1].
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Two conv blocks (Conv -> BN -> ReLU -> Dropout -> MaxPool) followed by a
# small dense head with a 10-way softmax output.
model = keras.models.Sequential([
    BatchNormalization(axis=[-1, -2], input_shape=(IMG_HEIGHT, IMG_WIDTH)),
    Reshape((IMG_HEIGHT, IMG_WIDTH, 1)),
    Conv2D(16, 5, padding='same', activation='linear', use_bias=True),  # 5x5 kernel, 16 filters
    BatchNormalization(axis=-1),
    ReLU(),
    Dropout(0.2),
    MaxPooling2D(),
    Conv2D(32, 5, padding='same', activation='linear', use_bias=True),  # 5x5 kernel, 32 filters
    BatchNormalization(axis=-1),
    ReLU(),
    Dropout(0.2),
    MaxPooling2D(),
    Flatten(),
    Dense(32, activation='linear'),
    BatchNormalization(axis=-1),
    Dropout(0.2),
    Dense(10, activation='softmax')
])
# Observed results from earlier runs:
# loss: 0.0341 - accuracy: 0.9787
# loss: 0.0326 - accuracy: 0.9825
# You must install pydot and graphviz for `pydotprint` to work.
# keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.build()
# Display the model's architecture
model.summary()
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                              save_weights_only=True,
                                              verbose=1)
class TimeHistory(keras.callbacks.Callback):
    """Keras callback that records the wall-clock duration of each epoch.

    After training, the per-epoch durations (in seconds) are available in
    ``self.times``.
    """

    def on_train_begin(self, logs=None):
        # Reset per training run. (Original used mutable default `logs={}`,
        # which is shared across calls; None is the conventional default.)
        self.times = []

    def on_epoch_begin(self, batch, logs=None):
        self.epoch_time_start = time.time()

    def on_epoch_end(self, batch, logs=None):
        self.times.append(time.time() - self.epoch_time_start)
time_callback = TimeHistory()
# For higher GPU Utilization it is useful to increase batch_size but this can slow down training
# NOTE(review): cp_callback (checkpointing) is defined above but never passed
# to fit() — confirm whether checkpoints were intended here.
history = model.fit(x_train, y_train, epochs=3, batch_size=50, validation_split=0.1, callbacks=[time_callback])
times = time_callback.times
# Joins per-epoch times with the label, producing output like the sample below.
print('\nEpoch Time '.join(map(str, times)))
print('Average: ', np.mean(times))
# With AMD RADEON 550 PRO GPU
# 24.548192024230957
# Epoch Time 23.452439069747925
# Epoch Time 23.493314027786255
# Epoch Time 23.409918785095215
# Epoch Time 23.45715093612671
# Epoch Time 23.192039966583252
# Epoch Time 23.245102167129517
# Epoch Time 23.274284839630127
# Epoch Time 23.248417854309082
# Epoch Time 23.290798902511597
# Average: 23.461165857315063
# Plot training & validation accuracy values
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
# Plot training & validation loss values
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Save JSON config to disk
json_config = model.to_json()
with open('training_1/model_config.json', 'w') as json_file:
    json_file.write(json_config)
# Save weights in binary to disk
model.save_weights('training_1/weights.h5')
# Final evaluation on the held-out test set.
model.evaluate(x_test, y_test, verbose=2)
| StarcoderdataPython |
361525 | <reponame>parallelwindfarms/byteparsing
from byteparsing.trampoline import Call, Trampoline, Parser
from byteparsing.parsers import item, choice, char
from byteparsing.cursor import Cursor
import pytest
class A(Trampoline):
    # Minimal Trampoline subclass: deliberately does NOT override invoke(),
    # so the base-class NotImplementedError path can be exercised in tests.
    pass
def test_trampoline():
    """A bare Trampoline subclass must raise when invoke() is called."""
    instance = A()
    with pytest.raises(NotImplementedError):
        instance.invoke()
def test_call():
    """item() wraps a cursor advance in a Call whose result is a 3-tuple."""
    cursor = Cursor.from_bytes(b"Hello, World!")
    aux = "some unique object"
    # Build and invoke the Call once instead of re-constructing and
    # re-invoking it for every assertion (the original did this six times).
    call = item(cursor, aux)
    assert isinstance(call, Call)
    result = call()
    assert isinstance(result, tuple)
    assert result[0] == cursor.at
    assert result[1] == cursor.increment()
    assert result[2] is aux
    # __call__ and invoke() must agree.
    assert result == item(cursor, aux).invoke()
def test_choice():
    """choice() combines alternatives into a Parser yielding a Call."""
    cursor = Cursor.from_bytes(b"Hello, World!")
    aux = "some other unique object"
    parsers = (char('a'), char('b'), char('H'))
    combined = choice(*parsers)
    assert isinstance(combined, Parser)
    assert isinstance(combined(cursor, aux), Call)
    outcome = combined(cursor, aux).invoke()
    assert isinstance(outcome, tuple)
    assert outcome[0] == cursor.at
    assert outcome[1] == cursor.increment()
| StarcoderdataPython |
5128428 | from django.conf import settings
from dcu.active_memory import upload_rotate
import datetime
import os.path
import subprocess
import logging
logger = logging.getLogger(__name__)
class BackupFailed(Exception):
    """Raised when a media or database backup step fails to complete."""
def backup_media():
    '''
    make a backup of media files

    Creates a date-stamped tar.bz2 archive of settings.MEDIA_ROOT inside
    settings.BACKUP_DIR, then uploads it to S3 via upload_rotate.
    Raises BackupFailed if tar exits non-zero.
    '''
    timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d')
    media_parent = os.path.dirname(settings.MEDIA_ROOT)
    media_folder = os.path.basename(settings.MEDIA_ROOT)
    # e.g. "<media_folder>-2013-01-31.tar.bz2"
    backup_dest = os.path.join(settings.BACKUP_DIR, '{0}-{1}.tar.bz2'.format(media_folder, timestamp))
    cmd_template = 'tar -cjf {backup_dest} -C {media_parent} {media_folder}'
    cmd = cmd_template.format(
        backup_dest=backup_dest,
        media_parent=media_parent,
        media_folder=media_folder
    )
    logger.debug('Backing up media with following command: {0}'.format(cmd))
    # NOTE(review): splitting on single spaces breaks if any configured path
    # contains a space — consider shlex.split or building an argument list.
    return_code = subprocess.call(cmd.split(' '))
    if return_code != 0:
        raise BackupFailed('could not create media backup')
    # Upload to S3
    upload_rotate(
        backup_dest,
        settings.BACKUP_AWS_STORAGE_BUCKET_NAME,
        settings.BACKUP_AWS_KEY_PREFIX,
        aws_key=settings.BACKUP_AWS_ACCESS_KEY_ID,
        aws_secret=settings.BACKUP_AWS_SECRET_ACCESS_KEY
    )
def backup_db():
    '''
    make a backup of the database

    Dumps the default PostgreSQL database (custom format, pg_dump -Fc) into
    settings.BACKUP_DIR with a date stamp, then uploads it to S3 via
    upload_rotate. Raises BackupFailed on unsupported engines or dump errors.
    '''
    if settings.DATABASES['default']['ENGINE'] != 'django.db.backends.postgresql_psycopg2':
        raise BackupFailed('Database engine not supported')
    timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d')
    backup_dest = os.path.join(settings.BACKUP_DIR, 'db-{0}.dump'.format(timestamp))
    cmd_template = 'pg_dump -Fc -w -h {db_host_name} -U {db_user} {db_name} -f {backup_dest}'
    cmd = cmd_template.format(
        db_host_name=settings.DATABASES['default']['HOST'],
        db_user=settings.DATABASES['default']['USER'],
        db_name=settings.DATABASES['default']['NAME'],
        backup_dest=backup_dest
    )
    logger.debug('Backing up db with following command: {0}'.format(cmd))
    env = os.environ.copy()
    env['PGPASSWORD'] = settings.DATABASES['default']['PASSWORD']
    #TODO not the best way to pass the password!
    process = subprocess.Popen(cmd.split(' '), env=env, stderr=subprocess.PIPE)
    return_code = process.wait()
    if return_code != 0:
        _, err = process.communicate()
        # communicate() yields bytes; decode before concatenating, otherwise
        # the str + bytes concatenation itself raises TypeError on Python 3.
        raise BackupFailed('could not create database backup ' + err.decode('utf-8', 'replace'))
    # Upload to S3
    upload_rotate(
        backup_dest,
        settings.BACKUP_AWS_STORAGE_BUCKET_NAME,
        settings.BACKUP_AWS_KEY_PREFIX,
        aws_key=settings.BACKUP_AWS_ACCESS_KEY_ID,
        aws_secret=settings.BACKUP_AWS_SECRET_ACCESS_KEY
    )
| StarcoderdataPython |
9681308 | <gh_stars>1-10
from typing import Any, Callable, Generator, Optional, TypeVar
import azure
from azure.data.tables import TableClient, TableServiceClient
from azure.identity import DefaultAzureCredential
ModelType = TypeVar("ModelType")
def table_client() -> TableServiceClient:
    """Connect to the Azure table service using the default credential chain."""
    return TableServiceClient(
        endpoint="https://frybot.table.core.windows.net/",
        credential=DefaultAzureCredential(),
    )
def connect(table: str) -> TableClient:
    """Connect to the table service and return a client for *table*."""
    return table_client().get_table_client(table_name=table)
class DataConnection:
    """Manage a connection to the StorageTable."""

    _table: str = ""
    # Lazily-created client; stays None until first use of `table_client`.
    # (Original annotated this `TableClient = None`, which is not a valid
    # value for that type — Optional is the correct annotation.)
    _table_client: Optional[TableClient] = None

    def __init__(self, table: str, client: Optional[TableClient] = None):
        if client is not None:
            self._table_client = client
        self._table = table

    @property
    def table(self) -> str:
        """Get the table name."""
        return self._table

    @property
    def table_client(self) -> TableClient:
        """Gets the table client, connecting lazily on first access."""
        if self._table_client is None:
            self._table_client = connect(self.table)
        return self._table_client

    def save(self, entity: dict[str, Any]):
        """Save (insert or update) an entity."""
        self.table_client.upsert_entity(entity=entity)

    def query(
        self, hydrator: Callable[[dict], ModelType], query: str
    ) -> Generator[ModelType, None, None]:
        """Query the table, yielding each raw row through *hydrator*."""
        for result in self.table_client.query_entities(query):
            yield hydrator(result)

    def get(
        self, partition_key: str, row_key: str, hydrator: Callable[[dict], ModelType]
    ) -> Optional[ModelType]:
        """Get a single entity, or None when it does not exist."""
        try:
            entity = self.table_client.get_entity(
                partition_key=partition_key, row_key=row_key
            )
            return hydrator(entity)
        # NOTE(review): relies on azure.core being importable as an attribute
        # of the bare `import azure` at module top — confirm that submodule is
        # loaded before this handler can be reached.
        except azure.core.exceptions.ResourceNotFoundError:
            return None
| StarcoderdataPython |
1681712 | <filename>dmpipe/dm_plotting.py
#!/usr/bin/env python
#
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Top level scripts to make castro plot and limits plots in mass / sigmav space
"""
import os
from os.path import splitext
import numpy as np
from astropy.table import Table
from fermipy.utils import init_matplotlib_backend, load_yaml
from fermipy.jobs.utils import is_not_null
from fermipy.jobs.link import Link
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path
from dmpipe.dm_spectral_utils import DMCastroData, DMSpecTable
from dmpipe.dm_plotting_utils import plot_dm_castro
from dmpipe.dm_plotting_utils import plot_dm_spectra_by_mass, plot_dm_spectra_by_channel
from dmpipe.dm_plotting_utils import plot_limits_from_arrays, plot_mc_truth
from dmpipe.name_policy import NameFactory
from dmpipe import defaults
# Force a non-interactive matplotlib backend before any plotting happens.
init_matplotlib_backend()

# Shared factory for all output-file naming; paths are relative to the cwd.
NAME_FACTORY = NameFactory(basedir='.')
def is_decay_profile(profile):
    """Check if a profile string is for DM decay (by its trailing token)."""
    suffix = profile.split('_')[-1]
    return suffix in ('point', 'dmap', 'dradial')
def is_ann_profile(profile):
    """Check if a profile string is for DM annihilation (by its trailing token)."""
    suffix = profile.split('_')[-1]
    return suffix in ('point', 'map', 'radial')
def select_channels(channels, profile):
    """Select the relevant channels (decay or annihilation) for a profile.

    Channels whose name contains '_decay' are kept only when the profile is
    a decay profile; all other channels are kept only when the profile is an
    annihilation profile.
    """
    allow_decay = is_decay_profile(profile)
    allow_ann = is_ann_profile(profile)
    return [chan for chan in channels
            if (allow_decay if '_decay' in chan else allow_ann)]
def get_ul_bands(table, prefix):
    """Get the upper limit bands from a table.

    Parameters
    ----------
    table : `astropy.table.Table`
        Table to get the limits from.
    prefix : str
        Prefix to append to the column names for the limits

    Returns
    -------
    output : dict
        A dictionary with the limits bands, keyed by quantile name.
    """
    quantiles = ('q02', 'q16', 'q84', 'q97', 'median')
    return {q: np.squeeze(table["%s_%s" % (prefix, q)]) for q in quantiles}
class PlotDMSpectra(Link):
    """Small class to plot the DM spectra from pre-computed tables.
    """
    appname = 'dmpipe-plot-dm-spectra'
    linkname_default = 'plot-dm-spectra'
    usage = '%s [options]' % (appname)
    description = "Plot the DM spectra stored in pre-computed tables"
    default_options = dict(infile=defaults.generic['infile'],
                           outfile=defaults.generic['outfile'],
                           chan=defaults.common['chan'],
                           mass=defaults.common['mass'],
                           spec_type=defaults.common['spec_type'])
    __doc__ += Link.construct_docstring(default_options)

    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)
        dm_spec_table = DMSpecTable.create_from_fits(args.infile)
        # Two views of the same table: spectra at fixed channel across masses,
        # and spectra at fixed mass across channels.
        dm_plot_by_mass = plot_dm_spectra_by_mass(
            dm_spec_table, chan=args.chan, spec_type=args.spec_type)
        dm_plot_by_chan = plot_dm_spectra_by_channel(
            dm_spec_table, mass=args.mass, spec_type=args.spec_type)
        if args.outfile:
            dm_plot_by_mass[0].savefig(
                args.outfile.replace(
                    '.png', '_%s.png' %
                    args.chan))
            # NOTE(review): '%1.F' formats the mass with zero decimal places;
            # possibly '%.1f' was intended — confirm desired filename format.
            dm_plot_by_chan[0].savefig(
                args.outfile.replace(
                    '.png', '_%1.FGeV.png' %
                    args.mass))
class PlotLimits(Link):
    """Small class to Plot DM limits on <sigma v> versus mass.
    """
    appname = 'dmpipe-plot-limits'
    linkname_default = 'plot-limits'
    usage = '%s [options]' % (appname)
    description = "Plot DM limits on <sigma v> versus mass"
    default_options = dict(infile=defaults.generic['infile'],
                           outfile=defaults.generic['outfile'],
                           chan=defaults.common['chan'],
                           bands=defaults.collect['bands'],
                           sim=defaults.sims['sim'])
    __doc__ += Link.construct_docstring(default_options)

    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)
        # Decay channels report lower limits on lifetime-like quantities;
        # annihilation channels report upper limits on <sigma v>.
        if args.chan.find('_decay') >= 0:
            decay = True
            limit_col = 'll_0.95'
            ylims = (1e+22, 1e+28)
        else:
            decay = False
            limit_col = 'ul_0.95'
            ylims = (1e-28, 1e-22)
        if is_not_null(args.infile):
            tab_m = Table.read(args.infile, hdu="masses")
            tab_s = Table.read(args.infile, hdu=args.chan)
            xvals = tab_m['masses'][0]
            yvals = tab_s[limit_col][0]
            ldict = dict(limits=(xvals, yvals))
        else:
            ldict = {}
        # Optional expectation bands from simulations.
        if is_not_null(args.bands):
            tab_b = Table.read(args.bands, hdu=args.chan)
            tab_bm = Table.read(args.bands, hdu="masses")
            bands = get_ul_bands(tab_b, limit_col)
            bands['masses'] = tab_bm['masses'][0]
        else:
            bands = None
        # Optionally overlay the injected MC-truth model.
        if is_not_null(args.sim):
            sim_srcs = load_yaml(args.sim)
            injected_src = sim_srcs.get('injected_source', None)
        else:
            injected_src = None
        xlims = (1e1, 1e4)
        dm_plot = plot_limits_from_arrays(ldict, xlims, ylims, bands, decay=decay)
        if injected_src is not None:
            mc_model = injected_src['source_model']
            plot_mc_truth(dm_plot[1], mc_model)
        if args.outfile:
            dm_plot[0].savefig(args.outfile)
            return None
        return dm_plot
class PlotMLEs(Link):
    """Small class to Plot DM maximum likelihood estimate <sigma v> versus mass.
    """
    appname = 'dmpipe-plot-mles'
    linkname_default = 'plot-mles'
    usage = '%s [options]' % (appname)
    description = "Plot DM maximum likelihood estimate on <sigma v> versus mass"
    default_options = dict(infile=defaults.generic['infile'],
                           outfile=defaults.generic['outfile'],
                           chan=defaults.common['chan'],
                           bands=defaults.collect['bands'],
                           sim=defaults.sims['sim'])
    __doc__ += Link.construct_docstring(default_options)

    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)
        # Same column/axis conventions as PlotLimits.
        if args.chan.find('_decay') >= 0:
            limit_col = 'll_0.95'
            ylims = (1e+22, 1e+28)
        else:
            limit_col = 'ul_0.95'
            ylims = (1e-28, 1e-22)
        if is_not_null(args.infile):
            tab_m = Table.read(args.infile, hdu="masses")
            tab_s = Table.read(args.infile, hdu=args.chan)
            xvals = tab_m['masses'][0]
            yvals = tab_s[limit_col][0]
            ldict = dict(limits=(xvals, yvals))
        else:
            ldict = {}
        # Bands here are built from the 'mles' columns, not the limit columns.
        if is_not_null(args.bands):
            tab_b = Table.read(args.bands, hdu=args.chan)
            tab_bm = Table.read(args.bands, hdu="masses")
            bands = get_ul_bands(tab_b, 'mles')
            bands['masses'] = tab_bm['masses'][0]
        else:
            bands = None
        if is_not_null(args.sim):
            sim_srcs = load_yaml(args.sim)
            injected_src = sim_srcs.get('injected_source', None)
        else:
            injected_src = None
        xlims = (1e1, 1e4)
        dm_plot = plot_limits_from_arrays(ldict, xlims, ylims, bands)
        if injected_src is not None:
            mc_model = injected_src['source_model']
            plot_mc_truth(dm_plot[1], mc_model)
        if args.outfile:
            dm_plot[0].savefig(args.outfile)
            return None
        return dm_plot
class PlotDM(Link):
    """Small class to plot the likelihood vs <sigma v> and DM particle mass
    """
    appname = 'dmpipe-plot-dm'
    linkname_default = 'plot-dm'
    usage = "%s [options]" % (appname)
    description = "Plot the likelihood vs <sigma v> and DM particle mass"
    default_options = dict(infile=defaults.generic['infile'],
                           outfile=defaults.generic['outfile'],
                           chan=defaults.common['chan'],
                           global_min=defaults.common['global_min'])
    __doc__ += Link.construct_docstring(default_options)

    def run_analysis(self, argv):
        """Run this analysis"""
        args = self._parser.parse_args(argv)
        # Dispatch on input format: castro data may come as FITS or YAML.
        exttype = splitext(args.infile)[-1]
        if exttype in ['.fits']:
            dm_castro = DMCastroData.create_from_fitsfile(args.infile, args.chan)
        elif exttype in ['.yaml']:
            dm_castro = DMCastroData.create_from_yamlfile(args.infile, args.chan)
        else:
            raise ValueError("Can not read file type %s for SED" % exttype)
        dm_plot = plot_dm_castro(dm_castro, global_min=args.global_min)
        if args.outfile:
            dm_plot[0].savefig(args.outfile)
            return None
        return dm_plot
class PlotLimits_SG(ScatterGather):
    """Small class to generate configurations for `PlotLimits`

    This does a triple nested loop over targets, profiles and j-factor priors
    """
    appname = 'dmpipe-plot-limits-sg'
    usage = "%s [options]" % (appname)
    description = "Make castro plots for set of targets"
    clientclass = PlotLimits

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           targetlist=defaults.common['targetlist'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)
        if targets_yaml is None:
            return job_configs

        astro_priors = args['astro_priors']
        channels = args['channels']

        base_config = dict(bands=None,
                           sim=sim)

        targets = load_yaml(targets_yaml)
        # One job per (target, profile, prior, channel) combination.
        for target_name, target_list in list(targets.items()):
            for targ_prof in target_list:
                prof_chans = select_channels(channels, targ_prof)
                for astro_prior in astro_priors:
                    name_keys = dict(target_type=ttype,
                                     target_name=target_name,
                                     profile=targ_prof,
                                     astro_prior=astro_prior,
                                     fullpath=True)
                    input_path = NAME_FACTORY.dmlimitsfile(**name_keys)
                    for chan in prof_chans:
                        targ_key = "%s:%s:%s:%s" % (
                            target_name, targ_prof, astro_prior, chan)
                        output_path = input_path.replace(
                            '.fits', '_%s.png' % chan)
                        logfile = make_nfs_path(
                            output_path.replace('.png', '.log'))
                        job_config = base_config.copy()
                        job_config.update(dict(infile=input_path,
                                               outfile=output_path,
                                               astro_prior=astro_prior,
                                               logfile=logfile,
                                               chan=chan))
                        job_configs[targ_key] = job_config

        return job_configs
class PlotStackedLimits_SG(ScatterGather):
    """Small class to generate configurations for `PlotStackedLimits`

    This does a double nested loop over rosters and j-factor priors
    """
    appname = 'dmpipe-plot-stacked-limits-sg'
    usage = "%s [options]" % (appname)
    description = "Make castro plots for set of targets"
    clientclass = PlotLimits

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           rosterlist=defaults.common['rosterlist'],
                           bands=defaults.collect['bands'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           sim=defaults.sims['sim'],
                           nsims=defaults.sims['nsims'],
                           seed=defaults.sims['seed'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)
        if roster_yaml is None:
            return job_configs

        roster_dict = load_yaml(roster_yaml)

        astro_priors = args['astro_priors']
        channels = args['channels']

        for roster_name in list(roster_dict.keys()):
            rost_chans = select_channels(channels, roster_name)
            for astro_prior in astro_priors:
                name_keys = dict(target_type=ttype,
                                 roster_name=roster_name,
                                 astro_prior=astro_prior,
                                 sim_name=sim,
                                 fullpath=True)
                for chan in rost_chans:
                    targ_key = "%s:%s:%s" % (roster_name, astro_prior, chan)
                    # Simulated rosters fan out over one job per seed.
                    if sim is not None:
                        seedlist = list(range(
                            args['seed'], args['seed'] + args['nsims']))
                        sim_path = os.path.join('config', 'sim_%s.yaml' % sim)
                    else:
                        seedlist = [None]
                        sim_path = None
                    for seed in seedlist:
                        if seed is not None:
                            name_keys['seed'] = "%06i" % seed  # pylint: disable=bad-string-format-type
                            input_path = NAME_FACTORY.sim_stackedlimitsfile(
                                **name_keys)
                            full_targ_key = "%s_%06i" % (targ_key, seed)  # pylint: disable=bad-string-format-type
                        else:
                            input_path = NAME_FACTORY.stackedlimitsfile(
                                **name_keys)
                            full_targ_key = targ_key
                        output_path = input_path.replace(
                            '.fits', '_%s.png' % chan)
                        logfile = make_nfs_path(
                            output_path.replace('.png', '.log'))
                        job_config = dict(infile=input_path,
                                          outfile=output_path,
                                          astro_prior=astro_prior,
                                          logfile=logfile,
                                          sim=sim_path,
                                          chan=chan)
                        job_configs[full_targ_key] = job_config

        return job_configs
class PlotDM_SG(ScatterGather):
    """Small class to generate configurations for `PlotDM`

    This does a quadruple nested loop over targets, profiles,
    j-factor priors and channels
    """
    appname = 'dmpipe-plot-dm-sg'
    usage = "%s [options]" % (appname)
    description = "Make castro plots for set of targets"
    clientclass = PlotDM

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           targetlist=defaults.common['targetlist'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           global_min=defaults.common['global_min'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        (targets_yaml, _) = NAME_FACTORY.resolve_targetfile(args)
        if targets_yaml is None:
            return job_configs

        targets = load_yaml(targets_yaml)

        astro_priors = args['astro_priors']
        channels = args['channels']
        global_min = args['global_min']

        # One job per (target, profile, prior, channel) combination.
        for target_name, target_list in list(targets.items()):
            for targ_prof in target_list:
                prof_chans = select_channels(channels, targ_prof)
                for astro_prior in astro_priors:
                    name_keys = dict(target_type=ttype,
                                     target_name=target_name,
                                     profile=targ_prof,
                                     astro_prior=astro_prior,
                                     fullpath=True)
                    input_path = NAME_FACTORY.dmlikefile(**name_keys)
                    for chan in prof_chans:
                        targ_key = "%s:%s:%s:%s" % (
                            target_name, targ_prof, astro_prior, chan)
                        output_path = input_path.replace(
                            '.fits', '_%s.png' % chan)
                        logfile = make_nfs_path(
                            output_path.replace('.png', '.log'))
                        job_config = dict(infile=input_path,
                                          outfile=output_path,
                                          astro_prior=astro_prior,
                                          logfile=logfile,
                                          global_min=global_min,
                                          chan=chan)
                        job_configs[targ_key] = job_config

        return job_configs
class PlotStackedDM_SG(ScatterGather):
    """Small class to generate configurations for `PlotDM`

    This does a triple loop over rosters, j-factor priors and channels
    """
    appname = 'dmpipe-plot-stacked-dm-sg'
    usage = "%s [options]" % (appname)
    description = "Make castro plots for set of targets"
    clientclass = PlotDM

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           rosterlist=defaults.common['rosterlist'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           sim=defaults.sims['sim'],
                           nsims=defaults.sims['nsims'],
                           seed=defaults.sims['seed'],
                           global_min=defaults.common['global_min'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)
        if roster_yaml is None:
            return job_configs

        roster_dict = load_yaml(roster_yaml)

        astro_priors = args['astro_priors']
        channels = args['channels']
        global_min = args['global_min']

        for roster_name in list(roster_dict.keys()):
            rost_chans = select_channels(channels, roster_name)
            for astro_prior in astro_priors:
                name_keys = dict(target_type=ttype,
                                 roster_name=roster_name,
                                 astro_prior=astro_prior,
                                 sim_name=sim,
                                 fullpath=True)
                for chan in rost_chans:
                    targ_key = "%s:%s:%s" % (roster_name, astro_prior, chan)
                    # Simulated rosters fan out over one job per seed.
                    if sim is not None:
                        seedlist = list(range(
                            args['seed'], args['seed'] + args['nsims']))
                    else:
                        seedlist = [None]
                    for seed in seedlist:
                        if seed is not None:
                            name_keys['seed'] = "%06i" % seed  # pylint: disable=bad-string-format-type
                            input_path = NAME_FACTORY.sim_resultsfile(
                                **name_keys)
                            full_targ_key = "%s_%06i" % (targ_key, seed)  # pylint: disable=bad-string-format-type
                        else:
                            input_path = NAME_FACTORY.resultsfile(**name_keys)
                            full_targ_key = targ_key
                        output_path = input_path.replace(
                            '.fits', '_%s.png' % chan)
                        logfile = make_nfs_path(
                            output_path.replace('.png', '.log'))
                        job_config = dict(infile=input_path,
                                          outfile=output_path,
                                          astro_prior=astro_prior,
                                          logfile=logfile,
                                          global_min=global_min,
                                          chan=chan)
                        job_configs[full_targ_key] = job_config

        return job_configs
class PlotControlLimits_SG(ScatterGather):
    """Small class to generate configurations for `PlotLimits`

    This does a quadruple loop over rosters, j-factor priors, channels,
    and expectation bands
    """
    appname = 'dmpipe-plot-control-limits-sg'
    usage = "%s [options]" % (appname)
    description = "Make limits plots for positve controls"
    clientclass = PlotLimits

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           rosterlist=defaults.common['targetlist'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           sim=defaults.sims['sim'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        # Ensure the results directory exists; tolerate "already exists".
        try:
            os.makedirs(os.path.join(ttype, 'results'))
        except OSError:
            pass

        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)
        if roster_yaml is None:
            return job_configs

        roster_dict = load_yaml(roster_yaml)

        astro_priors = args['astro_priors']
        channels = args['channels']

        sim_path = os.path.join('config', 'sim_%s.yaml' % sim)
        for roster_name in list(roster_dict.keys()):
            rost_chans = select_channels(channels, roster_name)
            for astro_prior in astro_priors:
                # 'summary' seed selects the aggregated simulation bands file.
                name_keys = dict(target_type=ttype,
                                 roster_name=roster_name,
                                 astro_prior=astro_prior,
                                 sim_name=sim,
                                 seed='summary',
                                 fullpath=True)
                bands_path = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)
                for chan in rost_chans:
                    targ_key = "%s:%s:%s:%s" % (roster_name, astro_prior, sim, chan)
                    output_path = os.path.join(ttype, 'results', "control_%s_%s_%s_%s.png" % (roster_name, astro_prior, sim, chan))
                    logfile = make_nfs_path(output_path.replace('.png', '.log'))
                    job_config = dict(bands=bands_path,
                                      outfile=output_path,
                                      sim=sim_path,
                                      logfile=logfile,
                                      chan=chan)
                    job_configs[targ_key] = job_config

        return job_configs
class PlotControlMLEs_SG(ScatterGather):
    """Small class to generate configurations for `PlotMLEs`

    This does a quadruple loop over rosters, j-factor priors, channels,
    and expectation bands
    """
    appname = 'dmpipe-plot-control-mles-sg'
    usage = "%s [options]" % (appname)
    description = "Make mle plots for positve controls"
    clientclass = PlotMLEs

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           rosterlist=defaults.common['targetlist'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           sim=defaults.sims['sim'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        # Ensure the results directory exists; tolerate "already exists".
        try:
            os.makedirs(os.path.join(ttype, 'results'))
        except OSError:
            pass

        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)
        if roster_yaml is None:
            return job_configs

        roster_dict = load_yaml(roster_yaml)

        astro_priors = args['astro_priors']
        channels = args['channels']

        sim_path = os.path.join('config', 'sim_%s.yaml' % sim)
        for roster_name in list(roster_dict.keys()):
            rost_chans = select_channels(channels, roster_name)
            for astro_prior in astro_priors:
                # 'summary' seed selects the aggregated simulation bands file.
                name_keys = dict(target_type=ttype,
                                 roster_name=roster_name,
                                 astro_prior=astro_prior,
                                 sim_name=sim,
                                 seed='summary',
                                 fullpath=True)
                bands_path = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)
                for chan in rost_chans:
                    targ_key = "%s:%s:%s:%s" % (roster_name, astro_prior, sim, chan)
                    output_path = os.path.join(ttype, 'results', "control_mle_%s_%s_%s_%s.png" % (roster_name, astro_prior, sim, chan))
                    logfile = make_nfs_path(output_path.replace('.png', '.log'))
                    job_config = dict(bands=bands_path,
                                      outfile=output_path,
                                      sim=sim_path,
                                      logfile=logfile,
                                      chan=chan)
                    job_configs[targ_key] = job_config

        return job_configs
class PlotFinalLimits_SG(ScatterGather):
    """Small class to generate configurations for `PlotLimits`

    This does a quadruple loop over rosters, j-factor priors, channels,
    and expectation bands
    """
    appname = 'dmpipe-plot-final-limits-sg'
    usage = "%s [options]" % (appname)
    description = "Make final limits plots"
    clientclass = PlotLimits

    job_time = 60

    default_options = dict(ttype=defaults.common['ttype'],
                           rosterlist=defaults.common['rosterlist'],
                           channels=defaults.common['channels'],
                           astro_priors=defaults.common['astro_priors'],
                           sims=defaults.sims['sims'],
                           dry_run=defaults.common['dry_run'])

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations
        """
        job_configs = {}

        ttype = args['ttype']
        (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(args)
        if roster_yaml is None:
            return job_configs
        # Final plots are data-only; a single 'sim' must not be set here
        # (simulation names come from the 'sims' list instead).
        if sim is not None:
            raise ValueError("Sim argument set of plotting data results")

        roster_dict = load_yaml(roster_yaml)

        astro_priors = args['astro_priors']
        channels = args['channels']
        sims = args['sims']

        for roster_name in list(roster_dict.keys()):
            rost_chans = select_channels(channels, roster_name)
            for astro_prior in astro_priors:
                name_keys = dict(target_type=ttype,
                                 roster_name=roster_name,
                                 astro_prior=astro_prior,
                                 fullpath=True)
                input_path = NAME_FACTORY.stackedlimitsfile(**name_keys)
                for sim in sims:
                    name_keys.update(sim_name=sim,
                                     seed='summary')
                    bands_path = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)
                    for chan in rost_chans:
                        targ_key = "%s:%s:%s:%s" % (roster_name, astro_prior, sim, chan)
                        output_path = os.path.join(ttype, 'results', "final_%s_%s_%s_%s.png" % (roster_name, astro_prior, sim, chan))
                        logfile = make_nfs_path(output_path.replace('.png', '.log'))
                        job_config = dict(infile=input_path,
                                          outfile=output_path,
                                          bands=bands_path,
                                          logfile=logfile,
                                          chan=chan)
                        job_configs[targ_key] = job_config

        return job_configs
def register_classes():
    """Register these classes with the `LinkFactory` """
    # Same registration order as before, expressed as a single loop.
    link_classes = (PlotDMSpectra, PlotLimits, PlotLimits_SG, PlotMLEs,
                    PlotDM, PlotDM_SG, PlotStackedDM_SG, PlotStackedLimits_SG,
                    PlotControlLimits_SG, PlotControlMLEs_SG,
                    PlotFinalLimits_SG)
    for link_class in link_classes:
        link_class.register_class()
| StarcoderdataPython |
# Simple macronutrient calorie calculator using standard Atwater factors.
print("Cal count bc you're fat")
gfat = float(input("Grams of Fat: "))
gprotein = float(input("Grams of Protein: "))
gcarbohydrate = float(input("Grams of Carbohydrates: "))

# Calorie calc: kcal per gram of each macronutrient.
ONEG_FAT = 9
ONEG_PROTEIN = 4
ONEG_CARBOHYD = 4

fatCal = gfat * ONEG_FAT
# Labels fixed: these printed values are calories, not grams.
print("Fat Calories: " + str(fatCal))
proteinCal = gprotein * ONEG_PROTEIN
print("Protein Calories: " + str(proteinCal))
carbohydCal = gcarbohydrate * ONEG_CARBOHYD
print("Carbohydrate Calories: " + str(carbohydCal))
totalCal = fatCal + proteinCal + carbohydCal
print("Total Calories: " + str(totalCal))
| StarcoderdataPython |
9662303 | #
# encoding: utf-8
import datetime
from unittest import TestCase
from mock import MagicMock
from tornadoalf.token import Token, TokenHTTPError
from tornado.httpclient import HTTPResponse
class TestToken(TestCase):
    """Unit tests for the Token value object."""

    def test_should_have_an_access_token(self):
        tok = Token(access_token='access_token')
        self.assertEqual(tok.access_token, 'access_token')

    def test_should_know_when_it_has_expired(self):
        # A zero lifetime means the token is expired the moment it is built.
        expired = Token(access_token='access_token', expires_in=0)
        self.assertFalse(expired.is_valid())

    def test_should_know_when_it_is_valid(self):
        live = Token(access_token='access_token', expires_in=10)
        self.assertTrue(live.is_valid())

    def test_expires_on_using_utc(self):
        tok = Token(access_token='access_token', expires_in=10)
        # Expiry must lie in the future but within the requested window
        # (with slack), measured against the UTC clock.
        self.assertTrue(tok.expires_on > datetime.datetime.utcnow())
        self.assertTrue(
            tok.expires_on <
            datetime.datetime.utcnow() + datetime.timedelta(seconds=15))
class TestTokenHTTPError(TestCase):
    """Unit tests for TokenHTTPError's string formatting."""

    def test_should_show_http_response_in_exception(self):
        # Fake the response body buffer that HTTPResponse reads from.
        body_buffer = MagicMock()
        body_buffer.getvalue.return_value = '{"myError": true}'
        fake_request = MagicMock()
        http_response = HTTPResponse(
            request=fake_request, code=401, buffer=body_buffer)
        error = TokenHTTPError('My Error', http_response)
        self.assertEqual(
            str(error),
            'My Error, StatusCode: 401, Body: {"myError": true}')
9757560 | #!/usr/bin/python
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, <NAME>
class DummyAsyncResult(object):
    """Synchronous stand-in for an AsyncResult.

    The callable is evaluated eagerly in the constructor and ``get()``
    simply returns the stored value (it never blocks).
    """

    def __init__(self, func):
        # No deferral: run the function right away and cache its outcome.
        self.result = func()

    def get(self):
        """Return the precomputed result."""
        return self.result
40019 | <filename>export/parts.py
from .attributes import AttributeExport
from .attributes import MimicExport
from .attributes import JointExport
from .attributes import JointOrientationExport
from .attributes import GazeExport
import os
class BodyPartsExport:
    """Groups one exporter per body part and fans snapshot/write calls
    out to all of them."""

    # Exporter type instantiated for each part; subclasses override this.
    ExportClass = AttributeExport

    def __init__(self, name, part_names, header, snapshot_path):
        self.name = name
        self.part_names = part_names
        self.header = header
        self.snapshot_path = snapshot_path
        self.create_parts()

    def create_parts(self):
        # One exporter instance stored as an attribute per tracked part.
        for part_name in self.part_names:
            exporter = self.ExportClass(part_name, self.header, self.snapshot_path)
            setattr(self, part_name, exporter)

    def append_snapshot(self, snapshot):
        # Forward the snapshot to every per-part exporter.
        for part_name in self.part_names:
            getattr(self, part_name).append_snapshot(snapshot)

    def write(self, path):
        out_dir = os.path.join(path, self.name)
        # Create the group directory if it does not exist yet.
        try:
            os.mkdir(out_dir)
        except FileExistsError:
            pass
        # Write one file per part into the group directory.
        for part_name in self.part_names:
            getattr(self, part_name).write(out_dir)
class MimicsExport(BodyPartsExport):
    """Exports the Kinect face-mesh vertices, one file per vertex
    ('m0' .. 'm1345')."""

    name = 'mimics'
    header = ['X', 'Y', 'Z']
    snapshot_path = ['KinectFaceVertices']
    ExportClass = MimicExport
    face_points_count = 1346

    def __init__(self):
        # Generated labels m0, m1, ... for every face-mesh vertex.
        labels = ['m' + str(idx) for idx in range(self.face_points_count)]
        super().__init__(self.name, labels, self.header, self.snapshot_path)
class GazesExport(BodyPartsExport):
    """Exports gaze estimation vectors, one file per gaze source."""

    name = 'gazes'
    header = ['X', 'Y', 'Z']
    snapshot_path = ['GazeEstimation']
    ExportClass = GazeExport  # NOTE(review): the old "TODO change to GazeExport" looks stale — it already is GazeExport
    part_names = ['faceGaze', 'gazeRight', 'gazeLeft']

    def __init__(self):
        super().__init__(self.name, self.part_names, self.header, self.snapshot_path)
class JointsExport(BodyPartsExport):
    """Exports skeleton joint positions for one tracked body, one file
    per joint."""

    name = 'joints'
    header = ['X', 'Y', 'Z']
    snapshot_path = ['KinectBody']
    ExportClass = JointExport
    # The 25 joint names read from the KinectBody snapshot data.
    part_names = ['SpineBase',
                  'SpineMid',
                  'Neck',
                  'Head',
                  'ShoulderLeft',
                  'ElbowLeft',
                  'WristLeft',
                  'HandLeft',
                  'ShoulderRight',
                  'ElbowRight',
                  'WristRight',
                  'HandRight',
                  'HipLeft',
                  'KneeLeft',
                  'AnkleLeft',
                  'FootLeft',
                  'HipRight',
                  'KneeRight',
                  'AnkleRight',
                  'FootRight',
                  'SpineShoulder',
                  'HandTipLeft',
                  'ThumbLeft',
                  'HandTipRight',
                  'ThumbRight']

    def __init__(self, skeleton):
        # Copy the class-level path before appending the skeleton id so
        # instances do not mutate the shared class attribute.
        snapshot_path = self.snapshot_path[:]
        snapshot_path.append(str(skeleton))
        super().__init__(self.name, self.part_names, self.header, snapshot_path)
class JointOrientationsExport(JointsExport):
    """Same joints as JointsExport, but exports quaternion orientations
    (hence the extra W column)."""

    name = 'joints_orient'
    header = ['X', 'Y', 'Z', 'W']
    ExportClass = JointOrientationExport

    def __init__(self, skeleton):
        super().__init__(skeleton)
| StarcoderdataPython |
5173968 | <filename>house_code/tutorials_altered/modules/file_writing.py<gh_stars>1-10
from .data_functions import DataFunctions as DataFunctions
class SensorDataFileWriting:
    """Static helpers that append IMU sensor samples to a CSV-style file."""

    @staticmethod
    def write_sensor_data_header_to_file(file,
                                         header=("Index,Time,Difference,Hz,AveHz,"
                                                 "Pressure,"
                                                 "Acceleration-X,Acceleration-Y,Acceleration-Z,"
                                                 "Magnetic-X,Magnetic-Y,Magnetic-Z,"
                                                 "Angular-Vel-X,Angular-Vel-Y,Angular-Vel-Z,"
                                                 "Heading,Roll,Pitch,"
                                                 "Quaternion-X,Quaternion-Y,Quaternion-Z,Quaternion-W,"
                                                 "Linear-Acceleration-X,Linear-Acceleration-Y,Linear-Acceleration-Z,"
                                                 "Gravity-X,Gravity-Y,Gravity-Z,")):
        """
        Writes column headers for all of the sensor data to a file
        :param file: the file to write to
        :param str header: The header labels, already set by default
        """
        file.write(header + '\n')

    @staticmethod
    def write_line_of_sensor_data_to_file(index, elapsed_time, time_difference,
                                          file, sensor_data):
        """Append one row: timing fields followed by all sensor readings.

        If any reading is missing (AttributeError while reading the
        sensor object), all 23 sensor columns are filled with "error"
        so the row keeps its shape.
        """
        hz = DataFunctions.convert_hertz(time_difference)
        ave_hz = DataFunctions.find_average_hertz(index, elapsed_time)
        timing_fields = (index, elapsed_time, time_difference, hz, ave_hz)
        output = "".join(str(field) + "," for field in timing_fields)
        try:
            # Gather all readings first; an AttributeError anywhere leaves
            # `output` with only the timing prefix, as before.
            readings = (
                sensor_data.pressure,
                sensor_data.acceleration.x,
                sensor_data.acceleration.y,
                sensor_data.acceleration.z,
                sensor_data.magnetic.x,
                sensor_data.magnetic.y,
                sensor_data.magnetic.z,
                sensor_data.angular_vel.x,
                sensor_data.angular_vel.y,
                sensor_data.angular_vel.z,
                sensor_data.euler_angles.heading,
                sensor_data.euler_angles.roll,
                sensor_data.euler_angles.pitch,
                sensor_data.quaternion.x,
                sensor_data.quaternion.y,
                sensor_data.quaternion.z,
                sensor_data.quaternion.w,
                sensor_data.linear_acceleration.x,
                sensor_data.linear_acceleration.y,
                sensor_data.linear_acceleration.z,
                sensor_data.gravity_vector.x,
                sensor_data.gravity_vector.y,
                sensor_data.gravity_vector.z,
            )
            output += "".join(str(r) + "," for r in readings) + "\n"
        except AttributeError:
            # 23 placeholder fields, matching the sensor columns above.
            output += "error," * 23 + "\n"
        file.write(output)
class SensorAndPositionFileWriting:
    """Static helpers that append IMU sensor samples plus a 3-D position
    to a CSV-style file (23 sensor columns + 3 position columns)."""

    @staticmethod
    def write_sensor_and_position_header_to_file(
            file,
            header=("Index,Time,Difference,Hz,AveHz,"
                    "Pressure,"
                    "Acceleration-X,Acceleration-Y,Acceleration-Z,"
                    "Magnetic-X,Magnetic-Y,Magnetic-Z,"
                    "Angular-Vel-X,Angular-Vel-Y,Angular-Vel-Z,"
                    "Heading,Roll,Pitch,"
                    "Quaternion-X,Quaternion-Y,Quaternion-Z,Quaternion-W,"
                    "Linear-Acceleration-X,Linear-Acceleration-Y,Linear-Acceleration-Z,"
                    "Gravity-X,Gravity-Y,Gravity-Z,"
                    "Position-X,Position-Y,Position-Z")):
        """
        Writes column headers for all of the sensor data to a file
        :param file: the file to write to
        :param str header: The header labels, already set by default
        """
        file.write(header + '\n')

    @staticmethod
    def write_sensor_and_position_data_to_file(index, elapsed_time, time_difference,
                                               file, sensor_data, position_data):
        """Append one row: timing fields, 23 sensor readings, then x/y/z
        position. On a missing attribute all 26 data columns are filled
        with "error" so the row stays aligned with the header."""
        hz = DataFunctions.convert_hertz(time_difference)
        ave_hz = DataFunctions.find_average_hertz(index, elapsed_time)
        # Timing prefix (always written, even on the error path).
        output = (str(index) + "," + str(elapsed_time) + ","
                  + str(time_difference) + "," + str(hz) + ","
                  + str(ave_hz) + ",")
        try:
            # Single expression: if any attribute access raises, nothing of
            # this part is appended to `output`.
            output += (str(sensor_data.pressure) + ","
                       + str(sensor_data.acceleration.x) + ","
                       + str(sensor_data.acceleration.y) + ","
                       + str(sensor_data.acceleration.z) + ","
                       + str(sensor_data.magnetic.x) + ","
                       + str(sensor_data.magnetic.y) + ","
                       + str(sensor_data.magnetic.z) + ","
                       + str(sensor_data.angular_vel.x) + ","
                       + str(sensor_data.angular_vel.y) + ","
                       + str(sensor_data.angular_vel.z) + ","
                       + str(sensor_data.euler_angles.heading) + ","
                       + str(sensor_data.euler_angles.roll) + ","
                       + str(sensor_data.euler_angles.pitch) + ","
                       + str(sensor_data.quaternion.x) + ","
                       + str(sensor_data.quaternion.y) + ","
                       + str(sensor_data.quaternion.z) + ","
                       + str(sensor_data.quaternion.w) + ","
                       + str(sensor_data.linear_acceleration.x) + ","
                       + str(sensor_data.linear_acceleration.y) + ","
                       + str(sensor_data.linear_acceleration.z) + ","
                       + str(sensor_data.gravity_vector.x) + ","
                       + str(sensor_data.gravity_vector.y) + ","
                       + str(sensor_data.gravity_vector.z) + ","
                       + str(position_data.x) + ","
                       + str(position_data.y) + ","
                       + str(position_data.z) + ","
                       + "\n")
        except AttributeError:
            # 26 placeholders = 23 sensor columns + 3 position columns.
            for i in range(0, 26):
                output += "error,"
            output += "\n"
        file.write(output)
class PositionFileWriting:
    """Static helpers that append 3-D position samples to a CSV-style file."""

    @staticmethod
    def write_position_header_to_file(
            file,
            header=("Index,Time,Difference,Hz,AveHz,"
                    "Position-X,Position-Y,Position-Z")):
        """
        Writes column headers for position data to a file
        :param file: the file to write to
        :param str header: The header labels, already set by default
        """
        file.write(header + '\n')

    @staticmethod
    def write_position_data_to_file(index, elapsed_time, time_difference,
                                    file, position_data):
        """Append one row: timing fields followed by the x/y/z position.

        On a missing attribute the three position columns are filled with
        "error" so the row stays aligned with the header.
        """
        hz = DataFunctions.convert_hertz(time_difference)
        ave_hz = DataFunctions.find_average_hertz(index, elapsed_time)
        # Timing prefix (always written, even on the error path).
        output = (str(index) + "," + str(elapsed_time) + ","
                  + str(time_difference) + "," + str(hz) + ","
                  + str(ave_hz) + ",")
        try:
            output += (str(position_data.x) + ","
                       + str(position_data.y) + ","
                       + str(position_data.z) + ","
                       + "\n")
        except AttributeError:
            # Bug fix: this previously wrote 26 "error," fields (copy-pasted
            # from the 23-sensor + 3-position writer), which misaligned the
            # CSV. Only the 3 position columns exist here.
            for i in range(0, 3):
                output += "error,"
            output += "\n"
        file.write(output)
| StarcoderdataPython |
9722979 | <filename>models.py
from flask_login import UserMixin
from json import load
from typing import Dict, Optional
from werkzeug.security import generate_password_hash, \
check_password_hash
class User(UserMixin):
    """In-memory user record for flask-login.

    Note: the constructor hashes ``password`` itself, so callers must pass
    the *plaintext* password, never an already-hashed value.
    """

    def __init__(self, id: str, username: str, email: str, password: str):
        self.id = id
        self.username = username
        self.email = email
        # Stored as a werkzeug password hash, never as plaintext.
        self.password = generate_password_hash(password)

    @staticmethod
    def get(user_id: str) -> Optional["User"]:
        # Looks up the module-level ``users`` dict built by get_users().
        return users.get(user_id)

    def __str__(self) -> str:
        return f"<Id: {self.id}, Username: {self.username}, Email: {self.email}>"

    def __repr__(self) -> str:
        return self.__str__()

    def verify_password(self, pwd: str) -> bool:
        # True when ``pwd`` (plaintext) matches the stored hash.
        return check_password_hash(self.password, pwd)
def get_users():
    """Load all users from ``users.json``, keyed by user id.

    Bug fix: the raw password from the JSON file is handed straight to
    ``User``, whose constructor performs the hashing. Previously the
    password was hashed here *and* again inside ``User.__init__``, so the
    stored value was a hash of a hash and ``verify_password`` could never
    match the user's real password.
    """
    users = {}
    with open("users.json") as file:
        data = load(file)
        for key in data:
            users[key] = User(
                id=key,
                username=data[key]["username"],
                email=data[key]["email"],
                password=data[key]["password"],
            )
    return users


# Built once at import time; User.get() reads from this mapping.
users = get_users()
| StarcoderdataPython |
8128500 | <gh_stars>0
from flask import Flask
from .simple import simple_bp
from .signed import signed_bp
# Single Flask application with the `simple` and `signed` blueprints
# mounted on it.
app = Flask(__name__)
app.register_blueprint(simple_bp)
app.register_blueprint(signed_bp)
| StarcoderdataPython |
308680 | '''
====================================================================
Copyright (c) 2003-2006 <NAME>. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
'''
import sys
import UserDict
class PysvnDictBase(UserDict.IterableUserDict):
    """Dict-like wrapper around a pysvn result dict that also exposes
    each key as an attribute (``entry.name`` == ``entry['name']``).
    Python 2 code (classic UserDict, print statements)."""

    def __init__( self, value_dict, name='' ):
        UserDict.IterableUserDict.__init__( self, value_dict )
        self.__name = name
        if self.__name is None:
            # Diagnostic: a wrapper was constructed without a usable
            # display name (shows up in __repr__ as 'None').
            print '%s given None as name' % self.__class__.__name__

    def __getattr__( self, name ):
        # Only reached when normal attribute lookup fails: fall back to
        # the wrapped dict before raising the usual AttributeError.
        if name in self.data:
            return self.data[ name ]
        raise AttributeError( "%s instance has no attribute '%s'" % (self.__class__.__name__, name) )

    def __repr__( self ):
        return '<%s %s>' % (self.__class__.__name__, repr(self.__name))
# Thin result-wrapper subclasses. Each picks the most natural key of its
# result dict ('name' or 'path') as the display name when one exists.
class PysvnDirent(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict, value_dict.get( 'name', None ) )

class PysvnList(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict, value_dict.get( 'path', None ) )

class PysvnEntry(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict, value_dict.get( 'name', None ) )

class PysvnInfo(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict )

class PysvnLock(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict )

class PysvnLog(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict )

class PysvnLogChangedPath(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict )

class PysvnWcInfo(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict )

class PysvnStatus(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict, value_dict.get( 'path', None ) )

class PysvnDiffSummary(PysvnDictBase):
    def __init__( self, value_dict ):
        PysvnDictBase.__init__( self, value_dict, value_dict.get( 'path', None ) )
# Bit flags selecting which dirent fields to fetch; these mirror the
# SVN_DIRENT_* constants of the Subversion C API.
# An indication that you are interested in the @c kind field
SVN_DIRENT_KIND = 0x00001
# An indication that you are interested in the @c size field
SVN_DIRENT_SIZE = 0x00002
# An indication that you are interested in the @c has_props field
SVN_DIRENT_HAS_PROPS = 0x00004
# An indication that you are interested in the @c created_rev field
SVN_DIRENT_CREATED_REV = 0x00008
# An indication that you are interested in the @c time field
SVN_DIRENT_TIME = 0x00010
# An indication that you are interested in the @c last_author field
SVN_DIRENT_LAST_AUTHOR = 0x00020
# A combination of all the dirent fields (Python 2 long literal)
SVN_DIRENT_ALL = 0xffffffffl
# Import the C extension matching the running Python version and re-export
# its public names from this package. Python 2 syntax throughout.
try:
    maj_min = sys.version_info[:2]
    #
    # use an if tree so that its easy for app makers to
    # find the import that is requires
    #
    if maj_min == (2,3):
        import _pysvn_2_3
        _pysvn = _pysvn_2_3
    elif maj_min == (2,4):
        import _pysvn_2_4
        _pysvn = _pysvn_2_4
    elif maj_min == (2,5):
        import _pysvn_2_5
        _pysvn = _pysvn_2_5
    else:
        raise ImportError( 'Fix pysvn/__init__.py to support python %d.%d' % tuple(maj_min) )
    # Lift all non-dunder names from the extension into this module.
    for key, value in _pysvn.__dict__.items():
        if not key.startswith( '__' ):
            globals()[ key ] = value
except ImportError, e:
    # check for common installation errors that show up as ImportError
    if ': undefined symbol:' in str(e):
        raise ImportError, 'pysvn was built against newer (svn, apr, etc.) libraries then the ones installed on this system. %s' % str(e)
    else:
        raise
def Client( config_dir='' ):
    """Create a pysvn client whose results are wrapped by the classes above."""
    return _pysvn._Client( config_dir, result_wrappers=globals() )
def Transaction( repos_path, transaction_name ):
    """Open a repository transaction, with results wrapped as above."""
    return _pysvn._Transaction( repos_path, transaction_name, result_wrappers=globals() )
| StarcoderdataPython |
1683993 | <reponame>Baidaly/datacamp-samples
'''
The election results DataFrame has a column labeled 'margin' which expresses the number of extra votes the winner received over the losing candidate. This number is given as a percentage of the total votes cast. It is reasonable to assume that in counties where this margin was less than 1%, the results would be too-close-to-call.
Your job is to use boolean selection to filter the rows where the margin was less than 1. You'll then convert these rows of the 'winner' column to np.nan to indicate that these results are too close to declare a winner.
The DataFrame has been pre-loaded for you as election.
'''
# Import numpy
import numpy as np

# Create the boolean array: too_close
too_close = election.margin < 1

# Bug fix: use .loc so the assignment writes into `election` itself.
# `election.winner[too_close] = ...` is chained indexing, which pandas may
# apply to a temporary copy (SettingWithCopyWarning) and silently drop.
election.loc[too_close, 'winner'] = np.nan

# Print the output of election.info()
print(election.info())
| StarcoderdataPython |
5026822 | <filename>migrations/versions/2b5117cc3df6_.py<gh_stars>1-10
"""empty message
Revision ID: <KEY>
Revises: None
Create Date: 2014-02-25 17:44:17.487690
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Drop the legacy mining-pool tables and recreate the new schema."""
    # Remove the old versions of these tables before redefining them.
    op.drop_table('payout')
    op.drop_table('block')
    op.drop_table('transaction')
    op.drop_table('coin_transaction')
    #op.drop_table('one_minute_share')
    # Generic key/value store backed by a Postgres HSTORE column.
    op.create_table('blob',
        sa.Column('key', sa.String(), nullable=False),
        sa.Column('data', postgresql.HSTORE(), nullable=True),
        sa.PrimaryKeyConstraint('key')
    )
    op.create_table('transaction',
        sa.Column('txid', sa.String(), nullable=False),
        sa.Column('confirmed', sa.Boolean(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('txid')
    )
    # NOTE(review): one_minute_share creation is intentionally commented out,
    # but downgrade() still tries to drop it — confirm which is correct.
    #op.create_table('one_minute_share',
    #    sa.Column('user', sa.String(), nullable=False),
    #    sa.Column('minute', sa.DateTime(), nullable=False),
    #    sa.Column('shares', sa.Integer(), nullable=True),
    #    sa.PrimaryKeyConstraint('user', 'minute')
    #    )
    op.create_table('block',
        sa.Column('height', sa.Integer(), nullable=False),
        sa.Column('user', sa.String(), nullable=True),
        sa.Column('found_at', sa.DateTime(), nullable=True),
        sa.Column('time_started', sa.DateTime(), nullable=False),
        sa.Column('orphan', sa.Boolean(), nullable=True),
        sa.Column('mature', sa.Boolean(), nullable=True),
        sa.Column('shares_to_solve', sa.BigInteger(), nullable=True),
        sa.Column('total_value', sa.BigInteger(), nullable=True),
        sa.Column('transaction_fees', sa.BigInteger(), nullable=True),
        sa.Column('fees', sa.BigInteger(), nullable=True),
        sa.Column('bits', sa.String(length=8), nullable=False),
        sa.Column('last_share_id', sa.BigInteger(), nullable=True),
        sa.Column('processed', sa.Boolean(), nullable=True),
        sa.Column('hash', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['last_share_id'], ['share.id'], ),
        sa.PrimaryKeyConstraint('height')
    )
    # One payout row per (user, block); links back to the paying transaction.
    op.create_table('payout',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('blockheight', sa.Integer(), nullable=True),
        sa.Column('user', sa.String(), nullable=True),
        sa.Column('shares', sa.BigInteger(), nullable=True),
        sa.Column('amount', sa.BigInteger(), nullable=True),
        sa.Column('transaction_id', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['blockheight'], ['block.height'], ),
        sa.ForeignKeyConstraint(['transaction_id'], ['transaction.txid'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('user', 'blockheight')
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the tables created above."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('payout')
    op.drop_table('block')
    # NOTE(review): upgrade() never creates 'one_minute_share' (its creation
    # is commented out), so this drop will fail unless the table pre-exists
    # — confirm against the deployed schema.
    op.drop_table('one_minute_share')
    op.drop_table('transaction')
    op.drop_table('blob')
    ### end Alembic commands ###
| StarcoderdataPython |
4809554 | from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.apps.userreports.models import (
DataSourceConfiguration,
StaticDataSourceConfiguration,
)
from corehq.apps.userreports.util import (
LEGACY_UCR_TABLE_PREFIX,
UCR_TABLE_PREFIX,
get_table_name,
)
from corehq.sql_db.connections import connection_manager
class Command(BaseCommand):
    help = "Find orphaned UCR tables for data sources that no longer exist"

    def add_arguments(self, parser):
        parser.add_argument(
            '--engine_id',
            action='store',
            help='Only check this DB engine',
        )
        parser.add_argument(
            '--drop-empty-tables',
            action='store_true',
            default=False,
            help='Call DROP TABLE on tables with no rows',
        )

    def handle(self, **options):
        """Report (and optionally drop, when empty) UCR tables present in
        the database but not referenced by any data source config."""
        # Expected tables come from every dynamic and static data source.
        data_sources = list(DataSourceConfiguration.all())
        data_sources.extend(list(StaticDataSourceConfiguration.all()))
        tables_by_engine_id = self._get_tables_by_engine_id(data_sources, options.get('engine_id'))
        tables_to_remove_by_engine = defaultdict(list)
        for engine_id, expected_tables in tables_by_engine_id.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                # Using string formatting rather than execute with %s syntax
                # is acceptable here because the strings we're inserting are static
                # and only templated for DRYness.  %% escapes the LIKE wildcard.
                results = connection.execute(f"""
                SELECT table_name
                  FROM information_schema.tables
                  WHERE table_schema='public'
                    AND table_type='BASE TABLE'
                    AND (
                        table_name LIKE '{UCR_TABLE_PREFIX}%%'
                        OR
                        table_name LIKE '{LEGACY_UCR_TABLE_PREFIX}%%'
                    );
                """).fetchall()
                tables_in_db = {r[0] for r in results}
                # Orphans = tables that look like UCR tables but have no config.
                tables_to_remove_by_engine[engine_id] = tables_in_db - expected_tables
        for engine_id, tablenames in tables_to_remove_by_engine.items():
            print("\nTables no longer referenced in database: {}:\n".format(engine_id))
            engine = connection_manager.get_engine(engine_id)
            if not tablenames:
                print("\t No tables to prune")
                continue
            for tablename in tablenames:
                with engine.begin() as connection:
                    try:
                        # UCR tables carry an inserted_at column; use it to
                        # report row count and last-write time.
                        result = connection.execute(f'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"')
                    except Exception:
                        print(f"\t{tablename}: no inserted_at column, probably not UCR")
                    else:
                        row_count, idle_since = result.fetchone()
                        if row_count == 0:
                            print(f"\t{tablename}: {row_count} rows")
                            if options['drop_empty_tables']:
                                # Only empty tables are ever dropped.
                                connection.execute(f'DROP TABLE "{tablename}"')
                                print(f'\t^-- deleted {tablename}')
                        else:
                            print(f"\t{tablename}: {row_count} rows, idle since {idle_since}")

    def _get_tables_by_engine_id(self, data_sources, engine_id):
        # Map engine_id -> set of table names expected from the configs,
        # optionally filtered to a single engine.
        tables_by_engine_id = defaultdict(set)
        for data_source in data_sources:
            if engine_id and data_source.engine_id != engine_id:
                continue
            table_name = get_table_name(data_source.domain, data_source.table_id)
            tables_by_engine_id[data_source.engine_id].add(table_name)
        return tables_by_engine_id
| StarcoderdataPython |
6582697 | <filename>Desafios/064 desafio.py
'''
Crie um programa que leia vários números inteiros pelo teclado.
O programa só vai parar quando o usuário digitar o valor 999,
que é a condição de parada. No final, mostre quantos números
foram digitados e qual foi a soma entre eles
(desconsiderando o flag de condição de parada)
'''
# Read integers until the sentinel 999; report how many were entered
# and their sum (the sentinel itself is not counted).
soma = 0
cont = 0
while True:
    num = int(input('Digite um número: '))
    if num == 999:
        # Stop condition: sentinel value, excluded from the totals.
        break
    soma += num
    cont += 1
print('O total de Números digitados foram: {}'.format(cont))
print('A soma total dos número é: {}'.format(soma))
| StarcoderdataPython |
3580974 | """ RNN (Recurent Neural Network) on fashion MNIST - 88,3% over 50 epochs """
# import libraries
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow
import numpy as np
from tensorflow.keras.layers import Dense, Activation, Input, SimpleRNN
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import fashion_mnist
class Init(object):
    """Loads Fashion-MNIST and holds the shared data and hyperparameters."""

    def __init__(self):  # constructor
        # load data (train/test splits of 28x28 grayscale images)
        (self.x_train, self.y_train), (self.x_test, self.y_test) = fashion_mnist.load_data()
        # compute the number of labels from the training targets
        self.num_labels = len(np.unique(self.y_train))
        # reshape to (N, 28, 28) and rescale pixel values to [0, 1]
        self.image_size = self.x_train.shape[1]
        self.x_train = np.reshape(self.x_train, [-1, self.image_size, self.image_size])
        self.x_test = np.reshape(self.x_test, [-1, self.image_size, self.image_size])
        self.x_train = self.x_train.astype('float32') / 255
        self.x_test = self.x_test.astype('float32') / 255
        # network parameters: each image row is one RNN timestep
        self.input_shape = (self.image_size, self.image_size)
        self.batch_size = 128
        self.units = 256
        self.regul = l2(0.0001)
        self.epochs = 50
class RNN(Init):
    """Builds, trains and evaluates a SimpleRNN classifier on the data
    prepared by Init."""

    def build_train_model(self):
        # build model: 28 timesteps of 28 features -> SimpleRNN -> softmax
        self.inputs = Input(shape=self.input_shape)
        self.y = SimpleRNN(units=self.units, kernel_regularizer=self.regul)(self.inputs)
        self.outputs = Dense(self.num_labels, activation='softmax')(self.y)
        self.model = Model(self.inputs, self.outputs)
        self.model.summary()
        print("\n")
        # train model: sparse loss because labels are integer class ids
        self.model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
        self.model.fit(self.x_train, self.y_train, validation_data=(self.x_test, self.y_test),
                       epochs=self.epochs, batch_size=self.batch_size)
        # evaluate on the held-out test split and print percent accuracy
        self.score = self.model.evaluate(self.x_test, self.y_test, batch_size=self.batch_size)
        print("\nTest accuracy: %.1f%%" % (100*self.score[1]))
        print("\n")
if __name__=='__main__':
    # Script entry point: build, train and evaluate the RNN classifier.
    rnn = RNN()
    rnn.build_train_model()
| StarcoderdataPython |
3224097 | <gh_stars>1-10
#! /usr/bin/env python
import begin
# begin.start turns this function into the command-line entry point;
# auto_convert coerces the string arguments to float via the defaults.
@begin.start(auto_convert=True)
def add(a=0.0, b=0.0):
    """ Add two numbers """
    print(a + b)
| StarcoderdataPython |
1660959 | from collections import defaultdict
from .abstract_pop_splitter import AbstractPOPSplitter
from ...graph_utils import path_to_edge_list
from math import floor
class BaselineSplitter(AbstractPOPSplitter):
    """Splits a traffic problem into contiguous row-blocks of the demand
    matrix, one sub-problem per block, dividing every link capacity
    evenly among the sub-problems."""

    def __init__(self, num_subproblems):
        super().__init__(num_subproblems)

    def split(self, problem):
        sub_problems = []
        num_rows = len(problem.traffic_matrix.tm)
        rows_per_problem = floor(num_rows / self._num_subproblems)
        # NOTE(review): despite the name, these indices are in natural
        # order — nothing is shuffled here.
        shuffled_indices = list(range(num_rows))
        for i in range(self._num_subproblems):
            sub_problems.append(problem.copy())
            for indx, j in enumerate(shuffled_indices):
                # zero out all rows except those in the corresponding block of shuffled indices
                # first, cover special case for last block, which also
                # keeps the remainder rows when num_rows is not divisible
                if i == self._num_subproblems - 1:
                    if indx < i * rows_per_problem:
                        sub_problems[-1].traffic_matrix.tm[j, :] = 0
                elif (indx < i * rows_per_problem) or (
                    indx >= (i + 1) * rows_per_problem
                ):
                    sub_problems[-1].traffic_matrix.tm[j, :] = 0
            # split the capacity of each link evenly across sub-problems
            for u, v in sub_problems[-1].G.edges:
                sub_problems[-1].G[u][v]["capacity"] = (
                    sub_problems[-1].G[u][v]["capacity"] / self._num_subproblems
                )
        return sub_problems
| StarcoderdataPython |
1638322 | <filename>onlinevars/views.py<gh_stars>0
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest
import json
from .models import Variable
# Create your views here.
@csrf_exempt
def api_v1(request, name):
    """JSON API for a named online variable.

    GET reads (optionally clearing or creating) the variable, POST sets
    or appends a value, DELETE removes it. Every response echoes the
    `answer` dict below.
    """
    answer = {
        "name": name,
        "value": None,
        "error": None,
        "cleaned": False,
        "created": False,
        "changed": False,
        "deleted": False
    }
    if request.method == "GET":
        # NOTE(review): these are raw query-string values — any non-empty
        # string (even "false") is truthy here.
        clean = request.GET.get("clean", False)
        create = request.GET.get("create", False)
        if Variable.objects.filter(name=name).exists():
            var = Variable.objects.get(name=name)
            value = var.value
            answer["value"] = value
            if clean and value is not None:
                # Read-and-clear: return the old value, store None.
                var.value = None
                var.save()
                answer["cleaned"] = True
            else:
                answer["value"] = var.value
        elif create:
            # Missing variable, but the caller asked for it to be created.
            Variable.objects.create(name=name, value=None)
            answer["created"] = True
        else:
            answer["error"] = f"Var '{name}' not found!"
        return HttpResponse(json.dumps(answer), content_type="application/json")
    elif request.method == "POST":
        value = request.POST.get("value", None)
        append = request.POST.get("append", False)
        if value:
            # With multiple=..., `name` is a comma-separated list of vars
            # that all receive the same value.
            if request.POST.get("multiple", False):
                names = name.split(",")
            else:
                names = [name]
            for name in names:
                var = Variable.objects.get_or_create(name=name)[0]
                # append mode joins old and new with a newline; an
                # all-whitespace value collapses to None.
                var.value = ((((var.value+"\n") if var.value else "") + (value.strip() or "")) if append else value.strip()) or None
                var.save()
            answer["value"] = value
            answer["changed"] = True
        else:
            answer["error"] = "Missing value! Please send data as application/x-www-form-urlencoded!"
        return HttpResponse(json.dumps(answer), content_type="application/json")
    elif request.method == "DELETE":
        if Variable.objects.filter(name=name).exists():
            Variable.objects.get(name=name).delete()
            answer["deleted"] = True
        else:
            answer["error"] = f"Var '{name}' not found!"
        return HttpResponse(json.dumps(answer), content_type="application/json")
    else:
        # Any other HTTP verb is rejected with a 400.
        answer["error"] = "Use GET, POST or DELETE"
        return HttpResponseBadRequest(json.dumps(answer), content_type="application/json")
# Chat
def chat_start(request):
    """Chat entry page: GET renders the key form, POST redirects into the chat."""
    if request.method == "POST":
        my_key = request.POST["mykey"]
        post_key = request.POST["postkey"]
        return redirect(f"/onlinevars/chat/{post_key}/{my_key}/")
    elif request.method == "GET":
        return render(request, "onlinevars/chat_start.html")
def chat(request, postkey, mykey):
    """Render the chat page with normalized variable keys."""
    # The reader key is stored without the 'chat_' prefix.
    my_key = mykey.replace("chat_", "")
    # Split the peer keys, strip prefixes/spaces, then re-prefix each one.
    cleaned_parts = postkey.replace("chat_", "").replace(" ", "").split(",")
    post_key = ",".join("chat_" + part for part in cleaned_parts)
    return render(request, "onlinevars/chat.html", context={"mykey": my_key, "postkey": post_key})
| StarcoderdataPython |
252717 | <gh_stars>0
#A library of code to examine properties of bulk water and near solutes
#
#Should eventually be able to handle local densities and fluctuations,
#solute-water and water-water energies, 3-body angles, hydrogen bonds,
#energy densities, and all of this as a function of space. Additionally,
#should also be able to compute interfaces, such as Willard-Chandler
#instantaneous interface, or vdW surface, SASA and volume of solute.
#
#Will work with pytraj interface for trajectory analysis, since this
#should later allow easier energy decomposition?
#If doesn't work out, will go back to sim package with netcdf plugin.
#
#Also, should have test script and some test system where know answers
#
import sys, os
import numpy as np
import scipy.optimize as optimize
from scipy.special import sph_harm
import waterlib as wl
#Define constants and unit conversions
#conversion for surface tension
kBJ = 1.38064852*(10**(-23))  # Boltzmann constant in J/K
temp = 300.0  # reference temperature in K
tomJm2 = kBJ*temp*1000.0*(10**20) #converts kBT/Angstrom^2 to mJ/m^2
#Convert potential energy to kBT: kB in kcal/(mol K) times 300 K
kBTkcal = 0.0019858775*300.0
#Water density
watdens = 0.033456 # molecules or oxygens per Angstrom ^ 3 near 300 K
#Define library of useful functions
def SASAperAtom(pos, radii, radius=1.4, nPoints=1000, nExpose=10):
    """Compute the per-atom solvent-accessible surface area.

    Inputs:
      pos - Nx3 array of atomic positions
      radii - N array of atomic radii
      radius - solvent probe radius to "roll" over the surface
      nPoints - number of points placed on each test sphere
      nExpose - minimum exposed points for an atom to count as surface
    Outputs:
      SASAper - SASA for each atom
      surfAtoms - array of 1 for solvent exposed, 0 for not on surface
    """
    spherePts = wl.spherepoints(nPoints)
    perAtomSASA, exposedFlags = wl.spheresurfaceareas(pos, radii + radius, spherePts, nExpose)
    return perAtomSASA, exposedFlags
def PepWatHBonds(allPos, pepAccInds, pepDonInds, watInds, distCut = 2.1, angCut = 30.0):
    """Currently kind of wack (does acceptor to hydrogen distance). Also, calculating
    H-bonds geometrically seems less useful.
    Inputs:
    allPos - full position array for trajectory frame (all atoms included)
    pepAccInds - global indices of peptide acceptors
    pepDonInds - global indices of peptide donors
    watInds - global indices of water atoms in selected hydration shell(s)
    distCut(=2.1) - distance cutoff for H-bond detection
    angCut(=30.0) - angle cutoff for H-bond detection
    Outputs:
    NBonds - number of detected H-bonds
    bondsPer - number H-bonds for each water molecule with peptide
    donors - indices of donors (H atoms only) as string
    acceptors - indices of acceptors as string
    """
    #Get H-bond info
    NBonds, watAcc, watDon, pepAcc, pepDon = wl.findhbonds(
        allPos[pepAccInds], allPos[pepDonInds], allPos[watInds], distCut, angCut)
    #And sort nicely into just acceptors and donors
    acceptorsList = []
    donorsList = []
    # One counter per water molecule; assumes watInds lists 3 atoms per
    # water in O,H,H order — TODO confirm with the caller.
    bondsWat = np.zeros(int(len(watInds)/3))
    for (j, val) in enumerate(pepAcc):
        # list * int repeats the index once per H-bond it participates in
        acceptorsList = acceptorsList + (val*[pepAccInds[j]])
    for (j, val) in enumerate(pepDon):
        donorsList = donorsList + (val*[pepDonInds[j]])
    for (j, val) in enumerate(watAcc):
        acceptorsList = acceptorsList + (val*[watInds[j]])
        # int(j/3) maps the atom index back to its water molecule
        bondsWat[int(j/3)] = bondsWat[int(j/3)] + val
    for (j, val) in enumerate(watDon):
        donorsList = donorsList + (val*[watInds[j]])
        bondsWat[int(j/3)] = bondsWat[int(j/3)] + val
    #Above uses properties of python lists to add each index the number of H-bonds it participates in
    bondsPer = bondsWat
    #For easy file writing, make donors and acceptors into strings of indices
    #Remember that the sim package indexes at zero!
    donors = ''.join(str(e)+"|" for e in donorsList)
    acceptors = ''.join(str(e)+"|" for e in acceptorsList)
    return NBonds, bondsPer, acceptors, donors
def BBHBonds(allPos, pepAccInds, pepDonInds, distCut=2.1, angCut=30.0):
    """Finds H bonds between two lists of acceptors and donors. Intended for
    just the peptide backbone.
    Inputs:
    allPos - full position array for trajectory frame
    pepAccInds - global indices of peptide acceptors
    pepDonInds - global indices of peptide donors
    distCut(=2.1) - distance cutoff for H-bond detection
    angCut(=30.0) - angle cutoff for H-bond detection
    Outputs:
    NBonds - number of detected H-bonds
    acceptors - indices of acceptors as string
    donors - indices of donors as string
    """
    # Geometric H-bond search in the compiled helper library.
    NBonds, accCounts, donCounts = wl.bbhbonds(allPos[pepAccInds], allPos[pepDonInds], distCut, angCut)
    # Repeat each global index once per H-bond it participates in.
    acceptorsList = []
    donorsList = []
    for j, count in enumerate(accCounts):
        acceptorsList.extend([pepAccInds[j]] * count)
    for j, count in enumerate(donCounts):
        donorsList.extend([pepDonInds[j]] * count)
    # Pipe-separated index strings for easy file writing.
    donors = ''.join(str(e) + "|" for e in donorsList)
    acceptors = ''.join(str(e) + "|" for e in acceptorsList)
    return NBonds, acceptors, donors
def WatHBonds(allPos, watInds, allWatInds, BoxDims, distCut = 2.1, angCut = 30.0):
    """Counts hydrogen bonds made by a selected set of waters with all waters.
    Also kind of wack, but kept because it is used in the peptide-surface
    pulling analysis; for a better, more general algorithm use HBondsGeneral.
    Inputs:
      allPos - full position array for the trajectory frame (all atoms included)
      watInds - global indices of water atoms in the selected hydration shell(s)
      allWatInds - global indices of ALL water atoms
      BoxDims - box dimensions for periodic wrapping (set to zero to turn off)
      distCut(=2.1) - distance cutoff for H-bond detection
      angCut(=30.0) - angle cutoff for H-bond detection
    Outputs:
      NBonds - total number of detected H-bonds
      bondsPer - number of detected H-bonds for each water molecule in the selection
      acceptors - indices of acceptors as a "|"-separated string
      donors - indices of donors as a "|"-separated string
    """
    #Geometric H-bond detection is delegated to the compiled waterlib routine
    NBonds, watAcc, watDon = wl.wathbonds(allPos[watInds], allPos[allWatInds], BoxDims, distCut, angCut)
    #Expand per-atom bond counts into flat index lists (each index repeated once
    #per H-bond) and accumulate per-molecule totals; atom j maps to molecule
    #j//3, which assumes three consecutive atoms per water in watInds order
    accList = []
    donList = []
    bondsWat = np.zeros(int(len(watInds) / 3))
    for j in range(len(watAcc)):
        accList.extend(watAcc[j] * [watInds[j]])
        bondsWat[j // 3] += watAcc[j]
    for j in range(len(watDon)):
        donList.extend(watDon[j] * [watInds[j]])
        bondsWat[j // 3] += watDon[j]
    bondsPer = bondsWat
    #Serialize index lists as "|"-separated strings for easy file writing
    #(indices are zero-based, following the sim package convention)
    donors = ''.join('%s|' % e for e in donList)
    acceptors = ''.join('%s|' % e for e in accList)
    return NBonds, bondsPer, acceptors, donors
def getCosAngs(subPos, Pos, BoxDims, lowCut=0.0, highCut=3.413):
    """Collects three-body angles for a set of central atoms.
    This is called getCosAngs, but actually returns the angles themselves
    (the cos(theta) -> theta conversion is faster in Fortran).
    Inputs:
      subPos - positions of atoms to measure tetrahedrality of (may be
               different from, a subset of, or identical to Pos)
      Pos - positions of ALL atoms that can make tetrahedral configurations
            (needed when subPos is not the same as Pos)
      BoxDims - current box dimensions, used for periodic imaging
      lowCut - lower cutoff for the nearest-neighbor shell (default 0.0)
      highCut - upper cutoff for the nearest-neighbor shell (default 3.413 -
                see Chaimovich, 2014; should really reflect the first g(r)
                peak of the chosen water model)
    Outputs:
      angVals - all angle values for the supplied configuration
      numAngs - per central atom, the dimension of the angle matrix returned
                by wl.tetracosang (used to track which angles belong to which
                central oxygen; added 07/09/2017, so older callers will break)
    """
    collected = []
    numAngs = np.zeros(len(subPos))
    #Neighbor search: use the specialized all-pairs routine when the two
    #position sets are identical, the general one otherwise
    if np.array_equal(subPos, Pos):
        neighbMask = wl.allnearneighbors(Pos, BoxDims, lowCut, highCut).astype(bool)
    else:
        neighbMask = wl.nearneighbors(subPos, Pos, BoxDims, lowCut, highCut).astype(bool)
    #For each central atom, compute the angles made with all neighbor pairs
    for aind in range(len(subPos)):
        shellPos = Pos[neighbMask[aind]]
        if len(shellPos) == 0:
            continue
        #Symmetric, square angle array (zero diagonal) from the compiled code
        angMat = wl.tetracosang(subPos[aind], shellPos, BoxDims)
        #Keep only the strict upper triangle so each angle appears once
        collected.extend(angMat[np.triu_indices(len(angMat), k=1)].tolist())
        numAngs[aind] = angMat.shape[0]
    angVals = np.array(collected)
    return angVals, numAngs
def tetrahedralMetrics(angVals, nBins=500, binRange=(0.0, 180.0)):
    """Computes tetrahedral-order statistics from sampled three-body angles.
    Inputs:
      angVals - all angle values sampled (degrees)
      nBins - number of histogram bins to use
      binRange - histogram bin range to apply (default (0.0, 180.0);
                 changed from a mutable list default to a tuple)
    Outputs:
      angDist - histogram counts of the angle values (not normalized)
      bins - bin edges used in histogramming
      fracTet - fraction of the distribution that is tetrahedral, i.e. with
                cos(angle) between -0.75 and 0.25 (see Chaimovich, 2014)
      avgCos - average cos(angle) within the tetrahedral peak
      stdCos - second moment of cos(angle) within the tetrahedral peak
               (both moments are 0.0 if no angles fall inside the peak,
               instead of raising ZeroDivisionError as before)
    """
    angVals = np.asarray(angVals, dtype=float)
    #Histogram the data - density=False so raw counts are returned
    angDist, bins = np.histogram(angVals, bins=nBins, range=binRange, density=False)
    #Peak edges in degrees: arccos(0.25) to arccos(-0.75)
    lowAng = np.arccos(0.25) * 180.0 / np.pi
    highAng = np.arccos(-0.75) * 180.0 / np.pi
    #Take index before since we want the histogram bin containing each edge
    startTet = np.argmax(bins > lowAng) - 1
    endTet = np.argmax(bins > highAng) - 1
    #NOTE: returns nan if angDist is all zeros (empty input), as before
    fracTet = np.sum(angDist[startTet:endTet]) / np.sum(angDist)
    #First and second moments of cos(angle) within the peak, vectorized
    #instead of the previous per-angle Python loop
    peakMask = (angVals >= lowAng) & (angVals <= highAng)
    if np.any(peakMask):
        cosPeak = np.cos(angVals[peakMask] * np.pi / 180.0)
        avgCos = np.mean(cosPeak)
        stdCos = np.mean(cosPeak**2)
    else:
        #Guard against division by zero when no angles land in the peak
        avgCos = 0.0
        stdCos = 0.0
    return angDist, bins, fracTet, avgCos, stdCos
def getOrderParamq(subPos, Pos, BoxDims, lowCut=0.0, highCut=8.0):
    """Finds angles for the 4 nearest neighbors of each water and returns, for
    all waters, the tetrahedral order parameter q of Errington and Debenedetti
    (2001): q = 1 for a perfect tetrahedral arrangement, and its ensemble
    average is 0 for an ideal gas.
    Inputs:
      subPos - positions of set of atoms to measure tetrahedrality of (may be different, subset, or same as Pos)
      Pos - positions of ALL atoms that can make tetrahedral configurations (needed if subPos not same as Pos)
      BoxDims - current box dimensions to account for periodicity
      lowCut - lower cutoff for nearest-neighbor shell (default 0.0)
      highCut - higher cutoff (default 8.0) for the shell searched to find the 4 nearest neighbors
    Outputs:
      qVals - an order parameter value for each central atom (stays 0.0 for atoms with no neighbors)
      distNeighbs - distances from each central oxygen to its 4 nearest neighbors
    """
    #Set-up array to hold results
    qVals = np.zeros(len(subPos))
    distNeighbs = np.zeros((len(subPos), 4))
    #Find nearest neighbors for ALL atoms in subPos
    #But make sure using efficient algorithm...
    #If subPos is same as Pos, use allnearneighbors instead
    if np.array_equal(subPos, Pos):
        nearNeighbs = wl.allnearneighbors(Pos, BoxDims, lowCut, highCut).astype(bool)
    else:
        nearNeighbs = wl.nearneighbors(subPos, Pos, BoxDims, lowCut, highCut).astype(bool)
    #Loop over each position in subPos, finding angle made with the closest 4 neighbors, then q
    for (i, apos) in enumerate(subPos):
        #Make sure have nearest neighbors...
        if np.sum(nearNeighbs[i]) > 0:
            #Re-image neighbor positions around the central atom before taking distances
            thisPos = wl.reimage(Pos[nearNeighbs[i]], apos, BoxDims)
            thisDists = np.linalg.norm(thisPos - apos, axis=1)
            #Sort by distance and keep only the 4 closest neighbors
            #NOTE(review): if fewer than 4 neighbors lie within highCut, the
            #assignment into distNeighbs[i,:] will fail to broadcast - confirm
            #that highCut is always large enough to guarantee 4 neighbors
            sortInds = np.argsort(thisDists)
            newPos = thisPos[sortInds][:4]
            distNeighbs[i,:] = thisDists[sortInds][:4]
            #below returns symmetric, square array (zero diagonal)
            tempAng = wl.tetracosang(apos, newPos, BoxDims)
            #Only want half of array, flattened (each neighbor-pair angle once)
            angVals = tempAng[np.triu_indices(len(tempAng),k=1)]
            #Errington-Debenedetti q from the 6 neighbor-pair angles
            qVals[i] = 1.0 - (3.0/8.0)*np.sum((np.cos(angVals*np.pi/180.0) + (1.0/3.0))**2)
    #Return all of the order parameter values
    return qVals, distNeighbs
def findSineCoeffs(allangs, Norder=180, doNormalize=False):
    """Given an array of angles, computes the sine-series coefficients to the
    given order. To get correctly normalized coefficients, divide by the total
    number of angles; this is not done by default, assuming the angles provided
    are per-frame samples.
    Inputs:
      allangs - array or list of angles (degrees assumed if any value > pi)
      Norder - (default 180) number of terms in the sine series (excludes k=0)
      doNormalize - (default False) if True, divides the coefficients by the
                    number of samples to correctly normalize
    Outputs:
      coeffs - Norder x 2 array; 1st column is k, 2nd column is the coefficient.
               Only sin(k*angle) terms are kept because the period is 0 to pi.
    """
    #Work on a float array so list input behaves correctly; previously a plain
    #list made (k+1)*allangs REPLICATE the list instead of scaling the angles
    allangs = np.asarray(allangs, dtype=float)
    #Check if angles are in radians - if any value exceeds pi, assume degrees
    if allangs.size > 0 and np.max(allangs) > np.pi:
        allangs = allangs * np.pi / 180.0
    coeffs = np.zeros((Norder, 2))
    korder = np.arange(1, Norder + 1)
    coeffs[:, 0] = korder
    #Vectorized over both k and the samples: row k of the outer product holds
    #k*angle for every sample
    coeffs[:, 1] = np.sqrt(2.0 / np.pi) * np.sum(np.sin(np.outer(korder, allangs)), axis=1)
    if doNormalize:
        #Fix: only the coefficient column is normalized; previously the whole
        #array was divided, corrupting the k column as well
        coeffs[:, 1] = coeffs[:, 1] / len(allangs)
    return coeffs
def distFromCoeffs(coeffs, angvals=None, Norder=60):
    """Reconstructs a normalized angle distribution from sine-series coefficients.
    Inputs:
      coeffs - coefficients for each term of a sine series of the form
               sin(k*angle), sorted from small to large k
      angvals - (default 0.0 to 180.0 by 0.01) angle values in degrees at which
                the distribution is evaluated; normalization gives a PDF in degrees
      Norder - (default 60) number of series terms (coefficients) to use
    Outputs:
      adist - the normalized distribution evaluated at angvals
    """
    if angvals is None:
        angvals = np.arange(0.0, 180.0, 0.01)
    #Evaluate the series in radians
    radvals = angvals * np.pi / 180.0
    adist = np.zeros(len(angvals))
    normfac = 0.0
    #Accumulate each sine term; only odd-k terms contribute to the integral
    #over [0, pi] (even-k sine terms integrate to zero), hence the
    #normalization sum only grows on odd k
    for kk in range(1, Norder + 1):
        adist = adist + coeffs[kk - 1] * np.sin(kk * radvals)
        if kk % 2 == 1:
            normfac = normfac + 2.0 * coeffs[kk - 1] / kk
    #Convert to a PDF over degrees using the (assumed uniform) grid spacing
    return adist / (normfac * (angvals[1] - angvals[0]))
def fitDist(refDists, Dist, bruteNs=200):
    """Fits a distribution as a linear combination of reference distributions,
    with the coefficients bounded to [0,1] and constrained to sum to one.
    Inputs:
      refDists - array with each reference distribution as a row
      Dist - (3-body angle) distribution to fit as a linear combination of
             the references
      bruteNs - number of grid points per parameter for the brute-force search
    Outputs:
      fitParams - fit parameters; NOTE these come from the brute-force grid
                  search, not from the SLSQP minimizations (which only feed resSq)
      resSq - per-point squared residuals of each SLSQP fit; shape is
              (len(Dist), number of starting points), one column per start
      resSigned - signed residuals of the brute-force fit at each point
    """
    #Define tolerance
    #NOTE(review): tolx is defined but never used below
    tolf = 1.0e-12
    tolx = 1.0e-12
    #Starting points: each pure single reference, plus the uniform mixture
    initParams = np.eye(refDists.shape[0])
    initParams = np.vstack((initParams, np.ones(refDists.shape[0]) * (1.0/refDists.shape[0])))
    #Objective to minimize: sum of squared residuals; any extra argument turns
    #on a soft sum-to-one penalty (used by brute, which cannot take an
    #equality constraint)
    def funcMin(vals, *withcon):
        #Give it parameter values, returns squared residuals
        func = np.sum((np.dot(vals, refDists) - Dist)**2)
        if withcon:
            func = func + (np.sum(vals) - 1.0)**2
        return func
    def jacFunc(vals):
        #Returns the Jacobian of the function to minimize
        func = np.dot(refDists, 2.0*(np.dot(vals, refDists) - Dist))
        return func
    def funcSquares(vals):
        #Gives vector of squared residuals to see where best/worst parts of fit are
        func = (np.dot(vals, refDists) - Dist)**2
        return func
    #Equality constraint: parameters must sum to one (SLSQP path only)
    cons = ({'type' : 'eq',
             'fun' : lambda x: np.sum(x) - 1.0,
             'jac' : lambda x: np.ones(len(x))})
    #And define bounds to keep all params between 0 and 1
    bnds = [(0.0,1.0)]*refDists.shape[0]
    #For each set of starting conditions, do minimization, then pick global min
    globMinInfo = None
    #Per-point squared residuals at each found minimum, one column per start
    #(checks if one part of curve fits better than another)
    resSq = np.zeros((refDists.shape[1], initParams.shape[0]))
    for (i, params) in enumerate(initParams):
        #If only one distribution given, don't use the sum-to-one constraint
        if refDists.shape[0] == 1:
            mininfo = optimize.minimize(funcMin, params, jac=jacFunc, method='SLSQP',
                                        bounds=bnds, options={'ftol':tolf})
        else:
            mininfo = optimize.minimize(funcMin, params, jac=jacFunc, method='SLSQP',
                                        constraints=cons, bounds=bnds, options={'ftol':tolf})
        #Track the best SLSQP result seen so far
        #NOTE(review): globMinInfo is never used after this loop - the returned
        #parameters come from the brute-force search below
        if globMinInfo != None:
            if mininfo.fun < globMinInfo.fun:
                globMinInfo = mininfo
        else:
            globMinInfo = mininfo
        resSq[:,i] = funcSquares(mininfo.x)
    #Compare to global min with brute force; finish=None keeps the raw grid
    #minimum (no local polishing), and args=(1,) enables the soft constraint
    if refDists.shape[0] == 1:
        (bruteMinInfo) = optimize.brute(funcMin, tuple(bnds), Ns=bruteNs, finish=None, full_output=True, disp=False)
    else:
        (bruteMinInfo) = optimize.brute(funcMin, tuple(bnds), args=(1,), Ns=bruteNs, finish=None, full_output=True, disp=False)
    fitParams = bruteMinInfo[0]
    #Also compute regular residuals, not squared
    resSigned = np.dot(fitParams, refDists) - Dist
    return fitParams, resSq, resSigned
def waterOrientationBinZ(Opos, Hpos, boxDim, refVec=[0.0, 0.0, 1.0], refBins=None, angBins=None):
    """Histograms water dipole and plane-normal orientation angles, binned along
    a reference direction.
    Inputs:
      Opos - all water oxygen positions
      Hpos - all water hydrogen positions
      boxDim - box dimensions for imaging
      refVec - reference vector for water orientation; default is the
               z-direction [0.0, 0.0, 1.0] (docstring previously said [1, 0, 0],
               which did not match the actual default); normalized internally
      refBins - bins along the refVec direction that waters are placed into
                (default: min to max oxygen coordinate along refVec, step 0.2)
      angBins - bins for the calculated angles (default 500 bins from 0 to 180)
    Outputs:
      plane2Dhist - 2D histogram of counts with angle bins along the first axis
                    and refVec bins along the second (plane-normal angles)
      dip2Dhist - same, but for the dipole vector angles
    """
    #Normalize the reference vector (asarray so a plain-list argument works too)
    refVec = np.asarray(refVec, dtype=float)
    refVec = refVec / np.linalg.norm(refVec)
    #Oxygen coordinates along refVec; duplicated per water for the plane-angle
    #histogram, which is paired with two angle entries per water
    zOpos = np.dot(Opos, refVec)
    zOposforH = np.repeat(zOpos, 2)
    #Compute all angles with respect to the reference; per the original note,
    #the dipole of each water is the sum of its OH bond vectors
    angDip, angPlane = wl.watorient(Opos, Hpos, refVec, boxDim)
    #Default binning along the reference direction and over the angles
    if refBins is None:
        refBins = np.arange(np.min(zOpos), np.max(zOpos), 0.2)
    if angBins is None:
        angBins = np.arange(0.0, 180.001, 180.0/500.0)
    #density=False gives raw counts; the old 'normed' keyword was removed from
    #numpy >= 1.24 (matches the density= usage elsewhere in this module)
    plane2Dhist, angEdges, refEdges = np.histogram2d(angPlane, zOposforH, bins=[angBins, refBins], density=False)
    dip2Dhist, angEdges, refEdges = np.histogram2d(angDip, zOpos, bins=[angBins, refBins], density=False)
    return plane2Dhist, dip2Dhist
def waterOrientation(Opos, Hpos, boxDim, refVec=[0.0, 0.0, 1.0]):
    """This is a wrapper for the waterlib function watorient.
    Inputs:
      Opos - all water oxygen positions
      Hpos - all water hydrogen positions
      boxDim - box dimensions for imaging
      refVec - the reference vector for water orientation; default is the
               z-direction [0.0, 0.0, 1.0] (the previous docstring said
               [1, 0, 0], which did not match the actual default)
    Outputs:
      dipAngs - angles between each water's dipole vector and refVec
      planeAngs - angles between each water's plane-normal vector and refVec
    """
    #Call watorient to get all angles
    dipAngs, planeAngs = wl.watorient(Opos, Hpos, refVec, boxDim)
    return dipAngs, planeAngs
def binnedVolumePofN(Opos, volBins, numBins, binMask=None):
    """Histograms the number of waters found in each spatial sub-volume, P(N).
    Inputs:
      Opos - array of oxygen 3D coordinates
      volBins - tuple of x, y and z bin-edge arrays tiling the space to place
                waters into; bins should be uniform (the analysis makes no
                sense otherwise)
      numBins - bin edges for histogramming the number of waters per sub-volume
      binMask - boolean array with one entry per (x,y,z) spatial bin; set
                entries to False to exclude those bins from the statistics
    Outputs:
      numWatHist - counts of sub-volumes containing each number of waters
    """
    nBinsXYZ = (len(volBins[0]) - 1, len(volBins[1]) - 1, len(volBins[2]) - 1)
    if binMask is None:
        binMask = np.ones(nBinsXYZ, dtype=bool)
    elif binMask.shape != nBinsXYZ:
        #Fix: this was a Python 2 print statement; now a Py3-compatible call
        print("Dimensions of mask for spatial bins does not match dimensions of spatial bins. Quitting.")
        sys.exit(2)
    #Assign each oxygen to a spatial bin in compiled Fortran code; per the
    #original notes this uses spheres inscribed in the bins rather than the
    #full cubes, for better statistics
    hist = wl.binongrid(Opos, volBins[0], volBins[1], volBins[2])
    #Histogram the per-bin water counts over the unmasked bins to get P(N)
    #(density=False for raw counts; 'normed' was removed from numpy >= 1.24)
    numWatHist, watedges = np.histogram(hist[binMask].flatten(), bins=numBins, density=False)
    return numWatHist
#Should also define some function "pointsInVol" that creates binMask based on given set of points or some geometry that should not be included when finding waters (i.e. like a hard sphere or protein)
def HBondsGeneral(accPos, donPos, donHPos, boxL, accInds, donInds, donHInds, distCut=3.5, angCut=150.0):
    """Wraps generalhbonds in the waterlib library to define H-bonds, and also returns their locations.
    Inputs:
      accPos - 3-dimensional vectors of acceptor heavy-atom positions
      donPos - 3D vectors of donor heavy-atom positions (an atom with multiple hydrogens must be listed once per hydrogen)
      donHPos - 3D vectors of donor hydrogen positions (same length as donPos, which may contain duplicates)
      boxL - box dimensions (note: 4th positional argument, before the index arrays)
      accInds - indices of acceptor atoms
      donInds - indices of donor heavy-atoms
      donHInds - indices of donor hydrogen atoms
                 NOTE(review): currently unused in the body - confirm whether donor H indices should appear in HBlist
      distCut - (default 3.5) heavy-atom to heavy-atom distance below which an H-bond may be defined
      angCut - (default 150.0) O-H---O angle cut-off, in degrees, above which an H-bond may be defined
    Outputs:
      NumHB - number of hydrogen bonds for the acceptor/donor set provided
      HBlist - NumHB x 2 array with the acceptor index in column 0 and the donor heavy-atom index in column 1
               (stored in a float array, since it is built from np.ones)
      HBloc - NumHB x 3 array of H-bond locations, halfway between the acceptor and the donor H
    """
    #Boolean matrix from the compiled routine: element [i,j] is True when
    #acceptor i is H-bonded to donor entry j
    HBboolMat = wl.generalhbonds(accPos, donPos, donHPos, boxL, distCut, angCut)
    HBboolMat = np.array(HBboolMat, dtype=bool)
    #Count H-bonds and pre-allocate outputs (-1 marks unfilled rows)
    NumHB = np.sum(HBboolMat)
    HBlist = (-1)*np.ones((NumHB, 2))
    HBloc = np.zeros((NumHB, 3))
    HBlistCount = 0
    for i, abool in enumerate(HBboolMat):
        theseDonors = donInds[abool]
        if len(theseDonors) > 0:
            theseDonHPos = donHPos[abool]
            #Image donor H locations around this acceptor before taking midpoints
            theseDonHPos = wl.reimage(theseDonHPos, accPos[i], boxL)
            for j, aDon in enumerate(theseDonors):
                HBlist[HBlistCount,:] = [accInds[i], aDon]
                HBloc[HBlistCount] = 0.5*(theseDonHPos[j] + accPos[i])
                HBlistCount += 1
    return NumHB, HBlist, HBloc
def computeSphericalFourierCoeffs(subPos, Pos, BoxDims, lowCut=0.0, highCut=3.413, minDegree=0, maxDegree=12):
    """Computes the vectors of Fourier coefficients for each degree of a spherical harmonic expansion
    as described by Keys, Iacovella, and Glotzer, 2011. subPos is treated as the central atoms,
    while Pos should include the atoms that may potentially be neighbors.
    Inputs:
      subPos - positions of atoms to treat as the central atoms
      Pos - positions of all other atoms, which will be considered for neighbor-searching; can be same as subPos
      BoxDims - box dimensions so that minimum images may be used
      lowCut - (default 0.0) the lower cutoff for the radial shell
      highCut - (default 3.413) the upper cutoff for the radial shell
      minDegree - (default 0) the minimum spherical harmonic degree (l)
      maxDegree - (default 12) the maximum spherical harmonic degree (l)
    Outputs:
      coeffVecs - a len(subPos) x (1 + maxDegree - minDegree) x (2*maxDegree + 1) complex matrix.
                  For each central atom, row (l - minDegree) holds the 2*l+1 coefficient
                  components in its first 2*l+1 slots (the rest stay zero, contributing
                  nothing to magnitudes)
      numNeighbs - number of neighbors for each central atom (needed to compute global
                   order parameters/coefficients by re-weighting and averaging over waters)
    """
    #Set-up the output matrix now since the size is known
    coeffVecs = np.zeros((len(subPos), 1+maxDegree-minDegree, 2*maxDegree+1), dtype=complex)
    #Per-atom neighbor counts; dtype float16 (presumably to save memory - confirm)
    numNeighbs = np.zeros(len(subPos), dtype='float16')
    #Neighbor search within [lowCut, highCut]; use the specialized all-pairs
    #routine when the two position sets are identical
    #(combining this search with other analyses might be possible but would
    #complicate using different cutoffs)
    if np.array_equal(subPos, Pos):
        nearNeighbs = wl.allnearneighbors(Pos, BoxDims, lowCut, highCut).astype(bool)
    else:
        nearNeighbs = wl.nearneighbors(subPos, Pos, BoxDims, lowCut, highCut).astype(bool)
    #Loop over each central atom and express its neighbors in spherical coordinates
    for (i, apos) in enumerate(subPos):
        #Make sure have nearest neighbors...
        if len(Pos[nearNeighbs[i]]) > 0:
            #Relative neighbor vectors after minimum-image wrapping
            tempPos = wl.reimage(Pos[nearNeighbs[i]], apos, BoxDims) - apos
            numNeighbs[i] = len(tempPos)
            #Radial distances (recomputed here; could be merged with the search later)
            rdists = np.linalg.norm(tempPos, axis=1)
            #Polar and azimuthal angles of each neighbor
            polarang = np.arccos(tempPos[:,2]/rdists)
            azimang = np.arctan2(tempPos[:,1], tempPos[:,0]) #Using special arctan2 function to get quadrant right
            #Fourier coefficient vectors: one complex component per m value,
            #m = -l, -l+1, ..., l, averaged over all neighbors
            for l in range(minDegree, maxDegree + 1):
                thisvec = np.zeros(2*l + 1, dtype=complex)
                #One spherical-harmonic vector per neighbor, so loop over neighbors
                for j in range(len(tempPos)):
                    thisvec += sph_harm(np.arange(-l, l+1), l, azimang[j], polarang[j])
                thisvec /= len(tempPos)
                #Store in the first 2*l+1 slots of the row for this degree
                coeffVecs[i,l-minDegree,:(2*l+1)] = thisvec
    return coeffVecs, numNeighbs
def get1BodyDOFs(coordO, coordH1, coordH2):
    """Identifies the 6 degrees of freedom of a single water molecule,
    assuming an inhomogeneous system.
    Returns a 6-vector: oxygen x, y, z; the two spherical-coordinate angles of
    the dipole direction (O to the H-H midpoint) relative to the oxygen; and
    the rotation angle about the dipole axis.
    COORDINATES SHOULD ALREADY BE IMAGED.
    """
    #Unit vector along the dipole direction (O to midpoint of the hydrogens)
    #(the O-D length could be hard-coded for speed - maybe later)
    dipole = 0.5 * (coordH1 + coordH2) - coordO
    dipole = dipole / np.linalg.norm(dipole)
    #Unit vector from H1 to H2, used to track rotation about the dipole
    hh = coordH2 - coordH1
    hh = hh / np.linalg.norm(hh)
    #Arbitrary reference axis defining the zero of the rotation angle about the
    #dipole (the original comment said "x axis", but the vector is the z axis)
    #NOTE(review): degenerate (zero cross product) if the dipole is parallel to z
    refAxis = np.array([0.0, 0.0, 1.0])
    perpRef = np.cross(dipole, refAxis)
    perpRef = perpRef / np.linalg.norm(perpRef)
    return np.array([coordO[0], coordO[1], coordO[2],
                     np.arctan2(dipole[1], dipole[0]),  #arctan2 covers the full [-pi, pi] range
                     np.arccos(dipole[2]),              #same as dotting with the unit z vector
                     np.arccos(np.dot(hh, perpRef))])   #rotation about the dipole
def get2BodyDOFs(coordO1, coordH11, coordH12, coordO2, coordH21, coordH22):
    """Computes the 6 pair-specific (2-body) degrees of freedom for two waters.
    Only the DOFs describing the relative distance and orientation of the two
    molecules are returned (the 6 single-water DOFs of the first water are
    excluded). The order is [rO1O2, theta1, theta2, phi, chi1, chi2], following
    the definitions of Lazaridis and Karplus.
    COORDINATES SHOULD ALREADY BE IMAGED.
    """
    dofVec = np.zeros(6)
    #Oxygen-oxygen separation and unit vectors in both directions
    oo12 = coordO2 - coordO1
    dofVec[0] = np.linalg.norm(oo12)
    oo12 = oo12 / dofVec[0]
    oo21 = -oo12
    #Unit dipole (O to H-H midpoint) directions for both waters
    #(lengths could be hard-coded for speed - maybe later)
    dip1 = 0.5 * (coordH11 + coordH12) - coordO1
    dip1 = dip1 / np.linalg.norm(dip1)
    dip2 = 0.5 * (coordH21 + coordH22) - coordO2
    dip2 = dip2 / np.linalg.norm(dip2)
    #H-H unit vectors, oriented to start from whichever H is closer to the
    #other water's oxygen (Lazaridis and Karplus, 1996)
    if np.linalg.norm(coordH11 - coordO2) > np.linalg.norm(coordH12 - coordO2):
        hh1 = coordH11 - coordH12
    else:
        hh1 = coordH12 - coordH11
    hh1 = hh1 / np.linalg.norm(hh1)
    if np.linalg.norm(coordH21 - coordO1) > np.linalg.norm(coordH22 - coordO1):
        hh2 = coordH21 - coordH22
    else:
        hh2 = coordH22 - coordH21
    hh2 = hh2 / np.linalg.norm(hh2)
    #Unit normals of the planes spanned by each dipole and the O-O axis
    norm1 = np.cross(oo12, dip1)
    norm1 = norm1 / np.linalg.norm(norm1)
    norm2 = np.cross(dip2, oo21)
    norm2 = norm2 / np.linalg.norm(norm2)
    dofVec[1] = np.arccos(np.dot(dip1, oo12))    #theta1
    dofVec[2] = np.arccos(np.dot(dip2, oo21))    #theta2
    dofVec[3] = np.arccos(np.dot(norm1, norm2))  #phi
    dofVec[4] = np.arccos(np.dot(hh1, norm1))    #chi1
    dofVec[5] = np.arccos(np.dot(hh2, norm2))    #chi2
    return dofVec
def get3BodyDOFs(coordO1, coordH11, coordH12, coordO2, coordH21, coordH22, coordO3, coordH31, coordH32):
    """Like the 2-body function above, but gives the 6 DOFs pertaining to just
    the 3-body degrees of freedom.
    Order in the returned vector: rO1O3 (distance), theta3b (three-body angle),
    omega (rotation of the 3rd water's oxygen around the O1-O2 axis), then
    theta3, phi3, and chi3 (defined as for the second water in the 2-body DOFs,
    but for the third water).
    NOTE(review): coordH21 and coordH22 are accepted but never used, and rO2O1
    below is computed but unused - confirm whether that is intentional.
    COORDINATES SHOULD ALREADY BE IMAGED.
    """
    dofVec = np.zeros(6)
    #Unit vector along O1->O2 (direction only; the 1-2 distance is a 2-body DOF)
    rO1O2 = coordO2 - coordO1
    rO1O2 /= np.linalg.norm(rO1O2)
    rO2O1 = -rO1O2
    #O1->O3 separation: its length is the first returned DOF
    rO1O3 = coordO3 - coordO1
    dofVec[0] = np.linalg.norm(rO1O3)
    rO1O3 /= dofVec[0]
    rO3O1 = -rO1O3
    #Unit dipole (O to H-H midpoint) directions of waters 1 and 3
    rO1D1 = 0.5*(coordH11 + coordH12) - coordO1
    rO1D1 /= np.linalg.norm(rO1D1)
    rO3D3 = 0.5*(coordH31 + coordH32) - coordO3
    rO3D3 /= np.linalg.norm(rO3D3)
    #H-H unit vector of water 3, oriented from whichever H is closer to O1
    #(same convention as the 2-body function)
    if np.linalg.norm(coordH31 - coordO1) <= np.linalg.norm(coordH32 - coordO1):
        rH31H32 = coordH32 - coordH31
    else:
        rH31H32 = coordH31 - coordH32
    rH31H32 /= np.linalg.norm(rH31H32)
    #Unit plane normals: (O1-O2 axis, dipole 1), (O1-O3 axis, dipole 1),
    #and (dipole 3, O3-O1 axis)
    cross12 = np.cross(rO1O2, rO1D1)
    cross12 /= np.linalg.norm(cross12)
    cross13 = np.cross(rO1O3, rO1D1)
    cross13 /= np.linalg.norm(cross13)
    cross31 = np.cross(rO3D3, rO3O1)
    cross31 /= np.linalg.norm(cross31)
    #Component of the O1->O3 direction perpendicular to the O1->O2 axis,
    #used as the reference for the rotation angle omega
    rperp = rO1O3 - np.dot(rO1O2, rO1O3)*rO1O2
    rperp /= np.linalg.norm(rperp)
    dofVec[1] = np.arccos(np.dot(rO1O2, rO1O3))   #theta3b, the three-body angle
    dofVec[2] = np.arccos(np.dot(rperp, cross12)) #omega
    dofVec[3] = np.arccos(np.dot(rO3D3, rO3O1))   #theta3
    dofVec[4] = np.arccos(np.dot(cross13, cross31)) #phi3
    dofVec[5] = np.arccos(np.dot(rH31H32, cross31)) #chi3
    return dofVec
def distanceMetric1B(vec1, vec2, Rsq=(0.09572**2), sintw=(np.sin(104.52*np.pi/180.0)**2)):
    """Distance metric appropriate to 1-body DOF vectors (6-dimensional).
    A direct Euclidean distance is not appropriate for curvilinear coordinates,
    so the angular differences are weighted by local-curvature factors obtained
    from a second-order Taylor expansion of the Euclidean distance in spherical
    coordinates. Rsq defaults to the squared O-H bond length and sintw to the
    squared sine of the H-O-H angle.
    NOTE(review): the sin factor pairs index 3 with index 4; get1BodyDOFs
    stores the arctan2 (azimuthal) angle at index 3 and the arccos (polar)
    angle at index 4 - confirm the intended index mapping.
    """
    sqDiffs = (vec2 - vec1) ** 2
    weights = np.array([1.0, 1.0, 1.0,                          #oxygen x, y, z
                        Rsq,                                     #first angular DOF
                        Rsq * np.sin(vec2[3]) * np.sin(vec1[3]), #second angular DOF, local curvature
                        Rsq * sintw])                            #rotation about the dipole
    return np.sqrt(np.dot(weights, sqDiffs))
def distanceMetric2B(vec1, vec2, Rsq=(0.09572**2), sintw=(np.sin(104.52*np.pi/180.0)**2)):
    """Distance metric appropriate to 2-body DOF vectors.
    Includes the 1-body degrees of freedom, so 12-dimensional vectors are
    expected (1-body DOFs in slots 0-5, pair DOFs in slots 6-11). As in the
    1-body metric, angular differences are weighted by local-curvature factors
    from a second-order Taylor expansion of the spherical-coordinate Euclidean
    distance; Rsq is the squared O-H bond length and sintw the squared sine of
    the H-O-H angle.
    """
    sqDiffs = (vec2 - vec1) ** 2
    weights = np.array([
        1.0, 1.0, 1.0,                           #oxygen position of water 1
        Rsq,                                      #1-body angular DOF
        Rsq * np.sin(vec2[3]) * np.sin(vec1[3]),  #1-body angular DOF, local curvature
        Rsq * sintw,                              #rotation about dipole 1
        1.0,                                      #O1-O2 distance
        Rsq, Rsq,                                 #theta1, theta2
        Rsq * np.sin(vec2[8]) * np.sin(vec1[8]),  #phi, local curvature
        Rsq * sintw, Rsq * sintw])                #chi1, chi2
    return np.sqrt(np.dot(weights, sqDiffs))
def distanceMetric3B(vec1, vec2, Rsq=(0.09572**2), sintw=(np.sin(104.52*np.pi/180.0)**2)):
    """Computes distance metric appropriate to 3-body DOFs.
    A direct Euclidean distance is not appropriate since curvilinear coordinates
    are used, so this defines a distance utilizing local curvature that is exact
    for very small differences. It comes from Taylor-expanding the formula for
    Euclidean distance in spherical coordinates with respect to both angles to
    second order.
    Note that this includes the 1- and 2-body degrees of freedom, so
    18-dimensional vectors are expected (1-body in slots 0-5, 2-body in 6-11,
    3-body in 12-17). For the 3-body terms, vec[12] (the O1-O3 distance) plays
    the role of the radius weighting the angular differences at indices 13-14.
    Rsq defaults to the squared O-H bond length and sintw to the squared sine
    of the H-O-H angle.
    """
    diffs = (vec2 - vec1)**2
    #Terms 0-5: 1-body block; 6-11: 2-body block; 12-17: 3-body block,
    #with the spherical-style sin factors supplying the local curvature
    dist = np.sqrt(diffs[0] + diffs[1] + diffs[2] + Rsq*diffs[3]
                   + Rsq*np.sin(vec2[3])*np.sin(vec1[3])*diffs[4]
                   + Rsq*sintw*diffs[5]
                   + diffs[6] + Rsq*diffs[7] + Rsq*diffs[8]
                   + Rsq*np.sin(vec2[8])*np.sin(vec1[8])*diffs[9]
                   + Rsq*sintw*diffs[10] + Rsq*sintw*diffs[11]
                   + diffs[12] + vec2[12]*vec1[12]*diffs[13]
                   + vec2[12]*vec1[12]*np.sin(vec2[13])*np.sin(vec1[13])*diffs[14]
                   + Rsq*diffs[15]
                   + Rsq*np.sin(vec2[15])*np.sin(vec1[15])*diffs[16]
                   + Rsq*sintw*diffs[17])
    return dist
| StarcoderdataPython |
import copy
import json
import sys
import uuid
from io import BufferedIOBase, TextIOBase
from ipaddress import (
ip_network,
ip_address,
IPv4Address,
IPv4Network,
IPv6Address,
IPv6Network
)
from jsonschema import Draft7Validator, ValidationError
from typing import (
Any,
Union
)
def prefixUUID(pre: str = "PREFIX", max_len: int = 30) -> str:
    """
    Build a random identifier of the form "<pre>-<uuid hex>", capped at max_len
    :param pre: prefix
    :param max_len: maximum length of the returned string
    :return: prefixed UUID string
    """
    # Budget left for the random hex part after the prefix (plus slack)
    hex_budget = max_len - (len(pre) + 10)
    random_part = uuid.uuid4().hex[:hex_budget]
    return f"{pre}-{random_part}"[:max_len]
def safe_load(file_obj: Union[str, BufferedIOBase, TextIOBase]) -> dict:
    """
    Safely load JSON from a file path or an open file object
    :param file_obj: json file path/object to load
    :return: parsed json data, or {} on any failure or unsupported input type
    """
    try:
        if isinstance(file_obj, (BufferedIOBase, TextIOBase)):
            return json.load(file_obj)
        if isinstance(file_obj, str):
            with open(file_obj, "rb") as f:
                return json.load(f)
    except Exception:
        # Intentionally broad: a "safe" load must never raise, regardless of
        # I/O or parse errors (the caught exception was previously bound to an
        # unused variable)
        return {}
    # Fix: previously fell through and implicitly returned None for inputs of
    # any other type, contradicting the declared dict return type
    return {}
def valid_ip(ip: Union[bytes, str]) -> Union[None, IPv4Address, IPv6Address, IPv4Network, IPv6Network]:
    """
    Validate and load an IP address or network
    :param ip: IP/Network string/bytes to load (bytes are interpreted by
        ipaddress as a packed address, e.g. 4 bytes -> IPv4)
    :return: the parsed IP/Network object, or None if invalid or unsupported
    """
    if isinstance(ip, (str, bytes)):
        # Fix: use a bytes separator for bytes input; '"/" in b"..."' raises an
        # uncaught TypeError, so bytes input previously crashed the function
        sep = "/" if isinstance(ip, str) else b"/"
        try:
            return ip_network(ip, strict=False) if sep in ip else ip_address(ip)
        except ValueError as e:
            print(e)
    return None
class ValidatorJSON(Draft7Validator):
    """
    Draft-7 JSON-schema validator extended with helpers that validate an
    instance as one specific exported type of the schema (supports both
    "oneOf"-style and "properties"-style top-level schemas)
    """
    # Custom Methods
    def iter_errors_as(self, instance: dict, _type: str) -> list:
        """
        Lazily iterate the validation errors of the instance validated as the given exported type
        :param instance: message to validate
        :param _type: exported type to validate against
        :return: iterator of validation errors (annotated as list for historical reasons)
        :raise TypeError: _type is not an exported object type
        """
        if "oneOf" in self.schema and self._is_exported(_type):
            exp = self._get_definition(_type)
            exp_type = exp.get('type', '')
            if exp_type != 'object':
                raise TypeError(f'field type object is expected, field type: {exp_type}')
            # Promote the definition of _type to the schema root so the bare
            # instance can be validated directly against it
            tmp_schema = copy.deepcopy(self.schema)
            del tmp_schema['oneOf']
            del tmp_schema['definitions'][_type]
            tmp_schema.update(exp)
            return self.iter_errors(instance, _schema=tmp_schema)
        if "properties" in self.schema and self._is_exported(_type):
            # Wrap the instance under its matching top-level property name
            props = [*self.schema['properties'].keys()]
            msg_wrapper = props[props.index(_type.lower())]
            instance = {msg_wrapper: instance}
            return self.iter_errors(instance)
        raise TypeError('field type is not an exported field')

    def is_valid_as(self, instance: dict, _type: str) -> bool:
        """
        Check if the instance is valid under the current schema as the given exported type
        :param instance: message to validate
        :param _type: type to validate against
        :return: bool - Valid/Invalid
        """
        try:
            self.validate_as(instance, _type)
            return True
        except ValidationError:
            return False

    def validate_as(self, instance: dict, _type: str):
        """
        Validate the instance under the current schema as the given exported type
        :param instance: message to validate
        :param _type: type to validate against
        :raise ValidationError: the instance is invalid
        :raise TypeError: _type is not an exported object type
        """
        if "oneOf" in self.schema and self._is_exported(_type):
            exp = self._get_definition(_type)
            exp_type = exp.get('type', '')
            if exp_type != 'object':
                raise TypeError(f'field type object is expected, field type: {exp_type}')
            # Same root-promotion trick as in iter_errors_as
            tmp_schema = copy.deepcopy(self.schema)
            del tmp_schema['oneOf']
            del tmp_schema['definitions'][_type]
            tmp_schema.update(exp)
            return self.validate(instance, _schema=tmp_schema)
        if "properties" in self.schema and self._is_exported(_type):
            props = [*self.schema['properties'].keys()]
            msg_wrapper = props[props.index(_type.lower())]
            instance = {msg_wrapper: instance}
            return self.validate(instance)
        raise TypeError('field type is not an exported field')

    # Helper Methods
    def _is_exported(self, _type: str) -> bool:
        """
        Check if the given type is exported by the schema
        :param _type: name of type to check if exported
        :return: bool - type is exported type
        """
        if "oneOf" in self.schema:
            exported = [exp.get('$ref', '') for exp in self.schema.get('oneOf', [])]
        elif "properties" in self.schema:
            _type = _type.lower()
            exported = {*self.schema.get('properties', {}).keys()}
            exported.update({exp.get('$ref', '') for exp in self.schema.get('properties', {}).values()})
            exported = list(exported)
        else:
            raise TypeError("Schema format invalid")
        return any([exp.endswith(f'{_type}') for exp in exported])

    def _get_definition(self, _type: str) -> dict:
        """
        Get the definition for the given type
        :param _type: type to get the definition for
        :return: dict - type definition ({} if not defined)
        """
        return self.schema.get('definitions', {}).get(_type, {})

    def _toStr(self, s: Any) -> str:
        """
        Convert a given value to a string using the system default encoding
        :param s: item to convert to a string
        :return: converted string
        """
        return s.decode(sys.getdefaultencoding(), 'backslashreplace') if hasattr(s, 'decode') else str(s)

    def _default_encoding(self, itm: Any) -> Any:
        """
        Recursively re-encode the given object to the system default encoding
        :param itm: object/type to convert to the system default
        :return: system default converted object/type
        """
        if isinstance(itm, dict):
            return {self._toStr(k): self._default_encoding(v) for k, v in itm.items()}
        if isinstance(itm, list):
            return [self._default_encoding(i) for i in itm]
        if isinstance(itm, tuple):
            # Fix: previously returned a generator expression instead of a tuple
            return tuple(self._default_encoding(i) for i in itm)
        if isinstance(itm, (bytes, bytearray)):
            return self._toStr(itm)
        if isinstance(itm, (complex, int, float, object)):
            # NOTE(review): 'object' matches everything, so the fallthrough
            # below is unreachable - confirm the intended type list
            return itm
        # Also fixed here: stray dataset residue ("| StarcoderdataPython") was
        # fused onto this line, which would have raised NameError at call time
        return self._toStr(itm)
9738495 | # import library socket karena menggunakan IPC socket
# import the socket library, since IPC sockets are used
import socket as sc
# IP address to bind to
HOST = "192.168.1.15"
# port to bind to
PORT = 4044
# buffer size used when reading chunks of the file to send
buffer_size = 1024
# create the socket (SOCK_STREAM = TCP)
s = sc.socket(sc.AF_INET, sc.SOCK_STREAM)
# bind to the IP and port
s.bind((HOST, PORT))
# start listening (backlog of 1)
s.listen(1)
# block until a client connects
conn, addr = s.accept()
print ('Connection address:', addr)
# open the file named "file_didownload.txt"
# still hard coded; the file must be in the same folder as this python script
f = open("file_didownload.txt", "rb")
try:
    # read one buffer-sized chunk of the file
    byte = f.read(buffer_size)
    # loop until END OF FILE; in python EOF is b''
    while byte != b'':
        # send the chunk read from the file to the client
        conn.send(byte)
        # read the next chunk, until EOF
        byte = f.read(buffer_size)
finally:
    print ("end sending")
    # close the file once it has been fully read
    f.close()
    # close the listening socket
    s.close()
    # close the client connection
    conn.close()
6586178 | import matplotlib.pyplot as plt
import numpy as np
def raw_plot(data):
    """Plot raw episodic returns against cumulative timesteps.

    Args:
        data: Numpy 2-D array. Row 0 contains episode lengths/timesteps and
            row 1 contains the corresponding episodic returns.
    """
    steps = np.cumsum(data[0])
    returns = data[1]
    plt.plot(steps, returns)
    plt.xlabel('Steps')
    label = plt.ylabel("Return", labelpad=25)
    label.set_rotation(0)
    plt.pause(0.001)
    plt.show()
def smoothed_curve(returns, ep_lens, x_tick=1000, window_len=1000):
    """Average episodic returns over a sliding window, sampled every x_tick steps.

    Args:
        returns: 1-D numpy array with episodic returns
        ep_lens: 1-D numpy array with episode lengths (timesteps per episode)
        x_tick (int): Bin size (timesteps between sample points)
        window_len (int): Length of averaging window in timesteps

    Returns:
        List of windowed mean returns, one per elapsed x_tick of steps
        (empty when fewer than x_tick total steps were recorded).
    """
    smoothed = []
    cum_steps = np.cumsum(ep_lens)
    if cum_steps[-1] >= x_tick:
        ticks = np.arange(x_tick, cum_steps[-1] + 1, x_tick)
        for tick in ticks:
            # Episodes whose cumulative step count falls strictly inside
            # the window (tick - window_len, tick).
            in_window = (cum_steps > max(0, tick - window_len)) & (cum_steps < tick)
            window_returns = returns[in_window]
            if window_returns.any():
                smoothed.append(np.mean(window_returns))
    return smoothed
def smoothed_plot(data):
    """Plot window-averaged returns against elapsed timesteps.

    Args:
        data: Numpy 2-D array. Row 0 contains episode lengths/timesteps and
            row 1 contains the corresponding episodic returns.
    """
    bin_size = 1000
    window = 1000
    rets = smoothed_curve(returns=data[1], ep_lens=data[0],
                          x_tick=bin_size, window_len=window)
    xs = np.arange(1, len(rets) + 1) * bin_size
    plt.plot(xs, rets)
    plt.xlabel('Steps')
    label = plt.ylabel("Return", labelpad=25)
    label.set_rotation(0)
    plt.pause(0.001)
    plt.show()
if __name__ == '__main__':
    # Load logged data: row 0 = episode lengths, row 1 = returns.
    data = np.loadtxt("mover0.txt"); plt.close()
    # Plot all data without smoothing
    # raw_plot(data)
    # Smoothed plot
    smoothed_plot(data)
| StarcoderdataPython |
9600955 | <gh_stars>1-10
from threading import current_thread
from django.conf import settings
from django.contrib.auth.models import User
from ralph.account.models import Region
_requests = {}
def get_actual_regions():
    """Return the regions stored for the current request's thread.

    Falls back to the default region when no request data was recorded
    (e.g. outside of the request/response cycle).
    """
    data = _requests.get(current_thread().name)
    if data is None:
        return Region.objects.filter(
            name=settings.DEFAULT_REGION_NAME,
        )
    return data['regions']
class RegionMiddleware(object):
    """Records the requesting user's regions in the per-thread registry on
    request, and clears the entry again on response."""

    def process_request(self, request):
        # Resolve the acting user: the authenticated one, or an API-key
        # authenticated user passed via query parameters.
        if not hasattr(request, 'user'):
            return
        if request.user.is_anonymous():
            try:
                user = User.objects.get(
                    username=request.GET.get('username'),
                    api_key__key=request.GET.get('api_key'),
                )
            except User.DoesNotExist:
                user = None
        else:
            user = request.user
        if user:
            _requests[current_thread().name] = {
                'user_id': user.id,
                'regions': user.profile.get_regions(),
            }

    def process_response(self, request, response):
        # Drop this thread's entry so worker threads don't leak stale data.
        if hasattr(request, 'user') and not request.user.is_anonymous():
            _requests.pop(current_thread().name, None)
        return response
| StarcoderdataPython |
1865082 | <reponame>jyooru/wigle-csv
from datetime import datetime
import pytest
from wigle_csv.reader import read
from . import data_path
@pytest.mark.parametrize(
    ["number", "ignore_preheader"],
    # Cartesian product: preheader fixtures 1-4, each with/without ignoring.
    [[x, bool(y)] for x in range(1, 5) for y in range(2)],
)
def test_preheader(number: int, ignore_preheader: bool) -> None:
    """read() parses the WigleWifi preheader unless told to ignore it."""
    with open(data_path / f"preheader_{number}.csv") as file:
        preheader, _ = read(file, ignore_preheader=ignore_preheader)
    if ignore_preheader or (number == 4):
        # Fixture 4 has no preheader at all; otherwise it is skipped on request.
        assert preheader is None
    else:
        assert preheader is not None
        assert preheader.format_version == "WigleWifi-1.4"
        if number == 3:
            # Fixture 3 carries only the format version; all device fields empty.
            assert preheader.app_release is None
            assert preheader.model is None
            assert preheader.release is None
            assert preheader.device is None
            assert preheader.display is None
            assert preheader.board is None
            assert preheader.brand is None
        else:
            assert preheader.app_release == "2.53"
            assert preheader.model == "X-37B"
            assert preheader.release == "11.0.0"
            assert preheader.device == "felgercarb"
            assert preheader.display == "OPSPLS1.76-1-S"
            assert preheader.board == "snodgrass"
            assert preheader.brand == "Moog"
@pytest.mark.parametrize(
    ["number"],
    [[x] for x in range(1, 3)],
)
def test_radio(number: int) -> None:
    """Each radio type (WIFI/WCDMA/BLE) parses to the expected row values.

    Fixture 1 includes device names; fixture 2 leaves the name column empty.
    """
    with open(data_path / f"radio_{number}.csv") as file:
        _, reader = read(file)
        for row in reader:
            if row.type == "WIFI":
                assert row.address == "1a:9f:ee:5c:71:c6"
                assert row.capabilities == "[WPA2-EAP-CCMP][ESS]"
                assert row.timestamp == datetime(2018, 8, 1, 13, 8, 27)
                assert row.channel == 161
                assert row.rssi == -43
                assert row.latitude == 37.76578028
                assert row.longitude == -123.45919439
                assert row.altitude == 67
                assert row.accuracy == 3.2160000801086426
                if number == 1:
                    assert row.name == "Scampoodle"
                else:
                    assert row.name is None
            elif row.type == "WCDMA":
                assert row.address == "310410_56967_4118917"
                assert row.capabilities == "WCDMA;310410"
                assert row.timestamp == datetime(2018, 8, 1, 13, 8, 27)
                assert row.channel == 3485
                assert row.rssi == -81
                assert row.latitude == 37.72090053
                assert row.longitude == -122.44579219
                assert row.altitude == 104
                assert row.accuracy == 34.30400085449219
                if number == 1:
                    assert row.name == "AT&T Mobility"
                else:
                    assert row.name is None
            elif row.type == "BLE":
                assert row.address == "1a:9f:ee:5c:71:c6"
                assert row.capabilities == "Misc [LE]"
                assert row.timestamp == datetime(2018, 8, 1, 13, 8, 27)
                assert row.channel == 0
                assert row.rssi == -67
                assert row.latitude == 37.73090571
                assert row.longitude == -122.42877987
                assert row.altitude == 104
                assert row.accuracy == 49.3120002746582
                if number == 1:
                    assert row.name == "Jabra Headset"
                else:
                    assert row.name is None
| StarcoderdataPython |
3596505 | import asyncio
import aiohttp
import json
import ssl
import pyrebase
from firebasedata import LiveData
from .rcs_livesession import RCSLiveSession
DEFAULT_BASE_URL = "https://api.rcsnail.com/v1/"
FIREBASE_CONFIG = {
"apiKey": "<KEY>",
"authDomain": "rcsnail-api.firebaseapp.com",
"databaseURL": "https://rcsnail-api.firebaseio.com",
"projectId": "rcsnail-api",
"storageBucket": "rcsnail-api.appspot.com",
"messagingSenderId": "485865779952"
}
class RCSnail(object):
    """
    This is RCSnail main class: handles Firebase authentication and
    enqueuing a client for a live remote-control car session.
    """

    def __init__(self):
        self.__auth = None
        self.__db = None
        self.__user = None
        self.__firebase_app = pyrebase.initialize_app(FIREBASE_CONFIG)
        self.client_session = None
        self.live_session = None

    def sign_in_with_email_and_password(self, login_or_token=None, password=None):
        """Authenticate the user against Firebase.

        :param login_or_token: e-mail address (with password) or a custom
            auth token (without password)
        :param password: password for the e-mail account, or None/"" to use
            token-based sign-in
        :raises Exception: when neither credentials nor a token are given
        """
        assert login_or_token is None or isinstance(login_or_token, str), login_or_token
        assert password is None or isinstance(password, str), password
        # Get a reference to the auth service
        self.__auth = self.__firebase_app.auth()
        # Log the user in.
        # BUG FIX: the original checked `password != ""`, which treated a
        # None password (explicitly allowed by the assert above) as a real
        # password and attempted e-mail sign-in with None.
        if password:
            self.__user = self.__auth.sign_in_with_email_and_password(login_or_token, password)
        elif login_or_token:
            # Log in with token
            self.__user = self.__auth.sign_in_with_custom_token(login_or_token)
        else:
            raise Exception("User name and password missing")
        # Get a reference to the database service
        self.__db = self.__firebase_app.database()

    async def enqueue(self, loop, new_frame_callback, new_telemetry_callback=None, track="private", car="") -> None:
        """
        Adding client to the queue to wait for the car becoming available.
        Runs the live session once the queue endpoint answers.
        """
        ignore_aiohttp_ssl_eror(loop)
        headers = {"Authorization": "Bearer " + self.__user['idToken']}
        self.client_session = aiohttp.ClientSession(headers=headers)
        data = {"track": track, "car": car}
        r = await self.client_session.post(DEFAULT_BASE_URL + "queue", data=data)
        json_body = await r.json()
        if 'queueUrl' in json_body:
            self.live_session = RCSLiveSession(rcs=self,
                                               firebase_app=self.__firebase_app,
                                               auth=self.__auth,
                                               queueUrl=json_body['queueUrl'],
                                               queueUpdateUrl=json_body['queueUpdateUrl'],
                                               queueKeepAliveTime=json_body['queueKeepAliveTime'],
                                               loop=loop)
            await self.live_session.run(new_frame_callback, new_telemetry_callback)
        else:
            # Surface the raw API error payload to the caller.
            raise Exception(json.dumps(json_body))

    async def close_client_session(self):
        """Close the underlying aiohttp client session."""
        await self.client_session.close()

    # gear reverse: -1, neutral: 0, drive: 1
    # steering -1.0...1.0
    # throttle 0..1.0
    # braking 0..1.0
    async def updateControl(self, gear, steering, throttle, braking):
        """Forward the latest control inputs to the live session, if any."""
        if self.live_session is not None:
            await self.live_session.updateControl(gear, steering, throttle, braking)
def ignore_aiohttp_ssl_eror(loop, aiohttpversion='3.5.4'):
    """Ignore aiohttp #3535 issue with SSL data after close

    There appears to be an issue on Python 3.7 and aiohttp SSL that throws a
    ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data
    after close notify (_ssl.c:2609)) after we are already done with the
    connection. See GitHub issue aio-libs/aiohttp#3535

    Given a loop, this sets up a exception handler that ignores this specific
    exception, but passes everything else on to the previous exception handler
    this one replaces.

    If the current aiohttp version is not exactly equal to aiohttpversion
    nothing is done, assuming that the next version will have this bug fixed.
    This can be disabled by setting this parameter to None
    """
    if aiohttpversion is not None and aiohttp.__version__ != aiohttpversion:
        return

    # Remember the handler we are replacing so non-matching errors still
    # reach it.
    orig_handler = loop.get_exception_handler() or loop.default_exception_handler

    def ignore_ssl_error(loop, context):
        if context.get('message') == 'SSL error in data received':
            # validate we have the right exception, transport and protocol
            exception = context.get('exception')
            protocol = context.get('protocol')
            if (
                isinstance(exception, ssl.SSLError) and exception.reason == 'KRB5_S_INIT' and
                isinstance(protocol, asyncio.sslproto.SSLProtocol) and
                isinstance(protocol._app_protocol, aiohttp.client_proto.ResponseHandler)
            ):
                if loop.get_debug():
                    asyncio.log.logger.debug('Ignoring aiohttp SSL KRB5_S_INIT error')
                # Swallow this specific error only.
                return
        # NOTE(review): a custom handler set via set_exception_handler takes
        # (loop, context); only default_exception_handler takes (context)
        # alone — confirm this call signature against the handler in use.
        orig_handler(context)

    loop.set_exception_handler(ignore_ssl_error)
1723295 | import datetime
from functools import wraps
from flask import Flask, request, jsonify, Response, make_response
from flask_pymongo import PyMongo
# hash password and check password
from werkzeug.security import generate_password_hash, check_password_hash
# jwt tokens
import jwt
# turn data mongo legible in json format
from bson import json_util
# convert json information in bson (mongodb information)
from bson.objectid import ObjectId
app = Flask(__name__)
# NOTE(review): hard-coded secret key and DB URI; consider loading from the
# environment for anything beyond local development.
app.config['SECRET_KEY'] = 'thisisthesecretkey'
app.config["MONGO_URI"] = 'mongodb://127.0.0.1/pythonmongodb'
mongo = PyMongo(app)
@app.route('/users', methods=['POST'])
def create_user():
    """Create a user from the JSON body (username, password, email).

    Returns the created document (with the hashed password) or a 404-style
    error payload when any field is missing.
    """
    # Receiving data
    username = request.json['username']
    password = request.json['password']
    email = request.json['email']

    if username and email and password:
        # Never store the plaintext password; only its hash.
        # (Restores the identifier the redacted original referred to.)
        hashed_pass = generate_password_hash(password)
        id_mongo = mongo.db.users.insert({
            "username": username,
            "email": email,
            "password": hashed_pass,
        })
        # NOTE(review): echoing the password hash back to the client is
        # questionable; kept for interface compatibility.
        response = {
            "id": str(id_mongo),
            "username": username,
            "password": hashed_pass,
            "email": email
        }
        return response
    else:
        response = not_found()
        return response
@app.route('/users', methods=['GET'])
def get_users():
    """Return every user document as a JSON array."""
    all_users = mongo.db.users.find()
    # json_util serializes Mongo-specific types (ObjectId, dates) to JSON.
    payload = json_util.dumps(all_users)
    return Response(payload, mimetype='application/json')
@app.route('/users/<id_mongo>', methods=['GET'])
def get_user(id_mongo):
    """Return one user document looked up by its ObjectId string."""
    user = mongo.db.users.find_one({"_id": ObjectId(id_mongo)})
    return Response(json_util.dumps(user), mimetype="application/json")
@app.route('/login')
def login():
    """Issue a short-lived JWT after HTTP Basic authentication."""
    auth = request.authorization
    # NOTE(review): '<PASSWORD>' is a redaction placeholder for a hard-coded
    # credential — restore a real check (e.g. against the users collection)
    # before use; hard-coded passwords should not ship.
    if auth and auth.password == '<PASSWORD>':
        payload = {
            "user": auth.username,
            # Token expires 10 minutes after issuance.
            "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
        }
        token = jwt.encode(payload, app.config['SECRET_KEY'])
        return jsonify({"token": token})
    return make_response('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm="Login Required'})
def token_required(f):
    """Decorator that requires a valid JWT passed as a `token` query param.

    Responds with 403 when the token is missing or fails verification.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # Expected usage: /route?token=<jwt>
        token = request.args.get('token')

        if not token:
            return jsonify({"message": "Token is missing!"}), 403
        try:
            # `algorithms` is required by PyJWT >= 2; HS256 matches the
            # default used by jwt.encode() in login().
            jwt.decode(token, app.config['SECRET_KEY'], algorithms=["HS256"])
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            return jsonify({"message": "Token is invalid!"}), 403

        return f(*args, **kwargs)
    return decorated
@app.route('/users/<id_mongo>', methods=['DELETE'])
def delete_user(id_mongo):
    """Delete the user with the given id and report the outcome."""
    mongo.db.users.delete_one({
        "_id": ObjectId(id_mongo),
    })
    return jsonify({"message": "User" + id_mongo + "was Deleted successfully"})
@app.route('/users/<id_mongo>', methods=['PUT'])
def update_user(id_mongo):
    """Update a user's username, password (hashed) and email.

    Returns a success message, or the 404 payload when any field is missing
    (previously a missing field still produced the success response).
    """
    username = request.json['username']
    password = request.json['password']
    email = request.json['email']

    if username and email and password:
        # Store only the password hash (restores the identifier the
        # redacted original referred to).
        hashed_password = generate_password_hash(password)
        mongo.db.users.update_one({"_id": ObjectId(id_mongo)}, {"$set": {
            "username": username,
            "password": hashed_password,
            "email": email
        }})
        response = jsonify(
            {"message": "User" + id_mongo + "was updated successfuly"})
        return response
    # Mirror create_user's behavior for invalid input.
    return not_found()
@app.errorhandler(404)
def not_found(error=None):
    """Build a JSON 404 response.

    jsonify alone would return HTTP 200, so the status code is set
    explicitly on the response object.
    """
    body = {
        "message": "Resource Not Found:" + request.url,
        "status": 404
    }
    response = jsonify(body)
    response.status_code = 404
    return response
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
| StarcoderdataPython |
1833299 | <reponame>sjev/wimm
from pathlib import Path
import os
import yaml
import wimm.structure as structure
__version__ = "DEV.0.0.9"
DATE_FMT = "%Y-%m-%d"
def get_path():
    """Return the database directory from the WIMM_PATH environment
    variable, or None when it is unset or empty."""
    env_value = os.getenv('WIMM_PATH')
    return Path(env_value) if env_value else None
def get_settings():
    """ get settings from file or defaults

    Returns the built-in default settings when WIMM_PATH is not set;
    otherwise loads the YAML settings file from the database directory and
    records that directory under the 'path' key.
    """
    path = get_path()
    if path is None:
        return structure.settings

    p = path / structure.files['settings']
    assert p.exists(), f'no settings file in {p.as_posix()}'
    # Close the file handle deterministically (the original leaked it by
    # passing p.open() straight to yaml.load).
    with p.open() as fh:
        settings = yaml.load(fh, Loader=yaml.SafeLoader)
    settings['path'] = path
    return settings
| StarcoderdataPython |
6667176 | load("@rules_foreign_cc//foreign_cc:defs.bzl", "cmake")
package(default_visibility = ["//visibility:public"])

licenses(["notice"])  # MIT

exports_files(["LICENSE"])

# Every file in this package, handed to the CMake build as its source tree.
filegroup(
    name = "all_srcs",
    srcs = glob(["**"]),
)

# CPU-only FAISS build with the C API enabled, driven by rules_foreign_cc.
cmake(
    name = "faiss_c",
    generate_args = [
        "-G Ninja",
        "-DFAISS_ENABLE_GPU=OFF",
        "-DFAISS_ENABLE_PYTHON=OFF",
        "-DFAISS_ENABLE_C_API=ON",
        "-DBUILD_TESTING=OFF",
        "-DCMAKE_BUILD_TYPE=Release",
        "-DFAISS_OPT_LEVEL=general",
    ],
    lib_source = ":all_srcs",
    out_static_libs = [
        "libfaiss_c.a",
        "libfaiss.a",
    ],
    targets = [
        "faiss_c",
        "faiss",
    ],
)
| StarcoderdataPython |
8148816 | import snoop
@snoop.snoop()
def f(_one, _two, _three, _four):
    """Demo function for the snoop tracer."""
    # Locals are first bound to None and then rebound, purely so snoop has
    # variable changes to log.
    _five = None
    _six = None
    _seven = None
    _five, _six, _seven = 5, 6, 7
def main():
    """Run the traced demo function with sample arguments."""
    f(1, 2, 3, 4)
| StarcoderdataPython |
241930 | default_app_config = 'tutors.apps.TutorsConfig'
| StarcoderdataPython |
3448410 | <reponame>bricerisingalgorand/mule
import platform
def get_os_type():
    """Return the OS name in lower case (e.g. 'linux', 'darwin', 'windows')."""
    os_name = platform.system()
    return os_name.lower()
def get_cpu_arch_type():
    """Map platform.machine() onto a Go-style architecture name.

    Returns one of 'amd64', 'arm', 'arm64' or 'unknown'.
    """
    arch_names = {
        "x86_64": "amd64",
        "armv6l": "arm",
        "armv7l": "arm",
        "aarch64": "arm64",
    }
    return arch_names.get(platform.machine(), "unknown")
| StarcoderdataPython |
8083177 | <gh_stars>1-10
# Python Logical Operators: And, Or, Not:

# What is a Boolean?
isRaining = False
isSunny = True

# Logical Operators -> Special Operators for Booleans

# AND: True only when both operands are True
# true and true --> true
# false and true --> false
# true and false --> false
# false and false --> false
if isRaining and isSunny:
    print("Rainbow might appear")

# OR: True when at least one operand is True
# true and true --> true
# false and true --> true
# true and false --> true
# false and false --> false
if isRaining or isSunny:
    # Fixed grammar of the original message ("It's might be ...").
    print("It might be raining or sunny.")

# NOT: inverts a Boolean
# true --> false
# false --> true
if not isRaining:
    # Fixed: the original printed "Raining" in the branch that runs when
    # it is NOT raining.
    print("Not raining")

ages = [19, 12, 4, 18, 21, 6]
for age in ages:
    is_adult = age > 17
    if not is_adult:
        print(f"being {age} doesn't make you an adult")
| StarcoderdataPython |
6450887 | <filename>autonetkit/collection/process.py
import autonetkit.log as log
def build_reverse_mappings_from_nidb(nidb):
    """Builds IP reverse mappings from NIDB"""
    mappings = {
        "subnets": {},
        "loopbacks": {},
        "infra_interfaces": {},
    }
    for node in nidb:
        if node.broadcast_domain:
            # Broadcast-domain pseudo-nodes stand for the subnet itself.
            mappings["subnets"][str(node.ipv4_subnet)] = node
        else:
            mappings["loopbacks"][str(node.loopback)] = node
            for iface in node.physical_interfaces:
                mappings["infra_interfaces"][str(iface.ipv4_address)] = iface
    return mappings
def build_reverse_mappings_from_anm_input(anm):
    """Builds reverse mappings from ANM input graph,
    assumes addresses have already been allocated onto input graph,
    either externally or by previous run"""
    from collections import defaultdict
    import netaddr
    g_in = anm['input']
    rev_map = {
        "loopbacks": {},
        "infra_interfaces": {},
        "subnets": {},
    }
    # Group infrastructure interfaces by the subnet they belong to.
    subnets = defaultdict(list)
    for node in g_in:
        rev_map["loopbacks"][str(node.loopback_v4)] = node
        for interface in node.physical_interfaces:
            rev_map["infra_interfaces"][str(interface.ipv4_address)] = interface
            # Derive the network address from the interface address and
            # prefix length, then re-normalize it to the subnet CIDR.
            prefixlen = interface.ipv4_prefixlen
            cidr_string = "%s/%s" % (interface.ipv4_address, prefixlen)
            intermediate_subnet = netaddr.IPNetwork(cidr_string)
            subnet_cidr_string = "%s/%s" % (intermediate_subnet.network, prefixlen)
            subnet = netaddr.IPNetwork(subnet_cidr_string)
            subnets[subnet].append(interface)

    # NOTE(review): unlike build_reverse_mappings_from_nidb (which maps a
    # subnet to a node object), this maps each subnet to an
    # underscore-joined string of attached node names — confirm callers
    # expect this asymmetry.
    for subnet, interfaces in subnets.items():
        subnet_str = str(subnet)
        rev_map['subnets'][subnet_str] = "_".join(str(i.node) for i in interfaces)

    return rev_map
def process_textfsm(template_file, data):
    """Run a TextFSM template over raw device output.

    TODO: make template return data other than just hops, and have
    reverse_map_path() handle accordingly

    Returns a (header, records) tuple: the template's column names and the
    parsed rows.
    """
    import textfsm
    with open(template_file, "r") as template:
        parser = textfsm.TextFSM(template)
        records = parser.ParseText(data)
    return parser.header, records
def extract_route_from_parsed_routing_table(header, routes, proto_id = "Proto",
    network_id = "Network", via_id = "Via"):
    """Pull (protocol, network, via) tuples out of TextFSM routing-table rows.

    The *_id parameters name the columns in *header* to extract from.
    """
    proto_col = header.index(proto_id)
    network_col = header.index(network_id)
    via_col = header.index(via_id)
    return [(row[proto_col], row[network_col], row[via_col]) for row in routes]
def extract_path_from_parsed_traceroute(header, routes, hop_id = "Hop"):
    """Returns the hop IPs from the TextFSM returned data"""
    hop_col = header.index(hop_id)
    return [row[hop_col] for row in routes]
def reverse_map_routing(rev_map, data):
    """Map (protocol, network, via) route tuples back onto topology objects.

    Routes whose network is not a known subnet are dropped; a None next-hop
    yields a (protocol, subnet, None) entry (e.g. connected routes).

    The original left Python-2 style debug ``print`` statements in here
    (syntax errors on Python 3); they have been removed.
    """
    result = []
    for protocol, network, via in data:
        if network not in rev_map['subnets']:
            continue
        cd = rev_map['subnets'][network]
        if via is None:
            result.append((protocol, cd, None))
        if via in rev_map['infra_interfaces']:
            iface = rev_map['infra_interfaces'][via]
            result.append((protocol, cd, iface.node))
    return result
def reverse_map_address(rev_map, address, interfaces = False):
    """Resolve a single IP address to its interface/node.

    Infrastructure addresses resolve to the interface's node (or to the
    interface itself when *interfaces* is true); loopback addresses resolve
    to their node. Unknown addresses yield None.
    """
    iface = rev_map['infra_interfaces'].get(address)
    if iface is not None:
        return iface if interfaces else iface.node
    return rev_map['loopbacks'].get(address)
def extract_node_path_info(header, parsed_data, mapped_data, exclude_keys = None):
    """Combine TextFSM rows with their reverse-mapped hop nodes.

    Each hop becomes a dict of its non-empty TextFSM fields (minus any in
    *exclude_keys*) plus a 'host' entry naming the mapped node.
    """
    if len(parsed_data) != len(mapped_data):
        log.warning("Parsed data different length to mapped data, not extracting node data")

    excluded = set(exclude_keys) if exclude_keys else set()

    combined = []
    for index, hop in enumerate(mapped_data):
        row = parsed_data[index]  # TextFSM output for this hop
        record = {key: value for key, value in zip(header, row)
                  if len(value) and key not in excluded}
        record['host'] = str(hop.id)
        combined.append(record)
    return combined
def reverse_map_path(rev_map, path, interfaces = False):
    """Returns list of nodes in path

    interfaces selects whether to return only nodes, or interfaces
    e.g. eth0.r1 or just r1
    """
    mapped = []
    infra = rev_map['infra_interfaces']
    loopbacks = rev_map['loopbacks']
    for hop in path:
        if hop in infra:
            iface = infra[hop]
            mapped.append(iface if interfaces else iface.node)
        elif hop in loopbacks:
            # Unknown hops (e.g. external addresses) are silently skipped.
            mapped.append(loopbacks[hop])
    return mapped
def substitute_ips(data, rev_map, interfaces = False):
    """Replace IP addresses/subnets in *data* (a text blob) with the names
    of the nodes/interfaces they reverse-map to.

    Substitution order matters: /32 loopbacks first, then bare addresses
    (interface, then loopback fallback), then subnets.
    """
    import re
    re_ip_address = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
    re_ip_loopback= r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/32"
    re_ip_subnet = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}"

    if not len(data):
        log.info("No data provided to IP substitution, returning")
        return data

    def replace_ip(match):
        # Bare address -> interface name ("<iface>.<node>") or node name.
        match = match.group()
        if match in rev_map['infra_interfaces']:
            iface = rev_map['infra_interfaces'][match]
            if interfaces:
                named = "%s.%s" %(iface.id, iface.node)
                return named
            else:
                return str(iface.node)
        return match # no match, return the raw IP

    def replace_loopbacks(match):
        # "<ip>/32" -> loopback owner's node name.
        match = match.group()
        # strip off the /32
        loopback_ip = match[:-3]
        if loopback_ip in rev_map['loopbacks']:
            node = rev_map['loopbacks'][loopback_ip]
            return str(node)
        return match # no match, return the raw IP

    def replace_loopbacks_no_mask(match):
        # Bare address -> loopback owner (fallback after replace_ip).
        #TODO: refactor
        match = match.group()
        # strip off the /32
        loopback_ip = match
        if loopback_ip in rev_map['loopbacks']:
            node = rev_map['loopbacks'][loopback_ip]
            return str(node)
        return match # no match, return the raw IP

    def replace_subnet(match):
        # "<ip>/<prefix>" -> subnet label from the reverse map.
        match = match.group()
        if match in rev_map['subnets']:
            subnet = rev_map['subnets'][match]
            return str(subnet)
        return match # no match, return the raw IP

    # do loopbacks first
    data = re.sub(re_ip_loopback, replace_loopbacks, data)
    data = re.sub(re_ip_address, replace_ip, data)
    # try for ip addresses in loopback
    data = re.sub(re_ip_address, replace_loopbacks_no_mask, data)
    # and for subnets ie ip/netmask
    return re.sub(re_ip_subnet, replace_subnet, data)
| StarcoderdataPython |
6651199 | <reponame>TAKHEXI/ALUMNI
# Generated by Django 3.2.5 on 2021-07-19 15:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.5 (makemigrations); prefer editing the
    # models and re-running makemigrations over hand-editing this file.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Activity',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='DEFAULT', max_length=30, verbose_name='标题')),
                ('startTime', models.DateTimeField(verbose_name='开始时间')),
                ('endTime', models.DateTimeField(verbose_name='结束时间')),
                ('location', models.CharField(max_length=200, verbose_name='详细地点')),
                ('cost', models.PositiveIntegerField(verbose_name='费用')),
            ],
            options={
                'verbose_name': '活动',
                'verbose_name_plural': '活动',
            },
        ),
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=30, verbose_name='城市')),
            ],
            options={
                'verbose_name': '城市',
                'verbose_name_plural': '城市',
            },
        ),
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True, verbose_name='院系')),
            ],
            options={
                'verbose_name': '院系',
                'verbose_name_plural': '院系',
            },
        ),
        migrations.CreateModel(
            name='Industry',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True, verbose_name='行业')),
            ],
            options={
                'verbose_name': '行业',
                'verbose_name_plural': '行业',
            },
        ),
        migrations.CreateModel(
            name='Province',
            fields=[
                ('name', models.CharField(max_length=30, primary_key=True, serialize=False, unique=True, verbose_name='省份')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True, verbose_name='标签')),
            ],
            options={
                'verbose_name': '标签',
                'verbose_name_plural': '标签',
            },
        ),
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=3000)),
            ],
            options={
                'verbose_name': 'test',
                'verbose_name_plural': 'test',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('username', models.CharField(max_length=30, primary_key=True, serialize=False, verbose_name='用户名')),
                ('mail', models.CharField(max_length=20, verbose_name='邮箱')),
                ('grade', models.IntegerField(verbose_name='届次')),
                ('studentID', models.CharField(max_length=20, verbose_name='学号')),
                ('phone', models.CharField(max_length=20, verbose_name='电话')),
                ('referrer', models.CharField(max_length=30, verbose_name='推荐人')),
                ('password', models.CharField(max_length=20, verbose_name='密码')),
                ('photo', models.ImageField(blank=True, default='img/default.jpg', null=True, upload_to='user_photo/%Y/%m/%d', verbose_name='头像')),
                ('essay', models.TextField(default='', max_length=30, verbose_name='个性签名')),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.city', verbose_name='城市')),
                ('department', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.department', verbose_name='院系')),
                ('industry', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.industry', verbose_name='行业')),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
            },
        ),
        migrations.CreateModel(
            name='Tie',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30, verbose_name='标题')),
                ('content', models.TextField(max_length=200, verbose_name='内容')),
                ('createdTime', models.DateTimeField(verbose_name='发布时间')),
                ('replyTime', models.DateTimeField(verbose_name='最新回复时间')),
                ('access', models.IntegerField(default=0, verbose_name='浏览量')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.user', verbose_name='作者')),
                ('relatedActivity', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.activity', verbose_name='相关活动')),
                ('tag', models.ManyToManyField(to='alumni.Tag', verbose_name='所属标签')),
            ],
            options={
                'verbose_name': '帖子',
                'verbose_name_plural': '帖子',
            },
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='姓名')),
                ('studentID', models.CharField(max_length=20, verbose_name='学号')),
                ('grade', models.IntegerField(verbose_name='界次')),
                ('department', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.department', verbose_name='院系')),
            ],
        ),
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=200, verbose_name='回复内容')),
                ('relatedTie', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.tie', verbose_name='相关帖')),
            ],
            options={
                'verbose_name': '楼层',
                'verbose_name_plural': '楼层',
            },
        ),
        migrations.CreateModel(
            name='Major',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True, verbose_name='专业')),
                ('department', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.department', verbose_name='所属院系')),
            ],
            options={
                'verbose_name': '专业',
                'verbose_name_plural': '专业',
            },
        ),
        migrations.AddField(
            model_name='city',
            name='province',
            field=models.ForeignKey(db_column='f', default='北京', on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.province'),
        ),
        migrations.AddField(
            model_name='activity',
            name='city',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='alumni.city', verbose_name='所在城市'),
        ),
    ]
| StarcoderdataPython |
3284776 | ###############################################
# <NAME> - PG Applied AI - Programming
# Unit tests, for the minimum edit distance
###############################################
import unittest # unit testing ftw
from excercise import Excercise # importing the actual code
class TestMethods(unittest.TestCase):
    """Unit tests for Excercise.CalculateEditDistance (minimum edit distance)."""

    def test_equal_strings(self):
        # Identical strings (including two empty strings) need zero edits.
        self.assertEqual(0, Excercise.CalculateEditDistance('', ''))
        self.assertEqual(0, Excercise.CalculateEditDistance('AB', 'AB'))

    def test_empty_strings(self):
        # Distance to/from the empty string equals the other string's length.
        self.assertEqual(2, Excercise.CalculateEditDistance('AB', ''))
        self.assertEqual(2, Excercise.CalculateEditDistance('', 'AB'))

    def test_small_strings(self):
        # Single substitutions/deletions/insertions.
        self.assertEqual(1, Excercise.CalculateEditDistance('ME', 'MY'))
        self.assertEqual(1, Excercise.CalculateEditDistance('aunt', 'ant'))
        self.assertEqual(1, Excercise.CalculateEditDistance('cat', 'cut'))
        self.assertEqual(2, Excercise.CalculateEditDistance('cat', 'cuta'))

    def test_addition_only(self):
        self.assertEqual(3, Excercise.CalculateEditDistance('Sam', 'Samuel'))

    def test_complex_string(self):
        # Classic Levenshtein example.
        self.assertEqual(3, Excercise.CalculateEditDistance('Saturday', 'Sunday'))

    def test_ignore_casing(self):
        # Third argument enables case-insensitive comparison.
        self.assertEqual(3, Excercise.CalculateEditDistance('SATURDay', 'Sunday', True))
        self.assertEqual(0, Excercise.CalculateEditDistance('ab', 'AB', True))

    def test_default_casing(self):
        # Default comparison is case-sensitive.
        self.assertNotEqual(3, Excercise.CalculateEditDistance('SATURDay', 'Sunday'))
        self.assertNotEqual(0, Excercise.CalculateEditDistance('ab', 'AB'))
| StarcoderdataPython |
1606726 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, <NAME>, UIUC.
Find the next permutation of an array.
'''
class Solution(object):
    def nextPermutation(self, nums):
        """
        Rearrange ``nums`` in place into the lexicographically next greater
        permutation; when ``nums`` is already the highest permutation, it
        wraps around to sorted ascending order.

        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        n = len(nums)
        # Rightmost index whose value is smaller than its successor;
        # -1 when the whole array is non-increasing (highest permutation).
        pivot = next((k for k in range(n - 2, -1, -1) if nums[k] < nums[k + 1]), -1)
        if pivot >= 0:
            # Rightmost value strictly greater than the pivot value.
            successor = next(k for k in range(n - 1, pivot, -1) if nums[k] > nums[pivot])
            nums[pivot], nums[successor] = nums[successor], nums[pivot]
        # The suffix after the pivot is non-increasing; reversing it gives
        # the smallest ordering of that suffix.
        nums[pivot + 1:] = reversed(nums[pivot + 1:])
| StarcoderdataPython |
97236 | import os
import h5py
import pickle
import numpy as np
from termcolor import colored
from torch.utils.data import Dataset, DataLoader
class CIFAR10Loader(Dataset):
    '''Data loader for the CIFAR-10 python-pickle batches.

    Args:
        data_path: directory containing the extracted cifar-10-batches-py files.
        mode: ``'train'`` (5 batches) or ``'test'`` (1 batch).
        transform: optional callable applied in ``__getitem__``.
            NOTE(review): it is applied to BOTH the sample and the label,
            which looks unusual — confirm this is intended.
    '''

    def __init__(self, data_path='data/cifar-10-batches-py', mode='train', transform=None):
        self.data_path = data_path
        self.transform = transform
        self.mode = mode
        self.data = []     # filled by _init_loader: float32 array (N, 3, 32, 32)
        self.labels = []   # filled by _init_loader: int64 array (N,)
        self._init_loader()

    def __len__(self):
        # Number of loaded samples.
        return len(self.labels)

    def __getitem__(self, idx):
        """Return the (sample, label) pair at *idx*, transformed if configured."""
        sample_train = self.data[idx]
        label_train = self.labels[idx]
        if self.transform:
            sample_train = self.transform(sample_train)
            label_train = self.transform(label_train)
        return sample_train, label_train

    def _init_loader(self):
        """Read the pickled batch files for the chosen mode into numpy arrays."""
        if self.mode == 'train':
            # [file name, md5 checksum] pairs for the five training batches.
            batch_list = [
                ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
                ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
                ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
                ['data_batch_4', '634d18415352ddfa80567beed471001a'],
                ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
            ]
        elif self.mode == 'test':
            batch_list = [
                ['test_batch', '40351d587109b95175f43aff81a1287e'],
            ]
        else:
            raise Exception('Unknown: mode type(Options: train, test)')
        for batch in batch_list:
            print(colored('====> ', 'blue') + 'Processing file: ', os.path.join(self.data_path, batch[0]))
            batch = unpickle(os.path.join(self.data_path, batch[0]))
            tmp = batch[b'data']
            self.data.append(tmp)
            self.labels.append(batch[b'labels'])
        self.data = np.float32(np.concatenate(self.data))
        # CIFAR batches store flat rows; reshape to channel-first images.
        self.data = self.data.reshape(self.data.shape[0], 3, 32, 32)  # .swapaxes(1, 3).swapaxes(1, 2)
        # Bug fix: np.long was removed in NumPy 1.24 — use the explicit int64 dtype.
        self.labels = np.concatenate(self.labels).astype(np.int64)
        print('Data dims, Label dims :', self.data.shape, self.labels.shape)
def unpickle(file):
    """Load and return the object pickled in *file* (path to a binary pickle).

    CIFAR-10 batches were pickled under Python 2, so ``encoding='bytes'`` is
    required and the returned dict has byte-string keys (e.g. ``b'data'``).
    """
    # Bug fix: the original bound the result to a local named ``dict``,
    # shadowing the builtin, and leaked the open file handle.
    with open(file, 'rb') as fp:
        batch = pickle.load(fp, encoding='bytes')
    return batch
class PCamLoader(Dataset):
    '''Data loader for the PatchCamelyon (PCam) HDF5 dataset.

    Args:
        data_path: directory holding the camelyonpatch_level_2_split_* files.
        mode: ``'train'``, ``'valid'``, or anything else for the test split.
        transform: optional callable applied in ``__getitem__``.
            NOTE(review): it is applied to BOTH the sample and the label —
            confirm this is intended.
    '''

    def __init__(self, data_path='data/', mode='train', transform=None):
        self.data_path = data_path
        self.transform = transform
        self.mode = mode
        self.data = []     # filled by _init_loader: float32 array (N, 3, 32, 32)
        self.labels = []   # filled by _init_loader: int64 array (N,)
        self._init_loader()

    def __len__(self):
        # Number of loaded samples.
        return len(self.labels)

    def __getitem__(self, idx):
        """Return the (sample, label) pair at *idx*, transformed if configured."""
        sample_train = self.data[idx]
        label_train = self.labels[idx]
        if self.transform:
            sample_train = self.transform(sample_train)
            label_train = self.transform(label_train)
        return sample_train, label_train

    def _init_loader(self):
        """Read the HDF5 x/y files for the chosen split into numpy arrays."""
        if self.mode == 'train':
            batch_list = [
                'camelyonpatch_level_2_split_train_x.h5',
                'camelyonpatch_level_2_split_train_y.h5'
            ]
        elif self.mode == 'valid':
            batch_list = [
                'camelyonpatch_level_2_split_valid_x.h5',
                'camelyonpatch_level_2_split_valid_y.h5'
            ]
        else:
            batch_list = [
                'camelyonpatch_level_2_split_test_x.h5',
                'camelyonpatch_level_2_split_test_y.h5'
            ]
        data_file, label_file = batch_list
        # Takes the [32:64, 32:64] spatial crop of each patch (presumably the
        # labelled central region — TODO confirm), then moves to channel-first.
        self.data = np.array(extract_hdf5(
            os.path.join(self.data_path, data_file)
        )['x'][:, 32:64, 32:64, :],
            dtype=np.float32).swapaxes(1, 2).swapaxes(1, 3)
        # Bug fix: np.long was removed in NumPy 1.24 — use the explicit int64 dtype.
        self.labels = np.array(
            extract_hdf5(os.path.join(self.data_path, label_file))['y'],
        ).astype(np.int64).reshape(-1)
        print('Data dims, Label dims :', self.data.shape, self.labels.shape)
        return 0
def extract_hdf5(filename):
    """Open *filename* as a read-only HDF5 file and return the handle.

    The caller is responsible for closing the returned file object.
    """
    return h5py.File(filename, 'r')
| StarcoderdataPython |
5024094 | <reponame>javiermas/BCNAirQualityDatathon<filename>airquality/models/validate_lstm.py<gh_stars>0
from airquality.data.read_data import read_obs, read_targets
from airquality.models.LSTM_keras import LSTM_K
from airquality.data.prepare_data import create_model_matrix
from airquality.models.split import tt_split, reshape_to_keras
# Read data
data_obs = read_obs()
data_targets = read_targets()
# Every column of the targets frame except the last one is a prediction
# target (presumably the last column is a date/index — TODO confirm
# against read_targets()).
target_cols = data_targets.columns[:-1]
# Prepare data
# One timestep per sample for the LSTM input sequences.
seq_length = 1
model_matrix = create_model_matrix(data_obs, target_cols=list(target_cols))
# 70/30 chronological train/test split (tt_split cuts at train_size rows).
train_size = int(len(model_matrix)*0.7)
train_X, test_X, train_Y, test_Y = tt_split(model_matrix, train_size, target_cols)
# Reshape 2-D feature matrices into the 3-D (samples, seq, features)
# layout Keras LSTMs expect.
train_X = reshape_to_keras(train_X, seq_length)
test_X = reshape_to_keras(test_X, seq_length)
# Validate LSTM
param_dict = {
    'batch_size': 10,
    'seq_length': seq_length,
    'size': train_X.shape[2],          # number of input features
    'hidden_units': 50,
    'num_layers': 1,
    'dense_units': train_Y.shape[1],   # one output unit per target column
    'epochs': 3,
}
lstm = LSTM_K(**param_dict)
lstm.validate(train_X, train_Y, test_X, test_Y)
| StarcoderdataPython |
378313 | <reponame>xiaorancs/notebooks
# _*_coding:utf-8_*_
# Author: xiaoran
# Time: 2018-12-13
import numpy as np
def zero_one_loss(y_true, y_pred):
    '''Fraction of samples where prediction and label disagree.

    param:
        y_true: narray or list
        y_pred: narray or list
    return: double
    '''
    labels = np.array(y_true)
    preds = np.array(y_pred)
    mismatches = len(labels) - np.sum(labels == preds)
    return 1.0 * mismatches / len(labels)
def accuracy_score(y_true, y_pred):
    '''Fraction of samples where prediction and label agree.

    param:
        y_true: narray or list
        y_pred: narray or list
    return: double
    '''
    labels = np.array(y_true)
    preds = np.array(y_pred)
    return 1.0 * np.sum(labels == preds) / len(labels)
def mean_absolute_loss(y_true, y_pred):
    '''Mean absolute error between labels and predictions.

    param:
        y_true: narray or list
        y_pred: narray or list
    return: double
    '''
    labels = np.array(y_true)
    preds = np.array(y_pred)
    total = np.sum(np.abs(labels - preds))
    return 1.0 * total / len(preds)
def mean_squared_loss(y_true, y_pred):
    '''Mean squared error between labels and predictions.

    param:
        y_true: narray or list
        y_pred: narray or list
    return: double
    '''
    labels = np.array(y_true)
    preds = np.array(y_pred)
    total = np.sum((labels - preds) ** 2)
    return 1.0 * total / len(preds)
def log_loss(y_true, y_pred):
    '''Mean binary cross-entropy (log loss).

    param:
        y_true: narray or list of 0/1 labels
        y_pred: narray or list of predicted probabilities of class 1,
                strictly inside (0, 1)
    return: double
    '''
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    # Bug fix: the previous version ignored y_true entirely and always
    # scored -log(y_pred).  Log loss must score the probability assigned
    # to the *true* class of each sample.
    per_sample = np.where(y_true == 1, -np.log(y_pred), -np.log(1.0 - y_pred))
    return np.sum(per_sample) / len(y_pred)
if __name__ == "__main__":
    # Smoke-test each metric on tiny hand-checkable examples.
    labels_cls = [1, 1, 1, 0, 0, 0]
    preds_cls = [1, 0, 1, 0, 0, 1]
    print(zero_one_loss(labels_cls, preds_cls))
    print(accuracy_score(labels_cls, preds_cls))
    labels_reg = [1.1, 1.2, 1.3, 2, 3, 4]
    preds_reg = [1, 0, 1, 2, 3, 4]
    print(mean_absolute_loss(labels_reg, preds_reg))
    print(mean_squared_loss(labels_reg, preds_reg))
    labels_bin = [1, 1, 1, 0, 0, 0]
    preds_bin = [0.8, 0.7, 0.98, 0.2, 0.4, 0.1]
    print(log_loss(labels_bin, preds_bin))
| StarcoderdataPython |
12856922 | from django.core.management.base import NoArgsCommand, CommandError;
from ldap_login.ldapUtils import ldapManager;
from ldap_login.models import user,group,Role;
from datetime import datetime;
import traceback;
class Command(NoArgsCommand):
    """Import LDAP users from Active Directory (Python 2 management command).

    Uses the ldapUtils backend. Creates missing users/groups in our database,
    reuses existing ones, and refreshes group bindings, full names and roles
    for every user found in the directory.
    """
    args = None;
    help = "imports LDAP users from Active Directory into database"
    can_import_settings = True
    # OUs that must never be imported as groups.
    exclusion_list = ['exam','Domain Controllers']; #list of OUs we do not want to handle at all.

    def handle_noargs(self, **options):
        #**options is a dictionary of keyword parameters beyond those defined
        try:
            l = ldapManager();
            groups = l.getGroups()
            for g in groups:
                if g in self.exclusion_list :
                    continue;
                print "-" * 60
                print '\nProcessing group %s' % g;
                #does this group exist in our database ?
                try:
                    groupObj = group.objects.get(name=g);
                    print "Using existing group %s" % g
                except group.DoesNotExist:
                    groupObj = group(name=g,created_on=datetime.now());
                    groupObj.save();
                    print "Created group %s" % g;
                finally:
                    # NOTE(review): this finally block is really a "both paths"
                    # continuation — it also runs if an unexpected exception
                    # escaped above, in which case groupObj may be unbound.
                    users = l.getUsers(ou=g,attrs=['sAMAccountName','displayName']);
                    for u in users:
                        print "-" * 20
                        username = u['sAMAccountName'][0]; #because we get a dictionary of lists from ldap!
                        print '\nSearching for existing user with username : %s ' % username;
                        try:
                            userObj = user.objects.get(pk=username)
                            print "Using existing user %s " % userObj
                        except user.DoesNotExist:
                            userObj = user(pk=username);
                            userObj.created_on = datetime.now();
                            print "Created user %s " % userObj;
                        except Exception as e:
                            print 'An unknown exception occured! ';
                            print e;
                            print traceback.print_exc();
                        finally: #so that we update these properties for all user
                            # NOTE(review): if the generic except branch above
                            # fired, userObj may be unbound/stale here — confirm.
                            if 'displayName' in u:
                                userObj.fullname = u['displayName'][0] #because it's a dictionary of lists!
                            else:
                                userObj.fullname = userObj.pk
                            #Don't forget to assign role!
                            # Usernames starting with 0/1 are student accounts;
                            # everyone else is treated as faculty.
                            if username.startswith('0') or username.startswith('1'):
                                userObj.role = Role.objects.get_or_create(name='student')[0]
                            else:
                                userObj.role = Role.objects.get_or_create(name='faculty')[0]
                            userObj.save();
                            #the following must be done after saving
                            #refer: http://stackoverflow.com/questions/7837033/valueerror-cannot-add-instance-is-on-database-default-value-is-on-databas
                            userObj.groups.add(groupObj); #add this user to the group;
        except KeyError as e:
            print 'KeyError happened in the structure :'
            print e.message
            print 'Structure:', u
            print
        except Exception as e:
            print 'Some unexpected exception occured!';
            print e;
            print traceback.print_exc()
58107 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 12:47:00 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import f1_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.preprocessing import LabelEncoder
def ap_cluster_k(x, K, preference_init=-1.0, max_iter=30,
                 c=None, iter_finetune=10):
    '''
    Clustering of x by affinity propagation which the number of cluster is K.

    args:
        x (ndarray):
            Data matrix.
        K (int):
            Target number of clusters.
        preference_init (float):
            Initial (strictly negative) preference used to bracket the search.
        max_iter (int):
            Number of trials for bisection search.
        c (ndarray, optional):
            Class labels of x. If this parameter is specified, the function
            try to find the better solution by random search.
        iter_finetune (int):
            Number of steps for the random search.

    returns:
        A fitted sklearn.cluster.AffinityPropagation instance whose number
        of clusters is K (best NMI-scoring one when `c` is given).
    '''
    # Bug fix: every fit/predict below previously used the module-level
    # global `y` instead of the `x` argument, so the function only worked
    # by accident when run from this file's __main__ block.
    #
    # First, search a rough lower bound of the preference.
    assert preference_init < 0, "preference_init must be negative."
    p = float(preference_init)  # preference parameter
    p_upper = 0
    # Bug fix: k_upper was unbound when the very first trial already gave
    # <= K clusters; use +inf as "unknown, definitely not too small".
    k_upper = float("inf")
    for i in range(5):
        ap = AffinityPropagation(preference=p).fit(x)
        k_current = len(ap.cluster_centers_indices_)
        if k_current > K:
            p_upper = p
            k_upper = k_current
            p *= 10
        else:
            p_lower = p
            k_lower = k_current
            break
    else:
        raise RuntimeError("Can't find initial lower bound for preference."
                           " Try another value of p_initial.")

    # Search the preference by the bisection method.
    for i in range(max_iter):
        p = (p_lower + p_upper) / 2
        ap = AffinityPropagation(preference=p).fit(x)
        k_current = len(ap.cluster_centers_indices_)
        print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
        print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
                                           k_upper, p_upper))

        # If the current k goes out of bounds then retry with perturbed p.
        while k_current < k_lower or k_current > k_upper:
            print("retry")
            p += np.random.uniform(p_lower, p_upper) / 10
            ap = AffinityPropagation(preference=p).fit(x)
            k_current = len(ap.cluster_centers_indices_)
            print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
            print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
                                               k_upper, p_upper))

        if k_current < K:
            p_lower = p
            k_lower = k_current
        elif k_current > K:
            p_upper = p
            k_upper = k_current
        else:
            break
    else:
        raise RuntimeError("Can't find a preference to form K clusters."
                           " Try another value of p_initial.")

    if c is None:
        return ap

    # Search a further better preference in terms of NMI score by random search.
    p_best = p
    score_best = normalized_mutual_info_score(c, ap.predict(x))
    print('initial score:', score_best)
    print()
    for i in range(iter_finetune):
        p = np.random.normal(p_best, (p_upper - p_lower) / 2)
        if p < p_lower or p > p_upper:  # where p is rejected
            print('reject')
            continue
        ap = AffinityPropagation(preference=p).fit(x)
        k_current = len(ap.cluster_centers_indices_)
        if k_current < K and p > p_lower:
            p_lower = p
        elif k_current > K and p < p_upper:
            p_upper = p
        else:  # where k_current is K
            score = normalized_mutual_info_score(c, ap.predict(x))
            if score > score_best:
                print("update p {} -> {}".format(p_best, p))
                p_best = p
                score_best = score
        print('p: {}, {}, {}'.format(p_lower, p, p_upper))
        print('score: {}'.format(score_best))
        print()
    return AffinityPropagation(preference=p_best).fit(x)
if __name__ == '__main__':
    # Embeddings (y_*) and class labels (c_*) saved as .npy files alongside
    # this script.
    y_train = np.load('y_train.npy')
    c_train = np.load('c_train.npy').ravel()
    y_test = np.load('y_test.npy')
    c_test = np.load('c_test.npy').ravel()
    # Re-encode labels to consecutive integers starting at 0.
    c_train = LabelEncoder().fit_transform(c_train)
    c_test = LabelEncoder().fit_transform(c_test)

    # Keep only the first K classes for the clustering experiment.
    K = 40
    # K = len(np.unique(c_train))
    # NOTE(review): ap_cluster_k's body references these module-level names
    # `y` and `c` directly (instead of its own parameters), so these
    # assignments are load-bearing for it — confirm before renaming.
    y = y_train[c_train.ravel() < K]
    c = c_train[c_train < K]
    # y = y_test[c_test.ravel() < K]
    # c = c_test[c_test < K]

    ap = ap_cluster_k(y, K, preference_init=-1.0, c=c, iter_finetune=30)
    c_pred = ap.predict(y)
    print(normalized_mutual_info_score(c, c_pred))
    # Plot predicted cluster ids against the true labels, sample by sample.
    plt.plot(np.vstack((c_pred, c)).T)
    plt.show()
    # print f1_score(c, c_pred)
1619462 | <filename>thesis-deadline-version/rings/1round/graphs/all/create-graph.py
from os import system, remove
# Emit a temporary gnuplot script that renders the ring measurements to
# rings.png, run gnuplot on it, then delete the script again.
# (An exponential fit of striedave-parne.txt and axis labels were tried
# here once and are intentionally left out.)
gnuplot_lines = [
    "set terminal pngcairo size 350,262 enhanced font 'Verdana,10'",
    'set output "rings.png"',
    "set grid",
    'set title "Rings"',
    "set logscale y 2",
    "set key left bottom",
    'set key font ",8"',
    "set key spacing 1",
    "set key box",
    'plot "zeroes", "max0", "max1", "1d"',
]
with open("graf.gnuplot", "w") as graf:
    graf.write("\n".join(gnuplot_lines))
system('gnuplot graf.gnuplot')
remove('graf.gnuplot')
121426 | print('Accessing private members in Class:')
print('-'*35)
class Human():
    """Demo class showing public, protected (_) and private (__) members."""

    # Class-level private attribute; name-mangled to _Human__privateVar.
    __privateVar = "this is __private variable"

    def __init__(self):
        """Set the public class name and shadow the private class attribute."""
        self.className = "Human class constructor"
        self.__privateVar = "this is redefined __private variable"

    def showName(self, name):
        """Store *name* on the instance and return the private var with it."""
        self.name = name
        return f"{self.__privateVar} with name: {name}"

    def __privateMethod(self):
        """Private method (mangled to _Human__privateMethod)."""
        return "Private method"

    def _protectedMethod(self):
        """Protected-by-convention method."""
        return 'Protected Method'

    def showPrivate(self):
        """Public accessor for the private method."""
        return self.__privateMethod()

    def showProtecded(self):  # (sic) original spelling kept for compatibility
        """Public accessor for the protected method."""
        return self._protectedMethod()
class Male(Human):
    """Subclass demo: inherited private members are NOT reachable here."""

    def showClassName(self):
        """Return this subclass's display name."""
        return "Male"

    def showPrivate(self):
        # Mangles to self._Male__privateMethod, which does not exist
        # (Human defines _Human__privateMethod), so calling this raises
        # AttributeError — private members do not inherit.
        return self.__privateMethod()

    def showProtected(self):
        # Protected (single-underscore) members *are* visible to subclasses.
        return self._protectedMethod()
class Female(Human):
    """Second subclass demo; note it defines no showProtected of its own."""

    def showClassName(self):
        """Return this subclass's display name."""
        return "Female"

    def showPrivate(self):
        # Raises AttributeError when called: the mangled name
        # _Female__privateMethod is not defined anywhere.
        return self.__privateMethod()
# --- Demo: exercise the classes above and show which members are reachable ---
human = Human()
print(f'\nCalling the: {human.className} from the Human class.')
print(f'\nAccessing the public method of Human class: {human.showName("Ling-Ling")}')
print(f'\nAccessing the private method of the Human class: {human.showPrivate()}, from Human Class.')
# Fails because the accessor is actually spelled showProtecded (typo) on Human:
# print(f'Acessing the protected Method of the Human Class : {human.showProtected()},from Human Class.') -->AttributeError:'Human' object has no attribute 'showProtected'
male = Male()
print(f'\ncalling the {male.className} from the Male class')
print(f'\nAccessing the Public method of Male class: {male.showClassName()}, from male class')
print(f'\nAccessing the protected method of Male class: {male.showProtected()}, from male class.')
# Male.showPrivate() would raise: name mangling looks for _Male__privateMethod.
# print(f'Accessing the private method of Male class: {male.Human__showPrivate()}, from male Class.') --> AttributeError: 'Male' object has no attribute '_Male__privateMethod'
female = Female()
print(f'\ncalling the {female.className} from the Female class')
print(f'\nAccessing the Public method of female class: {female.showClassName()}, from Female class')
# Female defines neither showProtected nor a working showPrivate:
# print(f'Accessing the protected method of female class: {female.showProtected()}, from Female class.') --> AttributeError: 'Female' object has no attribute 'showProtected'
# print(f'Accessing the private method of female class: {female.showPrivate()}, from Female Class.') AttributeError: 'Female' object has no attribute '_Female__privateMethod'
print('\n'+'-'*25+"Method 2 -- Accessing private members in Class"+'-'*25)
print('\n'+'Example: Public Attributes: ')
print('-'*20)
class Employee:
    """Demo class whose attributes are public (no underscore prefix)."""

    def __init__(self, name, sal):
        # Public attributes: readable and writable from anywhere.
        self.name, self.salary = name, sal
# Public attributes can be read directly from outside the class.
e1=Employee('Ling1',30000)
print(f'Accessing the Public Attributes: {e1.name} : {e1.salary}')
# if attribute is public then the value can be modified too
e1.salary=40000
print(f'Accessing the Public Attributes after modifying: {e1.name} : {e1.salary}')
print('\n'+'Example: Protected Attributes: ')
'''Python's convention to make an instance variable protected is to add a prefix _ (single underscore) to it.
This effectively prevents it to be accessed, unless it is from within a sub-class.'''
print('-'*25)
class Employee:
    """Demo class using single-underscore ("protected") attributes."""

    def __init__(self, name, sal):
        # The leading underscore is advisory only; Python does not enforce it.
        self._name, self._salary = name, sal
# Protected (single-underscore) attributes remain fully accessible — the
# prefix is a convention, not an access control.
e2=Employee('Ling2',50000)
print(f'Accessing the Protected Attributes: {e2._name} : {e2._salary}')
#even if attribute is protected the value can be modified too
e2._salary=44000
print(f'Accessing the Protected Attributes after modifying: {e2._name} : {e2._salary}')
print('\n'+'Example: Private Attributes: ')
'''a double underscore __ prefixed to a variable makes it private.
It gives a strong suggestion not to touch it from outside the class.
Any attempt to do so will result in an AttributeError.'''
print('-'*25)
class Employee:
    """Demo class using double-underscore (name-mangled, "private") attributes."""

    def __init__(self, name, sal):
        # Name mangling stores these as _Employee__name / _Employee__salary.
        self.__name, self.__salary = name, sal
e3=Employee('Ling3',60000)
# Direct access to e3.__name fails — the attribute only exists under its
# mangled name on the instance.
# print(f'Accessing the Privated Attributes: {e3.__name} : {e3.__salary}') --> AttributeError: 'Employee' object has no attribute '__name
'''In order to access the attributes, Python performs name mangling of private variables.
Every member with double underscore will be changed to _object._class__variable.'''
print(f'Accessing the Private Attributes: {e3._Employee__name} : {e3._Employee__salary}')
#even if attribute is protected the value can be modified too
e3._Employee__salary=15000
print(f'Accessing the Protected Attributes after modifying: {e3._Employee__name} : {e3._Employee__salary}')
| StarcoderdataPython |
9609398 | <reponame>learningmatter-mit/gulpy
import numpy as np
import networkx as nx
from networkx.algorithms.traversal.depth_first_search import dfs_edges
from typing import List
from pymatgen.core import Molecule, Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import JmolNN
class MoleculeExtractor:
    """Recovers a connected Molecule from a periodic pymatgen Structure.

    When a molecule crosses the unit-cell boundary, stripping the lattice
    leaves its atoms disconnected; this class rebuilds contiguous Cartesian
    coordinates by walking the bonding graph and accumulating bond vectors.
    """

    def __init__(
        self, structure: Structure,
    ):
        """Keep the periodic structure the molecule will be extracted from."""
        self.structure = structure

    def get_molecular_structure_from_indices(self, indices: List[int]) -> Structure:
        """Build a sub-Structure containing only the sites in *indices*."""
        selected_species = [
            sp for i, sp in enumerate(self.structure.species) if i in indices
        ]
        return Structure(
            species=selected_species,
            coords=self.structure.cart_coords[indices],
            lattice=self.structure.lattice.matrix,
            coords_are_cartesian=True,
        )

    def get_structure_graph(self, struct: Structure) -> nx.Graph:
        """Bond graph of *struct* using Jmol nearest-neighbour rules."""
        return StructureGraph.with_local_env_strategy(struct, JmolNN())

    def walk_graph_and_get_coords(self, sgraph: StructureGraph) -> np.array:
        """Accumulate bond vectors along a DFS from node 0 into coordinates."""
        offsets = self.get_distance_vectors(sgraph)
        coords_by_node = {0: sgraph.structure[0].coords}
        for src, dst in dfs_edges(nx.Graph(sgraph.graph), source=0):
            coords_by_node[dst] = coords_by_node[src] + offsets[(src, dst)]
        # Order rows by node index so they line up with the species list.
        return np.array([coords_by_node[i] for i in sorted(coords_by_node)])

    def get_distance_vectors(self, sgraph: StructureGraph) -> dict:
        """Map each connected (u, v) pair to the Cartesian vector u -> v.

        Useful when walking through the graph later.
        """
        vectors = {}
        for src in sgraph.graph.nodes:
            origin = sgraph.structure[src].coords
            for neighbor in sgraph.get_connected_sites(src):
                vectors[(src, neighbor.index)] = neighbor.site.coords - origin
        return vectors

    def extract_molecule(self, indices: List[int]) -> Molecule:
        """Extract the sites in *indices* as a contiguous (unwrapped) Molecule."""
        substructure = self.get_molecular_structure_from_indices(indices)
        bond_graph = self.get_structure_graph(substructure)
        unwrapped_coords = self.walk_graph_and_get_coords(bond_graph)
        return Molecule(species=substructure.species, coords=unwrapped_coords)
| StarcoderdataPython |
67253 | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Custom TensorFlow ops for efficient resampling of 2D images."""
import os
import numpy as np
import tensorflow as tf
from models.stylegan2.layers.cuda import custom_ops
def _get_plugin():
    """Load the compiled CUDA op living next to this module (same name, .cu)."""
    cuda_file = os.path.splitext(__file__)[0] + ".cu"
    return custom_ops.get_plugin(cuda_file)
# ----------------------------------------------------------------------------
def upfirdn_2d(
    x,
    k,
    upx=1,
    upy=1,
    downx=1,
    downy=1,
    padx0=0,
    padx1=0,
    pady0=0,
    pady1=0,
    impl="cuda",
):
    r"""Pad, upsample, FIR filter, and downsample a batch of 2D images.

    For each image in the `[majorDim, inH, inW, minorDim]` batch, this
    performs, in order:

    1. Zero-pad (or crop, for negative values) by `padx0`/`padx1`/`pady0`/`pady1`.
    2. Upsample by inserting zeros after each pixel (`upx`, `upy`).
    3. Convolve with the 2D FIR filter `k` (`[firH, firW]`), keeping only
       output pixels whose footprint lies fully inside the input.
    4. Downsample by keeping every `downx`-th / `downy`-th pixel.

    Semantically equivalent to scipy.signal.upfirdn(), but fused for
    efficiency and differentiable to arbitrary order.

    Args:
        x:     Input tensor, `[majorDim, inH, inW, minorDim]`.
        k:     2D FIR filter, `[firH, firW]`.
        upx, upy:     Integer upsampling factors (default: 1).
        downx, downy: Integer downsampling factors (default: 1).
        padx0, padx1, pady0, pady1: Pixels to pad on each side (default: 0).
        impl:  Implementation name, `"ref"` or `"cuda"` (default).

    Returns:
        Tensor `[majorDim, outH, outW, minorDim]` with the same dtype as `x`.
    """
    impl_funcs = {
        "ref": _upfirdn_2d_ref,
        "cuda": _upfirdn_2d_cuda,
    }
    chosen = impl_funcs[impl]  # unknown impl -> KeyError, same as before
    return chosen(
        x=x,
        k=k,
        upx=upx,
        upy=upy,
        downx=downx,
        downy=downy,
        padx0=padx0,
        padx1=padx1,
        pady0=pady0,
        pady1=pady1,
    )
# ----------------------------------------------------------------------------
def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
    """Slow reference implementation of `upfirdn_2d()` using standard TensorFlow ops.

    `x` is `[majorDim, inH, inW, minorDim]`; `k` is a float32 2D FIR filter
    `[firH, firW]`. Returns `[majorDim, outH, outW, minorDim]`.
    """
    x = tf.convert_to_tensor(x)
    k = np.asarray(k, dtype=np.float32)
    assert x.shape.rank == 4
    inH = x.shape[1]
    inW = x.shape[2]
    minorDim = _shape(x, 3)
    kernelH, kernelW = k.shape
    assert inW >= 1 and inH >= 1
    assert kernelW >= 1 and kernelH >= 1
    assert isinstance(upx, int) and isinstance(upy, int)
    assert isinstance(downx, int) and isinstance(downy, int)
    assert isinstance(padx0, int) and isinstance(padx1, int)
    assert isinstance(pady0, int) and isinstance(pady1, int)

    # Upsample (insert zeros): expand each pixel to an (upy, upx) cell whose
    # top-left element is the pixel and the rest zeros.
    x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim])
    x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
    x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim])

    # Pad (crop if negative): tf.pad handles the positive part, slicing the
    # negative (crop) part.
    x = tf.pad(
        x,
        [
            [0, 0],
            [max(pady0, 0), max(pady1, 0)],
            [max(padx0, 0), max(padx1, 0)],
            [0, 0],
        ],
    )
    x = x[
        :,
        max(-pady0, 0) : x.shape[1] - max(-pady1, 0),
        max(-padx0, 0) : x.shape[2] - max(-padx1, 0),
        :,
    ]

    # Convolve with filter: fold the minor (channel) dim into the batch so a
    # single-channel NCHW conv applies the same filter to every channel.
    # The kernel is flipped so conv2d (correlation) computes true convolution.
    x = tf.transpose(x, [0, 3, 1, 2])
    x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
    w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)
    x = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="VALID", data_format="NCHW")
    x = tf.reshape(
        x,
        [
            -1,
            minorDim,
            inH * upy + pady0 + pady1 - kernelH + 1,
            inW * upx + padx0 + padx1 - kernelW + 1,
        ],
    )
    x = tf.transpose(x, [0, 2, 3, 1])

    # Downsample (throw away pixels).
    return x[:, ::downy, ::downx, :]
# ----------------------------------------------------------------------------
def _upfirdn_2d_cuda(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
    """Fast CUDA implementation of `upfirdn_2d()` using custom ops.

    Same contract as `_upfirdn_2d_ref()`, but dispatches to the compiled
    up_fir_dn2d kernel and wires up an analytic gradient (itself an
    upfirdn with swapped up/down factors, a flipped kernel and complementary
    padding), nested once more so second-order gradients also work.
    """
    x = tf.convert_to_tensor(x)
    k = np.asarray(k, dtype=np.float32)
    majorDim, inH, inW, minorDim = x.shape.as_list()
    kernelH, kernelW = k.shape
    assert inW >= 1 and inH >= 1
    assert kernelW >= 1 and kernelH >= 1
    assert isinstance(upx, int) and isinstance(upy, int)
    assert isinstance(downx, int) and isinstance(downy, int)
    assert isinstance(padx0, int) and isinstance(padx1, int)
    assert isinstance(pady0, int) and isinstance(pady1, int)

    # Static output size of the forward pass.
    outW = (inW * upx + padx0 + padx1 - kernelW) // downx + 1
    outH = (inH * upy + pady0 + pady1 - kernelH) // downy + 1
    assert outW >= 1 and outH >= 1

    # Forward kernel, and the spatially flipped kernel for the gradient.
    kc = tf.constant(k, dtype=x.dtype)
    gkc = tf.constant(k[::-1, ::-1], dtype=x.dtype)
    # Padding for the gradient pass, chosen so the backward upfirdn maps the
    # (outH, outW) gradient back onto the (inH, inW) input exactly.
    gpadx0 = kernelW - padx0 - 1
    gpady0 = kernelH - pady0 - 1
    gpadx1 = inW * upx - outW * downx + padx0 - upx + 1
    gpady1 = inH * upy - outH * downy + pady0 - upy + 1

    @tf.custom_gradient
    def func(x):
        y = _get_plugin().up_fir_dn2d(
            x=x,
            k=kc,
            upx=upx,
            upy=upy,
            downx=downx,
            downy=downy,
            padx0=padx0,
            padx1=padx1,
            pady0=pady0,
            pady1=pady1,
        )
        y.set_shape([majorDim, outH, outW, minorDim])

        @tf.custom_gradient
        def grad(dy):
            # Backward pass: upfirdn with up/down swapped and flipped kernel.
            dx = _get_plugin().up_fir_dn2d(
                x=dy,
                k=gkc,
                upx=downx,
                upy=downy,
                downx=upx,
                downy=upy,
                padx0=gpadx0,
                padx1=gpadx1,
                pady0=gpady0,
                pady1=gpady1,
            )
            dx.set_shape([majorDim, inH, inW, minorDim])
            # Gradient-of-gradient is the forward op again.
            return dx, func

        return y, grad

    return func(x)
# ----------------------------------------------------------------------------
def filter_2d(x, k, gain=1, data_format="NCHW", impl="cuda"):
    r"""Filter a batch of 2D images with the given FIR filter.

    Each image in the `[N, C, H, W]` or `[N, H, W, C]` batch is convolved
    with `k` (`[firH, firW]` or separable `[firN]`). The filter is
    normalized so that constant input pixels are scaled by `gain`; pixels
    outside the image are treated as zero.

    Args:
        x:           Input tensor, `[N, C, H, W]` or `[N, H, W, C]`.
        k:           FIR filter.
        gain:        Scaling factor for signal magnitude (default: 1.0).
        data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
        impl:        `"ref"` or `"cuda"` (default).

    Returns:
        Tensor of the same shape and dtype as `x`.
    """
    kernel = _setup_kernel(k) * gain
    pad_total = kernel.shape[0] - 1
    # Split the padding as evenly as possible (extra pixel in front).
    return _simple_upfirdn_2d(
        x,
        kernel,
        pad0=(pad_total + 1) // 2,
        pad1=pad_total // 2,
        data_format=data_format,
        impl=impl,
    )
# ----------------------------------------------------------------------------
def upsample_2d(x, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
    r"""Upsample a batch of 2D images with the given filter.

    Each image in the `[N, C, H, W]` or `[N, H, W, C]` batch is upsampled
    by `factor` and filtered with `k`. The filter is normalized so that
    constant input pixels are scaled by `gain`; it is zero-padded to a
    multiple of the upsampling factor, and out-of-image pixels are zero.
    The default `k = [1] * factor` gives nearest-neighbor upsampling.

    Args:
        x:           Input tensor, `[N, C, H, W]` or `[N, H, W, C]`.
        k:           FIR filter (`[firH, firW]` or separable `[firN]`).
        factor:      Integer upsampling factor (default: 2).
        gain:        Scaling factor for signal magnitude (default: 1.0).
        data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
        impl:        `"ref"` or `"cuda"` (default).

    Returns:
        Tensor `[N, C, H * factor, W * factor]` or
        `[N, H * factor, W * factor, C]`, same dtype as `x`.
    """
    assert isinstance(factor, int) and factor >= 1
    if k is None:
        k = [1] * factor
    # factor**2 compensates for the zeros inserted during upsampling.
    kernel = _setup_kernel(k) * (gain * (factor ** 2))
    pad_total = kernel.shape[0] - factor
    return _simple_upfirdn_2d(
        x,
        kernel,
        up=factor,
        pad0=(pad_total + 1) // 2 + factor - 1,
        pad1=pad_total // 2,
        data_format=data_format,
        impl=impl,
    )
# ----------------------------------------------------------------------------
def downsample_2d(x, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
    r"""Downsample a batch of 2D images with the given filter.

    Each image in the `[N, C, H, W]` or `[N, H, W, C]` batch is filtered
    with `k` and downsampled by `factor`. The filter is normalized so that
    constant input pixels are scaled by `gain`; it is zero-padded to a
    multiple of the downsampling factor, and out-of-image pixels are zero.
    The default `k = [1] * factor` corresponds to average pooling.

    Args:
        x:           Input tensor, `[N, C, H, W]` or `[N, H, W, C]`.
        k:           FIR filter (`[firH, firW]` or separable `[firN]`).
        factor:      Integer downsampling factor (default: 2).
        gain:        Scaling factor for signal magnitude (default: 1.0).
        data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
        impl:        `"ref"` or `"cuda"` (default).

    Returns:
        Tensor `[N, C, H // factor, W // factor]` or
        `[N, H // factor, W // factor, C]`, same dtype as `x`.
    """
    assert isinstance(factor, int) and factor >= 1
    if k is None:
        k = [1] * factor
    kernel = _setup_kernel(k) * gain
    pad_total = kernel.shape[0] - factor
    return _simple_upfirdn_2d(
        x,
        kernel,
        down=factor,
        pad0=(pad_total + 1) // 2,
        pad1=pad_total // 2,
        data_format=data_format,
        impl=impl,
    )
# ----------------------------------------------------------------------------
def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
    r"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.

    Implemented as a strided `conv2d_transpose` (which performs both the
    zero-insertion upsampling and the convolution in one op) followed by a
    single FIR filtering pass. Padding is performed only once at the
    beginning, not between the operations. The fused op is considerably
    more efficient than performing the same calculation using standard
    TensorFlow ops. It supports gradients of arbitrary order.

    Args:
        x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
        w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
            Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
        k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
            The default is `[1] * factor`, which corresponds to nearest-neighbor
            upsampling.
        factor: Integer upsampling factor (default: 2).
        gain: Scaling factor for signal magnitude (default: 1.0).
        data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
        impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).

    Returns:
        Tensor of the shape `[N, C, H * factor, W * factor]` or
        `[N, H * factor, W * factor, C]`, and same datatype as `x`.
    """
    assert isinstance(factor, int) and factor >= 1

    # Check weight shape (square spatial kernel required).
    w = tf.convert_to_tensor(w)
    assert w.shape.rank == 4
    convH = w.shape[0]
    convW = w.shape[1]
    inC = _shape(w, 2)
    outC = _shape(w, 3)
    assert convW == convH

    # Setup filter kernel; factor**2 compensates for inserted zeros, and the
    # conv kernel extent is folded into the FIR padding.
    if k is None:
        k = [1] * factor
    k = _setup_kernel(k) * (gain * (factor ** 2))
    p = (k.shape[0] - factor) - (convW - 1)

    # Determine data dimensions (strides, static output shape, group count).
    if data_format == "NCHW":
        stride = [1, 1, factor, factor]
        output_shape = [
            _shape(x, 0),
            outC,
            (_shape(x, 2) - 1) * factor + convH,
            (_shape(x, 3) - 1) * factor + convW,
        ]
        num_groups = _shape(x, 1) // inC
    else:
        stride = [1, factor, factor, 1]
        output_shape = [
            _shape(x, 0),
            (_shape(x, 1) - 1) * factor + convH,
            (_shape(x, 2) - 1) * factor + convW,
            outC,
        ]
        num_groups = _shape(x, 3) // inC

    # Transpose weights: flip the kernel spatially and swap the in/out
    # channel axes (per group) to match conv2d_transpose's weight layout,
    # so the fused op matches upsample-then-conv2d.
    w = tf.reshape(w, [convH, convW, inC, num_groups, -1])
    w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])
    w = tf.reshape(w, [convH, convW, -1, num_groups * inC])

    # Execute: transpose-conv does upsample+conv, then one FIR pass applies
    # the (already gain/factor-scaled) resampling filter.
    x = tf.nn.conv2d_transpose(
        x,
        w,
        output_shape=output_shape,
        strides=stride,
        padding="VALID",
        data_format=data_format,
    )
    return _simple_upfirdn_2d(
        x,
        k,
        pad0=(p + 1) // 2 + factor - 1,
        pad1=p // 2 + 1,
        data_format=data_format,
        impl=impl,
    )
# ----------------------------------------------------------------------------
def conv_downsample_2d(x, w, k=None, factor=2, gain=1, data_format="NCHW", impl="cuda"):
    r"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
    Padding is performed only once at the beginning, not between the operations.
    The fused op is considerably more efficient than performing the same calculation
    using standard TensorFlow ops. It supports gradients of arbitrary order.
    Args:
      x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
      w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.
        Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
      k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
        The default is `[1] * factor`, which corresponds to average pooling.
      factor: Integer downsampling factor (default: 2).
      gain: Scaling factor for signal magnitude (default: 1.0).
      data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
      impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
    Returns:
        Tensor of the shape `[N, C, H // factor, W // factor]` or
        `[N, H // factor, W // factor, C]`, and same datatype as `x`.
    """
    assert isinstance(factor, int) and factor >= 1
    w = tf.convert_to_tensor(w)
    convH, convW, _inC, _outC = w.shape.as_list()
    # Only square convolution kernels are supported.
    assert convW == convH
    if k is None:
        # Default FIR filter: a box filter, equivalent to average pooling.
        k = [1] * factor
    k = _setup_kernel(k) * gain
    # Total padding so the FIR filtering plus the strided conv below stay
    # aligned with the downsampled output grid.
    p = (k.shape[0] - factor) + (convW - 1)
    if data_format == "NCHW":
        s = [1, 1, factor, factor]
    else:
        s = [1, factor, factor, 1]
    # Low-pass filter first (upfirdn with up=down=1, padding folded in here),
    # then perform the convolution with stride `factor`.
    x = _simple_upfirdn_2d(
        x, k, pad0=(p + 1) // 2, pad1=p // 2, data_format=data_format, impl=impl
    )
    return tf.nn.conv2d(x, w, strides=s, padding="VALID", data_format=data_format)
# ----------------------------------------------------------------------------
# Internal helper funcs.
def _shape(tf_expr, dim_idx):
    """Return dimension ``dim_idx`` of ``tf_expr``.

    Prefers the statically known size when the rank and that dimension are
    defined; otherwise falls back to the dynamic ``tf.shape`` tensor.
    """
    static_rank = tf_expr.shape.rank
    if static_rank is not None:
        static_dim = tf_expr.shape[dim_idx]
        if static_dim is not None:
            return static_dim
    # Rank or dimension unknown at graph-construction time: query at runtime.
    return tf.shape(tf_expr)[dim_idx]
def _setup_kernel(k):
    """Normalize a FIR filter kernel to a square 2-D float32 array.

    A 1-D (separable) filter is expanded into the full 2-D kernel via its
    outer product; the result is normalized so its entries sum to one.
    """
    kernel = np.asarray(k, dtype=np.float32)
    if kernel.ndim == 1:
        # Separable filter: build the 2-D kernel from the 1-D taps.
        kernel = np.outer(kernel, kernel)
    kernel /= np.sum(kernel)
    assert kernel.ndim == 2
    assert kernel.shape[0] == kernel.shape[1]
    return kernel
def _simple_upfirdn_2d(
    x, k, up=1, down=1, pad0=0, pad1=0, data_format="NCHW", impl="cuda"
):
    """Apply `upfirdn_2d` with identical up/down factors and padding on both axes.

    For NCHW input, channels are folded into the batch dimension so the
    underlying op (which appears to operate on `[N, H, W, 1]` tensors --
    confirm against `upfirdn_2d`'s contract) can process them; the result is
    reshaped back to NCHW afterwards.
    """
    assert data_format in ["NCHW", "NHWC"]
    assert x.shape.rank == 4
    y = x
    if data_format == "NCHW":
        # Fold channels into the batch axis: [N, C, H, W] -> [N*C, H, W, 1].
        y = tf.reshape(y, [-1, _shape(y, 2), _shape(y, 3), 1])
    y = upfirdn_2d(
        y,
        k,
        upx=up,
        upy=up,
        downx=down,
        downy=down,
        padx0=pad0,
        padx1=pad1,
        pady0=pad0,
        pady1=pad1,
        impl=impl,
    )
    if data_format == "NCHW":
        # Restore [N, C, H', W'] using the filtered spatial dimensions.
        y = tf.reshape(y, [-1, _shape(x, 1), _shape(y, 1), _shape(y, 2)])
    return y
# ----------------------------------------------------------------------------
| StarcoderdataPython |
8055056 | <filename>sky_model/snrs/snrs_to_xml.py
"""
Convert SNRs to XML format.
"""
from pathlib import Path
import numpy as np
import astropy.units as u
from astropy.table import Table, Column
SOURCE_LIBRARY_TEMPLATE = """\
<?xml version="1.0" standalone="no"?>
<source_library title="CTA 1DC simulated supernova remnants">
{xml_sources}
</source_library>
"""
SOURCE_TEMPLATE = """
<source name="{source_name}" type="ExtendedSource">
{xml_spectral}
{xml_spatial}
</source>
"""
SPATIAL_TEMPLATE = """\
<spatialModel type="RadialShell">
<parameter name="GLON" value="{glon:.5f}" scale="1.0" min="-360" max="360" free="1"/>
<parameter name="GLAT" value="{glat:.5f}" scale="1.0" min="-90" max="90" free="1"/>
<parameter name="Radius" value="{radius:.5f}" scale="1.0" min="1e-10" max="1000" free="1"/>
<parameter name="Width" value="{width:.5f}" scale="1.0" min="1e-10" max="1000" free="1"/>
</spatialModel>"""
SPECTRUM_TEMPLATE = """\
<spectrum type="NodeFunction">
{xml_spectrum_nodes}\
</spectrum>"""
# Multi-line, more readable version
SPECTRUM_NODE_TEMPLATE_READABLE = """\
<node>
<parameter name="Energy" value="{energy:.5f}" scale="1e06" min="0.1" max="1.0e20" free="0"/>
<parameter name="Intensity" value="{dnde:.5g}" scale="1e-10" min="1e-20" max="1000.0" free="1"/>
</node>
"""
# Here you can select which one you want
SPECTRUM_NODE_TEMPLATE = SPECTRUM_NODE_TEMPLATE_READABLE
def make_table_spectrum_xml(sed_energy, sed_dnde):
    """Render a ``NodeFunction`` spectrum XML snippet for one source.

    Args:
        sed_energy: Node energies; multiplied by 1e-6 to match the node
            template's ``scale="1e06"``.
        sed_dnde: Differential fluxes; multiplied by 1e+10 to match the node
            template's ``scale="1e-10"``.

    Returns:
        str: The ``<spectrum>`` element with one ``<node>`` per point.
    """
    nodes = [
        SPECTRUM_NODE_TEMPLATE.format(energy=1e-6 * e, dnde=1e+10 * f)
        for e, f in zip(sed_energy, sed_dnde)
    ]
    return SPECTRUM_TEMPLATE.format(xml_spectrum_nodes=''.join(nodes))
def make_spectral_point_selection(row):
    """Select the usable spectral points of one SNR's SED.

    Jürgen requested that nodes with zero or very low flux be removed so the
    output works with ctools; points below an arbitrary low threshold are
    dropped.  SNRs where almost all fluxes are that faint are flagged for
    removal entirely via the ``keep`` entry.

    Args:
        row: Mapping with ``'sed_energy'`` and ``'sed_dnde'`` numpy arrays.

    Returns:
        dict: ``sed_energy`` / ``sed_dnde`` filtered arrays and a boolean
        ``keep`` that is True only if more than 3 points survive the cut.
    """
    flux = row['sed_dnde']
    energy = row['sed_energy']
    significant = flux > 1e-20
    return dict(
        sed_energy=energy[significant],
        sed_dnde=flux[significant],
        keep=significant.sum() > 3,
    )
def make_snr_xml(table_sed, table):
    """Build the XML source library string for all SNRs passing the flux cuts.

    Side effect: adds a boolean 'keep' column to ``table`` marking which rows
    made it into the XML output.
    """
    print('Number of SNRs from Pierre: {}'.format(len(table)))
    # NOTE(review): same label printed twice; both tables come from the same
    # file, so the counts are expected to match.
    print('Number of SNRs from Pierre: {}'.format(len(table_sed)))
    snr_in_output = 0
    keep = []
    xml_sources = ''
    for row in table_sed:
        spec = make_spectral_point_selection(row)
        keep.append(spec['keep'])
        if not spec['keep']:
            # Too few usable spectral points: skip this SNR entirely.
            continue
        snr_in_output += 1
        xml_spectral = make_table_spectrum_xml(
            sed_energy=spec['sed_energy'],
            sed_dnde=spec['sed_dnde'],
        )
        # Assumption on width of the SNR shell
        # Pierre simulates a thin shell
        # but Jurgen cannot handle 0, we put 5% of the shell radius
        width_fraction = 0.05
        radius = u.Quantity(row['sigma'], 'arcmin').to('deg')
        width = width_fraction * radius
        xml_spatial = SPATIAL_TEMPLATE.format(
            glon=row['glon'],
            glat=row['glat'],
            radius=radius.value,
            width=width.value,
        )
        # Source names are derived from the row index within the SED table.
        source_name = 'snr_{}'.format(row.index)
        xml_source = SOURCE_TEMPLATE.format(
            source_name=source_name,
            xml_spectral=xml_spectral,
            xml_spatial=xml_spatial,
        )
        xml_sources += xml_source
    table['keep'] = Column(keep, description='')
    print('Number of SNRs in output XML: {}'.format(snr_in_output))
    xml = SOURCE_LIBRARY_TEMPLATE.format(xml_sources=xml_sources)
    return xml
def add_sed_columns(table):
    """Add per-row SED array columns ``sed_energy`` and ``sed_dnde`` to ``table``.

    The energy axis is taken from ``table.meta['energy_array']`` (TeV) and
    tiled over all rows; the fluxes are copied from the per-energy scalar
    columns.  Both arrays are converted to MeV-based units for the XML output.

    Args:
        table: Astropy table with an ``energy_array`` metadata entry and one
            flux column per energy, starting at column index 6
            (assumed layout -- TODO confirm against the input ECSV schema).
    """
    energy_array = np.array(table.meta['energy_array'])
    sed_energy = np.tile(energy_array, reps=(len(table), 1))
    # Copy over fluxes into array column. Iterate over the actual number of
    # energy nodes instead of a hard-coded 50, so tables with a different
    # energy sampling keep working.
    sed_dnde = np.empty_like(sed_energy)
    for col_idx in range(len(energy_array)):
        sed_dnde[:, col_idx] = table.columns[6 + col_idx]
    table['sed_energy'] = u.Quantity(sed_energy, 'TeV').to('MeV')
    table['sed_dnde'] = u.Quantity(sed_dnde, 'cm-2 s-1 TeV-1').to('cm-2 s-1 MeV-1')
if __name__ == '__main__':
    # Convert both SNR model versions from ECSV to the ctools XML format.
    for version in [1, 2]:
        filename = 'ctadc_skymodel_gps_sources_snr_{}.ecsv'.format(version)
        print('Reading {}'.format(filename))
        table = Table.read(filename, format='ascii.ecsv')
        #table.remove_column('skip')
        # Second, independent copy of the same file: this one receives the
        # per-row SED array columns, while `table` only gains the 'keep'
        # column as a side effect of make_snr_xml().
        table_sed = Table.read(filename, format='ascii.ecsv')
        add_sed_columns(table_sed)
        print(table)
        xml = make_snr_xml(table_sed, table)
        print(table)
        filename = 'ctadc_skymodel_gps_sources_snr_{}_keep.ecsv'.format(version)
        print('Writing {}'.format(filename))
        table.write(filename, format='ascii.ecsv', overwrite=True)
        filename = 'ctadc_skymodel_gps_sources_snr_{}.xml'.format(version)
        print('Writing {}'.format(filename))
        Path(filename).write_text(xml)
| StarcoderdataPython |
100173 | ## An Eve optimizer implementation in Chainer
# By <NAME>
# https://github.com/muupan/chainer-eve
# Modified by <NAME>
from __future__ import division
import math
import numpy
from chainer import optimizer
from chainer.optimizers import adam
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.alpha = 0.001
_default_hyperparam.beta1 = 0.9
_default_hyperparam.beta2 = 0.999
_default_hyperparam.beta3 = 0.999
_default_hyperparam.c = 10.0
_default_hyperparam.eps = 1e-8
_default_hyperparam.eta = 1.0
_default_hyperparam.f_star = 0.0
_default_hyperparam.weight_decay_rate = 0
_default_hyperparam.amsgrad = False
_default_hyperparam.adabound = False
def _learning_rate(hp, t, d_tilde):
    """Return the bias-corrected Eve learning rate at update step ``t``.

    Mirrors Adam's bias correction, with the base rate additionally divided
    by the objective-dependent coefficient ``d_tilde``.

    Args:
        hp: Hyperparameter object providing ``alpha``, ``beta1`` and ``beta2``.
        t (int): Number of update steps already performed; must be >= 1.
        d_tilde (float): Smoothed objective-dependent coefficient.

    Raises:
        RuntimeError: If no update step has been performed yet (``t == 0``).
    """
    if t == 0:
        raise RuntimeError(
            'Can\'t determine the learning rate of Eve optimizer '
            'because the update steps have not been started.')
    bias_correction1 = 1. - math.pow(hp.beta1, t)
    bias_correction2 = 1. - math.pow(hp.beta2, t)
    return (hp.alpha / d_tilde) * math.sqrt(bias_correction2) / bias_correction1
class EveRule(adam.AdamRule):
    """Update rule of Eve optimization algorithm.
    See: https://arxiv.org/abs/1611.01505v3
    Before calling :meth:`update`, :attr:`d_tilde` must be set.
    Args:
        parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
            that provides the default values.
        alpha (float): Coefficient of learning rate.
        beta1 (float): Exponential decay rate of the first order moment.
        beta2 (float): Exponential decay rate of the second order moment.
        eps (float): Small value for the numerical stability.
        eta (float): Schedule multiplier, can be used for warm restarts.
        weight_decay_rate (float): Weight decay rate.
        amsgrad (bool): Whether to use the AMSGrad variant of Eve.
    """
    # Class-level default. Eve.update() assigns the current
    # objective-dependent coefficient here before each parameter update.
    d_tilde = None
    @property
    def lr(self):
        # Scales the Adam-style learning rate by 1/d_tilde (see
        # _learning_rate); presumably shadows AdamRule's own lr property --
        # confirm against the installed chainer version.
        assert self.d_tilde is not None
        return _learning_rate(self.hyperparam, self.t, self.d_tilde)
class Eve(optimizer.GradientMethod):
    """Eve optimizer.

    See: https://arxiv.org/abs/1611.01505v3

    Eve scales Adam's learning rate by an objective-dependent coefficient
    ``d_tilde``, so :meth:`update` must be given the computed loss variable
    itself rather than just gradients.

    Args:
        alpha (float): Coefficient of learning rate.
        beta1 (float): Exponential decay rate of the first order moment.
        beta2 (float): Exponential decay rate of the second order moment.
        beta3 (float): Exponential decay rate of the objective-dependent
            coefficient of learning rate.
        c (float): Constant used to clip the objective-dependent coefficient.
        eps (float): Small value for the numerical stability.
        eta (float): Schedule multiplier, can be used for warm restarts.
        f_star (float): Minimum value that the loss function can take.
        weight_decay_rate (float): Weight decay rate.
        amsgrad (bool): Whether to use AMSGrad variant of Eve.
        adabound (bool): Whether to use the AdaBound variant of Eve.
    """

    def __init__(self,
                 alpha=_default_hyperparam.alpha,
                 beta1=_default_hyperparam.beta1,
                 beta2=_default_hyperparam.beta2,
                 beta3=_default_hyperparam.beta3,
                 c=_default_hyperparam.c,
                 eps=_default_hyperparam.eps,
                 eta=_default_hyperparam.eta,
                 f_star=_default_hyperparam.f_star,
                 weight_decay_rate=_default_hyperparam.weight_decay_rate,
                 amsgrad=_default_hyperparam.amsgrad,
                 adabound=_default_hyperparam.adabound,
                 ):
        super(Eve, self).__init__()
        self.hyperparam.alpha = alpha
        self.hyperparam.beta1 = beta1
        self.hyperparam.beta2 = beta2
        self.hyperparam.beta3 = beta3
        self.hyperparam.c = c
        self.hyperparam.eps = eps
        self.hyperparam.eta = eta
        self.hyperparam.f_star = f_star
        self.hyperparam.weight_decay_rate = weight_decay_rate
        self.hyperparam.amsgrad = amsgrad
        self.hyperparam.adabound = adabound

    alpha = optimizer.HyperparameterProxy('alpha')
    beta1 = optimizer.HyperparameterProxy('beta1')
    beta2 = optimizer.HyperparameterProxy('beta2')
    beta3 = optimizer.HyperparameterProxy('beta3')
    c = optimizer.HyperparameterProxy('c')
    eps = optimizer.HyperparameterProxy('eps')
    eta = optimizer.HyperparameterProxy('eta')
    f_star = optimizer.HyperparameterProxy('f_star')
    weight_decay_rate = optimizer.HyperparameterProxy('weight_decay_rate')
    amsgrad = optimizer.HyperparameterProxy('amsgrad')
    # Exposed as an attribute for consistency with the other hyperparameters.
    adabound = optimizer.HyperparameterProxy('adabound')

    def setup(self, link):
        """Sets a target link and initializes the optimizer states.

        Given link is set to the :attr:`target` attribute. It also prepares the
        optimizer state dictionaries corresponding to all parameters in the
        link hierarchy. The existing states are discarded.

        Args:
            link (~chainer.Link): Target link object.

        Returns:
            The optimizer instance.

        .. note::
           As of v4.0.0, this function returns the optimizer instance itself
           so that you can instantiate and setup the optimizer in one line,
           e.g., ``optimizer = SomeOptimizer().setup(link)``.
        """
        super(Eve, self).setup(link)
        # d_tilde and f only become meaningful after the first update step.
        self.d_tilde = numpy.nan
        self.f = numpy.nan
        return self

    def create_update_rule(self):
        """Creates an :class:`EveRule` sharing this optimizer's hyperparameters."""
        return EveRule(self.hyperparam)

    @property
    def lr(self):
        """Current effective learning rate (valid only after an update step)."""
        return _learning_rate(self.hyperparam, self.t, self.d_tilde)

    def update(self, loss, *args, **kwds):
        """Updates parameters based on the given loss variable.

        Because Eve uses loss values, the computed loss variable is required
        unlike in the case of other optimizers.

        Args:
            loss (~chainer.Variable): Computed loss to be minimized; its
                scalar value drives the objective-dependent coefficient
                ``d_tilde``.
            *args, **kwds: Unused; kept for interface compatibility.
        """
        loss_value = float(loss.array)
        self.reallocate_cleared_grads()
        self.call_hooks('pre')
        self.t += 1
        self._update_d_tilde_and_f(loss_value)
        for param in self.target.params():
            # Every rule needs the current d_tilde to compute its step size.
            param.update_rule.d_tilde = self.d_tilde
            param.update()
        self.reallocate_cleared_grads()
        self.call_hooks('post')

    def serialize(self, serializer):
        """Serializes or deserializes the optimizer.

        It only saves or loads the following things:

        - Optimizer states
        - Global states (:attr:`t`, :attr:`epoch`, :attr:`d_tilde`, and
          :attr:`f`)

        **It does not save or load the parameters of the target link.** They
        should be separately saved or loaded.

        Args:
            serializer (~chainer.AbstractSerializer): Serializer or
                deserializer object.
        """
        super(Eve, self).serialize(serializer)
        self.d_tilde = serializer('d_tilde', self.d_tilde)
        self.f = serializer('f', self.f)

    def _update_d_tilde_and_f(self, loss):
        """Updates the smoothed coefficient ``d_tilde`` and tracked loss ``f``."""
        if self.t > 1:
            # Relative change of the objective, clipped to [1/c, c], then
            # exponentially smoothed with decay beta3 (Eve paper, Algorithm 1).
            d = abs(loss - self.f) / (min(loss, self.f) - self.f_star)
            d_hat = numpy.clip(d, 1 / self.c, self.c)
            self.d_tilde = self.beta3 * self.d_tilde + (1 - self.beta3) * d_hat
        else:
            # First step: no previous loss to compare against.
            self.d_tilde = 1
        self.f = loss
| StarcoderdataPython |
6633385 | import asyncio
from wampify.wamp_client import WAMPClient
async def main():
    """Demo: issue one RPC call and one publish against a local WAMP router."""
    client = WAMPClient(
        # NOTE(review): '1192.168.127.12' is not a valid IPv4 address --
        # likely a typo for a 192.168.x.x host; confirm the call endpoint.
        'http://1192.168.127.12:8080/call',
        'http://127.0.0.1:8080/publish',
        'client',
        'secret'
    )
    print(await client.call('com.example.pow', 10))
    print(await client.publish('com.example.hello', 'Ivan'))
# Run the demo only when executed as a script.
if __name__ == '__main__':
    asyncio.run(main())
| StarcoderdataPython |
11240113 | """
PipelineNode instances are used to track and manage subprocesses run by shtk
Shells.
"""
import abc
import asyncio
import signal
import sys
from .util import export
__all__ = []
@export
class PipelineNode(abc.ABC):
    """
    Abstract base class for subprocess management nodes
    Attributes:
        children (list of PipelineNode): children of this node
        stdin_stream (None or Stream): Stream to use for stdin
        stdout_stream (None or Stream): Stream to use for stdout
        stderr_stream (None or Stream): Stream to use for stderr
    """
    def __init__(self, event_loop):
        # Leaf nodes keep children empty; composite nodes append to it.
        self.children = []
        self.stdin_stream = None
        self.stderr_stream = None
        self.stdout_stream = None
        self.event_loop = event_loop
    @classmethod
    async def create(cls, *args, **kwargs):
        """
        Instantiates and runs the node
        Args:
            *args: passed to the constructor
            **kwargs: passed to the constructor
        Returns:
            PipelineNode:
                The instantiated and run node.
        """
        instance = cls(*args, **kwargs)
        await instance.run()
        return instance
    async def run(self):
        """
        Runs the process
        """
    @staticmethod
    async def _get_return_code(rc_list, idx, coro):
        # Store the awaited result at a fixed index so return codes keep
        # their positional association even when tasks finish out of order.
        rc_list[idx] = await coro
    def flatten_children(self):
        """
        Flattens the PipelineNode DAG into a list of PipelineProcess objects
        using a depth-first search.
        Returns:
            list of PipelineProcess:
                All child PipelineProcess nodes
        """
        ret = []
        if len(self.children) > 0:
            for child in self.children:
                ret.extend(PipelineNode.flatten_children(child))
        else:
            # A node without children is treated as a leaf process node.
            ret.append(self)
        return ret
    # NOTE(review): the no-self-use pragma below looks stale -- the method
    # uses self via poll() and flatten_children().
    def send_signal(self, signum): #pylint: disable=no-self-use
        """
        Sends a signal to all child ProcessNode processes.
        Args:
            signum (int): the signal to send.
        """
        poll_result = self.poll()
        for child, rc in zip(self.flatten_children(), poll_result):
            if rc is None:
                # Only signal processes that have not exited; swallow the
                # race where a process dies between poll() and the signal.
                try:
                    child.proc.send_signal(signum)
                except ProcessLookupError:
                    pass
    def terminate(self):
        """
        Sends a signal.SIGTERM to all child ProcessNode processes.
        """
        self.send_signal(signal.SIGTERM)
    def kill(self):
        """
        Sends a signal.SIGKILL to all child ProcessNode processes.
        """
        self.send_signal(signal.SIGKILL)
    async def poll_async(self, ret):
        """
        Gets the return codes of all child ProcessNodes
        Args:
            ret (list of [int, None]): a list that will be modified to contain
                a collection of return codes from flattened child ProcessNodes.
                Child processes that have exited will be represented by their
                return code.  Child processes that have not exited will be
                represented by None.
        """
        ret.clear()
        tasks = []
        for it_child, child in enumerate(self.flatten_children()):
            ret.append(None)
            coro = self._get_return_code(ret, it_child, child.proc.wait())
            task = self.event_loop.create_task(coro)
            tasks.append(task)
        try:
            for task in tasks:
                await task
        except asyncio.CancelledError:
            # Cancelled (e.g. by a wait_for timeout): cancel any tasks that
            # are still pending so they don't leak into the event loop.
            for task in tasks:
                try:
                    if not task.done():
                        task.cancel()
                        await task
                except asyncio.CancelledError:
                    pass
        else:
            # NOTE(review): on cancellation this method implicitly returns
            # None; callers rely on `ret` being mutated in place instead.
            return ret
    def poll(self, timeout=1e-6):
        """
        Synchronous wrapper for poll_async(). Gets the return codes of all
        child ProcessNodes.
        Returns:
            list of (int or None): A list containing return codes from
                flattened child ProcessNodes. Child processes that have exited
                will be represented by their integer return code. Child
                processes that have not exited will be represented by None.
        """
        ret = []
        try:
            self.event_loop.run_until_complete(
                asyncio.wait_for(
                    self.poll_async(ret),
                    timeout=timeout,
                    # NOTE(review): the loop= keyword was removed from
                    # asyncio.wait_for in Python 3.10 -- confirm the range of
                    # supported Python versions.
                    loop=self.event_loop
                )
            )
        except asyncio.TimeoutError:
            # Timeout just means some children are still running; `ret`
            # already holds None for those slots.
            pass
        return ret
    async def wait_async(self):
        """
        Waits for and retrieves the return codes of all child ProcessNodes.
        Returns:
            list of int:
                A list of return codes from a flattened collection of child
                processes.
        """
        return await self.poll_async([])
    def wait(self):
        """
        Synchronous wrapper for wait_async().
        Returns:
            list of int:
                A list of return codes from a flattened collection of child
                processes.
        """
        return self.event_loop.run_until_complete(self.wait_async())
    @abc.abstractmethod
    def __repr__(self):
        pass
    @abc.abstractmethod
    def __str__(self):
        pass
@export
class PipelineChannel(PipelineNode):
    """
    A pipeline connecting two nodes, analogous to ``left | right`` in a shell.

    Args:
        left (PipelineNode): node whose stdout is (usually) fed to right
        right (PipelineNode): node whose stdin is (usually) read from left

    Attributes:
        left (PipelineNode): the upstream node
        right (PipelineNode): the downstream node
    """
    def __init__(self, event_loop, left, right):
        super().__init__(event_loop)
        self.left, self.right = left, right
        # The channel is fed on its left end and read on its right end, so it
        # borrows the corresponding endpoint streams.
        self.stdin_stream = left.stdin_stream
        self.stdout_stream = right.stdout_stream
        self.stderr_stream = right.stderr_stream
        self.children += [left, right]

    def __repr__(self):
        return "{!r} | {!r}".format(self.left, self.right)

    def __str__(self):
        return "{!s} | {!s}".format(self.left, self.right)
@export
class PipelineProcess(PipelineNode):
    """
    An interface representing subprocesses.
    Args:
        cwd (str or pathlib.Path): The current working directory
        args (list of str or pathlib.Path): The arguments for the process
            (including the base command).
        env (dict of str): The environment variables for the process
        stdin_stream (Stream): The Stream whose .reader() is used as stdin
        stdout_stream (Stream): The Stream whose .writer() is used as stdout
        stderr_stream (Stream): The Stream whose .writer() is used as stderr
        user (None, int, or str): The user to pass to
            asyncio.create_subprocess_exec(). Requires Python >= 3.9.
        group (None, int, or str): The group to pass to
            asyncio.create_subprocess_exec(). Requires Python >= 3.9.
    Raises:
        AssertionError: When len(args) <= 0
    """
    def __init__(
            self, event_loop, cwd, args, env, stdin_stream, stdout_stream,
            stderr_stream, user=None, group=None
    ):
        super().__init__(event_loop)
        self.cwd = cwd
        self.args = args
        # Copy so later mutations of the caller's mapping don't leak in.
        self.environment = dict(env)
        self.proc = None
        self.wait_future = None
        self.user = user
        self.group = group
        self.stdin_stream = stdin_stream
        self.stdout_stream = stdout_stream
        self.stderr_stream = stderr_stream
        assert len(self.args) > 0
    async def run(self):
        """
        Runs the process using asyncio.create_subprocess_exec()
        Raises:
            NotImplementedError: if `user` or `group` was given on
                Python < 3.9, where the underlying API lacks support.
        """
        extra_kwargs = {}
        if self.user is not None:
            extra_kwargs['user'] = self.user
            if (sys.version_info.major, sys.version_info.minor) < (3, 9):
                raise NotImplementedError("Running subprocesses as a different user requires Python version >= 3.9") #pylint: disable=line-too-long
        if self.group is not None:
            extra_kwargs['group'] = self.group
            if (sys.version_info.major, sys.version_info.minor) < (3, 9):
                raise NotImplementedError("Running subprocesses as a different group requires Python version >= 3.9") #pylint: disable=line-too-long
        # Streams supply the child's standard fds via reader()/writer()
        # (Stream is defined elsewhere -- presumably file descriptors or
        # file objects; confirm against shtk's Stream API).
        proc_start = asyncio.create_subprocess_exec(
            *self.args,
            stdin=self.stdin_stream.reader(),
            stdout=self.stdout_stream.writer(),
            stderr=self.stderr_stream.writer(),
            cwd=self.cwd,
            env=self.environment,
            restore_signals=True,
            close_fds=True,
            # NOTE(review): the loop= keyword was removed from asyncio
            # subprocess APIs in Python 3.10 -- confirm supported versions.
            loop=self.event_loop,
            **extra_kwargs
        )
        self.proc = await proc_start
    def __repr__(self):
        return f"PipelineProcess(cwd={self.cwd!r}, args={self.args!r}, env={self.environment!r}, stdin_stream={self.stdin_stream!r}, stdout_stream={self.stdout_stream!r}, stderr_stream={self.stderr_stream!r})" #pylint: disable=line-too-long
    def __str__(self):
        return f"PipelineProcess(args={self.args!r})"
| StarcoderdataPython |
1665942 | <reponame>stanislavkozlovski/python_wow<filename>models/characters/saved_character.py<gh_stars>10-100
from sqlalchemy import Column, Integer, String, Text, ForeignKey
from sqlalchemy.orm import relationship
from models.items.item_template import ItemTemplateSchema
from entities import Character
from constants import (CHARACTER_EQUIPMENT_BOOTS_KEY, CHARACTER_EQUIPMENT_LEGGINGS_KEY,
CHARACTER_EQUIPMENT_BELT_KEY, CHARACTER_EQUIPMENT_GLOVES_KEY,
CHARACTER_EQUIPMENT_BRACER_KEY,
CHARACTER_EQUIPMENT_CHESTGUARD_KEY, CHARACTER_EQUIPMENT_HEADPIECE_KEY,
CHARACTER_EQUIPMENT_NECKLACE_KEY,
CHARACTER_EQUIPMENT_SHOULDERPAD_KEY)
from classes import Paladin
from database.main import Base
class SavedCharacterSchema(Base):
    """
    This table holds information about saved player characters

    name - the name of the character
    character_class - the class of the character
    level - the level of the character
    gold - the amount of gold the character has

    Each *_id column references an item_template row holding the item
    equipped in that slot (NULL when the slot is empty); the matching
    relationship attribute resolves it to an ItemTemplateSchema object.
    """
    __tablename__ = 'saved_character'

    entry = Column(Integer, primary_key=True)
    name = Column(String(60))
    # Mapped to the DB column 'class' ('class' is a Python keyword).
    character_class = Column('class', String(60))
    level = Column(Integer)
    gold = Column(Integer)
    headpiece_id = Column(Integer, ForeignKey('item_template.entry'))
    shoulderpad_id = Column(Integer, ForeignKey('item_template.entry'))
    necklace_id = Column(Integer, ForeignKey('item_template.entry'))
    chestguard_id = Column(Integer, ForeignKey('item_template.entry'))
    bracer_id = Column(Integer, ForeignKey('item_template.entry'))
    gloves_id = Column(Integer, ForeignKey('item_template.entry'))
    belt_id = Column(Integer, ForeignKey('item_template.entry'))
    leggings_id = Column(Integer, ForeignKey('item_template.entry'))
    boots_id = Column(Integer, ForeignKey('item_template.entry'))
    headpiece: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[headpiece_id])
    shoulderpad: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[shoulderpad_id])
    necklace: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[necklace_id])
    chestguard: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[chestguard_id])
    bracer: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[bracer_id])
    gloves: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[gloves_id])
    belt: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[belt_id])
    leggings: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[leggings_id])
    boots: ItemTemplateSchema or None = relationship('ItemTemplateSchema', foreign_keys=[boots_id])

    def __init__(self, name: str, character_class: str, level: int, gold: int, headpiece_id: int,
                 shoulderpad_id: int, necklace_id: int, chestguard_id: int, bracer_id: int, gloves_id: int,
                 belt_id: int, leggings_id: int, boots_id: int):
        # An init function for easily creating an object when inserting a new
        # row into the table.
        super().__init__()
        self.name = name
        self.character_class = character_class
        self.level = level
        self.gold = gold
        self.headpiece_id = headpiece_id
        self.shoulderpad_id = shoulderpad_id
        self.necklace_id = necklace_id
        self.chestguard_id = chestguard_id
        self.bracer_id = bracer_id
        self.gloves_id = gloves_id
        self.belt_id = belt_id
        self.leggings_id = leggings_id
        self.boots_id = boots_id

    def build_equipment(self) -> {str: 'Item' or None}:
        """
        Create a dictionary holding the character's equipment in the same
        slot-keyed format the Character class uses, converting each equipped
        ItemTemplateSchema to a game Item object (empty slots stay None).
        """
        saved_equipment = {CHARACTER_EQUIPMENT_BOOTS_KEY: self.boots,
                           CHARACTER_EQUIPMENT_LEGGINGS_KEY: self.leggings,
                           CHARACTER_EQUIPMENT_BELT_KEY: self.belt,
                           CHARACTER_EQUIPMENT_GLOVES_KEY: self.gloves,
                           CHARACTER_EQUIPMENT_BRACER_KEY: self.bracer,
                           CHARACTER_EQUIPMENT_CHESTGUARD_KEY: self.chestguard,
                           CHARACTER_EQUIPMENT_SHOULDERPAD_KEY: self.shoulderpad,
                           CHARACTER_EQUIPMENT_NECKLACE_KEY: self.necklace,
                           CHARACTER_EQUIPMENT_HEADPIECE_KEY: self.headpiece}
        # Convert each equipped ItemTemplate to an Item object.
        for slot, item in saved_equipment.items():
            if item is not None:
                saved_equipment[slot] = item.convert_to_item_object()
        return saved_equipment

    def convert_to_character_object(self) -> Character:
        """Convert the SavedCharacter object to a Character object to be used in the game."""
        loaded_scripts: {str} = {script.script_name for script in self.loaded_scripts}
        killed_monsters: {int} = {monster.guid for monster in self.killed_monsters}
        completed_quests: {str} = {quest.quest_id for quest in self.completed_quests}
        inventory: {str: tuple} = {i_schema.item.name: (i_schema.item.convert_to_item_object(), i_schema.item_count)
                                   for i_schema in self.inventory}
        # Gold is stored alongside items under a reserved inventory key.
        inventory['gold'] = self.gold
        equipment = self.build_equipment()
        if self.character_class == 'paladin':
            return Paladin(name=self.name,
                           level=self.level,
                           loaded_scripts=loaded_scripts,
                           killed_monsters=killed_monsters,
                           completed_quests=completed_quests,
                           saved_inventory=inventory,
                           saved_equipment=equipment)
        else:
            raise Exception(f'Unsupported class - {self.character_class}')
class CompletedQuestsSchema(Base):
    """
    This table holds information about the completed quests for a specific character
    saved_character_id - id of the character (NOT unique)
    quest_id - the id of the quest that is completed
    Ex:
    saved_character_id, quest_id
    1, 1
    1, 2
    Meaning that the character whose completed_quests_id points to 1 has completed both quests - (1)Kill Wolves and (2)Kill Bears
    """
    # TODO: Holds more completed quests per row to minimize queries
    __tablename__ = 'saved_character_completed_quests'
    id = Column(Integer, primary_key=True)
    saved_character_id = Column(Integer, ForeignKey('saved_character.entry'))
    # NOTE(review): declared String while the docstring examples use integer
    # ids -- confirm against the type of quest_template.entry.
    quest_id = Column(String, ForeignKey('quest_template.entry'))
    # Throwaway '_' relationship: exists only to create the 'completed_quests'
    # backref on SavedCharacterSchema.
    _ = relationship('SavedCharacterSchema', foreign_keys=[saved_character_id], backref='completed_quests')
    quest = relationship('QuestSchema')
class InventorySchema(Base):
    """
    This table holds information about the inventory of a specific character
    saved_character_id - id of the character (NOT unique)
    item_id - the ID of the item in item_template
    item_count - the count of the item
    Example:
    saved_character_id, item_id, item_count
    1, 1, 5
    1, 2, 1
    Meaning the character whose inventory_id points to 1 has
        - 5 items of id 1
        - 1 item of id 2
    """
    # TODO: Holds more items in the row to minimize queries
    __tablename__ = 'saved_character_inventory'
    id = Column(Integer, primary_key=True)
    saved_character_id = Column(Integer, ForeignKey('saved_character.entry'))
    # Both id and item_id are flagged primary_key=True, giving this table a
    # composite primary key of (id, item_id).
    item_id = Column(Integer, ForeignKey('item_template.entry'), primary_key=True)
    item_count = Column(Integer)
    # Throwaway '_' relationship: exists only to create the 'inventory'
    # backref on SavedCharacterSchema.
    _ = relationship('SavedCharacterSchema', foreign_keys=[saved_character_id], backref='inventory')
    item = relationship('ItemTemplateSchema')
class KilledMonstersSchema(Base):
    """
    This table holds information about all the monsters that the character has killed into the saved_character_killed_monsters DB table
    saved_character_id - id of the character (NOT unique)
    GUID - GUID of the monster in the creatures DB table
    Table sample contents:
    saved_character_id, GUID(of monster)
    1, 14
    1, 7
    IMPORTANT: This works only for monsters that by design should not be killed twice if the player restarts the game
    """
    # TODO: Hold more killed monsters per row to minimize queries
    __tablename__ = 'saved_character_killed_monsters'
    id = Column(Integer, primary_key=True)
    saved_character_id = Column(Integer, ForeignKey('saved_character.entry'))
    guid = Column(Integer, ForeignKey('creatures.guid'))
    # Throwaway '_' relationship: exists only to create the 'killed_monsters'
    # backref on SavedCharacterSchema.
    _ = relationship('SavedCharacterSchema', foreign_keys=[saved_character_id], backref='killed_monsters')
    monster = relationship('CreaturesSchema')
class LoadedScriptsSchema(Base):
    """
    This table holds the character's loaded scripts into the saved_character_loaded_scripts DB table
    saved_character_id - id of the character (NOT unique)
    script_name - the name of the script
    Table sample contents:
    saved_character_id, script_name
    1, HASKELL_PRAXTON_CONVERSATION
    1, Something_Something_Something
    Meaning the character whose loaded_scripts_id has seen both scripts and should not see them again in the game.
    """
    # TODO: Hold more scripts per row to minimize queries
    __tablename__ = 'saved_character_loaded_scripts'
    id = Column(Integer, primary_key=True)
    saved_character_id = Column(Integer, ForeignKey('saved_character.entry'))
    # Throwaway '_' relationship: exists only to create the 'loaded_scripts'
    # backref on SavedCharacterSchema.
    _ = relationship('SavedCharacterSchema', foreign_keys=[saved_character_id], backref='loaded_scripts')
    script_name = Column(Text)
| StarcoderdataPython |
89997 | <filename>util/postprocessing.py
import sys
import os
import pickle
sys.path.append("..")
import util.structural as structural
import util.verilog as verilog
import dgl
if __name__ == "__main__":
    folder = "../GCN/predicts/io/plus2/nl55"
    total = 0
    total_matched = 0
    tried = 0
    # Pass 1: for every node predicted as an output, BFS through the graph
    # (reversed direction) and collect the predicted input nodes reached.
    # find input from outputs
    for case in os.listdir(folder):
        # if case != "ut7_sample3.pkl":
        #     continue
        case_name = os.path.join(folder, case)
        with open(case_name, "rb") as f:
            # g: DGL graph; pred_i/pred_o: per-node input/output predictions.
            g, pred_i, pred_o = pickle.load(f)
        assert len(pred_i) == len(pred_o) == g.number_of_nodes()
        matched = set()
        for idx, is_o in enumerate(pred_o):
            in_s = []
            if is_o == 0: # prediction: not output node
                continue
            for depth, nodes in enumerate(dgl.bfs_nodes_generator(g, idx, True)):
                for n in nodes:
                    n = n.item()
                    tried += 1
                    if pred_i[n] == 1:
                        matched.add(n)
                # Second sweep over the same frontier: stop the BFS once two
                # predicted inputs were found for this output node.
                for n in nodes:
                    n = n.item()
                    if pred_i[n] == 1:
                        in_s.append(n)
                        matched.add(n)
                    if len(in_s) == 2:
                        break
                if len(in_s) == 2:
                    break
        # Per-case recall: matched inputs / all predicted inputs.
        print(
            case,
            len(matched) / len([1 for v in pred_i if v == 1]),
            len(matched),
            len([1 for v in pred_i if v == 1]),
        )
        total += len([1 for v in pred_i if v == 1])
        total_matched += len(matched)
    print(total_matched / total, total, total_matched, tried)
    # Pass 2: for every node predicted as an input, BFS forwards and stop
    # as soon as one predicted output node is reached.
    # find outputs from inputs
    total = 0
    total_matched = 0
    tried = 0
    for case in os.listdir(folder):
        # if case != "ut7_sample3.pkl":
        #     continue
        case_name = os.path.join(folder, case)
        with open(case_name, "rb") as f:
            g, pred_i, pred_o = pickle.load(f)
        assert len(pred_i) == len(pred_o) == g.number_of_nodes()
        matched = set()
        for idx, is_i in enumerate(pred_i):
            if is_i == 0: # prediction: not input node
                continue
            is_match = False
            for depth, nodes in enumerate(dgl.bfs_nodes_generator(g, idx, False)):
                if depth == 0: # cannot count self as output...
                    continue
                for n in nodes:
                    n = n.item()
                    tried += 1
                    if pred_o[n] == 1:
                        matched.add(n)
                        is_match = True
                if is_match:
                    break
        # Per-case recall: matched outputs / all predicted outputs.
        print(
            case,
            len(matched) / len([1 for v in pred_o if v == 1]),
            len(matched),
            len([1 for v in pred_o if v == 1]),
        )
        total += len([1 for v in pred_o if v == 1])
        total_matched += len(matched)
    print(total_matched / total, total, total_matched, tried)
| StarcoderdataPython |
9600691 | __all__ = ("NewDatabase", "clear_cache")
__version__ = (1, 1, 7)
from pathlib import Path
DATA_DIR = Path(__file__).resolve().parent / "data"
INVENTORY_DIR = Path(__file__).resolve().parent / "data" / "additional_inventories"
from .ecoinvent_modification import NewDatabase, clear_cache
| StarcoderdataPython |
9733897 | <filename>pixel_perturbation.py
from torchvision import datasets, transforms, utils, models
from misc_functions import *
from gradcam import grad_cam
from functools import reduce
from saliency.inputgradient import Inputgrad
import gc
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.functional as F
from models.resnet import *
from models.vgg import *
# PATH variables
PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
data_PATH = PATH + 'dataset/'
result_path = PATH + 'results/'
# get unnormalize object for plotting saliency maps
unnormalize = unnormalize()
# same transformations for each dataset
transform_standard = transform()
# prevents F.interpolate from random behaviour which caused Cuda memory errors
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# HELPER FUNCTIONS
def print_dict(dictionary, div=False):
    """Debug helper: dump per-gradient-method results to stdout.

    Each value is indexed as v[0] (KL divergence, or mean) and v[1] (std).
    :param dictionary: gradient-method name -> per-k result lists
    :param div: True when the values hold KL divergences only
    """
    for method, stats in dictionary.items():
        print(f"Gradient method: {method}")
        if div is True:
            print(f"KL Div: {stats[0]}")
        else:
            print(f"Mean: {stats[0]}, Std: {stats[1]}")
def kl_div(P, Q):
    """Row-wise KL divergence KL(P || Q) of batched discrete distributions.

    Computes sum_i P_i * log(P_i / Q_i) per row; torch.nn.functional.kl_div
    returns different values, hence this explicit implementation.
    :param P: (batch, classes) probability tensor
    :param Q: (batch, classes) probability tensor
    :return: (batch,) tensor of divergences
    """
    ratio = P / Q
    return (P * ratio.log()).sum(dim=1)
def compute_difference_metrics(initial_output, final_output, tmp_results):
    """Record all absolute-difference metrics for one batch.

    Slots of ``tmp_results`` (appended in this fixed order):
    [0] score diffs, [1] prob diffs, [2] KL divergences,
    [3] top-k other-class score diffs, [4] top-k other-class prob diffs,
    [5] max other-class score diff, [6] max other-class prob diff.
    :param initial_output: network output before pixel removal
    :param final_output: network output after pixel removal
    :param tmp_results: list of 7 per-metric accumulator lists (mutated)
    :return: the same ``tmp_results`` object
    """
    # probabilities instead of raw output scores
    initial_probabilities = F.softmax(initial_output, dim=1)
    final_probabilities = F.softmax(final_output, dim=1)

    kldiv = kl_div(final_probabilities, initial_probabilities)
    score_diffs = get_temp_result(initial_output, final_output)
    prob_diffs = get_temp_result(initial_probabilities, final_probabilities)
    # other-class changes use the top-k classes to avoid division by zero
    other_diffs = get_other_class_change(initial_output, final_output, ARGS.topk)
    other_prob_diffs = get_other_class_change(initial_probabilities, final_probabilities, ARGS.topk)
    # largest change among the classes other than the most confident one
    other_max_diffs = other_diffs.max(1)[0]
    other_max_prob_diffs = other_prob_diffs.max(1)[0]

    metrics = (score_diffs, prob_diffs, kldiv, other_diffs,
               other_prob_diffs, other_max_diffs, other_max_prob_diffs)
    for bucket, values in zip(tmp_results, metrics):
        bucket.append(np.round(values.tolist(), 8))
    return tmp_results
def initialize_grad_and_model(grad_type, model_name, device):
    """Build the model and the saliency ("grad") object for one method.

    :param grad_type: 'gradcam', 'fullgrad', 'inputgrad' or anything else
        (e.g. 'random'), which yields a model with grad=None
    :param model_name: exact model name, e.g. 'vgg16_bn'
    :param device: torch device string
    :return: (model, grad) — grad is None for unknown grad types
    """
    if grad_type == 'gradcam':
        # Grad-CAM wraps the raw torchvision model, so it has its own builder
        return initialize_grad_cam(model_name, device)
    # Inputgrad and Fullgrad share the same model, different grad objects
    model = initialize_fullgrad(model_name, device)
    grad = None
    if grad_type == "fullgrad":
        grad = FullGrad(model)
    elif grad_type == "inputgrad":
        grad = Inputgrad(model)
    return model, grad
def get_temp_result(initial_out, final_out):
    """Absolute fractional change of the initially most confident class score.

    For each sample i with c_i = argmax(initial_out[i]), returns
    |final_out[i, c_i] - initial_out[i, c_i]| / initial_out[i, c_i].

    Bug fix: the previous implementation used
    ``final_out.index_select(1, predicted_class).max(0)[0]``, which takes a
    max over the *batch* dimension and therefore mixes scores of different
    samples whenever the batch size is > 1. ``gather`` selects each sample's
    own score for its own predicted class.
    :param initial_out: (batch, classes) output before removal
    :param final_out: (batch, classes) output after removal
    :return: (batch,) tensor of absolute fractional differences
    """
    # initially most confident class and its score, per sample
    initial_class_scores, predicted_class = initial_out.max(1)
    # score of that same class after the modification, per sample
    final_class_scores = final_out.gather(1, predicted_class.unsqueeze(1)).squeeze(1)
    return abs(final_class_scores - initial_class_scores) / initial_class_scores
def get_other_class_change(initial_out, final_out, topk):
    """Absolute fractional change of the top-k classes other than the
    initially most confident class.

    :param initial_out: output before removal
    :param final_out: output after removal
    :param topk: number of other classes considered
    :return: (batch, topk) tensor of absolute fractional differences
    """
    before = get_topk_other_scores(initial_out, initial_out, topk)
    after = get_topk_other_scores(initial_out, final_out, topk)
    return abs(before - after) / before
def get_other_classes_scores(initial_out, final_out):
    """Scores of every class except the initially most confident one.

    For each sample, drops the column of ``final_out`` corresponding to the
    argmax of ``initial_out`` and returns the remaining scores.

    Generalized: the class count is now read from the tensors instead of
    being hard-coded to 1000/999 (ImageNet only), so this also works for
    models with any number of output classes. Behavior for 1000-class
    inputs is unchanged.
    :param initial_out: (batch, classes) output before removal
    :param final_out: (batch, classes) output after removal
    :return: (batch, classes - 1) tensor of remaining scores
    """
    # most confident class before removal
    _, predicted_class = initial_out.max(1)
    batch_size = initial_out.size(0)
    num_classes = final_out.size(1)
    final_class_scores = torch.zeros(batch_size, num_classes - 1)
    # remove the predicted class column, keeping the others in order
    for i in range(batch_size):
        keep = [c for c in range(num_classes) if c != predicted_class[i].item()]
        final_class_scores[i] = final_out[i, keep]
    return final_class_scores
def get_topk_other_scores(initial_out, final_out, topk):
    """Scores (after removal) of the top-k "other" classes.

    The k classes are chosen by their *initial* scores, after excluding the
    initially most confident class; the returned values are their scores in
    ``final_out``.
    :param initial_out: output before removal
    :param final_out: output after removal
    :param topk: number of classes to keep
    :return: (batch, topk) tensor
    """
    before = get_other_classes_scores(initial_out, initial_out)
    after = get_other_classes_scores(initial_out, final_out)
    _, top_indices = torch.topk(before, k=topk)
    # pick each sample's top-k columns from the post-removal scores
    return after.gather(1, top_indices)
def get_max_other_class_change(initial_output, final_output):
    """Largest absolute fractional score change among all classes other than
    the initially most confident one, per sample.

    Bug fix: ``changes.max(1)`` returns a (values, indices) namedtuple; the
    previous code returned that whole tuple even though the docstring — and
    the analogous inline ``.max(1)[0]`` in compute_difference_metrics —
    promise just the values tensor.
    :param initial_output: output before removal
    :param final_output: output after removal
    :return: (batch,) tensor of maximal absolute fractional changes
    """
    other_initial_scores = get_other_classes_scores(initial_output, initial_output)
    other_final_scores = get_other_classes_scores(initial_output, final_output)
    changes = abs(other_initial_scores - other_final_scores) / other_initial_scores
    return changes.max(1)[0]
def append_mean_std(tmp_results, means, stds):
    """Append the rounded mean/std of one batch of per-image results.

    Mutates ``means`` and ``stds`` in place; values are rounded to 8 decimals.
    :param tmp_results: flat (or nested rectangular) list of values
    :param means: running list of per-k means
    :param stds: running list of per-k stds
    """
    values = np.asarray(tmp_results)
    for target, statistic in ((means, values.mean()), (stds, values.std())):
        target.append(np.round(statistic, 8))
def plot_all_grads(results_dict, filename=None):
    """
    Plot mean +/- std of the output change against %-pixels-removed, one
    curve per gradient method, for one configuration (model, removal kind).
    :param results_dict: method name -> [means per k, stds per k]
    :param filename: currently unused — the savefig call below is commented
        out, so the figure is only shown interactively
    :return: None
    """
    plt.figure()
    axes = plt.gca()
    # axes.set_xlim([0, ARGS.k[-1]*100])
    axes.set_xlabel('% pixels removed')
    axes.set_ylabel('Absolute fractional output change')
    # x tick labels as percentages (ARGS.k holds fractions)
    x_labels = [i * 100 for i in ARGS.k]
    for key, v in results_dict.items():
        # mean curve with a +/- one-std shaded band
        plt.plot(ARGS.k, np.array(v[0]), linewidth=1.2, label=str(key))
        plt.fill_between(ARGS.k, np.array(v[0]) - np.array(v[1]), np.array(v[0]) + np.array(v[1]), alpha=1 / 3)
    plt.xticks(ARGS.k, x_labels, rotation=45)
    plt.tight_layout()
    plt.legend()
    #plt.savefig(filename + ".png")
    plt.show()
def initialize_grad_cam(model_name, device, pretrained=True):
    """Build a torchvision model plus its Grad-CAM wrapper.

    Grad-CAM needs the original torchvision model object, hence a separate
    constructor from the FullGrad/Inputgrad path.
    :param model_name: torchvision model name, e.g. 'vgg16_bn'
    :param device: torch device string
    :param pretrained: load pretrained weights
    :return: (model, GradCAM instance)
    """
    factory = getattr(models, model_name)
    model = factory(pretrained=pretrained).to(device)
    model.eval()
    return model, grad_cam.GradCAM(model=model)
def initialize_fullgrad(model_name, device):
    """Instantiate a model for FullGrad/Inputgrad saliency, in eval mode.

    The constructors (vgg16, vgg16_bn, resnet18, ...) are imported into this
    module's namespace via the star imports at the top of the file.

    Security/robustness fix: ``eval(model_name)`` executed an arbitrary
    expression coming straight from the command line; a plain lookup in the
    module namespace resolves the same constructor names without allowing
    code execution, and fails with a clear error for unknown names.
    :param model_name: exact constructor name, e.g. 'vgg16_bn'
    :param device: torch device string
    :return: model on ``device`` in eval mode
    """
    try:
        constructor = globals()[model_name]
    except KeyError:
        raise ValueError("Unknown model name: {}".format(model_name))
    model = constructor(pretrained=True)
    model = model.to(device)
    model.eval()
    return model
def compute_saliency_per_grad(grad_type, grad, data, target_layer, target_class=None):
    """Compute saliency maps for one batch of images.

    :param grad_type: 'fullgrad', 'inputgrad' or 'gradcam'
    :param grad: the matching saliency object
    :param data: input image batch
    :param target_layer: Grad-CAM only — vgg: 'features', resnet: 'layer4'
    :param target_class: optional class override (e.g. the true label,
        to visualize wrong predictions)
    :return: saliency tensor, or None for any other grad_type
    """
    if grad_type in ("fullgrad", "inputgrad"):
        return grad.saliency(data, target_class=target_class)
    if grad_type == "gradcam":
        # Grad-CAM explains the top-1 predicted class of each image
        probs, ids = grad.forward(data)
        grad.backward(ids=ids[:, [0]])
        return grad.generate(target_layer=target_layer)
    return None
def get_filename(result_path, grad_type, index):
    """Build the output path for one saved saliency-map image."""
    model_name = ARGS.model + ARGS.model_type
    return "{}/{}_{}_{}.png".format(result_path, grad_type, model_name, index)
def save_saliency_map_batch(saliency, data, result_path, grad_type, index):
    """Save the saliency map of every image in a batch.

    :param saliency: saliency maps, one per image
    :param data: the (normalized) input batch
    :param result_path: output directory
    :param grad_type: saliency method name, used in the file name
    :param index: batch counter, used in the file name
    """
    for img_idx in range(len(data)):
        # undo the input normalization so the image is viewable
        image = unnormalize(data[img_idx, :, :, :].cpu())
        image = image.view(1, 3, 224, 224)[-1, :, :, :]
        sal_map = saliency[img_idx, :, :, :]
        target_file = get_filename(result_path, grad_type, index)
        save_saliency_map(image, sal_map, target_file)
def print_memory():
    """Debug helper: list live tensors and print the total CUDA tensor size.

    Walks everything the garbage collector tracks; used to hunt memory-leak
    issues. Per-object errors are swallowed because gc can hand back objects
    in odd states.
    :return: None (prints to stdout, total reported in GB)
    """
    bits_per_value = {
        'torch.cuda.FloatTensor': 32,
        'torch.cuda.LongTensor': 64,
        'torch.cuda.IntTensor': 32,
    }
    total_bits = 0
    for obj in gc.get_objects():
        try:
            tensor_like = torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data))
            if not tensor_like:
                continue
            print(type(obj), obj.size())
            # only non-scalar CUDA tensors of known dtypes are counted
            if len(obj.size()) > 0 and obj.type() in bits_per_value:
                total_bits += obj.numel() * bits_per_value[obj.type()]
        except Exception:
            pass
    print("{} GB".format(total_bits / ((1024 ** 3) * 8)))
def get_sample_loader():
    """DataLoader over the ImageFolder dataset under ``data_PATH``.

    Shuffling is disabled so all gradient methods see the images in the
    same, reproducible order.
    """
    image_folder = datasets.ImageFolder(data_PATH, transform=transform_standard)
    return torch.utils.data.DataLoader(
        image_folder, batch_size=ARGS.batch_size, shuffle=False)
def get_salient_type():
    """Map the --most_salient flag (a string) to the label used in filenames."""
    return "most" if ARGS.most_salient == "True" else "least"
def print_results(all_results):
    """Dump every collected metric dictionary to stdout.

    Slots of ``all_results``: [0] score diffs, [1] prob diffs,
    [2] KL divergences, [3]/[4] top-k other score/prob diffs,
    [5]/[6] max-other score/prob diffs.
    """
    print("For K values: {}".format(ARGS.k))
    headers = [
        "############ Score absolute fractional differences ############",
        "############ Probs absolute fractional differences ############",
        "KL divergences per k",
        "############ Top: {} Other Score absolute fractional differences ############".format(ARGS.topk),
        "############ Top: {} Other Probs absolute fractional differences ############".format(ARGS.topk),
        "############ Max Other Score absolute fractional differences ############",
        "############ Max other Probs absolute fractional differences ############",
    ]
    for results, header in zip(all_results, headers):
        print(header)
        print_dict(results)
def initialize_means_std_dict():
    """Fresh accumulator: one empty means list and stds list per metric."""
    metrics = ["score", "prob", "kl_div", "other_score", "other_prob",
               "other_max_score", "other_max_prob"]
    return {"{}_{}".format(name, stat): []
            for name in metrics for stat in ("means", "stds")}
def append_tmp_means_stds(tmp_results, means_std_dict):
    """Fold one k-value's per-image results into the running means/stds.

    ``tmp_results`` slots (fixed order): score, prob, kl_div, other_score,
    other_prob, other_max_score, other_max_prob.
    """
    order = ["score", "prob", "kl_div", "other_score", "other_prob",
             "other_max_score", "other_max_prob"]
    for values, name in zip(tmp_results, order):
        append_mean_std(values,
                        means_std_dict[name + "_means"],
                        means_std_dict[name + "_stds"])
def append_batch_means_stds(all_results, grad_type, means_std_dict):
    """Store the finished per-k curves of one gradient method.

    Writes ``[means, stds]`` into each of the seven metric dictionaries in
    ``all_results`` under the key ``grad_type``.
    """
    order = ["score", "prob", "kl_div", "other_score", "other_prob",
             "other_max_score", "other_max_prob"]
    for results, name in zip(all_results, order):
        results[grad_type] = [means_std_dict[name + "_means"],
                              means_std_dict[name + "_stds"]]
def get_pickle_file_name(model_name, salient_type):
    """Name of the results pickle for this experiment configuration."""
    parts = [ARGS.dataset, model_name, salient_type,
             ARGS.replacement + str(ARGS.n_images)]
    return "_".join(parts)
def main():
    """Run the pixel-perturbation experiment.

    For every saliency method in ARGS.grads and every removal fraction in
    ARGS.k, removes the k% most (or least) salient pixels from each image
    batch, re-runs the network, and records all output-change metrics.
    All results are pickled into the results folder at the end.

    Bug fix: the save_saliency_map_batch call used to pass an extra
    ``salient_type`` argument (6 args for a 5-parameter function), raising
    TypeError whenever --save_grad was set. Also removed the discarded
    ``new_data.to("cpu")`` statement (Tensor.to returns a *new* tensor, so
    the call was a no-op) and the unused ``save_path`` local.
    """
    device = ARGS.device
    sample_loader = get_sample_loader()
    salient_type = get_salient_type()
    model_name = ARGS.model + ARGS.model_type
    # one dict per metric; key: gradient method, value: [means, stds] per k%
    all_results = [{}, {}, {}, {}, {}, {}, {}]
    total_features = 224 * 224
    for grad_type in ARGS.grads:
        model, grad = initialize_grad_and_model(grad_type, model_name, device)
        # ensures each saliency method only saves images once (first k only)
        grad_counter = 0
        means_std_dict = initialize_means_std_dict()
        print("grad_type:{}".format(grad_type))
        for i in ARGS.k:
            grad_counter += 1
            k_most_salient = int(i * total_features)
            counter = 0
            # slots: score diffs, prob diffs, kl divs, other score/prob
            # diffs, max other score/prob diffs (see compute_difference_metrics)
            tmp_results = [[], [], [], [], [], [], []]
            for batch_idx, (data, target) in enumerate(sample_loader):
                counter += 1
                if counter % 100 == 0:
                    print("{} image batches processed".format(counter))
                # NOTE(review): breaking when counter == n_images processes
                # only n_images - 1 batches — confirm whether that is intended.
                if counter == ARGS.n_images:
                    break
                data = data.to(device).requires_grad_()
                # network output before any perturbation
                with torch.no_grad():
                    initial_output = model.forward(data)
                    initial_out = initial_output.to(torch.device("cpu"))
                if grad_type != "random":
                    cam = compute_saliency_per_grad(grad_type, grad, data, target_layer="features")
                    if ARGS.save_grad is True and grad_counter == 1 and counter <= ARGS.n_save:
                        save_saliency_map_batch(cam, data, result_path, grad_type, counter)
                    new_data = remove_salient_pixels(data, cam, num_pixels=k_most_salient,
                                                     most_salient=ARGS.most_salient,
                                                     replacement=ARGS.replacement)
                    # output after pixel perturbation
                    with torch.no_grad():
                        final_output = model.forward(new_data)
                        final_out = final_output.to("cpu")
                    tmp_results = compute_difference_metrics(initial_out, final_out, tmp_results)
                elif grad_type == "random":
                    # average several random-removal runs to reduce their std
                    sample_seeds = np.random.randint(0, 10000, ARGS.n_random_runs)
                    for seed in sample_seeds:
                        tmp_data = remove_random_salient_pixels(data, seed, k_percentage=i,
                                                                replacement=ARGS.replacement)
                        with torch.no_grad():
                            final_output = model.forward(tmp_data)
                            final_out = final_output.to("cpu")
                        tmp_results = compute_difference_metrics(initial_out, final_out, tmp_results)
            append_tmp_means_stds(tmp_results, means_std_dict)
        append_batch_means_stds(all_results, grad_type, means_std_dict)
    # persist stds and means for all gradient methods and all k% values
    file_name = get_pickle_file_name(model_name, salient_type)
    save_experiment_file = result_path + file_name
    print_results(all_results)
    save_obj(all_results, save_experiment_file)
if __name__ == "__main__":
    # CLI entry point: every experiment knob is exposed as a flag, collected
    # into the module-level ARGS namespace used throughout this file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', default=5, type=int,
                        help='')
    parser.add_argument('--dataset', default="imagenet", type=str,
                        help='which dataset')
    parser.add_argument('--device', default="cuda:0", type=str,
                        help='cpu or gpu')
    parser.add_argument('--grads', default=["gradcam"], type=str, nargs='+',
                        help='which grad methods to be applied')
    parser.add_argument('--k', default=[0.001, 0.005, 0.01, 0.025, 0.05, 0.075, 0.1], type=float, nargs="+",
                        help='Percentage of k% most salient pixels')
    parser.add_argument('--most_salient', default="True", type=str,
                        help='most salient = True or False depending on retrain or pixel perturbation')
    parser.add_argument('--model', default="vgg", type=str,
                        help='which model to use')
    parser.add_argument('--model_type', default="16_bn", type=str,
                        help='which model type: resnet_18, ...')
    parser.add_argument('--n_images', default=50, type=int,
                        help='Test for n_images images ')
    parser.add_argument('--n_random_runs', default=5, type=int,
                        help='Number of runs for random pixels to be removed to decrease std of random run')
    parser.add_argument('--n_save', default=50, type=int,
                        help='Save saliency maps for first n_save images')
    parser.add_argument('--target_layer', default="features", type=str,
                        help='Which layer to be visualized in GRADCAM')
    parser.add_argument('--replacement', default="black", type=str,
                        help='black = 1.0 or mean = [0.485, 0.456, 0.406]')
    parser.add_argument('--save_grad', default=False, type=bool,
                        help='saliency map to be saved?')
    parser.add_argument('--topk', default=100, type=int,
                        help='Number of other classes considered for metric of other class changes')
    ARGS = parser.parse_args()
    main()
| StarcoderdataPython |
5004011 | import numpy as np
import math
import random
class MountainCar_SARSA:
    """Mountain-car environment with tile-coded features for SARSA agents.

    Two terminals exist: terminal 1 at the top of the hill (x == x_range[1])
    and terminal 2 an ellipse at the bottom of the valley. Rewards at the
    terminals depend on the current task (A/B) and the flipped_terminals flag.
    """

    def __init__(self, settings):
        self.num_actions = 3
        self.gamma = settings['gamma']
        self.num_tilings = settings['num_tilings']
        self.num_x_tiles = settings['num_x_tiles']
        self.num_v_tiles = settings['num_v_tiles']
        self.max_steps_per_episode = settings['max_steps_per_episode']
        # one active tile per tiling
        self.num_active_features = self.num_tilings
        self.num_total_features = self.num_tilings * self.num_x_tiles * self.num_v_tiles
        # position and velocity bounds of the classic mountain-car dynamics
        self.x_range = [-1.2, 0.5]
        self.v_range = [-0.07, 0.07]
        self.tiling_x_offset = np.zeros(self.num_tilings)
        self.tiling_v_offset = np.zeros(self.num_tilings)
        self.current_state = {}
        # initial-state region, switched per experiment phase (see set_phase)
        self.init_state = settings['init_state_train']
        self.init_state_train = settings['init_state_train']
        self.init_state_transition = settings['init_state_transition']
        self.init_state_test = settings['init_state_test']
        self.init_state_eval = settings['init_state_eval']
        self.reward_terminal2_x_range = [-0.6, -0.44]
        self.reward_terminal2_v_range = [-0.003, 0.003]
        # semi-axis scale of the elliptical terminal-2 region
        self.terminal2_radius = 0.07
        self.init = {}
        self.task = 0
        self.reward_terminal1 = 4
        self.reward_terminal2 = 2
        self.phase = 'pre_train'
        self.flipped_terminal = settings['flipped_terminals']
        self.flipped_actions = settings['flipped_actions']
        self.initialize()
        self.reset_state()

    def initialize(self):
        """
        This function can be used to initialize the domain, for example
        by randomizing the offset of the tilings.
        For our experiment, we used fixed tile positions. The reason is that
        because we only use 3 tilings, randomization can cause huge variances.
        with some representations being very bad (f.e., if all three tilings
        have similar offset).
        """
        x_tile_size = (self.x_range[1] - self.x_range[0]) / float(self.num_x_tiles)
        v_tile_size = (self.v_range[1] - self.v_range[0]) / float(self.num_v_tiles)
        # each tiling is shifted by a random fraction of one tile
        for t in range(self.num_tilings):
            self.tiling_x_offset[t] = random.uniform(0, x_tile_size)
            self.tiling_v_offset[t] = random.uniform(0, v_tile_size)

    def get_domain_info(self):
        """Return [total features, active features, actions, gamma]."""
        return [self.num_total_features, self.num_active_features, self.num_actions, self.gamma]

    def set_task(self, task):
        """
        This function sets the task
        terminal 1 is the terminal at the top of the hill
        terminal 2 is the terminal at the bottom of the hill
        Task A -> Task 0,
        Task B -> Task 1.
        """
        self.task = task
        if self.flipped_terminal:
            if task == 0:  # taskA
                self.reward_terminal1 = 2
                self.reward_terminal2 = 4
            else:  # taskB
                self.reward_terminal1 = 2
                self.reward_terminal2 = 1
        else:
            if task == 0:  # taskA
                self.reward_terminal1 = 4
                self.reward_terminal2 = 2
            else:  # taskB
                self.reward_terminal1 = 1
                self.reward_terminal2 = 2

    def set_phase(self, phase):
        """
        This function sets the initial state at each phase of the experiment
        """
        self.phase = phase
        if phase == 'pre_train':
            self.init_state = self.init_state_train
        elif phase == 'local_pre_train':
            self.init_state = self.init_state_transition
        elif phase == 'train':
            self.init_state = self.init_state_test
        else:
            assert False, 'incorrect identifier'

    def set_eval_mode(self, eval):
        """
        Set the terminals with higher reward 1 and lower reward 0. It makes it easier
        to calculate evaluation performance
        The initial state needs to be adjusted as well
        """
        if eval:
            if self.reward_terminal1 > self.reward_terminal2:
                self.reward_terminal1 = 1
                self.reward_terminal2 = 0
            else:
                self.reward_terminal1 = 0
                self.reward_terminal2 = 1
            self.init_state = self.init_state_eval
        else:
            # restore task-specific rewards and phase-specific start region
            self.set_task(self.task)
            self.set_phase(self.phase)

    def _update_state(self, action):
        """
        This function implements dynamics
        if self.flipped_actions is True: flip the actions to cancel the effect of model learning
        """
        if self.flipped_actions:
            action = (action + 1) % 3
        v = self.current_state['v']
        x = self.current_state['x']
        if not self.flipped_terminal:
            # override the agent's action near the hilltop / transition region
            if x >= 0.4 and v >= 0:
                action = 2
            else:
                # if ((x + 0.52) ** 2 + 100 * v ** 2) <= 0.0164:
                if self.init_state_transition[0][0][0] <= x <= self.init_state_transition[0][0][1]\
                        and abs(v) <= self.init_state_transition[0][1][1]:
                    action = 0 if v > 0 else 2
        term = 0
        # classic mountain-car dynamics: thrust +/- 0.001 and gravity term
        next_v = v + 0.001 * (action - 1) - 0.0025 * math.cos(3 * x)
        if next_v < self.v_range[0]:
            next_v = self.v_range[0]
        elif next_v > self.v_range[1]:
            next_v = self.v_range[1]
        next_x = x + next_v
        if next_x <= self.x_range[0]:
            # left wall: inelastic stop
            next_x = self.x_range[0]
            next_v = 0
        elif next_x >= self.x_range[1]:
            # terminal 1: top of the hill
            next_x = self.x_range[1]
            next_v = 0
            term = 1
        # elif self.reward_terminal2_x_range[0] <= next_x <= self.reward_terminal2_x_range[1]\
        #         and abs(next_v) <= self.reward_terminal2_v_range[1]:
        # terminal 2: slow state inside an ellipse around the valley bottom
        # NOTE(review): the ellipse center here is -0.52, but reset_state uses
        # -0.5234 when excluding starts from this region — confirm which is intended.
        elif ((next_x + 0.52) ** 2 + 100 * next_v ** 2) <= (self.terminal2_radius)**2:
            next_v = 0
            term = 2
        self.current_state['x'] = next_x
        self.current_state['v'] = next_v
        self.current_state['terminal'] = term

    def reset_state(self):
        """
        This function randomly generate the initial state. With prob = p, it takes the init_State from the first
        interval and with prob = 1 - p from the second interval.
        This scheme should be adjusted based on the domain such that during pre-training Task A can be fully solved
        and during training Task B. And Evaluation area should be chosen such that after convergence, all the samples
        taken from that ends at terminal with higher reward!
        Return:
            current state: {'x', 'v', 'terminal'}
        """
        reset = False
        x, v = 0, 0
        p = random.uniform(0, 1)
        # rejection-sample until the start lies outside the terminal-2 ellipse
        while not reset:
            if len(self.init_state) == 1:
                x = random.uniform(self.init_state[0][0][0], self.init_state[0][0][1])
                v = random.uniform(self.init_state[0][1][0], self.init_state[0][1][1])
            elif len(self.init_state) == 2:  # a mix distribution for initial states
                # NOTE(review): `p < 1` is (almost surely) always true, so the
                # second interval below is effectively never sampled — confirm intent.
                if p < 1:
                    x = random.uniform(self.init_state[0][0][0], self.init_state[0][0][1])
                    v = random.uniform(self.init_state[0][1][0], self.init_state[0][1][1])
                else:
                    x = random.uniform(self.init_state[1][0][0], self.init_state[1][0][1])
                    v = random.uniform(self.init_state[1][1][0], self.init_state[1][1][1])
            # if (x <= self.reward_terminal2_x_range[0] or x >= self.reward_terminal2_x_range[1]) or\
            #         (v <= self.reward_terminal2_v_range[0] or v >= self.reward_terminal2_v_range[1]):
            if ((x + 0.5234) ** 2 + 100 * v ** 2) >= (self.terminal2_radius)**2:
                reset = True
        self.init['x'] = x
        self.init['v'] = v
        self.current_state['x'] = x
        self.current_state['v'] = v
        self.current_state['terminal'] = 0
        return self.current_state['x'], self.current_state['v']

    def get_state_features(self, state_features):
        """Fill ``state_features`` with the active tile indices; return the
        terminal flag of the current state."""
        self.get_active_state_features(state_features)
        return self.current_state['terminal']

    def get_active_state_features(self, state_features):
        """Write the index of the active tile of each tiling into
        ``state_features`` (one entry per tiling, mutated in place)."""
        x_size = (self.x_range[1] - self.x_range[0]) / float(self.num_x_tiles - 1)
        v_size = (self.v_range[1] - self.v_range[0]) / float(self.num_v_tiles - 1)
        for t in range(self.num_active_features):
            x = self.current_state['x'] + self.tiling_x_offset[t]
            v = self.current_state['v'] + self.tiling_v_offset[t]
            fx = int(math.floor((x - self.x_range[0]) / float(x_size)))
            # NOTE(review): clamping to num_x_tiles (not num_x_tiles - 1) can
            # still yield an out-of-tiling column; the assert below guards the
            # flat index — confirm whether the clamp bound is intended.
            fx = min(fx, self.num_x_tiles)
            fv = int(math.floor((v - self.v_range[0]) / float(v_size)))
            fv = min(fv, self.num_v_tiles)
            # flatten (x tile, v tile, tiling) into one feature index
            ft = fx + self.num_x_tiles * fv + t * self.num_x_tiles * self.num_v_tiles
            assert (0 <= ft < self.num_total_features)
            state_features[t] = ft

    def take_action(self, action, next_state_features):
        """Apply ``action``, write the next state's active features into
        ``next_state_features``, and return [reward, terminal flag]."""
        assert (0 <= action < self.num_actions)
        self._update_state(action)
        self.get_active_state_features(next_state_features)
        reward = 0
        if self.current_state['terminal'] == 1:
            reward = self.reward_terminal1
        if self.current_state['terminal'] == 2:
            reward = self.reward_terminal2
        return [reward, self.current_state['terminal']]
| StarcoderdataPython |
12840349 | <gh_stars>0
# coding: utf-8
#Import the necessary Python modules
import pandas as pd
import folium
from folium.plugins import TimestampedGeoJson
from shapely.geometry import Point
import os
from datetime import datetime
from branca.element import Template, MacroElement
import html
from scripts.location_map_constants import iLEAPP_KMLs, defaultShadowUrl, defaultIconUrl, colors, legend_tag, legend_title_tag, legend_div, template_part1, template_part2
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
#Helpers
def htmlencode(string):
    """Replace every non-ASCII character with its XML numeric entity."""
    encoded = string.encode('ascii', 'xmlcharrefreplace')
    return encoded.decode('utf-8')
def geodfToFeatures(df, f, props):
    """Build one TimestampedGeoJson feature from the rows of ``df`` whose
    Description matches ``f``, styled according to ``props[f]``.

    Row.Name must be a '%Y-%m-%d %H:%M:%S' timestamp; Row.Point provides
    the x (longitude) / y (latitude) coordinates.
    """
    matching = df[df.Description.str.contains(f)]
    coords = [[row.Point.x, row.Point.y] for _, row in matching.iterrows()]
    times = [
        datetime.strptime(row.Name, '%Y-%m-%d %H:%M:%S').isoformat()
        for _, row in matching.iterrows()
    ]
    style = props[f]
    return {
        'type': 'Feature',
        'geometry': {
            'type': style['fType'],
            'coordinates': coords,
        },
        'properties': {
            'times': times,
            'style': {'color': style['color']},
            'icon': style['icon'],
            'iconstyle': {
                'iconUrl': style['iconUrl'],
                'shadowUrl': style['shadowUrl'],
                'iconSize': [25, 41],
                'iconAnchor': [12, 41],
                'popupAnchor': [1, -34],
                'shadowSize': [41, 41],
                'radius': 5,
            },
        },
    }
def generate_location_map(reportfolderbase, legend_title):
    """Build an animated folium map from the KML location database and embed
    it into an iLEAPP artifact report.

    Fixes: the sqlite connection was never closed, and the saved map HTML was
    read via a leaked ``open(...).read()`` handle; both now use explicit
    cleanup. Dead locals (descFilter, color, an unused shadowUrl) removed.
    :param reportfolderbase: root folder of the report being generated
    :param legend_title: heading shown above the map legend
    """
    KML_path = os.path.join(reportfolderbase, iLEAPP_KMLs)
    # nothing to plot without KML data
    if not os.path.isdir(KML_path) or not os.listdir(KML_path):
        return
    location_path = os.path.join(reportfolderbase, 'LOCATIONS')
    os.makedirs(location_path, exist_ok=True)
    db = sqlite3.connect(os.path.join(KML_path, "_latlong.db"))
    try:
        df = pd.read_sql_query("SELECT key as Name, Activity as Description, latitude, longitude FROM data ;", db)
    finally:
        # bug fix: the connection used to be leaked
        db.close()
    df["Point"] = df.apply(lambda row: Point(float(row['longitude']), float(row['latitude']), .0), axis=1)
    # sorting is needed for correct display
    df.sort_values(by=['Name'], inplace=True)
    # Parse geo data and add to Folium Map (Photos entries are excluded)
    data_names = df[~df.Description.str.contains('Photos')].Description.unique()
    featuresProp = {}
    for c, d in zip(colors, data_names):
        if 'ZRT' in d:
            # route data is drawn as a line with colored markers
            fType = 'LineString'
            icon = 'marker'
            iconUrl = defaultIconUrl.format(c)
        else:
            fType = 'MultiPoint'
            icon = 'circle'
            iconUrl = ''
        # NOTE(review): the original computed a per-type shadowUrl ('' for
        # circles) but always stored defaultShadowUrl; behavior kept as-is —
        # confirm whether circle features should really get the marker shadow.
        featuresProp[d] = {
            'fType': fType,
            'color': c,
            'icon': icon,
            'iconUrl': iconUrl,
            'shadowUrl': defaultShadowUrl,
        }
    location_map = folium.Map([df.iloc[0].Point.y, df.iloc[0].Point.x], prefer_canvas=True, zoom_start=6)
    # bounding box of all non-photo points: (min lon, min lat, max lon, max lat)
    bounds = (df[~df.Description.str.contains('Photos')]['longitude'].min(),
              df[~df.Description.str.contains('Photos')]['latitude'].min(),
              df[~df.Description.str.contains('Photos')]['longitude'].max(),
              df[~df.Description.str.contains('Photos')]['latitude'].max(),
              )
    location_map.fit_bounds([
        (bounds[1], bounds[0]),
        (bounds[3], bounds[2]),
    ]
    )
    tsGeo = TimestampedGeoJson({
        'type': 'FeatureCollection',
        'features': [
            geodfToFeatures(df, f, featuresProp) for f in data_names
        ]
    }, period="PT1M", duration="PT1H", loop=False, transition_time=50, time_slider_drag_update=True, add_last_point=True, max_speed=200).add_to(location_map)
    # legend: one colored entry per data series
    legend = '\n'.join([legend_tag.format(featuresProp[f]['color'], htmlencode(f)) for f in data_names])
    template = '\n'.join([template_part1, legend_title_tag.format(htmlencode(legend_title)), legend_div.format(legend), template_part2])
    macro = MacroElement()
    macro._template = Template(template)
    location_map.get_root().add_child(macro)
    map_file = os.path.join(location_path, "Locations_Map.html")
    location_map.save(map_file)
    report = ArtifactHtmlReport('Locations Map')
    report.start_artifact_report(location_path, 'Locations Map', 'Map plotting all locations')
    # bug fix: the file handle used to be leaked via open(...).read()
    with open(map_file) as saved_map:
        report.write_raw_html(saved_map.read())
    report.end_artifact_report()
| StarcoderdataPython |
5010035 | import calendar
from gettext import ngettext
from pathlib import Path
import arrow
from flask import (
abort,
current_app,
g,
redirect,
render_template,
request,
Response,
url_for,
)
from flask_login import current_user, login_required
from lemonade_soapbox import db
from lemonade_soapbox.helpers import Blueprint
from lemonade_soapbox.models import Article, Post, Tag
bp = Blueprint('blog', __name__)
@bp.errorhandler(404)
def error404(e):
    """Blueprint-level 404 handler: custom template plus the 404 status."""
    body = render_template('blog/errors/404.html')
    return body, 404
@bp.before_request
def before():
    """Stash data every blog view needs on flask.g before each request."""
    # month-name lookup tables for templates
    g.full_month_name = calendar.month_name
    g.month_name = calendar.month_abbr
    # year/month article counts for the archive navigation
    g.breakdown = Article.post_breakdown()
@bp.route('/')
def index():
    """Front page: published articles, newest first, ten per page."""
    current_page = request.args.get('page', 1, int)
    newest_first = Article.published().order_by(Article.date_published.desc())
    articles = newest_first.paginate(page=current_page, per_page=10)
    return render_template(
        'blog/views/index.html',
        articles=articles,
        description="Read my thoughts going back 17 years.",
        page_title="Kara’s Blog",
    )
@bp.route('/drafts/<handle>/')
@login_required
def show_draft(handle):
    """Preview an unpublished draft (login required); 404 if none matches."""
    query = Article.query.filter_by(handle=handle, status='draft')
    article = query.first_or_404()
    return render_template('blog/views/single_article.html', article=article)
@bp.route('/trash/<handle>/')
@login_required
def show_deleted(handle):
    """Preview a trashed article (login required); 404 if none matches."""
    query = Article.query.filter_by(handle=handle, status='deleted')
    article = query.first_or_404()
    return render_template('blog/views/single_article.html', article=article)
@bp.route('/feed/')
def default_feed():
    """Redirect /feed/ to the app's configured default feed format."""
    fmt = current_app.config['DEFAULT_FEED_FORMAT']
    return redirect(url_for('.show_feed', format=fmt))
@bp.route('/feed/posts.<format>')
def show_feed(format):
    """Serve the ten newest published articles as a syndication feed."""
    articles = (
        Article.published().order_by(Article.date_published.desc()).limit(10).all()
    )
    feed_url = url_for('blog.show_feed', _external=True, format=format)
    xml = render_template(
        'blog/articles/' + format + '.xml',
        articles=articles,
        url=feed_url,
    )
    return Response(xml, mimetype='application/' + format + '+xml')
@bp.route('/tags/')
def all_tags():
    """List every tag attached to at least one article."""
    frequency = Tag.frequency(post_types=['article']).all()
    tags = [t for t in frequency if t["article_count"] > 0]
    sort_by = request.args.get('sort', 'frequency')
    count = len(tags)
    page_title = ngettext('%(num)d Tag', 'All %(num)d Tags', count) % {'num': count}
    return render_template(
        'blog/views/tags.html', page_title=page_title, tags=tags, sort_by=sort_by
    )
@bp.route('/tags/<handle>/')
@bp.route('/tags/<handle>/posts.<format>')
def show_tag(handle, format=None):
    """Show all published articles for a tag, as HTML or as a feed.

    When ``format`` is given (second route), render the matching feed
    template instead of the paginated HTML listing.
    """
    page = request.args.get('page', 1, int)
    tag = Tag.query.filter_by(handle=handle).first_or_404()
    # Only published articles whose publication date is not in the future.
    articles = tag.posts.filter(
        Post.post_type == "article",
        Post.status == 'published',
        Post.date_published <= arrow.utcnow(),
    ).order_by(Post.date_published.desc())
    if format:
        # Feed variant: all matching articles, no pagination.
        return Response(
            render_template(
                'blog/articles/' + format + '.xml',
                title=f'Kara.Reviews: {tag.label.title()} Books',
                articles=articles.all(),
                url=url_for(
                    'blog.show_tag',
                    _external=True,
                    handle=handle,
                    format=format,
                ),
            ),
            mimetype='application/' + format + '+xml',
        )
    articles = articles.paginate(page=page, per_page=20)
    # Use a tag-specific header image if one exists on disk.
    cover = None
    if Path(
        current_app.static_folder, 'images/layout/header_bg', f"{handle}.jpg"
    ).exists():
        cover = url_for(
            '.static', filename=f"images/layout/header_bg/{handle}.jpg", _external=True
        )
    return render_template(
        'blog/views/article_list.html',
        handle=handle,
        read_more_length=75,
        articles=articles,
        page_title=f"Articles Tagged with “{tag.label}”",
        cover=cover,
    )
@bp.route('/<int:year>/')
def year_archive(year):
    """Show all articles published in a given year, newest first.

    404s when the year has no published articles. ``prev_link`` and
    ``next_link`` are booleans indicating whether the adjacent years have
    any articles.
    """
    start, end = arrow.get(year, 1, 1).span('year')
    articles = (
        Article.published()
        .filter(Article.date_published.between(start, end))
        .order_by(Article.date_published.desc())
        .all()
    )
    if not articles:
        abort(404)
    # Check if there are posts for previous and next years
    prev_link = (
        Article.published()
        .filter(db.func.extract('YEAR', Article.date_published) == (year - 1))
        .count()
        > 0
    )
    next_link = (
        Article.published()
        .filter(db.func.extract('YEAR', Article.date_published) == (year + 1))
        .count()
        > 0
    )
    # One-line blurb per year shown on the archive page.
    # NOTE(review): keys are strings while ``year`` is an int — the template
    # presumably converts before lookup; confirm against the template.
    year_summaries = {
        "2004": "First year of university, first real job, first year online.",
        "2005": "In which I discover Doctor Who, move high schools, and get political.",
        "2006": "Learning how to drive, playing with Linux, and more politics.",
        "2007": "Pop culture, environmental awareness, and high school graduation.",
        "2008": "Joining Twitter and Goodreads, visiting an online friend, discovering Mass Effect.",
        "2009": "Lots of thoughts on technology, more politics.",
        "2010": "A summer spent researching mathematics, my first smartphone.",
        "2011": "In which I was very into voting for the Hugo Awards.",
        "2012": "Learning how to knit, graduating university, and moving to England.",
        "2013": "Living and teaching in England and a lot of pop culture.",
        "2014": "Trip to Amsterdam, moving back to Canada, new phone who dis?",
        "2015": "In which I start blogging seriously about education.",
        "2016": "Critical reflections on education, algorithms, and some lighter fare in knitting.",
        "2017": "On being asexual and aromantic, buying a house, and meeting my ride or die",
        "2018": "Starting a podcast with my bestie, visiting her in Montréal.",
        "2019": "More reflections on being an educator in Ontario.",
        "2020": "Coming out as trans, launching Kara.Reviews.",
        "2021": "Transition during a pandemic and reflections on friendship and pop culture.",
        "2022": "The current year. I hope it’s a good one!",
    }
    return render_template(
        'blog/views/year_archive.html',
        articles=articles,
        page_title=f"{len(articles)} Articles from {year}",
        prev_link=prev_link,
        next_link=next_link,
        year=year,
        year_summaries=year_summaries,
    )
@bp.route('/<int:year>/<month>/')
def month_archive(year, month):
    """Show all articles published in a given month, newest first.

    ``month`` arrives as a URL path string (zero-padded in generated links).
    A non-numeric or out-of-range month 404s; previously ``int(month)``
    raised ``ValueError`` for non-numeric segments, producing a 500.
    """
    try:
        month_num = int(month)
    except ValueError:
        abort(404)
    if month_num not in range(1, 13):
        abort(404)
    start, end = arrow.get(year, month_num, 1).span('month')
    articles = (
        Article.published()
        .filter(Article.date_published.between(start, end))
        .order_by(Article.date_published.desc())
        .all()
    )
    if not articles:
        abort(404)
    # Check if there are posts for previous and next months; link to the
    # nearest month with articles on either side of this one.
    prev_article = (
        Article.published()
        .filter(Article.date_published < articles[-1].date_published)
        .order_by(Article.date_published.desc())
        .first()
    )
    prev_link = (
        (
            url_for(
                ".month_archive",
                year=prev_article.date_published.year,
                month=f"{prev_article.date_published.month:02}",
            ),
            prev_article.date_published.format("MMMM YYYY"),
        )
        if prev_article
        else None
    )
    next_article = (
        Article.published()
        .filter(Article.date_published > articles[0].date_published)
        .order_by(Article.date_published.asc())
        .first()
    )
    next_link = (
        (
            url_for(
                ".month_archive",
                year=next_article.date_published.year,
                month=f"{next_article.date_published.month:02}",
            ),
            next_article.date_published.format("MMMM YYYY"),
        )
        if next_article
        else None
    )
    return render_template(
        'blog/views/article_list.html',
        articles=articles,
        description=f"{len(articles)} articles",
        prev_link=prev_link,
        next_link=next_link,
        page_title=f"Articles from {calendar.month_name[month_num]} {year}",
    )
@bp.route('/<int:year>/<month>/<handle>/')
def single_article(year, month, handle):
    """Display one article by its year/month/handle permalink.

    Logged-in users may view articles in any status; anonymous visitors see
    only published ones. A non-numeric or out-of-range ``month`` now 404s;
    previously ``int(month)`` / ``arrow.get`` raised, producing a 500.
    """
    try:
        start, end = arrow.get(year, int(month), 1).span('month')
    except ValueError:
        abort(404)
    query = Article.query if current_user.is_authenticated else Article.published()
    article = (
        query.filter(Article.date_published.between(start, end))
        .filter_by(handle=handle)
        .first_or_404()
    )
    return render_template('blog/views/single_article.html', article=article)
| StarcoderdataPython |
8046060 | <reponame>spire-allyjweir/beeline-python
import beeline
from beeline.propagation import Request
from beeline.middleware.wsgi import WSGIRequest
class HoneyWSGIMiddleware(object):
    """WSGI wrapper that opens a beeline trace for each request and closes
    it when the wrapped application calls ``start_response``."""

    def __init__(self, app):
        # The WSGI application being instrumented.
        self.app = app

    def __call__(self, environ, start_response):
        wsgi_request = WSGIRequest("werkzeug", environ)
        trace_span = beeline.propagate_and_start_trace(
            wsgi_request.request_context(), wsgi_request
        )

        def traced_start_response(status, headers, *args):
            # Record the status line, then close the trace before the
            # response is handed back to the server.
            beeline.add_context_field("response.status_code", status)
            beeline.finish_trace(trace_span)
            return start_response(status, headers, *args)

        return self.app(environ, traced_start_response)
| StarcoderdataPython |
1896380 | <reponame>AndreasGeiger/hackerrank-python
from itertools import product

# HackerRank "Maximize It!": first line is K (list count) and M (modulus).
k, modulus = (int(x) for x in input().split())
number_lists = []
for _ in range(k):
    # Each subsequent line starts with the list's length, which we discard.
    number_lists.append([int(x) for x in input().split()][1:])


def score(combination):
    """Return the sum of squares of the chosen elements, modulo ``modulus``."""
    return sum(x * x for x in combination) % modulus


# Iterate the cartesian product lazily; materializing it with list() (as the
# original did) can consume huge amounts of memory for large inputs.
print(max(map(score, product(*number_lists))))
| StarcoderdataPython |
3287288 | # This file is automatically generated. Do not edit.
glyph2tile = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 28, 29, 30, 31, 32, 34, 35, 36, 37,
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
134, 135, 137, 138, 139, 140, 141, 142, 143, 144, 145, 147,
148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
172, 173, 174, 175, 176, 177, 178, 179, 181, 182, 183, 184,
185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
221, 222, 223, 224, 225, 226, 227, 228, 230, 231, 232, 233,
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293, 294,
295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 319,
320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343,
344, 345, 346, 347, 348, 349, 350, 351, 352, 355, 356, 357,
358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 369, 370,
371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 382, 383,
384, 385, 386, 387, 388, 389, 390, 391, 392, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 137,
138, 139, 140, 141, 142, 143, 144, 145, 147, 148, 149, 150,
151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
175, 176, 177, 178, 179, 181, 182, 183, 184, 185, 186, 187,
188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 227, 228, 230, 231, 232, 233, 234, 235, 236,
237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
285, 286, 287, 288, 290, 291, 292, 293, 294, 295, 296, 297,
298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
310, 311, 312, 313, 314, 315, 316, 317, 319, 320, 321, 322,
323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
347, 348, 349, 350, 351, 352, 355, 356, 357, 358, 359, 360,
361, 362, 363, 364, 365, 366, 367, 369, 370, 371, 372, 373,
374, 375, 376, 377, 378, 379, 380, 382, 383, 384, 385, 386,
387, 388, 389, 390, 391, 392, 393, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29,
30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
127, 128, 129, 130, 131, 132, 133, 134, 135, 137, 138, 139,
140, 141, 142, 143, 144, 145, 147, 148, 149, 150, 151, 152,
153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
177, 178, 179, 181, 182, 183, 184, 185, 186, 187, 188, 189,
190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
226, 227, 228, 230, 231, 232, 233, 234, 235, 236, 237, 238,
239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
312, 313, 314, 315, 316, 317, 319, 320, 321, 322, 323, 324,
325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336,
337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
349, 350, 351, 352, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 369, 370, 371, 372, 373, 374, 375,
376, 377, 378, 379, 380, 382, 383, 384, 385, 386, 387, 388,
389, 390, 391, 392, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 28, 29, 30, 31, 32, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 137, 138, 139, 140, 141, 142, 143, 144, 145,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170,
171, 172, 173, 174, 175, 176, 177, 178, 179, 181, 182, 183,
184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
220, 221, 222, 223, 224, 225, 226, 227, 228, 230, 231, 232,
233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293,
294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 355, 356,
357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 369,
370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 382,
383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 394, 395,
396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407,
408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419,
420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431,
432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443,
444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455,
456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 479, 480,
481, 482, 483, 484, 485, 486, 487, 489, 490, 491, 492, 493,
494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517,
518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553,
554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565,
566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589,
590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601,
602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613,
614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625,
626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649,
650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661,
662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673,
674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685,
686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697,
698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709,
710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721,
722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733,
734, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746,
747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758,
759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770,
771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782,
783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794,
795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806,
807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818,
819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830,
831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842,
843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854,
855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866,
867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878,
879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890,
891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902,
903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914,
915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926,
927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938,
939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950,
951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962,
963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974,
975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986,
987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998,
999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010,
1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 929, 930, 931,
932, 933, 934, 935, 936, 929, 930, 931, 932, 933, 934, 935,
936, 929, 930, 931, 932, 933, 934, 935, 936, 1032, 1033, 1034,
1035, 1036, 1037, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845, 845,
]
# Upper bounds of the tile-index ranges in the table above.
# NOTE(review): names suggest monster / object / other tile categories --
# confirm against the code that consumes these constants.
MAXMONTILE = 393
MAXOBJTILE = 849
MAXOTHTILE = 1037
# tile.py
| StarcoderdataPython |
5052478 | '''
To do: Merge into data_script
'''
import getopt
import sys
import tourroute
def main(argv):
    """Parse command-line arguments and return the API key.

    Supports ``-a KEY`` / ``--apikey KEY``.  ``-h`` / ``--help`` prints the
    usage string and exits.  Returns None when no API key option is given.
    """
    help_str = 'tr_exec.py -a <apikey> (or --apikey <apikey>)'
    try:
        # BUG FIX: the old spec ('api:', 'apikey=') declared the short
        # options -a, -p and -i:, so '-h' raised GetoptError and '-api'
        # could never match an opt; only '--apikey' ever worked.
        opts, args = getopt.getopt(argv, 'ha:', ['help', 'apikey='])
    except getopt.GetoptError:
        print(help_str)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print(help_str)
            sys.exit()
        elif opt in ('-a', '--apikey'):
            return arg
        else:
            print('Unrecognised command line argument(s)')
            print(help_str)
if __name__ == '__main__':
    # Script entry point: grab the API key, load the tour and report totals.
    key = main(sys.argv[1:])
    print(key)
    route = tourroute.TourRoute('../out/tour.csv')
    route_slices = route.slices()
    dist, dur = tourroute.get_tour_distdur(key, route_slices)
    print(f'Total duration is {dur:,} seconds and distance is {dist:,} metres')
| StarcoderdataPython |
4821119 | <reponame>morozoffnor/govnoed_grisha_rewritten<gh_stars>0
import discord
from discord.ext import commands
import random
import sys
import json
sys.path.insert(1, '../functions')
from functions.cmd_print import cmd_print
class Korona(commands.Cog):
    """Discord cog implementing the transferable "korona" (crown) command.

    The current owner is persisted in ./data/korona.json under the "owner"
    key as a raw mention string such as "<@!123>".
    """

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def korona(self, ctx):
        """Transfer the crown to the mentioned user, if the caller owns it."""
        cmd = ctx.message.content
        # BUG FIX: cmd.lstrip("?korona ") strips any run of the characters
        # {?, k, o, r, n, a, space} from the left -- not the literal prefix --
        # which could eat the start of the mention.  Remove the exact prefix.
        prefix = "?korona "
        mention = cmd[len(prefix):] if cmd.startswith(prefix) else cmd
        with open("./data/korona.json") as json_data_file:
            data = json.load(json_data_file)
        if data["owner"] == mention:
            await ctx.send(f"{mention} уже и так владеет кароной. Ты вообще не можешь отобрать у человека карону, чтобы отдать её снова ему. Зачем? Ты нормальный вообще?")
        elif data["owner"] == ("<@!" + str(ctx.message.author.id) + ">"):
            print("<@!" + str(ctx.message.author.id) + ">")
            data["owner"] = mention
            # BUG FIX: the file handle was opened for writing and never
            # closed; use a context manager so the new owner is reliably
            # flushed to disk.
            with open('./data/korona.json', 'w') as file_object:
                json.dump(data, file_object)
            await ctx.send(f"{mention} теперь владеет кароной! Поздравляю, ты - долбоеб!")
        elif data["owner"] != ("<@!" + str(ctx.message.author.id) + ">"):
            print("<@!" + str(ctx.message.author.id) + "> is not equal to the data one")
            await ctx.send(f"пошел нахуй")
def setup(client):
    # discord.py extension hook: instantiate and register the Korona cog.
    cog = Korona(client)
    client.add_cog(cog)
3231856 | #!/usr/bin/python
import os
import sys
import time
def i2s(bridge):
    """Record audio on the target over the debug *bridge* and save it locally.

    Protocol (order matters):
      1. resolve the address of the ``I2sOutHeader`` symbol from the ELF,
      2. write 1 to header+12 (micReady) to start the capture,
      3. poll header+8 (recReady) until the target marks the WAV complete,
      4. read the WAV buffer address (header+0) and byte size (header+4),
      5. download the samples into wavOut.wav, then clear recReady.
    """
    print ("i2s.py execution on-going")
    # Resolve the I2sOutHeader symbol address in the 'test' ELF via nm|grep.
    addrWavOutHead = os.popen("riscv32-unknown-elf-nm test |grep -w 'I2sOutHeader$'").read()
    addrWavOutHead = (addrWavOutHead.split())[0]
    time.sleep(3)
    input("Press return to start sound acquisition:")
    # header + 12: micReady flag -- tells the target to start capturing.
    micReady = int(("0x"+addrWavOutHead), 16) + 12
    bridge.write_32(micReady, 1)
    # header + 8: recReady flag -- set by the target once the WAV is ready.
    recReady = int(("0x"+addrWavOutHead), 16) + 8
    WavOutFlag = bridge.read_32(recReady)
    while ( int(WavOutFlag) == 0 ):
        time.sleep(1)
        recReady = int(("0x"+addrWavOutHead), 16) + 8
        WavOutFlag = bridge.read_32(recReady)
        print ("py: waiting recReady")
    # header + 0: pointer to the WAV data in target memory.
    l2Addr = int(("0x"+addrWavOutHead), 16)
    addrWav = bridge.read_32(l2Addr)
    # header + 4: size of the recorded WAV in bytes.
    size = int(("0x"+addrWavOutHead), 16) + 4
    WavSize = bridge.read_32(size)
    with open("wavOut.wav", 'wb') as waveOut:
        print ("py: now download", WavSize, "bytes")
        for byte in bridge.read(addrWav, WavSize):
            waveOut.write(byte)
    # NOTE(review): attribute access is a no-op (probably meant close());
    # harmless, since the with-block above already closed the file.
    waveOut.closed
    # Acknowledge: clear recReady so the target can record again.
    recReady = int(("0x"+addrWavOutHead), 16) + 8
    bridge.write_32(recReady, 0)
    # Best-effort: expose the output where the build tree expects it.
    os.system("if [ ! -f ../../../waveOut.wav ]; then ln -s BUILD/GAP8/GCC_RISCV/wavOut.wav ../../../wavOut.wav 1>/dev/null 2>/dev/null; fi")
| StarcoderdataPython |
6605625 | #Problem Link: https://www.hackerrank.com/challenges/defaultdict-tutorial/problem
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import defaultdict
# Python 2 solution.  Read n words of group A, then m query words of group B;
# for each query print the 1-based positions of its occurrences in A, or -1.
n,m = map(int,raw_input().split())
A = defaultdict(list)  # word -> list of 1-based occurrence indices
index =1
for i in range(n):
    word = raw_input()
    A[word].append(index)
    index+=1
B = list()
for i in range(m):
    word = raw_input()
    B.append(word)
for x in B:
    # A is a defaultdict, so an unseen query yields an empty list (-> -1).
    if(len(A[x])!=0):
        for i in range(len(A[x])):
            print A[x][i],
    else:
        print '-1',
    print  # terminate the output line for this query
| StarcoderdataPython |
229431 | import sys, getopt
from LexicalReplacement import LexicalReplacement
from ModelTesting import Test
# Default resource locations: word->vector mappings and the embedding model.
config = {"mappings":"../mappings/w2v_50.json",
          "embeddings":"../models/w2v_50"}
def text_to_tests(texts):
    """Lazily convert pre-split TSV rows into Test objects."""
    for row in texts:
        yield Test.from_tsv(row)
def generate(model, texts):
    """Print one generated test per TSV row in *texts*."""
    for test_case in text_to_tests(texts):
        print(model.generate_test(test_case))
def main(argv=None):
    """Entry point: parse CLI options, then generate tests for the input file.

    Usage: prog [-m MAPPINGS | --mappings MAPPINGS]
                [-e EMBEDDINGS | --embeddings EMBEDDINGS] INPUT_TSV
    Returns 2 on a usage error, None on success.
    """
    if argv is None:
        argv = sys.argv
    try:
        # BUG FIX: the long options were declared without a trailing '=',
        # so getopt rejected '--mappings X' / '--embeddings X'.
        opts, args = getopt.getopt(argv[1:], "hm:e:",
                                   ["help", "mappings=", "embeddings="])
    except Exception as e:
        print ("Error in args : " + str(argv[1:]))
        print (e)
        return 2
    for o in opts:
        # BUG FIX: the long option forms were parsed but silently ignored.
        if o[0] in ("-m", "--mappings"):
            config["mappings"] = o[1]
        if o[0] in ("-e", "--embeddings"):
            config["embeddings"] = o[1]
    if not args:
        # Robustness: args[0] below would raise IndexError without an input.
        print ("Error in args : missing input file")
        return 2
    model = LexicalReplacement(config["mappings"], config["embeddings"])
    texts = [t.split("\t") for t in open(args[0]).readlines()]
    generate(model, texts)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Django app bootstrap: point this package at its AppConfig subclass.
default_app_config = "apps.api.notice.apps.NoticeConfig"
| StarcoderdataPython |
6672218 | from __future__ import print_function
import numpy as np
import sys
from tqdm import tqdm
import os
import pickle
import cv2
import itertools
from six.moves import xrange
from feature_match import computeNN
from utils import saveh5, loadh5
from geom import load_geom, parse_geom, get_episym
from transformations import quaternion_from_matrix
class Sequence(object):
    """One image sequence: builds visible image pairs, matches descriptors
    and dumps (correspondences, labels, relative pose) training data.

    Expects ``images.txt``, ``calibration.txt`` and ``visibility.txt`` in
    *dataset_path*, plus per-image ``<image>.<desc_name>.hdf5`` files with
    "keypoints" and "descriptors" entries.
    """

    def __init__(self, dataset_path, dump_dir, desc_name, vis_th, pair_num, pair_name=None):
        """Collect file lists, load geometry/visibility and pick image pairs.

        Pairs are either sampled (visibility > *vis_th*, at most *pair_num*,
        seeded for reproducibility) or loaded from the pickle *pair_name*.
        """
        self.data_path = dataset_path.rstrip("/") + "/"
        self.dump_dir = dump_dir
        self.desc_name = desc_name
        print('dump dir ' + self.dump_dir)
        if not os.path.exists(self.dump_dir):
            os.makedirs(self.dump_dir)
        # Intermediate nearest-neighbour files go into <dump_dir>/dump.
        self.intermediate_dir = os.path.join(self.dump_dir, 'dump')
        if not os.path.exists(self.intermediate_dir):
            os.makedirs(self.intermediate_dir)
        img_list_file = self.data_path + "images.txt"
        geom_list_file = self.data_path + "calibration.txt"
        vis_list_file = self.data_path + "visibility.txt"
        self.image_fullpath_list = self.parse_list_file(self.data_path, img_list_file)
        self.geom_fullpath_list = self.parse_list_file(self.data_path, geom_list_file)
        self.vis_fullpath_list = self.parse_list_file(self.data_path, vis_list_file)
        # load geom and vis
        self.geom, self.vis = [], []
        for geom_file, vis_file in zip(self.geom_fullpath_list, self.vis_fullpath_list):
            self.geom += [load_geom(geom_file)]
            self.vis += [np.loadtxt(vis_file).flatten().astype("float32")]
        self.vis = np.asarray(self.vis)
        img_num = len(self.image_fullpath_list)
        if pair_name is None:
            self.pairs = []
            # Ordered pairs (ii, jj), ii != jj, kept when visibility exceeds
            # the threshold.
            for ii, jj in itertools.product(xrange(img_num), xrange(img_num)):
                if ii != jj and self.vis[ii][jj] > vis_th:
                    self.pairs.append((ii, jj))
            # Fixed seed so the sampled subset is reproducible.
            np.random.seed(1234)
            self.pairs = [self.pairs[i] for i in np.random.permutation(len(self.pairs))[:pair_num]]
        else:
            with open(pair_name, 'rb') as f:
                self.pairs = pickle.load(f)
        print('pair lens' + str(len(self.pairs)))

    def dump_nn(self, ii, jj):
        """Compute and cache descriptor nearest neighbours for pair (ii, jj).

        Skips the work if the intermediate h5 file already exists.
        """
        dump_file = os.path.join(self.intermediate_dir, "nn-{}-{}.h5".format(ii, jj))
        if not os.path.exists(dump_file):
            image_i, image_j = self.image_fullpath_list[ii], self.image_fullpath_list[jj]
            desc_ii = loadh5(image_i+'.'+self.desc_name+'.hdf5')["descriptors"]
            desc_jj = loadh5(image_j+'.'+self.desc_name+'.hdf5')["descriptors"]
            idx_sort, ratio_test, mutual_nearest = computeNN(desc_ii, desc_jj)
            # Dump to disk
            dump_dict = {}
            dump_dict["idx_sort"] = idx_sort
            dump_dict["ratio_test"] = ratio_test
            dump_dict["mutual_nearest"] = mutual_nearest
            saveh5(dump_dict, dump_file)

    def dump_intermediate(self):
        """Run dump_nn for every selected pair (with a progress bar)."""
        for ii, jj in tqdm(self.pairs):
            self.dump_nn(ii,jj)
        print('Done')

    def unpack_K(self, geom):
        """Extract principal point (cx, cy) and focal lengths from a geom dict.

        The principal point is the image centre plus the K-matrix offset.
        """
        img_size, K = geom['img_size'], geom['K']
        w, h = img_size[0], img_size[1]
        cx = (w - 1.0) * 0.5
        cy = (h - 1.0) * 0.5
        cx += K[0, 2]
        cy += K[1, 2]
        # Get focals
        fx = K[0, 0]
        fy = K[1, 1]
        return cx,cy,[fx,fy]

    def norm_kp(self, cx, cy, fx, fy, kp):
        """Normalize pixel keypoints to camera coordinates."""
        # New kp
        kp = (kp - np.array([[cx, cy]])) / np.asarray([[fx, fy]])
        return kp

    def make_xy(self, ii, jj):
        """Build one training sample for pair (ii, jj).

        Returns (xs, ys, dR, dt, ratio_test, mutual_nearest, cx1, cy1, f1,
        cx2, cy2, f2), or [] when the relative translation is (near) zero.
        xs holds matched normalized correspondences with shape (1, N, 4);
        ys holds the symmetric epipolar distance per match.
        """
        geom_i, geom_j = parse_geom(self.geom[ii]), parse_geom(self.geom[jj])
        # should check the image size here
        #load img and check img_size
        image_i, image_j = self.image_fullpath_list[ii], self.image_fullpath_list[jj]
        kp_i = loadh5(image_i+'.'+self.desc_name+'.hdf5')["keypoints"][:, :2]
        kp_j = loadh5(image_j+'.'+self.desc_name+'.hdf5')["keypoints"][:, :2]
        cx1, cy1, f1 = self.unpack_K(geom_i)
        cx2, cy2, f2 = self.unpack_K(geom_j)
        x1 = self.norm_kp(cx1, cy1, f1[0], f1[1], kp_i)
        x2 = self.norm_kp(cx2, cy2, f2[0], f2[1], kp_j)
        # Relative pose: dR rotates frame i into frame j; dt is unit length.
        R_i, R_j = geom_i["R"], geom_j["R"]
        dR = np.dot(R_j, R_i.T)
        t_i, t_j = geom_i["t"].reshape([3, 1]), geom_j["t"].reshape([3, 1])
        dt = t_j - np.dot(dR, t_i)
        if np.sqrt(np.sum(dt**2)) <= 1e-5:
            return []
        dtnorm = np.sqrt(np.sum(dt**2))
        dt /= dtnorm
        # Reorder x2 by the precomputed nearest-neighbour assignment.
        nn_info = loadh5(os.path.join(self.intermediate_dir, "nn-{}-{}.h5".format(ii, jj)))
        idx_sort, ratio_test, mutual_nearest = nn_info["idx_sort"], nn_info["ratio_test"], nn_info["mutual_nearest"]
        x2 = x2[idx_sort[1],:]
        xs = np.concatenate([x1, x2], axis=1).reshape(1,-1,4)
        geod_d = get_episym(x1, x2, dR, dt)
        ys = geod_d.reshape(-1,1)
        return xs, ys, dR, dt, ratio_test, mutual_nearest, cx1, cy1, f1, cx2, cy2, f2

    def dump_datasets(self):
        """Materialize all samples into per-variable .pkl files in dump_dir.

        A "ready" marker file makes the whole dump idempotent.
        """
        ready_file = os.path.join(self.dump_dir, "ready")
        var_name = ['xs', 'ys', 'Rs', 'ts', 'ratios', 'mutuals', 'cx1s', 'cy1s', 'f1s', 'cx2s', 'cy2s', 'f2s']
        res_dict = {}
        for name in var_name:
            res_dict[name] = []
        if not os.path.exists(ready_file):
            print("\n -- No ready file {}".format(ready_file))
            for pair_idx, pair in enumerate(self.pairs):
                print("\rWorking on {} / {}".format(pair_idx, len(self.pairs)), end="")
                sys.stdout.flush()
                res = self.make_xy(pair[0], pair[1])
                # make_xy returns [] for degenerate (zero-translation) pairs.
                if len(res)!=0:
                    for var_idx, name in enumerate(var_name):
                        res_dict[name] += [res[var_idx]]
            for name in var_name:
                out_file_name = os.path.join(self.dump_dir, name) + ".pkl"
                with open(out_file_name, "wb") as ofp:
                    pickle.dump(res_dict[name], ofp)
            # Mark ready
            with open(ready_file, "w") as ofp:
                ofp.write("This folder is ready\n")
        else:
            print('Done!')

    def parse_list_file(self, data_path, list_file):
        """Read *list_file* line by line and return entries as full paths."""
        fullpath_list = []
        with open(list_file, "r") as img_list:
            while True:
                # read a single line
                tmp = img_list.readline()
                if type(tmp) != str:
                    line2parse = tmp.decode("utf-8")
                else:
                    line2parse = tmp
                if not line2parse:
                    break
                # strip the newline at the end and add to list with full path
                fullpath_list += [data_path + line2parse.rstrip("\n")]
        return fullpath_list
| StarcoderdataPython |
# Re-export the package's public helper and pin the public API.
from .shared import compute_stats
__all__ = ["compute_stats"]
| StarcoderdataPython |
8115518 | <gh_stars>10-100
from . node import Node
class NodeVisitor(object):
    """Walk an abstract syntax tree, invoking a visitor method per node.

    Subclasses add methods named ``visit_<NodeClassName>``; for example a
    ``TryFinally`` node is handled by ``visit_TryFinally``.  Whatever such a
    method returns is forwarded by :meth:`visit`.  Nodes without a dedicated
    handler fall back to :meth:`generic_visit`, which simply recurses into
    the children in preorder.

    The dispatch scheme can be changed by overriding :meth:`visit`.  Do not
    use this class to modify nodes while traversing; a dedicated transformer
    class would be required for that (not provided here).
    """

    def visit(self, node):
        """Dispatch *node* to its ``visit_*`` handler and return the result."""
        assert(isinstance(node, Node))
        handler_name = 'visit_' + node.__class__.__name__
        handler = getattr(self, handler_name, self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """Fallback handler: preorder-visit every child of *node*."""
        assert(isinstance(node, Node))
        for child in node.children():
            self.visit(child)
| StarcoderdataPython |
9740728 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from api.management.commands.importbasics import *
def import_en_events(opt):
    """Scrape EN event cutoffs from the decaf wiki and update the DB.

    Python 2 code.  Reads the wiki page source (or a local eventsEN.html
    when opt['local'] is set), parses the '||'-separated table rows into
    event records, upserts them as models.Event, and finally marks every
    card listed in the "All Cards" section as available worldwide.
    """
    local = opt['local']
    print '### Import EN events T1/T2 cutoffs from decaf wiki'
    if local:
        f = open('eventsEN.html', 'r')
    else:
        f = urllib2.urlopen('http://decaf.kouhi.me/lovelive/index.php?title=English_Version_Info&action=edit')
    cards_section = False
    set_to_worldwide = []
    for line in f.readlines():
        line = h.unescape(line)
        # Section headers toggle whether we are inside the card list.
        if line.startswith('=== '):
            if line.startswith('=== All Cards ==='):
                cards_section = True
            else:
                cards_section = False
        data = str(line).split('||')
        if cards_section and len(data) > 1:
            # Card-list row: first cell carries the card id.
            card_id = int(data[0].split('|')[-1].strip())
            set_to_worldwide.append(card_id)
        elif len(data) >= 5 and len(data) <= 8:
            # Event row: dates || names || ... || T1 pts [|| T1 rank] || T2 pts [|| T2 rank] [|| note]
            dates = data[0].replace('|', '').split(' - ')
            beginning = eventDateFromString(dates[0])
            end = eventDateFromString(str(beginning.year) + '/' + dates[1])
            names = data[1].replace('[[', '').replace(']]', '').split('|')
            japanese_name = cleanwithquotes(names[-2])
            english_name = clean(names[-1])
            t1_points = optInt(clean(data[3]))
            i = 4
            # Rank cells may span multiple rows; carry the last seen value.
            # NOTE(review): t1_rank/t2_rank are unbound if the very first
            # event row has no rank cell -- would raise NameError; confirm
            # the wiki table always starts with a rowspan rank cell.
            if 'rowspan' in data[i] or len(data) == 7 or len(data) == 8:
                t1_new_rank = optInt(clean(data[i].split('|')[-1]))
                if t1_new_rank: t1_rank = t1_new_rank
                i = i + 1
            t2_points = optInt(data[i])
            i = i + 1
            if len(data) > i and ('rowspan' in data[i] or len(data) == 7 or len(data) == 8):
                t2_new_rank = optInt(clean(data[i].split('|')[-1]))
                if t2_new_rank: t2_rank = t2_new_rank
                i = i + 1
            note = None
            if len(data) > i:
                note = optString(clean(data[i].split('|')[-1]))
            print 'Import event ', english_name, '...',; sys.stdout.flush()
            defaults = {
                'english_name': english_name,
                'english_beginning': beginning,
                'english_end': end,
                'english_t1_points': t1_points,
                'english_t1_rank': (None if not t1_points else t1_rank),
                'english_t2_points': t2_points,
                'english_t2_rank': t2_rank,
            }
            event, created = models.Event.objects.update_or_create(japanese_name=japanese_name, defaults=defaults)
            print 'Done'
    print 'Set card {} cards as worldwide available...'.format(len(set_to_worldwide)),
    models.Card.objects.filter(pk__in=set_to_worldwide).update(japan_only=False)
    print 'Done'
    f.close()
class Command(BaseCommand):
    """Management command: refresh EN event data, then the raw DB."""

    can_import_settings = True

    def handle(self, *args, **options):
        parsed = opt_parse(args)
        import_en_events(parsed)
        import_raw_db()
| StarcoderdataPython |
# Print the integers from 1 to 100, skipping any that are divisible by 7
# as well as any whose decimal digits contain a 7.
for number in range(1, 101):
    divisible_by_7 = number % 7 == 0
    contains_7 = number % 10 == 7 or number // 10 == 7
    if not (divisible_by_7 or contains_7):
        print(number)
| StarcoderdataPython |
1632994 | <reponame>anotherbyte-net/gather-vision<gh_stars>0
from urllib.parse import urlparse, parse_qs
from environ import FileAwareEnv, ImproperlyConfigured
from gather_vision.process.item.playlist_conf import PlaylistConf
class GatherVisionEnv(FileAwareEnv):
    """django-environ subclass with gather-vision specific readers:
    an HTTP-cache URL parser and a playlist source/target JSON reader.
    """

    DEFAULT_EXTERNAL_HTTP_CACHE_ENV = "EXTERNAL_HTTP_CACHE_URL"
    # https://requests-cache.readthedocs.io/en/stable/user_guide/backends.html
    # Maps URL scheme -> dotted path of the requests-cache backend class.
    EXTERNAL_HTTP_CACHE_SCHEMES = {
        "sqlite": "requests_cache.backends.sqlite.SQLiteCache",
        "noop": None,
        "filesystem": "requests_cache.backends.filesystem.FileCache",
        "memory": "requests_cache.backends.base.BaseCache",
    }
    DEFAULT_PLAYLIST_SOURCES_TARGETS_ENV = "PLAYLIST_SOURCES_TARGETS"

    def external_http_cache_url(
        self,
        var=DEFAULT_EXTERNAL_HTTP_CACHE_ENV,
        default=FileAwareEnv.NOTSET,
        backend=None,
    ):
        """Read the env var *var* and return its parsed cache config dict,
        defaulting to EXTERNAL_HTTP_CACHE_URL.

        :rtype: dict
        """
        return self.external_http_cache_url_config(
            self.url(var, default=default), backend=backend
        )

    @classmethod
    def external_http_cache_url_config(cls, url, backend=None):
        """Parse an arbitrary cache URL (pattern adapted from DJ-Cache-URL).

        :param url: cache URL string or an already-parsed URL object;
            falsy values yield an empty dict.
        :param backend: currently unused override hook.
        :return: dict with BACKEND, LOCATION, EXPIRES and BACKEND_PARAMS.
        :raises ImproperlyConfigured: for an unknown URL scheme.
        """
        if not isinstance(url, cls.URL_CLASS):
            if not url:
                return {}
            else:
                url = urlparse(url)
        if url.scheme not in cls.EXTERNAL_HTTP_CACHE_SCHEMES:
            raise ImproperlyConfigured("Invalid cache schema {}".format(url.scheme))
        # A comma-separated netloc means multiple locations (kept as a list).
        location = url.netloc.split(",")
        if len(location) == 1:
            location = location[0]
        querystring = parse_qs(url.query) if url.query else {}
        # Flatten single-valued query params; keep multi-valued ones as lists.
        backend_params = {}
        for key, values in querystring.items():
            if len(values) == 0:
                backend_params[key] = None
            elif len(values) == 1:
                backend_params[key] = values[0]
            else:
                backend_params[key] = values
        config = {
            "BACKEND": cls.EXTERNAL_HTTP_CACHE_SCHEMES[url.scheme],
            "LOCATION": location or url.path,
            # 'expires'/'EXPIRES' is promoted out of the backend params.
            "EXPIRES": backend_params.pop(
                "expires", backend_params.pop("EXPIRES", None)
            ),
            "BACKEND_PARAMS": backend_params,
        }
        return config

    def playlist_sources_targets(
        self,
        var=DEFAULT_PLAYLIST_SOURCES_TARGETS_ENV,
        default=None,
        backend=None,
    ) -> list[PlaylistConf]:
        """Read a JSON env var describing playlist source/target pairs and
        convert each entry into a PlaylistConf.  Missing keys become None.
        """
        items = self.json(var, default or [])
        result = []
        for item in items:
            result.append(
                PlaylistConf(
                    source_code=item.get("source", {}).get("code"),
                    source_collection=item.get("source", {}).get("collection"),
                    target_code=item.get("target", {}).get("code"),
                    target_playlist_id=item.get("target", {}).get("playlist_id"),
                    target_title=item.get("target", {}).get("title"),
                )
            )
        return result
| StarcoderdataPython |
6608180 | import numpy as np
import random
from skimage import transform, exposure
from preprocessing.utils import make_folder
def random_rotation(img):
    """Rotate the image by a random angle.

    The angle is drawn uniformly from [-25, 25] degrees.

    Args:
        img (numpy array): Array of image pixels to rotate.

    Returns:
        (numpy array): Rotated image as uint8 (value range preserved).
    """
    angle = random.uniform(-25, 25)
    rotated = transform.rotate(img, angle, preserve_range=True)
    return rotated.astype(np.uint8)
def horizontal_flip(img):
    """Mirror the image left-to-right.

    Plain array slicing along the width axis -- no skimage needed.

    Args:
        img (numpy array): Array of image pixels.

    Returns:
        (numpy array): Horizontally flipped image.
    """
    mirrored = img[:, ::-1]
    return mirrored
def intensity(img):
    """Stretch the image contrast between its 0.2 and 99.8 percentiles.

    Args:
        img (numpy array): Array of image pixels.

    Returns:
        (numpy array): Contrast-rescaled image.
    """
    low, high = np.percentile(img, (0.2, 99.8))
    # Guard against a degenerate (near-constant) image.
    if np.abs(high - low) < 1e-3:
        high += 1e-3
    return exposure.rescale_intensity(img, in_range=(low, high))
def gamma(img):
    """Apply gamma correction (gamma=0.4, gain=0.9), brightening the image.

    Args:
        img (numpy array): Array of image pixels.

    Returns:
        (numpy array): Gamma-corrected image.
    """
    return exposure.adjust_gamma(img, gamma=0.4, gain=0.9)
def vertical_flip(img):
    """Mirror the image top-to-bottom.

    Plain array slicing along the height axis -- no skimage needed.

    Args:
        img (numpy array): Array of image pixels.

    Returns:
        (numpy array): Vertically flipped image.
    """
    flipped = img[::-1, :]
    return flipped
def data_augment(img, y_label):
    """Produce five augmented variants of *img*, each keeping *y_label*.

    Variants: horizontal flip, vertical flip, random rotation,
    gamma correction and intensity rescaling.

    Args:
        img (numpy array): Array of image pixels.
        y_label (str): Label of the image.

    Returns:
        (list): The five augmented images.
        (list): Matching labels (one copy of *y_label* per image).
    """
    augmented = [horizontal_flip(img), vertical_flip(img),
                 random_rotation(img), gamma(img), intensity(img)]
    labels = [y_label] * len(augmented)
    return augmented, labels
def main():
    """Load the training arrays, augment them per class, and save the
    combined dataset under ../data/augment/.
    """
    # Load data
    X_train = np.load('../data/intermediate/ImageTrain_input.npy')
    y_train = np.load('../data/intermediate/DiseaseTrain_input.npy')
    print('TO BE AUGMENTED DATA')
    print(X_train.shape)
    print(y_train.shape)
    # Per-class caps below limit how many samples get augmented -- presumably
    # to balance the class distribution; TODO confirm the cap values.
    br_count = e_count = lb_count = 0
    transformed_img = []
    y_array = []
    for i, name in enumerate(y_train):
        if name == 'healthy':
            x, y = data_augment(X_train[i], name)
            transformed_img.extend(x)
            y_array.extend(y)
        elif (name == 'black rot') and (br_count < 450):
            x, y = data_augment(X_train[i], name)
            transformed_img.extend(x)
            y_array.extend(y)
            br_count += 1
        elif (name == 'ecsa') and (e_count < 321):
            x, y = data_augment(X_train[i], name)
            transformed_img.extend(x)
            y_array.extend(y)
            e_count += 1
        elif (name == 'leaf_blight') and (lb_count < 308):
            x, y = data_augment(X_train[i], name)
            transformed_img.extend(x)
            y_array.extend(y)
            lb_count += 1
        elif name == 'powdery mildew':
            x, y = data_augment(X_train[i], name)
            transformed_img.extend(x)
            y_array.extend(y)
    transformed_img = np.array(transformed_img)
    y_array = np.array(y_array)
    print('AUGMENTED DATA')
    print(transformed_img.shape)
    print(y_array.shape)
    # Concatenate with initial image_array
    X_train = np.concatenate((X_train, transformed_img), axis=0)
    y_train = np.concatenate((y_train, y_array), axis=0)
    print('TOTAL MODEL INPUT DATA')
    print(X_train.shape)
    print(y_train.shape)
    # Save data
    make_folder('../data/augment')
    np.save('../data/augment/ImageAugment_input.npy', X_train)
    np.save('../data/augment/DiseaseAugment_input.npy', y_train)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1797565 | <gh_stars>1-10
from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
from nlp.keyPhraseApi import KeyPhrases
# from nlp.syntaxApi import WordSyntax
from src.tl_gan.script_generation_interactive import gen_image
from nlp.text_to_feature import get_closest_feature
import io
import base64
import json
# Flask app with permissive CORS.
app = Flask(__name__)
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# In-memory stores shared across requests:
# dictReturn: uid -> [gender, ethnicity, moreDetails, features]
dictReturn = {}
# imgs: index -> generated image from the most recent PUT
imgs = {}
@app.route("/", methods=["PUT"])
@cross_origin()
def put():
uid = request.json['uid']
# officer = request.json['o']
# caseNumber = request.json['cn']
# witnessName = request.json['wn']
gender = request.json['g']
ethnicity = request.json['e']
moreDetails = request.json['md']
keyPhrases = KeyPhrases().lookup(moreDetails)
# syntaxDict = WordSyntax().lookup(moreDetails)
print('KeyPhrases:',keyPhrases)
# arr = ['Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips',
# 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Bushy_Eyebrows', 'Chubby', 'Double_Chin',
# 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones', 'Male', 'Mouth_Slightly_Open',
# 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline',
# 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair', 'Young']
# features = {}
# for key in keyPhrases:
# # print('key',key)
# direct = key.title().replace(' ','_')
# for a in arr:
# if direct == a:
# features[direct] = 1
# else:
# words = direct.split('_')
# for word in words:
# if word.lower() == a.lower():
# features[word] = 1
features = get_closest_feature(keyPhrases = keyPhrases)
print('Features: ',features)
# prev = None
# for x in syntaxDict:
# if syntaxDict[x] == 'adj' or syntaxDict[x] == 'adv':
# pass
images = gen_image(gender, ethnicity, features)
for i in range(len(images)):
imgs[i] = images[i]
# dictReturn[uid] = [officer, caseNumber, witnessName, gender, ethnicity, moreDetails, keyPhrases, syntaxDict]
dictReturn[uid] = [gender, ethnicity, moreDetails, features]
# for i in range(len(images)):
# imgByteArr = io.BytesIO()
# images[i].save(imgByteArr, format = 'JPEG')
# imgByteArr = imgByteArr.getvalue()
# imgs[i] = json.dumps({str(i): imgByteArr.encode('base64')})
# print("type of images:", type(imgs[i]))
# print('dictReturn: ',dictReturn)
return jsonify(dictReturn)
@app.route("/", methods=["POST"])
@cross_origin()
def get():
    """Return the generated images for the requesting session as JSON.

    BUG FIX: the decorators were ordered with @cross_origin() above
    @app.route(...); decorators apply bottom-up, so Flask registered the
    un-wrapped view and the CORS headers were never sent.  @app.route is
    now outermost, matching put().
    """
    print('REQUEST.JSON:', request.json)
    # Reading 'uid' validates the request body (raises on a missing key),
    # even though the value itself is not used below.
    uid = request.json['uid']
    print(dictReturn)
    return json.dumps(imgs, ensure_ascii=False, indent=4)
@app.route("/clear/")
@cross_origin()
def clear():
    """Reset the server-side session state.

    BUG FIX: the old body was ``dict = {}`` -- it rebound a local name
    (shadowing the builtin), cleared nothing, and implicitly returned None,
    which Flask rejects as a response.  Now the shared stores are actually
    emptied and a valid JSON response is returned.  Decorator order is also
    fixed (@app.route outermost, as in put()).
    """
    dictReturn.clear()
    imgs.clear()
    return jsonify({})
| StarcoderdataPython |
5036240 | # -*- coding: utf-8 -*-
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, is_
from common.base_test import BaseTest
SUITE = {
"description": "Method 'get_block_virtual_ops'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_blocks_transactions", "get_block_virtual_ops")
@lcc.suite("Check work of method 'get_block'", rank=1)
class GetBlockVirtualOps(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
def get_head_block_num(self):
return self.echo.api.database.get_dynamic_global_properties()["head_block_number"]
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
lcc.log_info("Database API identifier is '{}'".format(self.__database_api_identifier))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of method 'get_block_virtual_ops'")
def method_main_check(self):
lcc.set_step("Get the full first block in the chain")
self.utils.set_timeout_until_num_blocks_released(self, self.__database_api_identifier, print_log=False)
block_num = self.get_head_block_num()
response_id = self.send_request(self.get_request("get_block_virtual_ops", [block_num]),
self.__database_api_identifier)
response = self.get_response(response_id)
lcc.log_info("Call method 'get_block_virtual_ops' with block_num='{}' parameter".format(block_num))
check_that("result", response["result"], is_([]))
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_blocks_transactions", "get_block_virtual_ops")
@lcc.suite("Positive testing of method 'get_block_virtual_ops'", rank=2)
class PositiveTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
def setup_suite(self):
super().setup_suite()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
lcc.log_info("Database API identifier is '{}'".format(self.__database_api_identifier))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Check virtual block_reward_operation in 1st block")
def method_main_check(self):
lcc.set_step("Get the first block virtual operations in the chain")
block_num = 1
virtual_operation_id = self.echo.config.operation_ids.BLOCK_REWARD
response_id = self.send_request(self.get_request("get_block_virtual_ops", [block_num]),
self.__database_api_identifier)
block_operation_id = self.get_response(response_id)["result"][0]["op"][0]
lcc.log_info("Call method 'get_block_virtual_ops' with block_num='{}' parameter".format(block_num))
check_that("block_reward_operation", block_operation_id, is_(virtual_operation_id))
| StarcoderdataPython |
1857693 | <filename>utils/mysql_stat.py<gh_stars>1-10
# coding: utf-8
from __future__ import print_function
from starter_app.utils import setup_django
# Configure Django settings before the django.db/django.conf imports below.
setup_django('starter_app')
from django.db import connection
from django.conf import settings
import pprint
def execute_print(cursor, sql):
    """Run *sql* on *cursor*, then print the statement and each result row."""
    cursor.execute(sql)
    print(sql)
    rows = cursor.fetchall()
    for row in rows:
        print('    {}'.format(row))
def main():
    """Print the active DATABASES config and a few MySQL session variables."""
    print('DATABASE conf:')
    pprint.pprint(settings.DATABASES['default'])
    statements = (
        r"SELECT USER();",
        r"SELECT DATABASE();",
        r"SHOW VARIABLES LIKE 'character%';",
        r"SHOW VARIABLES LIKE 'sql_mode';",
        r"SHOW VARIABLES LIKE 'default_storage_engine';",
    )
    with connection.cursor() as cur:
        for statement in statements:
            execute_print(cur, statement)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5035162 | # -*- coding: utf-8 -*-
import pytest
import random
import time
import sys
sys.path.extend(["../"])
from bbc1.core import bbclib
from testutils import prepare, get_core_client, start_core_thread, make_client, domain_setup_utility
from bbc1.core import domain0_manager, user_message_routing
from bbc1.core.message_key_types import KeyType
LOGLEVEL = 'info'
#LOGLEVEL = 'debug'
# Topology under test: 3 domains x 5 cores each, 2 clients per core.
domain_num = 3
core_per_domain = 5
core_num = domain_num * core_per_domain
client_per_core = 2
client_num = core_num * client_per_core
# Filled in later by the test setup via get_core_client().
cores = None
clients = None
domain_ids = [bbclib.get_new_id("testdomain%d" % i) for i in range(domain_num)]
asset_group_id = bbclib.get_new_id("asset_group_0")
core_domains = [None for i in range(core_num)]
msg_processor = [None for i in range(client_num)]
# Cross-ref bookkeeping shared across test cases.
num_assign_cross_ref = 0
num_cross_ref_registered = 0
cross_ref_regsistered_list = dict()  # NOTE(review): typo "regsistered" kept -- other chunks may reference this name
def show_domain_list(domain_list):
    """Pretty-print a mapping of domain_id (bytes) -> list of node_ids (bytes)."""
    for domain_id, node_ids in domain_list.items():
        print(" Domain:", domain_id.hex())
        for node_id in node_ids:
            print(" node_id:", node_id.hex())
def sleep_tick(wait_for):
    """Sleep roughly *wait_for* seconds, printing a heartbeat every second."""
    print("-- sleep %d sec" % wait_for)
    deadline = time.time() + wait_for
    while time.time() < deadline:
        print("(%d) .. waiting" % int(time.time()))
        time.sleep(1)
def prepare_transaction(asset_group, client, datnum, txid_pointer=None, no_cross_ref=False):
    """Build, sign and digest a single-relation BBc transaction.

    The transaction holds one asset (body b'data=<datnum>') owned by the
    client's user, witnessed and signed by the same user.  When
    *txid_pointer* is given, a pointer to that transaction id is attached;
    unless *no_cross_ref* is set, the client's app may include a cross_ref.
    Returns the finished transaction object.
    """
    user_id = client['user_id']
    kp = client['keypair']
    txobj = bbclib.BBcTransaction()
    rtn = bbclib.BBcRelation()
    asset = bbclib.BBcAsset()
    asset.add(user_id=user_id, asset_body=b'data=%d' % datnum)
    rtn.add(asset_group_id=asset_group, asset=asset)
    if txid_pointer is not None:
        # Link this relation to a prior transaction.
        ptr = bbclib.BBcPointer()
        ptr.add(transaction_id=txid_pointer)
        rtn.add(pointer=ptr)
    wit = bbclib.BBcWitness()
    txobj.add(relation=rtn, witness=wit)
    wit.add_witness(user_id)
    if not no_cross_ref:
        client['app'].include_cross_ref(txobj)
    txobj.add_signature(user_id=user_id, keypair=kp)
    txobj.digest()
    return txobj
class TestBBcAppClient(object):
    """End-to-end scenario: three domains (five cores each, two clients per
    core) exchange transactions while the domain0 managers distribute,
    register and verify cross_refs between domains.

    The test methods are order-dependent and share module-level globals;
    they must run sequentially under pytest.
    """

    def test_00_setup(self):
        """Start all core threads, create the domains, and attach clients."""
        print("-----", sys._getframe().f_code.co_name, "-----")
        # Shrink protocol timers so the scenario completes quickly.
        domain0_manager.Domain0Manager.DOMAIN_INFO_ADVERTISE_INTERVAL = 4  # just for testing
        domain0_manager.Domain0Manager.DOMAIN_INFO_LIFETIME = 8  # just for testing
        domain0_manager.Domain0Manager.NUM_OF_COPIES = 1
        user_message_routing.UserMessageRouting.MAX_CROSS_REF_STOCK = 0
        global msg_processor
        prepare(core_num=core_num, client_num=client_num, loglevel=LOGLEVEL)
        for i in range(domain_num):
            for j in range(core_per_domain):
                base_core_index = i * core_per_domain + j
                # Only the first two cores of each domain participate in domain0.
                domain0_flag = True if j == 0 or j == 1 else False
                start_core_thread(index=base_core_index, core_port_increment=base_core_index,
                                  p2p_port_increment=base_core_index, use_domain0=domain0_flag)
                domain_setup_utility(base_core_index, domain_ids[i])
                core_domains[base_core_index] = domain_ids[i]
        time.sleep(1)
        for i in range(domain_num):
            print("domain:", i)
            for j in range(core_per_domain):
                base_core_index = i * core_per_domain + j
                print(" base_core_index:", base_core_index)
                print(" client_index:", base_core_index*client_per_core, base_core_index*client_per_core+1)
                for k in range(client_per_core):
                    make_client(index=base_core_index*client_per_core+k, core_port_increment=base_core_index,
                                domain_id=domain_ids[i])
        time.sleep(1)
        global cores, clients
        cores, clients = get_core_client()
        for i in range(client_num):
            msg_processor[i] = clients[i]['app'].callback

    def test_1_register(self):
        """Register every client to its core."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        for cl in clients:
            ret = cl['app'].register_to_core()
            assert ret
        time.sleep(1)

    def test_2_setup_network_domain0(self):
        """Mesh the six domain0-enabled cores into a single domain0 network."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        ipv4 = cores[0].networking.ip_address
        ipv6 = cores[0].networking.ip6_address
        port = cores[0].networking.port
        # Cores 0/1, 5/6 and 10/11 run domain0; ping core 0 from the others.
        for i in [1, 5, 6, 10, 11]:
            cores[i].networking.send_domain_ping(domain_id=bbclib.domain_global_0, ipv4=ipv4, ipv6=ipv6, port=port, is_static=True)
        print("-- wait 5 seconds --")
        time.sleep(5)
        # Each of the 6 domain0 nodes should now see the 5 others as neighbors.
        assert len(cores[0].networking.domains[bbclib.domain_global_0]['neighbor'].nodeinfo_list) == 5  # 6 - 1
        assert len(cores[1].networking.domains[bbclib.domain_global_0]['neighbor'].nodeinfo_list) == 5
        assert len(cores[5].networking.domains[bbclib.domain_global_0]['neighbor'].nodeinfo_list) == 5
        assert len(cores[6].networking.domains[bbclib.domain_global_0]['neighbor'].nodeinfo_list) == 5
        assert len(cores[10].networking.domains[bbclib.domain_global_0]['neighbor'].nodeinfo_list) == 5
        assert len(cores[11].networking.domains[bbclib.domain_global_0]['neighbor'].nodeinfo_list) == 5
        # A core that does not run domain0 must not have joined it.
        assert bbclib.domain_global_0 not in cores[2].networking.domains
        print("-- wait 5 seconds --")
        time.sleep(5)

    def test_3_setup_network(self):
        """Build each application domain's neighbor network via domain pings."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        for i in range(domain_num):
            base_client_index = i*core_per_domain*client_per_core
            #print("base_client_index:", base_client_index)
            clients[base_client_index]['app'].get_domain_neighborlist(domain_id=domain_ids[i])
            dat = msg_processor[base_client_index].synchronize()
            print("[%d] nodeinfo = %s" % (i * core_per_domain, dat[0]))
            node_id, ipv4, ipv6, port, domain0 = dat[0]
            for j in range(core_per_domain):
                c_index = base_client_index + j * client_per_core
                clients[c_index]['app'].send_domain_ping(domain_ids[i], ipv4, ipv6, port)
        print("*** wait 5 seconds ***")
        time.sleep(5)
        for i in range(domain_num):
            print("**** domain:%d" % i)
            print(cores[i*core_per_domain].networking.domains[domain_ids[i]]['neighbor'].show_list())
            print(cores[i*core_per_domain+1].networking.domains[domain_ids[i]]['neighbor'].show_list())
            print(cores[i*core_per_domain+2].networking.domains[domain_ids[i]]['neighbor'].show_list())

    def test_13_wait(self):
        """Wait for domain0 advertisements, then check the learned lists."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        print("-- sleep 10 sec")
        time.sleep(10)
        for i in range(domain_num):
            print("****** [%d] %s ******" % (i, cores[i*core_per_domain].networking.domain0manager.my_node_id.hex()))
            dm = cores[i*core_per_domain].networking.domain0manager.domain_list
            show_domain_list(dm)
        print("-- sleep 10 sec")
        time.sleep(10)
        for i in range(domain_num):
            nd = cores[i*core_per_domain].networking.domain0manager.node_domain_list
            assert len(nd) == 4  # 6(all dom0_managers) - 2(dom0_managers in the domain)

    def test_20_make_transactions(self):
        """Insert transactions without cross_refs and audit the statistics."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        i = 0
        for k, cl in enumerate(clients):
            if k % 10 <= 1:
                # Skip clients attached to the domain0-enabled cores.
                continue
            for j in range(5):
                txobj = prepare_transaction(asset_group_id, cl, i, no_cross_ref=True)
                cl['app'].insert_transaction(txobj)
                msg_processor[k].synchronize()
                i += 1
            cl['app'].get_stats()
            msg_processor[k].synchronize()
        time.sleep(3)
        global num_assign_cross_ref
        num_cross_ref_in_clients = 0
        num_distribute_cross_ref_in_domain0 = 0
        num_drop_cross_ref = 0
        for i, cl in enumerate(clients):
            num_cross_ref_in_clients += len(cl['app'].cross_ref_list)
            if i % 2 == 1:
                continue
            cl['app'].get_stats()
            dat = msg_processor[i].synchronize()
            if KeyType.stats in dat:
                stat = dat[KeyType.stats]
                if i % 10 > 1:
                    print("[%d] transaction.insert_count=%d" % (i, stat[b'transaction'][b'insert_count']))
                    print("[%d] data_handler.insert_transaction=%d" % (i, stat[b'data_handler'][b'insert_transaction']))
                    assert stat[b'transaction'][b'insert_count'] == 5 * client_per_core
                    assert stat[b'data_handler'][b'insert_transaction'] == 5 * (core_per_domain - 1) * client_per_core
                if b'domain0' in dat[KeyType.stats]:
                    print("[%d] distribute_cross_ref_in_domain0=%d" %
                          (i, stat[b'domain0'].get(b'distribute_cross_ref_in_domain0', 0)))
                    print("[%d] GET_CROSS_REF_DISTRIBUTION=%d" %
                          (i, stat[b'domain0'].get(b'GET_CROSS_REF_DISTRIBUTION', 0)))
                    print("[%d] assign_cross_ref_to_nodes=%d" %
                          (i, stat[b'domain0'].get(b'assign_cross_ref_to_nodes', 0)))
                    print("[%d] drop_cross_ref_because_exceed_margin=%d" %
                          (i, stat[b'domain0'].get(b'drop_cross_ref_because_exceed_margin', 0)))
                    print("[%d] cross_ref_registered=%d" %
                          (i, stat[b'domain0'].get(b'cross_ref_registered', 0)))
                    num_distribute_cross_ref_in_domain0 += stat[b'domain0'].get(b'distribute_cross_ref_in_domain0', 0)
                    num_assign_cross_ref += stat[b'domain0'].get(b'assign_cross_ref_to_nodes', 0)
                    num_drop_cross_ref += stat[b'domain0'].get(b'drop_cross_ref_because_exceed_margin', 0)
                    # Nothing carried a cross_ref, so none may be registered yet.
                    assert stat[b'domain0'].get(b'cross_ref_registered', 0) == 0
        assert num_distribute_cross_ref_in_domain0 == num_assign_cross_ref + num_drop_cross_ref

    def test_21_make_transactions(self):
        """Insert transactions with cross_refs; all must get registered."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        i = 100
        num_including_cross_ref = 0
        for k, cl in enumerate(clients):
            if k % 10 <= 1:
                continue
            for j in range(5):
                txobj = prepare_transaction(asset_group_id, cl, i)
                cl['app'].insert_transaction(txobj)
                msg_processor[k].synchronize()
                if txobj.cross_ref is not None:
                    num_including_cross_ref += 1
                i += 1
        time.sleep(5)
        global num_cross_ref_registered
        for i, cl in enumerate(clients):
            if i % 2 == 1:
                continue
            cl['app'].get_stats()
            dat = msg_processor[i].synchronize()
            if KeyType.stats in dat and b'domain0' in dat[KeyType.stats]:
                stat = dat[KeyType.stats]
                if b'cross_ref_registered' in stat[b'domain0']:
                    print("[%d] cross_ref_registered=%d" % (i, stat[b'domain0'][b'cross_ref_registered']))
                    num_cross_ref_registered += stat[b'domain0'][b'cross_ref_registered']
                print("[%d] insert_cross_ref=%d" % (i, stat[b'data_handler'][b'insert_cross_ref']))
        assert num_including_cross_ref == num_cross_ref_registered

    def test_22_get_cross_ref_list(self):
        """Fetch each domain's registered cross_ref list and total them up."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        num_in_registered_list = 0
        global cross_ref_regsistered_list
        for i in [7, 17, 27]:
            clients[i]['app'].request_cross_ref_holders_list()
            dat = msg_processor[i].synchronize()
            assert KeyType.transaction_id_list in dat
            dm = clients[i]['app'].domain_id
            num_in_registered_list += len(dat[KeyType.transaction_id_list])
            cross_ref_regsistered_list.setdefault(dm, list())
            print("----")
            for txid in dat[KeyType.transaction_id_list]:
                print("txid:", txid.hex())
                cross_ref_regsistered_list[dm].append(txid)
        assert num_in_registered_list == num_cross_ref_registered

    def test_23_verify_cross_ref(self):
        """Pick one registered transaction per domain and verify its cross_ref."""
        print("\n-----", sys._getframe().f_code.co_name, "-----")
        for i in [6, 16, 26]:
            dm = clients[i]['app'].domain_id
            if len(cross_ref_regsistered_list[dm]) == 0:
                continue
            txid_to_verify = random.choice(cross_ref_regsistered_list[dm])
            clients[i]['app'].request_verify_by_cross_ref(txid_to_verify)
            dat = msg_processor[i].synchronize()
            assert KeyType.cross_ref_verification_info in dat
            transaction_base_digest, cross_ref_data, sigdata = dat[KeyType.cross_ref_verification_info]
            assert bbclib.verify_using_cross_ref(dm, txid_to_verify, transaction_base_digest, cross_ref_data, sigdata)
# Allow running this scenario file directly with "python <file>.py".
if __name__ == '__main__':
    pytest.main()
| StarcoderdataPython |
6601295 | """Create plots for option learning."""
import os
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from predicators.scripts.analyze_results_directory import create_dataframes, \
get_df_for_entry
pd.options.mode.chained_assignment = None # default='warn'
# plt.rcParams["font.family"] = "CMU Serif"
############################ Change below here ################################
# Details about the plt figure.
DPI = 500
FONT_SIZE = 18
# Groups over which to take mean/std.
GROUPS = [
"ENV", "APPROACH", "EXCLUDED_PREDICATES", "EXPERIMENT_ID",
"NUM_TRAIN_TASKS", "CYCLE"
]
# All column names and keys to load into the pandas tables before plotting.
COLUMN_NAMES_AND_KEYS = [
("ENV", "env"),
("APPROACH", "approach"),
("EXCLUDED_PREDICATES", "excluded_predicates"),
("EXPERIMENT_ID", "experiment_id"),
("SEED", "seed"),
("NUM_TRAIN_TASKS", "num_train_tasks"),
("CYCLE", "cycle"),
("NUM_SOLVED", "num_solved"),
("AVG_NUM_PREDS", "avg_num_preds"),
("AVG_TEST_TIME", "avg_suc_time"),
("AVG_NODES_CREATED", "avg_num_nodes_created"),
("LEARNING_TIME", "learning_time"),
("PERC_SOLVED", "perc_solved"),
]
DERIVED_KEYS = [("perc_solved",
lambda r: 100 * r["num_solved"] / r["num_test_tasks"])]
# The first element is the name of the metric that will be plotted on the
# x axis. See COLUMN_NAMES_AND_KEYS for all available metrics. The second
# element is used to label the x axis.
X_KEY_AND_LABEL = [
("NUM_TRAIN_TASKS", "Number of Demonstrations"),
]
# Same as above, but for the y axis.
Y_KEY_AND_LABEL = [
("PERC_SOLVED", "% Evaluation Tasks Solved"),
]
# PLOT_GROUPS is a nested dict where each outer dict corresponds to one plot,
# and each inner entry corresponds to one line on the plot.
# The keys of the outer dict are plot titles.
# The keys of the inner dict are (legend label, marker, df selector).
TITLE_ENVS = [
("Cover", "cover_multistep_options"),
("Stick Button", "stick_button"),
("Doors", "doors"),
("Coffee", "coffee"),
]
def _select_data(env: str, approach: str, df: pd.DataFrame) -> pd.DataFrame:
return df["EXPERIMENT_ID"].apply(
lambda v: v.startswith(f"{env}_{approach}_"))
# One plot per (title, env); each inner tuple is
# (legend label, line color, marker, dataframe selector).
PLOT_GROUPS = {
    title: [
        # ("Oracle Options", "black", "*",
        #  partial(_select_data, env, "oracle_options")),
        ("Ours", "darkgreen", "o", partial(_select_data, env, "main")),
        ("Ours (Nonparam)", "darkorange", "o",
         partial(_select_data, env, "direct_bc_nonparam")),
        ("GNN Metacontroller (Param)", "blue", "o",
         partial(_select_data, env, "gnn_metacontroller_param")),
        # ("GNN Metacontroller Param, Test # Objs", "blue", "o",
        #  partial(_select_data, env, "gnn_metacontroller_param")),
        # ("GNN Metacontroller Param, Train # Objs", "gold", "*",
        #  partial(_select_data, "train_objs_" + env,
        #          "gnn_metacontroller_param")),
        ("GNN Metacontroller (Nonparam)", "purple", "o",
         partial(_select_data, env, "gnn_metacontroller_nonparam")),
        # ("GNN Action Policy", "gold", "o",
        #  partial(_select_data, env, "gnn_action_policy")),
        ("Max Skeletons=1", "gray", "o",
         partial(_select_data, env, "direct_bc_max_skel1")),
        ("Max Samples=1", "brown", "o",
         partial(_select_data, env, "direct_bc_max_samp1")),
    ]
    for (title, env) in TITLE_ENVS
}

# If True, add (0, 0) to every plot
ADD_ZERO_POINT = True
# Fixed y-axis range so every percentage plot is visually comparable.
Y_LIM = (-5, 110)

#################### Should not need to change below here #####################
def _main() -> None:
    """Aggregate experiment results and save one errorbar plot per entry in
    PLOT_GROUPS, for every configured (x, y) metric pair."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    outdir = os.path.join(script_dir, "results")
    os.makedirs(outdir, exist_ok=True)
    matplotlib.rcParams.update({'font.size': FONT_SIZE})
    grouped_means, grouped_stds, _ = create_dataframes(COLUMN_NAMES_AND_KEYS,
                                                       GROUPS, DERIVED_KEYS)
    mean_table = grouped_means.reset_index()
    std_table = grouped_stds.reset_index()
    for x_key, x_label in X_KEY_AND_LABEL:
        for y_key, y_label in Y_KEY_AND_LABEL:
            for plot_title, line_specs in PLOT_GROUPS.items():
                _, ax = plt.subplots(figsize=(10, 5))
                for label, color, marker, selector in line_specs:
                    line_means = get_df_for_entry(x_key, mean_table, selector)
                    line_stds = get_df_for_entry(x_key, std_table, selector)
                    xs = line_means[x_key].tolist()
                    ys = line_means[y_key].tolist()
                    y_errs = line_stds[y_key].tolist()
                    if ADD_ZERO_POINT:
                        # Anchor every curve at the origin.
                        xs.insert(0, 0)
                        ys.insert(0, 0)
                        y_errs.insert(0, 0)
                    ax.errorbar(xs, ys, yerr=y_errs, label=label,
                                color=color, marker=marker)
                ax.set_title(plot_title)
                ax.set_xlabel(x_label)
                ax.set_ylabel(y_label)
                ax.set_ylim(Y_LIM)
                plt.legend(loc='center left', bbox_to_anchor=(1, 0.5),
                           prop={'size': 12})
                plt.tight_layout()
                filename = f"{plot_title}_{x_key}_{y_key}.png"
                filename = filename.replace(" ", "_").lower()
                outfile = os.path.join(outdir, filename)
                plt.savefig(outfile, dpi=DPI)
                print(f"Wrote out to {outfile}")
# Entry point when run as a script.
if __name__ == "__main__":
    _main()
| StarcoderdataPython |
9618603 | <filename>build/lib/dupecheck/utils.py
def v_print(verbose=False, msg=""):
print(msg)
| StarcoderdataPython |
66016 | <reponame>patrickfung/microsoft-authentication-library-for-python
"""This OAuth2 client implementation aims to be spec-compliant, and generic."""
# OAuth2 spec https://tools.ietf.org/html/rfc6749
import json
try:
from urllib.parse import urlencode, parse_qs, quote_plus
except ImportError:
from urlparse import parse_qs
from urllib import urlencode, quote_plus
import logging
import warnings
import time
import base64
import sys
import functools
import random
import string
import requests
# Py2/Py3 compatibility: the tuple of types that count as "a string".
string_types = (str,) if sys.version_info[0] >= 3 else (basestring, )
class BaseClient(object):
    """Generic OAuth2 client plumbing shared by every grant type (RFC 6749)."""
    # This low-level interface works. Yet you'll find its sub-class
    # more friendly to remind you what parameters are needed in each scenario.
    # More on Client Types at https://tools.ietf.org/html/rfc6749#section-2.1

    @staticmethod
    def encode_saml_assertion(assertion):
        # base64url-encode with padding stripped, as RFC 7522 requires.
        return base64.urlsafe_b64encode(assertion).rstrip(b'=')  # Per RFC 7522

    CLIENT_ASSERTION_TYPE_JWT = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
    CLIENT_ASSERTION_TYPE_SAML2 = "urn:ietf:params:oauth:client-assertion-type:saml2-bearer"
    # Maps a client_assertion_type to the encoder applied to the raw assertion.
    client_assertion_encoders = {CLIENT_ASSERTION_TYPE_SAML2: encode_saml_assertion}

    @property
    def session(self):
        # Deprecated alias for the underlying HTTP client.
        warnings.warn("Will be gone in next major release", DeprecationWarning)
        return self._http_client

    @session.setter
    def session(self, value):
        warnings.warn("Will be gone in next major release", DeprecationWarning)
        self._http_client = value

    def __init__(
            self,
            server_configuration,  # type: dict
            client_id,  # type: str
            http_client=None,  # We insert it here to match the upcoming async API
            client_secret=None,  # type: Optional[str]
            client_assertion=None,  # type: Union[bytes, callable, None]
            client_assertion_type=None,  # type: Optional[str]
            default_headers=None,  # type: Optional[dict]
            default_body=None,  # type: Optional[dict]
            verify=None,  # type: Union[str, True, False, None]
            proxies=None,  # type: Optional[dict]
            timeout=None,  # type: Union[tuple, float, None]
            ):
        """Initialize a client object to talk all the OAuth2 grants to the server.

        Args:
            server_configuration (dict):
                It contains the configuration (i.e. metadata) of the auth server.
                The actual content typically contains keys like
                "authorization_endpoint", "token_endpoint", etc..
                Based on RFC 8414 (https://tools.ietf.org/html/rfc8414),
                you can probably fetch it online from either
                https://example.com/.../.well-known/oauth-authorization-server
                or
                https://example.com/.../.well-known/openid-configuration
            client_id (str): The client's id, issued by the authorization server
            http_client (http.HttpClient):
                Your implementation of abstract class :class:`http.HttpClient`.
                Defaults to a requests session instance.
                There is no session-wide `timeout` parameter defined here.
                Timeout behavior is determined by the actual http client you use.
                If you happen to use Requests, it disallows session-wide timeout
                (https://github.com/psf/requests/issues/3341). The workaround is:
                    s = requests.Session()
                    s.request = functools.partial(s.request, timeout=3)
                and then feed that patched session instance to this class.
            client_secret (str): Triggers HTTP AUTH for Confidential Client
            client_assertion (bytes, callable):
                The client assertion to authenticate this client, per RFC 7521.
                It can be a raw SAML2 assertion (this method will encode it for you),
                or a raw JWT assertion.
                It can also be a callable (recommended),
                so that we will do lazy creation of an assertion.
            client_assertion_type (str):
                The type of your :attr:`client_assertion` parameter.
                It is typically the value of :attr:`CLIENT_ASSERTION_TYPE_SAML2` or
                :attr:`CLIENT_ASSERTION_TYPE_JWT`, the only two defined in RFC 7521.
            default_headers (dict):
                A dict to be sent in each request header.
                It is not required by OAuth2 specs, but you may use it for telemetry.
            default_body (dict):
                A dict to be sent in each token request body. For example,
                you could choose to set this as {"client_secret": "your secret"}
                if your authorization server wants it to be in the request body
                (rather than in the request header).
            verify (boolean):
                It will be passed to the
                `verify parameter in the underlying requests library
                <http://docs.python-requests.org/en/v2.9.1/user/advanced/#ssl-cert-verification>`_.
                When leaving it with default value (None), we will use True instead.
                This does not apply if you have chosen to pass your own Http client.
            proxies (dict):
                It will be passed to the
                `proxies parameter in the underlying requests library
                <http://docs.python-requests.org/en/v2.9.1/user/advanced/#proxies>`_.
                This does not apply if you have chosen to pass your own Http client.
            timeout (object):
                It will be passed to the
                `timeout parameter in the underlying requests library
                <http://docs.python-requests.org/en/v2.9.1/user/advanced/#timeouts>`_.
                This does not apply if you have chosen to pass your own Http client.
        """
        if not server_configuration:
            raise ValueError("Missing input parameter server_configuration")
        if not client_id:
            raise ValueError("Missing input parameter client_id")
        self.configuration = server_configuration
        self.client_id = client_id
        self.client_secret = client_secret
        self.client_assertion = client_assertion
        self.default_headers = default_headers or {}
        self.default_body = default_body or {}
        if client_assertion_type is not None:
            self.default_body["client_assertion_type"] = client_assertion_type
        self.logger = logging.getLogger(__name__)
        if http_client:
            # A custom http client owns its own transport settings, so the
            # requests-specific knobs must not be mixed in.
            if verify is not None or proxies is not None or timeout is not None:
                raise ValueError(
                    "verify, proxies, or timeout is not allowed "
                    "when http_client is in use")
            self._http_client = http_client
        else:
            self._http_client = requests.Session()
            self._http_client.verify = True if verify is None else verify
            self._http_client.proxies = proxies
            self._http_client.request = functools.partial(
                # A workaround for requests not supporting session-wide timeout
                self._http_client.request, timeout=timeout)

    def _build_auth_request_params(self, response_type, **kwargs):
        # Build the query-string params for the authorization endpoint.
        # response_type is a string defined in
        # https://tools.ietf.org/html/rfc6749#section-3.1.1
        # or it can be a space-delimited string as defined in
        # https://tools.ietf.org/html/rfc6749#section-8.4
        response_type = self._stringify(response_type)
        params = {'client_id': self.client_id, 'response_type': response_type}
        params.update(kwargs)  # Note: None values will override params
        params = {k: v for k, v in params.items() if v is not None}  # clean up
        if params.get('scope'):
            params['scope'] = self._stringify(params['scope'])
        return params  # A dict suitable to be used in http request

    def _obtain_token(  # The verb "obtain" is influenced by OAUTH2 RFC 6749
            self, grant_type,
            params=None,  # a dict to be sent as query string to the endpoint
            data=None,  # All relevant data, which will go into the http body
            headers=None,  # a dict to be sent as request headers
            post=None,  # A callable to replace requests.post(), for testing.
                        # Such as: lambda url, **kwargs:
                        #   Mock(status_code=200, text='{}')
            **kwargs  # Relay all extra parameters to underlying requests
            ):  # Returns the json object came from the OAUTH2 response
        """POST to the token endpoint and return the parsed JSON response."""
        _data = {'client_id': self.client_id, 'grant_type': grant_type}
        if self.default_body.get("client_assertion_type") and self.client_assertion:
            # See https://tools.ietf.org/html/rfc7521#section-4.2
            encoder = self.client_assertion_encoders.get(
                self.default_body["client_assertion_type"], lambda a: a)
            _data["client_assertion"] = encoder(
                self.client_assertion()  # Do lazy on-the-fly computation
                if callable(self.client_assertion) else self.client_assertion)
        _data.update(self.default_body)  # It may contain authen parameters
        _data.update(data or {})  # So the content in data param prevails
        _data = {k: v for k, v in _data.items() if v}  # Clean up None values
        if _data.get('scope'):
            _data['scope'] = self._stringify(_data['scope'])
        _headers = {'Accept': 'application/json'}
        _headers.update(self.default_headers)
        _headers.update(headers or {})
        # Quoted from https://tools.ietf.org/html/rfc6749#section-2.3.1
        # Clients in possession of a client password MAY use the HTTP Basic
        # authentication.
        # Alternatively, (but NOT RECOMMENDED,)
        # the authorization server MAY support including the
        # client credentials in the request-body using the following
        # parameters: client_id, client_secret.
        if self.client_secret and self.client_id:
            _headers["Authorization"] = "Basic " + base64.b64encode("{}:{}".format(
                # Per https://tools.ietf.org/html/rfc6749#section-2.3.1
                # client_id and client_secret needs to be encoded by
                # "application/x-www-form-urlencoded"
                # https://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.1
                # BEFORE they are fed into HTTP Basic Authentication
                quote_plus(self.client_id), quote_plus(self.client_secret)
                ).encode("ascii")).decode("ascii")
        if "token_endpoint" not in self.configuration:
            raise ValueError("token_endpoint not found in configuration")
        resp = (post or self._http_client.post)(
            self.configuration["token_endpoint"],
            headers=_headers, params=params, data=_data,
            **kwargs)
        if resp.status_code >= 500:
            resp.raise_for_status()  # TODO: Will probably retry here
        try:
            # The spec (https://tools.ietf.org/html/rfc6749#section-5.2) says
            # even an error response will be a valid json structure,
            # so we simply return it here, without needing to invent an exception.
            return json.loads(resp.text)
        except ValueError:
            self.logger.exception(
                "Token response is not in json format: %s", resp.text)
            raise

    def obtain_token_by_refresh_token(self, refresh_token, scope=None, **kwargs):
        # type: (str, Union[str, list, set, tuple]) -> dict
        """Obtain an access token via a refresh token.

        :param refresh_token: The refresh token issued to the client
        :param scope: If omitted, is treated as equal to the scope originally
            granted by the resource owner,
            according to https://tools.ietf.org/html/rfc6749#section-6
        """
        assert isinstance(refresh_token, string_types)
        data = kwargs.pop('data', {})
        data.update(refresh_token=refresh_token, scope=scope)
        return self._obtain_token("refresh_token", data=data, **kwargs)

    def _stringify(self, sequence):
        # Collapse a scope collection into the space-delimited string the
        # wire format expects; pass through anything already a string.
        if isinstance(sequence, (list, set, tuple)):
            return ' '.join(sorted(sequence))  # normalizing it, ascendingly
        return sequence  # as-is
class Client(BaseClient):  # We choose to implement all 4 grants in 1 class
    """This is the main API for oauth2 client.

    Its methods define and document parameters mentioned in OAUTH2 RFC 6749.
    """
    # Device-flow wire constants; sub-classes may override for non-standard IdPs.
    DEVICE_FLOW = {  # consts for device flow, that can be customized by sub-class
        "GRANT_TYPE": "urn:ietf:params:oauth:grant-type:device_code",
        "DEVICE_CODE": "device_code",
    }
    # Token-endpoint error codes that mean "keep polling".
    DEVICE_FLOW_RETRIABLE_ERRORS = ("authorization_pending", "slow_down")
    GRANT_TYPE_SAML2 = "urn:ietf:params:oauth:grant-type:saml2-bearer"  # RFC7522
    GRANT_TYPE_JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer"  # RFC7523
    # Maps an assertion grant type to the encoder applied to the raw assertion.
    grant_assertion_encoders = {GRANT_TYPE_SAML2: BaseClient.encode_saml_assertion}
def initiate_device_flow(self, scope=None, **kwargs):
# type: (list, **dict) -> dict
# The naming of this method is following the wording of this specs
# https://tools.ietf.org/html/draft-ietf-oauth-device-flow-12#section-3.1
"""Initiate a device flow.
Returns the data defined in Device Flow specs.
https://tools.ietf.org/html/draft-ietf-oauth-device-flow-12#section-3.2
You should then orchestrate the User Interaction as defined in here
https://tools.ietf.org/html/draft-ietf-oauth-device-flow-12#section-3.3
And possibly here
https://tools.ietf.org/html/draft-ietf-oauth-device-flow-12#section-3.3.1
"""
DAE = "device_authorization_endpoint"
if not self.configuration.get(DAE):
raise ValueError("You need to provide device authorization endpoint")
resp = self._http_client.post(self.configuration[DAE],
data={"client_id": self.client_id, "scope": self._stringify(scope or [])},
headers=dict(self.default_headers, **kwargs.pop("headers", {})),
**kwargs)
flow = json.loads(resp.text)
flow["interval"] = int(flow.get("interval", 5)) # Some IdP returns string
flow["expires_in"] = int(flow.get("expires_in", 1800))
flow["expires_at"] = time.time() + flow["expires_in"] # We invent this
return flow
def _obtain_token_by_device_flow(self, flow, **kwargs):
# type: (dict, **dict) -> dict
# This method updates flow during each run. And it is non-blocking.
now = time.time()
skew = 1
if flow.get("latest_attempt_at", 0) + flow.get("interval", 5) - skew > now:
warnings.warn('Attempted too soon. Please do time.sleep(flow["interval"])')
data = kwargs.pop("data", {})
data.update({
"client_id": self.client_id,
self.DEVICE_FLOW["DEVICE_CODE"]: flow["device_code"],
})
result = self._obtain_token(
self.DEVICE_FLOW["GRANT_TYPE"], data=data, **kwargs)
if result.get("error") == "slow_down":
# Respecting https://tools.ietf.org/html/draft-ietf-oauth-device-flow-12#section-3.5
flow["interval"] = flow.get("interval", 5) + 5
flow["latest_attempt_at"] = now
return result
def obtain_token_by_device_flow(self,
flow,
exit_condition=lambda flow: flow.get("expires_at", 0) < time.time(),
**kwargs):
# type: (dict, Callable) -> dict
"""Obtain token by a device flow object, with customizable polling effect.
Args:
flow (dict):
An object previously generated by initiate_device_flow(...).
Its content WILL BE CHANGED by this method during each run.
We share this object with you, so that you could implement
your own loop, should you choose to do so.
exit_condition (Callable):
This method implements a loop to provide polling effect.
The loop's exit condition is calculated by this callback.
The default callback makes the loop run until the flow expires.
Therefore, one of the ways to exit the polling early,
is to change the flow["expires_at"] to a small number such as 0.
In case you are doing async programming, you may want to
completely turn off the loop. You can do so by using a callback as:
exit_condition = lambda flow: True
to make the loop run only once, i.e. no polling, hence non-block.
"""
while True:
result = self._obtain_token_by_device_flow(flow, **kwargs)
if result.get("error") not in self.DEVICE_FLOW_RETRIABLE_ERRORS:
return result
for i in range(flow.get("interval", 5)): # Wait interval seconds
if exit_condition(flow):
return result
time.sleep(1) # Shorten each round, to make exit more responsive
def _build_auth_request_uri(
self,
response_type, redirect_uri=None, scope=None, state=None, **kwargs):
if "authorization_endpoint" not in self.configuration:
raise ValueError("authorization_endpoint not found in configuration")
authorization_endpoint = self.configuration["authorization_endpoint"]
params = self._build_auth_request_params(
response_type, redirect_uri=redirect_uri, scope=scope, state=state,
**kwargs)
sep = '&' if '?' in authorization_endpoint else '?'
return "%s%s%s" % (authorization_endpoint, sep, urlencode(params))
def build_auth_request_uri(
self,
response_type, redirect_uri=None, scope=None, state=None, **kwargs):
# This method could be named build_authorization_request_uri() instead,
# but then there would be a build_authentication_request_uri() in the OIDC
# subclass doing almost the same thing. So we use a loose term "auth" here.
"""Generate an authorization uri to be visited by resource owner.
Parameters are the same as another method :func:`initiate_auth_code_flow()`,
whose functionality is a superset of this method.
:return: The auth uri as a string.
"""
warnings.warn("Use initiate_auth_code_flow() instead. ", DeprecationWarning)
return self._build_auth_request_uri(
response_type, redirect_uri=redirect_uri, scope=scope, state=state,
**kwargs)
def initiate_auth_code_flow(
# The name is influenced by OIDC
# https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth
self,
scope=None, redirect_uri=None, state=None,
**kwargs):
"""Initiate an auth code flow.
Later when the response reaches your redirect_uri,
you can use :func:`~obtain_token_by_auth_code_flow()`
to complete the authentication/authorization.
:param list scope:
It is a list of case-sensitive strings.
Some ID provider can accept empty string to represent default scope.
:param str redirect_uri:
Optional. If not specified, server will use the pre-registered one.
:param str state:
An opaque value used by the client to
maintain state between the request and callback.
If absent, this library will automatically generate one internally.
:param kwargs: Other parameters, typically defined in OpenID Connect.
:return:
The auth code flow. It is a dict in this form::
{
"auth_uri": "https://...", // Guide user to visit this
"state": "...", // You may choose to verify it by yourself,
// or just let obtain_token_by_auth_code_flow()
// do that for you.
"...": "...", // Everything else are reserved and internal
}
The caller is expected to::
1. somehow store this content, typically inside the current session,
2. guide the end user (i.e. resource owner) to visit that auth_uri,
3. and then relay this dict and subsequent auth response to
:func:`~obtain_token_by_auth_code_flow()`.
"""
response_type = kwargs.pop("response_type", "code") # Auth Code flow
# Must be "code" when you are using Authorization Code Grant.
# The "token" for Implicit Grant is not applicable thus not allowed.
# It could theoretically be other
# (possibly space-delimited) strings as registered extension value.
# See https://tools.ietf.org/html/rfc6749#section-3.1.1
if "token" in response_type:
# Implicit grant would cause auth response coming back in #fragment,
# but fragment won't reach a web service.
raise ValueError('response_type="token ..." is not allowed')
flow = { # These data are required by obtain_token_by_auth_code_flow()
"state": state or "".join(random.sample(string.ascii_letters, 16)),
"redirect_uri": redirect_uri,
"scope": scope,
}
auth_uri = self._build_auth_request_uri(
response_type, **dict(flow, **kwargs))
flow["auth_uri"] = auth_uri
return flow
def obtain_token_by_auth_code_flow(
self,
auth_code_flow,
auth_response,
scope=None,
**kwargs):
"""With the auth_response being redirected back,
validate it against auth_code_flow, and then obtain tokens.
:param dict auth_code_flow:
The same dict returned by :func:`~initiate_auth_code_flow()`.
:param dict auth_response:
A dict based on query string received from auth server.
:param scope:
You don't usually need to use scope parameter here.
Some Identity Provider allows you to provide
a subset of what you specified during :func:`~initiate_auth_code_flow`.
:return:
* A dict containing "access_token" and/or "id_token", among others,
depends on what scope was used.
(See https://tools.ietf.org/html/rfc6749#section-5.1)
* A dict containing "error", optionally "error_description", "error_uri".
(It is either `this <https://tools.ietf.org/html/rfc6749#section-4.1.2.1>`_
or `that <https://tools.ietf.org/html/rfc6749#section-5.2>`_
"""
if auth_code_flow.get("state") != auth_response.get("state"):
raise ValueError("state mismatch: {} vs {}".format(
auth_code_flow.get("state"), auth_response.get("state")))
if auth_response.get("error"): # It means the first leg encountered error
return auth_response
if scope and set(scope) - set(auth_code_flow.get("scope", [])):
raise ValueError(
"scope must be None or a subset of %s" % auth_code_flow.get("scope"))
assert auth_response.get("code"), "First leg's response should have code"
return self._obtain_token_by_authorization_code(
auth_response["code"],
redirect_uri=auth_code_flow.get("redirect_uri"),
# Required, if "redirect_uri" parameter was included in the
# authorization request, and their values MUST be identical.
scope=scope or auth_code_flow.get("scope"),
# It is both unnecessary and harmless, per RFC 6749.
# We use the same scope already used in auth request uri,
# thus token cache can know what scope the tokens are for.
**kwargs)
@staticmethod
def parse_auth_response(params, state=None):
    """Parse the authorization response being redirected back.

    Deprecated: prefer :func:`~obtain_token_by_auth_code_flow`.

    :param params: A string or dict of the query string
    :param state: REQUIRED if the state parameter was present in the client
        authorization request. This function will compare it with response.
    """
    warnings.warn(
        "Use obtain_token_by_auth_code_flow() instead", DeprecationWarning)
    parsed = params if isinstance(params, dict) else parse_qs(params)
    if parsed.get('state') != state:
        raise ValueError('state mismatch')
    return parsed
def obtain_token_by_authorization_code(
        self, code, redirect_uri=None, scope=None, **kwargs):
    """Get a token via authorization code. a.k.a. Authorization Code Grant.

    This is typically used by a server-side app (Confidential Client),
    but it can also be used by a device-side native app (Public Client).
    See more detail at https://tools.ietf.org/html/rfc6749#section-4.1.3

    You are encouraged to use its higher level method
    :func:`~obtain_token_by_auth_code_flow` instead.

    :param code: The authorization code received from authorization server.
    :param redirect_uri:
        Required, if the "redirect_uri" parameter was included in the
        authorization request, and their values MUST be identical.
    :param scope:
        It is both unnecessary and harmless to use scope here, per RFC 6749.
        We suggest to use the same scope already used in auth request uri,
        so that this library can link the obtained tokens with their scope.
    """
    # Deprecated public entry point; the real work happens in the
    # private _obtain_token_by_authorization_code() helper.
    warnings.warn(
        "Use obtain_token_by_auth_code_flow() instead", DeprecationWarning)
    return self._obtain_token_by_authorization_code(
        code, redirect_uri=redirect_uri, scope=scope, **kwargs)
def _obtain_token_by_authorization_code(
        self, code, redirect_uri=None, scope=None, **kwargs):
    """Exchange an authorization code for tokens.

    See https://tools.ietf.org/html/rfc6749#section-4.1.3
    """
    payload = kwargs.pop("data", {})
    payload["code"] = code
    payload["redirect_uri"] = redirect_uri
    if scope:
        payload["scope"] = scope
    if not self.client_secret:
        # client_id is required, if the client is not authenticating itself.
        # See https://tools.ietf.org/html/rfc6749#section-4.1.3
        payload["client_id"] = self.client_id
    return self._obtain_token("authorization_code", data=payload, **kwargs)
def obtain_token_by_username_password(
        self, username, password, scope=None, **kwargs):
    """The Resource Owner Password Credentials Grant, used by legacy app.

    See https://tools.ietf.org/html/rfc6749#section-4.3
    """
    body = kwargs.pop("data", {})
    body.update({"username": username, "password": password, "scope": scope})
    return self._obtain_token("password", data=body, **kwargs)
def obtain_token_for_client(self, scope=None, **kwargs):
    """Obtain token for this client (rather than for an end user),
    a.k.a. the Client Credentials Grant, used by Backend Applications.

    We don't name it obtain_token_by_client_credentials(...) because those
    credentials are typically already provided in class constructor, not here.
    You can still explicitly provide an optional client_secret parameter,
    or you can provide such extra parameters as `default_body` during the
    class initialization.

    :param scope: Optional scope(s) requested for the client itself.
    """
    data = kwargs.pop("data", {})
    data.update(scope=scope)
    # https://tools.ietf.org/html/rfc6749#section-4.4
    return self._obtain_token("client_credentials", data=data, **kwargs)
def __init__(self,
        server_configuration, client_id,
        on_obtaining_tokens=lambda event: None,  # event is defined in _obtain_token(...)
        on_removing_rt=lambda token_item: None,
        on_updating_rt=lambda token_item, new_rt: None,
        **kwargs):
    """Construct a Client wired with token-storage callbacks.

    The three ``on_*`` callbacks default to no-ops.  Supply them to be
    notified when tokens are obtained, and when a refresh token (RT)
    should be removed from, or updated in, the caller's token store.
    """
    super(Client, self).__init__(server_configuration, client_id, **kwargs)
    self.on_obtaining_tokens = on_obtaining_tokens
    self.on_removing_rt = on_removing_rt
    self.on_updating_rt = on_updating_rt
def _obtain_token(
        self, grant_type, params=None, data=None,
        also_save_rt=False,
        on_obtaining_tokens=None,
        *args, **kwargs):
    """Call the parent's _obtain_token(), then fire the token-storage callback.

    :param also_save_rt: when True, a refresh-token grant's new RT is kept
        inside the callback event (instead of being left to
        obtain_token_by_refresh_token()'s own on_updating_rt callback).
    :param on_obtaining_tokens: optional per-call override of the callback
        given at construction time.
    """
    # NOTE(review): assumes callers always pass a dict (possibly empty)
    # for `data`; a None here would raise AttributeError — confirm.
    _data = data.copy()  # to prevent side effect
    resp = super(Client, self)._obtain_token(
        grant_type, params, _data, *args, **kwargs)
    if "error" not in resp:
        _resp = resp.copy()
        RT = "refresh_token"
        if grant_type == RT and RT in _resp and not also_save_rt:
            # Then we skip it from on_obtaining_tokens();
            # Leave it to self.obtain_token_by_refresh_token()
            _resp.pop(RT, None)
        if "scope" in _resp:
            scope = _resp["scope"].split()  # It is conceptually a set,
            # but we represent it as a list which can be persisted to JSON
        else:
            # Note: The scope will generally be absent in authorization grant,
            # but our obtain_token_by_authorization_code(...) encourages
            # app developer to still explicitly provide a scope here.
            scope = _data.get("scope")
        (on_obtaining_tokens or self.on_obtaining_tokens)({
            "client_id": self.client_id,
            "scope": scope,
            "token_endpoint": self.configuration["token_endpoint"],
            "grant_type": grant_type,  # can be used to know an IdToken-less
            # response is for an app or for a user
            "response": _resp, "params": params, "data": _data,
        })
    return resp
def obtain_token_by_refresh_token(self, token_item, scope=None,
        rt_getter=lambda token_item: token_item["refresh_token"],
        on_removing_rt=None,
        on_updating_rt=None,
        on_obtaining_tokens=None,
        **kwargs):
    # type: (Union[str, dict], Union[str, list, set, tuple], Callable) -> dict
    """This is an overload which will trigger token storage callbacks.

    :param token_item:
        A refresh token (RT) item, in flexible format. It can be a string,
        or a whatever data structure containing RT string and its metadata,
        in such case the `rt_getter` callable must be able to
        extract the RT string out from the token item data structure.
        Either way, this token_item will be passed into other callbacks as-is.

    :param scope: If omitted, is treated as equal to the scope originally
        granted by the resource owner,
        according to https://tools.ietf.org/html/rfc6749#section-6

    :param rt_getter: A callable to translate the token_item to a raw RT string

    :param on_removing_rt: If absent, fall back to the one defined in initialization

    :param on_updating_rt:
        Default to None, it will fall back to the one defined in initialization.
        This is the most common case.
        As a special case, you can pass in a False,
        then this function will NOT trigger on_updating_rt() for RT UPDATE,
        instead it will allow the RT to be added by on_obtaining_tokens().
        This behavior is useful when you are migrating RTs from elsewhere
        into a token storage managed by this library.
    """
    # NOTE(review): the on_obtaining_tokens parameter is accepted but never
    # forwarded to the underlying call — confirm whether it should be
    # passed through via kwargs.
    resp = super(Client, self).obtain_token_by_refresh_token(
        rt_getter(token_item)
        if not isinstance(token_item, string_types) else token_item,
        scope=scope,
        # on_updating_rt=False means "let on_obtaining_tokens() keep the RT".
        also_save_rt=on_updating_rt is False,
        **kwargs)
    if resp.get('error') == 'invalid_grant':
        # Server says this RT is no longer usable; tell the store to drop it.
        (on_removing_rt or self.on_removing_rt)(token_item)  # Discard old RT
    RT = "refresh_token"
    if on_updating_rt is not False and RT in resp:
        # Token rotation: persist the newly issued RT over the old one.
        (on_updating_rt or self.on_updating_rt)(token_item, resp[RT])
    return resp
def obtain_token_by_assertion(
        self, assertion, grant_type, scope=None, **kwargs):
    # type: (bytes, Union[str, None], Union[str, list, set, tuple]) -> dict
    """This method implements Assertion Framework for OAuth2 (RFC 7521).

    See details at https://tools.ietf.org/html/rfc7521#section-4.1

    :param assertion:
        The assertion bytes can be a raw SAML2 assertion, or a JWT assertion.
    :param grant_type:
        It is typically either the value of :attr:`GRANT_TYPE_SAML2`,
        or :attr:`GRANT_TYPE_JWT`, the only two profiles defined in RFC 7521.
    :param scope: Optional. It must be a subset of previously granted scopes.
    """
    # Unknown grant types fall back to a pass-through encoder.
    encode = self.grant_assertion_encoders.get(grant_type, lambda raw: raw)
    body = kwargs.pop("data", {})
    body["scope"] = scope
    body["assertion"] = encode(assertion)
    return self._obtain_token(grant_type, data=body, **kwargs)
| StarcoderdataPython |
8082285 | <filename>hikari_clusters/info_classes.py
# MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from dataclasses import dataclass
__all__ = ("ServerInfo", "ClusterInfo")
@dataclass
class ServerInfo:
    """A representation of a :class:`~server.Server`.

    A plain data carrier used to describe a server over IPC.
    """
    uid: int
    """The ipc uid of the server."""
    cluster_uids: list[int]
    """The uids of the clusters that this server has launched."""
@dataclass
class ClusterInfo:
    """A representation of a :class:`~cluster.Cluster`."""

    uid: int
    """The ipc uid of the cluster."""

    server_uid: int
    """The uid of the server that launched this cluster."""

    shard_ids: list[int]
    """The shard ids that this cluster owns."""

    ready: bool
    """Whether or not this cluster is ready."""

    @property
    def smallest_shard(self) -> int:
        """The lowest shard id owned by this cluster."""
        return min(self.shard_ids)

    @property
    def cluster_id(self) -> int:
        """The id of this cluster, derived from its lowest shard id."""
        return ClusterInfo.get_cluster_id(
            self.smallest_shard, len(self.shard_ids)
        )

    @staticmethod
    def get_cluster_id(shard_id: int, shards_per_cluster: int) -> int:
        """Generate a cluster id based on the id of a shard.

        Assumes that all the shard ids of a cluster are adjacent.
        """
        quotient, _remainder = divmod(shard_id, shards_per_cluster)
        return quotient
| StarcoderdataPython |
355449 | <gh_stars>0
#!/usr/local/bin/python
import math
import os
from pathlib import Path
from configs import Configurator
CONFIG_FILE = 'duet_pressure_advance.cfg'
class GCodeGen:
    """Low-level G-code line generator driven by a configuration object.

    The config must expose: filament_diameter, extrusion_width,
    layer_height, travel_speed_in_min and cooling_fan_speed.
    """

    def __init__(self, config):
        self.cfg = config

    def extrusion_volume_to_length(self, volume):
        """Convert a volume of plastic (mm^3) into filament length (mm)."""
        # Divide by the filament cross-section: pi*(d/2)^2 == d^2 * pi * 0.25.
        return volume / (self.cfg.filament_diameter ** 2 * math.pi * 0.25)

    def extrusion_for_length(self, length):
        """Filament length needed to lay one road of `length` mm."""
        return self.extrusion_volume_to_length(
            length * self.cfg.extrusion_width * self.cfg.layer_height)

    def up(self):
        """Step up by one layer height (intended for relative mode)."""
        return f"G1 Z{self.cfg.layer_height:.3f}"

    def line(self, x: float, y: float, speed: float) -> str:
        """Extruding move of (x, y) mm at `speed` mm/s."""
        assert speed > 0, 'speed cannot be 0'
        length = math.sqrt(x**2 + y**2)
        extrusion = self.extrusion_for_length(length)
        # G-code feed rates are expressed in mm/min, hence the * 60.
        return f"G1 X{x:.3f} Y{y:.3f} E{extrusion:.4f} F{speed * 60:.0f}"

    def goto(self, x: float, y: float) -> str:
        """Travel (non-extruding) move to (x, y)."""
        return f"G1 X{x:.3f} Y{y:.3f} F{self.cfg.travel_speed_in_min:.0f}"

    def goto_xyz(self, x: float, y: float, z: float) -> str:
        """Travel move including the Z axis.

        Bug fix: the Z coordinate used to be emitted as a second "Y" word
        ("Y{z}"), so the commanded Z move was wrong/ignored.
        """
        return f"G1 X{x:.3f} Y{y:.3f} Z{z:.3f} F{self.cfg.travel_speed_in_min:.0f}"

    def fan_on(self):
        """Turn the part-cooling fan on at the configured PWM value."""
        return f'M106 S{self.cfg.cooling_fan_speed} ; start fan'

    def relative_moves(self):
        """Switch the machine into relative positioning mode."""
        return 'G91; relative'
class TestPrinter(GCodeGen):
    """Builds the complete pressure-advance test print from a config."""

    def __init__(self, cfg):
        super().__init__(cfg)

    def start_gcode(self) -> [str]:
        # The template may interpolate {test.*}/{filament.*}; both map to cfg.
        s = self.cfg.start_gcode.format(test=self.cfg, filament=self.cfg)
        return s.split('\n')

    def end_gcode(self) -> [str]:
        s = self.cfg.end_gcode.format(filament=self.cfg)
        return s.split('\n')

    def raft_loops(self):
        """Print concentric rectangles around the test area as a raft."""
        result = ['; Raft start']
        result.append(self.goto(self.cfg.start_x, self.cfg.start_y))
        result.append(self.relative_moves())
        for loop in range(0, self.cfg.raft_loops * 2, 2):
            result.append(self.line(0,
                                    loop * self.cfg.extrusion_width,
                                    self.cfg.first_layer_speed))
            result.append(self.line(self.cfg.object_width + loop * self.cfg.extrusion_width,
                                    0,
                                    self.cfg.first_layer_speed))
            result.append(self.line(0,
                                    (loop + 1) * -self.cfg.extrusion_width,
                                    self.cfg.first_layer_speed))
            result.append(self.line(-self.cfg.object_width - (loop + 1) * self.cfg.extrusion_width,
                                    0,
                                    self.cfg.first_layer_speed))
        result.append('G90; absolute')
        result.append('; Raft end')
        return result

    def get_segment_rel(self, dir: float) -> list:
        """One fast/slow/fast segment (relative moves); dir is +1.0 or -1.0."""
        segment = (self.cfg.object_width * 1.0) / self.cfg.num_patterns
        space = segment - self.cfg.pattern_width
        result = []
        result.append(self.line(dir * space / 2, 0, self.cfg.fast_speed))
        result.append(self.line(dir * self.cfg.pattern_width, 0, self.cfg.slow_speed))
        result.append(self.line(dir * space / 2, 0, self.cfg.fast_speed))
        return result

    def generate_layer(self) -> []:
        """One zig-zag pass: patterns to the right, step over, patterns back."""
        result = []
        to_right = self.get_segment_rel(1.0)
        to_left = self.get_segment_rel(-1.0)
        for _ in range(self.cfg.num_patterns):
            result += to_right
        result.append(self.line(0, -self.cfg.extrusion_width, self.cfg.fast_speed))
        for _ in range(self.cfg.num_patterns):
            result += to_left
        result.append(self.line(0, self.cfg.extrusion_width, self.cfg.fast_speed))
        return result

    def get_test(self):
        """Emit the test body: one layer per pressure-advance value."""
        # NOTE(review): (max - min) + min algebraically equals
        # pressure_advance_max, so the sweep spans [0, max) rather than
        # [min, max] — confirm whether plain (max - min) was intended.
        pressure_step = (self.cfg.pressure_advance_max - self.cfg.pressure_advance_min) + self.cfg.pressure_advance_min
        layer = self.generate_layer()
        result = [self.relative_moves()]
        for l in range(self.cfg.layers):
            pressure_advance = (l / (self.cfg.layers * 1.0)) * pressure_step
            result.append("; layer %d, pressure advance: %.3f" %(l, pressure_advance))
            if self.cfg.show_messages:
                result.append(f"M117 layer {l}, pressure advance: {pressure_advance:.3f}")
            # M572 sets pressure advance on Duet firmware.
            result.append(f"M572 D0 S{pressure_advance:.3f}")
            result += layer
            result.append(self.up())
        result.append('G90; abs')
        return result

    def start_fan(self):
        return [self.fan_on()]

    def goto_start(self):
        return [self.goto(self.cfg.start_x, self.cfg.start_y)]
def generate_pa_test(cfg):
    """Assemble the full pressure-advance test as a flat list of G-code lines."""
    printer = TestPrinter(cfg)
    sections = [
        printer.start_gcode(),
        printer.goto_start(),
        printer.raft_loops(),
        printer.start_fan(),
        printer.goto_start(),
        printer.get_test(),
        printer.end_gcode(),
    ]
    lines = []
    for section in sections:
        lines.extend(section)
    return lines
if __name__ == '__main__':
    # Load (or create) the user's config from the home directory, generate
    # the test G-code, write it out, and persist any updated settings.
    cfg_file = os.path.join(Path.home(), CONFIG_FILE)
    configurator = Configurator(cfg_file)
    gcode = generate_pa_test(configurator)
    s = '\n'.join(gcode)
    with open('test.gcode', 'w') as f:
        f.write(s)
    configurator.save(cfg_file)
| StarcoderdataPython |
3285032 | """TLA+ parser and syntax tree."""
from tla.parser import parse
from tla.parser import parse_expr
# Expose the package version if the generated `tla._version` module exists;
# fall back to None for source checkouts where it has not been built.
# (Previously a bare `except:` which also swallowed unrelated errors
# such as KeyboardInterrupt.)
try:
    from tla._version import version as __version__
except ImportError:
    __version__ = None
| StarcoderdataPython |
3247292 | import os
import tensorflow as tf
from keras.models import load_model
import numpy as np
from datetime import datetime
from flask import Blueprint, request, render_template, jsonify
from modules.dataBase import collection as db
import cv2
# Blueprint serving the classifier UI and the prediction endpoint.
mod = Blueprint('backend', __name__, template_folder='templates', static_folder='./static')
# Base URL under which saved uploads are publicly reachable.
UPLOAD_URL = 'http://0.0.0.0:5000/static/'
# The model and its label set are loaded once at import time.
model = load_model("modules/model/mobilenet_model.hdf5")
class_names = ['Dark', 'Green', 'Light', 'Medium']
model.make_predict_function()
@mod.route('/')
def home():
    """Render the application's index page."""
    return render_template('index.html')
@mod.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded image into one of class_names
    (Dark/Green/Light/Medium).

    Expects a multipart upload under key 'file'.  Saves the file, masks the
    green HSV range, runs the model, records the result via db.addNewImage,
    and returns a JSON verdict.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            return "someting went wrong 1"
        user_file = request.files['file']
        if user_file.filename == '':
            return "file name not found ..."
        else:
            # NOTE(review): hard-coded backslash separators make this
            # Windows-only; os.path.join with separate components would
            # be portable — confirm the deployment target.
            path = os.path.join(os.getcwd() + '\\modules\\static\\' + user_file.filename)
            user_file.save(path)
            image = cv2.resize(cv2.imread(path), (224, 224))
            # Use gaussian blur
            blurImg = cv2.GaussianBlur(image, (5, 5), 0)
            # Convert to HSV image
            hsvImg = cv2.cvtColor(blurImg, cv2.COLOR_BGR2HSV)
            # Create mask (parameters - green color range)
            lower_green = (25, 40, 50)
            upper_green = (75, 255, 255)
            mask = cv2.inRange(hsvImg, lower_green, upper_green)
            # Close small holes in the mask before applying it.
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
            # Create bool mask
            bMask = mask > 0
            # Apply the mask
            clear = np.zeros_like(image, np.uint8)  # Create empty image
            clear[bMask] = image[bMask]  # Apply boolean mask to the origin image
            # Scale to [0, 1] and add the batch dimension the model expects.
            clearTestImg = clear / 255
            clearTestImg = tf.expand_dims(clearTestImg, 0)
            predictions = model.predict(clearTestImg)
            score = tf.nn.softmax(predictions)
            class_name = class_names[np.argmax(score)]
            score = np.max(score) * 100
            db.addNewImage(
                user_file.filename,
                class_name,
                str(round(score, 2)),
                datetime.now(),
                UPLOAD_URL + user_file.filename)
            return jsonify({
                "status": "success",
                "class": class_name,
                "score": str(round(score, 2)),
                "upload_time": datetime.now()
            })
# def identifyImage(img_path):
# image = img.load_img(img_path, target_size=(224, 224))
# x = img_to_array(image)
# x = np.expand_dims(x, axis=0)
# preds = model.predict(x)
# score = tf.nn.softmax(preds[0])
# class_name = class_names[np.argmax(score)]
# print(preds, score, class_name)
# return preds, score, class_name
| StarcoderdataPython |
3287918 | # cob: type=views mountpoint=/index
from cob import route
from . import mymodels
from flask import jsonify
@route('/purge', methods=['POST'])
def purge():
    """Delete every Person row, commit, and reply with plain 'ok'."""
    mymodels.Person.query.delete()
    mymodels.db.session.commit()
    return 'ok'
@route('/list_models')
def get_all_models():
    """Return a JSON array of {'id': ...} objects, one per Person row."""
    return jsonify([{'id': p.id} for p in mymodels.Person.query.all()])
@route('/add_model', methods=['POST'])
def add_model():
    """Insert one new (default) Person row, commit, and reply with 'ok'."""
    mymodels.db.session.add(
        mymodels.Person())
    mymodels.db.session.commit()
    return 'ok'
| StarcoderdataPython |
9670863 | <filename>navigator/resources.py
#!/usr/bin/env python
import asyncio
import json
from functools import wraps
from pathlib import Path
import aiohttp
from aiohttp import WSCloseCode, WSMsgType, web
from aiohttp.http_exceptions import HttpBadRequest
from aiohttp.web import Request, Response
from aiohttp.web_exceptions import HTTPMethodNotAllowed
from aiohttp_swagger import *
from navigator.conf import BASE_DIR
def callback_channel(ws):
    """Build a notification callback that forwards each payload to *ws*.

    NOTE(review): the returned callback schedules ws.send_str() via
    asyncio.ensure_future(), so it presumably fires while an event loop
    is running — confirm against the caller.
    """
    def listen(connection, pid, channel, payload):
        # Relay the raw payload string to the websocket client.
        print("Running Callback Channel for {}: {}".format(channel, payload))
        asyncio.ensure_future(ws.send_str(payload))
    return listen
async def channel_handler(request):
    """Upgrade the request to a websocket bound to a named channel."""
    channel = request.match_info.get("channel", "navigator")
    print("Websocket connection starting for channel {}".format(channel))
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    #TODO: connection is not defined, I dont understand this code
    # NOTE(review): `connection` is indeed undefined in this scope, so the
    # next line raises NameError as written — the intended value is
    # presumably a pub/sub connection for callback_channel(); confirm.
    socket = {"ws": ws, "conn": connection}
    request.app["websockets"].append(socket)
    print(socket)
    print('Websocket Channel connection ready')
    try:
        # Echo loop: 'close' closes the socket, anything else is echoed
        # back with an '/answer' suffix.
        async for msg in ws:
            print(msg)
            if msg.type == aiohttp.WSMsgType.TEXT:
                print(msg.data)
                if msg.data == 'close':
                    await ws.close()
                else:
                    await ws.send_str(msg.data + '/answer')
    finally:
        # Always unregister the socket, even on abnormal termination.
        request.app["websockets"].remove(socket)
    return ws
class WebSocket(web.View):
    """Class-based websocket endpoint; echoes messages with an '/answer' suffix."""

    def __init__(self, *args, **kwargs):
        super(WebSocket, self).__init__(*args, **kwargs)
        self.app = self.request.app

    async def get(self):
        # user need a channel:
        # NOTE(review): `channel` is read but never used below — confirm.
        channel = self.request.match_info.get("channel", "navigator")
        print("Websocket connection starting")
        ws = web.WebSocketResponse()
        await ws.prepare(self.request)
        # Track the live socket so the app can manage it on shutdown.
        self.request.app["websockets"].append(ws)
        print("Websocket connection ready")
        # ws.start(request)
        # session = await get_session(self.request)
        # user = User(self.request.db, {'id': session.get('user')})
        # login = await user.get_login()
        try:
            async for msg in ws:
                print(msg)
                if msg.type == WSMsgType.TEXT:
                    print(msg.data)
                    if msg.data == "close":
                        await ws.close()
                    else:
                        await ws.send_str(msg.data + "/answer")
                elif msg.type == WSMsgType.ERROR:
                    print("ws connection closed with exception %s" % ws.exception())
        finally:
            # Always unregister, even if the receive loop raises.
            self.request.app["websockets"].remove(ws)
        print("Websocket connection closed")
        return ws
async def ping(request):
    """
    ---
    summary: This end-point allow to test that service is up.
    tags:
    - Health check
    produces:
    - text/plain
    responses:
        "200":
            description: successful operation. Return "pong" text
        "405":
            description: invalid HTTP Method
    """
    # Liveness probe.  The YAML docstring above is consumed by
    # aiohttp_swagger, so its wording is deliberately left untouched.
    return web.Response(text="pong")
async def home(request):
    """
    ---
    summary: This end-point is the default "home" for all newly projects
    tags:
    - Home
    - Index
    produces:
    - text/html
    responses:
        "200":
            description: template "templates/home.html" returned.
        "404":
            description: Template "templates/home.html" not found.
    """
    path = Path(BASE_DIR).joinpath("templates/home.html")
    try:
        file_path = path
        if not file_path.exists():
            # NOTE(review): the HTTPNotFound instance is returned, not
            # raised — aiohttp accepts either, but confirm this is
            # intentional for consistency.
            return web.HTTPNotFound()
        return web.FileResponse(file_path)
    except Exception as e:
        # Any unexpected failure is reported as a JSON 500 payload.
        response_obj = {"status": "failed", "reason": str(e)}
        return web.Response(
            text=json.dumps(response_obj), status=500, content_type="application/json"
        )
| StarcoderdataPython |
348410 | <gh_stars>0
import pytest
import numpy as np
from io2048.io_offline import IOOffline
def test_reset():
    # reset() must spawn exactly two non-zero tiles; with tile values of
    # 2 or 4 the board total therefore lies in [4, 8].
    io = IOOffline()
    state = io.reset()
    assert(len(np.where(state != 0)[0]) == 2)
    assert(np.sum(state) >=4 and np.sum(state) <= 8)
def test_step():
    # Smoke test: each of the four move directions must execute without
    # raising.  NOTE(review): no assertions are made on the returned
    # (next_state, reward, done) tuples — consider adding some.
    io = IOOffline()
    state = io.reset()
    next_state, reward, done = io.step(0)
    next_state, reward, done = io.step(1)
    next_state, reward, done = io.step(2)
    next_state, reward, done = io.step(3)
def test_merge_row_cells_to_the_left():
    """Table-driven check of single-row left merges: (row, expected, reward)."""
    cases = [
        ([1, 1, 2, 2], [2, 4, 0, 0], 6),
        ([1, 1, 1, 1], [2, 2, 0, 0], 4),
        ([0, 0, 0, 2], [2, 0, 0, 0], 0),
        ([3, 1, 1, 2], [3, 2, 2, 0], 2),
        ([1, 2, 3, 4], [1, 2, 3, 4], 0),
        ([2, 1, 4, 4], [2, 1, 8, 0], 8),
    ]
    io = IOOffline()
    for row, expected, expected_reward in cases:
        merged_row, reward = io._merge_row_cells_to_the_left(np.array(row))
        assert (merged_row == np.array(expected)).all()
        assert reward == expected_reward
def test_make_move():
    """Table-driven check of full-board moves: (board, move, expected, reward)."""
    base = [[0, 0, 2, 2], [4, 2, 8, 2], [2, 2, 4, 8], [16, 2, 2, 8]]
    cases = [
        (base, 3, [[4, 0, 0, 0], [4, 2, 8, 2], [4, 4, 8, 0], [16, 4, 8, 0]], 12),
        (base, 2, [[0, 0, 2, 0], [4, 0, 8, 0], [2, 2, 4, 4], [16, 4, 2, 16]], 24),
        (base, 1, [[0, 0, 0, 4], [4, 2, 8, 2], [0, 4, 4, 8], [0, 16, 4, 8]], 12),
        (base, 0, [[4, 4, 2, 4], [2, 2, 8, 16], [16, 0, 4, 0], [0, 0, 2, 0]], 24),
        ([[8, 2, 0, 0], [8, 2, 0, 0], [2, 8, 2, 0], [2, 4, 2, 0]], 0,
         [[16, 4, 4, 0], [4, 8, 0, 0], [0, 4, 0, 0], [0, 0, 0, 0]], 28),
    ]
    io = IOOffline()
    for board, move, expected, expected_reward in cases:
        io._board = np.array(board)
        new_board, reward = io._make_move(move)
        assert (new_board == np.array(expected)).all()
        assert reward == expected_reward
def test_is_done():
    # A full board with no equal adjacent tiles (row- or column-wise)
    # leaves no legal move, so the game must report done.
    io = IOOffline()
    io._board = np.array([[ 4, 2, 4, 2], [32, 64, 16, 8], [16, 4, 32, 4], [ 4, 2, 8, 2]])
    assert(io._is_done() == True)
| StarcoderdataPython |
def transform_list(iterable, func):
    """Return a list with *func* applied to every element of *iterable*."""
    return list(map(func, iterable))
# Organic-nomenclature prefixes for a carbon chain of the given length (1..8).
LEN_SUFFIXES = {
    1: 'meth',
    2: 'eth',
    3: 'prop',
    4: 'but',
    5: 'pent',
    6: 'hex',
    7: 'hept',
    8: 'oct',
}
# Multiplier prefixes used when the same substituent occurs 2-4 times.
COUNT_SUFFIXES = {
    2: 'di',
    3: 'tri',
    4: 'tetra',
}
# Legend printed before prompting; N/P/L/E match the prompt prefixes below.
helptxt = \
"""
N: Nombre de...
P: Position de...
L: Longueur de...
E: Existance de...
"""
def numberof(what):
    """Prompt for and return the count ("Nombre") of *what* as an int."""
    numworks_safe_print("N: %s ?" % what)
    return int(input('>'))
def num_choose(msg, choices):
    """Show *choices* numbered from 1 and loop until a valid index is typed.

    Returns the chosen element (not its index).
    """
    choicemap = {i + 1: v for i, v in enumerate(choices)}
    numworks_safe_print(msg)
    print("Choix possibles:")
    for i, v in choicemap.items():
        numworks_safe_print("{}: {}".format(i, v))
    chosen = int(input('>'))
    # Re-prompt until the number maps to an actual choice.
    while chosen not in choicemap.keys():
        chosen = int(input('>'))
    return choicemap[chosen]
def posof(what="ce groupe"):
    """Prompt for and return the position ("Position") of *what* as an int."""
    numworks_safe_print("P: %s ?" % what)
    return int(input('>'))
def lenof(what):
    """Prompt for and return the length ("Longueur") of *what* as an int."""
    numworks_safe_print("L: %s ?" % what)
    return int(input('>'))
def presenceof(what):
    """Ask whether *what* exists; any answer starting with 'y' counts as yes."""
    numworks_safe_print("E: %s ?" % what)
    return input('>').lower().strip().startswith('y')
def numworks_safe_print(string):
    """Print *string* one line at a time (presumably to suit the NumWorks console)."""
    lines = string.split('\n')
    for single_line in lines:
        print(single_line)
DEBUG = False
# len: pos
subs = {}
if not DEBUG:
    # Display strings for the possible special (functional) groups.
    aldehydegroup = "-CH=O"
    carboxylegroup = "-C|OH=O"
    hydroxylegroup = "-OH"
    carbonylegroup = "C=O"
    numworks_safe_print(helptxt)
    pchain_len = lenof("chaine principale")
    def choose_special_sub():
        # Returns one boolean per possible group; exactly one is True.
        choices = ('aucun', hydroxylegroup, carbonylegroup, aldehydegroup, carboxylegroup)
        chosen = num_choose("Quel groupe est présent ?",list(choices))
        return chosen == choices[0], chosen == choices[1], chosen == choices[2], chosen == choices[3], chosen == choices[4]
    is_not_particular, is_alcool, is_carbonyle, is_aldehyde, is_acid_carboxylique = choose_special_sub()
    if is_alcool or is_carbonyle:
        # ask for hydroxyle if not is_carbonyle else ask for carbonyle
        special_sub_pos = posof(["Groupe hydroxyle","Groupe carbonyle"][int(is_carbonyle)])
    else:
        special_sub_pos = 0
    n_subs = numberof("substituants")
else:
    # Hard-coded fixture used when skipping the interactive prompts.
    # NOTE(review): in DEBUG mode is_alcool/is_carbonyle/... are never
    # assigned, so the suffix selection below raises NameError — confirm.
    pchain_len = 6
    hydroxyle_pos = 3
    n_subs = 3
    subs_len = [1] * 3
    subs_pos = [2] + [4] * 2
# Collect each substituent's length and position, grouped by length.
for _ in range(n_subs):
    if not DEBUG:
        slen = lenof("substituant")
        spos = posof("substituant")
    else:
        slen = subs_len[_]
        spos = subs_pos[_]
    if subs.get(slen):
        subs[slen].append(spos)
    else:
        subs[slen] = [spos]
# sort subs dict by length
sorted_subs = sorted(subs.items(), key=lambda kv: kv[0])
subs_str = []
for slen, spos in sorted_subs:
    # Positions joined with commas, plus a di/tri/tetra prefix when repeated.
    if len(spos) > 1:
        posstr = ','.join(transform_list(spos, str)) + '-' + COUNT_SUFFIXES[len(spos)]
    else:
        posstr = str(spos[0]) + '-'
    substr = posstr + LEN_SUFFIXES[slen] + 'yl'
    subs_str.append(substr)
subs_str = '-'.join(subs_str)
# Pick the suffix encoding the special group (and its position if needed).
if is_alcool:
    special_sub_str = "-%s-ol" % special_sub_pos
elif is_carbonyle:
    special_sub_str = "-%s-one" % special_sub_pos
elif is_aldehyde:
    special_sub_str = 'al'
elif is_acid_carboxylique:
    special_sub_str = 'oique'
else:
    special_sub_str = str()
# Carboxylic acids are prefixed with "acide ".
acid = 'acide ' if special_sub_str == 'oique' else ''
final_str = acid + subs_str + LEN_SUFFIXES[pchain_len] + 'an' + special_sub_str
if final_str == 'propan-2-ol': final_str = 'isopropyl'
numworks_safe_print('-' * 20 + '\n' + final_str)
| StarcoderdataPython |
3230651 | import json
import falcon
from models import Ticket
from config import session
from repo import Repo
class CreateTicket:
    """POST resource: create a ticket from JSON {ticket_type, message}."""

    @staticmethod
    def on_post(req, resp):
        """Create and persist a new ticket; respond with its id and status.

        A ValueError from Ticket.create_new is reported as a 'failed'
        JSON payload rather than an HTTP error.
        """
        request_payload = json.loads(req.stream.read().decode('utf-8'))
        ticket_type = request_payload.get('ticket_type')
        message = request_payload.get('message')
        try:
            ticket = Ticket.create_new(ticket_type, message)
            with session() as db:
                repo = Repo(db)
                ticket.ticket_id = repo.save_new_ticket(ticket)
                db.commit()
            resp.body = json.dumps(
                {
                    'id': ticket.ticket_id,
                    'status': ticket.status
                },
                sort_keys=True, indent=4
            )
        except ValueError as error:
            resp.body = json.dumps(
                {
                    'status': 'failed',
                    'reason': str(error)
                },
                sort_keys=True, indent=4
            )
class QueryTicketStatus:
    """GET resource: report a ticket's type and human-readable status."""

    @staticmethod
    def on_get(req, resp, ticket_id):
        """Look up *ticket_id*; respond 'not found' when it does not exist."""
        with session() as db:
            repo = Repo(db)
            ticket = repo.list_ticket(int(ticket_id))
            if ticket is None:
                resp.body = json.dumps({'status': 'not found'}, sort_keys=True, indent=4)
            else:
                resp.body = json.dumps(
                    {
                        'id': ticket.ticket_id,
                        'ticket_type': ticket.ticket_type,
                        'status': Ticket.find_status_name(ticket.status)
                    },
                    sort_keys=True, indent=4
                )
class ChangeTicketStatus:
    """PUT resource: set a ticket's status from JSON {'status': <name>}."""

    @staticmethod
    def on_put(req, resp, ticket_id):
        """Update the ticket's status and echo the id and new status name.

        NOTE(review): an unknown status name raises KeyError from
        Ticket.STATUSES and a missing ticket is passed through as None —
        neither case is handled here; confirm upstream validation.
        """
        payload = json.loads(req.stream.read().decode('utf-8'))
        status_name = payload.get('status')
        with session() as db:
            repo = Repo(db)
            ticket = repo.list_ticket(int(ticket_id))
            repo.change_ticket_status(
                ticket=ticket,
                status=Ticket.STATUSES[status_name]
            )
        resp.body = json.dumps(
            {
                'id': ticket_id,
                'status': status_name
            },
            sort_keys=True, indent=4
        )
# Wire the WSGI app and its three ticket endpoints.
app = falcon.API()
app.add_route('/create-ticket', CreateTicket)
app.add_route('/view-status/{ticket_id}', QueryTicketStatus)
app.add_route('/change-ticket-status/{ticket_id}', ChangeTicketStatus)
| StarcoderdataPython |
1826792 | import os
import sys
import shutil
import subprocess
import xarray as xr
import numpy as np
# Current, parent and file paths
CWD = os.getcwd()
CF = os.path.realpath(__file__)
CFD = os.path.dirname(CF)
# Import library specific modules
sys.path.append(os.path.join(CFD,"../"))
sys.path.append(os.path.join(CFD,"../pyspod"))
from pyspod.spod_low_storage import SPOD_low_storage
from pyspod.spod_low_ram import SPOD_low_ram
from pyspod.spod_streaming import SPOD_streaming
# Let's create some 2D syntetic data
# and store them into a variable called p
variables = ['p']
# Spatial grid: 100 points over [0, 10] by 50 points over [0, 5].
x1 = np.linspace(0,10,100)
x2 = np.linspace(0, 5, 50)
xx1, xx2 = np.meshgrid(x1, x2)
# 1000 snapshots over 200 time units.
t = np.linspace(0, 200, 1000)
nt = t.shape[0]
# Separable field: a fixed spatial pattern modulated by a periodic signal.
s_component = np.sin(xx1 * xx2) + np.cos(xx1)**2 + np.sin(0.1*xx2)
t_component = np.sin(0.1 * t)**2 + np.cos(t) * np.sin(0.5*t)
p = np.empty((t_component.shape[0],)+s_component.shape)
for i, t_c in enumerate(t_component):
    p[i] = s_component * t_c
# We now save the data into netCDF format
# NOTE(review): coordinate "x1" is assigned the x2 array and vice versa;
# the sizes are consistent with p's dims, but confirm the swap is intended.
ds = xr.Dataset(
    {"p": (("time", "x1", "x2"), p)},
    coords={
        "x1": x2,
        "x2": x1,
        "time": t,
    },
)
ds.to_netcdf("data.nc")
# We now show how to construct a data reader that can be passed
# to the constructor of pyspod to read data sequentially (thereby
# reducing RAM requirements)
# Reader for netCDF
def read_data_netCDF(data, t_0, t_end, variables):
    """Read *variables* for snapshots [t_0, t_end) from the module-level dataset.

    When t_0 == t_end, a single snapshot at index t_0 is returned.

    NOTE(review): the preallocated 4-D `X` is overwritten inside the loop,
    so only the *last* variable's 3-D array is returned, and the `data`
    path argument is never used (the module-level `ds` is read instead) —
    acceptable for this single-variable test, but confirm before reuse.
    """
    if t_0 == t_end: ti = [t_0]
    else : ti = np.arange(t_0,t_end)
    X = np.empty([len(ti), x2.shape[0], x1.shape[0], len(variables)])
    for _,var in enumerate(variables):
        X = np.array(ds[var].isel(time=ti))
    return X
# Smoke-read: the full time range, then a single snapshot (t_0 == t_end).
x_nc = read_data_netCDF('data.nc', t_0=0, t_end=t.shape[0], variables=variables)
x_nc_ssn = read_data_netCDF('data.nc', t_0=0, t_end=0, variables=variables)
# Let's define the required parameters into a dictionary
# (shared by both SPOD test functions below).
params = dict()
# -- required parameters
params['time_step' ] = 1 # data time-sampling
params['n_space_dims'] = 2 # number of spatial dimensions (longitude and latitude)
params['n_variables' ] = len(variables) # number of variables
params['n_DFT' ] = 100 # length of FFT blocks (100 time-snapshots)
# -- optional parameters
params['overlap' ] = 0 # dimension block overlap region
params['mean_type' ] = 'blockwise' # type of mean to subtract to the data
params['normalize_weights'] = False # normalization of weights by data variance
params['normalize_data' ] = False # normalize data by data variance
params['n_modes_save' ] = 3 # modes to be saved
params['conf_level' ] = 0.95 # calculate confidence level
params['reuse_blocks' ] = True # whether to reuse blocks if present
params['savefft' ] = False # save FFT blocks to reuse them in the future (saves time)
params['savedir' ] = os.path.join(CWD, 'results', 'simple_test') # folder where to save results
def test_basic_file_spod_low_storage():
    """Regression test: low-storage SPOD modes match hard-coded reference values."""
    # Initialize libraries by using data_handler for the low storage algorithm
    spod_ls = SPOD_low_storage(
        params=params,
        data_handler=read_data_netCDF,
        variables=variables)
    # fit spod
    spod_ls.fit(data=os.path.join(CWD,'data.nc'), nt=nt)
    # Show results
    T_approx = 10 # approximate period = 10 days (in days)
    freq = spod_ls.freq
    freq_found, freq_idx = spod_ls.find_nearest_freq(freq_required=1/T_approx, freq=freq)
    modes_at_freq = spod_ls.get_modes_at_freq(freq_idx=freq_idx)
    # Band-style comparison: |value - reference| must be within tol.
    tol = 1e-10
    assert((np.abs(modes_at_freq[5,10,0,0]) < 0.010068515759308167 +tol) & \
        (np.abs(modes_at_freq[5,10,0,0]) > 0.010068515759308167 -tol))
    assert((np.abs(modes_at_freq[0,0,0,0]) < 0.012180208154393609 +tol) & \
        (np.abs(modes_at_freq[0,0,0,0]) > 0.012180208154393609 -tol))
    assert((np.max(np.abs(modes_at_freq)) < 0.029919118328162627 +tol) & \
        (np.max(np.abs(modes_at_freq)) > 0.029919118328162627 -tol))
def test_basic_file_spod_low_ram():
    """Fit SPOD (low-RAM variant) on the netCDF test file and check
    reference mode amplitudes at the frequency nearest a 10-day period."""
    spod_ram = SPOD_low_ram(
        params=params,
        data_handler=read_data_netCDF,
        variables=variables)
    spod_ram.fit(data=os.path.join(CWD, 'data.nc'), nt=nt)
    # Inspect modes at the frequency closest to an approximate 10-day period.
    T_approx = 10
    freq_found, freq_idx = spod_ram.find_nearest_freq(
        freq_required=1 / T_approx, freq=spod_ram.freq)
    modes_at_freq = spod_ram.get_modes_at_freq(freq_idx=freq_idx)
    # |x - expected| < tol is equivalent to the original strict two-sided bound.
    tol = 1e-10
    assert abs(np.abs(modes_at_freq[5, 10, 0, 0]) - 0.010068515759308162) < tol
    assert abs(np.abs(modes_at_freq[0, 0, 0, 0]) - 0.01218020815439358) < tol
    assert abs(np.max(np.abs(modes_at_freq)) - 0.02991911832816271) < tol
# Remove the artifacts produced by the tests; report (but do not raise)
# any OS error, exactly as before.
for _target, _remover in ((os.path.join(CWD, 'results'), shutil.rmtree),
                          (os.path.join(CWD, 'data.nc'), os.remove)):
    try:
        _remover(_target)
    except OSError as e:
        print("Error: %s : %s" % (_target, e.strerror))

if __name__ == "__main__":
    test_basic_file_spod_low_storage()
    test_basic_file_spod_low_ram()
| StarcoderdataPython |
6562817 | <filename>mrn_rdpgw_service_discovery.py
#!/usr/bin/env python
# |-----------------------------------------------------------------------------
# | This source code is provided under the Apache 2.0 license --
# | and is provided AS IS with no warranty or guarantee of fit for purpose. --
# | See the project's LICENSE.md for details. --
# | Copyright (C) 2018-2021 Refinitiv. All rights reserved. --
# |-----------------------------------------------------------------------------
"""
This example demonstrates authenticating via Refinitiv Data Platform, using an
authentication token to discover Refinitiv Real-Time service endpoint, and
using the endpoint and authentication to retrieve market content.
This example maintains a session by proactively renewing the authentication
token before expiration.
This example can run with optional hotstandby support. Without this support, the application
will use a load-balanced interface with two hosts behind the load balancer. With hot standby
support, the application will access two hosts and display the data (should be identical) from
each of the hosts.
It performs the following steps:
- Authenticating via HTTP Post request to Refinitiv Data Platform
- Retrieving service endpoints from Service Discovery via HTTP Get request,
using the token retrieved from Refinitiv Data Platform
- Opening a WebSocket (or two, if the --hotstandby option is specified) to
a Refinitiv Real-Time Service endpoint, as retrieved from Service Discovery
- Sending Login into the Real-Time Service using the token retrieved
from Refinitiv Data Platform.
- Requesting market-price content.
- Printing the response content.
- Periodically proactively re-authenticating to Refinitiv Data Platform, and
providing the updated token to the Real-Time endpoint before token expiration.
"""
import base64
import binascii
import getopt
import json
import socket
import sys
import threading
import time
import zlib
from threading import Thread, Event

import requests
import websocket
# Global Default Variables
# (each of these may be overridden by the corresponding command-line option)
app_id = '256'
auth_url = 'https://api.refinitiv.com:443/auth/oauth2/v1/token'
discovery_url = 'https://api.refinitiv.com/streaming/pricing/v1/'
password = ''
newPassword = ''
position = ''
sts_token = ''      # current RDP access token, shared with the WebSocket sessions
refresh_token = ''  # current RDP refresh token
user = ''
clientid = ''
client_secret = ''
scope = 'trapi.streaming.pricing.read'
region = 'us-east-1'
mrn_domain = 'NewsTextAnalytics'  # RDM domain for MRN news
mrn_ric = 'MRN_STORY'             # MRN real-time news item
hostList = []        # endpoints returned by service discovery
hotstandby = False   # when True, open two sessions to two distinct hosts
# Global Variables
session2 = None
_news_envelopes = []  # in-flight multi-fragment MRN envelopes, keyed by GUID
original_expire_time = '0';
# Global Variables for Password Policy Description
# Bitmask values returned by check_new_password(); each set bit is a violation.
PASSWORD_LENGTH_MASK = 0x1;
PASSWORD_UPPERCASE_LETTER_MASK = 0x2;
PASSWORD_LOWERCASE_LETTER_MASK = 0x4;
PASSWORD_DIGIT_MASK = 0x8;
PASSWORD_SPECIAL_CHARACTER_MASK = 0x10;
PASSWORD_INVALID_CHARACTER_MASK = 0x20;
# Minimum requirements enforced by the RDP password policy.
PASSWORD_LENGTH_MIN = 30;
PASSWORD_UPPERCASE_LETTER_MIN = 1;
PASSWORD_LOWERCASE_LETTER_MIN = 1;
PASSWORD_DIGIT_MIN = 1;
PASSWORD_SPECIAL_CHARACTER_MIN = 1;
PASSWORD_SPECIAL_CHARACTER_SET = "~!@#$%^&*()-_=+[]{}|;:,.<>/?";
PASSWORD_MIN_NUMBER_OF_CATEGORIES = 3;
class WebSocketSession:
    """One WebSocket connection to a Refinitiv Real-Time endpoint.

    Handles the AuthnToken login handshake, the MRN_STORY subscription,
    reassembly and decompression of multi-fragment MRN news envelopes,
    Ping/Pong keep-alives, and automatic reconnection when the socket drops.

    Relies on module-level globals: sts_token, position, app_id, mrn_domain,
    mrn_ric and _news_envelopes.
    """
    logged_in = False
    session_name = ''
    web_socket_app = None
    web_socket_open = False
    host = ''
    disconnected_by_user = False

    def __init__(self, name, host):
        self.session_name = name
        self.host = host

    def _send_mrn_request(self):
        """ Create and send simple Market Price request """
        mrn_req_json = {
            'ID': 2,
            'Domain': mrn_domain,
            'Key': {
                'Name': mrn_ric
            },
        }
        self.web_socket_app.send(json.dumps(mrn_req_json))
        print("SENT on " + self.session_name + ":")
        print(json.dumps(mrn_req_json, sort_keys=True, indent=2, separators=(',', ':')))

    def _decodeFieldList(self, fieldList_dict):
        """Print every field name/value pair of a FieldList payload."""
        for key, value in fieldList_dict.items():
            print("Name = %s: Value = %s" % (key, value))

    def _processRefresh(self, message_json):
        """Handle a non-Login Refresh message by dumping its fields."""
        print("RECEIVED: Refresh Message")
        self._decodeFieldList(message_json["Fields"])

    def _processMRNUpdate(self, message_json):  # process incoming News Update messages
        """Reassemble (and, when complete, decompress and print) one MRN update.

        MRN stories can span several Update messages; partial stories are
        parked in the module-level _news_envelopes list keyed by GUID until
        the accumulated FRAGMENT length equals TOT_SIZE.
        """
        fields_data = message_json["Fields"]
        # Dump the FieldList first (for informational purposes)
        # decodeFieldList(message_json["Fields"])
        # declare variables
        tot_size = 0
        guid = None
        try:
            # Get data for all requried fields
            fragment = base64.b64decode(fields_data["FRAGMENT"])
            frag_num = int(fields_data["FRAG_NUM"])
            guid = fields_data["GUID"]
            mrn_src = fields_data["MRN_SRC"]
            #print("GUID  = %s" % guid)
            #print("FRAG_NUM = %d" % frag_num)
            #print("MRN_SRC = %s" % mrn_src)
            if frag_num > 1:  # We are now processing more than one part of an envelope - retrieve the current details
                guid_index = next((index for (index, d) in enumerate(
                    _news_envelopes) if d["guid"] == guid), None)
                envelop = _news_envelopes[guid_index]
                if envelop and envelop["data"]["mrn_src"] == mrn_src and frag_num == envelop["data"]["frag_num"] + 1:
                    print("process multiple fragments for guid %s" %
                          envelop["guid"])
                    #print("fragment before merge = %d" % len(envelop["data"]["fragment"]))
                    # Merge incoming data to existing news envelop and getting FRAGMENT and TOT_SIZE data to local variables
                    fragment = envelop["data"]["fragment"] = envelop["data"]["fragment"] + fragment
                    envelop["data"]["frag_num"] = frag_num
                    tot_size = envelop["data"]["tot_size"]
                    print("TOT_SIZE = %d" % tot_size)
                    print("Current FRAGMENT length = %d" % len(fragment))
                    # The multiple fragments news are not completed, waiting.
                    if tot_size != len(fragment):
                        return None
                    # The multiple fragments news are completed, delete assoiclate GUID envelop
                    elif tot_size == len(fragment):
                        del _news_envelopes[guid_index]
                else:
                    print("Error: Cannot find fragment for GUID %s with matching FRAG_NUM or MRN_SRC %s" % (
                        guid, mrn_src))
                    return None
            else:  # FRAG_NUM = 1, the first fragment
                tot_size = int(fields_data["TOT_SIZE"])
                print("FRAGMENT length = %d" % len(fragment))
                # The fragment news is not completed, waiting and add this news data to envelop object.
                if tot_size != len(fragment):
                    print("Add new fragments to news envelop for guid %s" % guid)
                    _news_envelopes.append({  # the envelop object is a Python dictionary with GUID as a key and other fields are data
                        "guid": guid,
                        "data": {
                            "fragment": fragment,
                            "mrn_src": mrn_src,
                            "frag_num": frag_num,
                            "tot_size": tot_size
                        }
                    })
                    return None
            # News Fragment(s) completed, decompress and print data as JSON to console
            if tot_size == len(fragment):
                print("decompress News FRAGMENT(s) for GUID  %s" % guid)
                decompressed_data = zlib.decompress(fragment, zlib.MAX_WBITS | 32)
                print("News = %s" % json.loads(decompressed_data))
                sys.stdout.flush()
        except KeyError as keyerror:
            print('KeyError exception: ', keyerror)
        except IndexError as indexerror:
            print('IndexError exception: ', indexerror)
        # binascii is imported at module level; base64.b64decode raises
        # binascii.Error on malformed input.
        except binascii.Error as b64error:
            print('base64 decoding exception:', b64error)
        except zlib.error as error:
            print('zlib decompressing exception: ', error)
        # Some console environments like Windows may encounter this unicode display as a limitation of OS
        except UnicodeEncodeError as encodeerror:
            print("UnicodeEncodeError exception. Cannot decode unicode character for %s in this enviroment: " %
                  guid, encodeerror)
        except Exception as e:
            print('exception: ', sys.exc_info()[0])

    def _processStatus(self, message_json):  # process incoming status message
        """Print an incoming Status message verbatim."""
        print("RECEIVED: Status Message")
        print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))

    def _send_login_request(self, auth_token, is_refresh_token):
        """
        Send login request with authentication token.
        Used both for the initial login and subsequent reissues to update the authentication token
        """
        login_json = {
            'ID': 1,
            'Domain': 'Login',
            'Key': {
                'NameType': 'AuthnToken',
                'Elements': {
                    'ApplicationId': '',
                    'Position': '',
                    'AuthenticationToken': ''
                }
            }
        }

        login_json['Key']['Elements']['ApplicationId'] = app_id
        login_json['Key']['Elements']['Position'] = position
        login_json['Key']['Elements']['AuthenticationToken'] = auth_token

        # If the token is a refresh token, this is not our first login attempt:
        # suppress the refresh so the server only updates the token.
        if is_refresh_token:
            login_json['Refresh'] = False

        self.web_socket_app.send(json.dumps(login_json))
        print("SENT on " + self.session_name + ":")
        print(json.dumps(login_json, sort_keys=True, indent=2, separators=(',', ':')))

    def _process_login_response(self, message_json):
        """Check the Login refresh; on success, subscribe to MRN news."""
        if message_json['State']['Stream'] != "Open" or message_json['State']['Data'] != "Ok":
            print("Login failed.")
            sys.exit(1)

        self.logged_in = True
        self._send_mrn_request()

    def _process_message(self, message_json):
        """Dispatch a single parsed message by its Type (and Domain)."""
        message_type = message_json['Type']

        if message_type == "Refresh":
            if "Domain" in message_json:
                message_domain = message_json["Domain"]
                if message_domain == "Login":
                    self._process_login_response(message_json)
                elif message_domain:
                    self._processRefresh(message_json)
        elif message_type == "Update":
            if "Domain" in message_json and message_json["Domain"] == mrn_domain:
                self._processMRNUpdate(message_json)
        elif message_type == "Status":
            self._processStatus(message_json)
        elif message_type == "Ping":
            # Answer server keep-alive probes immediately.
            pong_json = {'Type': 'Pong'}
            self.web_socket_app.send(json.dumps(pong_json))
            print("SENT on " + self.session_name + ":")
            print(json.dumps(pong_json, sort_keys=True,
                             indent=2, separators=(',', ':')))

    # Callback events from WebSocketApp
    def _on_message(self, ws, message):
        """ Called when message received, parse message into JSON for processing """
        print("RECEIVED on " + self.session_name + ":")
        message_json = json.loads(message)
        print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))

        # The wire format is a JSON array; process each element.
        for singleMsg in message_json:
            self._process_message(singleMsg)

    def _on_error(self, ws, error):
        """ Called when websocket error has occurred """
        # `error` is usually an exception object, not a str; convert it
        # explicitly so the concatenation cannot raise TypeError.
        print(str(error) + " for " + self.session_name)

    def _on_close(self, ws, close_status_code, close_msg):
        """ Called when websocket is closed """
        self.web_socket_open = False
        self.logged_in = False
        print("WebSocket Closed for " + self.session_name)

        # Reconnect automatically unless disconnect() was requested.
        if not self.disconnected_by_user:
            print("Reconnect to the endpoint for " + self.session_name + " after 3 seconds... ")
            time.sleep(3)
            self.connect()

    def _on_open(self, ws):
        """ Called when handshake is complete and websocket is open, send login """
        print("WebSocket successfully connected for " + self.session_name + "!")
        self.web_socket_open = True
        self._send_login_request(sts_token, False)

    # Operations
    def connect(self):
        """Open the WebSocket on a background thread and start its event loop."""
        # Start websocket handshake
        ws_address = "wss://{}/WebSocket".format(self.host)
        print("Connecting to WebSocket " + ws_address + " for " + self.session_name + "...")
        self.web_socket_app = websocket.WebSocketApp(ws_address,
                                                     on_message=self._on_message,
                                                     on_error=self._on_error,
                                                     on_close=self._on_close,
                                                     on_open=self._on_open,
                                                     subprotocols=['tr_json2'])

        # Event loop
        wst = threading.Thread(target=self.web_socket_app.run_forever, kwargs={'sslopt': {'check_hostname': False}})
        wst.start()

    def disconnect(self):
        """Close the WebSocket and suppress the automatic reconnect."""
        print("Closing the WebSocket connection for " + self.session_name)
        self.disconnected_by_user = True
        if self.web_socket_open:
            self.web_socket_app.close()

    def refresh_token(self):
        """Reissue the login with the current (module-global) access token."""
        if self.logged_in:
            print("Refreshing the access token for " + self.session_name)
            self._send_login_request(sts_token, True)
def query_service_discovery(url=None):
    """Query RDP service discovery for streaming endpoints and fill hostList.

    Follows redirects, retries on transient errors, and exits the process
    when the configured region yields no usable endpoints.
    """
    if url is None:
        url = discovery_url

    print("Sending Refinitiv Data Platform service discovery request to " + url)
    try:
        response = requests.get(url,
                                headers={"Authorization": "Bearer " + sts_token},
                                params={"transport": "websocket"},
                                allow_redirects=False)
    except requests.exceptions.RequestException as e:
        print('Refinitiv Data Platform service discovery exception failure:', e)
        return False

    code = response.status_code
    if code == 200:
        # Authentication was successful. Deserialize the response.
        response_json = response.json()
        print("Refinitiv Data Platform Service discovery succeeded. RECEIVED:")
        print(json.dumps(response_json, sort_keys=True, indent=2, separators=(',', ':')))

        for service in response_json['services']:
            if not service['location'][0].startswith(region):
                continue
            endpoint = service['endpoint'] + ":" + str(service['port'])
            if hotstandby:
                # Hot standby: collect single-location hosts (one per site).
                if len(service['location']) == 1:
                    hostList.append(endpoint)
            elif len(service['location']) == 2:
                # Default: take the first load-balanced (two-location) endpoint.
                hostList.append(endpoint)
                break

        if hotstandby:
            if len(hostList) < 2:
                print("Expected 2 hosts but received:", len(hostList), "or the region:", region, "is not present in list of endpoints")
                sys.exit(1)
        else:
            if len(hostList) == 0:
                print("The region:", region, "is not present in list of endpoints")
                sys.exit(1)
        return True

    if code in (301, 302, 303, 307, 308):
        # Perform URL redirect
        print('Refinitiv Data Platform service discovery HTTP code:', code, response.reason)
        new_host = response.headers['Location']
        if new_host is not None:
            print('Perform URL redirect to ', new_host)
            return query_service_discovery(new_host)
        return False

    if code in (403, 451):
        # Stop trying with the request
        print('Refinitiv Data Platform service discovery HTTP code:', code, response.reason)
        print('Stop trying with the request')
        return False

    # Retry the service discovery request
    print('Refinitiv Data Platform service discovery HTTP code:', code, response.reason)
    print('Retry the service discovery request')
    return query_service_discovery()
def get_sts_token(current_refresh_token, url=None):
    """
    Retrieve an authentication token from Refinitiv Data Platform.

    :param current_refresh_token: Refresh token retrieved from a previous
        authentication, used to retrieve a subsequent access token. If not
        provided (i.e. on the initial authentication), the password is used.
    :return: (access_token, refresh_token, expires_in) or (None, None, None).
    """
    if url is None:
        url = auth_url

    if current_refresh_token:  # Use the given refresh token
        post_data = {'username': user, 'client_id': clientid,
                     'refresh_token': current_refresh_token, 'grant_type': 'refresh_token'}
        print("Sending authentication request with refresh token to", url, "...")
    else:  # First time through, send password
        post_data = {'username': user, 'password': password, 'client_id': clientid,
                     'grant_type': 'password', 'takeExclusiveSignOnControl': True,
                     'scope': scope}
        print("Sending authentication request with password to", url, "...")
    if client_secret != '':
        post_data['client_secret'] = client_secret

    try:
        # Request with auth for https protocol
        response = requests.post(url,
                                 headers={'Accept': 'application/json'},
                                 data=post_data,
                                 auth=(clientid, client_secret),
                                 verify=True,
                                 allow_redirects=False)
    except requests.exceptions.RequestException as e:
        print('Refinitiv Data Platform authentication exception failure:', e)
        return None, None, None

    code = response.status_code
    if code == 200:
        auth_json = response.json()
        print("Refinitiv Data Platform Authentication succeeded. RECEIVED:")
        print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
        return auth_json['access_token'], auth_json['refresh_token'], auth_json['expires_in']

    if code in (301, 302, 307, 308):
        # Perform URL redirect
        print('Refinitiv Data Platform authentication HTTP code:', code, response.reason)
        new_host = response.headers['Location']
        if new_host is not None:
            print('Perform URL redirect to ', new_host)
            return get_sts_token(current_refresh_token, new_host)
        return None, None, None

    if code in (400, 401):
        print('Refinitiv Data Platform authentication HTTP code:', code, response.reason)
        if current_refresh_token:
            # Refresh token may have expired. Try using our password.
            print('Retry with username and password')
            return get_sts_token(None)
        return None, None, None

    if code in (403, 451):
        # Stop retrying with the request
        print('Refinitiv Data Platform authentication HTTP code:', code, response.reason)
        print('Stop retrying with the request')
        return None, None, None

    # Retry the request to Refinitiv Data Platform
    print('Refinitiv Data Platform authentication HTTP code:', code, response.reason)
    print('Retry the request to Refinitiv Data Platform')
    return get_sts_token(current_refresh_token)
def print_commandline_usage_and_exit(exit_code):
    """Print command-line usage and terminate the process with *exit_code*.

    Note: the original usage string contained a redaction artifact
    ("<PASSWORD>"); the placeholder is restored to "new_password".
    """
    print('Usage: market_price_rdpgw_service_discovery.py [--app_id app_id] '
          '[--user user] [--clientid clientid] [--password password] [--newPassword new_password] [--position position] [--auth_url auth_url] '
          '[--discovery_url discovery_url] [--scope scope] [--region region] [--hotstandby] [--help]')
    sys.exit(exit_code)
def check_new_password(pwd):
    """Validate *pwd* against the RDP password policy.

    Returns a bitmask of PASSWORD_*_MASK violation flags; 0 means the
    password satisfies every per-category minimum and the length rule.
    """
    result = 0
    count_upper = count_lower = count_digit = count_special = 0

    if len(pwd) < PASSWORD_LENGTH_MIN:
        result |= PASSWORD_LENGTH_MASK

    for ch in pwd:
        # Chained comparisons replace the original explicit range checks;
        # regular expressions are deliberately avoided (no `re` import).
        is_upper = 'A' <= ch <= 'Z'
        is_lower = 'a' <= ch <= 'z'
        is_digit = '0' <= ch <= '9'
        is_special = ch in PASSWORD_SPECIAL_CHARACTER_SET

        if not (is_upper or is_lower or is_digit or is_special):
            result |= PASSWORD_INVALID_CHARACTER_MASK
        count_upper += is_upper
        count_lower += is_lower
        count_digit += is_digit
        count_special += is_special

    if count_upper < PASSWORD_UPPERCASE_LETTER_MIN:
        result |= PASSWORD_UPPERCASE_LETTER_MASK
    if count_lower < PASSWORD_LOWERCASE_LETTER_MIN:
        result |= PASSWORD_LOWERCASE_LETTER_MASK
    if count_digit < PASSWORD_DIGIT_MIN:
        result |= PASSWORD_DIGIT_MASK
    if count_special < PASSWORD_SPECIAL_CHARACTER_MIN:
        result |= PASSWORD_SPECIAL_CHARACTER_MASK

    return result
def changePassword():
    """Change the account password on Refinitiv Data Platform.

    Sends the current credentials plus the module-global ``newPassword``
    to the auth endpoint. Returns True on success, False on failure.

    Bug fixed: the ``newPassword`` dictionary value had been mangled by a
    redaction artifact (``<PASSWORD>Password``), which was a syntax error;
    it is restored to the ``newPassword`` global.
    """
    data = {'username': user, 'password': password, 'client_id': clientid, 'grant_type': 'password',
            'takeExclusiveSignOnControl': True, 'scope': scope, 'newPassword': newPassword}
    print("Sending changing password request to", auth_url, "...")
    try:
        # Request with auth for https protocol
        r = requests.post(auth_url,
                          headers={'Accept': 'application/json'},
                          data=data,
                          auth=(clientid, client_secret),
                          verify=True,
                          allow_redirects=False)
    except requests.exceptions.RequestException as e:
        print('Changing password exception failure:', e)
        return False

    if r.status_code == 200:
        auth_json = r.json()
        print("Password successfully changed.")
        print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
        return True
    elif r.status_code in (301, 302, 307, 308):
        # Perform URL redirect
        print('Changing password response HTTP code:', r.status_code, r.reason)
        new_host = r.headers['Location']
        if new_host is not None:
            print('Perform URL redirect to ', new_host)
            return changePassword()
        return False
    elif r.status_code >= 400:
        # Error during change password attempt
        auth_json = r.json()
        print('Changing password response HTTP code:', r.status_code, r.reason)
        print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
        return False
    else:
        # Retry the request to the API gateway
        print('Changing password response HTTP code:', r.status_code, r.reason)
        print('Retry change request')
        return changePassword()
if __name__ == "__main__":
    # ---- Parse command-line options ------------------------------------
    opts = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["help", "app_id=", "user=", "clientid=", "password=", "newPassword=",
                                                      "position=", "auth_url=", "discovery_url=", "scope=", "region=",
                                                      "hotstandby"])
    except getopt.GetoptError:
        print_commandline_usage_and_exit(2)
    for opt, arg in opts:
        # getopt returns the canonical full option name, so exact equality
        # is correct here (the original used a fragile substring test).
        if opt == "--help":
            print_commandline_usage_and_exit(0)
        elif opt == "--app_id":
            app_id = arg
        elif opt == "--user":
            user = arg
        elif opt == "--clientid":
            clientid = arg
        elif opt == "--password":
            password = arg
        elif opt == "--newPassword":
            newPassword = arg
        elif opt == "--position":
            position = arg
        elif opt == "--auth_url":
            auth_url = arg
        elif opt == "--discovery_url":
            discovery_url = arg
        elif opt == "--scope":
            scope = arg
        elif opt == "--region":
            region = arg
        elif opt == "--hotstandby":
            hotstandby = True

    if user == '' or password == '' or clientid == '':
        print("user, clientid and password are required options")
        sys.exit(2)

    # ---- Optional password change --------------------------------------
    if newPassword != '':
        policyResult = check_new_password(newPassword)
        # Bug fixed: `&` binds more loosely than `!=`/`==` in Python, so the
        # original `policyResult & MASK != 0` evaluated `policyResult & 1`
        # for every check. Each mask is now tested directly.
        if policyResult & PASSWORD_INVALID_CHARACTER_MASK:
            print("New password contains invalid symbol")
            print("valid symbols are [A-Z][a-z][0-9]", PASSWORD_SPECIAL_CHARACTER_SET, sep='')
            sys.exit(2)
        if policyResult & PASSWORD_LENGTH_MASK:
            print("New password length should be at least ", PASSWORD_LENGTH_MIN, " characters")
            sys.exit(2)
        # The policy requires at least three of the four character categories.
        countCategories = 0
        if not policyResult & PASSWORD_UPPERCASE_LETTER_MASK:
            countCategories += 1
        if not policyResult & PASSWORD_LOWERCASE_LETTER_MASK:
            countCategories += 1
        if not policyResult & PASSWORD_DIGIT_MASK:
            countCategories += 1
        if not policyResult & PASSWORD_SPECIAL_CHARACTER_MASK:
            countCategories += 1
        if countCategories < PASSWORD_MIN_NUMBER_OF_CATEGORIES:
            print("Password must contain characters belonging to at least three of the following four categories:\n"
                  "uppercase letters, lowercase letters, digits, and special characters.\n")
            sys.exit(2)
        if not changePassword():
            sys.exit(2)
        # Redaction artifact fixed: authenticate with the new password from now on.
        password = newPassword
        newPassword = ''

    if position == '':
        # Populate position if possible
        try:
            position_host = socket.gethostname()
            position = socket.gethostbyname(position_host) + "/" + position_host
        except socket.gaierror:
            position = "127.0.0.1/net"

    # ---- Authenticate and discover endpoints ---------------------------
    sts_token, refresh_token, expire_time = get_sts_token(None)
    if not sts_token:
        sys.exit(1)
    original_expire_time = expire_time

    # Query VIPs from Refinitiv Data Platform service discovery
    if not query_service_discovery():
        print("Failed to retrieve endpoints from Refinitiv Data Platform Service Discovery. Exiting...")
        sys.exit(1)

    # Start websocket handshake; create two sessions when the hotstandby parameter is specified.
    session1 = WebSocketSession("session1", hostList[0])
    session1.connect()

    if hotstandby:
        session2 = WebSocketSession("session2", hostList[1])
        session2.connect()

    # ---- Token refresh loop --------------------------------------------
    try:
        while True:
            # Continue using current token until 90% of initial time before it expires.
            time.sleep(int(float(expire_time) * 0.90))

            sts_token, refresh_token, expire_time = get_sts_token(refresh_token)
            if not sts_token:
                sys.exit(1)

            if int(expire_time) != int(original_expire_time):
                print('expire time changed from ' + str(original_expire_time) + ' sec to ' + str(expire_time) + ' sec; retry with password')
                sts_token, refresh_token, expire_time = get_sts_token(None)
                if not sts_token:
                    sys.exit(1)
                original_expire_time = expire_time

            # Update token on every open session.
            session1.refresh_token()
            if hotstandby:
                session2.refresh_token()

    except KeyboardInterrupt:
        session1.disconnect()
        if hotstandby:
            session2.disconnect()
| StarcoderdataPython |
11250143 | import urwid
from console.modes import default_global_mode, modemap
class HelpDialog(urwid.Pile):
    """Widget listing the global key bindings followed by those of the
    currently active mode, one three-column row per binding."""

    def __init__(self):
        rows = []
        rows.extend(self.get_mode_help("Global Keys", default_global_mode))
        rows.append(urwid.BoxAdapter(urwid.SolidFill(' '), 1))  # blank spacer
        mode_title = "{} Keys".format(modemap._mode.capitalize())
        rows.extend(self.get_mode_help(mode_title, modemap.mode))
        urwid.Pile.__init__(self, rows)

    def get_sorted_binds(self, mode):
        """Return (event, key, help) tuples for *mode*, sorted by event."""
        binds = []
        for key, bind in mode.items():
            help_text = ""
            if isinstance(bind, tuple):
                bind, help_text = bind
            binds.append((bind, key, help_text))
        binds.sort()
        return binds

    def get_bind_rows(self, binds):
        """Render each (event, key, help) tuple as a three-column row."""
        return [
            urwid.Columns([
                ('weight', 1, urwid.Text(event)),
                ('weight', 1, urwid.Text(key)),
                ('weight', 4, urwid.Text(help_text)),
            ])
            for event, key, help_text in binds
        ]

    def get_mode_title(self, label):
        """Section header: the label followed by a horizontal rule."""
        return urwid.Columns([
            ('pack', urwid.Text(label)),
            (1, urwid.BoxAdapter(urwid.SolidFill(' '), 1)),
            ('weight', 1, urwid.BoxAdapter(urwid.SolidFill('-'), 1)),
        ])

    def get_mode_help(self, label, mode):
        """Title row plus the rendered bindings, padded on both sides."""
        rows = [self.get_mode_title(label)]
        rows.extend(self.get_bind_rows(self.get_sorted_binds(mode)))
        return [urwid.Padding(row, left=2, right=2) for row in rows]
| StarcoderdataPython |
4921566 | <filename>dmt_lib_test.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 10 14:29:44 2019
@author: dongxucz
"""
from random import randint
import numpy as np
import csv as csvlib
from locale import atoi, atof
from core.dmt_lib import DmtMod, DmtDeMod
from bitstring import BitArray
import matplotlib.pyplot as plt
def extract_samples(bytes_received):
    """Decode packed 12-bit signed samples (two samples per three bytes).

    Each sample occupies 3 hex digits of the buffer's hex representation and
    is interpreted as a 12-bit two's-complement integer.

    Improvement: the per-sample ``bitstring.BitArray`` construction is
    replaced with equivalent standard-library integer arithmetic, removing
    the third-party dependency while producing identical values.
    """
    alldata = bytes_received.hex()
    samples_int = []
    nsample = int(len(bytes_received) / 1.5)
    for i in range(nsample):
        raw = int(alldata[i * 3:(i + 1) * 3], 16)
        # Sign-extend the 12-bit value: values >= 2048 are negative.
        samples_int.append(raw - 4096 if raw >= 2048 else raw)
    return samples_int
def load_preamble_data():
    """Load the DMT preamble waveform and its QAM reference symbols from CSV.

    Returns a tuple (preamble384, qam_ref_extend):
      - preamble384: list of floats read from qam16_dmt_Apr26.csv
      - qam_ref_extend: flat numpy array of the conjugate-extended QAM
        reference symbols derived from qam16_Apr26.csv

    NOTE(review): the paths are hard-coded absolute Windows paths
    (D:/PythonScripts/...), so this only runs on the original author's
    machine — consider parameterizing them.
    NOTE(review): qam16_to_complex, DMT_conj_map and frame_size are not
    defined anywhere in this script; calling this function as-is raises
    NameError unless they are provided elsewhere — TODO confirm.
    """
    # Real-valued preamble waveform (one float per CSV row).
    preamble_file_dir = 'D:/PythonScripts/vadatech/vt898/qam16_dmt_Apr26.csv'
    f_pre = open(preamble_file_dir, 'r')
    preamble384 = [atof(item[0]) for item in csvlib.reader(f_pre)]
    f_pre.close()
    # Integer QAM16 symbol indices for the same preamble.
    preamble_file_dir = 'D:/PythonScripts/vadatech/vt898/qam16_Apr26.csv'
    f_pre_int = open(preamble_file_dir, 'r')
    preamble_int192 = [atoi(item[0]) for item in csvlib.reader(f_pre_int)]
    f_pre_int.close()
    # Map symbol indices to complex constellation points, reshape into two
    # frames (column-major), apply the DMT conjugate extension, and flatten.
    qam_ref = [qam16_to_complex(i) for i in preamble_int192]
    qam_ref_shaped = np.reshape(qam_ref[0:],(int(frame_size/2),2), order='F')
    qam_ref_shaped_extend = DMT_conj_map(qam_ref_shaped)
    qam_ref_extend = np.reshape(qam_ref_shaped_extend,(frame_size*2,),order='F')
    return (preamble384,qam_ref_extend)
# ---- Modulate 10 frames (1920 symbols) of random QAM16 data ----
qam_dec=[]
Mod = 16
for i in range(1920):
    qam_dec.append(randint(0,(Mod-1)))
dmt = DmtMod(symbols_dec=qam_dec, frame_len=192, qam_level=Mod)
dmt._interp_kind = 'linear' # 'quadratic'
new_sample_rate = 4
dmt.change_sample_rate(new_sample_rate)

# ---- Compare the raw waveform against the interpolated samples ----
# First N points at the start of the waveform ...
N = 25
plt.plot(np.arange(N), dmt.wvfm_real[:N], 'ro-')
postinterp = dmt.samples[0:int(N*dmt.over_sample_ratio)-(new_sample_rate-1)]
plt.plot(np.linspace(0, N-1, num=len(postinterp)), postinterp, 'b*-')
plt.show()
# ... and the last N points at the end.
plt.plot(np.arange(N), dmt.wvfm_real[-1*N:], 'ro-')
postinterp = dmt.samples[int(-1*N*dmt.over_sample_ratio):-1*(new_sample_rate-1)]
plt.plot(np.linspace(0, N-1, num=len(postinterp)), postinterp, 'b*-')

# ---- Modulate the known preamble and demodulate it back ----
# NOTE(review): preamble_int192 and preamble384 are used below but never
# assigned at module level (load_preamble_data() is defined but not called),
# so this section raises NameError as written — TODO confirm intent.
dmt_prmbl = DmtMod(symbols_dec = preamble_int192, frame_len = 192, qam_level=16)
dmt_prmbl_wvfm = dmt_prmbl.samples
dmt_prmbl_symbols_iq = dmt_prmbl.symbols_iq
preamble192 = preamble384[:192]
preamble_int96 = preamble_int192[:96]
# NOTE(review): duplicate import; the module already imports these names
# from core.dmt_lib at the top of the file.
from dmt_lib import DmtMod, DmtDeMod
prmbl = DmtDeMod(samples = dmt_prmbl_wvfm, frame_len=192, qam_level=16,
                 preamble = preamble_int96)
prmbl = DmtDeMod(samples = dmt.samples, frame_len=192, qam_level=16,
                 preamble = preamble_int96)
asdf=DmtDeMod()
# Rotate the preamble waveform by 50 samples and feed two copies to the
# demodulator to exercise re-calibration (frame alignment search).
wvfm0 = np.concatenate( (dmt_prmbl_wvfm[50:], dmt_prmbl_wvfm[:50] ) )
wvfm = np.concatenate( (wvfm0, wvfm0) )
prmbl.update(wvfm, re_calibrate = True)

# Visual check: modulated preamble vs. reference waveform.
N = 25
plt.plot(np.arange(N), dmt_prmbl_wvfm[:N], 'ro-')
plt.plot(np.arange(N), preamble384[:N], 'b*-')
plt.show()

# Constellation diagrams: transmitted vs. received symbols.
asdf0 = dmt_prmbl.symbols_iq
asdf = prmbl.symbols_iq
plt.scatter(asdf.real,asdf.imag, s=5)
qamfigure,[txqam, rxqam] = plt.subplots(nrows=1,ncols=2)
qamfigure.set_size_inches((8,2.4))
txqam.scatter(np.array(asdf0).real, np.array(asdf0).imag, s=5)
txqam.set_position([0.125,0.125,0.26,0.80])
rxqam.scatter(asdf.real, asdf.imag, s=5)
rxqam.set_position([0.52,0.125,0.26,0.80])
import errno
import os
import pickle
from copy import deepcopy

from .Train import Optimizer
from .Workers import FillGradientsWorker
class Graph:
def __init__(self, adj = {}, logging = True, optimizable_variables = [], tags = {}, optimization_configs = {}, from_pickle=False, pickle_name = "model", pickle_path = "./models/"):
# initialize from scratch
if not from_pickle:
self.initialize_graph(adj, logging, optimizable_variables, tags, optimization_configs)
else:
# init from pickle
self.deserialize(pickle_name, pickle_path)
def initialize_graph(self, adj, logging, optimizable_variables, tags, optimization_configs):
self.adj = adj
self.optimizable_variables = optimizable_variables
self.tags = tags
self.logging = logging
self.OC = optimization_configs
def print_me(self):
print repr(self.adj)
def add_edge(self, vertex, node = None):
if not vertex in self.adj:
self.adj[vertex] = []
if not node is None:
if not node in self.adj[vertex]:
self.adj[vertex] += [node]
else:
if self.logging:
print "Edge already exist: from " + repr(vertex) + " to " + repr(node)
# add the name of the node to a dictionary
def tag_node(self, node, tag):
# do nothing to none
if node is not None:
if tag in self.tags:
# duplicate name
if logging:
print "Bad node tag: " + tag + " is already used."
else:
# add name for future reference
self.tags[tag] = node
# get a node by its name
def get_node(self, tag):
if tag in self.tags:
return self.tags[tag]
else:
if logging:
print "Bad node name: " + tag + " is not found in graph."
return None
def get_dependency(self, vertex):
if not vertex in self.adj:
return None
else:
return self.adj[vertex]
def in_graph(self, vertex):
return vertex in self.adj
# deferred execution
def exe(self, node):
# running an op/tensor will use the DEG scheme
if not self.in_graph(node):
res = node.run()
else:
dependencies = self.get_dependency(node)
data_flow = []
for d in dependencies:
data_flow += [self.exe(d)]
res = node.run(*data_flow)
if isinstance(node, Optimizer):
return node
else:
return res
def fill_gradients(self, node, lo = None, hi = None):
run_parallel = "FILLGRADIENTS_PARA_FLAG" in self.OC
if run_parallel:
# base case
if lo != None and hi != None:
dependencies = self.get_dependency(node)
for d_i in range(lo, hi):
d = dependencies[d_i]
d.pass_gradients(node, *self.get_dependency(d))
self.fill_gradients(d, node)
else:
assert "FILLGRADIENTS_PARA_CUTOFF" in self.OC
cutoff = self.OC["FILLGRADIENTS_PARA_CUTOFF"]
# create worker
worker = FillGradientsWorker(self, node, 0, len(self.get_dependency(node)), cutoff = cutoff)
# do the work on current thread
worker.run()
else:
dependencies = self.get_dependency(node)
for d in dependencies:
d.pass_gradients(node, *self.get_dependency(d))
self.fill_gradients(d)
# return a copy of this graph
def copy(self):
return Graph(adj = deepcopy(self.adj), logging = True, \
optimizable_variables = deepcopy(self.optimizable_variables))
##### for serialization #####
def serialize(self, name, path="./models/"):
if len(self.tags) == 0:
if self.logging:
print "Warning: serialization of non-tagged graph."
full_path = path+name+".model"
if not os.path.exists(os.path.dirname(full_path)):
try:
os.makedirs(os.path.dirname(full_path))
except OSError as exc: # race condition
if exc.errno != errno.EEXIST:
raise
with open(full_path, 'w') as to_file:
pickle.dump(self, to_file)
def deserialize(self, name, path="./models/"):
full_path = path+name+".model"
if not os.path.exists(os.path.dirname(full_path)):
raise
with open(full_path, 'r') as from_file:
data = from_file.read()
g = pickle.loads(data)
self.initialize_graph(g.adj, g.logging, g.optimizable_variables, g.tags)
############################################
# add an optimizable variable to the graph,
# this will allow the optimizer later on to
# be attached to them
def add_opt_var(self, opt_var):
self.optimizable_variables.append(opt_var)
    # Return the (mutable, shared) list of registered optimizable
    # variables; callers receive the graph's own list, not a copy.
    def get_opt_vars(self):
        return self.optimizable_variables
| StarcoderdataPython |
9756950 | import datetime
from typing import Optional, Tuple
import pandas as pd
from python import DOCUMENT_ID, PUBLISH_DATE, SUBTOPIC, MENTION_ID, SENTENCE_IDX, TOKEN_IDX_FROM, TOKEN_IDX_TO, EVENT, \
SENTENCE_TYPE, TOKEN_IDX, TOKEN, EVENT_ID, TOPIC_ID
from python.util.ftfy import clean_string
def load_gvc_dataset(path: str, doc_to_subtopic_file: Optional[str] = None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Parse the Gun Violence Corpus (GVC) from its CoNLL-like file.

    Parameters:
        path: path to the corpus file. Documents are delimited by
            ``#begin``/``#end`` lines; every other line is tab-separated
            as ``<conflated-token-id> <token> <sentence-type> <label>``.
        doc_to_subtopic_file: CSV mapping document ids to event ids
            (used as subtopics). Required — ``ValueError`` if omitted.

    Returns:
        ``(documents, contents, mentions)``: ``documents`` indexed by
        (topic, subtopic, doc-id) with publish dates; ``contents``
        indexed by (doc-id, sentence, token) holding tokens;
        ``mentions`` indexed by (doc-id, mention-id) holding event
        mention spans and their gold event labels.
    """
    contents_rows = []
    documents_rows = []
    mentions_rows = []
    with open(path, 'r') as file:
        # Per-document parser state; reset at every '#end' marker.
        doc_id = None
        dct = None  # document creation time (publish date)
        iter_sentence_index = -1  # normalized 0-based sentence counter
        last_sentence_index = None
        iter_mention_id = 0
        token_idx_from = None
        token_idx_offset = 0  # we need this to correct indices of tokens following "NEWLINE" tokens, which we remove
        for line in file:
            if line.startswith("#begin"):
                # line looks like '#begin document (<doc-id>);' — strip
                # the surrounding parenthesis and trailing ');'.
                doc_id = line.strip().split()[2][1:-2]
            elif line.startswith("#end"):
                doc_row = {DOCUMENT_ID: doc_id, PUBLISH_DATE: dct}
                documents_rows.append(doc_row)
                # reset variables for next document
                doc_id = None
                dct = None
                iter_sentence_index = -1
                last_sentence_index = None
                iter_mention_id = 0
                token_idx_from = None
                token_idx_offset = 0
            else:
                parts = line.strip().split("\t")
                token_idx_conflated, token, sentence_type, label_text = parts
                # detect special lines with publish date information
                if sentence_type == "DCT":
                    dct = datetime.datetime.strptime(token, "%Y-%m-%d")
                    continue
                # there are over 5000 useless NEWLINE tokens in the corpus, skip those
                if token == "NEWLINE":
                    token_idx_offset -= 1
                    continue
                # there are tokens with unicode garbage in the corpus, for example some \x92 in
                # 254c63ca82173008f14f769c20db88e0: remove those, and if the token is empty after removal, skip it
                token = clean_string(token).strip()
                if not token:
                    token_idx_offset -= 1
                    continue
                # take apart the token index, the pattern is: 40b69cf630792394ef837aee6c959ece.t1.2
                _, sentence_idx_and_sentence_type, token_idx = token_idx_conflated.split(".")
                # In the original files, there can be a title sentence with sentence index 1 and a body sentence
                # with sentence index 1. We normalize this and number the first sentence 0, the second 1 etc.
                # irrespective of the sentence type.
                if last_sentence_index is None or not sentence_idx_and_sentence_type == last_sentence_index:
                    last_sentence_index = sentence_idx_and_sentence_type
                    iter_sentence_index += 1
                    token_idx_offset = 0
                sentence_idx = iter_sentence_index
                token_idx = int(token_idx) + token_idx_offset
                # For some reason, the token numbering of the first title sentence and the first body sentence of each article starts with 1. We set this manually to 0 again...
                if sentence_idx_and_sentence_type in ["t1", "b1"]:
                    token_idx -= 1
                if not label_text == "-":
                    # Coref labels use bracket notation: '(12' opens a
                    # mention, '12)' closes it, '(12)' is single-token.
                    label = int(label_text.replace("(", "").replace(")", ""))
                    if "(" in label_text:
                        token_idx_from = token_idx
                    if ")" in label_text:
                        token_idx_to = token_idx + 1
                        # NOTE(review): assumes well-formed, non-nested
                        # bracketing; a ')' without a preceding '(' in the
                        # same document would reuse a stale token_idx_from.
                        mentions_rows.append(
                            {DOCUMENT_ID: doc_id, MENTION_ID: iter_mention_id, SENTENCE_IDX: sentence_idx,
                             TOKEN_IDX_FROM: token_idx_from, TOKEN_IDX_TO: token_idx_to, EVENT: label})
                        iter_mention_id += 1
                content_line = {DOCUMENT_ID: doc_id, SENTENCE_TYPE: sentence_type, SENTENCE_IDX: sentence_idx,
                                TOKEN_IDX: token_idx, TOKEN: token}
                contents_rows.append(content_line)
    contents = pd.DataFrame(contents_rows)
    contents.set_index([DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX], inplace=True)
    contents.sort_index(inplace=True)
    # The subtopic mapping is mandatory: without it the documents frame
    # cannot be grouped into events. Note this is only checked after the
    # full corpus file has been parsed.
    if not doc_to_subtopic_file:
        raise ValueError
    doc_to_subtopic = pd.read_csv(doc_to_subtopic_file, index_col=0)
    documents = pd.DataFrame(documents_rows)
    # presumably the mapping CSV has document ids as its index and an
    # EVENT_ID column — verify against the file producer.
    documents[SUBTOPIC] = documents[DOCUMENT_ID].map(doc_to_subtopic[EVENT_ID]).astype(str)
    documents[TOPIC_ID] = "gun_violence"
    documents.set_index([TOPIC_ID, SUBTOPIC, DOCUMENT_ID], inplace=True)
    documents.sort_index(inplace=True)
    documents[DOCUMENT_ID] = documents.index.get_level_values(DOCUMENT_ID)  # add doc-id back as a data column
    mentions = pd.DataFrame(mentions_rows)
    mentions.set_index([DOCUMENT_ID, MENTION_ID], inplace=True)
    mentions.sort_index(inplace=True)
    return documents, contents, mentions
5040033 | <filename>Lab1/Main.py
import math
import numpy as np
import time
from corpus import get_dict
from metrics import Metrics
def dist_hemming(seq1, seq2):
    """Hamming distance between two strings.

    The shorter string is right-padded with spaces so both have equal
    length; the distance is the number of positions that differ.
    """
    if seq1 == seq2:
        return 0
    width = max(len(seq1), len(seq2))
    padded_a = seq1.ljust(width)
    padded_b = seq2.ljust(width)
    return sum(1 for a, b in zip(padded_a, padded_b) if a != b)
def alg_hemming(check, dictionary, min_percentage):
    """Rank every dictionary word by Hamming similarity to `check`.

    Parameters:
        check: the query word.
        dictionary: iterable of candidate words.
        min_percentage: similarity threshold in [0, 100], or None.

    Returns:
        If `min_percentage` is None: the full ranked list of dicts
        ({'word', 'dist', 'percentage'}). Otherwise a 3-tuple of
        (filtered ranked list, MSE of distances vs. the best match,
        standard deviation of distances).
    """
    nested_list = []
    dist_list = []
    for word in dictionary:
        dist = dist_hemming(check, word)
        nested_list.append({'word': word, 'dist': dist})
        dist_list.append(dist)
    max_dist = max(nested_list, key=lambda x: x['dist'])['dist']
    for i in range(len(nested_list)):
        if max_dist == 0:
            # Every candidate is an exact match: avoid dividing by zero
            # and rate everything as 100% similar.
            nested_list[i]['percentage'] = 100.0
        else:
            nested_list[i]['percentage'] = round(100 * (max_dist - nested_list[i]['dist']) / max_dist, 2)
    sorted_list = sorted(nested_list, key=lambda x: x['percentage'], reverse=True)
    if min_percentage is None:
        return sorted_list
    filter_list = filter(lambda x: x['percentage'] >= min_percentage, sorted_list)
    result_list = list(filter_list)
    # NOTE(review): result_list may be empty when no candidate clears the
    # threshold; result_list[0] would then raise IndexError.
    return result_list, Metrics.get_mse(dist_list, result_list[0]['dist']), Metrics.get_std(dist_list)
def dist_jaro(seq1, seq2):
    """Jaro similarity between two strings, in [0, 1] (1 = identical).

    Implements the standard Jaro definition: the mean of the two match
    ratios and the transposition ratio (m - t) / m, where m is the
    number of matching characters within the allowed window and t is
    half the number of matched characters that appear out of order.
    """
    if seq1 == seq2:
        return 1
    # NOTE(review): classic Jaro handles unequal lengths directly; this
    # implementation pads the shorter string with spaces. Padding is
    # preserved here for backward compatibility with existing callers.
    ml = max(len(seq1), len(seq2))
    seq1 = seq1.ljust(ml)
    seq2 = seq2.ljust(ml)
    len1 = len(seq1)
    len2 = len(seq2)
    # Maximum index distance at which equal characters still match.
    h = math.floor(max(len1, len2) / 2 - 1)
    m = 0
    hash_s1 = [0] * len1
    hash_s2 = [0] * len2
    # Count matches: each char of seq1 may claim one unused char of seq2
    # inside the window [i - h, i + h].
    for i in range(len1):
        for j in range(max(0, i - h), min(len2, i + h + 1)):
            if seq1[i] == seq2[j] and hash_s2[j] == 0:
                hash_s1[i] = 1
                hash_s2[j] = 1
                m += 1
                break
    if m == 0:
        return 0
    # Count transpositions: walk the matched characters of both strings
    # in order; each disagreeing pair counts once, and t is half of that.
    t = 0
    point = 0
    for i in range(len1):
        if hash_s1[i]:
            while hash_s2[point] == 0:
                point += 1
            if seq1[i] != seq2[point]:
                t += 1
            # Advance past the compared match unconditionally. (The
            # original advanced only on mismatch, re-comparing the same
            # seq2 character and inflating t.)
            point += 1
    t = t // 2
    # Standard Jaro formula. (The original used (m - t + 1) / m, whose
    # transposition term can exceed 1 — e.g. "ABC" vs "ABD" scored 0.944
    # instead of the correct 0.778.)
    return (m / len1 + m / len2 + (m - t) / m) / 3
def alg_jaro(check, dictionary, min_percentage):
    """Rank every dictionary word by Jaro similarity to `check`.

    With `min_percentage` set, returns (filtered ranked list, MSE of the
    similarities vs. the best match, their standard deviation); with
    `min_percentage` None, returns the full ranked list.
    """
    scored = [{'word': word, 'dist': dist_jaro(check, word)} for word in dictionary]
    dist_list = [entry['dist'] for entry in scored]
    for entry in scored:
        entry['percentage'] = round(100 * entry['dist'], 2)
    ranked = sorted(scored, key=lambda entry: entry['percentage'], reverse=True)
    if min_percentage is None:
        return ranked
    matches = [entry for entry in ranked if entry['percentage'] >= min_percentage]
    return matches, Metrics.get_mse(dist_list, matches[0]['dist']), Metrics.get_std(dist_list)
def dist_levenshtein(seq1, seq2):
    """Levenshtein edit distance between two strings via the classic
    dynamic-programming table (unit cost for insert/delete/substitute).

    Returns 0 for identical inputs; otherwise a numpy scalar holding the
    bottom-right cell of the DP table.
    """
    if seq1 == seq2:
        return 0
    rows = len(seq1) + 1
    cols = len(seq2) + 1
    table = np.zeros((rows, cols))
    # First row/column: distance from the empty prefix.
    table[:, 0] = np.arange(rows)
    table[0, :] = np.arange(cols)
    for r in range(1, rows):
        for c in range(1, cols):
            substitution_cost = 0 if seq1[r - 1] == seq2[c - 1] else 1
            table[r, c] = min(table[r - 1, c] + 1,
                              table[r, c - 1] + 1,
                              table[r - 1, c - 1] + substitution_cost)
    return table[rows - 1, cols - 1]
def alg_levenshtein(check, dictionary, min_percentage):
    """Rank every dictionary word by Levenshtein similarity to `check`.

    Parameters:
        check: the query word.
        dictionary: iterable of candidate words.
        min_percentage: similarity threshold in [0, 100], or None.

    Returns:
        If `min_percentage` is None: the full ranked list of dicts
        ({'word', 'dist', 'percentage'}). Otherwise a 3-tuple of
        (filtered ranked list, MSE of distances vs. the best match,
        standard deviation of distances).
    """
    nested_list = []
    dist_list = []
    for word in dictionary:
        dist = dist_levenshtein(check, word)
        nested_list.append({'word': word, 'dist': dist})
        dist_list.append(dist)
    max_dist = max(nested_list, key=lambda x: x['dist'])['dist']
    for i in range(len(nested_list)):
        if max_dist == 0:
            # Every candidate is an exact match: avoid dividing by zero
            # and rate everything as 100% similar.
            nested_list[i]['percentage'] = 100.0
        else:
            nested_list[i]['percentage'] = round(100 * (max_dist - nested_list[i]['dist']) / max_dist, 2)
    sorted_list = sorted(nested_list, key=lambda x: x['percentage'], reverse=True)
    if min_percentage is None:
        return sorted_list
    filter_list = filter(lambda x: x['percentage'] >= min_percentage, sorted_list)
    result_list = list(filter_list)
    # NOTE(review): result_list may be empty when no candidate clears the
    # threshold; result_list[0] would then raise IndexError.
    return result_list, Metrics.get_mse(dist_list, result_list[0]['dist']), Metrics.get_std(dist_list)
# ---------------------------------------------------------------------------
# Demo: look up `check_word` in the corpus dictionary with all three fuzzy
# matching algorithms and report timing plus the quality metrics.
# NOTE: the original bound the dictionary to the name `dict`, shadowing the
# builtin `dict` type for the rest of the module; renamed to `word_list`.
# ---------------------------------------------------------------------------
word_list = get_dict()
print("Число слов в словаре:", len(word_list))
print("Метрики: Среднеквадратическая ошибка (чем ближе к 0 тем лучше модель), Среднеквадратическое отклонение")
check_word = "в<PASSWORD>"
print("Проверяем слово:", check_word, "\n")
# --- Hamming-based ranking (90% similarity threshold) ---
print("<NAME>:")
start_time = time.time()
result, mse, std = alg_hemming(check_word, word_list, 90)
print("Время выполнения:", round(time.time() - start_time, 3), "сек.")
print("Среднеквадратическая ошибка:", mse)
print("Среднеквадратическое отклонение:", std)
for r in result:
    print(r)
print("\n")
# --- Jaro-based ranking (80% similarity threshold) ---
print("<NAME>:")
start_time = time.time()
result, mse, std = alg_jaro(check_word, word_list, 80)
print("Время выполнения:", round(time.time() - start_time, 3), "сек.")
print("Среднеквадратическая ошибка:", mse)
print("Среднеквадратическое отклонение:", std)
for r in result:
    print(r)
print("\n")
# --- Levenshtein-based ranking (90% similarity threshold) ---
print("<NAME>:")
start_time = time.time()
result, mse, std = alg_levenshtein(check_word, word_list, 90)
print("Время выполнения:", round(time.time() - start_time, 3), "сек.")
print("Среднеквадратическая ошибка:", mse)
print("Среднеквадратическое отклонение:", std)
for r in result:
    print(r)
| StarcoderdataPython |
3315663 | from __future__ import unicode_literals
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from ..utils import get_image_metadata_from_file_ext
# EXIF tag id 274 is "Orientation" in the dict returned by PIL's _getexif().
EXIF_ORIENTATION_KEY = 274
class ProcessedImage(object):
    """
    A base class for processing/saving different renditions of an image.
    Constructor arguments:
    * `path_to_image`: A path to a file within `storage`
    * `storage`: A django storage class
    * `create_on_demand`: A bool signifying whether new images should be
      created on-demand.
    * `placeholder_image`: Optional image used in place of a real one.
    Subclasses must define the `process_image` method. see
    versatileimagefield.datastructures.filteredimage.FilteredImage and
    versatileimagefield.datastructures.sizedimage.SizedImage
    for examples.
    Includes a preprocessing API based on image format/file type. See
    the `preprocess` method for more specific information.
    """

    def __init__(self, path_to_image, storage, create_on_demand,
                 placeholder_image=None):
        # Path of the source image within `storage`.
        self.path_to_image = path_to_image
        # Django storage backend used for all reads/writes.
        self.storage = storage
        # Whether missing renditions should be generated on access.
        self.create_on_demand = create_on_demand
        self.placeholder_image = placeholder_image

    def process_image(self, image, image_format, **kwargs):
        """
        Arguments:
        * `image`: a PIL Image instance
        * `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF')
        Returns a BytesIO representation of the resized image.
        Subclasses MUST implement this method.
        """
        raise NotImplementedError(
            'Subclasses MUST provide a `process_image` method.'
        )

    def preprocess(self, image, image_format):
        """
        Preprocesses an image.
        An API hook for image pre-processing. Calls any image format specific
        pre-processors (if defined). I.E. If `image_format` is 'JPEG', this
        method will look for a method named `preprocess_JPEG`, if found
        `image` will be passed to it.
        Arguments:
        * `image`: a PIL Image instance
        * `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF')
        Subclasses should return a 2-tuple:
        * [0]: A PIL Image instance.
        * [1]: A dictionary of additional keyword arguments to be used
               when the instance is saved. If no additional keyword
               arguments, return an empty dict ({}).
        """
        save_kwargs = {'format': image_format}
        # Ensuring image is properly rotated
        if hasattr(image, '_getexif'):
            exif_datadict = image._getexif()  # returns None if no EXIF data
            if exif_datadict is not None:
                exif = dict(exif_datadict.items())
                orientation = exif.get(EXIF_ORIENTATION_KEY, None)
                # Undo the camera-recorded rotation so pixels are upright.
                # NOTE(review): only the pure-rotation orientations (3, 6, 8)
                # are handled; mirrored orientations (2, 4, 5, 7) pass
                # through unchanged — confirm this is intentional.
                if orientation == 3:
                    image = image.transpose(Image.ROTATE_180)
                elif orientation == 6:
                    image = image.transpose(Image.ROTATE_270)
                elif orientation == 8:
                    image = image.transpose(Image.ROTATE_90)
        # Dynamic dispatch: per-format hooks (e.g. `preprocess_JPEG`) may
        # further adjust the image and add save keyword arguments.
        if hasattr(self, 'preprocess_%s' % image_format):
            image, addl_save_kwargs = getattr(
                self,
                'preprocess_%s' % image_format
            )(image=image)
            save_kwargs.update(addl_save_kwargs)
        return image, save_kwargs

    def retrieve_image(self, path_to_image):
        """
        Returns a 4-tuple for the image stored at `path_to_image`:
        (PIL Image instance, file extension, PIL format, mime type).
        """
        image = self.storage.open(path_to_image, 'rb')
        # Extension is everything after the last '.'.
        file_ext = path_to_image.rsplit('.')[-1]
        image_format, mime_type = get_image_metadata_from_file_ext(file_ext)
        return (
            Image.open(image),
            file_ext,
            image_format,
            mime_type
        )

    def save_image(self, imagefile, save_path, file_ext, mime_type):
        """
        Saves an image to self.storage at `save_path`.
        Arguments:
        `imagefile`: Raw image data, typically a BytesIO instance.
        `save_path`: The path within self.storage where the image should
                     be saved.
        `file_ext`: The file extension of the image-to-be-saved.
        `mime_type`: A valid image mime type (as found in
                     versatileimagefield.utils)
        """
        # Wrap the raw bytes in Django's upload-file abstraction; field
        # name, size and charset are not needed by the storage backend,
        # so they are passed as None.
        file_to_save = InMemoryUploadedFile(
            imagefile,
            None,
            'foo.%s' % file_ext,
            mime_type,
            None,
            None
        )
        # Rewind before handing off so storage reads from the beginning.
        file_to_save.seek(0)
        self.storage.save(save_path, file_to_save)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.