id | text | dataset_id
|---|---|---|
85749 | # -*- coding: utf-8 -*-
import os
import subprocess
class execlib:
@staticmethod
def get_stdout(cmdline):
""" @return A byte string, so maybe need to decode with .decode('cp932')"""
return subprocess.check_output(cmdline, shell=True)
@staticmethod
def execute(cmdline):
""" @return A return code. """
childobj = subprocess.Popen(cmdline, shell=True)
childobj.communicate()
return childobj.returncode
@staticmethod
def nonblocking_exec(cmdline):
""" windows only.
`(binpath) (params)` 形式の場合は両方とも "" で囲むこと. """
commandline = 'start "" %s' % cmdline
return execlib.execute(commandline)
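# Illustrative usage sketch (not part of the original script; the command strings are examples only):
#   out = execlib.get_stdout('dir')            # raw bytes; decode with .decode('cp932') on Japanese Windows
#   rc = execlib.execute('echo hello')         # blocks until the child exits and returns its exit code
#   execlib.nonblocking_exec('"notepad.exe"')  # launches via `start ""` and returns immediately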
def abort(msg):
raise RuntimeError(msg)
def nowstr():
import datetime
return datetime.datetime.now().strftime("%y%m%d_%H%M%S")
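# e.g. 2024-01-31 15:45:02 -> '240131_154502'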
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('comment', default='', nargs='*',
help='Comment for appending to a filename.')
parser.add_argument('-s', '--second', default=0, type=int)
parser.add_argument('-m', '--minutes', default=0, type=int)
parser.add_argument('--hour', default=0, type=int)
parser.add_argument('-t', '--time', default=None,
help='The format must be `HHHH:MM:SS`.')
parser.add_argument('--wav', default=False, action='store_true',
help='Use WAV format instead of the default WMA.')
#parser.add_argument('-p', '--progress', default=False, action='store_true',
# help='Show waiting progress.')
parser.add_argument('--ps', default=False, action='store_true',
help='Show soundrecorder.exe processes information.')
parser.add_argument('-d', '--dialog', default=False, action='store_true',
help='Open the mic dialog. (Single Use)')
parser.add_argument('--here', default=False, action='store_true',
help='Open the current directory with explorer.exe (Single Use)')
parser.add_argument('--test', default=False, action='store_true',
help='No execution. (Only show the commandline.)')
parsed_args = parser.parse_args()
return parsed_args
args = parse_arguments()
curdir = os.getcwd()
comment = ''
for elm in args.comment:
comment += elm
# single commands when given.
# ---------------------------
if args.dialog:
err = execlib.nonblocking_exec('"C:\\Windows\\system32\\rundll32.exe" Shell32.dll,Control_RunDLL mmsys.cpl,,recording')
exit(err)
if args.here:
err = execlib.nonblocking_exec(curdir)
exit(err)
if args.ps:
commandline = "WMIC PROCESS WHERE \"Name LIKE '%soundrecorder%'\" GET CREATIONDATE,CAPTION /FORMAT:LIST"
stdout_raw = execlib.get_stdout(commandline)
# Output comes from cmd, so the character encoding is fixed (cp932).
stdout_str = stdout_raw.decode('cp932')
stdout_lines = stdout_str.split('\r\n')
stdout_lines = [line for line in stdout_lines if len(line.strip())!=0]
if stdout_lines:
# Before: ['Caption=SoundRecorder.exe\r\r', 'CreationDate=20170221144301.365129+540\r\r', ...]
# After : from yyyymmdd_hhmmss
for i in range(len(stdout_lines)):
line = stdout_lines[i]
if i%2!=0:
print('from {}_{}'.format(line.split('=')[1][:8], line.split('=')[1][8:8+6]))
exit(0)
# fix filename
# ------------
timestr = args.time
if args.second:
timestr = '0000:00:%02d' % args.second
elif args.minutes:
timestr = '0000:%02d:00' % args.minutes
elif args.hour:
timestr = '%04d:00:00' % args.hour
def abort_if_time_format_is_invalid(timestr):
if not(timestr):
abort('No recording time.')
if len(timestr)!=len('HHHH:MM:SS'):
abort('Invalid time format. (invalid length)')
try:
h, m, s = [int(elm) for elm in timestr.split(':')]
except (ValueError, IndexError):
abort('Invalid time format. (not `HHHH:MM:SS`)')
if h<0 or h>9999:
abort('Invalid time format. (invalid hour "%d")' % h)
if m<0 or m>59:
abort('Invalid time format. (invalid minute "%d")' % m)
if s<0 or s>59:
abort('Invalid time format. (invalid second "%d")' % s)
abort_if_time_format_is_invalid(timestr)
recordee_name = nowstr()
def timestr_to_filename(timestr):
h, m, s = [int(elm) for elm in timestr.split(':')]
ret = '_'
if h!=0:
ret += '%dh' % h
if m!=0:
ret += '%dm' % m
if s!=0:
ret += '%ds' % s
return ret
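# e.g. timestr '0000:05:30' -> '_5m30s'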
recordee_name += timestr_to_filename(timestr)
if comment:
recordee_name += '_%s' % comment
ext = '.wma'
if args.wav:
ext = '.wav'
recordee_name += ext
# execute
# -------
commandline = 'soundrecorder /FILE %s /DURATION %s' % (recordee_name, timestr)
if not(args.test):
err = execlib.nonblocking_exec(commandline)
exit(err)
else:
print(commandline)
exit(0)
| StarcoderdataPython |
1786019 | from website_downloader.services.files import FilesService
from website_downloader.services.utils import is_google_tag_manager
class ScriptsService(FilesService):
def extract_elements_from_page(self):
raw_links = self.page.find_all('script')
for raw in raw_links:
src = raw.attrs.get('src')
if not src:
continue
if src not in self.raw_elements:
self.raw_elements.append(src)
def filter_elements(self):
self.elements = list(filter(lambda elem: not is_google_tag_manager(elem), self.raw_elements))
| StarcoderdataPython |
11928 | # -*- coding: future_fstrings -*-
import codecs
import pdb
import string
# NOTE https://stackoverflow.com/questions/38777818/how-do-i-properly-create-custom-text-codecs
# prepare map from numbers to letters
_encode_table = {str(number): bytes(letter) for number, letter in enumerate(string.ascii_lowercase)}
# prepare inverse map
_decode_table = {v: k for k, v in _encode_table.items()}
def custom_encode(text):
# example encoder that converts ints to letters
print "custom_encode",text
# see https://docs.python.org/3/library/codecs.html#codecs.Codec.encode
return b''.join(_encode_table[x] for x in text), len(text)
def custom_decode(binary):
# example decoder that converts letters to ints
print "custom_decode",binary
# see https://docs.python.org/3/library/codecs.html#codecs.Codec.decode
return ''.join(_decode_table[x] for x in binary), len(binary)
def custom_search_function(encoding_name):
return codecs.CodecInfo(encode=custom_encode, decode=custom_decode, name='Reasons')
def main():
# register your custom codec
# note that CodecInfo.name is used later
codecs.register(custom_search_function)
binary = 'abcdefg'
# decode letters to numbers
pdb.set_trace()
text = binary.decode('Reasons')
print(text)
# encode numbers to letters
binary2 = text.encode('Reasons')
print(binary2)
# fstring = 'f"hello {text}"'.decode('future-fstrings')
# print fstring
# encode(decode(...)) should be an identity function
assert binary == binary2
if __name__ == '__main__':
main() | StarcoderdataPython |
3396102 | <filename>fiber.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Fiber Class
-----------
To be used in conjunction with the IFU reduction code, Panacea
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
import cPickle as pickle
import os.path as op
import os
from utils import biweight_filter
__all__ = ["Fiber"]
class Fiber:
def __init__(self, D, fibnum, path, filename, trace_poly_order=3,
fibmodel_poly_order=3,wave_poly_order=3):
'''
Initialize class
----------------
:param D:
The number of columns in the spectrograph detector image.
:param fibnum:
The fiber number starting at 1 related to initialization. This
serves as a place holder until there is a mapping to sky.
:param path:
The directory path name for where this fiber file is saved
:param filename:
The full filename of the raw frame from which the fiber was
extracted
:param trace_poly_order:
The order of the polynomial used to fit the trace across the
columns of the image
:param fibmodel_poly_order:
The order of the polynomial used to fit the fibmodel bins across
the columns of the image. Not necessarily used.
:param wave_poly_order:
The order of the polynomial used to fit the wavelength across
the columns of the image.
:init flag:
Flag value for undefined values in trace, fibmodel, or wavelength
:init trace_x:
Columns values for trace to be filled in through
Amplifier.get_trace()
:init trace_y:
Row values for trace to be filled in through
Amplifier.get_trace()
:init trace:
Full trace solution to be solved for using Amplifier.get_trace()
:init wavelength:
Wavelength for each column along the trace
:init fiber_to_fiber:
This is solved for in Amplifier.get_fiber_to_fiber().
:init throughput:
Fiber throughput.
'''
# Taking inputs and initializing the properties for this fiber.
self.D = D
self.fibnum = fibnum
self.filename = filename
self.basename = op.basename(filename)[:-5]
self.path = path
self.trace_poly_order = trace_poly_order
self.fibmodel_poly_order = fibmodel_poly_order
self.wave_poly_order = wave_poly_order
# Other initialized variables. Mostly defaulting to None for processing.
self.flag = -999
self.init_trace_info()
self.wavelength = None
self.throughput = None
self.fiber_to_fiber = None
self.amp_to_amp = None
self.RA = None
self.Dec = None
self.rot = None
self.theta = None
self.gain = None
self.dark_mult = None
self.bias_mult = None
self.file = None
self.ifuslot = None
self.specid = None
self.ifuid = None
self.object = None
self.datetime = None
self.core = None
self.xind = None
self.yind = None
self.yoff = None
self.wavelength = None
self.fibmodel = None
self.fibmodel_x = None
self.fibmodel_y = None
self.binx = None
self.fibmodel_polyvals = None
self.trace_polyvals = None
self.spectrum = None
self.mask_spectrum = None
self.wave_polyvals = None
self.sky_spectrum = None
self.default_trace_y = None
self.dead = False
def init_trace_info(self):
self.trace_x = self.flag * np.ones((self.D,),dtype = np.int)
self.trace_y = np.zeros((self.D,))
self.trace = np.zeros((self.D,))
self.trace_polyvals = np.zeros((self.trace_poly_order+1,))
def fit_trace_poly(self):
sel = self.trace_x != self.flag
self.trace_polyvals = np.polyfit(self.trace_x[sel] / self.D,
self.trace_y[sel],
self.trace_poly_order)
def fit_fibmodel_poly(self):
self.fibmodel_polyvals = np.polyfit(self.fibmodel_x / self.D,
self.fibmodel_y,
self.fibmodel_poly_order)
def eval_trace_poly(self, use_poly=False, smoothing_length=25):
sel = self.trace_x != self.flag
if use_poly:
self.trace = np.polyval(self.trace_polyvals,
1. * np.arange(self.D) / self.D)
else:
self.trace = np.zeros((self.D,))
init_x = np.where(sel)[0][0]
fin_x = np.where(sel)[0][-1]
self.trace[init_x:fin_x] = np.interp(np.arange(init_x,fin_x),
self.trace_x[sel],
self.trace_y[sel])
self.trace[init_x:fin_x] = biweight_filter(self.trace[init_x:fin_x],
smoothing_length)
ix = int(init_x+smoothing_length/2+1)
fx = int(init_x+smoothing_length/2+1 + smoothing_length*2)
p1 = np.polyfit(np.arange(ix,fx), self.trace[ix:fx], 1)
self.trace[:ix] = np.polyval(p1, np.arange(ix))
ix = int(fin_x-smoothing_length/2-1 - smoothing_length*2)
fx = int(fin_x-smoothing_length/2)
pf = np.polyfit(np.arange(ix,fx), self.trace[ix:fx], 1)
self.trace[fx:self.D] = np.polyval(pf, np.arange(fx,self.D))
def eval_fibmodel_poly(self, use_poly=False):
self.fibmodel = np.zeros((self.D, len(self.binx)))
for i in xrange(len(self.binx)):
if use_poly:
self.fibmodel[:,i] = np.polyval(self.fibmodel_polyvals[:,i],
1.* np.arange(self.D) / self.D)
else:
self.fibmodel[:,i] = np.interp(np.arange(self.D),
self.fibmodel_x,
self.fibmodel_y[:,i])
def eval_wave_poly(self):
self.wavelength = np.polyval(self.wave_polyvals,
1.* np.arange(self.D) / self.D)
def save(self, specid, ifuslot, ifuid, amp):
self.fibmodel = None
self.trace_x = None
self.trace_y = None
self.wavelength = None
self.mask_spectrum = None
self.fn = op.join(self.path, 'fiber_%03d_%s_%s_%s_%s.pkl'
% (self.fibnum, specid, ifuslot, ifuid, amp))
if not op.exists(self.path):
os.mkdir(self.path)
with open(self.fn, 'wb') as f:
pickle.dump(self, f) | StarcoderdataPython |
3357004 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 09:28:17 2021
@author: qichen
"""
from aiida import load_profile
profile = load_profile()
from aiida.common import LinkType
from aiida.orm.utils.links import LinkPair
from aiida.tools.visualization import Graph, pstate_node_styles
graph = Graph()
graph.add_node('15870')
graph.add_node('15879')
graph.add_node('15888')
graph.add_node('15898')
graph.add_node('15908')
graph.add_node('15917')
graph.add_incoming('15870')
graph.add_outgoing('15870')
graph.add_incoming('15888')
graph.add_outgoing('15888')
graph.add_outgoing('15879')
graph.add_outgoing('15888')
graph.add_outgoing('15898')
graph.add_outgoing('15908')
graph.add_outgoing('15917')
graph.graphviz | StarcoderdataPython |
35482 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
import seaborn as sns
import itertools
import pandas as pd
import scipy
from scipy.signal import savgol_filter
from scipy.signal import find_peaks_cwt
from scipy.signal import boxcar
sns.set(font_scale=1.2)
sns.set_style("white")
colors = ["#95a5a6", "amber"]
sns.set_palette(sns.color_palette())
hr_24 = np.loadtxt("MDA DTX_1 4_24hr.txt", skiprows=1)
ctl = np.loadtxt("MDA DTX_1 Ctl.txt", skiprows=1)
hr_4 = np.loadtxt("MDA DTX_1 4hr.txt", skiprows=1)
# hr_2 = np.loadtxt("MDA-DTX-#2hr.txt", skiprows=1)
hr_8 = np.loadtxt("MDA DTX 8hr.txt", skiprows=1)
dmso = np.loadtxt("MDA DTX DMSO.txt", skiprows=1)
def filterDat(data):
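# 9-point boxcar (moving-average) smoothing of the raw trace, rescaled to a 0-100 "% of max" range.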
num = 9
ones = boxcar(num)/num
result = np.abs(np.convolve(data, ones, mode='same'))
return np.interp(result, (result.min(), result.max()), (0, 100))
def shift(data):
"""
firstIndex = 200
index = np.argmax(data)
if index < firstIndex:
data = np.insert(data, 0, np.zeros(
firstIndex-index))[:-(firstIndex-index)]
elif index > firstIndex:
data = data[index-firstIndex:]
data = np.insert(data, len(data)-1, np.zeros(index-firstIndex))
"""
# Stretch
secondIndex = 400
indexes = find_peaks_cwt(data, np.arange(1, 100))
# find max of indexes
peaks = data[indexes]
secondMax = 0
lastPeak = 0
for x in range(len(peaks)):
if peaks[x] < 95.0:
if peaks[x] > lastPeak:
lastPeak = peaks[x]
secondMax = x
secondMaxIndex = indexes[secondMax]
difference = secondIndex-secondMaxIndex
ratio = secondIndex/(secondIndex-difference)
old_x = np.linspace(0, int(len(data))-1, int(len(data)))
new_x = np.linspace(0, int(len(data))-1, int(len(data)*ratio))
new_data = np.interp(new_x, old_x, data)
return new_data, np.linspace(0, int(len(new_x))-1, int(len(new_x)))
fig, axes = plt.subplots(figsize=(8, 6))
filterData = filterDat(ctl[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="Control", color='black')
axes.fill_between(x, y, alpha=0.3)
"""filterData = filterDat(hr_4[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="4 hour")
axes.fill_between(x, y, alpha=0.3)
filterData = filterDat(hr_8[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="8 hour")
axes.fill_between(x, y, alpha=0.3)
"""
filterData = filterDat(hr_24[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="24 hour", color='maroon')
axes.fill_between(x, y, alpha=0.3)
axes.legend()
axes.set_ylabel('% of Max')
axes.set_xlabel('Fluorescence')
axes.set_xlim((0, 800))
plt.show()
| StarcoderdataPython |
1630211 | #!/usr/bin/env python
# Note that this should be used with original GALFIT.
from glob import glob
from astropy.io import fits as pyfits
import os, sys, getopt
"""make_images.py - Create images for galfitm spiralsim test
As well as creating singe-band images from the model feedmes, this
routine produces multi-band model feedmes and corresponding
images. It also produces fit feedmes for the various noise levels
and non-parametric versions.
Usage:
make_images.py [arch]
where [arch] is 'linux' or 'osx',
defaulting to linux if not specified.
Run from each folder, e.g. spiral, bending, etc.
"""
#noiselevels = [1, 5, 10, 50, 100]
#nonparams = [False, True]
noiselevels = [1]
nonparams = [False]
def make_images(arch='linux'):
noise = [pyfits.getdata('../n%i.fits'%i) for i in noiselevels]
gals = glob('galfit.gal*')
for g in gals:
make_mwl_model_feedme(g)
gals = glob('galfit.gal*')
for g in gals:
os.system('galfitm-0.0.3-%s %s'%(arch, g))
imgname = g.replace('galfit.', '')
for i, n in enumerate(noiselevels):
img = pyfits.open(imgname+'.fits')
img[0].data += noise[i]
img.writeto(imgname+'n%i.fits'%n, clobber=True)
feedmes = glob('feedme.gal*[0123456789]')
for f in feedmes:
make_mwl_fit_feedme(f)
feedmes = glob('feedme.gal*[0123456789]')
feedmes += glob('feedme.gal*[0123456789]mwl')
for f in feedmes:
make_noise_feedme(f)
def make_noise_feedme(feedme):
feedmelines = open(feedme).readlines()
for i, n in enumerate(noiselevels):
for nonparam in nonparams:
if nonparam:
np = 'n'
else:
np = ''
feedmeout = open(feedme+'n%i%s'%(n,np), 'w')
for l in feedmelines:
ls = l.split(None, 2)
if len(ls) > 1 and ls[0] == 'A)':
l = l.replace('.fits', 'n%i.fits'%n)
if len(ls) > 1 and ls[0] == 'B)':
l = l.replace('fit.fits', 'n%i%sfit.fits'%(n,np))
if nonparam and len(ls) > 1 and ls[0] == 'U)':
l = l.replace('0', '1', 1)
feedmeout.write(l)
feedmeout.close()
def make_mwl_model_feedme(feedme='galfit.gal8'):
bands = 'ugrizYJHK'
# corresponds to bulge or disk in illustration model B:
mag_disk = [17.687,16.717,15.753,15.315,15.019,14.936,14.745,14.425,14.299]
# corresponds to bulge in illustration model E:
mag_spheroid = [18.546,17.519,15.753,15.084,14.764,14.623,14.433,14.02,13.78]
# corresponds to disk in illustration model E:
mag_arms = [16.999,16.127,15.753,15.529,15.389,15.41,15.384,15.192,15.281]
component = 0
feedmelines = open(feedme).readlines()
for i, b in enumerate(bands):
feedmeout = open(feedme+b, 'w')
for l in feedmelines:
ls = l.split(None, 2)
if len(ls) > 1 and ls[0] == '3)':
component += 1
mag0 = float(ls[1])
if component == 1:
magmwl = mag_spheroid
elif component == 2:
magmwl = mag_disk
else:
magmwl = mag_arms
deltamag = magmwl[2] - mag0
mag = magmwl[i] - deltamag
mags = '%.3f'%mag
l = l.replace(ls[1], mags)
if len(ls) > 1 and ls[0] == 'B)':
l = l.replace('.fits', b+'.fits')
feedmeout.write(l)
feedmeout.close()
def make_mwl_fit_feedme(feedme='feedme.gal8'):
bands = 'ugrizYJHK'
feedmelines = open(feedme).readlines()
feedmeout = open(feedme+'mwl', 'w')
for l in feedmelines:
ls = l.split(None, 3)
if len(ls) > 1 and ls[0] == '3)':
l = l.replace(' 1 ', ' 7 ')
if len(ls) > 1 and ls[0] == 'A)':
out = ','.join(ls[1].replace('.fits', b+'.fits') for b in bands)
l = l.replace(ls[1], out)
l += 'A1) u,g,r,i,z,Y,J,H,K # Band labels\n'
l += 'A2) 3543,4770,6231,7625,9134,10305,12483,16313,22010 # Band wavelengths\n'
if len(ls) > 1 and ls[0] == 'B)':
l = l.replace('fit.fits', 'mwlfit.fits')
if len(ls) > 1 and ls[0][0] in 'DJ':
l = l.replace(ls[1], ','.join([ls[1]]*9))
feedmeout.write(l)
feedmeout.close()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "hf", ["help", "force"])
except getopt.error as msg:
raise Usage(msg)
clobber = False
for o, a in opts:
if o in ("-h", "--help"):
print(__doc__)
return 1
if o in ("-f", "--force"):
clobber = True
if len(args) == 0:
arch = 'linux'
if len(args) == 1:
arch = args[0]
elif len(args) > 1:
raise Usage("Wrong number of arguments")
make_images(arch)
except Usage as err:
print(err.msg, file=sys.stderr)
print("For help use --help", file=sys.stderr)
return 2
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
3246922 | from __future__ import print_function
import random
import math
import numpy as np
import torch
import torchnet as tnt
class FewShotDataloader:
def __init__(
self,
dataset,
nKnovel=5,
nKbase=-1,
nExemplars=1,
nTestNovel=15*5,
nTestBase=15*5,
batch_size=1,
num_workers=4,
epoch_size=2000):
self.dataset = dataset
self.phase = self.dataset.phase
max_possible_nKnovel = (
self.dataset.num_cats_base if (
self.phase=='train' or self.phase=='trainval')
else self.dataset.num_cats_novel)
assert 0 <= nKnovel <= max_possible_nKnovel
self.nKnovel = nKnovel
max_possible_nKbase = self.dataset.num_cats_base
nKbase = nKbase if nKbase >= 0 else max_possible_nKbase
if (self.phase=='train' or self.phase=='trainval') and nKbase > 0:
nKbase -= self.nKnovel
max_possible_nKbase -= self.nKnovel
assert 0 <= nKbase <= max_possible_nKbase
self.nKbase = nKbase
self.nExemplars = nExemplars
self.nTestNovel = nTestNovel
self.nTestBase = nTestBase
self.batch_size = batch_size
self.epoch_size = epoch_size
self.num_workers = num_workers
self.is_eval_mode = (self.phase=='test') or (self.phase=='val')
def sampleImageIdsFrom(self, cat_id, sample_size=1):
"""
Samples `sample_size` number of unique image ids picked from the
category `cat_id` (i.e., self.dataset.label2ind[cat_id]).
Args:
cat_id: a scalar with the id of the category from which images will
be sampled.
sample_size: number of images that will be sampled.
Returns:
image_ids: a list of length `sample_size` with unique image ids.
"""
assert(cat_id in self.dataset.label2ind.keys())
assert(len(self.dataset.label2ind[cat_id]) >= sample_size)
# Note: random.sample samples elements without replacement.
return random.sample(self.dataset.label2ind[cat_id], sample_size)
def sampleCategories(self, cat_set, sample_size=1):
"""
Samples `sample_size` number of unique categories picked from the
`cat_set` set of categories. `cat_set` can be either 'base' or 'novel'.
Args:
cat_set: string that specifies the set of categories from which
categories will be sampled.
sample_size: number of categories that will be sampled.
Returns:
cat_ids: a list of length `sample_size` with unique category ids.
"""
if cat_set=='base':
labelIds = self.dataset.labelIds_base
elif cat_set=='novel':
labelIds = self.dataset.labelIds_novel
else:
raise ValueError('Not recognized category set {}'.format(cat_set))
assert(len(labelIds) >= sample_size)
# return sample_size unique categories chosen from labelIds set of
# categories (that can be either self.labelIds_base or self.labelIds_novel)
# Note: random.sample samples elements without replacement.
return random.sample(labelIds, sample_size)
def sample_base_and_novel_categories(self, nKbase, nKnovel):
"""
Samples `nKbase` number of base categories and `nKnovel` number of novel
categories.
Args:
nKbase: number of base categories
nKnovel: number of novel categories
Returns:
Kbase: a list of length 'nKbase' with the ids of the sampled base
categories.
Knovel: a list of length 'nKnovel' with the ids of the sampled novel
categories.
"""
if self.is_eval_mode:
assert(nKnovel <= self.dataset.num_cats_novel)
# sample from the set of base categories 'nKbase' number of base
# categories.
Kbase = sorted(self.sampleCategories('base', nKbase))
# sample from the set of novel categories 'nKnovel' number of novel
# categories.
Knovel = sorted(self.sampleCategories('novel', nKnovel))
else:
# sample from the set of base categories 'nKnovel' + 'nKbase' number
# of categories.
cats_ids = self.sampleCategories('base', nKnovel+nKbase)
assert(len(cats_ids) == (nKnovel+nKbase))
# Randomly pick 'nKnovel' number of fake novel categories and keep
# the rest as base categories.
random.shuffle(cats_ids)
Knovel = sorted(cats_ids[:nKnovel])
Kbase = sorted(cats_ids[nKnovel:])
return Kbase, Knovel
def sample_test_examples_for_base_categories(self, Kbase, nTestBase):
"""
Sample `nTestBase` number of images from the `Kbase` categories.
Args:
Kbase: a list of length `nKbase` with the ids of the categories from
where the images will be sampled.
nTestBase: the total number of images that will be sampled.
Returns:
Tbase: a list of length `nTestBase` with 2-element tuples. The 1st
element of each tuple is the image id that was sampled and the
2nd elemend is its category label (which is in the range
[0, len(Kbase)-1]).
"""
Tbase = []
if len(Kbase) > 0:
# Sample for each base category a number images such that the total
# number sampled images of all categories to be equal to `nTestBase`.
KbaseIndices = np.random.choice(
np.arange(len(Kbase)), size=nTestBase, replace=True)
KbaseIndices, NumImagesPerCategory = np.unique(
KbaseIndices, return_counts=True)
for Kbase_idx, NumImages in zip(KbaseIndices, NumImagesPerCategory):
imd_ids = self.sampleImageIdsFrom(
Kbase[Kbase_idx], sample_size=NumImages)
Tbase += [(img_id, Kbase_idx) for img_id in imd_ids]
assert len(Tbase) == nTestBase
return Tbase
def sample_train_and_test_examples_for_novel_categories(
self, Knovel, nTestExamplesTotal, nExemplars, nKbase):
"""Samples train and test examples of the novel categories.
Args:
Knovel: a list with the ids of the novel categories.
nTestExamplesTotal: the total number of test images that will be sampled
from all the novel categories.
nExemplars: the number of training examples per novel category that
will be sampled.
nKbase: the number of base categories. It is used as offset of the
category index of each sampled image.
Returns:
Tnovel: a list of length `nTestNovel` with 2-element tuples. The
1st element of each tuple is the image id that was sampled and
the 2nd element is its category label (which is in the range
[nKbase, nKbase + len(Knovel) - 1]).
Exemplars: a list of length len(Knovel) * nExemplars of 2-element
tuples. The 1st element of each tuple is the image id that was
sampled and the 2nd element is its category label (which is in
the range [nKbase, nKbase + len(Knovel) - 1]).
"""
if len(Knovel) == 0:
return [], []
nKnovel = len(Knovel)
Tnovel = []
Exemplars = []
assert (nTestExamplesTotal % nKnovel) == 0
nTestExamples = nTestExamplesTotal // nKnovel
for Knovel_idx in range(len(Knovel)):
img_ids = self.sampleImageIdsFrom(
Knovel[Knovel_idx],
sample_size=(nTestExamples + nExemplars))
img_labeled = img_ids[:(nTestExamples + nExemplars)]
img_tnovel = img_labeled[:nTestExamples]
img_exemplars = img_labeled[nTestExamples:]
Tnovel += [
(img_id, nKbase+Knovel_idx) for img_id in img_tnovel]
Exemplars += [
(img_id, nKbase+Knovel_idx) for img_id in img_exemplars]
assert len(Tnovel) == nTestExamplesTotal
assert len(Exemplars) == len(Knovel) * nExemplars
random.shuffle(Exemplars)
return Tnovel, Exemplars
def sample_episode(self):
"""Samples a training episode."""
nKnovel = self.nKnovel
nKbase = self.nKbase
nTestNovel = self.nTestNovel
nTestBase = self.nTestBase
nExemplars = self.nExemplars
Kbase, Knovel = self.sample_base_and_novel_categories(nKbase, nKnovel)
Tbase = self.sample_test_examples_for_base_categories(Kbase, nTestBase)
outputs = self.sample_train_and_test_examples_for_novel_categories(
Knovel, nTestNovel, nExemplars, nKbase)
Tnovel, Exemplars = outputs
# concatenate the base and novel category examples.
Test = Tbase + Tnovel
random.shuffle(Test)
Kall = Kbase + Knovel
return Exemplars, Test, Kall, nKbase
def createExamplesTensorData(self, examples):
"""
Creates the examples image and label tensor data.
Args:
examples: a list of 2-element tuples, each representing a
train or test example. The 1st element of each tuple
is the image id of the example and 2nd element is the
category label of the example, which is in the range
[0, nK - 1], where nK is the total number of categories
(both novel and base).
Returns:
images: a tensor of shape [nExamples, Height, Width, 3] with the
example images, where nExamples is the number of examples
(i.e., nExamples = len(examples)).
labels: a tensor of shape [nExamples] with the category label
of each example.
"""
images = torch.stack(
[self.dataset[img_idx][0] for img_idx, _ in examples],
dim=0)
labels = torch.LongTensor(
[label for _, label in examples])
return images, labels
def get_iterator(self, epoch=0):
rand_seed = epoch
random.seed(rand_seed)
np.random.seed(rand_seed)
def load_function(iter_idx):
Exemplars, Test, Kall, nKbase = self.sample_episode()
Xt, Yt = self.createExamplesTensorData(Test)
Kall = torch.LongTensor(Kall)
if len(Exemplars) > 0:
Xe, Ye = self.createExamplesTensorData(Exemplars)
return Xe, Ye, Xt, Yt, Kall, nKbase
else:
return Xt, Yt, Kall, nKbase
tnt_dataset = tnt.dataset.ListDataset(
elem_list=list(range(self.epoch_size)),
load=load_function)
data_loader = tnt_dataset.parallel(
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=(False if self.is_eval_mode else True))
return data_loader
def __call__(self, epoch=0):
return self.get_iterator(epoch)
def __len__(self):
return self.epoch_size // self.batch_size
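# Illustrative usage sketch (not from the original file; `dataset` stands for any object exposing the
# attributes used above, e.g. .phase, .num_cats_base, .num_cats_novel, .label2ind and __getitem__;
# all argument values below are examples only):
#   loader = FewShotDataloader(dataset, nKnovel=5, nKbase=64, nExemplars=1,
#                              nTestNovel=75, nTestBase=75, batch_size=8, epoch_size=1000)
#   for Xe, Ye, Xt, Yt, Kall, nKbase in loader(epoch=0):
#       ...  # Xe/Ye: support (exemplar) images/labels, Xt/Yt: query images/labels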
class LowShotDataloader:
def __init__(
self,
dataset_train_novel,
dataset_evaluation,
nExemplars=1,
batch_size=1,
num_workers=4):
self.nExemplars = nExemplars
self.batch_size = batch_size
self.num_workers = num_workers
self.dataset_train_novel = dataset_train_novel
self.dataset_evaluation = dataset_evaluation
assert(self.dataset_evaluation.labelIds_novel ==
self.dataset_train_novel.labelIds_novel)
assert(self.dataset_evaluation.labelIds_base ==
self.dataset_train_novel.labelIds_base)
assert(self.dataset_evaluation.base_classes_eval_split ==
self.dataset_train_novel.base_classes_eval_split)
self.nKnovel = self.dataset_evaluation.num_cats_novel
self.nKbase = self.dataset_evaluation.num_cats_base
# Category ids of the base categories.
self.Kbase = sorted(self.dataset_evaluation.labelIds_base)
assert(self.nKbase == len(self.Kbase))
# Category ids of the novel categories.
self.Knovel = sorted(self.dataset_evaluation.labelIds_novel)
assert(self.nKnovel == len(self.Knovel))
self.Kall = self.Kbase + self.Knovel
self.CategoryId2LabelIndex = {
category_id: label_index for label_index, category_id in enumerate(self.Kall)
}
self.Kbase_eval_split = self.dataset_train_novel.base_classes_eval_split
Kbase_set = set(self.Kall[:self.nKbase])
Kbase_eval_split_set = set(self.Kbase_eval_split)
assert(len(set.intersection(Kbase_set, Kbase_eval_split_set)) == len(Kbase_eval_split_set))
self.base_eval_split_labels = sorted(
[self.CategoryId2LabelIndex[category_id] for category_id in self.Kbase_eval_split]
)
# Collect the image indices of the evaluation set for both the base and
# the novel categories.
data_indices = []
for category_id in self.Kbase_eval_split:
data_indices += self.dataset_evaluation.label2ind[category_id]
for category_id in self.Knovel:
data_indices += self.dataset_evaluation.label2ind[category_id]
self.eval_data_indices = sorted(data_indices)
self.epoch_size = len(self.eval_data_indices)
def base_category_label_indices(self):
return self.base_eval_split_labels
def novel_category_label_indices(self):
return list(range(self.nKbase, len(self.Kall)))
def sampleImageIdsFrom(self, cat_id, sample_size=1):
"""
Samples `sample_size` number of unique image ids picked from the
category `cat_id` (i.e., self.dataset_train_novel.label2ind[cat_id]).
Args:
cat_id: a scalar with the id of the category from which images will
be sampled.
sample_size: number of images that will be sampled.
Returns:
image_ids: a list of length `sample_size` with unique image ids.
"""
assert(cat_id in self.dataset_train_novel.label2ind)
assert(len(self.dataset_train_novel.label2ind[cat_id]) >= sample_size)
# Note: random.sample samples elements without replacement.
return random.sample(self.dataset_train_novel.label2ind[cat_id], sample_size)
def sample_training_examples_for_novel_categories(
self, Knovel, nExemplars, nKbase):
"""Samples (a few) training examples for the novel categories.
Args:
Knovel: a list with the ids of the novel categories.
nExemplars: the number of training examples per novel category.
nKbase: the number of base categories.
Returns:
Exemplars: a list of length len(Knovel) * nExemplars of 2-element
tuples. The 1st element of each tuple is the image id that was
sampled and the 2nd element is its category label (which is in
the range [nKbase, nKbase + len(Knovel) - 1]).
"""
Exemplars = []
for knovel_idx, knovel_label in enumerate(Knovel):
imds = self.sampleImageIdsFrom(knovel_label, sample_size=nExemplars)
Exemplars += [(img_id, nKbase + knovel_idx) for img_id in imds]
random.shuffle(Exemplars)
return Exemplars
def create_examples_tensor_data(self, examples):
"""
Creates the examples image and label tensor data.
Args:
examples: a list of 2-element tuples, each representing a
train or test example. The 1st element of each tuple
is the image id of the example and 2nd element is the
category label of the example, which is in the range
[0, nK - 1], where nK is the total number of categories
(both novel and base).
Returns:
images: a tensor of shape [nExamples, Height, Width, 3] with the
example images, where nExamples is the number of examples
(i.e., nExamples = len(examples)).
labels: a tensor of shape [nExamples] with the category label
of each example.
"""
images = torch.stack(
[self.dataset_train_novel[img_idx][0] for img_idx, _ in examples],
dim=0)
labels = torch.LongTensor([label for _, label in examples])
return images, labels
def sample_training_data_for_novel_categories(self, exp_id=0):
nKnovel = self.nKnovel
nKbase = self.nKbase
random.seed(exp_id) # fix the seed for this experiment.
# Sample `nExemplars` number of training examples per novel category.
train_examples = self.sample_training_examples_for_novel_categories(
self.Knovel, self.nExemplars, nKbase)
Kall = torch.LongTensor(self.Kall)
images_train, labels_train = self.create_examples_tensor_data(
train_examples)
return images_train, labels_train, Kall, nKbase, nKnovel
def get_iterator(self, epoch=0):
def load_fun_(idx):
img_idx = self.eval_data_indices[idx]
img, category_id = self.dataset_evaluation[img_idx]
label = (self.CategoryId2LabelIndex[category_id]
if (category_id in self.CategoryId2LabelIndex) else -1)
return img, label
tnt_dataset = tnt.dataset.ListDataset(
elem_list=list(range(self.epoch_size)), load=load_fun_)
data_loader = tnt_dataset.parallel(
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
drop_last=False)
return data_loader
def __call__(self, epoch=0):
return self.get_iterator(epoch)
def __len__(self):
return int(math.ceil(float(self.epoch_size)/self.batch_size))
| StarcoderdataPython |
3325921 | <reponame>linux-machine/linuxmachinebeta
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from linuxmachinebeta.view.api.serializers import ServiceViewSerializer
from linuxmachinebeta.utils.helpers import get_user_ip
class ServiceViewAPIView(APIView):
permission_classes = [AllowAny, ]
def post(self, request):
user_ip = get_user_ip(request)
serializer = ServiceViewSerializer(data=request.data, context={'user_ip': user_ip})
if serializer.is_valid():
serializer.save(user_ip=user_ip)
return Response(status=200)
return Response({'error_msg': serializer.errors}, status=200)
service_view_api_view = ServiceViewAPIView.as_view()
| StarcoderdataPython |
1796067 | <reponame>pbhatia243/DeepCRF
# A few utility functions
import itertools
import numpy as np
import tensorflow as tf  # needed by linearize_indices below (tf.pack is the pre-1.0 name of tf.stack)
###############################################
# Generally useful functions #
###############################################
# useful with reshape
def linearize_indices(indices, dims):
res = []
remain = indices
for i, _ in enumerate(dims):
res = [remain % dims[-i - 1]] + res
remain = remain / dims[-i - 1]
linearized = tf.transpose(tf.pack(res))
return linearized
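# Example: with dims = (4, 5), the flat index 13 decomposes right-to-left into coordinates (2, 3),
# since 13 = 2 * 5 + 3 -- the same mapping as np.unravel_index(13, (4, 5)).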
###############################################
# Data reading functions #
###############################################
class Config:
def __init__(self, batch_size=20, num_steps=32, learning_rate=1e-2,
l1_reg=2e-3, l1_list=[],
l2_reg=2e-3, l2_list=[],
features_dim=50, init_words=False, input_features=[],
use_rnn=False, rnn_hidden_units=100, rnn_output_size=50,
use_convo=False, conv_window=5, conv_dim=50,
pot_size=1,
pred_window=1, tag_list=[],
verbose=False, num_epochs=10, num_predict=5):
# optimization parameters
self.batch_size = batch_size
self.num_steps = num_steps
self.learning_rate = learning_rate
# regularization parameters
self.l1_reg = l1_reg
self.l1_list = l1_list
self.l2_reg = l2_reg
self.l2_list = l2_list
# input layer
self.features_dim = features_dim
self.init_words = init_words
self.input_features = input_features
# recurrent layer
self.use_rnn = use_rnn
self.rnn_hidden_units = rnn_hidden_units
self.rnn_output_size = rnn_output_size
# convolutional layer
self.use_convo = use_convo
self.conv_window = conv_window
self.conv_dim = conv_dim
# CRF parameters:
self.pot_size = pot_size
self.n_tags = len(tag_list)
# output layer
self.pred_window = pred_window
self.tag_list = tag_list
self.label_dict = {}
tags_ct = 0
for element in itertools.product(tag_list, repeat=pred_window):
tag_st = '_'.join(element)
mid = element[pred_window / 2]
if mid == '<P>':
self.label_dict[tag_st] = (-1, tag_list.index(mid))
else:
self.label_dict[tag_st] = (tags_ct, tag_list.index(mid))
tags_ct += 1
self.n_outcomes = tags_ct
# misc parameters
self.verbose = verbose
self.num_epochs = num_epochs
self.num_predict = num_predict
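# Example of the mapping built above: with tag_list = ['<P>', 'B', 'I', 'O'] (an illustrative tag set)
# and pred_window = 3, label_dict['B_I_O'] = (outcome_index, tag_list.index('I')); windows whose
# centre tag is '<P>' (padding) are assigned outcome index -1 and do not count towards n_outcomes.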
def make_mappings(self, data):
self.feature_maps = dict([(feat, {'lookup': {'_unk_': 0},
'reverse': ['_unk_']})
for feat in data[0][0]])
for sentence in data:
for token in sentence:
for feat in data[0][0]:
ft = token[feat]
if ft not in self.feature_maps[feat]['lookup']:
self.feature_maps[feat]['lookup'][ft] = \
len(self.feature_maps[feat]['reverse'])
self.feature_maps[feat]['reverse'] += [ft]
def to_string(self):
st = ''
for k, v in self.__dict__.items():
if k not in ['feature_maps', 'label_dict']:
st += k + ' --- ' + str(v) + ' \n'
return st
class Batch:
def __init__(self):
# features: {'word': 'have', 'pos': 'VB', ...} ->
# [1345, 12 * num_features + 1,...]
self.features = []
# tags: 'B' -> 1
self.tags = []
# tags_one_hot: 'B' -> [0, 1, 0, 0, 0, 0]
self.tags_one_hot = []
# tag_windows: '<P>_B_O' -> [0, 1, 3]
self.tag_windows = []
# tag_windows_lin: '<P>_B_O' -> num_values * token_id + 0 * config.n_tags **2 + 1 * config.n_tags + 3
self.tag_windows_lin = []
# tag_windows_one_hot: '<P>_B_O' -> [0, ..., 0, 1, 0, ..., 0]
self.tag_windows_one_hot = []
# tag_neighbours: '<P>_B_O' -> [0, 3]
self.tag_neighbours = []
# tag_neighbours_linearized: '<P>_B_O' -> num_values * token_id + 0 * config.n_tags + 3
self.tag_neighbours_lin = []
# mask: <P> -> 0, everything else -> 1
def read(self, data, start, config, fill=False):
num_features = len(config.input_features)
batch_data = data[start:start + config.batch_size]
batch_features = [[[config.feature_maps[feat]['lookup'][token[feat]]
for feat in config.input_features]
for token in sentence]
for sentence in batch_data]
batch_labels = [[config.label_dict[token['label']]
for token in sentence]
for sentence in batch_data]
# multiply feature indices for use in tf.nn.embedding_lookup
self.features = [[[num_features * ft + i for i, ft in enumerate(word)]
for word in sentence] for sentence in batch_features]
self.tags = [[label[1] for label in sentence]
for sentence in batch_labels]
self.tags_one_hot = [[[int(x == label[1] and x > 0) # TODO: count padding tokens?
for x in range(config.n_tags)]
for label in sentence]
for sentence in batch_labels]
self.tag_windows_one_hot = [[[int(x == label[0])
for x in range(config.n_outcomes)]
for label in sentence]
for sentence in batch_labels]
if fill:
max_len = max(config.conv_window,
max([len(sentence) for sentence in batch_data]) + 2)
for i in range(config.batch_size):
current_len = len(batch_data[i])
pre_len = (max_len - current_len) / 2
post_len = max_len - pre_len - current_len
self.features[i] = [range(num_features)] * pre_len + \
self.features[i] + \
[range(num_features)] * post_len
self.tags[i] = [0] * pre_len + self.tags[i] + [0] * post_len
self.tags_one_hot[i] = [[0] * config.n_outcomes] * pre_len + \
self.tags_one_hot[i] + \
[[0] * config.n_outcomes] * post_len
self.tag_windows_one_hot[i] = [[0] * config.n_outcomes] * pre_len + \
self.tag_windows_one_hot[i] + \
[[0] * config.n_outcomes] * post_len
mid = config.pot_window / 2
padded_tags = [[0] * mid + sentence + [0] * mid
for sentence in self.tags]
# get linearized window indices
self.tag_windows = [[sent[i + j] for j in range(-mid, mid + 1)]
for sent in padded_tags
for i in range(mid, len(sent) - mid)]
n_indices = config.n_tags ** config.pot_window
self.tag_windows_lin = [sum([t * (config.n_tags ** (config.pot_window - 1 - i))
for i, t in enumerate(window)]) + i * n_indices
for i, window in enumerate(self.tag_windows)]
# get linearized potential indices
self.tag_neighbours = [[sent[i + j]
for j in range(-mid, 0) + range(1, mid + 1)]
for sent in padded_tags
for i in range(mid, len(sent) - mid)]
max_pow = config.pot_window - 1
n_indices = config.n_tags ** max_pow
self.tag_neighbours_lin = [sum([idx * (config.n_tags) ** (max_pow - j - 1)
for j, idx in enumerate(token)]) + i * n_indices
for i, token in enumerate(self.tag_neighbours)]
# make mask:
self.mask = [[int(tag > 0) for tag in sent] for sent in self.tags]
def aggregate_labels(sentence, config):
pre_tags = ['<P>'] * (config.pred_window / 2)
sentence_ext = pre_tags + [token['label']
for token in sentence] + pre_tags
for i, token in enumerate(sentence):
current = token['label']
sentence[i]['label'] = '_'.join([sentence_ext[i+j]
for j in range(config.pred_window)])
def read_data(file_name, features, config):
sentences = []
sentence = []
f = open(file_name)
c = 0
for line in f:
c += 1
if c % 100000 == 0:
print c, 'lines read'
if len(line.strip()) == 0 and len(sentence) > 0:
sentences += [sentence[:]]
sentence = []
else:
sentence += [dict(zip(features, line.strip().split('\t')))]
if len(sentence) > 0:
sentences += [sentence[:]]
f.close()
foo = [aggregate_labels(sentence, config) for sentence in sentences]
return sentences
def show(sentence):
return ' '.join([token['word']+'/'+token['label'] for token in sentence])
# read pre_trained word vectors
def read_vectors(file_name, vocab):
vectors = {}
f = open(file_name)
dim = int(f.readline().strip().split()[1])
for line in f:
w = line.split()[0]
vec = [float(x) for x in line.strip().split()[1:]]
vectors[w] = np.array(vec)
f.close()
res = np.zeros((len(vocab), dim))
for i, w in enumerate(vocab):
res[i] = vectors.get(w, np.zeros(dim))
return res
# extract windows from data to fit into unrolled RNN. Independent sentences
def cut_and_pad(data, config):
pad_token = dict([(feat, '_unk_') for feat in data[0][0]])
pad_token['label'] = '_'.join(['<P>'] * config.pred_window)
num_steps = config.num_steps
res = []
seen = 0
pad_len = max(config.pred_window, config.pot_window) / 2
sen = [pad_token] * pad_len + data[0] + [pad_token] * pad_len
while seen < len(data):
if len(sen) < num_steps:
if sen[0]['label'] == '<P>':
new_sen = ((num_steps - len(sen)) / 2) * [pad_token] + sen
else:
new_sen = sen
new_sen = new_sen + (num_steps - len(new_sen)) * [pad_token]
res += [new_sen[:]]
seen += 1
if seen < len(data):
sen = [pad_token] * pad_len + data[seen] + [pad_token] * pad_len
else:
res += [sen[:num_steps]]
sen = sen[(2 * num_steps) / 3:]
return res
# extract windows from data to fit into unrolled RNN. Continuous model
def cut_batches(data, config):
pad_token = dict([(feat, '_unk_') for feat in data[0][0]])
pad_token['label'] = '_'.join(['<P>'] * config.pred_window)
padding = [pad_token] * config.pred_window
new_data = padding + [tok for sentence in data
for tok in sentence + padding]
step_size = (config.num_steps / 2)
num_cuts = len(new_data) / step_size
res = [new_data[i * step_size: i * step_size + config.num_steps]
for i in range(num_cuts)]
res[-1] = res[-1] + [pad_token] * (config.num_steps - len(res[-1]))
return res
###############################################
# NN evaluation functions #
###############################################
def treat_spans(spans_file):
span_lists = []
f = open(spans_file)
y = []
for line in f:
if line.strip() == '':
span_lists += [y[:]]
y = []
else:
lsp = line.strip().split()
y = y + [(int(lsp[0]), int(lsp[1]), lsp[2])]
f.close()
return span_lists
def find_gold(sentence):
gold = []
current_gold = []
for i, token in enumerate(sentence):
if token['label'] == 'B' or token['label'] == 'O':
if len(current_gold) > 0:
gold += [tuple(current_gold)]
current_gold = []
if 'I' in token['label'] or token['label'] == 'B':
current_gold += [i]
if len(current_gold) > 0:
gold += [tuple(current_gold)]
return gold
def make_scores(token, thr):
res = dict([(key, val)
for key, val in token.items()
if key in ['O', 'OD', 'I', 'ID', 'B'] and val > thr])
return res
def find_mentions(sentence, thr=0.02):
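# Beam-style search over the per-token tag probabilities: `working` holds open mention hypotheses as
# [token indices, summed log-score, discontiguous flag]; hypotheses ending at the previous token are
# closed into `found` (with a length-normalised probability) whenever a 'B' or 'O' score appears, and
# the beam is pruned to the 1000 best hypotheses.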
scores = [make_scores(token, thr) for token in sentence]
found = []
working = []
for i, score in enumerate(scores):
if 'B' in score or 'O' in score:
for work in working:
if work[0][-1] == i-1:
sc = work[1] + np.log(score.get('B', 0) +
score.get('O', 0))
sc /= (work[0][-1] + 2 - work[0][0])
found += [(tuple(work[0]), np.exp(sc))]
if len(score) == 1 and 'O' in score:
working = []
else:
new_working = []
if 'B' in score:
new_working = [[[i], np.log(score['B']), False]]
for work in working:
for tg, sc in score.items():
if tg == 'OD':
new_working += [[work[0], work[1] + np.log(sc), True]]
elif tg == 'ID' and work[2]:
new_working += [[work[0] + [i], work[1] + np.log(sc),
True]]
elif tg == 'I' and not work[2]:
new_working += [[work[0] + [i], work[1] + np.log(sc),
False]]
working = new_working[:]
if len(working) > 1000:
working = sorted(working, key=lambda x: x[1],
reverse=True)[:1000]
return sorted(found, key=lambda x: x[1], reverse=True)
def read_sentence(sentence):
return (sentence, find_gold(sentence), find_mentions(sentence))
def merge(sentences, spans):
res = []
sent = read_sentence(sentences[0])
span = spans[0]
for i, sp in enumerate(spans):
if i == 0:
continue
if sp[0] == span[0]:
sen = read_sentence(sentences[i])
gold = sorted(list(set(sen[1] + sent[1])))
sent = (sen[0], gold, sen[2])
else:
res += [(sent, span)]
sent = read_sentence(sentences[i])
span = spans[i]
res += [(sent, span)]
return res
def evaluate(merged_sentences, threshold):
TP = 0
FP = 0
FN = 0
for sentence in merged_sentences:
true_mentions = sentence[0][1]
tp = 0
for pred in sentence[0][2]:
if pred[1] >= threshold:
if pred[0] in true_mentions:
tp += 1
else:
FP += 1
TP += tp
FN += len(true_mentions) - tp
if (TP + FP) == 0:
prec = 0
recall = 0
else:
prec = float(TP) / (TP + FP)
recall = float(TP) / (TP + FN)
if prec == 0 or recall == 0:
f1 = 0
else:
f1 = 2 * (prec * recall) / (prec + recall)
print 'TH:', threshold, '\t', 'P:', prec, '\t', 'R:', recall, '\t', 'F:', f1
| StarcoderdataPython |
3203455 | <filename>turtlepoly.py
"""
File: <turtlestar.py>
Copyright (c) 2016 <<NAME>>
License MIT
<This code produces a polygon with any number of sides given that it is a natural number.>
"""
import turtle
bob = turtle.Pen()
num_sides_inp = raw_input("Enter number of sides: ")
num_sides = int(num_sides_inp)
side_len_inp = raw_input("Enter length of each side: ")
side_len = int(side_len_inp)
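# Each turn below is the polygon's exterior angle (360/n degrees); after n sides the turtle has
# rotated a full 360 degrees and the polygon closes.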
for side in range(num_sides):
bob.forward(side_len)
bob.left(360.0/num_sides)
stopper = raw_input("Hit <enter> to quit.")
turtle.bye()
| StarcoderdataPython |
1650817 | from os.path import isfile
from .formats.zip_file import ZIPFile
from .formats.compressed_file import TempDirectory
class Decompressor(object):
def __init__(self, f):
self.f = f
def get_fmt(self):
magic = self.f.read(8)
self.f.seek(0)
for fmt in [ ZIPFile ]:
if fmt.is_magic(magic):
return fmt
return None
def extract(self, fmt, tmp_dir):
compressed = fmt(self.f)
return [ f for f in compressed.extract(tmp_dir) if isfile(f) ]
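# Illustrative usage sketch (not from the original file; the archive name and the extraction
# directory are assumptions):
#   with open('archive.zip', 'rb') as f:
#       dec = Decompressor(f)
#       fmt = dec.get_fmt()                # ZIPFile when the first bytes match the zip magic, else None
#       if fmt is not None:
#           files = dec.extract(fmt, '/tmp/extracted')  # paths of the extracted regular files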
| StarcoderdataPython |
1629293 | from __future__ import absolute_import
from celery import Celery
from django.conf import settings
app = Celery('webalyzer')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| StarcoderdataPython |
1770560 | # ------------------
# Only for running this script here
import logging
import sys
from os.path import dirname
sys.path.insert(1, f"{dirname(__file__)}/../../..")
logging.basicConfig(level=logging.DEBUG)
# ------------------
# ---------------------
# Flask App for Slack OAuth flow
# ---------------------
import os
import json
from slack import WebClient
from slack.signature import SignatureVerifier
logger = logging.getLogger(__name__)
signature_verifier = SignatureVerifier(os.environ["SLACK_SIGNING_SECRET"])
client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])
# ---------------------
# Flask App for Slack events
# ---------------------
from concurrent.futures.thread import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=5)
# pip3 install flask
from flask import Flask, request, make_response
app = Flask(__name__)
app.debug = True
@app.route("/slack/events", methods=["POST"])
def slack_app():
request_body = request.get_data()
if not signature_verifier.is_valid_request(request_body, request.headers):
return make_response("invalid request", 403)
if request.headers["content-type"] == "application/json":
# Events API
body = json.loads(request_body)
if body["event"]["type"] == "workflow_step_execute":
step = body["event"]["workflow_step"]
def handle_step():
try:
client.workflows_stepCompleted(
workflow_step_execute_id=step["workflow_step_execute_id"],
outputs={
"taskName": step["inputs"]["taskName"]["value"],
"taskDescription": step["inputs"]["taskDescription"]["value"],
"taskAuthorEmail": step["inputs"]["taskAuthorEmail"]["value"],
},
)
except Exception as err:
client.workflows_stepFailed(
workflow_step_execute_id=step["workflow_step_execute_id"],
error={"message": f"Something went wrong! ({err})", }
)
executor.submit(handle_step)
return make_response("", 200)
elif "payload" in request.form:
# Action / View Submission
body = json.loads(request.form["payload"])
if body["type"] == "workflow_step_edit":
new_modal = client.views_open(
trigger_id=body["trigger_id"],
view={
"type": "workflow_step",
"callback_id": "copy_review_view",
"blocks": [
{
"type": "section",
"block_id": "intro-section",
"text": {
"type": "plain_text",
"text": "Create a task in one of the listed projects. The link to the task and other details will be available as variable data in later steps.",
},
},
{
"type": "input",
"block_id": "task_name_input",
"element": {
"type": "plain_text_input",
"action_id": "task_name",
"placeholder": {
"type": "plain_text",
"text": "Write a task name",
},
},
"label": {"type": "plain_text", "text": "Task name"},
},
{
"type": "input",
"block_id": "task_description_input",
"element": {
"type": "plain_text_input",
"action_id": "task_description",
"placeholder": {
"type": "plain_text",
"text": "Write a description for your task",
},
},
"label": {"type": "plain_text", "text": "Task description"},
},
{
"type": "input",
"block_id": "task_author_input",
"element": {
"type": "plain_text_input",
"action_id": "task_author",
"placeholder": {
"type": "plain_text",
"text": "Write a task name",
},
},
"label": {"type": "plain_text", "text": "Task author"},
},
],
},
)
return make_response("", 200)
if body["type"] == "view_submission" \
and body["view"]["callback_id"] == "copy_review_view":
state_values = body["view"]["state"]["values"]
client.workflows_updateStep(
workflow_step_edit_id=body["workflow_step"]["workflow_step_edit_id"],
inputs={
"taskName": {
"value": state_values["task_name_input"]["task_name"]["value"],
},
"taskDescription": {
"value": state_values["task_description_input"]["task_description"][
"value"
],
},
"taskAuthorEmail": {
"value": state_values["task_author_input"]["task_author"]["value"],
},
},
outputs=[
{"name": "taskName", "type": "text", "label": "Task Name", },
{
"name": "taskDescription",
"type": "text",
"label": "Task Description",
},
{
"name": "taskAuthorEmail",
"type": "text",
"label": "Task Author Email",
},
],
)
return make_response("", 200)
return make_response("", 404)
if __name__ == "__main__":
# export SLACK_BOT_TOKEN=***
# export SLACK_SIGNING_SECRET=***
# export FLASK_ENV=development
app.run("localhost", 3000)
# python3 integration_tests/samples/workflows/steps_from_apps.py
# ngrok http 3000
# POST https://{yours}.ngrok.io/slack/events
| StarcoderdataPython |
1795445 | <reponame>annahs/atmos_research
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import copy
from mpl_toolkits.basemap import Basemap
import mysql.connector
timezone = -8
endpointsWHI = []
#fire times
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes follwing Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST
#database connection
cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='black_carbon')
cursor = cnx.cursor()
SP2_data_query = ('SELECT UNIX_UTC_6h_midtime FROM whi_gc_and_sp2_6h_mass_concs WHERE RH_threshold = 90 ORDER BY UNIX_UTC_6h_midtime')
cursor.execute(SP2_data_query)
dates = cursor.fetchall()
cnx.close()
date_times = []
for date in dates:
date_time = datetime.utcfromtimestamp(date[0])
date_times.append(date_time)
endpoints_LRT = []
endpoints_SPac = []
endpoints_NPac = []
endpoints_Cont = []
#CLUSLIST_file ='C:/HYSPLIT_argh/WHI_1h_10-day_working/even_hours/CLUSLIST_4'
CLUSLIST_file = 'C:/Users/<NAME>/Documents/Data/WHI long term record/HYSPLIT/clustering/CLUSLIST_10'
with open(CLUSLIST_file,'r') as f:
for line in f:
newline = line.split()
date = datetime(2000+int(newline[2]),int(newline[3]),int(newline[4]),int(newline[5]))
if (fire_time1[0] <= date < fire_time1[1]) or (fire_time2[0] <= date < fire_time2[1]):
continue
for date_time in date_times:
if date == date_time:
cluster = int(newline[0])
file = newline[7]
tdump_file = open(file, 'r')
endpoints = []
data_start = False
for line in tdump_file:
newline = line.split()
if data_start == True:
lat = float(newline[9])
lon = float(newline[10])
endpoint = [lat, lon]
endpoints.append(endpoint)
if newline[1] == 'PRESSURE':
data_start = True
tdump_file.close()
if cluster in [4]: #N Can (Cont)
endpoints_Cont.append(endpoints)
if cluster in [6,8,9]: #S Pac
endpoints_SPac.append(endpoints)
if cluster in [2,7]: # W Pac/Asia (LRT)
endpoints_LRT.append(endpoints)
if cluster in [1,3,5,10]: #N Pac
endpoints_NPac.append(endpoints)
print len(endpoints_NPac),len(endpoints_SPac),len(endpoints_Cont),len(endpoints_LRT),
#plottting
###set up the basemap instance
lat_pt = 57.06
lon_pt = -157.96
plt_lat_min = -10
plt_lat_max = 90#44.2
plt_lon_min = -220#-125.25
plt_lon_max = -50
m = Basemap(width=9000000,height=7000000,
rsphere=(6378137.00,6356752.3142),
resolution='l',area_thresh=1000.,projection='lcc',
lat_1=45.,lat_2=55,lat_0=lat_pt,lon_0=lon_pt)
fig = plt.figure(figsize=(10,8))
ax1 = fig.add_subplot(221)
ax1.set_xlabel('Northern Pacific')
ax1.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax1.text(0.6, 0.05,'88 trajectories', transform=ax1.transAxes)
for row in endpoints_NPac:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='b')
ax2 = fig.add_subplot(222)
ax2.set_xlabel('Southern Pacific')
ax2.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax2.text(0.6, 0.05,'34 trajectories', transform=ax2.transAxes)
for row in endpoints_SPac:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='g')
ax4 = fig.add_subplot(223)
ax4.set_xlabel('Western Pacific/Asia')
ax4.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax4.text(0.6, 0.05,'18 trajectories', transform=ax4.transAxes)
for row in endpoints_LRT:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='orange')
ax5 = fig.add_subplot(224)
ax5.set_xlabel('Northern Canada')
ax5.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax5.text(0.6, 0.05,'14 trajectories', transform=ax5.transAxes)
for row in endpoints_Cont:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='r')
plt.subplots_adjust(hspace=0.15)
plt.subplots_adjust(wspace=0.1)
#labels = ['Western Pacific/Asia (15%)','Southern Pacific (19%)','Georgia Basin/Puget Sound (4%)','Northern Pacific (48%)','Northern Canada (5%)']
os.chdir('C:/Users/<NAME>/Documents/Data/WHI long term record/HYSPLIT/')
plt.savefig('WHI_FT_all_6h_HYSPLIT_BTs-4clusters.png', bbox_inches='tight')
#plt.savefig('WHI_FT_all_2h_HYSPLIT_BTs_sep_maps_by_cluster.png', bbox_inches='tight')
plt.show() | StarcoderdataPython |
3298977 | # coding: utf-8
"""
Xero Finance API
The Finance API is a collection of endpoints which customers can use in the course of a loan application, which may assist lenders to gain the confidence they need to provide capital. # noqa: E501
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ContactTotalDetail(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"total_paid": "float",
"total_outstanding": "float",
"total_credited_un_applied": "float",
}
attribute_map = {
"total_paid": "totalPaid",
"total_outstanding": "totalOutstanding",
"total_credited_un_applied": "totalCreditedUnApplied",
}
def __init__(
self, total_paid=None, total_outstanding=None, total_credited_un_applied=None
): # noqa: E501
"""ContactTotalDetail - a model defined in OpenAPI""" # noqa: E501
self._total_paid = None
self._total_outstanding = None
self._total_credited_un_applied = None
self.discriminator = None
if total_paid is not None:
self.total_paid = total_paid
if total_outstanding is not None:
self.total_outstanding = total_outstanding
if total_credited_un_applied is not None:
self.total_credited_un_applied = total_credited_un_applied
@property
def total_paid(self):
"""Gets the total_paid of this ContactTotalDetail. # noqa: E501
Total paid invoice and cash value for the contact within the period. # noqa: E501
:return: The total_paid of this ContactTotalDetail. # noqa: E501
:rtype: float
"""
return self._total_paid
@total_paid.setter
def total_paid(self, total_paid):
"""Sets the total_paid of this ContactTotalDetail.
Total paid invoice and cash value for the contact within the period. # noqa: E501
:param total_paid: The total_paid of this ContactTotalDetail. # noqa: E501
:type: float
"""
self._total_paid = total_paid
@property
def total_outstanding(self):
"""Gets the total_outstanding of this ContactTotalDetail. # noqa: E501
Total outstanding invoice value for the contact within the period. # noqa: E501
:return: The total_outstanding of this ContactTotalDetail. # noqa: E501
:rtype: float
"""
return self._total_outstanding
@total_outstanding.setter
def total_outstanding(self, total_outstanding):
"""Sets the total_outstanding of this ContactTotalDetail.
Total outstanding invoice value for the contact within the period. # noqa: E501
:param total_outstanding: The total_outstanding of this ContactTotalDetail. # noqa: E501
:type: float
"""
self._total_outstanding = total_outstanding
@property
def total_credited_un_applied(self):
"""Gets the total_credited_un_applied of this ContactTotalDetail. # noqa: E501
Total unapplied credited value for the contact within the period. # noqa: E501
:return: The total_credited_un_applied of this ContactTotalDetail. # noqa: E501
:rtype: float
"""
return self._total_credited_un_applied
@total_credited_un_applied.setter
def total_credited_un_applied(self, total_credited_un_applied):
"""Sets the total_credited_un_applied of this ContactTotalDetail.
Total unapplied credited value for the contact within the period. # noqa: E501
:param total_credited_un_applied: The total_credited_un_applied of this ContactTotalDetail. # noqa: E501
:type: float
"""
self._total_credited_un_applied = total_credited_un_applied
| StarcoderdataPython |
1794413 | from __future__ import annotations
import abc
import inspect
from typing import TYPE_CHECKING
from PySide2.QtCore import QObject, Signal
from bsmu.vision.core.data import Data
from bsmu.vision.core.plugins.processor.base import ProcessorPlugin
if TYPE_CHECKING:
from typing import Type
from pathlib import Path
class FileLoaderPlugin(ProcessorPlugin):
def __init__(self, file_loader_cls: Type[FileLoader]):
super().__init__(file_loader_cls)
class FileLoaderMeta(abc.ABCMeta, type(QObject)):
_FORMATS = ()
def __new__(mcs, name, bases, namespace):
cls = super().__new__(mcs, name, bases, namespace)
if not inspect.isabstract(cls) and not cls.formats:
raise NotImplementedError('Subclass must define _FORMATS attribute')
return cls
@property
def formats(cls) -> tuple:
return cls._FORMATS
@property
def processed_keys(cls) -> tuple:
return cls.formats
class FileLoader(QObject, metaclass=FileLoaderMeta):
file_loaded = Signal(Data)
def load_file(self, path: Path, **kwargs) -> Data:
data = self._load_file(path, **kwargs)
self.file_loaded.emit(data)
return data
@abc.abstractmethod
def _load_file(self, path: Path, **kwargs) -> Data:
pass
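# A minimal concrete loader (hypothetical, for illustration only) would just
# declare its supported formats and implement the actual loading, roughly:
#
#     class TextFileLoader(FileLoader):
#         _FORMATS = ('txt',)
#
#         def _load_file(self, path: Path, **kwargs) -> Data:
#             return Data(...)  # parse the file at `path` here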
| StarcoderdataPython |
1783891 | <gh_stars>0
class BearPair:
def bigDistance(self, s):
i = 0
j = len(s) - 1
num = 0
first = s[i]
second = s[j]
while j > 0 and s[j] == first:
j -= 1
first_result = abs(i - j)
j = len(s) - 1
while i < len(s) - 1 and s[i] == second:
i += 1
second_result = abs(i - j)
if first_result == 0 and second_result == 0:
return -1
else:
return(max(first_result, second_result))
| StarcoderdataPython |
3206005 | <reponame>tartufotaruffetti/Catalog<filename>database_setup.py
import os
import sys
import datetime
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy import String, DateTime, Text, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
# creating the base
Base = declarative_base()
# creating the User table
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
email = Column(String(250), nullable=False)
picture = Column(String(750), nullable=True)
# creating the Category table
class Category(Base):
__tablename__ = 'category'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
# setting the ON DELETE CASCADE
category_item = relationship(
"CategoryItem",
backref="categoria",
cascade="all, delete, delete-orphan"
)
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return
{
'name': self.name,
'id': self.id
}
# creating the Items table
class CategoryItem(Base):
__tablename__ = 'category_item'
title = Column(String(80), nullable=False)
id = Column(Integer, primary_key=True)
description = Column(String(250))
category_id = Column(Integer, ForeignKey('category.id'))
category = relationship(Category)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
image = Column(Text, default="")
image_data = Column(LargeBinary, nullable=True)
creation_date = Column(DateTime, default=datetime.datetime.utcnow)
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return
{
'title': self.title,
'description': self.description,
'id': self.id
}
engine = create_engine('sqlite:///catalogitem1.db')
Base.metadata.create_all(engine)
| StarcoderdataPython |
1676463 | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Different FCI solvers are implemented to support different type of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
import sys
import ctypes
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf import symm
from pyscf.fci import cistring
from pyscf.fci import direct_spin0
from pyscf.fci import direct_spin1
from pyscf.fci import direct_spin1_symm
from pyscf.fci import addons
from pyscf.fci.spin_op import contract_ss
from pyscf import __config__
libfci = lib.load_library('libfci')
TOTIRREPS = 8
def contract_1e(f1e, fcivec, norb, nelec, link_index=None, orbsym=None):
return direct_spin0.contract_1e(f1e, fcivec, norb, nelec, link_index)
# Note eri is NOT the 2e hamiltonian matrix, the 2e hamiltonian is
# h2e = eri_{pq,rs} p^+ q r^+ s
# = (pq|rs) p^+ r^+ s q - (pq|rs) \delta_{qr} p^+ s
# so eri is defined as
# eri_{pq,rs} = (pq|rs) - (1/Nelec) \sum_q (pq|qs)
# to restore the symmetry between pq and rs,
# eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\sum_q (pq|qs) + \sum_p (pq|rp)]
# Please refer to the treatment in direct_spin1.absorb_h1e
# the input fcivec should be symmetrized
def contract_2e(eri, fcivec, norb, nelec, link_index=None, orbsym=None, wfnsym=0):
if orbsym is None:
return direct_spin0.contract_2e(eri, fcivec, norb, nelec, link_index)
eri = ao2mo.restore(4, eri, norb)
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
assert(neleca == nelecb)
link_indexa = direct_spin0._unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
eri_irs, rank_eri, irrep_eri = direct_spin1_symm.reorder_eri(eri, norb, orbsym)
strsa = numpy.asarray(cistring.gen_strings4orblist(range(norb), neleca))
aidx, link_indexa = direct_spin1_symm.gen_str_irrep(strsa, orbsym, link_indexa,
rank_eri, irrep_eri)
Tirrep = ctypes.c_void_p*TOTIRREPS
linka_ptr = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in link_indexa])
eri_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in eri_irs])
dimirrep = (ctypes.c_int*TOTIRREPS)(*[x.shape[0] for x in eri_irs])
fcivec_shape = fcivec.shape
fcivec = fcivec.reshape((na,na), order='C')
ci1new = numpy.zeros_like(fcivec)
nas = (ctypes.c_int*TOTIRREPS)(*[x.size for x in aidx])
ci0 = []
ci1 = []
for ir in range(TOTIRREPS):
ma, mb = aidx[ir].size, aidx[wfnsym ^ ir].size
ci0.append(numpy.zeros((ma,mb)))
ci1.append(numpy.zeros((ma,mb)))
if ma > 0 and mb > 0:
lib.take_2d(fcivec, aidx[ir], aidx[wfnsym ^ ir], out=ci0[ir])
ci0_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci0])
ci1_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci1])
libfci.FCIcontract_2e_symm1(eri_ptrs, ci0_ptrs, ci1_ptrs,
ctypes.c_int(norb), nas, nas,
ctypes.c_int(nlinka), ctypes.c_int(nlinka),
linka_ptr, linka_ptr, dimirrep,
ctypes.c_int(wfnsym))
for ir in range(TOTIRREPS):
if ci0[ir].size > 0:
lib.takebak_2d(ci1new, ci1[ir], aidx[ir], aidx[wfnsym ^ ir])
return lib.transpose_sum(ci1new, inplace=True).reshape(fcivec_shape)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
ecore=0, **kwargs):
assert(len(orbsym) == norb)
cis = FCISolver(None)
cis.level_shift = level_shift
cis.conv_tol = tol
cis.lindep = lindep
cis.max_cycle = max_cycle
cis.max_space = max_space
cis.nroots = nroots
cis.davidson_only = davidson_only
cis.pspace_size = pspace_size
cis.orbsym = orbsym
cis.wfnsym = wfnsym
unknown = {}
for k, v in kwargs.items():
if not hasattr(cis, k):
unknown[k] = v
setattr(cis, k, v)
if unknown:
sys.stderr.write('Unknown keys %s for FCI kernel %s\n' %
(str(unknown.keys()), __name__))
wfnsym = direct_spin1_symm._id_wfnsym(cis, norb, nelec, cis.orbsym,
cis.wfnsym)
if cis.wfnsym is not None and ci0 is None:
ci0 = addons.symm_initguess(norb, nelec, orbsym, wfnsym)
e, c = cis.kernel(h1e, eri, norb, nelec, ci0, ecore=ecore, **unknown)
return e, c
make_rdm1 = direct_spin0.make_rdm1
make_rdm1s = direct_spin0.make_rdm1s
make_rdm12 = direct_spin0.make_rdm12
trans_rdm1s = direct_spin0.trans_rdm1s
trans_rdm1 = direct_spin0.trans_rdm1
trans_rdm12 = direct_spin0.trans_rdm12
def energy(h1e, eri, fcivec, norb, nelec, link_index=None, orbsym=None, wfnsym=0):
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec) * .5
ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index, orbsym, wfnsym)
return numpy.dot(fcivec.ravel(), ci1.ravel())
def get_init_guess(norb, nelec, nroots, hdiag, orbsym, wfnsym=0):
neleca, nelecb = direct_spin1._unpack_nelec(nelec)
assert(neleca == nelecb)
strsa = cistring.gen_strings4orblist(range(norb), neleca)
airreps = direct_spin1_symm._gen_strs_irrep(strsa, orbsym)
na = nb = len(airreps)
init_strs = []
iroot = 0
for addr in numpy.argsort(hdiag):
addra = addr // nb
addrb = addr % nb
if airreps[addra] ^ airreps[addrb] == wfnsym:
if (addrb,addra) not in init_strs:
init_strs.append((addra,addrb))
iroot += 1
if iroot >= nroots:
break
ci0 = []
for addra,addrb in init_strs:
x = numpy.zeros((na,nb))
if addra == addrb == 0:
x[addra,addrb] = 1
else:
x[addra,addrb] = x[addrb,addra] = numpy.sqrt(.5)
ci0.append(x.ravel())
# Add noise
#ci0[0][0 ] += 1e-5
#ci0[0][-1] -= 1e-5
if len(ci0) == 0:
raise RuntimeError('No determinant matches the target symmetry %s' %
wfnsym)
return ci0
class FCISolver(direct_spin0.FCISolver):
davidson_only = getattr(__config__, 'fci_direct_spin1_symm_FCI_davidson_only', True)
# pspace may break point group symmetry
pspace_size = getattr(__config__, 'fci_direct_spin1_symm_FCI_pspace_size', 0)
def __init__(self, mol=None, **kwargs):
direct_spin0.FCISolver.__init__(self, mol, **kwargs)
# wfnsym will be guessed based on initial guess if it is None
self.wfnsym = None
def dump_flags(self, verbose=None):
direct_spin0.FCISolver.dump_flags(self, verbose)
log = logger.new_logger(self, verbose)
if isinstance(self.wfnsym, str):
log.info('specified CI wfn symmetry = %s', self.wfnsym)
elif isinstance(self.wfnsym, (int, numpy.number)):
log.info('specified CI wfn symmetry = %s',
symm.irrep_id2name(self.mol.groupname, self.wfnsym))
def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
return direct_spin1.absorb_h1e(h1e, eri, norb, nelec, fac)
def make_hdiag(self, h1e, eri, norb, nelec):
return direct_spin0.make_hdiag(h1e, eri, norb, nelec)
def pspace(self, h1e, eri, norb, nelec, hdiag, np=400):
return direct_spin0.pspace(h1e, eri, norb, nelec, hdiag, np)
def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)
def contract_2e(self, eri, fcivec, norb, nelec, link_index=None,
orbsym=None, wfnsym=None, **kwargs):
if orbsym is None: orbsym = self.orbsym
if wfnsym is None: wfnsym = self.wfnsym
wfnsym = direct_spin1_symm._id_wfnsym(self, norb, nelec, orbsym,
wfnsym)
return contract_2e(eri, fcivec, norb, nelec, link_index, orbsym, wfnsym, **kwargs)
def get_init_guess(self, norb, nelec, nroots, hdiag):
wfnsym = direct_spin1_symm._id_wfnsym(self, norb, nelec, self.orbsym,
self.wfnsym)
return get_init_guess(norb, nelec, nroots, hdiag, self.orbsym, wfnsym)
def guess_wfnsym(self, norb, nelec, fcivec=None, orbsym=None, wfnsym=None,
**kwargs):
if orbsym is None:
orbsym = self.orbsym
if fcivec is None:
wfnsym = direct_spin1_symm._id_wfnsym(self, norb, nelec, orbsym,
wfnsym)
else:
wfnsym = addons.guess_wfnsym(fcivec, norb, nelec, orbsym)
verbose = kwargs.get('verbose', None)
log = logger.new_logger(self, verbose)
log.debug('Guessing CI wfn symmetry = %s', wfnsym)
return wfnsym
def kernel(self, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None, pspace_size=None,
orbsym=None, wfnsym=None, ecore=0, **kwargs):
if nroots is None: nroots = self.nroots
if orbsym is None: orbsym = self.orbsym
if wfnsym is None: wfnsym = self.wfnsym
if self.verbose >= logger.WARN:
self.check_sanity()
self.norb = norb
self.nelec = nelec
wfnsym = self.guess_wfnsym(norb, nelec, ci0, orbsym, wfnsym, **kwargs)
with lib.temporary_env(self, orbsym=orbsym, wfnsym=wfnsym):
e, c = direct_spin0.kernel_ms0(self, h1e, eri, norb, nelec, ci0, None,
tol, lindep, max_cycle, max_space,
nroots, davidson_only, pspace_size,
ecore=ecore, **kwargs)
self.eci, self.ci = e, c
return e, c
FCI = FCISolver
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'sto-3g',
'O': 'sto-3g',}
mol.symmetry = 1
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
numpy.random.seed(1)
na = cistring.num_strings(norb, nelec//2)
fcivec = numpy.random.random((na,na))
fcivec = fcivec + fcivec.T
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
print(numpy.allclose(orbsym, [0, 0, 2, 0, 3, 0, 2]))
cis = FCISolver(mol)
cis.orbsym = orbsym
fcivec = addons.symmetrize_wfn(fcivec, norb, nelec, cis.orbsym, wfnsym=0)
ci1 = cis.contract_2e(eri, fcivec, norb, nelec)
ci1ref = direct_spin0.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
e = cis.kernel(h1e, eri, norb, nelec, ecore=m.energy_nuc(), davidson_only=True)[0]
print(e, e - -75.012647118991595)
mol.atom = [['H', (0, 0, i)] for i in range(8)]
mol.basis = {'H': 'sto-3g'}
mol.symmetry = True
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
na = cistring.num_strings(norb, nelec//2)
fcivec = numpy.random.random((na,na))
fcivec = fcivec + fcivec.T
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
cis = FCISolver(mol)
cis.orbsym = orbsym
fcivec = addons.symmetrize_wfn(fcivec, norb, nelec, cis.orbsym, wfnsym=0)
ci1 = cis.contract_2e(eri, fcivec, norb, nelec)
ci1ref = direct_spin0.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
| StarcoderdataPython |
3284649 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for supcon.projection_head."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from supcon import projection_head as projection_head_lib
class ProjectionHeadTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('rank_1', 1),
('rank_4', 4),
('rank_8', 8),
)
def testIncorrectRank(self, rank):
inputs = tf.compat.v1.placeholder(tf.float32, shape=[10] * rank)
with self.assertRaisesRegex(ValueError, 'is expected to have rank 2'):
projection_head = projection_head_lib.ProjectionHead()
projection_head(inputs)
@parameterized.named_parameters(
('float32', tf.float32),
('float64', tf.float64),
('float16', tf.float16),
)
def testConstructProjectionHead(self, dtype):
shape = [3, 4]
feature_dims = [2048, 128]
expected_output_shape = [3, 128]
inputs = tf.random.uniform(shape, seed=1, dtype=dtype)
projection_head = projection_head_lib.ProjectionHead(
feature_dims=feature_dims)
output = projection_head(inputs)
self.assertListEqual(expected_output_shape, output.shape.as_list())
self.assertEqual(inputs.dtype, output.dtype)
def testGradient(self):
inputs = tf.random.uniform((3, 4), dtype=tf.float64, seed=1)
projection_head = projection_head_lib.ProjectionHead()
output = projection_head(inputs)
gradient = tf.gradients(output, inputs)
self.assertIsNotNone(gradient)
@parameterized.named_parameters(
('1_layer', 1, False, False),
('1_layer_bn_with_beta', 1, True, True),
('1_layer_bn_no_beta', 1, True, False),
('2_layer', 2, False, False),
('2_layer_bn_with_beta', 2, True, True),
('2_layer_bn_no_beta', 2, True, False),
('4_layer', 4, False, False),
('4_layer_bn_with_beta', 4, True, True),
('4_layer_bn_no_beta', 4, True, False),
)
def testCreateVariables(self, num_projection_layers, use_batch_norm,
use_batch_norm_beta):
feature_dims = (128,) * num_projection_layers
inputs = tf.random.uniform((3, 4), dtype=tf.float64, seed=1)
projection_head = projection_head_lib.ProjectionHead(
feature_dims=feature_dims,
use_batch_norm=use_batch_norm,
use_batch_norm_beta=use_batch_norm_beta)
projection_head(inputs)
self.assertLen(
[var for var in tf.trainable_variables() if 'kernel' in var.name],
num_projection_layers)
self.assertLen(
[var for var in tf.trainable_variables() if 'bias' in var.name],
0 if use_batch_norm else num_projection_layers - 1)
self.assertLen(
[var for var in tf.trainable_variables() if 'gamma' in var.name],
num_projection_layers - 1 if use_batch_norm else 0)
self.assertLen(
[var for var in tf.trainable_variables() if 'beta' in var.name],
(num_projection_layers - 1 if
(use_batch_norm and use_batch_norm_beta) else 0))
def testInputOutput(self):
feature_dims = (128, 128)
expected_output_shape = (3, 128)
inputs = tf.random.uniform((3, 4), dtype=tf.float64, seed=1)
projection_head = projection_head_lib.ProjectionHead(
feature_dims=feature_dims)
output_tensor = projection_head(inputs)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
outputs = sess.run(output_tensor)
# Make sure that there are no NaNs
self.assertFalse(np.isnan(outputs).any())
self.assertEqual(outputs.shape, expected_output_shape)
@parameterized.named_parameters(
('training', True),
('not_training', False),
)
def testBatchNormIsTraining(self, is_training):
feature_dims = (128, 128)
inputs = tf.random.uniform((3, 4), dtype=tf.float64, seed=1)
projection_head = projection_head_lib.ProjectionHead(
feature_dims=feature_dims, use_batch_norm=True)
outputs = projection_head(inputs, training=is_training)
statistics_vars = [
var for var in tf.all_variables() if 'moving_' in var.name
]
self.assertLen(statistics_vars, 2)
grads = tf.gradients(outputs, statistics_vars)
self.assertLen(grads, 2)
if is_training:
self.assertAllEqual([None, None], grads)
self.assertTrue(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
else:
self.assertNotIn(None, grads)
if __name__ == '__main__':
tf.test.main()
| StarcoderdataPython |
3381662 | import plotly as py
import plotly.graph_objs as go
# ----------pre def
pyplt = py.offline.plot
# ----------code
trace0 = go.Scatter(
x=[1, 2, 3, 4],
y=[10, 11, 12, 13],
text=['A</br>size: 40</br>default', 'B</br>size: 60</br>default', 'C</br>size: 80</br>default', 'D</br>size: 100</br>default'],
mode='markers',
name='default',
marker=dict(
size=[400, 600, 800, 1000],
sizemode='area',
)
)
trace1 = go.Scatter(
x=[1, 2, 3, 4],
y=[14, 15, 16, 17],
text=['A</br>size: 40</br>sizeref: 0.2', 'B</br>size: 60</br>sizeref: 0.2', 'C</br>size: 80</br>sizeref: 0.2', 'D</br>size: 100</br>sizeref: 0.2'],
mode='markers',
name = 'ref0.2',
marker=dict(
size=[400, 600, 800, 1000],
sizeref=0.2,
sizemode='area',
)
)
trace2 = go.Scatter(
x=[1, 2, 3, 4],
y=[20, 21, 22, 23],
text=['A</br>size: 40</br>sizeref: 2', 'B</br>size: 60</br>sizeref: 2', 'C</br>size: 80</br>sizeref: 2', 'D</br>size: 100</br>sizeref: 2'],
mode='markers',
name='ref2',
marker=dict(
size=[400, 600, 800, 1000],
sizeref=2,
sizemode='area',
)
)
data = [trace0, trace1, trace2]
pyplt(data, filename='tmp/bubble_scale.html') | StarcoderdataPython |
192965 | """
sphinx.util.typing
~~~~~~~~~~~~~~~~~~
The composite types for Sphinx.
:copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import typing
from struct import Struct
from types import TracebackType
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union
from docutils import nodes
from docutils.parsers.rst.states import Inliner
from sphinx.deprecation import RemovedInSphinx60Warning, deprecated_alias
if sys.version_info > (3, 7):
from typing import ForwardRef
else:
from typing import _ForwardRef # type: ignore
class ForwardRef:
"""A pseudo ForwardRef class for py36."""
def __init__(self, arg: Any, is_argument: bool = True) -> None:
self.arg = arg
def _evaluate(self, globalns: Dict, localns: Dict) -> Any:
ref = _ForwardRef(self.arg)
return ref._eval_type(globalns, localns)
try:
from types import UnionType # type: ignore # python 3.10 or above
except ImportError:
UnionType = None
if False:
# For type annotation
from typing import Type # NOQA # for python3.5.1
# builtin classes that have incorrect __module__
INVALID_BUILTIN_CLASSES = {
Struct: 'struct.Struct', # Before Python 3.9
TracebackType: 'types.TracebackType',
}
# Text like nodes which are initialized with text and rawsource
TextlikeNode = Union[nodes.Text, nodes.TextElement]
# type of None
NoneType = type(None)
# path matcher
PathMatcher = Callable[[str], bool]
# common role functions
RoleFunction = Callable[[str, str, str, int, Inliner, Dict[str, Any], List[str]],
Tuple[List[nodes.Node], List[nodes.system_message]]]
# A option spec for directive
OptionSpec = Dict[str, Callable[[str], Any]]
# title getter functions for enumerable nodes (see sphinx.domains.std)
TitleGetter = Callable[[nodes.Node], str]
# inventory data on memory
InventoryItem = Tuple[str, str, str, str]
Inventory = Dict[str, Dict[str, InventoryItem]]
def get_type_hints(obj: Any, globalns: Dict = None, localns: Dict = None) -> Dict[str, Any]:
"""Return a dictionary containing type hints for a function, method, module or class object.
This is a simple wrapper of `typing.get_type_hints()` that does not raise an error on
runtime.
"""
from sphinx.util.inspect import safe_getattr # lazy loading
try:
return typing.get_type_hints(obj, globalns, localns)
except NameError:
# Failed to evaluate ForwardRef (maybe TYPE_CHECKING)
return safe_getattr(obj, '__annotations__', {})
except AttributeError:
# Failed to evaluate ForwardRef (maybe not runtime checkable)
return safe_getattr(obj, '__annotations__', {})
except TypeError:
# Invalid object is given. But try to get __annotations__ as a fallback for
# the code using type union operator (PEP 604) in python 3.9 or below.
return safe_getattr(obj, '__annotations__', {})
except KeyError:
# a broken class found (refs: https://github.com/sphinx-doc/sphinx/issues/8084)
return {}
def is_system_TypeVar(typ: Any) -> bool:
"""Check *typ* is system defined TypeVar."""
modname = getattr(typ, '__module__', '')
return modname == 'typing' and isinstance(typ, TypeVar)
def restify(cls: Optional[Type], mode: str = 'fully-qualified-except-typing') -> str:
"""Convert python class to a reST reference.
:param mode: Specify a method how annotations will be stringified.
'fully-qualified-except-typing'
Show the module name and qualified name of the annotation except
the "typing" module.
'smart'
Show the name of the annotation.
"""
from sphinx.util import inspect # lazy loading
if mode == 'smart':
modprefix = '~'
else:
modprefix = ''
try:
if cls is None or cls is NoneType:
return ':py:obj:`None`'
elif cls is Ellipsis:
return '...'
elif isinstance(cls, str):
return cls
elif cls in INVALID_BUILTIN_CLASSES:
return ':py:class:`%s%s`' % (modprefix, INVALID_BUILTIN_CLASSES[cls])
elif inspect.isNewType(cls):
if sys.version_info > (3, 10):
# newtypes have correct module info since Python 3.10+
return ':py:class:`%s%s.%s`' % (modprefix, cls.__module__, cls.__name__)
else:
return ':py:class:`%s`' % cls.__name__
elif UnionType and isinstance(cls, UnionType):
if len(cls.__args__) > 1 and None in cls.__args__:
args = ' | '.join(restify(a, mode) for a in cls.__args__ if a)
return 'Optional[%s]' % args
else:
return ' | '.join(restify(a, mode) for a in cls.__args__)
elif cls.__module__ in ('__builtin__', 'builtins'):
if hasattr(cls, '__args__'):
return ':py:class:`%s`\\ [%s]' % (
cls.__name__,
', '.join(restify(arg, mode) for arg in cls.__args__),
)
else:
return ':py:class:`%s`' % cls.__name__
else:
if sys.version_info >= (3, 7): # py37+
return _restify_py37(cls, mode)
else:
return _restify_py36(cls, mode)
except (AttributeError, TypeError):
return inspect.object_description(cls)
def _restify_py37(cls: Optional[Type], mode: str = 'fully-qualified-except-typing') -> str:
"""Convert python class to a reST reference."""
from sphinx.util import inspect # lazy loading
if mode == 'smart':
modprefix = '~'
else:
modprefix = ''
if (inspect.isgenericalias(cls) and
cls.__module__ == 'typing' and cls.__origin__ is Union):
# Union
if len(cls.__args__) > 1 and cls.__args__[-1] is NoneType:
if len(cls.__args__) > 2:
args = ', '.join(restify(a, mode) for a in cls.__args__[:-1])
return ':py:obj:`~typing.Optional`\\ [:obj:`~typing.Union`\\ [%s]]' % args
else:
return ':py:obj:`~typing.Optional`\\ [%s]' % restify(cls.__args__[0], mode)
else:
args = ', '.join(restify(a, mode) for a in cls.__args__)
return ':py:obj:`~typing.Union`\\ [%s]' % args
elif inspect.isgenericalias(cls):
if isinstance(cls.__origin__, typing._SpecialForm):
text = restify(cls.__origin__, mode) # type: ignore
elif getattr(cls, '_name', None):
if cls.__module__ == 'typing':
text = ':py:class:`~%s.%s`' % (cls.__module__, cls._name)
else:
text = ':py:class:`%s%s.%s`' % (modprefix, cls.__module__, cls._name)
else:
text = restify(cls.__origin__, mode)
origin = getattr(cls, '__origin__', None)
if not hasattr(cls, '__args__'):
pass
elif all(is_system_TypeVar(a) for a in cls.__args__):
# Suppress arguments if all system defined TypeVars (ex. Dict[KT, VT])
pass
elif cls.__module__ == 'typing' and cls._name == 'Callable':
args = ', '.join(restify(a, mode) for a in cls.__args__[:-1])
text += r"\ [[%s], %s]" % (args, restify(cls.__args__[-1], mode))
elif cls.__module__ == 'typing' and getattr(origin, '_name', None) == 'Literal':
text += r"\ [%s]" % ', '.join(repr(a) for a in cls.__args__)
elif cls.__args__:
text += r"\ [%s]" % ", ".join(restify(a, mode) for a in cls.__args__)
return text
elif isinstance(cls, typing._SpecialForm):
return ':py:obj:`~%s.%s`' % (cls.__module__, cls._name)
elif hasattr(cls, '__qualname__'):
if cls.__module__ == 'typing':
return ':py:class:`~%s.%s`' % (cls.__module__, cls.__qualname__)
else:
return ':py:class:`%s%s.%s`' % (modprefix, cls.__module__, cls.__qualname__)
elif isinstance(cls, ForwardRef):
return ':py:class:`%s`' % cls.__forward_arg__
else:
# not a class (ex. TypeVar)
if cls.__module__ == 'typing':
return ':py:obj:`~%s.%s`' % (cls.__module__, cls.__name__)
else:
return ':py:obj:`%s%s.%s`' % (modprefix, cls.__module__, cls.__name__)
def _restify_py36(cls: Optional[Type], mode: str = 'fully-qualified-except-typing') -> str:
if mode == 'smart':
modprefix = '~'
else:
modprefix = ''
module = getattr(cls, '__module__', None)
if module == 'typing':
if getattr(cls, '_name', None):
qualname = cls._name
elif getattr(cls, '__qualname__', None):
qualname = cls.__qualname__
elif getattr(cls, '__forward_arg__', None):
qualname = cls.__forward_arg__
elif getattr(cls, '__origin__', None):
qualname = stringify(cls.__origin__) # ex. Union
else:
qualname = repr(cls).replace('typing.', '')
elif hasattr(cls, '__qualname__'):
qualname = '%s%s.%s' % (modprefix, module, cls.__qualname__)
else:
qualname = repr(cls)
if (isinstance(cls, typing.TupleMeta) and # type: ignore
not hasattr(cls, '__tuple_params__')):
if module == 'typing':
reftext = ':py:class:`~typing.%s`' % qualname
else:
reftext = ':py:class:`%s%s`' % (modprefix, qualname)
params = cls.__args__
if params:
param_str = ', '.join(restify(p, mode) for p in params)
return reftext + '\\ [%s]' % param_str
else:
return reftext
elif isinstance(cls, typing.GenericMeta):
if module == 'typing':
reftext = ':py:class:`~typing.%s`' % qualname
else:
reftext = ':py:class:`%s%s`' % (modprefix, qualname)
if cls.__args__ is None or len(cls.__args__) <= 2:
params = cls.__args__
elif cls.__origin__ == Generator:
params = cls.__args__
else: # typing.Callable
args = ', '.join(restify(arg, mode) for arg in cls.__args__[:-1])
result = restify(cls.__args__[-1], mode)
return reftext + '\\ [[%s], %s]' % (args, result)
if params:
param_str = ', '.join(restify(p, mode) for p in params)
return reftext + '\\ [%s]' % (param_str)
else:
return reftext
elif (hasattr(cls, '__origin__') and
cls.__origin__ is typing.Union):
params = cls.__args__
if params is not None:
if len(params) > 1 and params[-1] is NoneType:
if len(params) > 2:
param_str = ", ".join(restify(p, mode) for p in params[:-1])
return (':py:obj:`~typing.Optional`\\ '
'[:py:obj:`~typing.Union`\\ [%s]]' % param_str)
else:
return ':py:obj:`~typing.Optional`\\ [%s]' % restify(params[0], mode)
else:
param_str = ', '.join(restify(p, mode) for p in params)
return ':py:obj:`~typing.Union`\\ [%s]' % param_str
else:
return ':py:obj:`Union`'
elif hasattr(cls, '__qualname__'):
if cls.__module__ == 'typing':
return ':py:class:`~%s.%s`' % (cls.__module__, cls.__qualname__)
else:
return ':py:class:`%s%s.%s`' % (modprefix, cls.__module__, cls.__qualname__)
elif hasattr(cls, '_name'):
# SpecialForm
if cls.__module__ == 'typing':
return ':py:obj:`~%s.%s`' % (cls.__module__, cls._name)
else:
return ':py:obj:`%s%s.%s`' % (modprefix, cls.__module__, cls._name)
elif hasattr(cls, '__name__'):
# not a class (ex. TypeVar)
if cls.__module__ == 'typing':
return ':py:obj:`~%s.%s`' % (cls.__module__, cls.__name__)
else:
return ':py:obj:`%s%s.%s`' % (modprefix, cls.__module__, cls.__name__)
else:
# others (ex. Any)
if cls.__module__ == 'typing':
return ':py:obj:`~%s.%s`' % (cls.__module__, qualname)
else:
return ':py:obj:`%s%s.%s`' % (modprefix, cls.__module__, qualname)
def stringify(annotation: Any, mode: str = 'fully-qualified-except-typing') -> str:
"""Stringify type annotation object.
:param mode: Specify a method how annotations will be stringified.
'fully-qualified-except-typing'
Show the module name and qualified name of the annotation except
the "typing" module.
'smart'
Show the name of the annotation.
'fully-qualified'
Show the module name and qualified name of the annotation.
"""
from sphinx.util import inspect # lazy loading
if mode == 'smart':
modprefix = '~'
else:
modprefix = ''
if isinstance(annotation, str):
if annotation.startswith("'") and annotation.endswith("'"):
# might be a double Forward-ref'ed type. Go unquoting.
return annotation[1:-1]
else:
return annotation
elif isinstance(annotation, TypeVar):
if (annotation.__module__ == 'typing' and
mode in ('fully-qualified-except-typing', 'smart')):
return annotation.__name__
else:
return modprefix + '.'.join([annotation.__module__, annotation.__name__])
elif inspect.isNewType(annotation):
if sys.version_info > (3, 10):
# newtypes have correct module info since Python 3.10+
return modprefix + '%s.%s' % (annotation.__module__, annotation.__name__)
else:
return annotation.__name__
elif not annotation:
return repr(annotation)
elif annotation is NoneType:
return 'None'
elif annotation in INVALID_BUILTIN_CLASSES:
return modprefix + INVALID_BUILTIN_CLASSES[annotation]
elif str(annotation).startswith('typing.Annotated'): # for py310+
pass
elif (getattr(annotation, '__module__', None) == 'builtins' and
getattr(annotation, '__qualname__', None)):
if hasattr(annotation, '__args__'): # PEP 585 generic
return repr(annotation)
else:
return annotation.__qualname__
elif annotation is Ellipsis:
return '...'
if sys.version_info >= (3, 7): # py37+
return _stringify_py37(annotation, mode)
else:
return _stringify_py36(annotation, mode)
def _stringify_py37(annotation: Any, mode: str = 'fully-qualified-except-typing') -> str:
"""stringify() for py37+."""
module = getattr(annotation, '__module__', None)
modprefix = ''
if module == 'typing' and getattr(annotation, '__forward_arg__', None):
qualname = annotation.__forward_arg__
elif module == 'typing':
if getattr(annotation, '_name', None):
qualname = annotation._name
elif getattr(annotation, '__qualname__', None):
qualname = annotation.__qualname__
else:
qualname = stringify(annotation.__origin__).replace('typing.', '') # ex. Union
if mode == 'smart':
modprefix = '~%s.' % module
elif mode == 'fully-qualified':
modprefix = '%s.' % module
elif hasattr(annotation, '__qualname__'):
if mode == 'smart':
modprefix = '~%s.' % module
else:
modprefix = '%s.' % module
qualname = annotation.__qualname__
elif hasattr(annotation, '__origin__'):
# instantiated generic provided by a user
qualname = stringify(annotation.__origin__, mode)
elif UnionType and isinstance(annotation, UnionType): # types.Union (for py3.10+)
qualname = 'types.Union'
else:
# we weren't able to extract the base type, appending arguments would
# only make them appear twice
return repr(annotation)
if getattr(annotation, '__args__', None):
if not isinstance(annotation.__args__, (list, tuple)):
# broken __args__ found
pass
elif qualname in ('Optional', 'Union'):
if len(annotation.__args__) > 1 and annotation.__args__[-1] is NoneType:
if len(annotation.__args__) > 2:
args = ', '.join(stringify(a, mode) for a in annotation.__args__[:-1])
return '%sOptional[%sUnion[%s]]' % (modprefix, modprefix, args)
else:
return '%sOptional[%s]' % (modprefix,
stringify(annotation.__args__[0], mode))
else:
args = ', '.join(stringify(a, mode) for a in annotation.__args__)
return '%sUnion[%s]' % (modprefix, args)
elif qualname == 'types.Union':
if len(annotation.__args__) > 1 and None in annotation.__args__:
args = ' | '.join(stringify(a) for a in annotation.__args__ if a)
return '%sOptional[%s]' % (modprefix, args)
else:
return ' | '.join(stringify(a) for a in annotation.__args__)
elif qualname == 'Callable':
args = ', '.join(stringify(a, mode) for a in annotation.__args__[:-1])
returns = stringify(annotation.__args__[-1], mode)
return '%s%s[[%s], %s]' % (modprefix, qualname, args, returns)
elif qualname == 'Literal':
args = ', '.join(repr(a) for a in annotation.__args__)
return '%s%s[%s]' % (modprefix, qualname, args)
elif str(annotation).startswith('typing.Annotated'): # for py39+
return stringify(annotation.__args__[0], mode)
elif all(is_system_TypeVar(a) for a in annotation.__args__):
# Suppress arguments if all system defined TypeVars (ex. Dict[KT, VT])
return modprefix + qualname
else:
args = ', '.join(stringify(a, mode) for a in annotation.__args__)
return '%s%s[%s]' % (modprefix, qualname, args)
return modprefix + qualname
def _stringify_py36(annotation: Any, mode: str = 'fully-qualified-except-typing') -> str:
"""stringify() for py36."""
module = getattr(annotation, '__module__', None)
modprefix = ''
if module == 'typing' and getattr(annotation, '__forward_arg__', None):
qualname = annotation.__forward_arg__
elif module == 'typing':
if getattr(annotation, '_name', None):
qualname = annotation._name
elif getattr(annotation, '__qualname__', None):
qualname = annotation.__qualname__
elif getattr(annotation, '__origin__', None):
qualname = stringify(annotation.__origin__) # ex. Union
else:
qualname = repr(annotation).replace('typing.', '')
if mode == 'smart':
modprefix = '~%s.' % module
elif mode == 'fully-qualified':
modprefix = '%s.' % module
elif hasattr(annotation, '__qualname__'):
if mode == 'smart':
modprefix = '~%s.' % module
else:
modprefix = '%s.' % module
qualname = annotation.__qualname__
else:
qualname = repr(annotation)
if (isinstance(annotation, typing.TupleMeta) and # type: ignore
not hasattr(annotation, '__tuple_params__')): # for Python 3.6
params = annotation.__args__
if params:
param_str = ', '.join(stringify(p, mode) for p in params)
return '%s%s[%s]' % (modprefix, qualname, param_str)
else:
return modprefix + qualname
elif isinstance(annotation, typing.GenericMeta):
params = None
if annotation.__args__ is None or len(annotation.__args__) <= 2: # type: ignore # NOQA
params = annotation.__args__ # type: ignore
elif annotation.__origin__ == Generator: # type: ignore
params = annotation.__args__ # type: ignore
else: # typing.Callable
args = ', '.join(stringify(arg, mode) for arg
in annotation.__args__[:-1]) # type: ignore
result = stringify(annotation.__args__[-1]) # type: ignore
return '%s%s[[%s], %s]' % (modprefix, qualname, args, result)
if params is not None:
param_str = ', '.join(stringify(p, mode) for p in params)
return '%s%s[%s]' % (modprefix, qualname, param_str)
elif (hasattr(annotation, '__origin__') and
annotation.__origin__ is typing.Union):
params = annotation.__args__
if params is not None:
if len(params) > 1 and params[-1] is NoneType:
if len(params) > 2:
param_str = ", ".join(stringify(p, mode) for p in params[:-1])
return '%sOptional[%sUnion[%s]]' % (modprefix, modprefix, param_str)
else:
return '%sOptional[%s]' % (modprefix, stringify(params[0], mode))
else:
param_str = ', '.join(stringify(p, mode) for p in params)
return '%sUnion[%s]' % (modprefix, param_str)
return modprefix + qualname
deprecated_alias('sphinx.util.typing',
{
'DirectiveOption': Callable[[str], Any],
},
RemovedInSphinx60Warning)
| StarcoderdataPython |
185345 | import kivy
kivy.require('2.0.0')
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.gridlayout import GridLayout
# Configuration files #
import logging
import etc.config as conf
class PiDioGrid(Screen):
def Radio(self):
print('RadioScreen')
def Bluetooth(self):
print('BluetoothScreen')
def AndIoPlay(self):
print('Android/Auto/IOS')
def Settings(self):
print('SettingsScreen')
def Auxillary(self):
print('Aux Cable')
def USB(self):
print('USB Screen')
class RadioScreen(Screen):
def __init__(self, **kwargs):
super(RadioScreen, self).__init__(**kwargs)
Config = conf.Config()
rad = Config.ch_load()
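        # ch_load() is expected to return a mapping with the station keys used
        # below, e.g. (illustrative values only):
        #   {'stat_one': '92.5', ..., 'stat_six': '104.1', 'cur_stat': '92.5'}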
#logger.debug(f'Loading Radio Info: {rad}')
self.ids.ch_one.text = rad['stat_one']
self.ids.ch_two.text = rad['stat_two']
self.ids.ch_three.text = rad['stat_three']
self.ids.ch_four.text = rad['stat_four']
self.ids.ch_five.text = rad['stat_five']
self.ids.ch_six.text = rad['stat_six']
self.ids.cur_stat.text = rad['cur_stat']
def change_ch(self, station):
self.ids.cur_stat.text = station.text
class PiDio(App):
def build(self):
""" PiDio Main Entry Point
        Sets up file logging (currently hardcoded to the DEBUG level) and builds the screen manager.
TODO:
Create 'DEBUG' options, currently hardcoded in.
"""
''' Setup logging information
Set up logging level and create instance of pilog.log.
        Setup log formatter as: %(asctime)s - %(name)s - %(levelname)s -
%(message)s
Formatting kept for readability and brevity.
TODO: Setup Debugging option in gui window, currently hardcoded as
debug level.
'''
logger = logging.getLogger('PiDioApp')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('pilog.log')
fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.debug('Created logging information at pilog.log')
''' Screen Managment
Screen Manager code, add here for more screens.
'''
scr_manage = ScreenManager()
scr_manage.add_widget(PiDioGrid(name='pidio_screen'))
scr_manage.add_widget(RadioScreen(name='radio_screen'))
return scr_manage
if __name__ == '__main__':
PiDio().run() | StarcoderdataPython |
1726788 | __author__='thiagocastroferreira'
"""
Author: <NAME>
Date: 28/02/2019
Description:
This script aims to generate the referring expressions.
ARGS:
[1] Path to the file with the Lexicalization step output
[2] Path to the file with the Discourse Ordering step output
[3] Path to the file where the output will be saved
[4] Flag to specify the model: NeuralREG -> neuralreg / OnlyName -> onlynames
    [5] Path to the trained model
    [6] Path to the folder with the REG data (used to build the vocabulary)
    EXAMPLE:
    python3 generate.py dev.lex.postprocessed dev.ordering.mapped dev.reg neuralreg reg/model1.dy data/en/reg
"""
import sys
sys.path.append('./')
sys.path.append('../')
from neuralreg import NeuralREG
import util
import re
class REG():
def __init__(self, model, model_path, path='data/en/reg'):
self.model = model.strip()
        if self.model == 'neuralreg':
config = {
'LSTM_NUM_OF_LAYERS':1,
'EMBEDDINGS_SIZE':300,
'STATE_SIZE':512,
'ATTENTION_SIZE':512,
'DROPOUT':0.2,
'GENERATION':30,
'BEAM_SIZE':5,
'BATCH_SIZE': 80,
'EPOCHS': 60,
'EARLY_STOP': 20
}
#path = 'data/en/reg'
self.neuralreg = NeuralREG(path=path, config=config)
self.neuralreg.populate(model_path)
def realize_date(self, entity):
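        # Illustrative behaviour: an ISO-style date such as '1990-07-04' is
        # rewritten as 'July 4, 1990'; anything else falls through to (False, '').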
regex='([0-9]{4})-([0-9]{2})-([0-9]{2})'
dates = re.findall(regex,entity)
if len(dates) > 0:
year, month, day = dates[0]
month = int(month)
if month == 1:
month = 'January'
elif month == 2:
month = 'February'
elif month == 3:
month = 'March'
elif month == 4:
month = 'April'
elif month == 5:
month = 'May'
elif month == 6:
month = 'June'
elif month == 7:
month = 'July'
elif month == 8:
month = 'August'
elif month == 9:
month = 'September'
elif month == 10:
month = 'October'
elif month == 11:
month = 'November'
elif month == 12:
month = 'December'
refex = '{0} {1}, {2}'.format(month, str(int(day)), str(int(year)))
return True, refex
return False, ''
def realize(self, entry, entity_map):
entry = entry.split()
pre_context = ['eos']
for i, token in enumerate(entry):
if token.strip() in entity_map:
entity = entity_map[token.strip()]
isDate, refex = self.realize_date(entity)
if not isDate:
try:
isTrain = '_'.join(entity.split()) in self.neuralreg.vocab['input']
except:
isTrain = False
if entity[0] in ['\'', '\"'] or self.model != 'neuralreg' or not isTrain:
refex = entity.replace('_', ' ').replace('\"', ' ').replace('\'', ' ')
else:
try:
refex = str(int(entity))
except ValueError:
pos_context = []
for j in range(i+1, len(entry)):
if entry[j].strip() in entity_map:
pos_context.append(entity_map[entry[j].strip()])
else:
pos_context.append(entry[j].strip().lower())
pos_context.append('eos')
candidates = self.neuralreg(pre_context=pre_context, pos_context=pos_context, entity=entity, beam=self.neuralreg.config.beam)
refex = ' '.join(candidates[0]).replace('eos', '').strip()
entry[i] = refex
pre_context.append(entity)
else:
pre_context.append(token.lower())
return entry
def __call__(self, in_path, order_path, out_path):
with open(in_path) as f:
entries = f.read().split('\n')
with open(order_path) as f:
ordered_triples = [util.split_triples(t.split()) for t in f.read().split('\n')]
entity_maps = [util.entity_mapping(t) for t in ordered_triples]
result = []
for i, entry in enumerate(entries):
print('Progress: ', round(i / len(entries), 2), end='\r')
result.append(self.realize(entry, entity_maps[i]))
# result = [self.realize(entry, entity_maps[i]) for i, entry in enumerate(entries)]
with open(out_path, 'w') as f:
out = [' '.join(predicates) for predicates in result]
f.write('\n'.join(out))
if __name__ == '__main__':
path = '/roaming/tcastrof/emnlp2019/lexicalization/surfacevocab.json'
in_path = sys.argv[1]
order_path = sys.argv[2]
out_path = sys.argv[3]
model = sys.argv[4]
model_path = sys.argv[5]
path = sys.argv[6]
model = REG(model=model, model_path=model_path, path=path)
model(in_path=in_path, order_path=order_path, out_path=out_path)
| StarcoderdataPython |
3311406 | """
A hodgepodge of utilities, most of which concern working with basic types.
"""
import logging
logger = logging.getLogger(__name__)
import itertools
import os
import shutil
import pbio.misc.shell_utils as shell_utils
def raise_deprecation_warning(function, new_module, final_version=None,
old_module="misc"):
""" Ths function raises a deprecation about a function that has been
moved to a new module.
Parameters
----------
function : string
The name of the (existing) function
new_module : string
The name of the new module containing the function
old_module: string
The name of the old module for the function. Default: misc
final_version: string
The name of the last version for which the function will be available
in the old_module, or None.
Returns
-------
None, but prints a "warn" message. If final_version is not None, then the
message will include a bit about when the method will be removed from the
current module.
"""
msg = ("[{}]: This function is deprecated. Please use the version in {} "
"instead.".format(function, new_module))
if final_version is not None:
msg_2 = (" The function will be removed from the module {} in version "
"{}".format(old_module, final_version))
msg = msg + msg_2
    logger.warning(msg)
### Parsing and writing utilities
trueStrings = ['true', 'yes', 't', 'y', '1']
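# str2bool: e.g. (illustrative) str2bool('Yes') -> True, str2bool('0') -> False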
def str2bool(string):
return (string.lower() in trueStrings)
def try_parse_int(string):
try:
return int(string)
except ValueError:
return None
def try_parse_float(string):
try:
return float(string)
except ValueError:
return None
def is_int(s):
""" This function checks whether the provided string represents and integer.
This code was adapted from: http://stackoverflow.com/questions/1265665/
Args:
s (string) : the string
Returns:
        bool : whether the string can be interpreted as an integer
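    Example (illustrative):
        >>> is_int('-42')
        True
        >>> is_int('4.2')
        False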
"""
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
def check_keys_exist(d, keys):
""" This function ensures the given keys are present in the dictionary. It
        does not otherwise validate the type, value, etc., of the keys or their
values. If a key is not present, a KeyError is raised.
The motivation behind this function is to verify that a config dictionary
read in at the beginning of a program contains all of the required values.
Thus, the program will immediately detect when a required config value is
not present and quit.
Input:
d (dict) : the dictionary
keys (list) : a list of keys to check
Returns:
        list of string: the missing keys (in practice always empty, since a KeyError is raised if any key is missing)
Raises:
KeyError: if any of the keys are not in the dictionary
"""
missing_keys = [k for k in keys if k not in d]
if len(missing_keys) > 0:
missing_keys = ' '.join(missing_keys)
msg = "The following keys were not found: " + missing_keys
raise KeyError(msg)
return missing_keys
# http://goo.gl/zeJZl
def bytes2human(n, format="%(value)i%(symbol)s"):
"""
>>> bytes2human(10000)
'9K'
>>> bytes2human(100001221)
'95M'
"""
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
# http://goo.gl/zeJZl
def human2bytes(s):
"""
>>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824
"""
# first, check if s is already a number
if is_int(s):
        return int(s)
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
letter = s[-1:].strip().upper()
num = s[:-1]
assert num.isdigit() and letter in symbols
num = float(num)
prefix = {symbols[0]:1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
return int(num * prefix[letter])
def simple_fill(text, width=60):
""" This is a simplified version of textwrap.fill. It splits the string
    into exactly equal-sized chunks of length <width>. This avoids the
pathological case of one long string (e.g., when splitting long DNA
sequences).
The code is adapted from: http://stackoverflow.com/questions/11781261
Args:
text (string) : the text to split
width (int) : the (exact) length of each line after splitting
Returns:
string : a single string with lines of length width (except
possibly the last line)
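    Example (illustrative):
        >>> simple_fill('ACGTACGTAC', width=4)
        'ACGT\nACGT\nAC'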
"""
return '\n'.join(text[i:i+width]
for i in range(0, len(text), width))
def split(delimiters, string, maxsplit=0):
""" This function splits the given string using all delimiters in the list.
The code is taken from: http://stackoverflow.com/questions/4998629/
Args:
delimiters (list of strings): the strings to use as delimiters
string (string): the string to split
maxsplit (int): the maximum number of splits (or 0 for no limit)
Returns:
list of strings: the split string
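    Example (illustrative):
        >>> split([',', ';'], 'a,b;c')
        ['a', 'b', 'c']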
"""
import re
regex_pattern = '|'.join(map(re.escape, delimiters))
return re.split(regex_pattern, string, maxsplit)
def read_commented_file(filename):
f = open(filename)
lines = []
for line in f:
line = line.partition("#")[0].strip()
if len(line) > 0:
lines.append(line)
return lines
def get_vars_to_save(to_save, to_remove=None):
    import types
    # copy the default here to avoid the mutable default argument pitfall
    to_remove = ['parser', 'args'] if to_remove is None else list(to_remove)
    # remove the system variables, modules and functions
for (var_name,value) in to_save.items():
if var_name.startswith('__'):
to_remove.append(var_name)
elif (
isinstance(value, types.FunctionType) or
isinstance(value, types.ModuleType)):
to_remove.append(var_name)
for var_name in to_remove:
if var_name in to_save:
del to_save[var_name]
return to_save
def command_line_option_to_keyword(option):
""" Convert the command line version of the option to a keyword.
Parameters
----------
option: string
The "long" command line option version
Returns
-------
keyword: string
The "keyword" version of the option. Namely, the initial "--" is
removed and all internal "-"s are replaced with "_"s.
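    Examples
    --------
    >>> command_line_option_to_keyword('--num-procs')
    'num_procs'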
"""
# first, remove the initial "--"
option = option[2:]
# and replace "-" with "_"
option = option.replace("-", "_")
return option
def get_config_argument(config, var_name, argument_name=None, default=None):
""" This function checks to see if the config dictionary contains the given
variable. If so, it constructs a command line argument based on the type
of the variable. If a default is given, then that value is used if the
variable is not present in the config dictionary.
Args:
config (dict): a dictionary, presumably containing configuration
options
var_name (string): the name of the variable to look up
argument_name (string): if present, then the command line argument
will be "--<argument_name>". Otherwise, the command line switch
will be: "--<var_name.replace(_,-)"
default (string or list): if present, then this value is used if
the variable is not in the dictionary
Returns:
string: either the empty string if var_name is not in config, or a
properly formatted command line switch, based on whether the
variable is a string or list
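    Example (illustrative):
        with config = {'chrs': ['chr1', 'chr2']}, calling
        get_config_argument(config, 'chrs') yields "--chrs chr1 chr2"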
"""
import shlex
argument = ""
if (var_name in config) or (default is not None):
# check if we have a string
var = config.get(var_name, default)
# we could have included the variable in the config with a 'None' value
if var is None:
return argument
if isinstance(var, (str, )) and (len(str(var)) > 0):
argument = shlex.quote(var)
elif isinstance(var, (int, float)) and (len(str(var)) > 0):
argument = shlex.quote(str(var))
elif len(var) > 0:
# assume this is a list
argument = " ".join(shlex.quote(str(v)) for v in var)
if argument_name is None:
argument_name = var_name.replace('_', '-')
if len(argument) > 0:
argument = "--{} {}".format(argument_name, argument)
return argument
def get_config_args_value(default_value, config_value, args_value):
""" This helper function selects which value to use based on the precedence
order: args, config, default (that is, the args value is chosen if
present, etc.)
N.B. This seems like a common pattern; there may be a better way to do
this. https://pypi.python.org/pypi/ConfigArgParse, for example.
Args:
default_value: the default value to use if neither the config nor
the args value is given
config_value: the value to use (presumably from a config file) to
use if args value is not given
args_value: the value to use, if present
Returns:
obj: the selected value, according to the precedence order
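    Example (illustrative):
        get_config_args_value(10, 20, None) returns 20, while
        get_config_args_value(10, 20, 30) returns 30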
"""
if args_value is not None:
return args_value
if config_value is not None:
return config_value
return default_value
def concatenate_files(in_files, out_file, call=True):
""" Concatenate the input files to the output file.
Parameters
----------
in_files: list of strings
The paths to the input files, which will be opened in binary mode
out_file: string
The path to the output file. This *should not* be the same as one of
the input files.
call: bool
Whether to actually perform the action
"""
in_files_str = ",".join(in_files)
msg = ("Concatenating files. Output file: {}; Input files: {}".format(
out_file, in_files_str))
logger.info(msg)
if not call:
msg = "Skipping concatenation due to --call value"
logger.info(msg)
return
with open(out_file, 'wb') as out:
for in_file in in_files:
with open(in_file, 'rb') as in_f:
shutil.copyfileobj(in_f, out)
def check_gzip_file(filename, has_tar=False, raise_on_error=True, logger=logger):
""" This function wraps a call to "gunzip -t". Optionally, it
raises an exception if the return code is not 0. Otherwise, it writes
a "critical" warning message.
This function can also test that a tar insize the gzipped file is valid.
This code is adapted from: http://stackoverflow.com/questions/2001709/
Args:
filename (str): a path to the bam file
has_tar (bool): whether to check for a valid tar inside the
gzipped file
raise_on_error (bool): whether to raise an OSError (if True) or log
a "critical" message (if false)
logger (logging.Logger): a logger for writing the message if an
error is not raised
Returns:
bool: whether the file was valid
Raises:
OSError: if gunzip does not return 0 and raise_on_error is True
"""
programs = ['gunzip', 'tar']
shell_utils.check_programs_exist(programs)
if has_tar:
cmd = "gunzip -c {} | tar t > /dev/null".format(filename)
else:
cmd = "gunzip -t {}".format(filename)
ret = shell_utils.check_call_step(cmd, raise_on_error=False)
if ret != 0:
msg = "The gzip file does not appear to be valid: {}".format(filename)
if raise_on_error:
raise OSError(msg)
logger.critical(msg)
return False
# then the file was okay
return True
def ensure_path_to_file_exists(f):
""" If the base path to f does not exist, create it. """
out_dir = os.path.dirname(f)
# if we are given just a filename, do not do anything
if len(out_dir) > 0:
msg = "Ensuring directory exists: {}".format(out_dir)
logger.debug(msg)
os.makedirs(out_dir, exist_ok=True)
def check_files_exist(files, raise_on_error=True, logger=logger,
msg="The following files were missing: ", source=None):
""" This function ensures that all of the files in the list exists. If any
do not, it will either raise an exception or print a warning, depending
on the value of raise_on_error.
Parameters
----------
files: list of strings
the file paths to check
raise_on_error: bool
whether to raise an error if any of the files are missing
logger: logging.Logger
a logger to use for writing the warning if an error is not raised
msg: string
a message to write before the list of missing files
source: string
a description of where the check is made, such as a module name. If
this is not None, it will be prepended in brackets before msg.
Returns
-------
all_exist: bool
True if all of the files existed, False otherwise
Raises
------
FileNotFoundError, if raise_on_error is True and any of the files
do not exist.
"""
missing_files = []
for f in files:
if not os.path.exists(f):
missing_files.append(f)
if len(missing_files) == 0:
return True
missing_files_str = ",".join(missing_files)
source_str = ""
if source is not None:
source_str = "[{}]: ".format(source)
msg = "{}{}{}".format(source_str, msg, missing_files_str)
if raise_on_error:
raise FileNotFoundError(msg)
else:
logger.warning(msg)
return False
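# Illustrative sketch (placeholder paths): assuming neither file exists, this
# logs "[my_module]: The following files were missing: a.txt,b.txt" and
# returns False rather than raising.
# check_files_exist(["a.txt", "b.txt"], raise_on_error=False, source="my_module")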
def remove_file(filename):
"""Remove the file, if it exists. Ignore FileNotFound errors."""
import contextlib
import os
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
def count_lines(filename):
""" This function counts the number of lines in filename.
Parameters
----------
filename : string
The path to the file. gzipped files are handled transparently
Returns
-------
num_lines : int
The number of lines in the file
"""
with open(filename) as f:
i = -1
for i, l in enumerate(f):
pass
return i + 1
### Path utilities
def abspath(*fn):
return os.path.abspath(os.path.join(os.sep, *fn))
def add_home_dir(*fn):
return os.path.join(os.path.expanduser('~'), *fn)
def listdir_full(path):
return [os.path.join(path, f) for f in os.listdir(path)]
def list_subdirs(path):
""" List all subdirectories directly under path
"""
subdirs = [
d for d in listdir_full(path) if os.path.isdir(d)
]
return subdirs
def get_basename(path):
return os.path.splitext(os.path.basename(path))[0]
def create_symlink(src, dst, remove=True, create=False, call=True):
""" Creates or updates a symlink at dst which points to src.
Parameters
----------
src: string
the path to the original file
dst: string
the path to the symlink
remove: bool
whether to remove any existing file at dst
create: bool
whether to create the directory structure necessary for dst
call: bool
whether to actually do anything
Returns
-------
None, but the symlink is created
Raises
------
FileExistsError, if a file already exists at dst and the remove flag is
False
"""
import logging
raise_deprecation_warning("misc.utils.create_symlink", "misc.shell_utils")
if not call:
return
if os.path.lexists(dst):
if remove:
msg = ("[utils.create_symlink]: file already exists at: '{}'. It "
"will be removed".format(dst))
logging.warning(msg)
os.remove(dst)
else:
msg = "A file already exists at: '{}'".format(dst)
raise FileExistsError(msg)
if create:
os.makedirs(os.path.dirname(dst), exist_ok=True)
os.symlink(src, dst)
### numpy stack helpers
def to_dense(data, row, dtype=float, length=-1):
import numpy as np
d = data.getrow(row).todense()
d = np.squeeze(np.asarray(d, dtype=dtype))
if length > 0:
d = d[:length]
# make sure we do not return a scalar
if isinstance(d, dtype):
d = np.array([d])
return d
def dict_to_dataframe(dic, key_name='key', value_name='value'):
""" Convert a dictionary into a two-column data frame using the given
column names. Each entry in the data frame corresponds to one row.
Parameters
----------
dic: dictionary
a dictionary
key_name: string
the name to use for the column for the keys
value_name: string
the name to use for the column for the values
Returns
-------
df: pd.DataFrame
a data frame in which each row corresponds to one entry in dic
"""
raise_deprecation_warning("dict_to_dataframe", "misc.pandas_utils",
"0.3.0", "misc")
import pandas as pd
df = pd.Series(dic, name=value_name)
df.index.name = key_name
df = df.reset_index()
return df
def dataframe_to_dict(df, key_field, value_field):
""" This function converts two columns of a data frame into a dictionary.
Args:
df (pd.DataFrame): the data frame
key_field (string): the field to use as the keys in the dictionary
value_field (string): the field to use as the values
Returns:
dict: a dictionary which has one entry for each row in the data
frame, with the keys and values as indicated by the fields
"""
raise_deprecation_warning("dataframe_to_dict", "misc.pandas_utils",
"0.3.0", "misc")
dic = dict(zip(df[key_field], df[value_field]))
return dic
def pandas_join_string_list(row, field, sep=";"):
""" This function checks if the value for field in the row is a list. If so,
it is replaced by a string in which each value is separated by the
given separator.
Args:
row (pd.Series or similar): the row to check
field (string): the name of the field
sep (string): the separator to use in joining the values
"""
raise_deprecation_warning("pandas_join_string_list", "misc.pandas_utils",
"0.3.0", "misc")
s = wrap_string_in_list(row[field])
return sep.join(s)
excel_extensions = ('xls', 'xlsx')
hdf5_extensions = ('hdf', 'hdf5', 'h5', 'he5')
def _guess_df_filetype(filename):
""" This function attempts to guess the filetype given a filename. It is
primarily intended for internal use, namely, for reading and writing
dataframes. The supported types and extensions used for guessing are:
excel: xls, xlsx
hdf5: hdf, hdf5, h5, he5
csv: all other extensions
Additionally, if filename is a pd.ExcelWriter object, then the guessed
filetype will be 'excel_writer'
Args:
filename (string): the name of the file for which we will guess
Returns:
string: the guessed file type. See above for the supported types
and extensions.
Imports:
pandas
"""
raise_deprecation_warning("_guess_df_filetype", "misc.pandas_utils",
"0.3.0", "misc")
import pandas as pd
msg = "Attempting to guess the extension. Filename: {}".format(filename)
logger.debug(msg)
if isinstance(filename, pd.ExcelWriter):
filetype = 'excel_writer'
elif filename.endswith(excel_extensions):
filetype = 'excel'
elif filename.endswith(hdf5_extensions):
filetype = 'hdf5'
else:
filetype = 'csv'
msg = "The guessed filetype was: {}".format(filetype)
logger.debug(msg)
return filetype
def read_df(filename, filetype='AUTO', sheet=None, **kwargs):
""" This function reads a data frame from a file. By default it attempts
to guess the type of the file based on its extension. Alternatively,
the filetype can be explicitly specified. The supported types and
extensions used for guessing are:
excel: xls, xlsx
hdf5: hdf, hdf5, h5, he5
csv: all other extensions
N.B. In principle, matlab data files are hdf5, so this function should
be able to read them. This has not been tested, though.
Args:
filename (string): the input file
filetype (string): the type of file, which determines which pandas
read function will be called. If AUTO, the function uses the
extensions mentioned above to guess the filetype.
sheet (string): for excel or hdf5 files, this will be passed
to extract the desired information from the file. Please see
pandas.read_excel and pandas.read_hdf for more information on
how values are interpreted.
kwargs: these will be passed unchanged to the read function
Returns:
pd.DataFrame: a data frame
Raises:
ValueError: if the filetype is not 'AUTO' or one of the values
mentioned above ('excel', 'hdf5', 'csv')
Imports:
pandas
"""
raise_deprecation_warning("read_df", "misc.pandas_utils",
"0.3.0", "misc")
import pandas as pd
# first, see if we want to guess the filetype
if filetype == 'AUTO':
filetype = _guess_df_filetype(filename)
# now, parse the file
if filetype == 'csv':
df = pd.read_csv(filename, **kwargs)
elif filetype == 'excel':
# N.B. newer pandas versions renamed the `sheetname` keyword to `sheet_name`
df = pd.read_excel(filename, sheetname=sheet, **kwargs)
elif filetype == 'hdf5':
df = pd.read_hdf(filename, key=sheet, **kwargs)
else:
msg = "Could not read dataframe. Invalid filetype: {}".format(filetype)
raise ValueError(msg)
return df
def write_df(df, out, create_path=False, filetype='AUTO', sheet='Sheet_1',
do_not_compress=False, **kwargs):
""" This function writes a data frame to a file of the specified type.
Unless otherwise specified, csv files are gzipped when written. By
default, the filetype will be guessed based on the extension. The
supported types and extensions used for guessing are:
excel: xls, xlsx
hdf5: hdf, hdf5, h5, he5
csv: all other extensions (e.g., "gz" or "bed")
Additionally, the filetype can be specified as 'excel_writer'. In this
case, the out object is taken to be a pd.ExcelWriter, and the df is
appended to the writer. AUTO will also guess this correctly.
N.B. The hdf5 filetype has not been tested!!!
Parameters
----------
df: pd.DataFrame
The data frame
out: string or pd.ExcelWriter
The (complete) path to the file.
The file name WILL NOT be modified. In particular, ".gz" WILL
NOT be added if the file is to be zipped. As mentioned above,
if the filetype is passed as 'excel_writer', then this is taken
to be a pd.ExcelWriter object.
create_path: bool
Whether to create the path directory structure to the file if it
does not already exist.
N.B. This will not attempt to create the path to an excel_writer
since it is possible that it does not yet have one specified.
filetype: string
The type of output file to write. If AUTO, the function uses the
extensions mentioned above to guess the filetype.
sheet: string
The name of the sheet (excel) or key (hdf5) to use when writing the
file. This argument is not used for csv. For excel, the sheet is
limited to 31 characters. It will be trimmed if necessary.
do_not_compress: bool
Whether to compress the output. This is only used for csv files.
**kwargs : other keyword arguments to pass to the df.to_XXX method
Returns
-------
None, but the file is created
"""
raise_deprecation_warning("write_df", "misc.pandas_utils",
"0.3.0", "misc")
import gzip
import pandas as pd
# first, see if we want to guess the filetype
if filetype == 'AUTO':
filetype = _guess_df_filetype(out)
# check if we want to and can create the path
if create_path:
if filetype != 'excel_writer':
ensure_path_to_file_exists(out)
else:
msg = ("[utils.write_df]: create_path was passed as True, but the "
"filetype is 'excel_writer'. This combination does not work. "
"The path to the writer will not be created.")
logger.warning(msg)
if filetype == 'csv':
if do_not_compress:
df.to_csv(out, **kwargs)
else:
with gzip.open(out, 'wt') as out:
df.to_csv(out, **kwargs)
elif filetype == 'excel':
with pd.ExcelWriter(out) as out:
df.to_excel(out, sheet[:31], **kwargs)
elif filetype == 'excel_writer':
df.to_excel(out, sheet[:31], **kwargs)
elif filetype == 'hdf5':
df.to_hdf(out, sheet, **kwargs)
else:
msg = ("Could not write the dataframe. Invalid filetype: {}".format(
filetype))
raise ValueError(msg)
def append_to_xlsx(df, xlsx, sheet='Sheet_1', **kwargs):
""" This function appends the given dataframe to the excel file if it
already exists. If the file does not exist, it will be created.
N.B. This *will not* work with an open file handle! The xlsx argument
*must be* the path to the file.
Args:
df (pd.DataFrame): the data frame to write
xlsx (string): the path to the excel file.
sheet (string): the name of the sheet, which will be truncated to
31 characters
**kwargs : other keyword arguments to pass to the df.to_XXX method
Returns:
None
Imports:
pandas
openpyxl
"""
raise_deprecation_warning("append_to_xlsx", "misc.pandas_utils",
"0.3.0", "misc")
import os
import pandas as pd
import openpyxl
# check if the file already exists
if os.path.exists(xlsx):
book = openpyxl.load_workbook(xlsx)
with pd.ExcelWriter(xlsx, engine='openpyxl') as writer:
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
write_df(df, writer, sheet=sheet, **kwargs)
else:
# then we can just create it fresh
write_df(df, xlsx, sheet=sheet, **kwargs)
###
# Functions to help with built-in (ish) data structures
###
def list_to_dict(l, f=None):
""" Convert the list to a dictionary in which keys and values are adjacent
in the list. Optionally, a function can be passed to apply to each value
before adding it to the dictionary.
Example:
list = ["key1", "value1", "key2", "value2"]
dict = {"key1": "value1", "key2": "value2"}
Parameters
----------
l: sequence
The list of items
f: function
A function to apply to each value before inserting it into the list.
For example, "float" could be passed to convert each value to a float.
Returns
-------
dict: dictionary
The dictionary, defined as described above
"""
if len(l) % 2 != 0:
msg = ("[utils.list_to_dict]: the list must contain an even number "
"of elements")
raise ValueError(msg)
if f is None:
f = lambda x: x
keys = l[::2]
values = l[1::2]
d = {k:f(v) for k, v in zip(keys, values)}
return d
def merge_sets(*set_args):
""" Given any number of sets, merge them into a single set
N.B. This function only performs a "shallow" merge. It does not handle
nested containers within the "outer" sets.
Parameters
----------
set_args: iterable of sets
The sets to merge
Returns
-------
merged_set: set
A single set containing unique elements from each of the input sets
"""
ret = {item for s in set_args for item in s}
return ret
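# Illustrative example:
# >>> sorted(merge_sets({1, 2}, {2, 3}, {4}))
# [1, 2, 3, 4]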
def merge_dicts(*dict_args):
""" Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
This is exactly taken from: http://stackoverflow.com/questions/38987
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
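# Illustrative example: later dicts win on key collisions.
# >>> merge_dicts({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
# {'a': 1, 'b': 3, 'c': 4}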
def sort_dict_keys_by_value(d):
""" Sort the keys in the dictionary by their value and return as a list
This function uses `sorted`, so the values should be able to be sorted
appropriately by that builtin function.
"""
ret = sorted(d, key=d.get)
return ret
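# Illustrative example:
# >>> sort_dict_keys_by_value({'a': 3, 'b': 1, 'c': 2})
# ['b', 'c', 'a']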
def get_type(type_string):
""" Find the type object corresponding to the fully qualified class
Parameters
----------
type_string : string
A fully qualified class name.
Example: sklearn.neighbors.regression.KNeighborsRegressor
Returns
-------
type : type
The type object specified by the string. For example, this can be used
for calls to "isinstance"
"""
import importlib
class_ = None
try:
module, class_ = type_string.rsplit(".", 1)
module = importlib.import_module(module)
class_ = getattr(module, class_)
except Exception as e:
msg = "[utils.get_type]: could not parse type: {}".format(type_string)
logger.debug(msg)
return class_
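# Illustrative example (any importable fully qualified name works; an
# unparseable or unknown name returns None):
# >>> get_type("collections.OrderedDict")
# <class 'collections.OrderedDict'>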
def is_sequence(maybe_sequence):
""" This function is a light wrapper around collections.Sequence to check
if the provided object is a sequence-like object. It also checks for
numpy arrays.
The function specifically checks whether maybe_sequence is an instance of a
string and returns False if it is a string.
Args:
maybe_sequence (object) an object which may be a list-like
Returns:
bool: whether the object is recognized as an instance of
collections.Sequence or numpy.ndarray
Imports:
collections
numpy
"""
import collections
import numpy
if isinstance(maybe_sequence, str):
return False
# N.B. in Python 3.3+ this ABC lives at collections.abc.Sequence; the
# collections.Sequence alias was removed in Python 3.10
is_sequence = isinstance(maybe_sequence, collections.Sequence)
is_ndarray = isinstance(maybe_sequence, numpy.ndarray)
return is_sequence or is_ndarray
def wrap_in_list(maybe_list):
""" This function checks if maybe_list is a list (or anything derived
from list). If not, it wraps it in a list.
The motivation for this function is that some functions return either
a single object (e.g., a dictionary) or a list of those objects. The
return value of this function can be iterated over safely.
N.B. This function would not be helpful for ensuring something is a
list of lists, for example.
Args:
maybe_list (obj): an object which may be a list
Returns:
either maybe_list if it is a list, or maybe_list wrapped in a list
"""
if isinstance(maybe_list, list):
return maybe_list
return [maybe_list]
def wrap_string_in_list(maybe_string):
""" This function checks if maybe_string is a string (or anything derived
from str). If so, it wraps it in a list.
The motivation for this function is that some functions return either a
single string or multiple strings as a list. The return value of this
function can be iterated over safely.
Args:
maybe_string (obj): an object which may be a string
Returns:
either the original object, or maybe_string wrapped in a list, if
it was a string
"""
if isinstance(maybe_string, str):
return [maybe_string]
return maybe_string
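# Illustrative examples for the two wrappers above:
# >>> wrap_in_list({'a': 1})
# [{'a': 1}]
# >>> wrap_string_in_list("abc")
# ['abc']
# >>> wrap_string_in_list(["abc", "def"])
# ['abc', 'def']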
def flatten_lists(list_of_lists):
""" This function flattens a list of lists into a single list.
Args:
list_of_lists (list): the list to flatten
Returns:
list: the flattened list
"""
return [item for sublist in list_of_lists for item in sublist]
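# Illustrative example:
# >>> flatten_lists([[1, 2], [3], [4, 5]])
# [1, 2, 3, 4, 5]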
def list_remove_list(l, to_remove):
""" This function removes items in to_remove from the list l. Note that
"not in" is used to match items in the list.
Args:
l (list): a list
to_remove (list): a list of things to remove from l
Returns:
list: a copy of l, excluding items in to_remove
"""
ret = [i for i in l if i not in to_remove]
return ret
def list_insert_list(l, to_insert, index):
""" This function inserts items from one list into another list at the
specified index. This function returns a copy; it does not alter the
original list.
This function is adapted from: http://stackoverflow.com/questions/7376019/
Example:
a_list = [ "I", "rad", "list" ]
b_list = [ "am", "a" ]
c_list = list_insert_list(a_list, b_list, 1)
print( c_list ) # outputs: ['I', 'am', 'a', 'rad', 'list']
"""
ret = list(l)
ret[index:index] = list(to_insert)
return ret
def remove_keys(d, to_remove):
""" This function removes the given keys from the dictionary d. N.B.,
"not in" is used to match the keys.
Args:
d (dict): a dictionary
to_remove (list): a list of keys to remove from d
Returns:
dict: a copy of d, excluding keys in to_remove
"""
ret = {
k:v for k,v in d.items() if k not in to_remove
}
return ret
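# Illustrative example:
# >>> remove_keys({'a': 1, 'b': 2, 'c': 3}, ['b'])
# {'a': 1, 'c': 3}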
def remove_nones(l, return_np_array=False):
""" This function removes "None" values from the given list. Importantly,
compared to other single-function tests, this uses "is" and avoids
strange behavior with data frames, lists of bools, etc.
Optionally, the filtered list can be returned as an np array.
This function returns a copy of the list (but not a deep copy).
N.B. This does not test nested lists. So, for example, a list of lists
of Nones would be unchanged by this function.
Args:
l (list-like): a list which may contain Nones
return_np_array (bool): if true, the filtered list will be wrapped
in an np.array.
Returns:
list: a list or np.array with the Nones removed
Imports:
numpy
"""
import numpy as np
ret = [i for i in l if i is not None]
if return_np_array:
ret = np.array(ret)
return ret
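# Illustrative example:
# >>> remove_nones([1, None, 2, None])
# [1, 2]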
def replace_none_with_empty_iter(iterator):
""" If it is "None", return an empty iterator; otherwise, return iterator.
The purpose of this function is to make iterating over results from
functions which return either an iterator or None cleaner.
Parameters
----------
iterator: None or some object
Returns
-------
empty_iterator: list of size 0
If iterator is None
--- OR ---
iterator:
The original iterator, if it was not None
"""
if iterator is None:
return []
return iterator
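# Illustrative example:
# >>> list(replace_none_with_empty_iter(None))
# []
# >>> list(replace_none_with_empty_iter(iter([1, 2])))
# [1, 2]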
def open(filename, mode='r', compress=False, is_text=True, *args, **kwargs):
""" Return a file handle to the given file.
The only difference between this and the standard open command is that this
function transparently opens gzipped files, if specified. If a gzipped file is
to be opened, the mode is adjusted according to the "is_text" flag.
Parameters
---------
filename: string
the file to open
mode: string
the mode to open the file. This *should not* include
"t" for opening gzipped text files. That is handled by the
"is_text" flag.
compress: bool
whether to open the file as a gzipped file
is_text: bool
for gzip files, whether to open in text (True) or
binary (False) mode
args, kwargs
Additional arguments are passed to the call to open
Returns
-------
file_handle: the file handle to the file
"""
import builtins
if compress:
import gzip
if is_text:
mode = mode + "t"
out = gzip.open(filename, mode, *args, **kwargs)
else:
out = builtins.open(filename, mode, *args, **kwargs)
return out
def grouper(n, iterable):
""" This function returns lists of size n of elements from the iterator. It
does not pad the last group.
The code was taken directly from stackoverflow:
http://stackoverflow.com/questions/3992735/
"""
iterable = iter(iterable)
return iter(lambda: list(itertools.islice(iterable, n)), [])
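# Illustrative example: the last group is not padded.
# >>> list(grouper(2, range(5)))
# [[0, 1], [2, 3], [4]]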
def nth(iterable, n, default=None):
""" Returns the nth item or a default value.
This code is mildly adapted from the documentation.
N.B. This returns the *base-0* nth item in the iterator. For example,
nth(range(10), 1) returns 1.
"""
return next(itertools.islice(iterable, n, None), default)
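# Illustrative example (base-0 indexing; the default is returned when the
# iterable is exhausted first):
# >>> nth(range(10), 3)
# 3
# >>> nth(range(3), 7, default=-1)
# -1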
def dict_product(dicts):
""" Create an iterator from a GridSearchCV-like dictionary
This code is taken directly from stackoverflow:
http://stackoverflow.com/a/40623158/621449
"""
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
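# Illustrative example with a GridSearchCV-style parameter grid:
# >>> list(dict_product({'alpha': [0.1, 1.0], 'fit_intercept': [True]}))
# [{'alpha': 0.1, 'fit_intercept': True}, {'alpha': 1.0, 'fit_intercept': True}]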
def call_func_if_not_exists(func, out_files, *args, in_files=[], overwrite=False,
call=True, raise_on_error=True, file_checkers=None, to_delete=[],
keep_delete_files=False, **kwargs):
"""Call a python function with extra checks on input/output files, etc.
This is adapted from shell_utils.call_if_not_exists, see this function
for more details.
N.B. The function must not have a return value, so this should be used with
functions that perform writing operations, etc. In particular, the first two
positional arguments of func are an input file and an output file, respectively.
As they are passed as lists, func is called once per input/output pair, so the
two lists must have matching lengths.
Arguments:
func (function object): the function to execute
out_files (string or list of strings): path to output files to
check. If they do not exist, then they will be created.
in_files (list of strings): paths to input files to check
before calling the function
overwrite (bool): whether to overwrite the files
call (bool): whether to call the function, regardless of whether the
file exists or not
raise_on_error (bool): whether to raise an exception
file_checkers (dict-like): a mapping from a file name to a function
which is used to verify that file. The function should return
True to indicate the file is okay or False if it is corrupt. The
functions must also accept "raise_on_error" and "logger"
keyword arguments.
to_delete (list of strings): paths to files to delete if the function
is executed successfully
keep_delete_files (bool): if this value is True, then the to_delete
files will not be deleted, regardless of whether the command
succeeded or not. If the call does not complete, these are never deleted.
Return:
None
Import:
os
shlex
"""
import os
import shlex
func_name = func.__name__
args_str = [str(arg) for arg in args]
args_str = ",".join(args_str)
kwargs_str = ["{}={}".format(key, value) for key, value in kwargs.items()]
kwargs_str = ",".join(kwargs_str)
# join the non-empty pieces so the log messages read "args,kwargs"
all_args = ",".join(s for s in (args_str, kwargs_str) if s)
# make sure we are working with a list
if isinstance(out_files, str):
out_files = [out_files]
# then check number of i/o files
if len(in_files) != len(out_files):
msg = "Expected same number of input and output files!"
raise ValueError(msg)
# check if the input files exist
missing_in_files = []
for in_f in in_files:
# remove surrounding quotes if file name has a space, pass it through shell
in_f = shlex.split(in_f)[0]
if not os.path.exists(in_f):
missing_in_files.append(in_f)
if len(missing_in_files) > 0:
msg = "Some input files {} are missing. Skipping call: \n{}:{}".format(missing_in_files,
func_name, all_args)
logger.warning(msg)
return
# check if the output files exist
all_out_exists = False
if out_files is not None:
all_out_exists = all([os.path.exists(of) for of in out_files])
all_valid = True
if overwrite or not all_out_exists:
# create necessary paths and
if out_files is not None:
[os.makedirs(os.path.dirname(x), exist_ok=True) for x in out_files]
# make the call
if call:
for in_f, out_f in zip(in_files, out_files):
func(in_f, out_f, *args, **kwargs)
else:
msg = "skipping due to [--do-not-call] flag"
logger.info(msg)
if (not call) or (file_checkers is None):
# do not check the files if we are not calling anything
pass
else:
# check the files
for filename, checker_function in file_checkers.items():
msg = "Checking file for validity: {}".format(filename)
logger.debug(msg)
is_valid = checker_function(filename, logger=logger,
raise_on_error=False)
if not is_valid:
all_valid = False
msg = "File {} appears to be corrupted".format(filename)
if raise_on_error:
raise OSError(msg)
else:
logger.critical(msg)
else:
msg = "All output files {} already exist. Skipping call: \n{}:{}".format(out_files,
func_name, all_args)
logger.warning(msg)
if (not keep_delete_files) and all_valid:
# the command succeeded, so delete the specified files
for filename in to_delete:
if os.path.exists(filename):
msg = "Removing file: {}".format(filename)
logger.info(msg)
os.remove(filename)
update_document_examples = {
"update_document_new_field": {
"summary": "Add a field to a document",
"description": "Add one additional field to a elasticsearch document",
"value": {
"doc": {
"newField": "newFieldValue"
}
}
},
"update_document_script_remove_field": {
"summary": "Remove a field from a document",
"description": "Remove a field from one elasticsearch document",
"value": {
"script": "ctx._source.remove('newField')"
}
},
"update_document_add_list": {
"summary": "Add a list to a document",
"description": "Add a list to an existing document",
"value": {
"doc": {
"newList": [1, 3, 45, 777]
}
}
},
"update_document_add_item_to_list_if_not_present": {
"summary": "Add an item to a list if not already present",
"description":
"Add an item to a list if not already present, note how the version number is not incremented and the result is `noop` when the value already exists in the list",
"value": {
"script": {
"source":
"""/* first check to see if value is in the list */ if (ctx._source.newList.contains(params.tag) != true) /* add it if not present */ {ctx._source.newList.add(params.tag)} else { ctx.op = 'none' }""",
"lang": "painless",
"params": {
"tag": 10000000000
}
}
}
}
}
update_document_by_query_examples = {
"update_document_by_query_add_new_field": {
"summary": "Add a field to documents that meet query",
"description":
"Add one additional field to all documents that meet the query",
"value": {
"script": {
"source": "ctx._source.bulkAddedField = 'updateByQuery'",
"lang": "painless"
},
"query": {
"match": {
"subregion": "Asia"
}
}
}
},
"update_document_by_query_remove_new_field": {
"summary": "Remove a field from a document based on a query",
"description": "Remove one field to all documents that meet the query",
"value": {
"script": {
"source": "ctx._source.remove('bulkAddedField')",
"lang": "painless"
},
"query": {
"match": {
"subregion": "Asia"
}
}
}
}
}
"""Remove points and cells from a mesh
which are closest to a specified point."""
from vedo import *
pu = Mesh(dataurl+'apple.ply').c('lightgreen').bc('tomato').lw(0.1)
pt = [1, 0.5, 1]
ids = pu.closestPoint(pt, N=200, returnPointId=True)
pu.deletePoints(ids, renamePoints=1)
show(Point(pt), pu, __doc__, axes=1).close()
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# $Id$
"""Toolbox for images from the Cornell SLAC Pixel Array Detector
(CSpad).
XXX Better named cspad_common?
XXX Read out detector temperature (see Hart et al., 2012)?
"""
from __future__ import absolute_import, division, print_function
from six.moves import range
import math
import numpy
import os
import time
from libtbx import easy_pickle
from scitbx.array_family import flex
from xfel.cxi.cspad_ana.parse_calib import Section
import six
from six.moves import zip
__version__ = "$Revision$"
# The CAMP and CSpad counters are both 14 bits wide (Strüder et al.,
# 2010; Philipp et al., 2007), which means the physical limit is 2**14 - 1.
# However, in practice, when the pixels are in the low gain mode, after
# correcting by a gain value of around 6.87, the pixels tend to saturate
# around 90000. See xpp experiment xppe0314, run 184 as evidence.
cspad_saturated_value = 90000
# The dark average for the CSPAD detector is around 1100-1500. A pixel
# histogram of a minimum projection of an uncorrected (raw) light run shows
# a mostly flat tail up to ~800 ADU with a few bumps in the tail which
# represent true underloads. Assume a dark average of 1200 ADU. After dark
# subtraction, 800 - 1200 gives a minimum trusted value of -400. Reject
# pixels less than this.
cspad_min_trusted_value = -400
# As long as the mask value is outside of the trusted range, the pixel should
# be ignored by any downstream software.
cspad_mask_value = -100000
# The side length of a square quadrant from the old XtcExplorer code.
# XXX This should be obsoleted!
npix_quad = 850
# The pixel size in mm. The pixel size is fixed and square, with side
# length of 110 µm (Philipp et al., 2007). XXX Should really clarify
# this with Sol and Chris.
#
# XXX Andor: 13.5 µm square, CAMP: 75 µm, square (Strüder et al.,
# 2010)
pixel_size = 110e-3
# origin of section in quad coordinate system. x-position
# corresponds to column number. XXX Note/reference the source!
# XXX This should be obsoleted!
xpos_sec2x1 = [[ 414, 626, 0, 0, 213, 1, 418, 419], # 2:5 were not measured
[ 421, 634, 0, 0, 213, 1, 424, 425],
[ 417, 630, 0, 1, 212, 0, 425, 426],
[ 416, 630, 0, 0, 213, 1, 420, 421]] # 2:5 were not measured
# y-position correspond to maxrows - row number
ypos_sec2x1 = [[ 0, 0, 214, 1, 425, 425, 615, 402], # 2:5 were not measured
[ 0, 0, 214, 1, 425, 425, 615, 402],
[ 0, 0, 215, 3, 431, 431, 616, 403],
[ 0, 0, 214, 1, 425, 425, 615, 403]] # 2:5 were not measured
def address_split(address, env=None):
"""The address_split() function splits an address into its four
components. Address strings are of the form
detector-detectorID|device-deviceID, where the detector must be in
dir(xtc.DetInfo.Detector) and the device must be in
dir(xtc.DetInfo.Device).
@param address Full data source address of the DAQ device
@param env Optional env to dereference an alias into an address
@return Four-tuple of detector name, detector ID, device, and
device ID
"""
import re
# pyana
m = re.match(
r"^(?P<det>\S+)\-(?P<det_id>\d+)\|(?P<dev>\S+)\-(?P<dev_id>\d+)$", address)
if m is not None:
return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
# psana
m = re.match(
r"^(?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)$", address)
if m is not None:
return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
# psana DetInfo string
m = re.match(
r"^DetInfo\((?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)\)$", address)
if m is not None:
return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
if env is not None:
# Try to see if this is a detector alias, and if so, dereference it. Code from psana's Detector/PyDetector.py
amap = env.aliasMap()
alias_src = amap.src(address) # string --> DAQ-style psana.Src
# if it is an alias, look up the full name
if amap.alias(alias_src) != '': # alias found
address = str(alias_src)
return address_split(address)
return (None, None, None, None)
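# Illustrative examples using the pyana and psana address formats described in
# the docstring (the detector/device names are placeholders):
# >>> address_split("CxiDs1-0|Cspad-0")
# ('CxiDs1', '0', 'Cspad', '0')
# >>> address_split("CxiDs1.0:Cspad.0")
# ('CxiDs1', '0', 'Cspad', '0')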
def cbcaa(config, sections):
"""The cbcaa() function uses on-disk calibration data to estimate
the beam centre and the active detector areas. The beam centre is
approximated as the average of the four ASIC corners closest to the
detector centre. That is the first corner of the section 1 in every
quadrant. Note that first corner index is vertical coordinate,
second index is the horizontal coordinate. XXX Construct the active
areas in "spotfinder format", i.e. opposing corners. XXX This is a
really bad function name! XXX The beam centre may be extracted from
the ebeam object?
@param config XXX
@param sections XXX Directory with calibration information
@return Tuple of 2D beam centre, and active areas in
"spotfinder format"
"""
aa = flex.int()
if (sections is None):
# The active areas of the detector, (UL_slow, UL_fast, LR_slow,
# LR_fast) A two-by-one is 185-by-392 pixels with a 4-pixel gap.
# An ASIC is 185-by-194 pixels. XXX Still need to sort out the
# inclusive/exclusive detail. Upper-left corner is inclusive,
# lower-right corner is exclusive. XXX Should subtract one from
# x, y on lower-right corner and verify with zoom. XXX All this
# should probably go by now
for q in range(4): # loop over quadrants
for i in range(8): # loop over two-by-one:s XXX variable name!
# Skip this two-by-one if it is missing.
if not (config.roiMask(q) & 0x1 << i):
continue
# XXX Note the different coordinate systems in use here!
xpos = xpos_sec2x1[q][i] # x-value of lower, left corner
ypos = 850 - ypos_sec2x1[q][i] # y-value of lower, left corner
if (i == 0 or i == 1 or i == 4 or i == 5):
UL1_x = xpos
UL2_x = xpos
UL1_y = ypos - 194 - 4 - 194
UL2_y = ypos - 194
LR1_x = UL1_x + 185
LR2_x = UL2_x + 185
LR1_y = UL1_y + 194
LR2_y = UL2_y + 194
elif (i == 2 or i == 3 or i == 6 or i == 7):
UL1_x = xpos
UL2_x = xpos + 194 + 4
UL1_y = ypos - 185
UL2_y = ypos - 185
LR1_x = UL1_x + 194
LR2_x = UL2_x + 194
LR1_y = UL1_y + 185
LR2_y = UL2_y + 185
# Quadrant rotations, counter-clockwise. Zeroth quadrant
# needs no special action.
if (q == 0):
pass
elif (q == 1):
UL1_x, UL1_y = 850 + 850 - UL1_y, UL1_x
LR1_x, LR1_y = 850 + 850 - LR1_y, LR1_x
UL2_x, UL2_y = 850 + 850 - UL2_y, UL2_x
LR2_x, LR2_y = 850 + 850 - LR2_y, LR2_x
UL1_x, LR1_x = LR1_x, UL1_x
UL2_x, LR2_x = LR2_x, UL2_x
elif (q == 2):
UL1_x, UL1_y = 850 + 850 - UL1_x, 850 + 850 - UL1_y
LR1_x, LR1_y = 850 + 850 - LR1_x, 850 + 850 - LR1_y
UL2_x, UL2_y = 850 + 850 - UL2_x, 850 + 850 - UL2_y
LR2_x, LR2_y = 850 + 850 - LR2_x, 850 + 850 - LR2_y
UL1_x, UL1_y, LR1_x, LR1_y = LR1_x, LR1_y, UL1_x, UL1_y
UL2_x, UL2_y, LR2_x, LR2_y = LR2_x, LR2_y, UL2_x, UL2_y
elif (q == 3):
UL1_x, UL1_y = UL1_y, 850 + 850 - UL1_x
LR1_x, LR1_y = LR1_y, 850 + 850 - LR1_x
UL2_x, UL2_y = UL2_y, 850 + 850 - UL2_x
LR2_x, LR2_y = LR2_y, 850 + 850 - LR2_x
UL1_y, LR1_y = LR1_y, UL1_y
UL2_y, LR2_y = LR2_y, UL2_y
# This is row-major matrix layout; FAST <=> x, SLOW <=> y.
aa.extend(flex.int([UL1_y, UL1_x, LR1_y, LR1_x]))
aa.extend(flex.int([UL2_y, UL2_x, LR2_y, LR2_x]))
# The beam centre is estimated as the centre of the image.
return ([npix_quad, npix_quad], aa)
# Old way of computing beam center, phased out 05/19/15
#bc = [0, 0]
# XXX Make up a quadrant mask for the emission detector. Needs to
# be checked!
if len(sections) <= 1:
q_mask = 1
else:
q_mask = config.quadMask()
for q in range(len(sections)):
if (not((1 << q) & q_mask)):
continue
# Old way of computing beam center, phased out 05/19/15
#corner = sections[q][1].corners(True)[0]
#bc = [bc[0] + corner[1] / len(sections),
# bc[1] + corner[0] / len(sections)]
# XXX Make up section mask for the emission detector. Needs to be
# checked!
try:
import _pdsdata
types = _pdsdata.cspad2x2.ConfigV1, _pdsdata.cspad2x2.ConfigV2
except ImportError:
import psana
types = psana.CsPad2x2.ConfigV1, psana.CsPad2x2.ConfigV2
if len(sections) == 1 and type(config) in types:
s_mask = config.roiMask()
else:
s_mask = config.roiMask(q)
for s in range(len(sections[q])):
if (not((1 << s) & s_mask)):
continue
c = sections[q][s].corners_asic()
aa.extend(flex.int(c[0]))
aa.extend(flex.int(c[1]))
# The beam center was defined above as the center of the innermost 4 sensors. Recently,
# that center has drifted too much from the true image center (Spring 2015). So, here we
# use the true image center instead.
return [882.5,882.5], aa
def CsPad2x2Image(data, config, sections):
"""The CsPad2x2Image() function assembles a two-dimensional image
from the Sc1 detector readout in @p data.
@param data Detector readout from XTC stream
@param config XXX
@param sections XXX Directory with calibration information
@return Assembled detector image
"""
assert (data.shape[2] == 2)
det = numpy.zeros((2 * 185, 2 * 194 + 3))
# XXX config.sections is now a function returning a list? Since the
# masking was disabled in December commenting out this bit does not
# cause any further breakage XXX Does this still work for runs 4 and
# 5?
# s = config.sections
# mask = map(s, range(2))
# For this detector, the quadrant index is always zero.
q_idx = 0
for s in range(2):
# XXX DAQ misconfiguration? This mask appears not to work
# reliably for the Sc1 detector.
# if (s not in mask[q_idx]):
# continue
asics = numpy.vsplit(numpy.rot90(data[:, :, s], -1), 2)
gap = numpy.zeros((3, 185), dtype = data.dtype)
s_data = numpy.vstack((asics[0], gap, asics[1]))
angle = sections[q_idx][s].angle
center = sections[q_idx][s].center
rplace(det, s_data, angle, center)
return (det)
def evt_get_quads(address, evt, env):
try:
# pyana
quads = evt.getCsPadQuads(address, env)
except AttributeError:
# psana
from psana import Source, CsPad
src = Source(address)
cspad = evt.get(CsPad.DataV2, src)
if cspad is None:
return None
quads = [cspad.quads(i) for i in range(cspad.quads_shape()[0])]
return quads
def CsPadDetector(address, evt, env, sections, right=True, quads=None):
"""The CsPadDetector() function assembles a two-dimensional image
from the Ds1 detector readout in @p data3d and the calibration
information in @p sections. XXX General question: do
variable/function names make sense?
@param address Full data source address of the DAQ device
@param evt Event data object, a configure object
@param env Environment object
@param sections XXX Directory with calibration information
@param right @c True to restrict rotations to right angles
@return Assembled detector image
"""
device = address_split(address)[2]
if device is None or device != 'Cspad':
return None
# Get a current configure object for the detector
config = getConfig(address, env)
if config is None:
return None
# For consistency, one could/should verify that len(quads) is equal
# to len(sections).
if quads is None:
quads = evt_get_quads(address, evt, env)
if quads is None or len(quads) != len(sections):
return None
# This is from <NAME>'s
# HDF5Explorer/src/ConfigCSpad.py, which uses a detector size of
# 1765-by-1765 pixels.
extra_space = (1765 - 2 * Section.q_size[0],
1765 - 2 * Section.q_size[1])
# Start out with a blank image of the detector. This assumes that
# the type of the first section in the first quadrant is identical
# to the type of all the other sections.
det = numpy.zeros((2 * Section.q_size[0] + extra_space[0],
2 * Section.q_size[1] + extra_space[1]),
dtype=quads[0].data()[0].dtype)
### need to swap the quadrants for data collected mid October, 2013
evttime = time.gmtime(evt_time(evt)[0])
swap = evttime.tm_year == 2013 and evttime.tm_mon == 10 and evttime.tm_mday >= 20 and evttime.tm_mday <= 25
for quad in quads:
q_data = quad.data()
q_idx = quad.quad()
if swap:
q_idx = [0,1,3,2][q_idx]
try:
# pyana
# example: if the third sensor (2x1) is disabled, q_mask = [0,1,3,4,5,6,7]
q_mask = config.sections(q_idx)
except AttributeError:
# psana
# as above, using config.roiMask, a bitstring where the ith bit is true if the ith sensor is active. x << y means bitwise shift
# x, y times, and & is the bitwise AND operator
q_mask = [i for i in range(len(sections[q_idx])) if 1 << i & config.roiMask(q_idx)]
# For consistency, assert that there is data for each unmasked
# section.
assert len(q_data) == len(q_mask)
for (s_data, s_idx) in zip(q_data, q_mask):
# Rotate the section from the XTC-stream by -90 degrees to
# conform to the "standing up" convention used by the
# calibration data, and insert a 3-pixel gap between the ASIC:s.
# This requires the horizontal dimension of the unrotated
# section to be even.
assert s_data.shape[1] % 2 == 0
asics = numpy.vsplit(numpy.rot90(s_data, -1), 2)
gap = numpy.zeros((3, s_data.shape[0]), dtype=s_data.dtype)
s_data = numpy.vstack((asics[0], gap, asics[1]))
# Place the section in the detector image, either by forcing
# rotation to right angles or by interpolating.
angle = sections[q_idx][s_idx].angle
center = sections[q_idx][s_idx].center
if right:
rplace(det, s_data, angle, center)
else:
iplace(det, s_data, angle, center)
return det
def CsPadElement(data3d, qn, config):
"""Construct one image for each quadrant, each with 8 sections from
a data3d = 3 x 2*194 x 185 data array. This function was originally
written by <NAME> for pyana's XtcExplorer module. XXX
Documentation!
"""
# If any sections are missing, insert zeros.
mask = [config.sections(i) for i in range(4)]
if (len(data3d) < 8):
zsec = numpy.zeros((185, 388), dtype = data3d.dtype)
for i in range(8) :
if (i not in mask[qn]):
data3d = numpy.insert(data3d, i, zsec, axis = 0)
pairs = []
for i in range(8) :
# Insert gap between ASIC:s in the 2x1.
asics = numpy.hsplit(data3d[i], 2)
gap = numpy.zeros((185, 4), dtype = data3d.dtype)
pair = numpy.hstack((asics[0], gap, asics[1]))
# Sections 2,3 and 6,7 are as is. The others need some rotation,
# implemented as a matrix transposition here.
if (i == 0 or i == 1):
pair = pair[:, ::-1].T
if (i == 4 or i == 5):
pair = pair[::-1, :].T
pairs.append(pair)
# Make the array for this quadrant, and insert the 2x1 sections.
quadrant = numpy.zeros((npix_quad, npix_quad), dtype = data3d.dtype)
for sec in range(8):
nrows, ncols = pairs[sec].shape
# x,y in quadrant coordinate system
xpos = xpos_sec2x1[qn][sec]
ypos = ypos_sec2x1[qn][sec]
colp = xpos
rowp = npix_quad - ypos
quadrant[(rowp - nrows):rowp, colp:(colp + ncols)] = \
pairs[sec][0:nrows, 0:ncols]
# Finally, rotate the quadrant as needed.
if (qn > 0):
quadrant = numpy.rot90(quadrant, 4 - qn)
return quadrant
def dpack(active_areas=None,
address=None,
beam_center_x=None,
beam_center_y=None,
ccd_image_saturation=None,
data=None,
distance=None,
pixel_size=pixel_size,
saturated_value=None,
timestamp=None,
wavelength=None,
xtal_target=None,
min_trusted_value=None):
"""XXX Check completeness. Should fill in sensible defaults."""
# Must have data.
if data is None:
return None
# Create a time stamp of the current time if none was supplied.
if timestamp is None:
timestamp = evt_timestamp()
# For unknown historical reasons, the dictionary must contain both
# CCD_IMAGE_SATURATION and SATURATED_VALUE items.
if ccd_image_saturation is None:
if saturated_value is None:
ccd_image_saturation = cspad_saturated_value
else:
ccd_image_saturation = saturated_value
if saturated_value is None:
saturated_value = ccd_image_saturation
# Use a minimum value if provided for the pixel range
if min_trusted_value is None:
min_trusted_value = cspad_min_trusted_value
# By default, the beam center is the center of the image. The slow
# (vertical) and fast (horizontal) axes correspond to x and y,
# respectively.
if beam_center_x is None:
beam_center_x = pixel_size * data.focus()[1] / 2
if beam_center_y is None:
beam_center_y = pixel_size * data.focus()[0] / 2
# By default, the entire detector image is an active area. There is
# no sensible default for distance nor wavelength. XXX But setting
# wavelength to zero may be disastrous?
if active_areas is None:
# XXX Verify order with non-square detector
active_areas = flex.int((0, 0, data.focus()[0], data.focus()[1]))
if distance is None:
distance = 0
if wavelength is None:
wavelength = 0
# The size must match the image dimensions. The length along the
# slow (vertical) axis is SIZE1, the length along the fast
# (horizontal) axis is SIZE2.
return {'ACTIVE_AREAS': active_areas,
'BEAM_CENTER_X': beam_center_x,
'BEAM_CENTER_Y': beam_center_y,
'CCD_IMAGE_SATURATION': ccd_image_saturation,
'DATA': data,
'DETECTOR_ADDRESS': address,
'DISTANCE': distance,
'PIXEL_SIZE': pixel_size,
'SATURATED_VALUE': saturated_value,
'MIN_TRUSTED_VALUE': min_trusted_value,
'SIZE1': data.focus()[0],
'SIZE2': data.focus()[1],
'TIMESTAMP': timestamp,
'SEQUENCE_NUMBER': 0, # XXX Deprecated
'WAVELENGTH': wavelength,
'xtal_target': xtal_target}
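# Minimal usage sketch (all values below are placeholders, not real metrology):
# >>> img = flex.int(flex.grid(185, 194), 0)
# >>> d = dpack(data=img, distance=100.0, wavelength=1.32,
# ... address='CxiDs1-0|Cspad-0')
# >>> (d['SIZE1'], d['SIZE2'])
# (185, 194)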
def hdf5pack(hdf5_file,
active_areas=None,
address=None,
attenuation=None,
beam_center_x=None,
beam_center_y=None,
ccd_image_saturation=None,
data=None,
distance=None,
pixel_size=None,
pulse_length=None,
saturated_value=None,
timestamp=None,
wavelength=None,
xtal_target=None):
"""Similar but far from identical to the HDF5 output from CASS. XXX
Poor diagnostics--we don't know if it failed or not.
@note Does not include the deprecated SEQUENCE_NUMBER attribute.
While some redundant items are written in order to keep the
HDF5 synchronised to the pickle format, neither SIZE1 nor
SIZE2 are included.
"""
# Need this because we cannot write None values to the HDF5 file.
if address is None:
address = repr(None)
if attenuation is None:
attenuation = 0
if xtal_target is None:
xtal_target = repr(None)
if pixel_size is None:
pixel_size = globals()['pixel_size'] # XXX CSpad-specific!
if pulse_length is None:
pulse_length = 0
d = dpack(address=address,
active_areas=active_areas,
beam_center_x=beam_center_x,
beam_center_y=beam_center_y,
ccd_image_saturation=ccd_image_saturation,
data=data,
distance=distance,
pixel_size=pixel_size,
saturated_value=saturated_value,
timestamp=timestamp,
wavelength=wavelength,
xtal_target=xtal_target)
if d is None:
return
grp_event = hdf5_file.create_group(d['TIMESTAMP'])
grp_detector = grp_event.create_group(address)
for (key, value) in six.iteritems(d):
if key == 'ACTIVE_AREAS':
grp_detector.create_dataset(key, data=value.as_numpy_array())
elif key == 'DATA':
# Compress the image data with gzip at the default level (4).
# CASS seems to use maximum compression level (9), which gives a
# moderate decrease in file size at the price of much longer
# running time.
grp_detector.create_dataset(
key, compression='gzip', data=value.as_numpy_array())
else:
grp_event.create_dataset(key, data=[value])
grp_event.create_dataset('ATTENUATION', data=[attenuation])
grp_event.create_dataset('PULSE_LENGTH', data=[pulse_length])
def write_tiff(d, dirname=None, basename=None):
"""The write an image tiff. Basic implementation no frills, no metadata
"""
if basename is None:
basename = ""
if dirname is None:
dirname = "."
if not os.path.isdir(dirname):
os.makedirs(dirname)
# The output path should not contain any funny characters which may
# not work in all environments. This constructs a sequence number à
# la evt_seqno() from the dictionary's timestamp.
t = d['TIMESTAMP']
s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
path = os.path.join(dirname, basename + s + '.tiff')
#assure that the 2-byte data are within the unsigned limits
selecthi = d["DATA"]>65535
d["DATA"].set_selected(selecthi,0)
selectlo = d["DATA"]<0
d["DATA"].set_selected(selectlo,0)
idata = d["DATA"].as_numpy_array()
idata = idata.astype("uint16")
import cv2 # psdm install should have this extension
cv2.imwrite(path,idata)
return path
def dwritef(d, dirname=None, basename=None):
"""The dwritef() function pickles the dictionary pointed to by @p d
to the file whose directory and filename portions are pointed to by
@p dirname and @p basename, respectively. The directory at @p
dirname, as well as any intermediate directories, are recursively
created if they do not already exist. The name of the written file
is the concatenation of the @p basename parameter and a sequence
number derived from the timestamp in the dictionary, @p d.
@param d Dictionary, as created by e.g. dpack()
@param dirname Directory portion of output file
@param basename Filename prefix of output file
@return Path of output file
"""
if basename is None:
basename = ""
if dirname is None:
dirname = "."
if not os.path.isdir(dirname):
os.makedirs(dirname)
# The output path should not contain any funny characters which may
# not work in all environments. This constructs a sequence number à
# la evt_seqno() from the dictionary's timestamp.
t = d['TIMESTAMP']
s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
# XXX Several non-pyana tools rely on the .pickle extension. Fix
# those before migrating to .pkl.
path = os.path.join(dirname, basename + s + '.pickle')
easy_pickle.dump(path, d)
return path
def dwritef2(obj, path):
"""The dwritef2() function writes the object @p obj to the Python
pickle file whose path is pointed to by @p path. Non-existent
directories of @p path are created as necessary.
@param obj Object to write, as created by e.g. dpack()
@param path Path of output file
@return Path of output file
"""
dirname = os.path.dirname(path)
if dirname != "" and not os.path.isdir(dirname):
os.makedirs(dirname)
easy_pickle.dump(path, obj)
return path
def pathsubst(format_string, evt, env, **kwargs):
"""The pathsubst() function provides variable substitution and value
formatting as described in PEP 3101. The function returns a copy of
the input string, @p format_string, with field names replaced by
their appropriate values as determined by either @p evt, @p env, or
the user-supplied keyworded arguments, @p kwargs.
chunk: Chunk number or -1 if unknown.
epoch: Time of the event, in number of seconds since midnight,
1 January 1970 UTC (Unix time), to millisecond
precision.
experiment: Experiment name, or empty string if unknown.
expNum: Experiment number or -1 if unknown.
instrument: Instrument name, or empty string if unknown.
iso8601: The time of the event as an extended human-readable ISO
8601 timestamp, to millisecond precision, or the empty
string if unknown. Not suitable for file names, because
it contains characters that do not play well with
certain file systems (e.g. NTFS).
jobName: Job name.
jobNameSub: Combination of job name and subprocess index as a string
which is unique for all subprocesses in a job.
run: Run number or -1 if unknown.
seqno: Sequence number or -1 if unknown.
stream: Stream number or -1 if unknown.
subprocess: Subprocess number. This is a non-negative integer in
the range [0, nproc) when multiprocessing, or -1 for a
single-process job.
user: The "login name" of the user.
In addition to the standard conversion flags, the pathsubst()
function implements the <code>!u</code> and <code>!l</code> flags
for conversion to upper- and lower-case strings, respectively.
Literal braces can be escaped by doubling, i.e. <code>{</code> is
written <code>{{</code>, and <code>}</code> as <code>}}</code>.
@note Chunk number, expNum, run number, and stream number are
determined from the input XTC file name. If a file does not
adhere to the standard format, it may not be possible to
determine these quantities.
@note String substitution requires PSDM pyana version 0.10.3 or
greater.
@param format_string String containing replacement fields
@param evt Event data object, a configure object
@param env Environment object
@param kwargs User-supplied replacements, on the form
<code>field_name=value</code>
@return Copy of @p format_string, with replacement
fields substituted by their appropriate values
"""
from getpass import getuser
from string import Formatter
class CaseFormatter(Formatter):
def convert_field(self, value, conversion):
# Extends the stock Formatter class with lower() and upper()
# conversion types.
if conversion == 'l':
return str(value).lower()
elif conversion == 'u':
return str(value).upper()
return super(CaseFormatter, self).convert_field(value, conversion)
def get_value(self, key, args, kwargs_local):
# The get_value() function sequentially applies user-supplied
# and standard substitutions, and implements suitable defaults
# in case a field name evaluates to None. XXX Emit a warning
# when this happens?
if key in kwargs:
return kwargs[key]
value = super(CaseFormatter, self).get_value(key, args, kwargs_local)
if value is None:
if key == 'chunk':
return -1
elif key == 'expNum':
return -1
elif key == 'iso8601':
return ''
elif key == 'run':
return -1
elif key == 'seqno':
return -1
elif key == 'stream':
return -1
return value
t = evt_time(evt)
if t is not None:
epoch = t[0] + t[1] / 1000
else:
epoch = None
fmt = CaseFormatter()
try:
# psana
expNum = env.expNum()
except AttributeError:
# pyana
expNum = evt.expNum()
try:
# pyana
chunk = evt.chunk()
except AttributeError:
# not supported in psana
chunk = None
try:
# pyana
stream = evt.stream()
except AttributeError:
# not supported in psana
stream = None
# If chunk or stream numbers cannot be determined, which may happen
# if the XTC file has a non-standard name, evt.chunk() and
# evt.stream() will return None.
return fmt.format(format_string,
chunk=chunk,
epoch=epoch,
experiment=env.experiment(),
expNum=expNum,
instrument=env.instrument(),
iso8601=evt_timestamp(t),
jobName=env.jobName(),
jobNameSub=env.jobNameSub(),
run=evt.run(),
seqno=int(evt_seqno(evt)),
stream=stream,
subprocess=env.subprocess(),
user=getuser())
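# Hypothetical format string, for illustration only (evt and env must be live
# pyana/psana objects at run time):
# pathsubst("{experiment!l}/r{run:04d}/{seqno}.pickle", evt, env)
# might yield something like "cxi12345/r0042/123456.pickle".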
def get_ebeam(evt):
try:
# pyana
ebeam = evt.getEBeam()
except AttributeError as e:
from psana import Source, Bld
src = Source('BldInfo(EBeam)')
ebeam = evt.get(Bld.BldDataEBeamV6, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeamV5, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeamV4, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeamV3, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeamV2, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeamV1, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeamV0, src)
if ebeam is None:
ebeam = evt.get(Bld.BldDataEBeam, src) # recent version of psana will return a V7 event or higher if this type is asked for
return ebeam
def env_laser_status(env, laser_id):
"""The return value is a bool that indicates whether the laser in
question was on for that particular shot. Bear in mind that sample
hit by the laser will only encounter the X-rays some time after,
depending on the flow rate.
"""
if env is not None:
pv_in = env.epicsStore().value('CXI:LAS:SHT:%02i:IN' % laser_id)
pv_out = env.epicsStore().value('CXI:LAS:SHT:%02i:OUT' % laser_id)
if pv_in is None or pv_out is None:
return
if hasattr(pv_in, "values"):
if len(pv_in.values) != 1:
return
laser_off = pv_in.values[0]
else:
laser_off = pv_in
if hasattr(pv_out, "values"):
if len(pv_out.values) != 1:
return
laser_on = pv_out.values[0]
else:
laser_on = pv_out
if laser_on and laser_off:
# According to LCLS staff, this means the laser is not plugged in
return False
return bool(laser_on)
def env_injector_xyz(env):
"""Returns the coordinates of the sample injector. XXX units unknown?"""
if env is not None:
return tuple([
env.epicsStore().value("CXI:USR:MZM:0%i:ENCPOSITIONGET" %(i+1))
for i in range(3)])
def env_detz(address, env):
"""The env_detz() function returns the position of the detector with
the given address string on the z-axis in mm. The zero-point is as
far away as possible from the sample, and values decrease as the
detector is moved towards the sample.
@param address Full data source address of the DAQ device
@param env Environment object
@return Detector z-position, in mm
"""
if env is not None:
detector = address_split(address, env)[0]
if detector is None:
return None
elif detector == 'CxiDs1':
pv = env.epicsStore().value('CXI:DS1:MMS:06.RBV')
if pv is None:
# Even though potentially unsafe, fall back on the commanded
# value if the corresponding read-back value cannot be read.
# According to <NAME>, this particular motor has not
# caused any problem in the past.
pv = env.epicsStore().value('CXI:DS1:MMS:06')
if pv is None:
# Try the other detector. These are sometimes inconsistent
pv = env.epicsStore().value('CXI:DS2:MMS:06.RBV')
elif detector == 'CxiDsd' or detector == 'CxiDs2':
# XXX Note inconsistency in naming: Dsd vs Ds2!
pv = env.epicsStore().value('CXI:DS2:MMS:06.RBV')
if pv is None:
# Try the other detector. These are sometimes inconsistent
pv = env.epicsStore().value('CXI:DS1:MMS:06.RBV')
elif detector == 'XppGon':
# There is no distance recorded for the XPP's CSPAD on the robot
# arm. Always return zero to allow the distance to be set using
# the offset.
return 0
elif detector == 'XppEndstation' or \
detector == 'MfxEndstation':
# There is no distance recorded for the XPP's or MFX's Rayonix
# on the robot arm. Always return zero to allow the distance to
# be set using the offset.
return 0
else:
return None
if pv is None:
return None
if hasattr(pv, "values"):
if len(pv.values) == 1:
return pv.values[0]
else:
return None
return pv
return None
def env_distance(address, env, offset):
"""The env_distance() function returns the distance between the
sample and the detector with the given address string in mm. The
distance between the sample and the the detector's zero-point can
vary by an inch or more between different LCLS runs. According to
<NAME> the offset should be stable to within ±0.5 mm
during a normal experiment.
@param address Full data source address of the DAQ device
@param env Environment object
@param offset Detector-sample offset in mm, corresponding to
longest detector-sample distance
@return Detector-sample distance, in mm
"""
detz = env_detz(address, env)
if detz is not None:
return detz + offset
return None
def env_sifoil(env):
"""The env_sifoil() function returns the total thickness of Si-foil,
    in um, that attenuates the beam. According to an e-mail from
    <NAME>, the centres of the attenuators are in the beam at around
    0 mm, and leave the beam at something like -7 mm. The "out" position
    is at approximately -15 mm.
@param env Environment object
@return Total thickness of attenuating Si-foil
"""
if (env is None):
return (None)
# the pv name (? XXX) and the length of Si-foil it corresponds to
# XXX static?
dia = { "XRT:DIA:MMS:02.RBV": 20,
"XRT:DIA:MMS:03.RBV": 40,
"XRT:DIA:MMS:04.RBV": 80,
"XRT:DIA:MMS:05.RBV": 160,
"XRT:DIA:MMS:06.RBV": 320,
"XRT:DIA:MMS:07.RBV": 640,
"XRT:DIA:MMS:08.RBV": 1280,
"XRT:DIA:MMS:09.RBV": 2560,
"XRT:DIA:MMS:10.RBV": 5120,
"XRT:DIA:MMS:11.RBV": 10240 }
si_tot = 0
for pvname, si_len in six.iteritems(dia):
pv = env.epicsStore().value(pvname)
# XXX Why is this an EpicsPvTime object? The absorption
# coefficient of Si is E-18 * n_{0} * lambda^2, (for lambda >= 5
# um, Schroder, <NAME>., <NAME>, and <NAME>, IEEE
# Trans. Electron. Dev. ED-25, 2(1978) 254-261). See also
# http://henke.lbl.gov/optical_constants/filter2.html
#print "For ", pvname, " got ", pv, " and ", pv.values[0]
if pv is not None: # and pv.units == "mm"
if hasattr(pv, "values"):
# pyana
if len(pv.values) == 1 and abs(pv.values[0]) < 7:
si_tot += si_len
else:
# psana
if abs(pv) < 7:
si_tot += si_len
return (si_tot)
def env_wavelength_sxr(evt, env):
"""The env_wavelength_sxr() function returns the wavelength in
Ångström of the environment pointed to by @p env at the time of the
event @p evt. The function returns a positive value or @c None if
no wavelength is available for the event. See Heimann et al. (2011)
Rev. Sci. Instrum. 82, 093104.
    @note The photon energy in eV is 12398.4187 divided by the value
returned from env_wavelength_sxr().
@param evt Event data object, a configure object
@param env Environment object
@return Wavelength, in Ångström
"""
from calendar import timegm
from time import strptime
if evt is None or env is None:
return None
t = evt.getTime()
if t is None:
return None
es = env.epicsStore()
if es is None:
return None
# Note that the monochromator coefficients could change from day to
# day. Unless specific values for the requested time are available,
# attempt to retrieve them from EPICS.
#
# The compiler could recognize that strptime() and timegm() are pure
# and reduce the test expression to an integer comparison.
f = '%Y-%m-%d, %H:%M %Z'
s = t.seconds()
if s is None:
return None
elif s < timegm(strptime('2012-11-12, 17:00 UTC', f)):
return None
elif s < timegm(strptime('2012-11-17, 17:00 UTC', f)):
abc = [+3.65920, -0.76851, +0.02105]
elif s < timegm(strptime('2012-11-20, 17:00 UTC', f)):
abc = [+4.18190, -0.77650, +0.01020]
if 'abc' not in locals():
pv = []
for name in ['SXR:IOC:POLY:POLY:Lambda:O1:G3:A',
'SXR:IOC:POLY:POLY:Lambda:O1:G3:B',
'SXR:IOC:POLY:POLY:Lambda:O1:G3:C']:
pv.append(es.value(name))
if pv[-1] is None or len(pv[-1].values) != 1:
return None
pv[-1] = pv[-1].values[0]
if pv[-1] is None:
return None
abc = [pv[i] for i in range(3)]
# Get the grating motor position from EPICS.
pv = es.value('SXR:MON:MMS:06.RBV')
if pv is not None and len(pv.values) == 1:
x = pv.values[0]
e = 10 * (abc[0] + abc[1] * x + abc[2] * x**2)
if e > 0:
return e
return None
def evt_pulse_energy(evt):
"""The evt_pulse_energy() function returns the energy, or the
intensity, of the pulse in arbitrary units. The returned value
should be proportional to the number of photons in the pulse, and
may be negative due to noise.
@note An absolute, but less accurate, estimate of the number of
photons in the pulse may be obtained from the gas monitor
detector's fMilliJoulesPerPulse value.
@param evt Event data object, a configure object
@return Pulse intensity, in arbitrary units
"""
from pypdsdata.xtc import TypeId
if evt is None:
return None
gmd = evt.get(key=TypeId.Type.Id_GMD)
if hasattr(gmd, 'fRelativeEnergyPerPulse') and evt.expNum() == 208:
# Note that for L632 (experiment number 208)
# fRelativeEnergyPerPulse actually gives the negated value
# sought. Details are given in Moeller, S. (2012) "GMD Look
# up Sheet for variable names in the DAQ (BLD) versus the C++
# code".
return -gmd.fRelativeEnergyPerPulse
elif hasattr(gmd, 'fCorrectedSumPerPulse'):
# This relatively pressure-independent quantity in arbitrary
# units is preferable. It is also known as
# SXR:GMD:BLD:CumSumAllPeaks.
return gmd.fCorrectedSumPerPulse
return None
def evt_pulse_length(evt):
"""The evt_pulse_length() function returns the pulse length in fs.
It is calculated as the ratio of the charge (in nC) and the peak
current (in A).
@param evt Event data object, a configure object
@return Pulse length, in fs
"""
if (evt is not None):
ebeam = get_ebeam(evt)
if ebeam is None:
return
try:
if ebeam.fEbeamPkCurrBC2 > 0:
return 1e6 * ebeam.fEbeamCharge / ebeam.fEbeamPkCurrBC2
except AttributeError:
if ebeam.ebeamPkCurrBC2() > 0:
return 1e6 * ebeam.ebeamCharge() / ebeam.ebeamPkCurrBC2()
return None
def evt_repetition_rate(evt, address='*'):
"""The evt_repetition_rate() function returns the repetition rate of
the instrument in Hz. See
https://confluence.slac.stanford.edu/display/PCDS/EVR+Event+Codes
@param evt Event data object, a configure object
@param address Data source address of the DAQ device
@return Integer repetition rate, in Hz
"""
evr = evt.getEvrData(address)
if evr is not None:
event_code_map = [120, 60, 30, 10, 5, 1]
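        # Event codes 40..45 (and 140..145) map to 120, 60, 30, 10, 5 and 1 Hz, respectively.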
for i in range(evr.numFifoEvents() - 1, -1, -1):
# Search for the last repetition rate event code.
j = evr.fifoEvent(i).EventCode
if j >= 40 and j <= 45:
# These are the NO BEAM event codes.
return event_code_map[j - 40]
if j >= 140 and j <= 145:
# These are the undocumented BEAM event codes.
return event_code_map[j - 140]
return None
def evt_beam_charge(evt):
"""The evt_beam_charge() function returns the charge of the pulse in
nC.
@param evt Event data object, a configure object
@return Pulse charge, in nC
"""
if evt is not None:
ebeam = get_ebeam(evt)
if ebeam is None:
return
        try:
            return ebeam.fEbeamCharge
except AttributeError:
return ebeam.ebeamCharge()
return None
def evt_seqno(evt=None):
"""The evt_seqno() function returns string representation of a
sequence number. If @p evt is not @c None the return value reflects
the time at which @p evt occurred, otherwise the current time is
used. If @p evt does not contain a time, evt_seqno() returns @c
None. XXX Should probably return an integer type instead?
@param evt Event data object, a configure object
@return String representation of sequence number
"""
t = evt_time(evt=evt)
if t is None:
return None
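    # e.g. a shot at 2012-11-12 17:00:00.365 UTC yields the string "20121112170000365".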
return time.strftime("%Y%m%d%H%M%S", time.gmtime(t[0])) + ("%03d" % t[1])
def evt_time(evt=None):
"""The evt_time() function returns the time of the event @p evt since
midnight, 1 January 1970 UTC (Unix time) to millisecond precision.
    If @p evt does not contain a time, evt_time() returns @c None. If
    @p evt is @c None the current time is used.
@note Millisecond precision is sufficient, because at 120 Hz, shots
are taken at 8.3 ms intervals.
@param evt Event data object, a configure object
@return Unix time as a tuple of seconds and milliseconds
"""
if evt is None:
t = time.time()
s = int(math.floor(t))
return (s, int(round((t - s) * 1000)))
if hasattr(evt, "getTime"):
t = evt.getTime()
if t is None:
return None
return (t.seconds(), t.nanoseconds() // 1000000)
else:
from psana import EventId
id = evt.get(EventId)
return (id.time()[0], id.time()[1] // 1000000)
def evt_timestamp(t=None):
"""The evt_timestamp() function returns a string representation of
an extended human-readable ISO 8601 timestamp. If @p t is @c None
the current time is used. The function returns @c None on failure.
@param t Tuple of the time in seconds and milliseconds
@return Human-readable ISO 8601 timestamp in string representation
"""
if t is None:
t = evt_time(evt=None)
if t is None:
return None
return time.strftime("%Y-%m-%dT%H:%MZ%S", time.gmtime(t[0])) + \
(".%03d" % t[1])
def evt_wavelength(evt, delta_k=0):
"""The evt_wavelength() function returns the wavelength in Ångström
    of the event pointed to by @p evt. From Margaritondo &
    <NAME> (2011): the dimensionless relativistic γ-factor is derived
from beam energy in MeV and the electron rest mass, K is a
dimensionless "undulator parameter", and L is the macroscopic
undulator period in Ångström. See also
https://people.eecs.berkeley.edu/~attwood/srms/2007/Lec10.pdf
@param evt Event data object, a configure object
@param delta_k Optional K-value correction
@return Wavelength, in Ångström
"""
if evt is not None:
ebeam = get_ebeam(evt)
if hasattr(ebeam, 'fEbeamPhotonEnergy') and ebeam.fEbeamPhotonEnergy > 0:
# pyana
return 12398.4187 / ebeam.fEbeamPhotonEnergy
if hasattr(ebeam, 'ebeamPhotonEnergy') and ebeam.ebeamPhotonEnergy() > 0:
# psana
return 12398.4187 / ebeam.ebeamPhotonEnergy()
if hasattr(ebeam, 'fEbeamL3Energy') and ebeam.fEbeamL3Energy > 0:
# pyana
gamma = ebeam.fEbeamL3Energy / 0.510998910
elif hasattr(ebeam, 'ebeamL3Energy') and ebeam.ebeamL3Energy() > 0:
# psana
gamma = ebeam.ebeamL3Energy() / 0.510998910
else:
return None
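        # Illustrative numbers only (assumed, not taken from the data stream): at an L3
        # energy of 13.6 GeV, gamma = 13600 / 0.510998910 ~ 2.7e4; with K = 3.5 and an
        # undulator period L = 3.0e8 Angstrom (3.0 cm), the expression below gives
        #   lambda = L / (2 * gamma**2) * (1 + K**2 / 2) ~ 0.21 * 7.1 ~ 1.5 Angstrom,
        # i.e. a photon energy of roughly 8 keV.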
K = 3.5 + delta_k
L = 3.0e8
return L / (2 * gamma**2) * (1 + K**2 / 2)
return None
def old_address_to_new_address(address):
""" Change between old and new style detector addresses.
I.E. CxiDs1-0|Cspad-0 becomes CxiDs1.0:Cspad.0
@param address detector address to convert
"""
return address.replace('-','.').replace('|',':')
def getConfig(address, env):
""" Given a detector address, find the config object in an env object
that goes with it.
@param address detector address
@param env environment object to search"""
if hasattr(env, 'configStore'):
good_key = None
address = old_address_to_new_address(address)
for key in env.configStore().keys():
if address in str(key.src()) and key.type() is not None:
good_key = key
break
if good_key is None:
return None
return env.configStore().get(good_key.type(),good_key.src())
else:
# Try the pyana method for older data
from pypdsdata.xtc import TypeId
return env.getConfig(TypeId.Type.Id_CspadConfig, address)
def getOptBool(s):
if s is None or s == "None": return False
elif isinstance(s, bool):
return s
s = s.strip().lower()
return s == "true"
def getOptEvalOrString(s) :
"""Allow python code macros in the pyana configuration file, e.g.
dark_path = "/location_of_darks/r%%04d/Ds1-avg.pickle"%%(max([{True:dark,False:0}[3 > dark] for dark in [1,2,6,9,12,14,17,19]]))
"""
possible_string = getOptString(s)
try:
eval_string = eval(possible_string,{},{})
return eval_string
except (SyntaxError, TypeError):
return possible_string
def getOptString(s) :
"""XXX Return the string, strip of any white space (make sure there
are no newline characters here). This function was originally
written by In<NAME> for pyana's XtcExplorer module.
"""
if (s is None):
return (None)
s = s.strip()
if (s == "" or s == "No" or s == "None"):
return (None)
return (s)
def getOptStrings(s, default=None) :
"""XXX Return a list of strings. This function was originally
written by In<NAME>te for pyana's XtcExplorer module.
"""
if (s is None):
return default
# strip off any leading or trailing whitespace
s = s.strip()
# make sure there are no newline characters here
s = s.split("\n")
s = " ".join(s)
# make a list
l = s.split()
if (len(l) == 0 or (len(l) == 1 and (s == "" or s == "No" or s == "None"))):
return ([])
# all other cases:
return (l)
def getOptInteger(s):
"""XXX Return a single integer. This function was originally
written by In<NAME> for pyana's XtcExplorer module. XXX What if
conversion fails?
"""
if (s is None or s == "" or s == "None"):
return None
return (int(s))
def getOptFloat(s):
"""Return a single float.
"""
if (s is None or s == "" or s == "None"):
return None
return (float(s))
def getOptROI(s):
"""Return a tuple of the region of interest.
Format: roi = fast_low:fast_high,slow_low:slow_high
"""
roi_str = getOptString(s)
if (roi_str is not None and roi_str != ""):
ivl = roi_str.strip("()").split(",")
ivl_x = ivl[0].split(":")
ivl_y = ivl[1].split(":")
roi = [ivl_x[0], ivl_x[1], ivl_y[0], ivl_y[1]]
for i in range(4):
if roi[i] == "": roi[i] = None
else: roi[i] = int(roi[i])
return tuple(roi)
def image(address, config, evt, env, sections=None):
"""Assemble the uint16 detector image, and sum it up as int32. Sum
the image of squared intensities as uint64. XXX Documentation! XXX
Would be nice to get rid of the constant string names. XXX Better
named evt_image()?
@param address Full data source address of the DAQ device
@param config XXX This should go--get up-to-date object on the fly!
@param evt Event data object, a configure object
@param env Environment object
@param sections XXX
@return XXX
"""
device = address_split(address)[2]
if device is None:
return None
elif device == 'Andor':
# XXX There is no proper getter for Andor frames yet, and
# evt.getFrameValue(address) does not appear to work.
from pypdsdata.xtc import TypeId
value = evt.get(TypeId.Type.Id_AndorFrame, address)
if value is not None:
img = value.data()
return img
elif device == 'Cspad':
if sections is not None:
return CsPadDetector(address, evt, env, sections)
else:
# XXX This is obsolete code, provided for backwards
# compatibility with the days before detector metrology was
# used.
assert False # sections always required now as of Sep 1 2014
quads = evt.getCsPadQuads(address, env)
qimages = numpy.empty((4, npix_quad, npix_quad), dtype='uint16')
for q in quads:
qimages[q.quad()] = CsPadElement(q.data(), q.quad(), config)
return numpy.vstack((numpy.hstack((qimages[0], qimages[1])),
numpy.hstack((qimages[3], qimages[2]))))
elif device == 'Cspad2x2':
from pypdsdata.xtc import TypeId
quads = evt.get(TypeId.Type.Id_Cspad2x2Element, address)
if quads is not None:
return CsPad2x2Image(quads.data(), config, sections)
elif device == 'pnCCD':
value = evt.getPnCcdValue(address, env)
if value is not None:
# Returns the image data as a numpy 1024-by-1024 uint16 array
# XXX Should be split up into tiles (halves) to allow metrology
# to be adjusted? Will require a sections parameter!
img = value.data()
# Deal with overflows. XXX This might be dependent on the
# particular version of pyana. CASS ignores the two most
# significant bits, which is different from what is done below,
# but <NAME> says they do contain data which could be used.
img[img > 2**14 - 1] = 2**14 - 1
return img
return None
def image_xpp(address, evt, env, aa, quads = None):
"""Assemble the uint16 detector image, see also
cspad_tbx.CsPadDetector(). XXX Documentation! XXX Would be nice to
get rid of the constant string names. XXX Better named evt_image()?
@param address Full data source address of the DAQ device
@param evt Event data object, a configure object
@param env Environment object
@param aa Active areas, in lieu of full metrology object
@param quads Data, if None get it from the event
@return XXX
"""
if address != 'XppGon-0|Cspad-0':
return None
# Get a current configure object for the detector
config = getConfig(address, env)
if config is None:
return None
if quads is None:
# For consistency, one could/should verify that len(quads) is equal
# to len(sections).
quads = evt_get_quads(address, evt, env)
if quads is None or len(quads) != len(aa) // (8 * 2 * 4):
return None
# Start out with a blank image of the detector. Mikhail
# <NAME>'s HDF5Explorer/src/ConfigCSpad.py uses a detector
# size of 1765-by-1765 pixels. This assumes that the type of the
# first section in the first quadrant is identical to the type of
# all the other sections.
det = numpy.zeros((1765, 1765), dtype=quads[0].data()[0].dtype)
for quad in quads:
q_data = quad.data()
q_idx = quad.quad()
try:
# pyana
# example: if the third sensor (2x1) is disabled, q_mask = [0,1,3,4,5,6,7]
q_mask = config.sections(q_idx)
except AttributeError:
# psana
# as above, using config.roiMask, a bitstring where the ith bit is true if the ith sensor is active. x << y means bitwise shift
# x, y times, and & is the bitwise AND operator
q_mask = [i for i in range(config.numSect()//config.numQuads()) if 1 << i & config.roiMask(q_idx)]
# For consistency, one could/should verify that len(q_data) is
# equal to len(sections[q_idx]).
assert len(q_data) == len(q_mask)
for (s_data, s_idx) in zip(q_data, q_mask):
# Rotate the "lying down" sensor readout from the XTC stream by
# an integer multiple of 90 degrees to match the orientation on
# the detector. This assumes that the horizontal dimension of
# the unrotated sensor is even. Note that the XPP CSPAD is
# rotated by 180 degrees with respect to the optical metrology
# measurements.
assert s_data.shape[1] % 2 == 0
if q_idx == 0 and s_idx in [2, 3, 6, 7] or \
q_idx == 1 and s_idx in [0, 1] or \
q_idx == 3 and s_idx in [4, 5]:
asics = numpy.hsplit(numpy.rot90(s_data, 0 + 2), 2)
asics.reverse()
elif q_idx == 0 and s_idx in [0, 1] or \
q_idx == 2 and s_idx in [4, 5] or \
q_idx == 3 and s_idx in [2, 3, 6, 7]:
asics = numpy.vsplit(numpy.rot90(s_data, 1 + 2), 2)
elif q_idx == 1 and s_idx in [4, 5] or \
q_idx == 2 and s_idx in [2, 3, 6, 7] or \
q_idx == 3 and s_idx in [0, 1]:
asics = numpy.hsplit(numpy.rot90(s_data, 2 + 2), 2)
elif q_idx == 0 and s_idx in [4, 5] or \
q_idx == 1 and s_idx in [2, 3, 6, 7] or \
q_idx == 2 and s_idx in [0, 1]:
asics = numpy.vsplit(numpy.rot90(s_data, 3 + 2), 2)
asics.reverse()
else:
# NOTREACHED
return None
# Use the active areas to place the two ASICS on the
# destination detector image.
for a_idx in range(len(asics)):
aa_idx = q_idx * (8 * 2 * 4) + s_idx * (2 * 4) + a_idx * 4
det[aa[aa_idx + 0]:aa[aa_idx + 2],
aa[aa_idx + 1]:aa[aa_idx + 3]] = asics[a_idx]
return det
def iplace(dst, src, angle, center):
"""The iplace() function places @p src in @p dst centred on @p
center after rotating it by @p angle degrees counter-clockwise.
The source image is mapped onto the destination image by bilinear
interpolation. While this may introduce interpolation artifacts
it is significantly simpler than many other interpolation
methods--and bog slow.
@p dst Destination image
@p src Source image
@p angle Rotation angle, in degrees
@p center Centre of @p src in @p dst, after rotation
"""
a = math.radians(angle)
c = math.cos(a)
s = math.sin(a)
# Find the origin-centred bounding box of the rotated source
# image. Due to the symmetry of a rectangle, the extrema can be
# determined by the transformed coordinates of two adjacent
# corners.
hsize = [0.5 * max(abs(c * src.shape[0] - s * src.shape[1]),
abs(c * src.shape[0] + s * src.shape[1])),
0.5 * max(abs(s * src.shape[0] + c * src.shape[1]),
abs(s * src.shape[0] - c * src.shape[1]))]
xlim = [int(math.floor(-hsize[0])),
int(math.ceil( +hsize[0])) + 1]
ylim = [int(math.floor(-hsize[1])),
int(math.ceil( +hsize[1])) + 1]
# For each pixel in the bounding box, determine the real-valued
# components in coordinate system of the untransformed source
# image, (xp, yp). Then do bilinear interpolation based on the
# four pixels with integer coordinates around (xp, yp).
for x in range(xlim[0], xlim[1]):
for y in range(ylim[0], ylim[1]):
xp = c * x + s * y + 0.5 * src.shape[0]
yp = -s * x + c * y + 0.5 * src.shape[1]
if (xp >= 0 and math.ceil(xp) < src.shape[0] and
yp >= 0 and math.ceil(yp) < src.shape[1]):
xi =[int(math.floor(xp)), int(math.ceil(xp))]
yi =[int(math.floor(yp)), int(math.ceil(yp))]
xf = xp - xi[0]
yf = yp - yi[0]
dst[int(round(x + center[0])),
int(round(y + center[1]))] = \
src[xi[0], yi[0]] * (1 - xf) * (1 - yf) + \
src[xi[1], yi[0]] * xf * (1 - yf) + \
src[xi[0], yi[1]] * (1 - xf) * yf + \
src[xi[1], yi[1]] * xf * yf
def rplace(dst, src, angle, center):
"""The rplace() function places @p src in @p dst centred on @p
centre after rotating it by @p angle degrees counter-clockwise.
The rotation angle is rounded to the nearest integer multiple of
90 degrees before transformation.
@p dst Destination image
@p src Source image
@p angle Rotation angle, in degrees
@p center Centre of @p src in @p dst, after rotation
"""
# Rotate the source image, and determine the upper, left corner of
# its location in the destination image.
rot = numpy.rot90(src, int(round(angle / 90.0)) % 4)
ulc = [int(round(center[0] - 0.5 * rot.shape[0])),
int(round(center[1] - 0.5 * rot.shape[1]))]
dst[ulc[0]:(ulc[0] + rot.shape[0]),
ulc[1]:(ulc[1] + rot.shape[1])] = rot
# For the moment, the XPP CSPAD detector's metrology is stored here
# as a series of active areas
_xpp_active_areas = {
'XPP 7.1': { # metrology recorded 1/24/13 and processed by flatfile.py
'active_areas': flex.int([
865, 1121, 1059, 1306, 1062, 1121, 1256, 1306,
864, 909, 1058, 1094, 1061, 909, 1255, 1094,
1083, 1534, 1268, 1728, 1083, 1337, 1268, 1531,
871, 1538, 1056, 1732, 871, 1341, 1056, 1535,
1495, 1326, 1689, 1511, 1298, 1326, 1492, 1511,
1496, 1539, 1690, 1724, 1299, 1539, 1493, 1724,
1482, 1105, 1667, 1299, 1482, 908, 1667, 1102,
1270, 1107, 1455, 1301, 1270, 910, 1455, 1104,
1123, 706, 1308, 900, 1123, 509, 1308, 703,
910, 706, 1095, 900, 910, 509, 1095, 703,
1535, 498, 1729, 683, 1338, 498, 1532, 683,
1534, 711, 1728, 896, 1337, 711, 1531, 896,
1324, 77, 1509, 271, 1324, 274, 1509, 468,
1537, 75, 1722, 269, 1537, 272, 1722, 466,
1104, 97, 1298, 282, 907, 97, 1101, 282,
1105, 310, 1299, 495, 908, 310, 1102, 495,
706, 457, 900, 642, 509, 457, 703, 642,
705, 669, 899, 854, 508, 669, 702, 854,
496, 36, 681, 230, 496, 233, 681, 427,
709, 38, 894, 232, 709, 235, 894, 429,
77, 256, 271, 441, 274, 256, 468, 441,
77, 44, 271, 229, 274, 44, 468, 229,
98, 467, 283, 661, 98, 664, 283, 858,
311, 467, 496, 661, 311, 664, 496, 858,
457, 867, 642, 1061, 457, 1064, 642, 1258,
670, 865, 855, 1059, 670, 1062, 855, 1256,
37, 1084, 231, 1269, 234, 1084, 428, 1269,
37, 871, 231, 1056, 234, 871, 428, 1056,
256, 1495, 441, 1689, 256, 1298, 441, 1492,
43, 1497, 228, 1691, 43, 1300, 228, 1494,
469, 1481, 663, 1666, 666, 1481, 860, 1666,
467, 1269, 661, 1454, 664, 1269, 858, 1454]),
'rotations' : flex.int([
3,3,3,3,2,2,2,2,1,1,1,1,2,2,2,2,
2,2,2,2,1,1,1,1,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,2,2,2,2,3,3,3,3
])
},
'XPP 7.2': { # metrology recorded 1/29/13 and processed by flatfile.py
'active_areas': flex.int([
868, 1122, 1062, 1307, 1065, 1122, 1259, 1307,
868, 910, 1062, 1095, 1065, 910, 1259, 1095,
1087, 1534, 1272, 1728, 1087, 1337, 1272, 1531,
874, 1536, 1059, 1730, 874, 1339, 1059, 1533,
1497, 1328, 1691, 1513, 1300, 1328, 1494, 1513,
1499, 1541, 1693, 1726, 1302, 1541, 1496, 1726,
1483, 1105, 1668, 1299, 1483, 908, 1668, 1102,
1271, 1106, 1456, 1300, 1271, 909, 1456, 1103,
1122, 705, 1307, 899, 1122, 508, 1307, 702,
909, 705, 1094, 899, 909, 508, 1094, 702,
1534, 497, 1728, 682, 1337, 497, 1531, 682,
1533, 710, 1727, 895, 1336, 710, 1530, 895,
1323, 76, 1508, 270, 1323, 273, 1508, 467,
1536, 75, 1721, 269, 1536, 272, 1721, 466,
1103, 97, 1297, 282, 906, 97, 1100, 282,
1103, 310, 1297, 495, 906, 310, 1100, 495,
705, 456, 899, 641, 508, 456, 702, 641,
704, 669, 898, 854, 507, 669, 701, 854,
495, 35, 680, 229, 495, 232, 680, 426,
707, 38, 892, 232, 707, 235, 892, 429,
75, 256, 269, 441, 272, 256, 466, 441,
75, 43, 269, 228, 272, 43, 466, 228,
97, 467, 282, 661, 97, 664, 282, 858,
310, 466, 495, 660, 310, 663, 495, 857,
456, 866, 641, 1060, 456, 1063, 641, 1257,
669, 865, 854, 1059, 669, 1062, 854, 1256,
36, 1084, 230, 1269, 233, 1084, 427, 1269,
35, 870, 229, 1055, 232, 870, 426, 1055,
254, 1494, 439, 1688, 254, 1297, 439, 1491,
42, 1496, 227, 1690, 42, 1299, 227, 1493,
468, 1481, 662, 1666, 665, 1481, 859, 1666,
465, 1268, 659, 1453, 662, 1268, 856, 1453]),
'rotations' : flex.int([
3,3,3,3,2,2,2,2,1,1,1,1,2,2,2,2,
2,2,2,2,1,1,1,1,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,2,2,2,2,3,3,3,3
])
},
'XPP 8.1': { # metrology recorded 10/09/13 and processed by flatfile.py
'active_areas': flex.int([
863, 1118, 1057, 1303, 1060, 1118, 1254, 1303,
865, 913, 1059, 1098, 1062, 913, 1256, 1098,
1070, 1532, 1255, 1726, 1070, 1335, 1255, 1529,
863, 1532, 1048, 1726, 863, 1335, 1048, 1529,
1484, 1335, 1678, 1520, 1287, 1335, 1481, 1520,
1484, 1543, 1678, 1728, 1287, 1543, 1481, 1728,
1475, 1110, 1660, 1304, 1475, 913, 1660, 1107,
1268, 1109, 1453, 1303, 1268, 912, 1453, 1106,
1119, 707, 1304, 901, 1119, 510, 1304, 704,
912, 707, 1097, 901, 912, 510, 1097, 704,
1533, 506, 1727, 691, 1336, 506, 1530, 691,
1533, 715, 1727, 900, 1336, 715, 1530, 900,
1334, 84, 1519, 278, 1334, 281, 1519, 475,
1541, 85, 1726, 279, 1541, 282, 1726, 476,
1108, 103, 1302, 288, 911, 103, 1105, 288,
1108, 311, 1302, 496, 911, 311, 1105, 496,
706, 460, 900, 645, 509, 460, 703, 645,
706, 666, 900, 851, 509, 666, 703, 851,
507, 38, 692, 232, 507, 235, 692, 429,
713, 38, 898, 232, 713, 235, 898, 429,
82, 241, 276, 426, 279, 241, 473, 426,
82, 37, 276, 222, 279, 37, 473, 222,
103, 459, 288, 653, 103, 656, 288, 850,
310, 460, 495, 654, 310, 657, 495, 851,
460, 862, 645, 1056, 460, 1059, 645, 1253,
666, 863, 851, 1057, 666, 1060, 851, 1254,
38, 1070, 232, 1255, 235, 1070, 429, 1255,
38, 864, 232, 1049, 235, 864, 429, 1049,
242, 1484, 427, 1678, 242, 1287, 427, 1481,
37, 1484, 222, 1678, 37, 1287, 222, 1481,
458, 1475, 652, 1660, 655, 1475, 849, 1660,
459, 1267, 653, 1452, 656, 1267, 850, 1452]),
'rotations' : flex.int([
3,3,3,3,2,2,2,2,1,1,1,1,2,2,2,2,
2,2,2,2,1,1,1,1,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,3,3,3,3,0,0,0,0,
0,0,0,0,3,3,3,3,2,2,2,2,3,3,3,3
])
},
#SOME BIG ISSUES REMAIN WITH Sacla.MPCCD.8tile format
# Evidently the data from Takanori 22 Sep 2015 already has slight rotation
# applied to the MPCCD modules, as the data rectangles displayed in cctbx.image_viewer are tilted
# This is inconsistent with the expectation that npy.py should get the raw data, not preprocessed.
'Sacla.MPCCD.8tile': { # as given by Takanori 22 Sep 2015
'active_areas': flex.int([
112, 189, 622, 1212, 647, 188, 1156, 1212,
1180, 140, 1691, 1163, 1714, 140, 2226, 1163,
159, 1231, 671, 2254, 694, 1230, 1206, 2253,
1229, 1180, 1740, 2203, 1762, 1180, 2274, 2202,
]),
'rotations' : flex.int([
0,0,0,0,0,0,0,0,
])
},
}
_xpp_active_areas['XPP 11.1'] = _xpp_active_areas['XPP 9.1'] = _xpp_active_areas['XPP 8.1']
xpp_active_areas = _xpp_active_areas
# =============================== new file ===============================
import os
import urllib.parse
from pelican import signals, contents
# Generate an XML sitemap for the blog
# The XML sitemap is NOT manually sent to Google but it is publicly
# available
# The output filename
FILENAME = 'sitemap.xml'
# Table for change frequencies
# These are default values that can be overridden in the configuration file
# of the blog
# The underscore values come from Pelican
CHANGE_FREQUENCIES = {
'_index': 'daily',
'_articles': 'monthly',
'_pages': 'monthly',
'_default': 'weekly',
}
# Table for the priorities
# These are default values that can be overridden in the configuration file
# of the blog
PRIORITIES = {
'_default': 0.5
}
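# For example (hypothetical slugs and values), a site could override these defaults from
# its Pelican settings, which are merged into the tables above in SitemapGenerator.__init__:
#
#     CHANGE_FREQUENCIES = {'_index': 'hourly', 'about': 'yearly'}
#     PRIORITIES = {'_index': 1.0, 'about': 0.3, '_default': 0.6}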
# In order to generate the sitemap, we use a bunch of Python templates
# that we glue together
# Last modification template
DATE_TEMPLATE = '\n <lastmod>{}</lastmod>'
# URL Template
URL_TEMPLATE = ''' <url>
<loc>{loc}</loc>{lastmod}
<changefreq>{changefreq}</changefreq>
<priority>{priority}</priority>
</url>'''
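# A single rendered <url> entry then looks roughly like this (illustrative values):
#   <url>
#     <loc>https://example.com/my-post</loc>
#     <lastmod>2020-01-31</lastmod>
#     <changefreq>monthly</changefreq>
#     <priority>0.5</priority>
#   </url>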
# Root template
SITEMAP_TEMPLATE = '''<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{}
</urlset>
'''
# Get the content priority associated with a Pelican content object
def get_content_priority(content):
if content.slug in PRIORITIES:
return PRIORITIES[content.slug]
return PRIORITIES['_default']
# Get the content change frequency associated with a Pelican content object
def get_content_change_frequency(content):
if content.slug in CHANGE_FREQUENCIES:
return CHANGE_FREQUENCIES[content.slug]
if isinstance(content, contents.Article):
return CHANGE_FREQUENCIES['_articles']
if isinstance(content, contents.Page):
return CHANGE_FREQUENCIES['_pages']
return CHANGE_FREQUENCIES['_default']
# Get the last modification date for a Pelican content object
def get_content_last_date(content):
# Prioritize the last update date
if hasattr(content, 'modified'):
return content.modified
if hasattr(content, 'date'):
return content.date
return None
class SitemapGenerator():
def __init__(self, context, settings, path, theme, output_path):
self.context = context
self.output_path = output_path
# Merge constants with configuration
CHANGE_FREQUENCIES.update(context['CHANGE_FREQUENCIES'])
PRIORITIES.update(context['PRIORITIES'])
# Get slugs to exclude
self.exclude = self.context['API_EXCLUDE_SLUGS']
def generate_output(self, writer):
# Final file path
path = os.path.join(self.output_path, FILENAME)
# Extract pages and articles
content = \
self.context['articles'] + \
self.context['pages']
# Remove the content that must be excluded
content = [c for c in content if c.slug not in self.exclude]
# Store all the url blocks
buffer = []
# Iterate over all pages, articles, mixed
for c in content:
# Date can be YYYY-MM-DD or nothing
date = get_content_last_date(c)
date_formated = None
if date is not None:
date_formated = DATE_TEMPLATE.format(date.strftime('%Y-%m-%d'))
# Join site url and content slug
url = urllib.parse.urljoin(self.context['SITEURL'], c.slug)
# Update frequency
frequency = get_content_change_frequency(c)
# Document priority
priority = get_content_priority(c)
# Store the URL block
buffer.append(URL_TEMPLATE.format(
loc=url,
lastmod=date_formated or '',
changefreq=frequency,
priority=priority
))
# Don't forget the index page
buffer.append(URL_TEMPLATE.format(
loc=self.context['SITEURL'],
            lastmod='',  # empty string; passing None would render the literal text "None" into the XML
changefreq=CHANGE_FREQUENCIES['_index'],
priority=PRIORITIES['_default']
))
# Join all the URL blocks into the final template
sitemap = SITEMAP_TEMPLATE.format('\n'.join(buffer))
# Write sitemap to disk
with open(path, 'w+') as f:
f.write(sitemap)
def get_generators(generators):
return SitemapGenerator
def register():
signals.get_generators.connect(get_generators)
# ================ new file (repo: danaraujocr/trendfitter) ================
from trendfitter.models.DiPLS import DiPLS
from trendfitter.models import PCA, PLS, SMB_PLS, MB_PCA, MB_PLS, MLSMB_PLS
import pandas as pd
from numpy import sqrt, mean
import numpy as np
from sklearn.model_selection import KFold, TimeSeriesSplit
"""
#pca_data = pd.read_csv('pca_test_dataset_incomplete.csv', index_col=0, delimiter = ';')
pca_data = pd.read_csv('pca_test_dataset_complete.csv', index_col=0)
pca_data = (pca_data - pca_data.mean()) / pca_data.std()
#print(pca_data.head())
pca_model = PCA()
pca_model.fit(pca_data)
print(f'PCA Model fitted with : {pca_model.principal_components} principal components')
print(pca_model.loadings)
print('VIPs')
print(pca_model.feature_importances_)
print('OMEGA')
print(pca_model.omega)
print('q2')
print(pca_model.q2)
print('chi2_params')
print(pca_model._chi2_params)
print('predict 5 first lines')
print(pca_model.predict(pca_data.iloc[:5]))
print('scores')
print(pca_model.transform(pca_data.iloc[:5]))
print('r2')
print(pca_model.score(pca_data))
print('Hot T2')
print(pca_model.Hotellings_T2(pca_data.iloc[:5]))
print('T2 Limit')
print(pca_model.T2_limit(0.95))
print('SPEs')
print(pca_model.SPEs(pca_data.iloc[:5]))
print('SPE limit')
print(pca_model.SPE_limit(0.99))
print('contributions scores ind')
print(pca_model.contributions_scores_ind(pca_data.iloc[:5]))
print('contributions spe')
print(pca_model.contributions_spe(pca_data.iloc[:5]))
mbpca_model = MB_PCA()
mbpca_model.fit(pca_data, [2,5])
print('block loadings')
print(mbpca_model.block_loadings)
print('superlevel loadings')
print( mbpca_model.superlevel_loadings)
print('superlevel scores')
print(mbpca_model.superlevel_training_scores[:5])
print('transform b')
print(mbpca_model.transform_b(pca_data.iloc[:,:2], 0)[:5])
print('predict b')
print(mbpca_model.predict_b(pca_data.iloc[:,:2], 0)[:5])
print('score b')
print(mbpca_model.score_b(pca_data.iloc[:,:2], 0))
"""
"""
#from trendfitter.models.PLS import *
pls_data = pd.read_csv('pls_dataset_complete.csv', delimiter=';', index_col=[0]).drop(columns=['ObsNum']).dropna()
#pls_data.to_csv('pls_data_w_no_na.csv')
pls_data = (pls_data - pls_data.mean()) / pls_data.std()
#print(pls_data.head())
X_pls = pls_data.drop(columns='Y-Kappa')
Y_pls = pls_data['Y-Kappa']
#X_pls = X_pls.values
#Y_pls = np.array(pls_data['Y-Kappa'].values, ndmin = 2).T
pls_model = PLS()
pls_model.fit(X_pls, Y_pls, random_state = 2)
print(pls_model.score(X_pls, Y_pls))
print(f'PLS Model fitted with : {pls_model.latent_variables} latent variables')
print('r²')
print(pls_model.score(X_pls, Y_pls))
print('training scores')
print(pls_model.training_scores[:5])
print('predicted scores')
print(pls_model.transform(X_pls[:5]))
print('P')
print(pls_model.p_loadings[:3, :])
print('W')
print(pls_model.weights[:3, :])
print('W*')
print(pls_model.weights_star[:3, :])
print('C')
print(pls_model.c_loadings[:3, :])
print('coeffs')
print(pls_model.coefficients)
print('VIPs')
print(pls_model.feature_importances_)
print('scores')
print(pls_model.training_scores[:5,:3])
print('omega')
print(pls_model.omega[:3, :3])
print('x_chi2')
print(pls_model._x_chi2_params)
print('y_chi2')
print(pls_model._y_chi2_params)
print('q2')
print(pls_model.q2y)
print('predict')
print(pls_model.predict(X_pls[:5]))
print('transform')
print(pls_model.transform(X_pls[:5]))
print('transform_inv')
print(pls_model.transform_inv(pls_model.transform(X_pls[:3])))
print('T2')
print(pls_model.Hotellings_T2(X_pls[:5]))
print('T2_limit')
print(pls_model.T2_limit(0.95))
print('SPEs X')
print(pls_model.SPEs_X(X_pls[:5]))
print('SPEs X_limit')
print(pls_model.SPE_X_limit(0.95))
print('SPEs Y')
print(pls_model.SPEs_Y(X_pls[:5], Y_pls[:5]))
print('SPEs Y_limit')
print(pls_model.SPE_Y_limit(0.95))
print('RMSEE')
print(pls_model.RMSEE(X_pls, Y_pls))
print('contributions_scores_ind')
print(pls_model.contributions_scores_ind(X_pls[:5]))
print('contributions_SPE_X')
print(pls_model.contributions_SPE_X(X_pls[:5]))
"""
smbpls_data = pd.read_csv('smb_pls_dataset.csv', delimiter=';', index_col = 0).dropna()
smbpls_data_miss = pd.read_csv('smb_pls_dataset.csv', delimiter=';', index_col = 0)
smbpls_data = (smbpls_data - smbpls_data.mean()) / smbpls_data.std()
smbpls_data_miss = (smbpls_data_miss - smbpls_data_miss.mean()) / smbpls_data_miss.std()
X = smbpls_data.drop(columns=['y'])
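# Block scaling: divide each block's columns by the square root of the number of variables
# in that block (6 in the first block, 4 in the second) so the blocks carry comparable weight.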
X[['Var1', 'Var2', 'Var3', 'rand1', 'rand2', 'rand3']] = X[['Var1', 'Var2', 'Var3', 'rand1', 'rand2', 'rand3']] / sqrt(6)
X[['Var4', 'Var5', 'rand4', 'rand5']] = X[['Var4', 'Var5', 'rand4', 'rand5']] / sqrt(4)
Y = smbpls_data['y']
X_miss = smbpls_data_miss.drop(columns=['y'])
X_miss[['Var1', 'Var2', 'Var3', 'rand1', 'rand2', 'rand3']] = X_miss[['Var1', 'Var2', 'Var3', 'rand1', 'rand2', 'rand3']] / sqrt(6)
X_miss[['Var4', 'Var5', 'rand4', 'rand5']] = X_miss[['Var4', 'Var5', 'rand4', 'rand5']] / sqrt(4)
Y_miss = smbpls_data_miss['y']
smbpls_model = SMB_PLS(tol = 1e-8, cv_splits_number = 7)
#smbpls_model.fit(X, [3,5], Y, latent_variables = [2, 1])
smbpls_model.fit(X, [6,10], Y, random_state = 2)#, latent_variables = [1,1])
#smbpls_model.fit(X, [3,5], Y, latent_variables = [2, 1])
"""
print(f'SMBPLS Model fitted with : {smbpls_model.latent_variables} latent variables')
print('block_p_loadings')
print(smbpls_model.block_p_loadings)
print('superlevel_p_loadings')
print(smbpls_model.superlevel_p_loadings)
print('x_weights_star')
print(smbpls_model.x_weights_star)
print('x_weights')
print(smbpls_model.x_weights)
print('superlevel_weights')
print(smbpls_model.superlevel_weights)
print('c_loadings')
print(smbpls_model.c_loadings)
print('weights_block')
print(smbpls_model.block_weights)
print(f'score is {smbpls_model.score(X, Y) * 100:.4f}%')
print(smbpls_model.q2y)
for i in range(sum(smbpls_model.latent_variables)):
print(smbpls_model.score(X, Y, latent_variables = i+1))
print('Hotellings_T2')
print(smbpls_model.Hotellings_T2(X.iloc[:5]))
"""
smbpls_model_miss = SMB_PLS(tol = 1e-8, cv_splits_number = 7)
smbpls_model_miss.fit(X_miss, [6,10], Y_miss, random_state = 2)
print('miss data section')
print(f'SMBPLS Model fitted with : {smbpls_model_miss.latent_variables} latent variables')
print('scores')
print(smbpls_model_miss.transform(X.iloc[:5]))
print('scores calculated with x_weights star (pinv expression)')
new_block_scores = X_miss.values @ smbpls_model_miss.x_weights_star.T
print(new_block_scores[:5])
print('scores calc with x_weights star2 (G matrix expression)')
new_block_scores = X_miss.values @ smbpls_model_miss.x_weights_star2.T
print(new_block_scores[:5])
print('block_p_loadings')
print(smbpls_model_miss.block_p_loadings)
print('superlevel_p_loadings')
print(smbpls_model_miss.superlevel_p_loadings)
print('x_weights_star')
print(smbpls_model_miss.x_weights_star)
print('x_weights')
print(smbpls_model_miss.x_weights)
print('superlevel_weights')
print(smbpls_model_miss.superlevel_weights)
print('weights_block')
print(smbpls_model_miss.block_weights)
print('c_loadings')
print(smbpls_model_miss.c_loadings)
print('Q2')
print(smbpls_model_miss.q2y)
print('scores')
print(smbpls_model_miss.transform(X_miss[:5]))
print('predictions')
print(smbpls_model_miss.predict(X_miss[:5]))
print(f'score is {smbpls_model_miss.score(X_miss[:5], Y_miss.iloc[:5])*100:.2f}%')
#print('Hotellings T²')
#print(smbpls_model_miss.Hotellings_T2(X_miss[:5]))
"""
mbpls_model = MB_PLS()
print(X_pls.shape)
block_divs = [10, 21]
mbpls_model.fit(X_pls, block_divs, Y_pls, latent_variables=2)
print('SL Weights')
print(mbpls_model.superlevel_weights)
print('SL Scores')
print(mbpls_model.training_sl_scores[:5,:])
print('block loadings')
print(mbpls_model.block_p_loadings)
print('block weights')
print(mbpls_model.block_weights)
print('transform_b')
print(mbpls_model.transform_b(X_pls.iloc[:,:10], 0)[:5,:])
print('score_b')
print(mbpls_model.score_b(X_pls.iloc[:,:10], 0))
dipls_model = DiPLS()
LV = 3
s = 3
dipls_model.fit(X_pls, Y_pls, latent_variables = LV, s = s)
print(f'DiPLS r² is {dipls_model.score(X_pls, Y_pls):.2f}')
q2_model = DiPLS()
q2_model.fit(X_pls.iloc[:-200], Y_pls.iloc[:-200], latent_variables = LV, s = s) # this model is trained with only a partition of the total dataset
q2_final = q2_model.score(X_pls.iloc[-200: :], Y_pls.iloc[-200:]) # its performance is registered in a list
print(f'DiPLS q² is {q2_final:.2f}')
kf = KFold(n_splits = 7 , random_state = 2)
pls_model = PLS()
pls_model.fit(X_pls, Y_pls)
print(f'PLS r² is {pls_model.score(X_pls, Y_pls):.2f}')
testq2 = []
for train_index, test_index in kf.split(X_pls):
q2_model = PLS()
q2_model.fit(X_pls.iloc[train_index], Y_pls.iloc[train_index], latent_variables = pls_model.latent_variables) # this model is trained with only a partition of the total dataset
testq2.append(q2_model.score(X_pls.iloc[test_index], Y_pls.iloc[test_index])) # its performance is registered in a list
q2_final = mean(testq2)
print(f'PLS q² is {q2_final:.2f}')
from numpy.random import default_rng
rng = default_rng()
X1 = rng.standard_normal((1000, 2))
X2 = rng.standard_normal((1000, 2))
X3 = rng.standard_normal((1000, 2)) + X1 * 0.2 + X2 * 0.3
all_Xs = np.concatenate([X1, X2, X3], axis = 1)
second_level = [4, 6]
third_level = [2, 4]
#Y = all_Xs @ wstar.T
#Y = (Y - np.mean(Y)) / np.std(Y)
Y = np.array(np.mean(all_Xs, axis = 1), ndmin = 2 ).T
Y = (Y - np.mean(Y)) / np.std(Y)
model = MLSMB_PLS()
model.fit(all_Xs,third_level, second_level, Y, latent_variables = [1, 1])
""" | StarcoderdataPython |
3367965 | <reponame>mateusguida/ExerciciosPython<gh_stars>0
import os
os.system("cls") #limpa janela terminal antes da execução
numero = [[], []]
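# numero[0] collects the even inputs, numero[1] the odd ones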
for i in range(0,7):
num = int(input("Digite um numero: "))
if num % 2 == 0:
numero[0].append(num)
else:
numero[1].append(num)
print('-=' * 30)
numero[0].sort()
numero[1].sort()
print(f'{numero}')
# =============================== new file ===============================
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.preprocess import ApplyXfm
def test_ApplyXfm_inputs():
input_map = dict(angle_rep=dict(argstr='-anglerep %s',
),
apply_isoxfm=dict(argstr='-applyisoxfm %f',
xor=['apply_xfm'],
),
apply_xfm=dict(argstr='-applyxfm',
requires=['in_matrix_file'],
usedefault=True,
),
args=dict(argstr='%s',
),
bbrslope=dict(argstr='-bbrslope %f',
min_ver='5.0.0',
),
bbrtype=dict(argstr='-bbrtype %s',
min_ver='5.0.0',
),
bgvalue=dict(argstr='-setbackground %f',
),
bins=dict(argstr='-bins %d',
),
coarse_search=dict(argstr='-coarsesearch %d',
units='degrees',
),
cost=dict(argstr='-cost %s',
),
cost_func=dict(argstr='-searchcost %s',
),
datatype=dict(argstr='-datatype %s',
),
display_init=dict(argstr='-displayinit',
),
dof=dict(argstr='-dof %d',
),
echospacing=dict(argstr='-echospacing %f',
min_ver='5.0.0',
),
environ=dict(nohash=True,
usedefault=True,
),
fieldmap=dict(argstr='-fieldmap %s',
min_ver='5.0.0',
),
fieldmapmask=dict(argstr='-fieldmapmask %s',
min_ver='5.0.0',
),
fine_search=dict(argstr='-finesearch %d',
units='degrees',
),
force_scaling=dict(argstr='-forcescaling',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-in %s',
mandatory=True,
position=0,
),
in_matrix_file=dict(argstr='-init %s',
),
in_weight=dict(argstr='-inweight %s',
),
interp=dict(argstr='-interp %s',
),
min_sampling=dict(argstr='-minsampling %f',
units='mm',
),
no_clamp=dict(argstr='-noclamp',
),
no_resample=dict(argstr='-noresample',
),
no_resample_blur=dict(argstr='-noresampblur',
),
no_search=dict(argstr='-nosearch',
),
out_file=dict(argstr='-out %s',
hash_files=False,
name_source=['in_file'],
name_template='%s_flirt',
position=2,
),
out_log=dict(keep_extension=True,
name_source=['in_file'],
name_template='%s_flirt.log',
requires=['save_log'],
),
out_matrix_file=dict(argstr='-omat %s',
hash_files=False,
keep_extension=True,
name_source=['in_file'],
name_template='%s_flirt.mat',
position=3,
),
output_type=dict(),
padding_size=dict(argstr='-paddingsize %d',
units='voxels',
),
pedir=dict(argstr='-pedir %d',
min_ver='5.0.0',
),
ref_weight=dict(argstr='-refweight %s',
),
reference=dict(argstr='-ref %s',
mandatory=True,
position=1,
),
rigid2D=dict(argstr='-2D',
),
save_log=dict(),
schedule=dict(argstr='-schedule %s',
),
searchr_x=dict(argstr='-searchrx %s',
units='degrees',
),
searchr_y=dict(argstr='-searchry %s',
units='degrees',
),
searchr_z=dict(argstr='-searchrz %s',
units='degrees',
),
sinc_width=dict(argstr='-sincwidth %d',
units='voxels',
),
sinc_window=dict(argstr='-sincwindow %s',
),
terminal_output=dict(nohash=True,
),
uses_qform=dict(argstr='-usesqform',
),
verbose=dict(argstr='-verbose %d',
),
wm_seg=dict(argstr='-wmseg %s',
min_ver='5.0.0',
),
wmcoords=dict(argstr='-wmcoords %s',
min_ver='5.0.0',
),
wmnorms=dict(argstr='-wmnorms %s',
min_ver='5.0.0',
),
)
inputs = ApplyXfm.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyXfm_outputs():
output_map = dict(out_file=dict(),
out_log=dict(),
out_matrix_file=dict(),
)
outputs = ApplyXfm.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
# DAY 11
from typing import Dict, Tuple
def read_input():
with open("./input.txt", "r", encoding="utf-8") as f:
return parse_input(f.read())
def parse_input(input_str: str):
ret = {}
for i, line in enumerate(input_str.splitlines(keepends=False)):
for j, char in enumerate(line):
ret[(i,j)] = int(char)
return ret
starting_energy_levels = read_input()
def get_adjacent(c : Tuple[int, int]):
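    # Note: the (0, 0) offset keeps the cell itself in the returned list; this is harmless
    # because a flashing cell is already above 9 and is reset to 0 at the end of the step.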
ret = []
for i in [-1, 0, +1]:
for j in [-1, 0, +1]:
ret.append((c[0] + i, c[1] + j))
return [(i, j) for i, j in ret if i >=0 and i <= 9 and j >= 0 and j <= 9 ]
def simulate_step(starting_levels: Dict[Tuple[int, int], int]):
# Increase levels with 1
levels = {k: v + 1 for k, v in starting_levels.items()}
flashes_we_already_had = set()
# do flashes
while True:
flashes = {k for k, v in levels.items() if v > 9}
if flashes == flashes_we_already_had:
break
for f in flashes:
if f not in flashes_we_already_had:
flashes_we_already_had.add(f)
for a in get_adjacent(f):
levels[a] += 1
# print(flashes_we_already_had)
levels = {k: v if v <=9 else 0 for k, v in levels.items()}
return levels, flashes_we_already_had
# print(starting_energy_levels)
def simulate_steps(starting_levels: Dict[Tuple[int, int], int], steps=100):
num_flashes = 0
levels = starting_levels
for step in range(steps):
levels, flashes = simulate_step(levels)
num_flashes += len(flashes)
# print(step, num_flashes)
return levels, num_flashes
def find_first_step_with_100_flashes(levels: Dict[Tuple[int, int], int]):
step = 0
while True:
levels, flashes = simulate_step(levels)
num_flashes = len(flashes)
step += 1
# print(step, num_flashes)
if num_flashes == 100:
break
return step
print("Part 1")
print(simulate_steps(starting_energy_levels))
print()
print("Part 2")
print(find_first_step_with_100_flashes(starting_energy_levels))
# =============================== new file ===============================
a,b,c=map(int,input().split())
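# Count the days until the running total x reaches at least c, gaining a per day plus a
# bonus b on every 7th day.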
x,d=0,0
while x<c:
d+=1
x+=a
if d%7==0: x+=b
print(d)
# =============================== new file ===============================
import logging
import pytest
from mergify_engine import logs
@pytest.fixture()
def logger_checker(request, caplog):
    # daiquiri removes all handlers during setup; since we want both its nice output and
    # the pytest capture capability, we must add back the pytest handler
logs.setup_logging()
logging.getLogger(None).addHandler(caplog.handler)
yield
for when in ("setup", "call", "teardown"):
messages = [
rec.getMessage()
for rec in caplog.get_records(when)
if rec.levelname in ("CRITICAL", "ERROR")
]
        # NOTE(sileht): The asyncio tasks spawned to automatically close redis connections
        # cleanly are not held by any variable, which makes them hard to track. Since this
        # is annoying for testing, just ignore messages about them.
messages = [
m
for m in messages
if "coro=<ConnectionPool.disconnect_on_idle_time_exceeded()" not in m
]
assert [] == messages
# ======== new file: 3M/W5/2.py (repo: allenalvin333/Hackerrank_Prep) ========
# https://www.hackerrank.com/challenges/three-month-preparation-kit-strong-password/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'minimumNumber' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER n
# 2. STRING password
#
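# Approach: the regex matches maximal runs of each character class (digits, lowercase,
# uppercase, special); each match carries four capture groups, only one of them non-empty.
# zip(*re.findall(...)) transposes those groups into per-class columns, so counting the
# columns that contain any text gives the number of classes already present. The answer is
# the larger of (6 - n), the characters still needed for the minimum length, and
# (4 - classes present), the character classes still missing.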
def minimumNumber(n, password, k = r'(\d){1,}|([a-z]){1,}|([A-Z]){1,}|(\W){1,}'):
return max(6-n, 4-len([1 for z in zip(*re.findall(k, password)) if("".join(z))]))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
password = input()
answer = minimumNumber(n, password)
fptr.write(str(answer) + '\n')
    fptr.close()
# =============================== new file ===============================
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Handling confounds.
.. testsetup::
>>> import os
>>> import pandas as pd
"""
import os
import re
import shutil
import numpy as np
import pandas as pd
from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (
traits,
TraitedSpec,
BaseInterfaceInputSpec,
File,
Directory,
isdefined,
SimpleInterface,
)
LOGGER = logging.getLogger("nipype.interface")
class GatherConfoundsInputSpec(BaseInterfaceInputSpec):
signals = File(exists=True, desc="input signals")
dvars = File(exists=True, desc="file containing DVARS")
std_dvars = File(exists=True, desc="file containing standardized DVARS")
fd = File(exists=True, desc="input framewise displacement")
# rmsd = File(exists=True, desc="input RMS framewise displacement")
tcompcor = File(exists=True, desc="input tCompCorr")
acompcor = File(exists=True, desc="input aCompCorr")
cos_basis = File(exists=True, desc="input cosine basis")
motion = File(exists=True, desc="input motion parameters")
aroma = File(exists=True, desc="input ICA-AROMA")
class GatherConfoundsOutputSpec(TraitedSpec):
confounds_file = File(exists=True, desc="output confounds file")
confounds_list = traits.List(traits.Str, desc="list of headers")
class GatherConfounds(SimpleInterface):
r"""
Combine various sources of confounds in one TSV file
.. testsetup::
>>> from tempfile import TemporaryDirectory
>>> tmpdir = TemporaryDirectory()
>>> os.chdir(tmpdir.name)
.. doctest::
>>> pd.DataFrame({'a': [0.1]}).to_csv('signals.tsv', index=False, na_rep='n/a')
>>> pd.DataFrame({'b': [0.2]}).to_csv('dvars.tsv', index=False, na_rep='n/a')
>>> gather = GatherConfounds()
>>> gather.inputs.signals = 'signals.tsv'
>>> gather.inputs.dvars = 'dvars.tsv'
>>> res = gather.run()
>>> res.outputs.confounds_list
['Global signals', 'DVARS']
>>> pd.read_csv(res.outputs.confounds_file, sep='\s+', index_col=None,
... engine='python') # doctest: +NORMALIZE_WHITESPACE
a b
0 0.1 0.2
.. testcleanup::
>>> tmpdir.cleanup()
"""
input_spec = GatherConfoundsInputSpec
output_spec = GatherConfoundsOutputSpec
def _run_interface(self, runtime):
combined_out, confounds_list = _gather_confounds(
signals=self.inputs.signals,
dvars=self.inputs.dvars,
std_dvars=self.inputs.std_dvars,
fdisp=self.inputs.fd,
# rmsd=self.inputs.rmsd,
tcompcor=self.inputs.tcompcor,
acompcor=self.inputs.acompcor,
cos_basis=self.inputs.cos_basis,
motion=self.inputs.motion,
aroma=self.inputs.aroma,
newpath=runtime.cwd,
)
self._results["confounds_file"] = combined_out
self._results["confounds_list"] = confounds_list
return runtime
class ICAConfoundsInputSpec(BaseInterfaceInputSpec):
in_directory = Directory(
mandatory=True, desc="directory where ICA derivatives are found"
)
skip_vols = traits.Int(desc="number of non steady state volumes identified")
err_on_aroma_warn = traits.Bool(
False, usedefault=True, desc="raise error if aroma fails"
)
class ICAConfoundsOutputSpec(TraitedSpec):
aroma_confounds = traits.Either(
None, File(exists=True, desc="output confounds file extracted from ICA-AROMA")
)
aroma_noise_ics = File(exists=True, desc="ICA-AROMA noise components")
melodic_mix = File(exists=True, desc="melodic mix file")
aroma_metadata = File(exists=True, desc="tabulated ICA-AROMA metadata")
class ICAConfounds(SimpleInterface):
"""Extract confounds from ICA-AROMA result directory
"""
input_spec = ICAConfoundsInputSpec
output_spec = ICAConfoundsOutputSpec
def _run_interface(self, runtime):
(
aroma_confounds,
motion_ics_out,
melodic_mix_out,
aroma_metadata,
) = _get_ica_confounds(
self.inputs.in_directory, self.inputs.skip_vols, newpath=runtime.cwd
)
if self.inputs.err_on_aroma_warn and aroma_confounds is None:
raise RuntimeError("ICA-AROMA failed")
aroma_confounds = self._results["aroma_confounds"] = aroma_confounds
self._results["aroma_noise_ics"] = motion_ics_out
self._results["melodic_mix"] = melodic_mix_out
self._results["aroma_metadata"] = aroma_metadata
return runtime
def _gather_confounds(
signals=None,
dvars=None,
std_dvars=None,
fdisp=None, # rmsd=None,
tcompcor=None,
acompcor=None,
cos_basis=None,
motion=None,
aroma=None,
newpath=None,
):
r"""
Load confounds from the filenames, concatenate together horizontally
and save new file.
>>> from tempfile import TemporaryDirectory
>>> tmpdir = TemporaryDirectory()
>>> os.chdir(tmpdir.name)
>>> pd.DataFrame({'Global Signal': [0.1]}).to_csv('signals.tsv', index=False, na_rep='n/a')
>>> pd.DataFrame({'stdDVARS': [0.2]}).to_csv('dvars.tsv', index=False, na_rep='n/a')
>>> out_file, confound_list = _gather_confounds('signals.tsv', 'dvars.tsv')
>>> confound_list
['Global signals', 'DVARS']
>>> pd.read_csv(out_file, sep='\s+', index_col=None,
... engine='python') # doctest: +NORMALIZE_WHITESPACE
global_signal std_dvars
0 0.1 0.2
>>> tmpdir.cleanup()
"""
def less_breakable(a_string):
""" hardens the string to different envs (i.e., case insensitive, no whitespace, '#' """
return "".join(a_string.split()).strip("#")
# Taken from https://stackoverflow.com/questions/1175208/
# If we end up using it more than just here, probably worth pulling in a well-tested package
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
def _adjust_indices(left_df, right_df):
        # This forces missing values to appear at the beginning of the DataFrame
# instead of the end
index_diff = len(left_df.index) - len(right_df.index)
if index_diff > 0:
right_df.index = range(index_diff, len(right_df.index) + index_diff)
elif index_diff < 0:
left_df.index = range(-index_diff, len(left_df.index) - index_diff)
all_files = []
confounds_list = []
for confound, name in (
(signals, "Global signals"),
(std_dvars, "Standardized DVARS"),
(dvars, "DVARS"),
(fdisp, "Framewise displacement"),
# (rmsd, "Framewise displacement (RMS)"),
(tcompcor, "tCompCor"),
(acompcor, "aCompCor"),
(cos_basis, "Cosine basis"),
(motion, "Motion parameters"),
(aroma, "ICA-AROMA"),
):
if confound is not None and isdefined(confound):
confounds_list.append(name)
if os.path.exists(confound) and os.stat(confound).st_size > 0:
all_files.append(confound)
confounds_data = pd.DataFrame()
for file_name in all_files: # assumes they all have headings already
new = pd.read_csv(file_name, sep="\t")
for column_name in new.columns:
new.rename(
columns={column_name: camel_to_snake(less_breakable(column_name))},
inplace=True,
)
_adjust_indices(confounds_data, new)
confounds_data = pd.concat((confounds_data, new), axis=1)
if newpath is None:
newpath = os.getcwd()
combined_out = os.path.join(newpath, "confounds.tsv")
confounds_data.to_csv(combined_out, sep="\t", index=False, na_rep="n/a")
return combined_out, confounds_list
def _get_ica_confounds(ica_out_dir, skip_vols, newpath=None):
if newpath is None:
newpath = os.getcwd()
# load the txt files from ICA-AROMA
melodic_mix = os.path.join(ica_out_dir, "melodic.ica/melodic_mix")
motion_ics = os.path.join(ica_out_dir, "classified_motion_ICs.txt")
aroma_metadata = os.path.join(ica_out_dir, "classification_overview.txt")
aroma_icstats = os.path.join(ica_out_dir, "melodic.ica/melodic_ICstats")
# Change names of motion_ics and melodic_mix for output
melodic_mix_out = os.path.join(newpath, "MELODICmix.tsv")
motion_ics_out = os.path.join(newpath, "AROMAnoiseICs.csv")
aroma_metadata_out = os.path.join(newpath, "classification_overview.tsv")
    # copy motion_ics file to derivatives name
shutil.copyfile(motion_ics, motion_ics_out)
# -1 since python lists start at index 0
motion_ic_indices = np.loadtxt(motion_ics, dtype=int, delimiter=",", ndmin=1) - 1
melodic_mix_arr = np.loadtxt(melodic_mix, ndmin=2)
# pad melodic_mix_arr with rows of zeros corresponding to number non steadystate volumes
if skip_vols > 0:
zeros = np.zeros([skip_vols, melodic_mix_arr.shape[1]])
melodic_mix_arr = np.vstack([zeros, melodic_mix_arr])
# save melodic_mix_arr
np.savetxt(melodic_mix_out, melodic_mix_arr, delimiter="\t")
# process the metadata so that the IC column entries match the BIDS name of
# the regressor
aroma_metadata = pd.read_csv(aroma_metadata, sep="\t")
aroma_metadata["IC"] = [
"aroma_motion_{}".format(name) for name in aroma_metadata["IC"]
]
aroma_metadata.columns = [
re.sub(r"[ |\-|\/]", "_", c) for c in aroma_metadata.columns
]
# Add variance statistics to metadata
aroma_icstats = pd.read_csv(aroma_icstats, header=None, sep=" ")[[0, 1]] / 100
aroma_icstats.columns = ["model_variance_explained", "total_variance_explained"]
aroma_metadata = pd.concat([aroma_metadata, aroma_icstats], axis=1)
aroma_metadata.to_csv(aroma_metadata_out, sep="\t", index=False)
# Return dummy list of ones if no noise components were found
if motion_ic_indices.size == 0:
LOGGER.warning("No noise components were classified")
return None, motion_ics_out, melodic_mix_out, aroma_metadata_out
# the "good" ics, (e.g., not motion related)
good_ic_arr = np.delete(melodic_mix_arr, motion_ic_indices, 1).T
# return dummy lists of zeros if no signal components were found
if good_ic_arr.size == 0:
LOGGER.warning("No signal components were classified")
return None, motion_ics_out, melodic_mix_out, aroma_metadata_out
# transpose melodic_mix_arr so x refers to the correct dimension
aggr_confounds = np.asarray([melodic_mix_arr.T[x] for x in motion_ic_indices])
# add one to motion_ic_indices to match melodic report.
aroma_confounds = os.path.join(newpath, "AROMAAggrCompAROMAConfounds.tsv")
pd.DataFrame(
aggr_confounds.T,
columns=["aroma_motion_%02d" % (x + 1) for x in motion_ic_indices],
).to_csv(aroma_confounds, sep="\t", index=None)
return aroma_confounds, motion_ics_out, melodic_mix_out, aroma_metadata_out
class FMRISummaryInputSpec(BaseInterfaceInputSpec):
in_func = File(
exists=True,
mandatory=True,
desc="input BOLD time-series (4D file) or dense timeseries CIFTI",
)
in_mask = File(exists=True, desc="3D brain mask")
in_segm = File(exists=True, desc="resampled segmentation")
confounds_file = File(exists=True, desc="BIDS' _confounds.tsv file")
str_or_tuple = traits.Either(
traits.Str,
traits.Tuple(traits.Str, traits.Either(None, traits.Str)),
traits.Tuple(
traits.Str, traits.Either(None, traits.Str), traits.Either(None, traits.Str)
),
)
confounds_list = traits.List(
str_or_tuple,
minlen=1,
desc="list of headers to extract from the confounds_file",
)
tr = traits.Either(None, traits.Float, usedefault=True, desc="the repetition time")
class FMRISummaryOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="written file path")
class FMRISummary(SimpleInterface):
"""
Copy the x-form matrices from `hdr_file` to `out_file`.
"""
input_spec = FMRISummaryInputSpec
output_spec = FMRISummaryOutputSpec
def _run_interface(self, runtime):
from niworkflows.viz.plots import fMRIPlot
self._results["out_file"] = fname_presuffix(
self.inputs.in_func,
suffix="_fmriplot.svg",
use_ext=False,
newpath=runtime.cwd,
)
dataframe = pd.read_csv(
self.inputs.confounds_file,
sep="\t",
index_col=None,
dtype="float32",
na_filter=True,
na_values="n/a",
)
headers = []
units = {}
names = {}
for conf_el in self.inputs.confounds_list:
if isinstance(conf_el, (list, tuple)):
headers.append(conf_el[0])
if conf_el[1] is not None:
units[conf_el[0]] = conf_el[1]
if len(conf_el) > 2 and conf_el[2] is not None:
names[conf_el[0]] = conf_el[2]
else:
headers.append(conf_el)
if not headers:
data = None
units = None
else:
data = dataframe[headers]
colnames = data.columns.ravel().tolist()
for name, newname in list(names.items()):
colnames[colnames.index(name)] = newname
data.columns = colnames
fig = fMRIPlot(
self.inputs.in_func,
mask_file=self.inputs.in_mask if isdefined(self.inputs.in_mask) else None,
seg_file=(self.inputs.in_segm if isdefined(self.inputs.in_segm) else None),
tr=self.inputs.tr,
data=data,
units=units,
).plot()
fig.savefig(self._results["out_file"], bbox_inches="tight")
return runtime
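# A minimal usage sketch (illustrative; the file names and confound selection are
# hypothetical). FMRISummary is a nipype SimpleInterface, so in practice it is wired
# into a workflow, but it can also be run directly:
#
#     summary = FMRISummary(
#         in_func="sub-01_task-rest_bold.nii.gz",
#         confounds_file="confounds.tsv",
#         confounds_list=[("global_signal", None, "GS"), "framewise_displacement"],
#         tr=2.0,
#     )
#     result = summary.run()  # writes sub-01_task-rest_bold_fmriplot.svg to the cwd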
| StarcoderdataPython |
3231144 | <reponame>marlenebDC/pycon
import json
class GraphQLClient:
def __init__(self, client):
self._client = client
def query(self, query, op_name=None, variables=None, headers=None):
"""
Args:
query (string) - GraphQL query to run
op_name (string) - If the query is a mutation or named query, you
must supply the op_name. For anon queries
("{ ... }"), should be None (default).
variables (dict) - If provided, the variables in GraphQL will be
set to this value
Returns:
dict, response from graphql endpoint.
The response has the "data" key.
It will have the "error" key if any error happened.
"""
body = {"query": query}
if op_name:
body["operation_name"] = op_name
if variables:
body["variables"] = variables
headers = headers or {}
resp = self._client.post(
"/graphql", json.dumps(body), content_type="application/json", **headers
)
return json.loads(resp.content.decode())
def force_login(self, user):
self._client.force_login(user)
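if __name__ == "__main__":  # illustrative sketch only
    # In the test suite the constructor argument is a Django test client; the stub
    # below is hypothetical and only mimics the two members GraphQLClient touches
    # (post, and force_login if needed), to show the call shape end to end.
    class _StubClient:
        def post(self, path, body, content_type=None, **headers):
            class _Resp:
                content = b'{"data": {"ping": "pong"}}'
            return _Resp()

    gql = GraphQLClient(_StubClient())
    print(gql.query("{ ping }"))  # -> {'data': {'ping': 'pong'}}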
| StarcoderdataPython |
3395097 | <filename>iliasCorrector/utils.py
from iliasCorrector import app, db
from iliasCorrector.models import Exercise, Submission, File
from flask import g
from sqlalchemy import func
import os
import statistics
def import_grades(exercise, points):
if os.path.isfile(points):
with open(points) as f:
for line in f:
data = line.split(';')
grade = data[1]
remarks = data[2]
if grade == '---':
grade = None
if remarks.strip() == '-- keine Bemerkung --':
remarks = None
submission = exercise.submissions.filter_by(student_ident=data[0]).first()
if not submission:
submission = Submission(exercise_id=exercise.id,
student_ident=data[0], grade=0, remarks='-- keine Abgabe --')
db.session.add(submission)
db.session.commit()
continue
if grade or remarks:
submission.remarks = remarks or submission.remarks
submission.grade = float(grade or '0') or submission.grade
db.session.add(submission)
db.session.commit()
def export_grades(exercise):
lines = []
for submission in exercise.submissions.order_by(func.lower(Submission.student_ident)).all():
grade = submission.grade
if grade is not None:
grade = str(int(grade))
else:
grade = '---'
remarks = submission.remarks
if remarks:
remarks = '"' + remarks.strip().replace('"', "'") + '"'
else:
remarks = '-- keine Bemerkung --'
lines.append(';'.join([submission.student_ident, grade, remarks]))
return lines
def update_exercises():
exercises = next(os.walk(os.path.join(app.config['BASE_DIR'], 'data')))[1]
for exercise in exercises:
if Exercise.query.filter_by(name=exercise).first() is not None:
continue
path = os.path.join(app.config['BASE_DIR'], 'data', exercise)
root = [x[0] for x in os.walk(path)]
dirs = next(os.walk(path))[1]
files = [x[2] for x in os.walk(path)]
exercise_path = root[0]
exercise_name = root[0].split('/')[-1]
students = [x for x in dirs]
if len(students) < 1:
continue
exercise = Exercise(name=exercise_name, path=exercise_path)
db.session.add(exercise)
db.session.commit()
for i in range(len(students)):
submission = Submission(student_ident=students[i], exercise=exercise)
db.session.add(submission)
db.session.commit()
for f in files[i + 1]:
stored_file = File(submission=submission, name=f, path=root[i + 1])
db.session.add(stored_file)
db.session.commit()
# import grades
import_grades(exercise, os.path.join(path, 'points.csv'))
def submission_median(submissions):
grades = list(filter((None).__ne__, [s.grade for s in submissions]))
if grades:
return statistics.median(grades)
return "Can't be determined yet"
def submission_mean(submissions):
grades = list(filter((None).__ne__, [s.grade for s in submissions]))
if grades:
return statistics.mean(grades)
return "Can't be determined yet"
def split_ident(ident):
data = ident.split('_')
matr = int(data[-1])
last = data[0]
first = ' '.join(data[1:-2])
return first, last, matr
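# Illustration (assumes submission folders are named
# "<Lastname>_<First names>_<login>_<matriculation number>", which is what the
# parsing above implies):
#     split_ident("Doe_Jane_jdoe_1234567")  ->  ("Jane", "Doe", 1234567)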
| StarcoderdataPython |
34354 | from bottle import request, response, HTTPResponse
import os, datetime, re
import json as JSON
import jwt
class auth:
def gettoken(mypass):
secret = str(os.getenv('API_SCRT', '!@ws4RT4ws212@#%'))
password = str(os.getenv('API_PASS', 'password'))
if mypass == password:
exp = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
ret = jwt.encode({'exp': exp, 'password': hash(password + secret)}, secret).decode('utf-8')
return [True, {'exp': str(exp), "token": str(ret)}, None, {"token": str(ret)}]
return [False, "Invalid password", 403]
def verify(token):
secret = str(os.getenv('API_SCRT', '!@ws4RT4ws212@#%'))
password = str(os.getenv('API_PASS', 'password'))
try:
decoded = jwt.decode(token, secret, leeway=10, algorithms=['HS256'])
if decoded["password"] != hash(password + secret):
                raise ValueError("token payload mismatch")
except jwt.ExpiredSignature:
return [False, "Signature expired", 403]
except:
return [False, "Invalid token", 403]
return [True, None, None]
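# Illustrative round trip (token issuance and verification read the API_PASS and
# API_SCRT environment variables, falling back to the defaults shown above):
#     ok, payload, _, cookie = auth.gettoken("password")
#     auth.verify(payload["token"])   # -> [True, None, None] while unexpired
# Note: the payload embeds Python's built-in hash(), which is randomized per process
# on Python 3, so a token only verifies in the issuing process unless PYTHONHASHSEED
# is pinned.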
class ret:
    def __init__(self, route=None, params=None, header=None, cookie=None, anon=None):
self.data = {
'queryInfos' : {
'route': route,
'params': params,
'header': header,
'cookie': cookie
},
'status' : 200,
'error' : None,
'data' : None,
'succes' : False,
'mod' : None
}
self.err = False
self.anon = anon
def get(self):
return self.data
def ret(self):
self.__anon()
self.data['mod'] = self.anon
if self.data['error'] is None :
self.data['succes'] = True
self.data['status'] = 200
return self.data
def __anon(self):
level = self.__getannon()
if level == 0 :
return
if level == 2 :
if "queryInfos" in self.data:
del self.data["queryInfos"]
return
forb = ["content-type", "connection", "x-real-ip", "x-forwarded-for",
"x-forwarded-proto", "x-forwarded-ssl", "x-forwarded-port",
"user-agent", "accept", "cache-control", "accept-encoding",
"cookie", "content-length"]
for i in self.data["queryInfos"]:
if i is None:
continue
for i2 in forb:
if self.data["queryInfos"][i] is None or i2 not in self.data["queryInfos"][i]:
continue
del self.data["queryInfos"][i][i2]
def add_error(self, error = None, code = None):
self.data['error'] = error
self.data['status'] = code
self.data['data'] = None
if code is None:
self.add_error("Bad code input", 500)
return 1
if error is None:
self.add_error("Bad error input", 500)
return 1
self.err = True
return self.ret()
def add_data(self, data = None):
self.data['data'] = data
self.set_code(200)
if data is None:
self.add_error("Bad data input", 500)
return 1
return 0
def set_code(self, code = None):
self.data['status'] = code
if code is None:
self.add_error("Bad code input", 500)
return 1
return 0
def __getannon(self):
self.anon = self.anon if self.anon is not None else str(os.getenv('API_MOD', 'PROD'))
return self.__anon_to_lvl(self.anon)
def __anon_to_lvl(self, anon = 'PROD'):
mod = {
"PROD": 2,
"DEV" : 1,
"TEST": 0
}
if anon in mod:
return mod[anon]
return 2
class check:
def contain(json, array, type = "body"):
type = type.upper()
if json is None:
return [False, "Invalid json received ", 400]
for i in array:
if isinstance(i, list):
if not check.contain_opt(json, i):
return [False, "[" + type +"] Missing on of parameters: " + JSON.dumps(i), 400]
json = check.setnoneopt(json, i)
elif i not in json:
return [False, "[" + type +"] Missing parameter : " + i, 400]
elif json[i] is None:
return [False, "[" + type +"] Null parameter : " + i, 400]
return [True, json, 200]
def contain_opt(json, arr_opt):
for i in arr_opt:
if isinstance(i, list):
                if check.contain(json, i)[0]:  # contain() returns [ok, payload, code]; test the flag
return True
elif i in json:
return True
return False
def setnoneopt(json, arr_opt):
for i in arr_opt:
if i not in json:
json[i] = None
return json
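    # Illustration of the validation helpers above (plain dicts suffice): a nested
    # list inside `array` means "at least one of these keys must be present", and
    # absent keys from such a group are filled with None:
    #     check.contain({"name": "x", "email": "y"}, ["name", ["email", "phone"]])
    #     -> [True, {"name": "x", "email": "y", "phone": None}, 200]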
def json(request):
res = {}
#try:
res = request.json
#except:
# pass
return res
def head_json(request, cookie = None):
res = {}
try:
for i in cookie:
res[i.lower()] = cookie[i]
for i in request.headers.keys():
res[i.lower()] = request.headers.raw(i)
except:
pass
return res
def cookies_json(request):
res = {}
try:
cookie = request.headers.raw("Cookie")
for i in cookie.split(";"):
i = i.split("=")
res[i[0].strip()] = i[1]
except:
pass
return res
def route_json(request):
res = {}
dat = request.path[1:].split('/')
i = 0
while i < len(dat) - 1:
res[str(dat[i])] = str(dat[i + 1])
i += 1
return res
class callnext:
def __init__(self, req, resp = None, err = None, anonlvl = None):
self.pr = check.json(req)
self.ck = check.cookies_json(req)
self.hd = check.head_json(req, self.ck)
self.rt = check.route_json(req)
self.get = dict(req.query.decode())
self.private = {}
self.cookie = {}
self.toret = ret(req.path, self.pr, self.hd, self.ck, anonlvl)
self.req = req
self.resp = resp
self.err = err
def call(self, nextc):
if self.req.method == 'OPTIONS':
return {}
if len(nextc) == 0:
return self.ret()
return nextc[0](self, nextc)
def call_next(self, nextc, err = [True]):
if not err[0]:
self.resp.status = err[2]
return self.toret.add_error(err[1], err[2])
nextc.pop(0)
if len(nextc) == 0:
if len(err) >= 4 and err[3] is not None:
self.__merge_cookie(err[3])
self.toret.add_data(err[1])
return self.ret()
return nextc[0](self, nextc)
def ret(self):
if self.resp is not None:
for cookie in self.cookie:
self.resp.set_cookie(cookie, self.cookie[cookie], path='/')
self.resp.content_type = 'application/json'
self.resp.status = self.toret.data['status']
return self.toret.ret()
return self.toret.ret()
def __merge_cookie(self, cookies):
        self.cookie = {**self.cookie, **cookies}
| StarcoderdataPython |
4835276 | # -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of acl command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import SetAclExceptionHandler
from gslib.command import SetAclFuncWrapper
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.storage_url import UrlsAreForSingleProvider
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils import acl_helper
from gslib.utils.constants import NO_MAX
from gslib.utils.retry_util import Retry
_SET_SYNOPSIS = """
gsutil acl set [-f] [-r] [-a] file-or-canned_acl_name url...
"""
_GET_SYNOPSIS = """
gsutil acl get url
"""
_CH_SYNOPSIS = """
gsutil acl ch [-f] [-r] <grant>... url...
where each <grant> is one of the following forms:
-u <id|email>:<perm>
-g <id|email|domain|All|AllAuth>:<perm>
-p <viewers|editors|owners>-<project number>:<perm>
-d <id|email|domain|All|AllAuth|<viewers|editors|owners>-<project number>>:<perm>
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "acl get" command gets the ACL text for a bucket or object, which you can
save and edit for the acl set command.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "acl set" command allows you to set an Access Control List on one or
more buckets and objects. The simplest way to use it is to specify one of
the canned ACLs, e.g.,:
gsutil acl set private gs://bucket
If you want to make an object or bucket publicly readable or writable, it is
recommended to use "acl ch", to avoid accidentally removing OWNER permissions.
See the "acl ch" section for details.
See `Predefined ACLs
<https://cloud.google.com/storage/docs/access-control/lists#predefined-acl>`_
for a list of canned ACLs.
If you want to define more fine-grained control over your data, you can
retrieve an ACL using the "acl get" command, save the output to a file, edit
the file, and then use the "acl set" command to set that ACL on the buckets
and/or objects. For example:
gsutil acl get gs://bucket/file.txt > acl.txt
Make changes to acl.txt such as adding an additional grant, then:
gsutil acl set acl.txt gs://cats/file.txt
Note that you can set an ACL on multiple buckets or objects at once,
for example:
gsutil acl set acl.txt gs://bucket/*.jpg
If you have a large number of ACLs to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m acl set acl.txt gs://bucket/*.jpg
Note that multi-threading/multi-processing is only done when the named URLs
refer to objects, which happens either if you name specific objects or
if you enumerate objects by using an object wildcard or specifying
the acl -r flag.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "acl set" request recursively, to all objects under
the specified URL.
-a Performs "acl set" request on all object versions.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. If some of the ACLs
couldn't be set, gsutil's exit status will be non-zero even if
this flag is set. This option is implicitly set when running
"gsutil -m acl...".
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "acl ch" (or "acl change") command updates access control lists, similar
in spirit to the Linux chmod command. You can specify multiple access grant
additions and deletions in a single command run; all changes will be made
atomically to each object in turn. For example, if the command requests
deleting one grant and adding a different grant, the ACLs being updated will
never be left in an intermediate state where one grant has been deleted but
the second grant not yet added. Each change specifies a user or group grant
to add or delete, and for grant additions, one of R, W, O (for the
permission to be granted). A more formal description is provided in a later
section; below we provide examples.
<B>CH EXAMPLES</B>
Examples for "ch" sub-command:
Grant anyone on the internet READ access to the object example-object:
gsutil acl ch -u AllUsers:R gs://example-bucket/example-object
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see "gsutil help setmeta".
Grant anyone on the internet WRITE access to the bucket example-bucket
(WARNING: this is not recommended as you will be responsible for the content):
gsutil acl ch -u AllUsers:W gs://example-bucket
Grant the user <EMAIL> WRITE access to the bucket
example-bucket:
gsutil acl ch -u <EMAIL>:WRITE gs://example-bucket
Grant the group <EMAIL> OWNER access to all jpg files in
the top level of example-bucket:
gsutil acl ch -g <EMAIL>:O gs://example-bucket/*.jpg
Grant the owners of project example-project WRITE access to the bucket
example-bucket:
gsutil acl ch -p owners-example-project:W gs://example-bucket
NOTE: You can replace 'owners' with 'viewers' or 'editors' to grant access
to a project's viewers/editors respectively.
Remove access to the bucket example-bucket for the viewers of project number
12345:
gsutil acl ch -d viewers-12345 gs://example-bucket
NOTE: You cannot remove the project owners group from ACLs of gs:// buckets in
the given project. Attempts to do so will appear to succeed, but the service
will add the project owners group into the new set of ACLs before applying it.
Note that removing a project requires you to reference the project by
its number (which you can see with the acl get command) as opposed to its
project ID string.
Grant the user with the specified canonical ID READ access to all objects
in example-bucket that begin with folder/:
gsutil acl ch -r \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE785ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant the service account <EMAIL> WRITE access to
the bucket example-bucket:
gsutil acl ch -u <EMAIL>:W gs://example-bucket
Grant all users from the `G Suite
<https://www.google.com/work/apps/business/>`_ domain my-domain.org READ
access to the bucket gcs.my-domain.org:
gsutil acl ch -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by <EMAIL> from the bucket
example-bucket:
gsutil acl ch -d <EMAIL> gs://example-bucket
If you have a large number of objects to update, enabling multi-threading
with the gsutil -m flag can significantly improve performance. The
following command adds OWNER for <EMAIL> using
multi-threading:
gsutil -m acl ch -r -u <EMAIL>:O gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant OWNER to <EMAIL>, for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m acl ch -r -g my-domain.org:R -g AllAuth:R \\
-u <EMAIL>:O gs://my-bucket/ gs://my-other-bucket
<B>CH ROLES</B>
You may specify the following roles with either their shorthand or
their full name:
R: READ
W: WRITE
O: OWNER
For more information on these roles and the access they grant, see the
permissions section of the `Access Control Lists page
<https://cloud.google.com/storage/docs/access-control/lists#permissions>`_.
<B>CH ENTITIES</B>
There are four different entity types: Users, Groups, All Authenticated Users,
and All Users.
Users are added with -u and a plain ID or email address, as in
"-u <EMAIL>:r". Note: Service Accounts are considered to be users.
Groups are like users, but specified with the -g flag, as in
"-g <EMAIL>:fc". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:O". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing roles is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many entities' roles can be specified on the same command line, allowing
bundled changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-d Remove all roles associated with the matching entity.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. With this option the
gsutil exit status will be 0 even if some ACLs couldn't be
changed.
-g Add or modify a group entity's role.
-p Add or modify a project viewers/editors/owners role.
-R, -r Performs acl ch request recursively, to all objects under the
specified URL.
-u Add or modify a user entity's role.
"""
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
_CH_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = ("""
The acl command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION]))
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
def _ApplyExceptionHandler(cls, exception):
cls.logger.error('Encountered a problem: %s', exception)
cls.everything_set_okay = False
def _ApplyAclChangesWrapper(cls, url_or_expansion_result, thread_state=None):
cls.ApplyAclChanges(url_or_expansion_result, thread_state=thread_state)
class AclCommand(Command):
"""Implementation of gsutil acl command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'acl',
command_name_aliases=['getacl', 'setacl', 'chacl'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='afRrg:u:d:p:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [
CommandArgument.MakeFileURLOrCannedACLArgument(),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
'get': [CommandArgument.MakeNCloudURLsArgument(1)],
'ch': [CommandArgument.MakeZeroOrMoreCloudURLsArgument()],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='acl',
help_name_aliases=['getacl', 'setacl', 'chmod', 'chacl'],
help_type='command_help',
help_one_line_summary='Get, set, or change bucket and/or object ACLs',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
'ch': _ch_help_text
},
)
def _CalculateUrlsStartArg(self):
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if (self.args[0].lower() == 'set') or (self.command_alias_used == 'setacl'):
return 1
else:
return 0
def _SetAcl(self):
"""Parses options and sets ACLs on the specified buckets/objects."""
self.continue_on_error = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
try:
self.SetAclCommandHelper(SetAclFuncWrapper, SetAclExceptionHandler)
except AccessDeniedException as unused_e:
self._WarnServiceAccounts()
raise
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _ChAcl(self):
"""Parses options and changes ACLs on the specified buckets/objects."""
self.parse_versions = True
self.changes = []
self.continue_on_error = False
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-f':
self.continue_on_error = True
elif o == '-g':
if 'gserviceaccount.com' in a:
raise CommandException(
'Service accounts are considered users, not groups; please use '
'"gsutil acl ch -u" instead of "gsutil acl ch -g"')
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.GROUP))
elif o == '-p':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.PROJECT))
elif o == '-u':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.USER))
elif o == '-d':
self.changes.append(acl_helper.AclDel(a))
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
if not self.changes:
raise CommandException('Please specify at least one access change '
'with the -g, -u, or -d flags')
if (not UrlsAreForSingleProvider(self.args) or
StorageUrlFromString(self.args[0]).scheme != 'gs'):
raise CommandException(
'The "{0}" command can only be used with gs:// URLs'.format(
self.command_name))
self.everything_set_okay = True
self.ApplyAclFunc(_ApplyAclChangesWrapper,
_ApplyExceptionHandler,
self.args,
object_fields=['acl', 'generation', 'metageneration'])
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _RaiseForAccessDenied(self, url):
self._WarnServiceAccounts()
raise CommandException('Failed to set acl for %s. Please ensure you have '
'OWNER-role access to this resource.' % url)
@Retry(ServiceException, tries=3, timeout_secs=1)
def ApplyAclChanges(self, name_expansion_result, thread_state=None):
"""Applies the changes in self.changes to the provided URL.
Args:
name_expansion_result: NameExpansionResult describing the target object.
thread_state: If present, gsutil Cloud API instance to apply the changes.
"""
if thread_state:
gsutil_api = thread_state
else:
gsutil_api = self.gsutil_api
url = name_expansion_result.expanded_storage_url
if url.IsBucket():
bucket = gsutil_api.GetBucket(url.bucket_name,
provider=url.scheme,
fields=['acl', 'metageneration'])
current_acl = bucket.acl
elif url.IsObject():
gcs_object = encoding.JsonToMessage(apitools_messages.Object,
name_expansion_result.expanded_result)
current_acl = gcs_object.acl
if not current_acl:
self._RaiseForAccessDenied(url)
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
try:
if url.IsBucket():
preconditions = Preconditions(meta_gen_match=bucket.metageneration)
bucket_metadata = apitools_messages.Bucket(acl=current_acl)
gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
preconditions=preconditions,
provider=url.scheme,
fields=['id'])
else: # Object
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
object_metadata = apitools_messages.Object(acl=current_acl)
try:
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=url.generation,
fields=['id'])
except PreconditionException as e:
# Special retry case where we want to do an additional step, the read
# of the read-modify-write cycle, to fetch the correct object
# metadata before reattempting ACL changes.
self._RefetchObjectMetadataAndApplyAclChanges(url, gsutil_api)
self.logger.info('Updated ACL on %s', url)
except BadRequestException as e:
# Don't retry on bad requests, e.g. invalid email address.
raise CommandException('Received bad request from server: %s' % str(e))
except AccessDeniedException:
self._RaiseForAccessDenied(url)
except PreconditionException as e:
# For objects, retry attempts should have already been handled.
if url.IsObject():
raise CommandException(str(e))
# For buckets, raise PreconditionException and continue to next retry.
raise e
@Retry(PreconditionException, tries=3, timeout_secs=1)
def _RefetchObjectMetadataAndApplyAclChanges(self, url, gsutil_api):
"""Reattempts object ACL changes after a PreconditionException."""
gcs_object = gsutil_api.GetObjectMetadata(
url.bucket_name,
url.object_name,
provider=url.scheme,
fields=['acl', 'generation', 'metageneration'])
current_acl = gcs_object.acl
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
object_metadata = apitools_messages.Object(acl=current_acl)
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=gcs_object.generation,
fields=['id'])
def _ApplyAclChangesAndReturnChangeCount(self, storage_url, acl_message):
modification_count = 0
for change in self.changes:
modification_count += change.Execute(storage_url, acl_message, 'acl',
self.logger)
return modification_count
def RunCommand(self):
"""Command entry point for the acl command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
self.def_acl = False
if action_subcommand == 'get':
metrics.LogCommandParams(subcommands=[action_subcommand])
self.GetAndPrintAcl(self.args[0])
elif action_subcommand == 'set':
metrics.LogCommandParams(subcommands=[action_subcommand])
self._SetAcl()
elif action_subcommand in ('ch', 'change'):
metrics.LogCommandParams(subcommands=[action_subcommand])
self._ChAcl()
else:
raise CommandException(
('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help acl".') % (action_subcommand, self.command_name))
return 0
| StarcoderdataPython |
1631225 | from flask import Blueprint
from flask_restful import Api, Resource
from app.api.v1.views import CreateParcels, AllOrders, SpecificOrder, UserLogin,UserSignup, CancelOrder, GetOneOrder
v1 = Blueprint('v1', __name__, url_prefix='/api/v1')
api = Api(v1)
"""register the blueprints"""
api.add_resource(CreateParcels, "/parcel", strict_slashes=False)
api.add_resource(AllOrders, "/parcels", strict_slashes=False)
api.add_resource(SpecificOrder, '/parcels/<int:order_id>', strict_slashes=False)
api.add_resource(UserSignup, "/users/signup", strict_slashes=False)
api.add_resource(UserLogin, "/users/login", strict_slashes=False)
api.add_resource(CancelOrder, "/parcels/cancel/<int:order_id>", strict_slashes=False)
api.add_resource(GetOneOrder, "/parcels/userorder/<receiver_name>", strict_slashes=False)
| StarcoderdataPython |
4830241 | <reponame>limeonion/Python-Programming
'''
url= https://www.hackerrank.com/challenges/python-tuples/problem?h_r=next-challenge&h_v=zen
'''
n = int(input())
integer_list = map(int, input().split())
print(hash(tuple(integer_list)))
| StarcoderdataPython |
3345397 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""An implementation of the session and presentation layers as used in the Debug
Adapter Protocol (DAP): channels and their lifetime, JSON messages, requests,
responses, and events.
https://microsoft.github.io/debug-adapter-protocol/overview#base-protocol
"""
import collections
import contextlib
import functools
import itertools
import os
import socket
import sys
import threading
from debugpy.common import compat, fmt, json, log
from debugpy.common.compat import unicode
class JsonIOError(IOError):
"""Indicates that a read or write operation on JsonIOStream has failed.
"""
def __init__(self, *args, **kwargs):
stream = kwargs.pop("stream")
cause = kwargs.pop("cause", None)
if not len(args) and cause is not None:
args = [str(cause)]
super(JsonIOError, self).__init__(*args, **kwargs)
self.stream = stream
"""The stream that couldn't be read or written.
Set by JsonIOStream.read_json() and JsonIOStream.write_json().
JsonMessageChannel relies on this value to decide whether a NoMoreMessages
instance that bubbles up to the message loop is related to that loop.
"""
self.cause = cause
"""The underlying exception, if any."""
class NoMoreMessages(JsonIOError, EOFError):
"""Indicates that there are no more messages that can be read from or written
to a stream.
"""
def __init__(self, *args, **kwargs):
args = args if len(args) else ["No more messages"]
super(NoMoreMessages, self).__init__(*args, **kwargs)
class JsonIOStream(object):
"""Implements a JSON value stream over two byte streams (input and output).
Each value is encoded as a DAP packet, with metadata headers and a JSON payload.
"""
MAX_BODY_SIZE = 0xFFFFFF
json_decoder_factory = json.JsonDecoder
"""Used by read_json() when decoder is None."""
json_encoder_factory = json.JsonEncoder
"""Used by write_json() when encoder is None."""
@classmethod
def from_stdio(cls, name="stdio"):
"""Creates a new instance that receives messages from sys.stdin, and sends
them to sys.stdout.
On Win32, this also sets stdin and stdout to binary mode, since the protocol
requires that to work properly.
"""
if sys.version_info >= (3,):
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
else:
stdin = sys.stdin
stdout = sys.stdout
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(stdin.fileno(), os.O_BINARY)
msvcrt.setmode(stdout.fileno(), os.O_BINARY)
return cls(stdin, stdout, name)
@classmethod
def from_process(cls, process, name="stdio"):
"""Creates a new instance that receives messages from process.stdin, and sends
them to process.stdout.
"""
return cls(process.stdout, process.stdin, name)
@classmethod
def from_socket(cls, sock, name=None):
"""Creates a new instance that sends and receives messages over a socket.
"""
sock.settimeout(None) # make socket blocking
if name is None:
name = repr(sock)
# TODO: investigate switching to buffered sockets; readline() on unbuffered
# sockets is very slow! Although the implementation of readline() itself is
# native code, it calls read(1) in a loop - and that then ultimately calls
# SocketIO.readinto(), which is implemented in Python.
socket_io = sock.makefile("rwb", 0)
# SocketIO.close() doesn't close the underlying socket.
def cleanup():
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
sock.close()
return cls(socket_io, socket_io, name, cleanup)
def __init__(self, reader, writer, name=None, cleanup=lambda: None):
"""Creates a new JsonIOStream.
reader must be a BytesIO-like object, from which incoming messages will be
read by read_json().
writer must be a BytesIO-like object, into which outgoing messages will be
written by write_json().
cleanup must be a callable; it will be invoked without arguments when the
stream is closed.
reader.readline() must treat "\n" as the line terminator, and must leave "\r"
as is - it must not replace "\r\n" with "\n" automatically, as TextIO does.
"""
if name is None:
name = fmt("reader={0!r}, writer={1!r}", reader, writer)
self.name = name
self._reader = reader
self._writer = writer
self._cleanup = cleanup
self._closed = False
def close(self):
"""Closes the stream, the reader, and the writer.
"""
if self._closed:
return
self._closed = True
log.debug("Closing {0} message stream", self.name)
try:
try:
# Close the writer first, so that the other end of the connection has
# its message loop waiting on read() unblocked. If there is an exception
# while closing the writer, we still want to try to close the reader -
# only one exception can bubble up, so if both fail, it'll be the one
# from reader.
try:
self._writer.close()
finally:
if self._reader is not self._writer:
self._reader.close()
finally:
self._cleanup()
except Exception:
# On Python 2, close() will raise an exception if there is a concurrent
# read() or write(), which is a common and expected occurrence with
# JsonMessageChannel, so don't even bother logging it.
if sys.version_info >= (3,):
log.reraise_exception(
"Error while closing {0} message stream", self.name
)
def _log_message(self, dir, data, logger=log.debug):
format_string = "{0} {1} " + (
"{2!j:indent=None}" if isinstance(data, list) else "{2!j}"
)
return logger(format_string, self.name, dir, data)
def _read_line(self, reader):
line = b""
while True:
try:
line += reader.readline()
except Exception as exc:
raise NoMoreMessages(str(exc), stream=self)
if not line:
raise NoMoreMessages(stream=self)
if line.endswith(b"\r\n"):
line = line[0:-2]
return line
def read_json(self, decoder=None):
"""Read a single JSON value from reader.
Returns JSON value as parsed by decoder.decode(), or raises NoMoreMessages
if there are no more values to be read.
"""
decoder = decoder if decoder is not None else self.json_decoder_factory()
reader = self._reader
read_line = functools.partial(self._read_line, reader)
# If any error occurs while reading and parsing the message, log the original
# raw message data as is, so that it's possible to diagnose missing or invalid
# headers, encoding issues, JSON syntax errors etc.
def log_message_and_reraise_exception(format_string="", *args, **kwargs):
if format_string:
format_string += "\n\n"
format_string += "{name} -->\n{raw_lines}"
raw_lines = b"".join(raw_chunks).split(b"\n")
raw_lines = "\n".join(repr(line) for line in raw_lines)
log.reraise_exception(
format_string, *args, name=self.name, raw_lines=raw_lines, **kwargs
)
raw_chunks = []
headers = {}
while True:
try:
line = read_line()
except Exception:
# Only log it if we have already read some headers, and are looking
# for a blank line terminating them. If this is the very first read,
# there's no message data to log in any case, and the caller might
# be anticipating the error - e.g. NoMoreMessages on disconnect.
if headers:
log_message_and_reraise_exception(
"Error while reading message headers:"
)
else:
raise
raw_chunks += [line, b"\n"]
if line == b"":
break
key, _, value = line.partition(b":")
headers[key] = value
try:
length = int(headers[b"Content-Length"])
if not (0 <= length <= self.MAX_BODY_SIZE):
raise ValueError
except (KeyError, ValueError):
try:
raise IOError("Content-Length is missing or invalid:")
except Exception:
log_message_and_reraise_exception()
body_start = len(raw_chunks)
body_remaining = length
while body_remaining > 0:
try:
chunk = reader.read(body_remaining)
if not chunk:
raise EOFError
except Exception as exc:
# Not logged due to https://github.com/microsoft/ptvsd/issues/1699
raise NoMoreMessages(str(exc), stream=self)
raw_chunks.append(chunk)
body_remaining -= len(chunk)
assert body_remaining == 0
body = b"".join(raw_chunks[body_start:])
try:
body = body.decode("utf-8")
except Exception:
log_message_and_reraise_exception()
try:
body = decoder.decode(body)
except Exception:
log_message_and_reraise_exception()
# If parsed successfully, log as JSON for readability.
self._log_message("-->", body)
return body
def write_json(self, value, encoder=None):
"""Write a single JSON value into writer.
Value is written as encoded by encoder.encode().
"""
if self._closed:
# Don't log this - it's a common pattern to write to a stream while
# anticipating EOFError from it in case it got closed concurrently.
raise NoMoreMessages(stream=self)
encoder = encoder if encoder is not None else self.json_encoder_factory()
writer = self._writer
# Format the value as a message, and try to log any failures using as much
# information as we already have at the point of the failure. For example,
# if it fails after it is serialized to JSON, log that JSON.
try:
body = encoder.encode(value)
except Exception:
raise self._log_message("<--", value, logger=log.exception)
if not isinstance(body, bytes):
body = body.encode("utf-8")
header = fmt("Content-Length: {0}\r\n\r\n", len(body))
header = header.encode("ascii")
data = header + body
data_written = 0
try:
while data_written < len(data):
written = writer.write(data[data_written:])
# On Python 2, socket.makefile().write() does not properly implement
# BytesIO.write(), and always returns None instead of the number of
# bytes written - but also guarantees that it is always a full write.
if written is None:
break
data_written += written
writer.flush()
except Exception as exc:
self._log_message("<--", value, logger=log.exception)
raise JsonIOError(stream=self, cause=exc)
self._log_message("<--", value)
def __repr__(self):
return fmt("{0}({1!r})", type(self).__name__, self.name)
class MessageDict(collections.OrderedDict):
"""A specialized dict that is used for JSON message payloads - Request.arguments,
Response.body, and Event.body.
For all members that normally throw KeyError when a requested key is missing, this
dict raises InvalidMessageError instead. Thus, a message handler can skip checks
for missing properties, and just work directly with the payload on the assumption
that it is valid according to the protocol specification; if anything is missing,
it will be reported automatically in the proper manner.
If the value for the requested key is itself a dict, it is returned as is, and not
automatically converted to MessageDict. Thus, to enable convenient chaining - e.g.
d["a"]["b"]["c"] - the dict must consistently use MessageDict instances rather than
vanilla dicts for all its values, recursively. This is guaranteed for the payload
of all freshly received messages (unless and until it is mutated), but there is no
such guarantee for outgoing messages.
"""
def __init__(self, message, items=None):
assert message is None or isinstance(message, Message)
if items is None:
super(MessageDict, self).__init__()
else:
super(MessageDict, self).__init__(items)
self.message = message
"""The Message object that owns this dict.
For any instance exposed via a Message object corresponding to some incoming
message, it is guaranteed to reference that Message object. There is no similar
guarantee for outgoing messages.
"""
def __repr__(self):
return fmt("{0!j}", self)
def __call__(self, key, validate, optional=False):
"""Like get(), but with validation.
The item is first retrieved as if with self.get(key, default=()) - the default
value is () rather than None, so that JSON nulls are distinguishable from
missing properties.
If optional=True, and the value is (), it's returned as is. Otherwise, the
item is validated by invoking validate(item) on it.
If validate=False, it's treated as if it were (lambda x: x) - i.e. any value
is considered valid, and is returned unchanged. If validate is a type or a
tuple, it's treated as json.of_type(validate). Otherwise, if validate is not
callable(), it's treated as json.default(validate).
If validate() returns successfully, the item is substituted with the value
it returns - thus, the validator can e.g. replace () with a suitable default
value for the property.
If validate() raises TypeError or ValueError, raises InvalidMessageError with
        the same text that applies_to(self.message).
See debugpy.common.json for reusable validators.
"""
if not validate:
validate = lambda x: x
elif isinstance(validate, type) or isinstance(validate, tuple):
validate = json.of_type(validate, optional=optional)
elif not callable(validate):
validate = json.default(validate)
value = self.get(key, ())
try:
value = validate(value)
except (TypeError, ValueError) as exc:
message = Message if self.message is None else self.message
err = fmt("{0}", exc)
if not err.startswith("["):
err = " " + err
raise message.isnt_valid("{0!j}{1}", key, err)
return value
def _invalid_if_no_key(func):
def wrap(self, key, *args, **kwargs):
try:
return func(self, key, *args, **kwargs)
except KeyError:
message = Message if self.message is None else self.message
raise message.isnt_valid("missing property {0!r}", key)
return wrap
__getitem__ = _invalid_if_no_key(collections.OrderedDict.__getitem__)
__delitem__ = _invalid_if_no_key(collections.OrderedDict.__delitem__)
pop = _invalid_if_no_key(collections.OrderedDict.pop)
del _invalid_if_no_key
def _payload(value):
"""JSON validator for message payload.
If that value is missing or null, it is treated as if it were {}.
"""
if value is not None and value != ():
if isinstance(value, dict): # can be int, str, list...
assert isinstance(value, MessageDict)
return value
# Missing payload. Construct a dummy MessageDict, and make it look like it was
# deserialized. See JsonMessageChannel._parse_incoming_message for why it needs
# to have associate_with().
def associate_with(message):
value.message = message
value = MessageDict(None)
value.associate_with = associate_with
return value
class Message(object):
"""Represents a fully parsed incoming or outgoing message.
https://microsoft.github.io/debug-adapter-protocol/specification#protocolmessage
"""
def __init__(self, channel, seq, json=None):
self.channel = channel
self.seq = seq
"""Sequence number of the message in its channel.
This can be None for synthesized Responses.
"""
self.json = json
"""For incoming messages, the MessageDict containing raw JSON from which
this message was originally parsed.
"""
def __str__(self):
return fmt("{0!j}", self.json) if self.json is not None else repr(self)
def describe(self):
"""A brief description of the message that is enough to identify it.
Examples:
'#1 request "launch" from IDE'
'#2 response to #1 request "launch" from IDE'.
"""
raise NotImplementedError
@property
def payload(self):
"""Payload of the message - self.body or self.arguments, depending on the
message type.
"""
raise NotImplementedError
def __call__(self, *args, **kwargs):
"""Same as self.payload(...)."""
return self.payload(*args, **kwargs)
def __contains__(self, key):
"""Same as (key in self.payload)."""
return key in self.payload
def is_event(self, *event):
"""Returns True if this message is an Event of one of the specified types.
"""
if not isinstance(self, Event):
return False
return event == () or self.event in event
def is_request(self, *command):
"""Returns True if this message is a Request of one of the specified types.
"""
if not isinstance(self, Request):
return False
return command == () or self.command in command
def is_response(self, *command):
"""Returns True if this message is a Response to a request of one of the
specified types.
"""
if not isinstance(self, Response):
return False
return command == () or self.request.command in command
def error(self, exc_type, format_string, *args, **kwargs):
"""Returns a new exception of the specified type from the point at which it is
invoked, with the specified formatted message as the reason.
The resulting exception will have its cause set to the Message object on which
error() was called. Additionally, if that message is a Request, a failure
response is immediately sent.
"""
assert issubclass(exc_type, MessageHandlingError)
silent = kwargs.pop("silent", False)
reason = fmt(format_string, *args, **kwargs)
exc = exc_type(reason, self, silent) # will log it
if isinstance(self, Request):
self.respond(exc)
return exc
def isnt_valid(self, *args, **kwargs):
"""Same as self.error(InvalidMessageError, ...).
"""
return self.error(InvalidMessageError, *args, **kwargs)
def cant_handle(self, *args, **kwargs):
"""Same as self.error(MessageHandlingError, ...).
"""
return self.error(MessageHandlingError, *args, **kwargs)
class Event(Message):
"""Represents an incoming event.
https://microsoft.github.io/debug-adapter-protocol/specification#event
It is guaranteed that body is a MessageDict associated with this Event, and so
are all the nested dicts in it. If "body" was missing or null in JSON, body is
an empty dict.
To handle the event, JsonMessageChannel tries to find a handler for this event in
JsonMessageChannel.handlers. Given event="X", if handlers.X_event exists, then it
is the specific handler for this event. Otherwise, handlers.event must exist, and
it is the generic handler for this event. A missing handler is a fatal error.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
To report failure to handle the event, the handler must raise an instance of
MessageHandlingError that applies_to() the Event object it was handling. Any such
failure is logged, after which the message loop moves on to the next message.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Event object.
"""
def __init__(self, channel, seq, event, body, json=None):
super(Event, self).__init__(channel, seq, json)
self.event = event
if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
body.associate_with(self)
self.body = body
def describe(self):
return fmt("#{0} event {1!j} from {2}", self.seq, self.event, self.channel)
@property
def payload(self):
return self.body
@staticmethod
def _parse(channel, message_dict):
seq = message_dict("seq", int)
event = message_dict("event", unicode)
body = message_dict("body", _payload)
message = Event(channel, seq, event, body, json=message_dict)
channel._enqueue_handlers(message, message._handle)
def _handle(self):
channel = self.channel
handler = channel._get_handler_for("event", self.event)
try:
try:
result = handler(self)
assert result is None, fmt(
"Handler {0} tried to respond to {1}.",
compat.srcnameof(handler),
self.describe(),
)
except MessageHandlingError as exc:
if not exc.applies_to(self):
raise
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
self.describe(),
str(exc),
)
except Exception:
log.reraise_exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
self.describe(),
)
NO_RESPONSE = object()
"""Can be returned from a request handler in lieu of the response body, to indicate
that no response is to be sent.
Request.respond() must be invoked explicitly at some later point to provide a response.
"""
class Request(Message):
"""Represents an incoming or an outgoing request.
Incoming requests are represented directly by instances of this class.
Outgoing requests are represented by instances of OutgoingRequest, which provides
additional functionality to handle responses.
For incoming requests, it is guaranteed that arguments is a MessageDict associated
with this Request, and so are all the nested dicts in it. If "arguments" was missing
or null in JSON, arguments is an empty dict.
To handle the request, JsonMessageChannel tries to find a handler for this request
in JsonMessageChannel.handlers. Given command="X", if handlers.X_request exists,
then it is the specific handler for this request. Otherwise, handlers.request must
exist, and it is the generic handler for this request. A missing handler is a fatal
error.
The handler is then invoked with the Request object as its sole argument.
If the handler itself invokes respond() on the Request at any point, then it must
not return any value.
Otherwise, if the handler returns NO_RESPONSE, no response to the request is sent.
It must be sent manually at some later point via respond().
Otherwise, a response to the request is sent with the returned value as the body.
To fail the request, the handler can return an instance of MessageHandlingError,
or respond() with one, or raise one such that it applies_to() the Request object
being handled.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Request object.
"""
def __init__(self, channel, seq, command, arguments, json=None):
super(Request, self).__init__(channel, seq, json)
self.command = command
if isinstance(arguments, MessageDict) and hasattr(arguments, "associate_with"):
arguments.associate_with(self)
self.arguments = arguments
self.response = None
"""Response to this request.
For incoming requests, it is set as soon as the request handler returns.
For outgoing requests, it is set as soon as the response is received, and
before self._handle_response is invoked.
"""
def describe(self):
return fmt("#{0} request {1!j} from {2}", self.seq, self.command, self.channel)
@property
def payload(self):
return self.arguments
def respond(self, body):
assert self.response is None
d = {"type": "response", "request_seq": self.seq, "command": self.command}
if isinstance(body, Exception):
d["success"] = False
err_text = str(body)
try:
err_text = compat.force_unicode(err_text, "utf-8")
except Exception:
# On Python 2, the error message might not be Unicode, and we don't
# really know what encoding it is. So if treating it as UTF-8 failed,
# use repr() as a fallback - it should escape all non-ASCII chars in
# the string.
err_text = compat.force_unicode(repr(body), "ascii", errors="replace")
d["message"] = err_text
else:
d["success"] = True
if body is not None and body != {}:
d["body"] = body
with self.channel._send_message(d) as seq:
pass
self.response = Response(self.channel, seq, self, body)
@staticmethod
def _parse(channel, message_dict):
seq = message_dict("seq", int)
command = message_dict("command", unicode)
arguments = message_dict("arguments", _payload)
message = Request(channel, seq, command, arguments, json=message_dict)
channel._enqueue_handlers(message, message._handle)
def _handle(self):
channel = self.channel
handler = channel._get_handler_for("request", self.command)
try:
try:
result = handler(self)
except MessageHandlingError as exc:
if not exc.applies_to(self):
raise
result = exc
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
self.describe(),
str(exc),
)
if result is NO_RESPONSE:
assert self.response is None, fmt(
"Handler {0} for {1} must not return NO_RESPONSE if it has already "
"invoked request.respond().",
compat.srcnameof(handler),
self.describe(),
)
elif self.response is not None:
assert result is None or result is self.response.body, fmt(
"Handler {0} for {1} must not return a response body if it has "
"already invoked request.respond().",
compat.srcnameof(handler),
self.describe(),
)
else:
assert result is not None, fmt(
"Handler {0} for {1} must either call request.respond() before it "
"returns, or return the response body, or return NO_RESPONSE.",
compat.srcnameof(handler),
self.describe(),
)
try:
self.respond(result)
except NoMoreMessages:
log.warning(
"Channel was closed before the response from handler {0} to {1} could be sent",
compat.srcnameof(handler),
self.describe(),
)
except Exception:
log.reraise_exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
self.describe(),
)
class OutgoingRequest(Request):
"""Represents an outgoing request, for which it is possible to wait for a
response to be received, and register a response handler.
"""
_parse = _handle = None
def __init__(self, channel, seq, command, arguments):
super(OutgoingRequest, self).__init__(channel, seq, command, arguments)
self._response_handlers = []
def describe(self):
return fmt("#{0} request {1!j} to {2}", self.seq, self.command, self.channel)
def wait_for_response(self, raise_if_failed=True):
"""Waits until a response is received for this request, records the Response
object for it in self.response, and returns response.body.
If no response was received from the other party before the channel closed,
self.response is a synthesized Response with body=NoMoreMessages().
If raise_if_failed=True and response.success is False, raises response.body
instead of returning.
"""
with self.channel:
while self.response is None:
self.channel._handlers_enqueued.wait()
if raise_if_failed and not self.response.success:
raise self.response.body
return self.response.body
def on_response(self, response_handler):
"""Registers a handler to invoke when a response is received for this request.
The handler is invoked with Response as its sole argument.
If response has already been received, invokes the handler immediately.
It is guaranteed that self.response is set before the handler is invoked.
If no response was received from the other party before the channel closed,
self.response is a dummy Response with body=NoMoreMessages().
The handler is always invoked asynchronously on an unspecified background
thread - thus, the caller of on_response() can never be blocked or deadlocked
by the handler.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
"""
with self.channel:
self._response_handlers.append(response_handler)
self._enqueue_response_handlers()
def _enqueue_response_handlers(self):
response = self.response
if response is None:
# Response._parse() will submit the handlers when response is received.
return
def run_handlers():
for handler in handlers:
try:
try:
handler(response)
except MessageHandlingError as exc:
if not exc.applies_to(response):
raise
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
response.describe(),
str(exc),
)
except Exception:
log.reraise_exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
response.describe(),
)
handlers = self._response_handlers[:]
self.channel._enqueue_handlers(response, run_handlers)
del self._response_handlers[:]
class Response(Message):
"""Represents an incoming or an outgoing response to a Request.
https://microsoft.github.io/debug-adapter-protocol/specification#response
error_message corresponds to "message" in JSON, and is renamed for clarity.
If success is False, body is None. Otherwise, it is a MessageDict associated
with this Response, and so are all the nested dicts in it. If "body" was missing
or null in JSON, body is an empty dict.
If this is a response to an outgoing request, it will be handled by the handler
registered via self.request.on_response(), if any.
Regardless of whether there is such a handler, OutgoingRequest.wait_for_response()
can also be used to retrieve and handle the response. If there is a handler, it is
executed before wait_for_response() returns.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
    To report failure to handle the response, the handler must raise an instance of
MessageHandlingError that applies_to() the Response object it was handling. Any
such failure is logged, after which the message loop moves on to the next message.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Response object.
"""
def __init__(self, channel, seq, request, body, json=None):
super(Response, self).__init__(channel, seq, json)
self.request = request
"""The request to which this is the response."""
if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
body.associate_with(self)
self.body = body
"""Body of the response if the request was successful, or an instance
        of some class derived from Exception if it was not.
If a response was received from the other side, but request failed, it is an
instance of MessageHandlingError containing the received error message. If the
error message starts with InvalidMessageError.PREFIX, then it's an instance of
the InvalidMessageError specifically, and that prefix is stripped.
If no response was received from the other party before the channel closed,
it is an instance of NoMoreMessages.
"""
def describe(self):
return fmt("#{0} response to {1}", self.seq, self.request.describe())
@property
def payload(self):
return self.body
@property
def success(self):
"""Whether the request succeeded or not.
"""
return not isinstance(self.body, Exception)
@property
def result(self):
"""Result of the request. Returns the value of response.body, unless it
is an exception, in which case it is raised instead.
"""
if self.success:
return self.body
else:
raise self.body
@staticmethod
def _parse(channel, message_dict, body=None):
seq = message_dict("seq", int) if (body is None) else None
request_seq = message_dict("request_seq", int)
command = message_dict("command", unicode)
success = message_dict("success", bool)
if body is None:
if success:
body = message_dict("body", _payload)
else:
error_message = message_dict("message", unicode)
exc_type = MessageHandlingError
if error_message.startswith(InvalidMessageError.PREFIX):
error_message = error_message[len(InvalidMessageError.PREFIX) :]
exc_type = InvalidMessageError
body = exc_type(error_message, silent=True)
try:
with channel:
request = channel._sent_requests.pop(request_seq)
known_request = True
except KeyError:
# Synthetic Request that only has seq and command as specified in response
# JSON, for error reporting purposes.
request = OutgoingRequest(channel, request_seq, command, "<unknown>")
known_request = False
if not success:
body.cause = request
response = Response(channel, seq, request, body, json=message_dict)
with channel:
request.response = response
request._enqueue_response_handlers()
if known_request:
return response
else:
raise response.isnt_valid(
"request_seq={0} does not match any known request", request_seq
)
class Disconnect(Message):
"""A dummy message used to represent disconnect. It's always the last message
received from any channel.
"""
def __init__(self, channel):
super(Disconnect, self).__init__(channel, None)
def describe(self):
return fmt("disconnect from {0}", self.channel)
class MessageHandlingError(Exception):
"""Indicates that a message couldn't be handled for some reason.
If the reason is a contract violation - i.e. the message that was handled did not
conform to the protocol specification - InvalidMessageError, which is a subclass,
should be used instead.
If any message handler raises an exception not derived from this class, it will
escape the message loop unhandled, and terminate the process.
If any message handler raises this exception, but applies_to(message) is False, it
    is treated as if it was a generic exception, as described above. Thus, if a request
handler issues another request of its own, and that one fails, the failure is not
silently propagated. However, a request that is delegated via Request.delegate()
will also propagate failures back automatically. For manual propagation, catch the
exception, and call exc.propagate().
If any event handler raises this exception, and applies_to(event) is True, the
exception is silently swallowed by the message loop.
If any request handler raises this exception, and applies_to(request) is True, the
exception is silently swallowed by the message loop, and a failure response is sent
with "message" set to str(reason).
Note that, while errors are not logged when they're swallowed by the message loop,
by that time they have already been logged by their __init__ (when instantiated).
"""
def __init__(self, reason, cause=None, silent=False):
"""Creates a new instance of this class, and immediately logs the exception.
Message handling errors are logged immediately unless silent=True, so that the
        precise context in which they occurred can be determined from the surrounding
log entries.
"""
self.reason = reason
"""Why it couldn't be handled. This can be any object, but usually it's either
str or Exception.
"""
assert cause is None or isinstance(cause, Message)
self.cause = cause
"""The Message object for the message that couldn't be handled. For responses
to unknown requests, this is a synthetic Request.
"""
if not silent:
try:
raise self
except MessageHandlingError:
log.swallow_exception()
def __hash__(self):
return hash((self.reason, id(self.cause)))
def __eq__(self, other):
if not isinstance(other, MessageHandlingError):
return NotImplemented
if type(self) is not type(other):
return NotImplemented
if self.reason != other.reason:
return False
if self.cause is not None and other.cause is not None:
if self.cause.seq != other.cause.seq:
return False
return True
def __ne__(self, other):
return not self == other
def __str__(self):
return str(self.reason)
def __repr__(self):
s = type(self).__name__
if self.cause is None:
s += fmt("(reason={0!r})", self.reason)
else:
s += fmt(
"(channel={0!r}, cause={1!r}, reason={2!r})",
self.cause.channel.name,
self.cause.seq,
self.reason,
)
return s
def applies_to(self, message):
"""Whether this MessageHandlingError can be treated as a reason why the
handling of message failed.
If self.cause is None, this is always true.
If self.cause is not None, this is only true if cause is message.
"""
return self.cause is None or self.cause is message
def propagate(self, new_cause):
"""Propagates this error, raising a new instance of the same class with the
same reason, but a different cause.
"""
raise type(self)(self.reason, new_cause, silent=True)
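# A sketch of manual failure propagation via propagate() (hedged: `their_channel` and
# `request` are hypothetical objects supplied by the surrounding application, not part
# of this module). A handler that forwards work to another channel can report that
# channel's failure as its own like this:
#
#     def handle_forwarded_request(request):
#         try:
#             return their_channel.request(request.command, request.arguments)
#         except MessageHandlingError as exc:
#             exc.propagate(request)  # re-raised with `request` as the new cause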
class InvalidMessageError(MessageHandlingError):
"""Indicates that an incoming message did not follow the protocol specification -
for example, it was missing properties that are required, or the message itself
is not allowed in the current state.
Raised by MessageDict in lieu of KeyError for missing keys.
"""
PREFIX = "Invalid message: "
"""Automatically prepended to the "message" property in JSON responses, when the
handler raises InvalidMessageError.
If a failed response has "message" property that starts with this prefix, it is
reported as InvalidMessageError rather than MessageHandlingError.
"""
def __str__(self):
return InvalidMessageError.PREFIX + str(self.reason)
class JsonMessageChannel(object):
"""Implements a JSON message channel on top of a raw JSON message stream, with
support for DAP requests, responses, and events.
The channel can be locked for exclusive use via the with-statement::
with channel:
channel.send_request(...)
# No interleaving messages can be sent here from other threads.
channel.send_event(...)
"""
def __init__(self, stream, handlers=None, name=None):
self.stream = stream
self.handlers = handlers
self.name = name if name is not None else stream.name
self.started = False
self._lock = threading.RLock()
self._closed = False
self._seq_iter = itertools.count(1)
self._sent_requests = {} # {seq: Request}
self._handler_queue = [] # [(what, handler)]
self._handlers_enqueued = threading.Condition(self._lock)
self._handler_thread = None
self._parser_thread = None
def __str__(self):
return self.name
def __repr__(self):
return fmt("{0}({1!r})", type(self).__name__, self.name)
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
def close(self):
"""Closes the underlying stream.
This does not immediately terminate any handlers that are already executing,
but they will be unable to respond. No new request or event handlers will
execute after this method is called, even for messages that have already been
        received. However, response handlers will continue to execute for any request
that is still pending, as will any handlers registered via on_response().
"""
with self:
if not self._closed:
self._closed = True
self.stream.close()
def start(self):
"""Starts a message loop which parses incoming messages and invokes handlers
for them on a background thread, until the channel is closed.
Incoming messages, including responses to requests, will not be processed at
all until this is invoked.
"""
assert not self.started
self.started = True
self._parser_thread = threading.Thread(
target=self._parse_incoming_messages, name=fmt("{0} message parser", self)
)
self._parser_thread.pydev_do_not_trace = True
self._parser_thread.is_pydev_daemon_thread = True
self._parser_thread.daemon = True
self._parser_thread.start()
def wait(self):
"""Waits for the message loop to terminate, and for all enqueued Response
message handlers to finish executing.
"""
parser_thread = self._parser_thread
if parser_thread is not None:
parser_thread.join()
handler_thread = self._handler_thread
if handler_thread is not None:
handler_thread.join()
# Order of keys for _prettify() - follows the order of properties in
# https://microsoft.github.io/debug-adapter-protocol/specification
_prettify_order = (
"seq",
"type",
"request_seq",
"success",
"command",
"event",
"message",
"arguments",
"body",
"error",
)
def _prettify(self, message_dict):
"""Reorders items in a MessageDict such that it is more readable.
"""
for key in self._prettify_order:
if key not in message_dict:
continue
value = message_dict[key]
del message_dict[key]
message_dict[key] = value
@contextlib.contextmanager
def _send_message(self, message):
"""Sends a new message to the other party.
Generates a new sequence number for the message, and provides it to the
caller before the message is sent, using the context manager protocol::
with send_message(...) as seq:
# The message hasn't been sent yet.
...
# Now the message has been sent.
Safe to call concurrently for the same channel from different threads.
"""
assert "seq" not in message
with self:
seq = next(self._seq_iter)
message = MessageDict(None, message)
message["seq"] = seq
self._prettify(message)
with self:
yield seq
self.stream.write_json(message)
def send_request(self, command, arguments=None, on_before_send=None):
"""Sends a new request, and returns the OutgoingRequest object for it.
If arguments is None or {}, "arguments" will be omitted in JSON.
If on_before_send is not None, invokes on_before_send() with the request
object as the sole argument, before the request actually gets sent.
Does not wait for response - use OutgoingRequest.wait_for_response().
Safe to call concurrently for the same channel from different threads.
"""
d = {"type": "request", "command": command}
if arguments is not None and arguments != {}:
d["arguments"] = arguments
with self._send_message(d) as seq:
request = OutgoingRequest(self, seq, command, arguments)
if on_before_send is not None:
on_before_send(request)
self._sent_requests[seq] = request
return request
def send_event(self, event, body=None):
"""Sends a new event.
If body is None or {}, "body" will be omitted in JSON.
Safe to call concurrently for the same channel from different threads.
"""
d = {"type": "event", "event": event}
if body is not None and body != {}:
d["body"] = body
with self._send_message(d):
pass
def request(self, *args, **kwargs):
"""Same as send_request(...).wait_for_response()
"""
return self.send_request(*args, **kwargs).wait_for_response()
def propagate(self, message):
"""Sends a new message with the same type and payload.
If it was a request, returns the new OutgoingRequest object for it.
"""
assert message.is_request() or message.is_event()
if message.is_request():
return self.send_request(message.command, message.arguments)
else:
self.send_event(message.event, message.body)
def delegate(self, message):
"""Like propagate(message).wait_for_response(), but will also propagate
any resulting MessageHandlingError back.
"""
try:
result = self.propagate(message)
if result.is_request():
result = result.wait_for_response()
return result
except MessageHandlingError as exc:
exc.propagate(message)
def _parse_incoming_messages(self):
log.debug("Starting message loop for channel {0}", self)
try:
while True:
self._parse_incoming_message()
except NoMoreMessages as exc:
log.debug("Exiting message loop for channel {0}: {1}", self, exc)
with self:
# Generate dummy responses for all outstanding requests.
err_message = compat.force_unicode(str(exc), "utf-8", errors="replace")
# Response._parse() will remove items from _sent_requests, so
# make a snapshot before iterating.
sent_requests = list(self._sent_requests.values())
for request in sent_requests:
response_json = MessageDict(
None,
{
"seq": -1,
"request_seq": request.seq,
"command": request.command,
"success": False,
"message": err_message,
},
)
Response._parse(self, response_json, body=exc)
assert not len(self._sent_requests)
self._enqueue_handlers(Disconnect(self), self._handle_disconnect)
self.close()
_message_parsers = {
"event": Event._parse,
"request": Request._parse,
"response": Response._parse,
}
def _parse_incoming_message(self):
"""Reads incoming messages, parses them, and puts handlers into the queue
for _run_handlers() to invoke, until the channel is closed.
"""
# Set up a dedicated decoder for this message, to create MessageDict instances
# for all JSON objects, and track them so that they can be later wired up to
# the Message they belong to, once it is instantiated.
def object_hook(d):
d = MessageDict(None, d)
if "seq" in d:
self._prettify(d)
d.associate_with = associate_with
message_dicts.append(d)
return d
# A hack to work around circular dependency between messages, and instances of
# MessageDict in their payload. We need to set message for all of them, but it
# cannot be done until the actual Message is created - which happens after the
# dicts are created during deserialization.
#
# So, upon deserialization, every dict in the message payload gets a method
# that can be called to set MessageDict.message for *all* dicts belonging to
# that message. This method can then be invoked on the top-level dict by the
# parser, after it has parsed enough of the dict to create the appropriate
# instance of Event, Request, or Response for this message.
def associate_with(message):
for d in message_dicts:
d.message = message
del d.associate_with
message_dicts = []
decoder = self.stream.json_decoder_factory(object_hook=object_hook)
message_dict = self.stream.read_json(decoder)
assert isinstance(message_dict, MessageDict) # make sure stream used decoder
msg_type = message_dict("type", json.enum("event", "request", "response"))
parser = self._message_parsers[msg_type]
try:
parser(self, message_dict)
except InvalidMessageError as exc:
log.error(
"Failed to parse message in channel {0}: {1} in:\n{2!j}",
self,
str(exc),
message_dict,
)
except Exception as exc:
if isinstance(exc, NoMoreMessages) and exc.stream is self.stream:
raise
log.swallow_exception(
"Fatal error in channel {0} while parsing:\n{1!j}", self, message_dict
)
os._exit(1)
def _enqueue_handlers(self, what, *handlers):
"""Enqueues handlers for _run_handlers() to run.
`what` is the Message being handled, and is used for logging purposes.
If the background thread with _run_handlers() isn't running yet, starts it.
"""
with self:
self._handler_queue.extend((what, handler) for handler in handlers)
self._handlers_enqueued.notify_all()
# If there is anything to handle, but there's no handler thread yet,
# spin it up. This will normally happen only once, on the first call
# to _enqueue_handlers(), and that thread will run all the handlers
            # for parsed messages. However, this can also happen if somebody calls
# Request.on_response() - possibly concurrently from multiple threads -
# after the channel has already been closed, and the initial handler
# thread has exited. In this case, we spin up a new thread just to run
# the enqueued response handlers, and it will exit as soon as it's out
# of handlers to run.
if len(self._handler_queue) and self._handler_thread is None:
self._handler_thread = threading.Thread(
target=self._run_handlers, name=fmt("{0} message handler", self)
)
self._handler_thread.pydev_do_not_trace = True
self._handler_thread.is_pydev_daemon_thread = True
self._handler_thread.start()
def _run_handlers(self):
"""Runs enqueued handlers until the channel is closed, or until the handler
queue is empty once the channel is closed.
"""
while True:
with self:
closed = self._closed
if closed:
# Wait for the parser thread to wrap up and enqueue any remaining
# handlers, if it is still running.
self._parser_thread.join()
# From this point on, _enqueue_handlers() can only get called
# from Request.on_response().
with self:
if not closed and not len(self._handler_queue):
# Wait for something to process.
self._handlers_enqueued.wait()
# Make a snapshot before releasing the lock.
handlers = self._handler_queue[:]
del self._handler_queue[:]
if closed and not len(handlers):
# Nothing to process, channel is closed, and parser thread is
# not running anymore - time to quit! If Request.on_response()
# needs to call _enqueue_handlers() later, it will spin up
# a new handler thread.
self._handler_thread = None
return
for what, handler in handlers:
# If the channel is closed, we don't want to process any more events
# or requests - only responses and the final disconnect handler. This
# is to guarantee that if a handler calls close() on its own channel,
# the corresponding request or event is the last thing to be processed.
if closed and handler in (Event._handle, Request._handle):
continue
with log.prefixed("/handling {0}/\n", what.describe()):
try:
handler()
except Exception:
# It's already logged by the handler, so just fail fast.
self.close()
os._exit(1)
def _get_handler_for(self, type, name):
"""Returns the handler for a message of a given type.
"""
with self:
handlers = self.handlers
for handler_name in (name + "_" + type, type):
try:
return getattr(handlers, handler_name)
except AttributeError:
continue
raise AttributeError(
fmt(
"handler object {0} for channel {1} has no handler for {2} {3!r}",
compat.srcnameof(handlers),
self,
type,
name,
)
)
def _handle_disconnect(self):
handler = getattr(self.handlers, "disconnect", lambda: None)
try:
handler()
except Exception:
log.reraise_exception(
"Handler {0}\ncouldn't handle disconnect from {1}:",
compat.srcnameof(handler),
self,
)
class MessageHandlers(object):
"""A simple delegating message handlers object for use with JsonMessageChannel.
For every argument provided, the object gets an attribute with the corresponding
name and value.
"""
def __init__(self, **kwargs):
for name, func in kwargs.items():
setattr(self, name, func)
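# A construction sketch (hedged: `stream` is a hypothetical raw JSON stream object and the
# lambdas are placeholders). Handler attribute names follow the `<name>_<type>` lookup
# performed by JsonMessageChannel._get_handler_for() above, plus a plain `disconnect`:
#
#     handlers = MessageHandlers(
#         initialize_request=lambda request: {},   # return value becomes the response body
#         stopped_event=lambda event: print(event.body),
#         disconnect=lambda: None,
#     )
#     JsonMessageChannel(stream, handlers).start()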
| StarcoderdataPython |
3271234 | <filename>back/__init__.py
# pylint: disable=wildcard-import
from back.models import *
from back.predictors import *
from back.readers import *
| StarcoderdataPython |
1612563 | import config
import requests
from pixivpy3 import ByPassSniApi
RECOMMENDED = 0
KONACHAN = 1
YANDERE = 2
DANBOORU = 3
PIXIV = 4
pixivApi = ByPassSniApi()
pixivApi.require_appapi_hosts()
def login():
if config.pixiv_login_mode == 0:
pixivApi.auth(refresh_token=config.pixiv_refresh_token)
else:
pixivApi.login(username=config.pixiv_username, password=config.pixiv_password)
if config.pixiv_print_refresh_token:
print("Your pixiv account refresh_token is '{}'.".format(pixivApi.refresh_token))
| StarcoderdataPython |
110883 | <gh_stars>0
from typing import Final, List
SELF_CLOSING_TAGS: Final[List[str]] = [
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"keygen",
"link",
"meta",
"param",
"source",
"track",
"wbr",
]
HIGHLIGHT_LANGUAGES: Final[List[str]] = [
"1c",
"abnf",
"accesslog",
"actionscript",
"ada",
"angelscript",
"apache",
"applescript",
"arcade",
"arduino",
"armasm",
"xml",
"asciidoc",
"aspectj",
"autohotkey",
"autoit",
"avrasm",
"awk",
"axapta",
"bash",
"basic",
"bnf",
"brainfuck",
"cal",
"capnproto",
"ceylon",
"clean",
"clojure",
"clojure-repl",
"cmake",
"coffeescript",
"coq",
"cos",
"cpp",
"crmsh",
"crystal",
"csharp",
"csp",
"css",
"d",
"markdown",
"dart",
"delphi",
"diff",
"django",
"dns",
"dockerfile",
"dos",
"dsconfig",
"dts",
"dust",
"ebnf",
"elixir",
"elm",
"ruby",
"erb",
"erlang-repl",
"erlang",
"excel",
"fix",
"flix",
"fortran",
"fsharp",
"gams",
"gauss",
"gcode",
"gherkin",
"glsl",
"gml",
"go",
"golo",
"gradle",
"groovy",
"haml",
"handlebars",
"haskell",
"haxe",
"hsp",
"http",
"hy",
"inform7",
"ini",
"irpf90",
"isbl",
"java",
"javascript",
"jboss-cli",
"json",
"julia",
"julia-repl",
"kotlin",
"lasso",
"latex",
"ldif",
"leaf",
"less",
"lisp",
"livecodeserver",
"livescript",
"llvm",
"lsl",
"lua",
"makefile",
"mathematica",
"matlab",
"maxima",
"mel",
"mercury",
"mipsasm",
"mizar",
"perl",
"mojolicious",
"monkey",
"moonscript",
"n1ql",
"nginx",
"nim",
"nix",
"node-repl",
"nsis",
"objectivec",
"ocaml",
"openscad",
"oxygene",
"parser3",
"pf",
"pgsql",
"php",
"php-template",
"pony",
"powershell",
"processing",
"profile",
"prolog",
"properties",
"protobuf",
"puppet",
"purebasic",
"python",
"python-repl",
"q",
"qml",
"r",
"reasonml",
"rib",
"roboconf",
"routeros",
"rsl",
"ruleslanguage",
"rust",
"sas",
"scala",
"scheme",
"scilab",
"scss",
"shell",
"smali",
"smalltalk",
"sml",
"sqf",
"sql_more",
"sql",
"stan",
"stata",
"step21",
"stylus",
"subunit",
"swift",
"taggerscript",
"yaml",
"tap",
"tcl",
"thrift",
"tp",
"twig",
"typescript",
"vala",
"vbnet",
"vbscript",
"vbscript-html",
"verilog",
"vhdl",
"vim",
"x86asm",
"xl",
"xquery",
"zephir",
]
| StarcoderdataPython |
121501 | import azureml.dataprep as dprep
import azureml.core
import pandas as pd
import logging
import os
import datetime
import shutil
from azureml.core import Workspace, Datastore, Dataset, Experiment, Run
from sklearn.model_selection import train_test_split
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from sklearn.tree import DecisionTreeClassifier
run = Run.get_context()
workspace = run.experiment.workspace
dataset_name = 'training_data'
dataset = Dataset.get(workspace=workspace, name=dataset_name)
dflow = dataset.get_definition()
dflow_val, dflow_train = dflow.random_split(percentage=0.3)
y_df = dflow_train.keep_columns(['HasDetections']).to_pandas_dataframe()
x_df = dflow_train.drop_columns(['HasDetections']).to_pandas_dataframe()
y_val = dflow_val.keep_columns(['HasDetections']).to_pandas_dataframe()
x_val = dflow_val.drop_columns(['HasDetections']).to_pandas_dataframe()
data = {"train": {"X": x_df, "y": y_df},
"validation": {"X": x_val, "y": y_val}}
clf = DecisionTreeClassifier().fit(data["train"]["X"], data["train"]["y"])
print('Accuracy of Decision Tree classifier on training set: {:.2f}'.format(clf.score(x_df, y_df)))
print('Accuracy of Decision Tree classifier on validation set: {:.2f}'.format(clf.score(x_val, y_val)))
| StarcoderdataPython |
1619553 | <reponame>artemvalieiev/project_course_work
import pandas as pd
from typing import List, Tuple
from ..predictor.ctwin_after_plant_predictor import CTWinAfterPlantPredictor
class TestMainPredictor:
_TEST_FILE_PATH = "src/tests/test_examples.csv"
_BASE_PATH = "./model/"
_MODEL_NAME = 'model.ctb'
def test_predict(self) -> None:
frame = pd.read_csv(self._TEST_FILE_PATH, sep=';')
predictor = CTWinAfterPlantPredictor.load(self._BASE_PATH, self._MODEL_NAME)
X = frame.drop(["CT-Win"], axis=1)
answers = [int(x) for x in frame['CT-Win']]
predicted = predictor.predict_round_raw(X.values.tolist())
assert predicted == answers
test = TestMainPredictor()
test.test_predict()
| StarcoderdataPython |
77662 | <filename>tests/py/test_state_chain.py
# coding: utf8
from __future__ import absolute_import, division, print_function, unicode_literals
from base64 import b64encode
import json
from pando.exceptions import MalformedBody, UnknownBodyType
from pando.http.request import Request
from pando.http.response import Response
from liberapay.constants import SESSION
from liberapay.security import csrf
from liberapay.testing import Harness
class Tests(Harness):
def setUp(self):
Harness.setUp(self)
self.client.website.canonical_scheme = 'https'
self.client.website.canonical_host = 'example.com'
self._cookie_domain = self.client.website.cookie_domain
self.client.website.cookie_domain = b'.example.com'
def tearDown(self):
Harness.tearDown(self)
website = self.client.website
website.canonical_scheme = website.env.canonical_scheme
website.canonical_host = website.env.canonical_host
website.cookie_domain = self._cookie_domain
def test_canonize_canonizes(self):
response = self.client.GxT("/",
HTTP_HOST=b'example.com',
HTTP_X_FORWARDED_PROTO=b'http',
)
assert response.code == 302
assert response.headers[b'Location'] == b'https://example.com/'
assert response.headers[b'Cache-Control'] == b'public, max-age=86400'
def test_no_cookies_over_http(self):
"""
We don't want to send cookies over HTTP, especially not CSRF and
session cookies, for obvious security reasons.
"""
alice = self.make_participant('alice')
redirect = self.client.GET("/",
auth_as=alice,
HTTP_X_FORWARDED_PROTO=b'http',
HTTP_HOST=b'example.com',
raise_immediately=False,
)
assert redirect.code == 302
assert not redirect.headers.cookie
def test_early_failures_dont_break_everything(self):
old_from_wsgi = Request.from_wsgi
def broken_from_wsgi(*a, **kw):
raise Response(400)
try:
Request.from_wsgi = classmethod(broken_from_wsgi)
assert self.client.GET("/", raise_immediately=False).code == 400
finally:
Request.from_wsgi = old_from_wsgi
def test_i18n_subdomain_works(self):
r = self.client.GET(
'/',
HTTP_X_FORWARDED_PROTO=b'https', HTTP_HOST=b'fr.example.com',
raise_immediately=False,
)
assert r.code == 200
assert '<html lang="fr">' in r.text
assert 'À propos' in r.text
def test_i18n_subdomain_is_redirected_to_https(self):
r = self.client.GET(
'/',
HTTP_X_FORWARDED_PROTO=b'http', HTTP_HOST=b'en.example.com',
raise_immediately=False,
)
assert r.code == 302
assert not r.headers.cookie
assert r.headers[b'Location'] == b'https://en.example.com/'
def test_csrf_cookie_properties(self):
r = self.client.GET(
'/',
HTTP_X_FORWARDED_PROTO=b'https', HTTP_HOST=b'en.example.com',
csrf_token=None, raise_immediately=False,
)
assert r.code == 200
cookie = r.headers.cookie[csrf.CSRF_TOKEN]
assert cookie[str('domain')] == str('.example.com')
assert cookie[str('expires')][-4:] == str(' GMT')
assert cookie[str('path')] == str('/')
assert cookie[str('secure')] is True
class Tests2(Harness):
def test_basic_auth_works_and_doesnt_return_a_session_cookie(self):
alice = self.make_participant('alice')
password = 'password'
alice.update_password(password)
auth_header = b'Basic ' + b64encode(('%s:%s' % (alice.id, password)).encode('ascii'))
r = self.client.GET('/', HTTP_AUTHORIZATION=auth_header)
assert r.code == 200
assert SESSION not in r.headers.cookie
def test_basic_auth_malformed_header_returns_400(self):
auth_header = b'Basic ' + b64encode(b'bad')
r = self.client.GxT('/', HTTP_AUTHORIZATION=auth_header)
assert r.code == 400
assert r.text == 'Malformed "Authorization" header'
def test_basic_auth_bad_userid_returns_401(self):
auth_header = b'Basic ' + b64encode(b'admin:admin')
r = self.client.GxT('/', HTTP_AUTHORIZATION=auth_header)
assert r.code == 401
def test_basic_auth_no_password_returns_401(self):
alice = self.make_participant('alice')
assert alice.id == 1
auth_header = b'Basic ' + b64encode(b'1:')
r = self.client.GxT('/', HTTP_AUTHORIZATION=auth_header)
assert r.code == 401
def test_accept_header_is_respected(self):
r = self.client.GET('/about/stats', HTTP_ACCEPT=b'application/json')
assert r.headers[b'Content-Type'] == b'application/json; charset=UTF-8'
json.loads(r.text)
def test_error_spt_works(self):
r = self.client.POST('/', csrf_token=False, raise_immediately=False)
assert r.code == 403
def test_cors_is_not_allowed_by_default(self):
r = self.client.GET('/')
assert b'Access-Control-Allow-Origin' not in r.headers
def test_cors_is_allowed_for_assets(self):
r = self.client.GET('/assets/jquery.min.js')
assert r.code == 200
assert r.headers[b'Access-Control-Allow-Origin'] == b'*'
def test_caching_of_assets(self):
r = self.client.GET('/assets/jquery.min.js')
assert r.headers[b'Cache-Control'] == b'public, max-age=3600'
assert b'Vary' not in r.headers
assert not r.headers.cookie
def test_caching_of_assets_with_etag(self):
r = self.client.GET(self.client.website.asset('jquery.min.js'))
assert r.headers[b'Cache-Control'] == b'public, max-age=31536000'
assert b'Vary' not in r.headers
assert not r.headers.cookie
def test_caching_of_simplates(self):
r = self.client.GET('/')
assert r.headers[b'Cache-Control'] == b'no-cache'
assert b'Vary' not in r.headers
def test_no_csrf_cookie(self):
r = self.client.POST('/', csrf_token=False, raise_immediately=False)
assert r.code == 403
assert "Bad CSRF cookie" in r.text
assert csrf.CSRF_TOKEN in r.headers.cookie
def test_no_csrf_cookie_unknown_method_on_asset(self):
r = self.client.hit('UNKNOWN', '/assets/base.css', csrf_token=False,
raise_immediately=False)
assert r.code == 200 # this should be a 405, that's a "bug" in aspen
def test_bad_csrf_cookie(self):
r = self.client.POST('/', csrf_token='bad_<PASSWORD>', raise_immediately=False)
assert r.code == 403
assert "Bad CSRF cookie" in r.text
assert r.headers.cookie[csrf.CSRF_TOKEN].value != 'bad_token'
def test_csrf_cookie_set_for_most_requests(self):
r = self.client.GET('/')
assert csrf.CSRF_TOKEN in r.headers.cookie
def test_no_csrf_cookie_set_for_assets(self):
r = self.client.GET('/assets/base.css')
assert csrf.CSRF_TOKEN not in r.headers.cookie
def test_sanitize_token_passes_through_good_token(self):
token = '<PASSWORD>'
assert csrf._sanitize_token(token) == token
def test_sanitize_token_rejects_overlong_token(self):
token = 'ddd<PASSWORD>'
assert csrf._sanitize_token(token) is None
def test_sanitize_token_rejects_underlong_token(self):
token = 'ddd<PASSWORD>'
assert csrf._sanitize_token(token) is None
def test_sanitize_token_rejects_goofy_token(self):
token = '<PASSWORD>'
assert csrf._sanitize_token(token) is None
def test_malformed_body(self):
with self.assertRaises(MalformedBody):
self.client.POST('/', body=b'a', content_type=b'application/json')
def test_unknown_body_type(self):
with self.assertRaises(UnknownBodyType):
self.client.POST('/', body=b'x', content_type=b'unknown/x')
def test_non_dict_body(self):
r = self.client.POST('/', body=b'[]', content_type=b'application/json')
assert r.code == 200
def test_no_trailing_slash_redirects(self):
r = self.client.GET('/foo', raise_immediately=False)
assert r.code == 404, r.text
def test_null_byte_results_in_400(self):
r = self.client.GET('/foo%00', raise_immediately=False)
assert r.code == 400, r.text
| StarcoderdataPython |
1629575 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, SelectField
from wtforms.validators import InputRequired
class PitchForm(FlaskForm):
title = StringField('pitch_title')
text = TextAreaField('pitch_text')
category = SelectField('pitch_type', choices=[(
'technology', 'Product-Pitch'), ('travels', 'Interview-Pitch'), ('sports', 'Promotion-Pitch')])
submit = SubmitField('submit')
class CommentForm(FlaskForm):
text = TextAreaField('yoursay')
submit = SubmitField('submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you', validators=[InputRequired()])
submit = SubmitField('Submit')
| StarcoderdataPython |
1684622 | # -*- coding:utf-8 -*-
# !/usr/bin/python
'''
Created on 21.05.2012
@author: philkraf
'''
from . import db
# Import smtplib for the actual sending function
import sys
# Import the email modules we'll need
from .tools.mail import EMail, send
from datetime import datetime
msgtemplate = """
Liebe/r %(you)s,
bis %(due)s sollte "%(job)s" erledigt werden und Du bist dafür eingetragen. Wenn der Job noch warten kann,
ändere doch einfach das Datum (due).
Falls etwas unklar ist, bitte nachfragen. Wenn Du die Aufgabe schon erledigt hast
gehe bitte auf http://fb09-pasig.umwelt.uni-giessen.de:8081/job/%(id)s
und hake den Job ab.
Danke und Schöne Grüße
%(me)s
P.S.: Diese Nachricht wurde automatisch von der Schwingbach-Datenbank generiert
Dear %(you)s,
the task "%(job)s" in the Schwingbach area was due at %(due)s, and you have been assigned
for it. If the job can wait, please change the due date. If you have any questions regarding this task, do not hesitate to ask. If you have already
finished the tasked, please mark it as done at http://fb09-pasig.umwelt.uni-giessen.de:8081/job/%(id)s.
Thank you,
with kind regards
%(me)s
This mail has been generated automatically from the Schwingbach database
"""
if __name__ == "__main__":
session = db.Session()
today = datetime.today()
print(today.strftime('%d.%m.%Y %H:%M'))
mails = []
    # Note: use a SQLAlchemy expression here; a plain Python `is False` comparison would not filter anything.
    for job in session.query(db.Job).filter(db.Job.done.is_(False), db.Job.due < today):
if job.is_due():
if job.description:
job.parse_description(action='due')
subject = 'Studienlandschaft Schwingbach: %s' % job.name
msgdata = dict(id=job.id, you=job.responsible.firstname, due=job.due, job=job.name, descr=job.description,
me=job.author.firstname)
msg = msgtemplate % msgdata
mails.append(EMail(job.author.email, [
job.responsible.email], subject, msg))
print((" %s->%s: %s" % (job.author.username,
job.responsible.username, job.name)).encode('utf-8'))
send(mails)
| StarcoderdataPython |
1643925 | <filename>Snake.py<gh_stars>1-10
# from Item import *
from ItemExtensions import *
from Fleet import *
class Snake(Fleet):
def __init__(self, game_handle, head_coordinates, speed=2, rotation_speed=6, length=4, separation=16):
super().__init__(game_handle)
self.speed = speed
self.rotation_speed = rotation_speed
self.separation = separation
self.items = [Segment(game_handle,
color=green,
tricoordinates=head_coordinates) for i in range(length)] # populates the snake segments
self.Head = self.items[0]
# load starting position frame stream in queue, interpolating between segment positions for each frame:
self.frames_per_segment = int(separation/speed)
self.position_queue = [(head_coordinates[0], head_coordinates[1]+i, 0)
for i in range(int(length*self.frames_per_segment))]
self.boost_multiplier = 1
self.is_alive = True
def update(self):
if self.is_alive:
for i in range(self.boost_multiplier): # do this part as many times as the multiplier
# move head:
self.Head.translate_forward(self.speed)
# add head position to stream of positions for the body to follow:
self.push_head_position()
# different segments access different positions in the stream of positions:
for index, segment in enumerate(self.items):
segment.queue_card(self.position_queue[index*self.frames_per_segment])
# remove unnecessary positions at the end of the list:
self.position_queue.pop()
# food/growth handling:
for food in self.game_handle.foods.items:
if self.Head.collides_with(food):
self.eat(food)
# death handling:
for segment in self.items[6:]:
if self.Head.collides_with(segment):
self.die()
else:
self.die()
super().update()
def push_head_position(self):
new_coordinate = (self.Head.center[0],
self.Head.center[1],
self.Head.rotation)
self.position_queue.insert(0, new_coordinate)
def forward(self, boost_multiplier=2):
# boost_multiplier is the multiplier for update iterations per frame,
# and as such it also speeds up the snake
self.boost_multiplier = int(boost_multiplier)
def left(self):
self.items[0].rotate(self.rotation_speed)
def right(self):
self.items[0].rotate(-self.rotation_speed)
# Note: rotate is not included in update(), so faster boost will not also boost effective rotation speed
def eat(self, food):
self.game_handle.foods.remove_into_belly(food)
self.append(Segment(self.game_handle,
tricoordinates=self.items[-1].get_tricoordinates()))
for i in range(self.frames_per_segment):
self.position_queue.append(self.items[-1].get_tricoordinates())
def die(self):
self.is_alive = False
if len(self.items) > 0:
self.remove(self.items[-1])
class FoodCluster(Fleet):
def __init__(self, game_handle, foods=3):
super().__init__(game_handle)
for food in range(foods):
self.append(Food(game_handle))
def remove_into_belly(self, food_bit):
self.remove(food_bit)
self.append(Food(self.game_handle)) # add another food to screen
| StarcoderdataPython |
3266173 | # Optional Parameters tutorial nr1
# def func(x=1):
# return x ** 2
# def func(word, add=5, freq=1):
# print(word*(freq+add))
# call = func("hello", 0)
class Car(object):
def __init__(self, make, model, year, condition="New", kms=0):
self.make = make
self.model = model
self.year = year
self.condition = condition
self.kms = kms
def display(self, showAll=True):
if showAll:
print("This car is a %s %s from %s, it is %s and has %s kms." %
(self.make, self.model, self.year, self.condition, self.kms))
else:
print("This car is a %s %s from %s." % (self.make, self.model, self.year))
whip = Car("Ford", "Fusion", 2012)
whip.display(False)
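# A further illustration (values are made up): defaults can be overridden positionally
# or by keyword, so only the parameters you care about need to be supplied.
ride = Car("Honda", "Civic", 2008, condition="Used", kms=120000)
ride.display()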
| StarcoderdataPython |
1779914 | <reponame>ethankelly/PythonFundamentals
#!/usr/bin/env python
# coding: utf-8
# # 4: Lists Solutions
#
# 1. Reverse a given list, e.g. if you get the list `[10, 20, 30, 40, 50]` you should print `[50, 40, 30, 20, 10]`.
# * There are two possible ways you might try this. The first is using the `reverse()` function (which is a bit like cheating), and the second is using a _slicing operator_. If you can't find anything to help online, have a look at the solutions.
# In[21]:
# Solution
input_list = [10, 20, 30, 40, 50]
print(input_list)
# Using slicing operator
input_list = input_list[::-1]
print(input_list)
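# For completeness, a sketch of the first approach mentioned above: reverse() flips the
# list in place and returns None, so the list itself is printed afterwards.
reversed_list = [10, 20, 30, 40, 50]
reversed_list.reverse()
print(reversed_list)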
# 2. Square every number in a given list of numbers, e.g. if you're given the list `[1, 2, 3, 4, 5]` you should return `[1, 4, 9, 16, 25]`.
# In[22]:
# Solution
input_list1 = [1, 2, 3, 4, 5]
# first way to do this
for i in range(len(input_list1)) :
input_list1[i] **= 2
print(input_list1)
# second way
input_list2 = [1, 2, 3, 4, 5]
input_list2 = [x * x for x in input_list2]
print(input_list2)
# 3. Write a program that counts the number of times a specified element occurs within a given list. You should try writing this inside a function.
# In[23]:
# Solution
def find_elem_in_list(elem, my_list) :
count = 0
for x in my_list :
if x == elem :
count += 1
return count
my_list = [10, 15, 20, 3, 9, 99, 20, 20, 17, 20, "Star Wars", 30, 20]
print(find_elem_in_list(20, my_list))
# 4. Write a program that, given a list of numbers, removes any even numbers and returns a list of all the odd numbers
# In[24]:
# Solution
def remove_even(my_next_list) :
to_return = []
for x in my_next_list :
if x % 2 != 0 :
to_return.append(x)
return to_return
my_next_list = [1, 2, 3, 5, 7, 8, 9, 10, 12, 14, 22, 23, 27, 99]
print(remove_even(my_next_list))
# In[ ]:
| StarcoderdataPython |
4800155 | from .criss_cross_attention import CrissCrossAttention
from .switchable_norm import SwitchableNorm, SwitchableNorm1D, SwitchableNorm2D, SwitchableNorm3D, SwitchableNormND
| StarcoderdataPython |
4816687 | from infosystem.common import subsystem
from infosystem.subsystem.domain import manager, resource, controller, router
subsystem = subsystem.Subsystem(resource=resource.Domain,
controller=controller.Controller,
manager=manager.Manager,
router=router.Router)
| StarcoderdataPython |
1608959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
from tensorflow.python.util import tf_inspect
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer, thread_name_filter=None):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
bad_run_start_action: (str) bad action value to be returned during the
the on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
@test_util.run_v1_only("Sessions are not available in TF 2.x")
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session(config=self._no_rewrite_session_config())
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
file_io.delete_recursively(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclasses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegex(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegex(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegex(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegex(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
def testUsingWrappedSessionShouldWorkAsContextManager(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper as sess:
self.assertAllClose([[3.0], [4.0]], self._s)
self.assertEqual(1, self._observer["on_run_start_count"])
self.assertEqual(self._s, self._observer["run_fetches"])
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertAllClose(
[[11.0], [-1.0]],
sess.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(2, self._observer["on_run_start_count"])
self.assertEqual(self._q, self._observer["run_fetches"])
self.assertEqual(2, self._observer["on_run_end_count"])
def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, self.evaluate(foo))
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
wrapper.close()
def testWrapperThreadNameFilterMainThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter="MainThread")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterChildThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=r"Child.*")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterBothThreads(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=None)
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertEqual(2, dump.size)
self.assertItemsEqual(
["a_init", "b_init"],
[datum.node_name for datum in dump.dumped_tensor_data])
def _is_public_method_name(method_name):
return (method_name.startswith("__") and method_name.endswith("__")
or not method_name.startswith("_"))
class SessionWrapperPublicMethodParityTest(test_util.TensorFlowTestCase):
def testWrapperHasAllPublicMethodsOfSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(session.Session, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
def testWrapperHasAllPublicMethodsOfMonitoredSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(monitored_session.MonitoredSession,
predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
if __name__ == "__main__":
googletest.main()
| StarcoderdataPython |
3202768 | <reponame>annanda/jogo-othello-ia<filename>models/minimax_alfabeta.py
# -*- coding: utf-8 -*-
from models.move import Move
class MiniMaxAlfaBeta(object):
def __init__(self, max_depth):
self.chosen_move = None
self.max_depth = max_depth
def mini_max_alfa_beta(self, board, depth, color, parent_alfa, parent_beta, max_gamer, heuristic_function):
"""
"""
valid_moves = board.valid_moves(color)
my_best_value = None
if depth == 0 or not valid_moves:
my_best_value = heuristic_function(board, color)
return my_best_value
alfa = float('-inf')
beta = float('inf')
enemy_color = board._opponent(color)
for valid_move in valid_moves:
board_copy = board.get_clone()
board_copy.play(valid_move, color)
best_value = self.mini_max_alfa_beta(
board_copy,
depth - 1,
enemy_color,
alfa,
beta,
not(max_gamer),
heuristic_function
)
if max_gamer:
if best_value > alfa:
alfa = best_value
best_move = valid_move
my_best_value = alfa
if my_best_value > parent_beta:
break
else:
beta = min(best_value, beta)
my_best_value = beta
if my_best_value < parent_alfa:
break
if depth == self.max_depth:
self.chosen_move = best_move
return my_best_value
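# Usage sketch (not part of the original module): the search is typically seeded
# with infinite alpha/beta bounds and the chosen move is read back afterwards.
# `board`, `my_color` and `my_heuristic` are assumed to be supplied by the host
# game (a board object with valid_moves/get_clone/play and a heuristic callable).
#
# searcher = MiniMaxAlfaBeta(max_depth=3)
# value = searcher.mini_max_alfa_beta(
#     board, searcher.max_depth, my_color,
#     float('-inf'), float('inf'), True, my_heuristic)
# best_move = searcher.chosen_move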
| StarcoderdataPython |
3378635 | from .library import acquire_library
try:
import simplejson as json
except ImportError:
import json
class Context(object):
UNINITIALIZED = 0
INITIALIZED = 1
def __init__(self, ffi, library):
self._context = None
self.ffi = ffi
self._library = library
self.state = self.UNINITIALIZED
self._config_proxy = None
def __call__(self, methodname, *args, **kwargs):
return getattr(self._library, methodname)(*args, **kwargs)
def initialize(self):
if self.state == self.UNINITIALIZED:
self._context = self._library.ph_context_init()
self._config_proxy = ContextConfig(self._library)
self.state = self.INITIALIZED
return self
def destroy(self):
if self.state & self.INITIALIZED:
self._config_proxy = None
self._library.ph_context_free(self._context)
self.state = self.UNINITIALIZED
return self
def clear_memory_caches(self):
if self.state & self.INITIALIZED:
self._library.ph_context_clear_memory_cache()
return self
return None
def set_object_cache_capacity(self, min_dead_capacity, max_dead,
total_capacity):
if self.state & self.INITIALIZED:
self._library.ph_context_set_object_cache_capacity(
min_dead_capacity, max_dead, total_capacity)
return self
return None
def set_max_pages_in_cache(self, number_of_pages):
if self.state & self.INITIALIZED:
self._library.ph_context_set_max_pages_in_cache(number_of_pages)
return self
return None
def get_all_cookies(self):
cookies_json = None
if self.state & self.INITIALIZED:
cookies_json = self.ffi.string(
self._library.ph_context_get_all_cookies())
cookies_json = json.loads(cookies_json)
return cookies_json
@property
def config(self):
return self._config_proxy
class ContextConfig(object):
SETTINGS = (None,
('LoadImages', bool,),
('Javascript', bool,),
('DnsPrefetching', bool,),
('Plugins', bool,),
('PrivateBrowsing', bool,),
('OfflineStorageDB', bool,),
('OfflineStorageQuota', int,),
('OfflineAppCache', bool,),
('FrameFlattening', bool,),
('LocalStorage', bool,),)
def __init__(self, library):
self._library = library
def set_prop(key, prop_type):
def setter(self, new_val):
if prop_type is bool:
self._library.ph_context_set_boolean_config(key, int(new_val))
elif prop_type is int:
self._library.ph_context_set_int_config(key, int(new_val))
return setter
def get_prop(key, prop_type):
def getter(self):
return prop_type(self._library.ph_context_get_boolean_config(key))
return getter
for index, prop_and_type in enumerate(ContextConfig.SETTINGS):
if prop_and_type:
prop_name, prop_type = prop_and_type
setattr(ContextConfig, prop_name.lower(),
property(
get_prop(index, prop_type),
set_prop(index, prop_type)))
def acquire_context():
'''
Get a context from the pyphantom library.
'''
return Context(*acquire_library())
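# Illustrative lifecycle (assumes acquire_library() can locate the native
# pyphantom library on this system; not executed here):
#
# ctx = acquire_context().initialize()
# ctx.config.loadimages = False   # property generated from ContextConfig.SETTINGS
# cookies = ctx.get_all_cookies()
# ctx.destroy()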
| StarcoderdataPython |
1697665 | <reponame>motorny/chip-seq-pipeline2
#!/usr/bin/env python
# ENCODE DCC TSS enrich wrapper
# Author: <NAME>, <NAME> (<EMAIL>)
import matplotlib as mpl
mpl.use('Agg')
import pybedtools
import numpy as np
from matplotlib import mlab
from matplotlib import pyplot as plt
import sys
import os
import argparse
from encode_lib_common import (
strip_ext_bam, ls_l, log, logging, mkdir_p, rm_f)
from encode_lib_genomic import (
remove_read_group, samtools_index)
import metaseq
import warnings
warnings.filterwarnings("ignore")
def parse_arguments():
parser = argparse.ArgumentParser(prog='ENCODE TSS enrichment.')
parser.add_argument('--read-len-log', type=str,
help='Read length log file (from aligner task).')
parser.add_argument('--read-len', type=int,
help='Read length (integer). This is ignored if '
'--read-len-log is defined.')
parser.add_argument('--nodup-bam', type=str,
help='Raw BAM file (from task filter).')
parser.add_argument('--chrsz', type=str,
help='2-col chromosome sizes file.')
parser.add_argument('--tss', type=str, help='TSS definition bed file.')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO', help='Log level',
                        choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING',
                                 'ERROR', 'CRITICAL'])
args = parser.parse_args()
if args.read_len_log is None and args.read_len is None:
raise ValueError('Either --read-len-log or --read-len must be defined.')
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def make_tss_plot(bam_file, tss, prefix, chromsizes,
read_len, bins=400, bp_edge=2000,
processes=8, greenleaf_norm=True):
'''
Take bootstraps, generate tss plots, and get a mean and
standard deviation on the plot. Produces 2 plots. One is the
aggregation plot alone, while the other also shows the signal
at each TSS ordered by strength.
'''
logging.info('Generating tss plot...')
tss_plot_file = '{0}.tss_enrich.png'.format(prefix)
tss_plot_large_file = '{0}.large_tss_enrich.png'.format(prefix)
tss_log_file = '{0}.tss_enrich.qc'.format(prefix)
# Load the TSS file
tss = pybedtools.BedTool(tss)
tss_ext = tss.slop(b=bp_edge, g=chromsizes)
# Load the bam file
# Need to shift reads and just get ends, just load bed file?
bam = metaseq.genomic_signal(bam_file, 'bam')
# Shift to center the read on the cut site
bam_array = bam.array(tss_ext, bins=bins, shift_width=-read_len/2,
processes=processes, stranded=True)
# Normalization (Greenleaf style): Find the avg height
# at the end bins and take fold change over that
if greenleaf_norm:
# Use enough bins to cover 100 bp on either end
num_edge_bins = int(100/(2*bp_edge/bins))
bin_means = bam_array.mean(axis=0)
avg_noise = (sum(bin_means[:num_edge_bins]) +
sum(bin_means[-num_edge_bins:]))/(2*num_edge_bins)
bam_array /= avg_noise
else:
bam_array /= bam.mapped_read_count() / 1e6
# Generate a line plot
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.linspace(-bp_edge, bp_edge, bins)
ax.plot(x, bam_array.mean(axis=0), color='r', label='Mean')
ax.axvline(0, linestyle=':', color='k')
# Note the middle high point (TSS)
tss_point_val = max(bam_array.mean(axis=0))
# write tss_point_val to file
with open(tss_log_file, 'w') as fp:
fp.write(str(tss_point_val))
ax.set_xlabel('Distance from TSS (bp)')
if greenleaf_norm:
ax.set_ylabel('TSS Enrichment')
else:
ax.set_ylabel('Average read coverage (per million mapped reads)')
ax.legend(loc='best')
fig.savefig(tss_plot_file)
# Print a more complicated plot with lots of info
# Find a safe upper percentile - we can't use X if the Xth percentile is 0
upper_prct = 99
if mlab.prctile(bam_array.ravel(), upper_prct) == 0.0:
upper_prct = 100.0
plt.rcParams['font.size'] = 8
fig = metaseq.plotutils.imshow(bam_array,
x=x,
figsize=(5, 10),
vmin=5, vmax=upper_prct, percentile=True,
line_kwargs=dict(color='k', label='All'),
fill_kwargs=dict(color='k', alpha=0.3),
sort_by=bam_array.mean(axis=1))
# And save the file
fig.savefig(tss_plot_large_file)
return tss_plot_file, tss_plot_large_file, tss_log_file
def main():
# read params
args = parse_arguments()
CHROMSIZES = args.chrsz
TSS = args.tss if args.tss and os.path.basename(args.tss) != 'null' else ''
FINAL_BAM = args.nodup_bam
OUTPUT_PREFIX = os.path.join(
args.out_dir,
os.path.basename(strip_ext_bam(FINAL_BAM)))
samtools_index(FINAL_BAM) # make an index first
RG_FREE_FINAL_BAM = remove_read_group(FINAL_BAM)
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir)
# Also get read length
# read_len = get_read_length(FASTQ)
if args.read_len_log:
with open(args.read_len_log, 'r') as fp:
read_len = int(fp.read().strip())
elif args.read_len:
read_len = args.read_len
else:
read_len = None
# Enrichments: V plot for enrichment
# Use final to avoid duplicates
tss_plot, tss_large_plot, tss_enrich_qc = \
make_tss_plot(FINAL_BAM,
TSS,
OUTPUT_PREFIX,
CHROMSIZES,
read_len)
# remove temporary files
rm_f(RG_FREE_FINAL_BAM)
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
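# Example invocation (script and file names are placeholders, not files shipped
# with this snippet); the flags correspond to parse_arguments() above:
#   python tss_enrich.py --read-len 50 --nodup-bam sample.nodup.bam \
#       --chrsz hg38.chrom.sizes --tss hg38_tss.bed --out-dir qc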
| StarcoderdataPython |
169827 | <filename>data_analytics/tensorflow_/keras_tutorials/cat_dog.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
# python included:
import random
import os
import pickle
import time
model_name = 'Cats-vs-dogs-cnn-64x2-{}'.format(int(time.time()))
tensorboard = TensorBoard(log_dir='logs/{}'.format(model_name))
# get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('dark_background')
# plt.rcParams['figure.figsize'] = [13, 8]
plt.rcParams['axes.facecolor'] = '#121417'
plt.rcParams['figure.facecolor'] = '#282C34'
data_dir = 'C:\\Users\\Martin\\Dev\\keras_tutorial\\02_data'
categories = ['Dog', 'Cat']
img_size = 50
training_data = []
dataset_size = 100
def create_training_data():
for category in categories:
path = os.path.join(data_dir, category)
class_num = categories.index(category)
for img in os.listdir(path)[:dataset_size]:
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_img_array = cv2.resize(img_array, (img_size, img_size))
training_data.append([resized_img_array, class_num])
            except Exception:  # Exception (capitalized) is the built-in base class for errors, so any failed read is skipped
pass
create_training_data()
random.shuffle(training_data)
X = []  # capital X because it holds the feature data
y = []  # lowercase y because it holds the labels
for feature, label in training_data:
X.append(feature)
y.append(label)
X = np.array(X).reshape(-1, img_size, img_size, 1)
X = X / 255.0
y = np.array(y)  # Keras expects labels as an array, not a plain Python list
model = Sequential()
# Layer 1
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 2
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 3
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
# Layer 4
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, batch_size=32, epochs=3, validation_split=0.1, callbacks=[tensorboard])
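# Illustrative follow-up (assumes the fit above completed): predictions can be
# made on arrays shaped like X; the sigmoid output is near 0 for 'Dog' and near
# 1 for 'Cat', matching the order of the categories list.
# sample = X[:1]  # first image, shape (1, 50, 50, 1)
# print(model.predict(sample))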
| StarcoderdataPython |
4814200 | class OkApiException(Exception):
pass
class ApiError(OkApiException):
def __init__(self, message):
self.message = message
class OkUploadException(OkApiException):
pass
class UploadPhotoError(OkUploadException):
def __init__(self, message):
self.message = message
class UploadVideoError(OkUploadException):
def __init__(self, message):
        self.message = message
| StarcoderdataPython |
3358107 | <reponame>Don-Joel/MyDash<filename>weather/forms.py
from django import forms
from django.core.validators import validate_email
from django.forms import ModelForm, TextInput
from .models import City, Zipcode
class CityForm(ModelForm):
class Meta:
model = City
fields = ['name']
widgets = {
'name': TextInput(attrs={'class' : 'input', 'placeholder' : 'City Name'}),
} #updates the input class to have the correct Bulma class and placeholder
class ZipForm(ModelForm):
class Meta:
model = Zipcode
fields = ['zip', 'user']
widgets = {
'zip': TextInput(attrs={'class' : 'form-control', 'placeholder' : 'Enter Zipcode e.g. 22904'}),
'user': forms.HiddenInput()
        } #updates the input class to have the correct Bootstrap class and placeholder, and hides the user field
def validate(self, value):
"""Check if value consists only of valid emails."""
# Use the parent's handling of required fields, etc.
super().validate(value)
for email in value:
            validate_email(email)
| StarcoderdataPython |
1609268 | import numpy
from skimage.data import camera
from dexp.processing.interpolation.warp import warp
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
from dexp.utils.timeit import timeit
def demo_warp_2d_numpy():
try:
with NumpyBackend():
_demo_warp_2d()
except NotImplementedError:
print("Numpy version not yet implemented")
def demo_warp_2d_cupy():
try:
with CupyBackend():
_demo_warp_2d()
except ModuleNotFoundError:
print("Cupy module not found! Test passes nevertheless!")
def _demo_warp_2d(grid_size=8):
image = camera().astype(numpy.float32) / 255
image = image[0:477, 0:507]
magnitude = 15
vector_field = numpy.random.uniform(low=-magnitude, high=+magnitude, size=(grid_size,) * 2 + (2,))
with timeit("warp"):
warped = warp(image, vector_field, vector_field_upsampling=4)
with timeit("dewarped"):
dewarped = warp(warped, -vector_field, vector_field_upsampling=4)
from napari import Viewer, gui_qt
with gui_qt():
def _c(array):
return Backend.to_numpy(array)
viewer = Viewer()
viewer.add_image(_c(image), name="image")
viewer.add_image(_c(vector_field), name="vector_field")
viewer.add_image(_c(warped), name="warped")
viewer.add_image(_c(dewarped), name="dewarped")
if __name__ == "__main__":
demo_warp_2d_cupy()
# demo_warp_2d_numpy()
| StarcoderdataPython |
186735 | import io
import os
import time
from collections import Counter
from tempfile import NamedTemporaryFile
import cv2
import numpy as np
import pyautogui
from gtts import gTTS
from mpg123 import Mpg123, Out123
def get_screen_image():
with NamedTemporaryFile() as f:
pil_image = pyautogui.screenshot(imageFilename=f.name)
opencvImage = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
return opencvImage
def extract_qr_codes(image):
qrCodeDetector = cv2.QRCodeDetector()
res = qrCodeDetector.detectAndDecodeMulti(image)
return Counter(res[1])
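# Quick sanity check for extract_qr_codes (illustrative; 'multi.png' is the same
# pre-made test image mentioned in the __main__ block below):
# counts = extract_qr_codes(cv2.imread('multi.png'))
# print(counts)  # e.g. Counter({'thumbs_up': 3, 'smiling': 2})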
def audio_describe(codes):
text = ''
for code, count in codes.items():
if code == 'thumbs_up':
text += f'{count} users are presenting thumbs-up, '
elif code == 'smiling':
text += f'{count} users are smiling, '
if text == '':
return
with io.BytesIO() as f:
gTTS(text=text, lang='en', slow=False).write_to_fp(f)
f.seek(0)
mp3 = Mpg123()
mp3.feed(f.read())
out = Out123()
for frame in mp3.iter_frames(out.start):
out.play(frame)
if __name__ == '__main__':
while True:
# Sanity check using a pre-made image.
# Comment out the get_screen_image() call to test this.
# image = cv2.imread('multi.png')
s = time.perf_counter()
image = get_screen_image()
print(f'Screenshot time: {time.perf_counter() - s:0.2f} secs')
s = time.perf_counter()
codes_and_counts = extract_qr_codes(image)
print(f'QR extraction time: {time.perf_counter() - s:0.2f} secs')
if len(codes_and_counts) > 0:
s = time.perf_counter()
audio_describe(codes_and_counts)
print(f'Audio time: {time.perf_counter() - s:0.2f} secs')
else:
print('No QR codes detected')
| StarcoderdataPython |
1697816 | #!/usr/bin/env python3
from build import ninja_common
build = ninja_common.Build('control')
build.install('auv-controld3', f='control/auv_controld3.py')
build.install('auv-navigated', f='control/auv_navigated.py')
| StarcoderdataPython |
1658379 | __version_info__ = (2,0,2)
__version__ = '2.0.2'
from discogs_client.client import Client
from discogs_client.models import Artist, Release, Master, Label, User, \
Listing, Track, Price, Video
| StarcoderdataPython |
117588 | class User:
"""
class that generates new instances of user
"""
user_list=[]
def __init__(self,first_name,last_name,create_pw,confirm_pw):
'''
__init__ method that helps us define properties for our objects.
Args:
first_name: New user first name.
            last_name: New user last name.
            create_pw: New user password.
            confirm_pw: New user password confirmation.
'''
self.first_name = first_name
self.last_name = last_name
self.create_pw = create_pw
self.confirm_pw= confirm_pw
def login_user(self):
        '''
        login_user method saves the user object into User.user_list
        '''
        User.user_list.append(self)
| StarcoderdataPython |
1638401 | # coding=utf-8
import cv2
import tools.feature_extract as ife
def orb_img(img, features_count):
orb = cv2.ORB_create(features_count)
orb_key_points, orb_desc = orb.detectAndCompute(img, None)
orb_signed_img = cv2.drawKeypoints(img, orb_key_points, None)
return orb_key_points, orb_desc, orb_signed_img
def sift_img(img):
sift = cv2.xfeatures2d.SIFT_create()
sift_key_points, sift_desc = sift.detectAndCompute(img, None)
sift_signed_img = cv2.drawKeypoints(img, sift_key_points, None)
return sift_key_points, sift_desc, sift_signed_img
def surf_img(img):
surf = cv2.xfeatures2d.SURF_create()
surf_key_points, surf_desc = surf.detectAndCompute(img, None)
surf_signed_img = cv2.drawKeypoints(img, surf_key_points, None)
return surf_key_points, surf_desc, surf_signed_img
if __name__ == '__main__':
jpg = "/Users/philip.du/Documents/Projects/research/tea-recognition/sample_1/1a.JPG"
# jpg = "/Users/philip.du/Downloads/image1.JPG"
## ---- orb --- ##
# gray_img = ife.get_gray_img(jpg)
# key_points, desc, signed_img = orb_img(gray_img, 5000)
# print("gray# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
# cv2.imwrite("gray_img.signed.jpg", signed_img)
#
# sobel_img = ife.get_sobel_img(jpg)
# key_points, desc, signed_img = orb_img(sobel_img, 5000)
# print("sobel# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
# cv2.imwrite("sobel_img.signed.jpg", signed_img)
#
# canny_img = ife.get_canny_img(jpg)
# key_points, desc, signed_img = orb_img(canny_img, 5000)
# print("canny# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
# cv2.imwrite("canny_img.signed.jpg", signed_img)
## ---- sift --- ##
gray_img = ife.get_gray_img(jpg)
key_points, desc, signed_img = sift_img(gray_img)
print("gray# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
cv2.imwrite("gray_img.signed.jpg", signed_img)
sobel_img = ife.get_sobel_img(jpg)
key_points, desc, signed_img = sift_img(sobel_img)
print("sobel# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
cv2.imwrite("sobel_img.signed.jpg", signed_img)
canny_img = ife.get_canny_img(jpg)
key_points, desc, signed_img = sift_img(canny_img)
print("canny# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
cv2.imwrite("canny_img.signed.jpg", signed_img)
## ---- surf --- ##
# gray_img = ife.get_gray_img(jpg)
# key_points, desc, signed_img = surf_img(gray_img)
# print("gray# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
# cv2.imwrite("gray_img.signed.jpg", signed_img)
#
# sobel_img = ife.get_sobel_img(jpg)
# key_points, desc, signed_img = surf_img(sobel_img)
# print("sobel# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
# cv2.imwrite("sobel_img.signed.jpg", signed_img)
#
# canny_img = ife.get_canny_img(jpg)
# key_points, desc, signed_img = surf_img(canny_img)
# print("canny# kps: {}, descriptors: {}".format(len(key_points), desc.shape))
# cv2.imwrite("canny_img.signed.jpg", signed_img)
| StarcoderdataPython |
3274492 | <gh_stars>0
# 8-16
def make_car(
manufacturer,
type,
**additions):
"""
Build a car profile.
:param manufacturer:
:param type:
:param additions:
:return car:
"""
car = dict()
car['manufacturer'] = manufacturer
car['type'] = type
for k, v in additions.items():
car[k] = v
    return car
| StarcoderdataPython |
3317995 | <filename>mal_news.py
import urllib.request as urllib2
from bs4 import BeautifulSoup
print("\n\t\t--- MAL NEWS ---\n\n\tAuthor: <NAME>.\n\tSee my projects on Github: github.com/mynameismaz")
class ScrapeWebsite():
def __init__(self, url):
self.url = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
self.ok = True
try:
self.page = urllib2.urlopen(self.url)
        except Exception:
self.ok = False
pages = int(input("\n\nNo. pages: "))
page_counter = 0
for i in range(pages):
print(f"\n\t\t\t\tPage {i + 1}\n", "-"*70, sep="")
scraper = ScrapeWebsite(f"https://myanimelist.net/news?p={i + 1}")
if(scraper.ok):
soup = BeautifulSoup(scraper.page, 'html.parser')
p = soup.find_all('p', attrs={'class': 'title'})
for items in p:
page_counter += 1
link = items.find('a')
print(f"{page_counter}) {link.get_text()[0:70]}... - {link['href']}")
print("Done, thanks for using MAL News.\n")
| StarcoderdataPython |
1678631 | """Patch extraction for images."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, NamedTuple, Tuple
import numpy as np
log = logging.getLogger(__name__)
class PatchRowRW(NamedTuple):
idx: int
x: slice
y: int
xp: slice
yp: int
class PatchMaskRowRW(NamedTuple):
idx: int
xp: slice
yp: int
def patches(
x_coords: np.ndarray,
y_coords: np.ndarray,
halfwidth: int,
image_width: int,
image_height: int,
) -> Tuple[List[PatchRowRW], List[PatchMaskRowRW]]:
"""
Generate the Read and write ops for patches given a set of coords.
This function describes read and write operations in terms of a single
index in y (row) followed by a contiguous slice in x. Patches are made
up of many of these structs (PatchRowRW) (1 for each row in the patch).
The output gives the read location in the image, and the write location
in the patch array.
The function also outputs the write operations for the image *mask*,
ie what writes should be done on a 'False' mask to turn missing values into
true. This is generally much more efficient that using the patch writes
to write false into a 'True' mask.
Parameters
----------
x_coords : np.ndarray
The x coordinates of the patch centres. Must be 1d and equal in size
to y_coords.
y_coords : np.ndarray
The y coordinates of teh patch centres. Must be 1d and equal in size
to the y_coords.
halfwidth : int
Integer describing the number of pixels out from the centre the patch
should extend. A 1x1 patch has halfwidth 0. A 3x3 patch has halfwidth
1 etc.
image_width : int
The width of the image in pixels. Needed for masking calculations.
image_height : int
The height of the image in pixels. Needed for masking calculations.
Returns
-------
result : Tuple[List[PatchRowRW], List[PatchMaskRowRW]]
The list of patch row read/writes and mask writes corresponding to
the patches requested.
"""
assert x_coords.shape[0] == y_coords.shape[0]
assert x_coords.ndim == 1
assert y_coords.ndim == 1
assert halfwidth >= 0
assert image_width > 0
ncoords = x_coords.shape[0]
xmins = x_coords - halfwidth
ymins = y_coords - halfwidth
n = halfwidth * 2 + 1
xmaxs = xmins + n - 1 # INCLUSIVE
# What lines to read?
y_reads = (ymins[np.newaxis, :] + np.arange(n)[:, np.newaxis]).flatten()
patch_indices = np.tile(np.arange(ncoords), n)
order = np.lexsort((patch_indices, y_reads))
y_reads_sorted = y_reads[order]
patch_indices_sorted = patch_indices[order]
patch_rws = _patch_reads(
n,
y_reads_sorted,
xmins,
xmaxs,
ymins,
patch_indices_sorted,
image_width,
image_height,
)
mask_ws = _mask_patches(
n,
y_reads_sorted,
xmins,
xmaxs,
ymins,
patch_indices_sorted,
image_width,
image_height,
)
return patch_rws, mask_ws
def _patch_reads(
n: int,
y_reads: np.ndarray,
xmins: np.ndarray,
xmaxs: np.ndarray,
ymins: np.ndarray,
patch_indices: np.ndarray,
image_width: int,
image_height: int,
) -> List[PatchRowRW]:
"""Compute the read and writes for the patches."""
y_mask = np.logical_and(y_reads >= 0, y_reads < image_height)
x_starts = np.maximum(xmins, 0)
x_stops = np.minimum(xmaxs + 1, image_width)
# patch space
y_patch_reads = y_reads - ymins[patch_indices]
x_patch_starts = x_starts - xmins
x_patch_stops = x_stops - xmins
patch_rw_list = []
for i, m, y, yp in zip(patch_indices, y_mask, y_reads, y_patch_reads):
if m:
r = PatchRowRW(
i,
slice(x_starts[i], x_stops[i]),
y,
slice(x_patch_starts[i], x_patch_stops[i]),
yp,
)
patch_rw_list.append(r)
return patch_rw_list
def _mask_patches(
n: int,
y_reads: np.ndarray,
xmins: np.ndarray,
xmaxs: np.ndarray,
ymins: np.ndarray,
patch_indices: np.ndarray,
image_width: int,
image_height: int,
) -> List[PatchMaskRowRW]:
"""Compute the inverse writes for the mask for the patches."""
# Inverse (mask) writes
inv_y_mask = np.logical_or(y_reads < 0, y_reads >= image_height)
x_premask = xmins < 0
x_postmask = xmaxs >= image_width
y_patch_reads = y_reads - ymins[patch_indices]
# There can be two x writes in general: pre- and post-image.
x_patch_prestarts = np.zeros_like(xmins, dtype=int)
x_patch_prestops = -1 * xmins
x_patch_poststarts = np.full(xmins.shape, image_width) - xmins
x_patch_poststops = (xmaxs + 1) - xmins
mask_w_list = []
for i, m, yp in zip(patch_indices, inv_y_mask, y_patch_reads):
if m:
mask_w_list.append(PatchMaskRowRW(i, slice(0, n), yp))
else:
if x_premask[i]:
mask_w_list.append(
PatchMaskRowRW(
i, slice(x_patch_prestarts[i], x_patch_prestops[i]), yp
)
)
if x_postmask[i]:
mask_w_list.append(
PatchMaskRowRW(
i, slice(x_patch_poststarts[i], x_patch_poststops[i]), yp
)
)
return mask_w_list
| StarcoderdataPython |
96600 | # -*- coding: utf-8 -*-
import os
import re
import shutil
from queue import Queue
from threading import Thread
def load_features(file):
with open(file, mode='r', encoding='utf-8') as fp:
features_list = fp.readlines()
return [feature.strip('\n') for feature in features_list]
def valid_feature(q, feature_list, pattern):
while not q.empty():
pakeage_path = q.get()
file_list = os.listdir(pakeage_path)
for file in file_list:
ret = file.rsplit('.', 1)
if pattern.match(file) and ret[0] not in feature_list:
print(file)
file_path = pakeage_path + '\\' + file
os.remove(file_path)
new_file_list = os.listdir(pakeage_path)
if len(new_file_list) == 3:
shutil.rmtree(pakeage_path)
def select_features(features_path, feature_list):
q = Queue()
threading_list = []
pakeages_list = os.listdir(features_path)
pattern = re.compile(r'^PNF_|^SCP_')
for pakeage in pakeages_list:
pakeage_path = features_path + '\\' + pakeage
q.put(pakeage_path)
for i in range(4):
t = Thread(target=valid_feature, args=(q, feature_list, pattern))
threading_list.append(t)
t.start()
for t in threading_list:
t.join()
def main():
feature_file = './usg12000_vcmu_om'
features_path = r'D:\WorkDocument\SimulateEnv\VCMU_12000_0821\feature'
feature_list = load_features(feature_file)
select_features(features_path, feature_list)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1622420 | <filename>source/rpg_game/__init__.py
from .main import rpg_game | StarcoderdataPython |
1655267 | <reponame>elizusha/hypertoc<filename>scripts/converter.py
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import feedparser
import json
import argparse
from typing import Any, Dict
def convert_rss_item(item) -> Dict[str, Any]:
return {
"@type": "HyperTocEntry",
"name": item["title"],
"url": item["links"][1]["href"],
"utterances": [item["title"]]
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help="rss file")
parser.add_argument("output_file", help="json file")
return parser.parse_args()
def main():
args = parse_args()
rss_data = feedparser.parse(args.input_file)
json_data: Dict[str, Any] = {
"@context": "https://schema.org/",
"@type": "HyperToc",
"associatedMedia": {
"@type": "AudioObject",
"encodingFormat": "audio/mpeg",
"contentUrl": "",
},
"tocEntry": [
convert_rss_item(item)
for item in rss_data["items"]
]
}
with open(args.output_file, "w") as f:
print(json.dumps(json_data, indent=4), file=f)
if __name__ == "__main__":
main()
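# Example invocation (file names are placeholders): the first argument is an RSS
# feed and the second is the JSON file to write, as defined in parse_args() above.
#   ./converter.py podcast_feed.xml hypertoc.json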
| StarcoderdataPython |
3353306 | import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import folium
def plt_bar_demo(df, anno, provincia):
fig = px.bar(
df[(df.ANNO == anno) & (df.PROVINCIA == provincia)],
x="FASCIA_ETA",
y="POPOLAZIONE",
color="GENERE",
title="Distribuzione popolazione per fasce d'età",
)
return fig
def plt_pie_livello_emerg(df):
fig = px.pie(
df,
values="CONTEGGIO",
names="LIVELLO_EMERGENZA",
title="Tipologia livello di emergenza gestito dalle strutture",
)
return fig
def plt_bar_tipo_strutture(df):
fig = px.bar(
df,
x="CONTEGGIO",
y="TIPO_STRUTTURA",
title="Tipologia di strutture ospedaliere presenti in Lombardia",
)
return fig
def plt_num_mortalita(df, anno, provincia):
fig = go.Figure()
fig.add_trace(
go.Indicator(
value=df[(df.ANNO == anno) & (df.PROVINCIA == provincia)].iloc[0][
"SPERANZA_VITA_NASCITA"
],
title="Speranza di vita alla nascita",
domain={"row": 0, "column": 0},
gauge={"axis": {"visible": True}},
)
)
fig.add_trace(
go.Indicator(
value=df[(df.ANNO == anno) & (df.PROVINCIA == provincia)].iloc[0][
"TASSO_MORTALITA"
],
title="Tasso di mortalità",
domain={"row": 1, "column": 0},
gauge={"axis": {"visible": True}},
)
)
fig.update_layout(grid={"rows": 2, "columns": 1, "pattern": "independent"})
return fig
def plt_map_hospital(df):
mappa = folium.Map(
location=[45.464664, 9.188540],
zoom_start=7.5,
position="central",
tiles="CartoDB positron",
)
    # insert a marker for every hospital in Lombardy
for row in df.index:
folium.Marker(
location=[df["COORDINATA_X"][row], df["COORDINATA_Y"][row]],
tooltip=[df["DENOM_STRUTTURA"][row], df["DENOM_ENTE"][row]],
icon=folium.Icon(color="red", icon="plus"),
).add_to(mappa)
# draw boundaries
json_boundaries = r"./app/static/province_lombardia.geojson"
folium.GeoJson(json_boundaries, overlay=True).add_to(mappa)
return mappa
def plt_bar_reparti(df):
fig = px.bar(
df,
x="descrizione_disciplina",
y="totale_ricoveri",
color="descrizione_disciplina",
color_discrete_sequence=px.colors.qualitative.Pastel,
title="Ripartizione per reparto",
)
return fig
def plt_performance_ospdeli(df):
perf_plot = make_subplots(
rows=2,
cols=2,
subplot_titles=(
"Trasferimenti tra strutture",
"Tasso dimissioni volontarie",
"Tasso ritorni in sala operatoria",
"Tasso ricoveri ripetuti",
),
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["NUM_TRASFERIMENTI_FRA_STRUTTURE"],
hoverinfo="none",
mode="lines+markers",
line_color="#f0f921",
name="Tasso trasferimenti tra strutture",
),
row=1,
col=1,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["MEDIA_TRASFERIMENTI_FRA_STRUTTURE"],
mode="lines+markers",
line_color="firebrick",
name="Media trasferimenti tra strutture",
),
row=1,
col=1,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["TASSO_DIMISSIONI_VOLONTARIE"],
mode="lines+markers",
line_color="#ed7953",
name="Tasso dimissioni volontarie",
),
row=1,
col=2,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["MEDIA_DIMISSIONI_VOLONTARIE"],
mode="lines+markers",
line_color="firebrick",
name="Media dimissioni volontarie",
),
row=1,
col=2,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["TASSO_RITORNI_IN_SALA_OPERATORIA"],
mode="lines+markers",
line_color="#7201a8",
name="Tasso ritorni in sala operatoria",
),
row=2,
col=1,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["MEDIA_RITORNI_SALA_OPERATORIA"],
mode="lines+markers",
line_color="firebrick",
name="Media ritorni in sala operatoria",
),
row=2,
col=1,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["TASSO_INEFFICACIA_CURE"],
mode="lines+markers",
line_color="#0d0887",
name="Tasso ricoveri ripetuti",
),
row=2,
col=2,
)
perf_plot.add_trace(
go.Scatter(
x=df["ANNO"],
y=df["MEDIA_INEFFICACIA_CURE"],
mode="lines+markers",
line_color="firebrick",
name="Media ricoveri ripetuti",
),
row=2,
col=2,
)
return perf_plot
def plt_sankey_acc_clinica(df):
source = []
target = []
label = (
df["descrizione_acc_di_diagnosi"].unique().tolist()
+ df.descrizione_drg.tolist()
)
for x in range(0, df.shape[0]):
source.append(0)
value = df.totale_ricoveri.tolist()
label = (
df["descrizione_acc_di_diagnosi"].unique().tolist()
+ df.descrizione_drg.tolist()
)
target.extend(range(1, df.shape[0] + 1))
# data to dict, dict to sankey
link = dict(source=source, target=target, value=value)
node = dict(label=label, pad=50, thickness=5)
data = go.Sankey(link=link, node=node)
# plot
fig = go.Figure(data)
fig.update_layout(
hovermode="x", title="Ripartizione delle diagnosi per categoria clinica"
)
return fig
| StarcoderdataPython |
116274 | <gh_stars>0
from typing import NamedTuple
from coordinates import spaced_coordinate
Coordinates = spaced_coordinate("Coordinates", "xy")
Orientation = NamedTuple(
"Orientation", [("rot_x", float), ("rot_y", float), ("rot_z", float)]
)
ThreeDCoordinates = spaced_coordinate("ThreeDCoordinates", "xyz")
Spherical = NamedTuple("Spherical", [("rot_x", float), ("rot_y", float), ("dist", int)])
| StarcoderdataPython |
3258462 | <filename>glb_reader.py
# glb reader
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d as plt3d
import bone
def read_jsonglb(file_path):
with open(file_path) as f:
# Remove the binary data from the start and end of glb first
data = json.load(f)
decoded = []
for i in data['nodes']:
c = i['translation']
x, y, z = c
c = [x, y, z, 1]
q = i['rotation']
x, y, z, w = q
q = w, x, y, z
decoded.append(np.array([c,q]))
decoded = np.array(decoded)
# OpenGloves Skips the first root node
decoded = decoded[1:,:,:]
return decoded
def plot_glb(file_path, title="GlbPlot"):
decoded = read_jsonglb(file_path)
print("Glb shape: ", decoded.shape)
print("Expected Shape:", "(31,2,4)")
np.save('decoded', decoded)
# 0th element should be root
# 1st element should be wrist
print("Root: ", decoded[0, :, :])
print("Wrist: ", decoded[1, :, :])
print(decoded.shape)
points = bone.build_hand(decoded, True)
# Plot the points
# Plot setup
fig = plt.figure(title)
ax = fig.add_subplot(111, projection='3d')
bone.plot_steam_hand(points, "Lerped Pose", ax=ax)
if __name__ == "__main__":
file_path_skeleton = 'assets/vr_glove_right_skeleton.json'
# json from danwillm glb
file_path_dwm = 'assets/glb.json'
    plot_glb(file_path_skeleton, "vr_glove_right_skeleton")
plot_glb(file_path_dwm, "danwillm")
    plot_glb('assets/vr_glove_left_model.json', 'vr_glove_left_model')
| StarcoderdataPython |
159679 | <gh_stars>0
from django.views import generic
from .models import Author, Book
class IndexView(generic.ListView):
template_name = 'library/index.html'
context_object_name = 'all_books'
def get_queryset(self):
return Book.objects.order_by('pub_date')
class AuthorView(generic.DetailView):
model = Author
template_name = 'library/author.html'
| StarcoderdataPython |
4829879 | """
Algorithm to check Prime number
[Language used] - Python
Author: Phanatagama
"""
number = int(input('Input a Number: '))
# If given number is greater than 1
if number > 1:
    # Iterate over candidate divisors from 2 to number - 1
for i in range(2, number):
        # If number is divisible by any of these
        # candidates, it is not prime
if (number % i) == 0:
print(f"{number} is not a prime number")
break
else:
print(f"{number} is a prime number")
else:
print(f"{number} is not a prime number") | StarcoderdataPython |
3260648 | <filename>keycloak_admin_aio/_resources/roles/by_id/composites/composites.py
from keycloak_admin_aio.types import RoleRepresentation
from .... import KeycloakResource
class RolesByIdComposites(KeycloakResource):
"""Get composites for a role by id.
.. code:: python
from keycloak_admin_aio import KeycloakAdmin, RoleRepresentation
kc: KeycloakAdmin # needs to be instantiated
role_id: str # uuid
"""
def get_url(self) -> str:
return f"{self._get_parent_url()}/composites"
async def create(self, composite_roles: list[RoleRepresentation]):
"""Create composites for a role by id.
.. code:: python
role_representations: list[RoleRepresentation] = [] # needs to be populated
await kc.roles.by_id(role_id).composites.create(role_representations)
"""
connection = await self._get_connection()
await connection.post(
self.get_url(),
json=RoleRepresentation.to_dict_list(composite_roles),
)
async def get(self) -> list[RoleRepresentation]:
"""Get composites for a role by id.
.. code:: python
composites: list[RoleRepresentation] = await kc.roles.by_id(role_id).composites.get()
"""
connection = await self._get_connection()
response = await connection.get(self.get_url())
response.raise_for_status()
return RoleRepresentation.from_list(response.json())
async def delete(self, composite_roles: list[RoleRepresentation]):
"""Delete composites for a role by id.
.. code:: python
role_representations: list[RoleRepresentation] = [] # needs to be populated
await kc.roles.by_id(role_id).composites.delete(role_representations)
"""
connection = await self._get_connection()
await connection.request(
method="DELETE",
url=self.get_url(),
json=RoleRepresentation.to_dict_list(composite_roles),
)
| StarcoderdataPython |
3255623 | <reponame>blackapple1202/TensorflowCodeRepo<filename>04.Create_Images_to_Table/create_image_table.py
import PIL
from PIL import Image, ImageOps, ImageDraw
import pandas as pd
import shutil
import os.path
import random
from pathlib import Path
############### CONFIGURE ########################
# Table Configure Variables
# Image Size Configuration
IMAGE_START_NUMBER = 1
IMAGE_END_NUMBER = 200
TABLE_IM_PIXEL = 480
TABLE_IM_WIDTH_NUMBER = 4
TABLE_IM_HEIGHT_NUMBER = 4
# Image Background Configuration
BACKGROUND_START_NUMBER = 1
BACKGROUND_END_NUMBER = 16
BACKGROUND_FOLDER = 'backgrounds'
BACKGROUND_IMAGE_FILE_NAME = '{}_background.jpg'
# Set input path and output path
INPUT_FOLDER = 'images'
INPUT_IMAGE_FILE_NAME = '{}_crop.png'
OUTPUT_FOLDER = 'data'
OUTPUT_IMAGE_FILE_NAME = '{}_table{}.jpg'
OUTPUT_CLONE_FOLDER = 'data/clone'
# Set REPETITION number of extraction
EXTRACT_OUTPUT_INDEX_MIN = 181
EXTRACT_OUTPUT_INDEX_MAX = 240
# REPETITION NUMBER = EXTRACT_OUTPUT_INDEX_MAX - EXTRACT_OUTPUT_INDEX_MIN + 1
# Toggle options
TOGGLE_BACKGROUND = True
TOGGLE_SHUFFLE_BACKGROUND = False
TOGGLE_SHUFFLE_IMAGE = True
TOGGLE_CSV_TO_SAVE_INDIVIDUAL = False
TOGGLE_CLONE_IMAGE_TO_SHOW = False
TOGGLE_CLONE_IMAGE_TO_SAVE = True
OUTPUT_CLONE_IMAGE_FILE_NAME = 'include_boundaries_{}_table{}.jpg'
# Set index of EXTRACT_MODE to OUTPUT_IMAGE_EXTRACT_MODE
# Default is same as 'all'
EXTRACT_MODE = ['default', 'all', 'odd', 'even' , 'random']
RANDOM_START_RANGE_MIN = 0
RANDOM_START_RANGE_MAX = 3
RANDOM_INCREMENT_RANGE_MIN = 2
RANDOM_INCREMENT_RANGE_MAX = 6
OUTPUT_IMAGE_EXTRACT_MODE = EXTRACT_MODE[4]
# Table Boundary Configure
BOUNDARY_PADDING_PIXEL = {'top': 4, 'bottom': 4, 'left': 4, 'right': 4}
# CSV Configure
LABEL = 'face'
OUTPUT_CSV_FILE_NAME = '{}_labels{}.csv'
# Extract Training(True) or Testing(False)?
DATA_USAGE = True
###################################################
start_step = 0
increment_step = 1
def check_image_with_pil(path):
try:
Image.open(path)
except IOError:
return False
return True
def show_table_image(tableImg):
tableImg.show()
def save_table_image(path , tableImg):
tableImg.save(path)
def save_boundaries_to_csv(path, input_image_list):
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
images_df = pd.DataFrame(input_image_list, columns=column_name)
images_df.to_csv(path, index=None)
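# With the default configuration above (480 px table, 4x4 grid, 4 px boundary
# padding, EXTRACT_OUTPUT_INDEX_MIN = 181) one row of the resulting CSV for the
# top-left cell would look like (values illustrative, cell size 480/4 = 120 px,
# so xmax/ymax = 120 - 4 = 116):
#   train_table181.jpg,480,480,face,4,4,116,116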
def append_boundary_to_csv(output_image_list, filename, width, height, label, xmin, ymin, xmax, ymax):
value = (filename, width, height, label, xmin, ymin, xmax, ymax)
output_image_list.append(value)
def extract(repeat_index, background_index, all_image_list):
if DATA_USAGE:
usage = 'train'
else:
usage = 'test'
image_list = []
    SOURCE_IM_PIXEL = TABLE_IM_PIXEL // TABLE_IM_WIDTH_NUMBER  # integer division so Pillow paste/thumbnail get int coordinates
tableImage = Image.new('RGB', (TABLE_IM_PIXEL,TABLE_IM_PIXEL))
IMAGES_COUNT = IMAGE_START_NUMBER
clone_tableImage = Image.new('RGB', (TABLE_IM_PIXEL,TABLE_IM_PIXEL))
DrawImg = ImageDraw.Draw(clone_tableImage)
if TOGGLE_BACKGROUND:
background = Image.open('{}/{}'.format(BACKGROUND_FOLDER, BACKGROUND_IMAGE_FILE_NAME.format(background_index)))
background = background.resize((TABLE_IM_PIXEL, TABLE_IM_PIXEL), PIL.Image.ANTIALIAS)
tableImage.paste(background, (0, 0))
clone_tableImage.paste(background, (0, 0))
if not RANDOM_INCREMENT_RANGE_MIN > 0 or not RANDOM_INCREMENT_RANGE_MAX > RANDOM_INCREMENT_RANGE_MIN:
print('RANDOM_INCREMENT_RANGE should be set properly')
return
for directory in [INPUT_FOLDER]:
for i in range(0, TABLE_IM_WIDTH_NUMBER):
start_step = 0
increment_step = 1
if OUTPUT_IMAGE_EXTRACT_MODE == 'all':
start_step = 0
increment_step = 1
elif OUTPUT_IMAGE_EXTRACT_MODE == 'odd':
if i % 2 == 0:
start_step = 1
else:
start_step = 0
increment_step = 2
elif OUTPUT_IMAGE_EXTRACT_MODE == 'even':
if i % 2 == 0:
start_step = 0
else:
start_step = 1
increment_step = 2
elif OUTPUT_IMAGE_EXTRACT_MODE == 'random':
start_step = random.randrange(RANDOM_START_RANGE_MIN, RANDOM_START_RANGE_MAX)
increment_step = random.randrange(RANDOM_INCREMENT_RANGE_MIN, RANDOM_INCREMENT_RANGE_MAX)
for j in range(start_step, TABLE_IM_HEIGHT_NUMBER, increment_step):
# Open image on images directory
if TOGGLE_SHUFFLE_IMAGE:
IMAGES_COUNT = random.randrange(IMAGE_START_NUMBER, IMAGE_END_NUMBER)
else:
IMAGES_COUNT = IMAGES_COUNT + 1
# If image is not exist on folder
while not check_image_with_pil('{}/{}'.format(directory, INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT))):
# Skip to next index
if TOGGLE_SHUFFLE_IMAGE:
IMAGES_COUNT = random.randrange(IMAGE_START_NUMBER, IMAGE_END_NUMBER)
else:
IMAGES_COUNT = IMAGES_COUNT + 1
                    # If the image index exceeds the end number
if IMAGES_COUNT > IMAGE_END_NUMBER:
                        # Save process
save_table_image('{}/{}'.format(OUTPUT_FOLDER, OUTPUT_IMAGE_FILE_NAME.format(usage,repeat_index)), tableImage)
print('Successfully save images to table')
if TOGGLE_CSV_TO_SAVE_INDIVIDUAL:
csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage,repeat_index))
save_boundaries_to_csv(csv_path, image_list)
print('Successfully save boundaries to csv')
if TOGGLE_CLONE_IMAGE_TO_SAVE:
save_table_image('{}/{}'.format(OUTPUT_CLONE_FOLDER, OUTPUT_CLONE_IMAGE_FILE_NAME.format(usage,repeat_index)), clone_tableImage)
# Show process
if TOGGLE_CLONE_IMAGE_TO_SHOW:
show_table_image(clone_tableImage)
print('End of file is {}'.format(INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
# End of script
return
im = Image.open('{}/{}'.format(directory, INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
im = ImageOps.expand(im, border=(int)(SOURCE_IM_PIXEL*0.01), fill='white')
im = im.resize((TABLE_IM_PIXEL, TABLE_IM_PIXEL), PIL.Image.ANTIALIAS)
im.thumbnail((SOURCE_IM_PIXEL, SOURCE_IM_PIXEL))
xmin = (j * SOURCE_IM_PIXEL) + BOUNDARY_PADDING_PIXEL['left']
ymin = (i * SOURCE_IM_PIXEL) + BOUNDARY_PADDING_PIXEL['top']
xmax = (j * SOURCE_IM_PIXEL) + SOURCE_IM_PIXEL - BOUNDARY_PADDING_PIXEL['right']
ymax = (i * SOURCE_IM_PIXEL) + SOURCE_IM_PIXEL - BOUNDARY_PADDING_PIXEL['bottom']
append_boundary_to_csv(image_list,
OUTPUT_IMAGE_FILE_NAME.format(usage, repeat_index),
TABLE_IM_PIXEL,
TABLE_IM_PIXEL,
LABEL,
xmin,
ymin,
xmax,
ymax)
append_boundary_to_csv(all_image_list,
OUTPUT_IMAGE_FILE_NAME.format(usage, repeat_index),
TABLE_IM_PIXEL,
TABLE_IM_PIXEL,
LABEL,
xmin,
ymin,
xmax,
ymax)
tableImage.paste(im, ((j * SOURCE_IM_PIXEL),(i * SOURCE_IM_PIXEL)))
clone_tableImage.paste(im, ((j * SOURCE_IM_PIXEL),(i * SOURCE_IM_PIXEL)))
DrawImg.rectangle([(xmin, ymin), (xmax, ymax)], fill=None, outline='green')
# Save process
save_table_image('{}/{}'.format(OUTPUT_FOLDER, OUTPUT_IMAGE_FILE_NAME.format(usage,repeat_index)), tableImage)
print('Successfully save images to table')
if TOGGLE_CSV_TO_SAVE_INDIVIDUAL:
csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage,repeat_index))
save_boundaries_to_csv(csv_path, image_list)
print('Successfully save boundaries to csv')
if TOGGLE_CLONE_IMAGE_TO_SAVE:
save_table_image('{}/{}'.format(OUTPUT_CLONE_FOLDER, OUTPUT_CLONE_IMAGE_FILE_NAME.format(usage,repeat_index)), clone_tableImage)
# Show process
if TOGGLE_CLONE_IMAGE_TO_SHOW:
show_table_image(clone_tableImage)
print('End of file is {}'.format(INPUT_IMAGE_FILE_NAME.format(IMAGES_COUNT)))
# End of Script
def main():
if not EXTRACT_OUTPUT_INDEX_MIN > 0 or not EXTRACT_OUTPUT_INDEX_MAX >= EXTRACT_OUTPUT_INDEX_MIN:
print('EXTRACT_OUTPUT_INDEX should be set properly')
return
background_index = 0
image_list = []
for i in range(EXTRACT_OUTPUT_INDEX_MIN, EXTRACT_OUTPUT_INDEX_MAX + 1):
if TOGGLE_SHUFFLE_BACKGROUND:
background_index = random.randrange(BACKGROUND_START_NUMBER, BACKGROUND_END_NUMBER)
else:
background_index = background_index + 1;
if(background_index >= BACKGROUND_END_NUMBER):
background_index = BACKGROUND_START_NUMBER
extract(i, background_index, image_list)
if DATA_USAGE:
usage = 'train'
else:
usage = 'test'
csv_path = '{}/{}'.format(OUTPUT_FOLDER, OUTPUT_CSV_FILE_NAME.format(usage, ''))
save_boundaries_to_csv(csv_path, image_list)
main()
| StarcoderdataPython |
141282 | import os
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from ...models import (
TranscriptPhrase, TranscriptPhraseVote,
TranscriptPhraseCorrection, TranscriptPhraseCorrectionVote
)
class Command(BaseCommand):
help = '''Deletes all votes, corrections, and non-staff users;
resets all phrase confidence ratings to zero'''
def handle(self, *args, **options):
if os.environ['DJANGO_SETTINGS_MODULE'] == 'mla_game.settings.prod':
print('You cannot run this in production')
else:
TranscriptPhraseVote.objects.all().delete()
TranscriptPhraseCorrectionVote.objects.all().delete()
TranscriptPhraseCorrection.objects.all().delete()
TranscriptPhrase.objects.all().update(current_game=1, confidence=0.00)
User.objects.filter(is_staff=False).delete()
| StarcoderdataPython |
3250843 | from dataclasses import dataclass
import dacite
from . import file_store
@dataclass
class GitHub:
access_token: str
@dataclass
class Secrets:
github: GitHub
def load(path: str) -> Secrets:
content = file_store.load(path)
return normer(content)
def normer(data: dict) -> Secrets:
return dacite.from_dict(data_class=Secrets, data=data)
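# Illustrative input shape expected by load()/normer(), inferred from the
# dataclasses above (the on-disk format is whatever file_store.load returns):
# {"github": {"access_token": "<token>"}}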
| StarcoderdataPython |
1689122 |
def _patch_descriptor_backwards_relation(descriptor):
# we need to patch backwards relation for nullable fields to override
# related_manager_cls.remove and related_manager_cls.clear methods
# since they are using update orm technique w/o having any signal
# see django.db.models.fields.related.ForeignRelatedObjectsDescriptor
# (django 1.6-1.8) or
# django.db.models.fields.related_descriptors.ReverseManyToOneDescriptor
# (django 1.9+) related_manager_cls property for details
_remove_orig = descriptor.related_manager_cls.remove
_clear_orig = descriptor.related_manager_cls.clear
def remove(self, *objs, **kwargs):
result = _remove_orig(self, *objs, **kwargs)
for field in descriptor._abnorm_slave_fields:
field.update_value(self.instance)
return result
remove.alters_data = True
remove._wrapped = _remove_orig
def clear(self):
result = _clear_orig(self)
for field in descriptor._abnorm_slave_fields:
field.update_value(self.instance)
return result
clear.alters_data = True
clear._wrapped = _clear_orig
descriptor.related_manager_cls.remove = remove
descriptor.related_manager_cls.clear = clear
def _patch_foreign_related_object_descriptor_once(descriptor):
if not hasattr(descriptor, '_abnorm_slave_fields'):
descriptor._abnorm_slave_fields = []
_patch_descriptor_backwards_relation(descriptor)
class EveryDjango(object):
def perform_delayed_setup_signals(self, sender, **kwargs):
from .. import state
while state.delayed_setup_signals:
model, field = state.delayed_setup_signals.pop()
field.setup_signals(model)
def add_foreign_related_object_descriptor_slave_field(
self, descriptor, field):
_patch_foreign_related_object_descriptor_once(descriptor)
descriptor._abnorm_slave_fields.append(field)
| StarcoderdataPython |
3394167 | """
this is my second py code
for my second lecture
"""
#print('hello world') #this is a single line comment
"""
This is my second regional comment
"""
# this is my second line comment
#print(type('123'))
#print("Hello World".upper())
#print("Hello World".lower())
#print(" hello world ")
#print(" hello world ".strip())
#print("hello, world".split("l"))
#print("hello" + "world"+ ".")
#print(2+3)
#print(2/3)
#print(type(print(2/3)))
#print(2**3)
#my_str = "hello world"
#print(my_str.upper())
#my_str = "Tom"
#print(my_str)
my_int = 2
my_float = 3.0
print(my_int + my_float)
| StarcoderdataPython |
124039 | <filename>predict_recognition.py
import argparse
import os
import time
import numpy as np
import pyaudio
import tensorflow
from record_demo import get_voice
import random
from audio import read_mfcc
from batcher import sample_from_mfcc
from constants import SAMPLE_RATE, NUM_FRAMES
from conv_models import DeepSpeakerModel
from test import batch_cosine_similarity
np.random.seed(123)
random.seed(123)
parser = argparse.ArgumentParser()
# # set up training configuration.
# parser.add_argument('--n_classes', default=5994, type=int, help='class dim number')
parser.add_argument('--audio_db', default='audio_db/', type=str, help='person audio database')
# parser.add_argument('--resume', default=r'pretrained/weights.h5', type=str, help='resume model path')
# # set up network configuration.
# parser.add_argument('--net', default='resnet34s', choices=['resnet34s', 'resnet34l'], type=str)
# parser.add_argument('--ghost_cluster', default=2, type=int)
# parser.add_argument('--vlad_cluster', default=8, type=int)
# parser.add_argument('--bottleneck_dim', default=512, type=int)
# parser.add_argument('--aggregation_mode', default='gvlad', choices=['avg', 'vlad', 'gvlad'], type=str)
# # set up learning rate, training loss and optimizer.
# parser.add_argument('--loss', default='softmax', choices=['softmax', 'amsoftmax'], type=str)
args = parser.parse_args()
person_feature = []
person_name = []
# Reduce GPU memory usage by letting TensorFlow grow allocations on demand
config = tensorflow.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
_ = tensorflow.compat.v1.Session(config=config)
# ==================================
# Get Model
# ==================================
# construct the data generator.
# params = {'dim': (257, None, 1),
# 'nfft': 512,
# 'spec_len': 250,
# 'win_length': 400,
# 'hop_length': 160,
# 'n_classes': args.n_classes,
# 'sampling_rate': 16000,
# 'normalize': True}
#
# network_eval = model.vggvox_resnet2d_icassp(input_dim=params['dim'],
# num_class=params['n_classes'],
# mode='eval', args=args)
# ==> load pre-trained model
# network_eval.load_weights(os.path.join(args.resume), by_name=True)
model = DeepSpeakerModel()
model.m.load_weights('/home/ubuntu/PycharmProjects/deep-speaker/checkpoints/ResCNN_triplet_training_checkpoint_265.h5', by_name=True)
print('==> successfully loaded the model.')
# Run the model to extract the voiceprint feature (speaker embedding)
def predict(input_filename):
mfcc = sample_from_mfcc(read_mfcc(input_filename, SAMPLE_RATE), NUM_FRAMES)
predict_fea = model.m.predict(np.expand_dims(mfcc, axis=0))
return predict_fea
# Load the audio database used for recognition
def load_audio_db(audio_db_path):
start = time.time()
audios = os.listdir(audio_db_path)
for audio in audios:
        path = os.path.join(audio_db_path, audio)
        name = audio[:-4]
        predict_fea = predict(path)
person_name.append(name)
person_feature.append(predict_fea)
print("Loaded %s audio." % name)
end = time.time()
    print('Finished loading the audio database, time taken: %fms' % (round((end - start) * 1000)))
# Recognize the voiceprint
def recognition(audio):
name = ''
pro = 0
predict_fea = predict(audio)
for i, person_f in enumerate(person_feature):
        # Compute the similarity
dist = batch_cosine_similarity(predict_fea, person_f)
# dist = np.dot(feature, person_f.T)
if dist > pro:
pro = dist
name = person_name[i]
return name, pro
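# Illustrative call (assumes load_audio_db() has populated the database first):
# name, score = recognition('infer_audio_clean.wav')
# print(name, score)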
def start_recognition():
    # Recording parameters
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 16000
RECORD_SECONDS = 4
WAVE_OUTPUT_FILENAME = "infer_audio.wav"
while True:
i = input("按下回车键开机录音,录音%s秒中:" % RECORD_SECONDS)
print("开始录音......")
get_voice()
"""
        # Open the recording stream
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
i = input("按下回车键开机录音,录音%s秒中:" % RECORD_SECONDS)
print("开始录音......")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("录音已结束!")
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
"""
print("录音已结束!")
os.system("~/PycharmProjects/Kersa-Speaker-Recognition/noisered.sh");
WAVE_OUTPUT_FILENAME_CLEAN = "infer_audio_clean.wav"
        # Recognize by comparing against the audio database
start = time.time()
name, p = recognition(WAVE_OUTPUT_FILENAME_CLEAN)
end = time.time()
# print("预测时间为:%d,识别说话的为:%s,相似度为:%f" % (round((end - start) * 1000), name, p))
if p > 0.8:
print("预测时间为:%d,识别说话的为:%s,相似度为:%f" % (round((end - start) * 1000), name, p))
else:
print("预测时间为:%d,音频库没有该用户的语音,相似度为:%f" % (round((end - start) * 1000), p))
if __name__ == '__main__':
load_audio_db(args.audio_db)
start_recognition()
| StarcoderdataPython |
3220135 | from datetime import datetime, timezone
import uuid
import json
import requests
def main():
print("Velkommen til verdens enkleste meldingsklient.")
print("Klienten kan avsluttes når som helst ved å holde inne CTRL + C.")
while True:
print()
print("Skriv inn meldingen du ønsker å sende til køen. For at meldingen faktisk skal bli sendt må du trykke på enter-tasten.")
user_input = input()
id = str(uuid.uuid4())
timestamp = datetime.now(timezone.utc).astimezone().isoformat()
print("Legg til bakgrunnsfarge på meldingen din:")
background = input()
print("Legg til font-style (italic, bold, oblique osv...)")
fontStyle = input()
stil = f"background-color: {background}; font-style: {fontStyle};"
# + background + "; " fontStyle}
msg = {
"message": user_input,
"id": id,
"timestamp": timestamp,
"style": stil,
}
resp = requests.post('https://5wjbztusyb.execute-api.eu-central-1.amazonaws.com/dev/messages', json=msg)
print("Melding sendt!")
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3208671 | """
------------------------------------------------------------------------------
@file parser.py
@author <NAME> (<EMAIL>)
@brief ...
@version 0.1
@date 2020-08-26
@copyright Copyright (c) 2020
Distributed under the MIT software license, see the accompanying
file COPYING or http://www.opensource.org/licenses/mit-license.php.
------------------------------------------------------------------------------
"""
from src.constants import *
from src.token import Token
from src.tokenizer import Tokenizer
from src.symbol_table import SymbolTable
from src.variable import Variable
from copy import copy
CLASSES = []
SUBROUTINES = []
class Parser(object):
def __init__(self, tokenizer):
""" Constructs parser object. """
self.xml_data = [] # For xml export.
self.symbol_table = SymbolTable() # Create symbol table for class scope.
self.tokenizer = tokenizer # Tokenizer.
self.token = None # Current token.
self.compile_class()
def check_for_value(self, value):
""" Check if current token has expected value. """
self.token = self.tokenizer.advance()
if self.token.value != value:
raise Exception("Error: Excpected value => '{0}' but got => '{1}'".format(value, self.token.value))
if self.token.value in XML_REPLACE.keys():
self.xml_data.append("<{0}> {1} </{0}>".format(self.token.type, XML_REPLACE[self.token.value]))
else:
self.xml_data.append(self.token.__str__())
def check_for_identifier(self):
""" Check if current token is valid identifier. """
self.token = self.tokenizer.advance()
if self.token.type != "identifier" or (not re.match(R_IDENTIFIER, self.token.value)):
raise Exception("Error: Identifier name not valid => '{0}'".format(self.token.value))
self.xml_data.append(self.token.__str__())
def check_for_type(self):
""" Check if current token has valid type. """
self.token = self.tokenizer.advance()
if self.token.value not in list(TYPES) + CLASSES:
raise Exception("Error: Not valid type => '{0}'".format(self.token.value))
self.xml_data.append(self.token.__str__())
def check_for_operator(self):
""" Check if current token is operator. """
self.token = self.tokenizer.advance()
if self.token.value not in OP:
raise Exception("Error: Invalid operator => '{0}'".format(self.token.value))
if self.token.value in XML_REPLACE.keys():
self.xml_data.append("<{0}> {1} </{0}>".format(self.token.type, XML_REPLACE[self.token.value]))
else:
self.xml_data.append(self.token.__str__())
def compile_class(self):
"""
Compile class.
-------------------------------------------------------------
Rule => 'class' className '{' classVarDec* subroutineDec* '}'
-------------------------------------------------------------
"""
self.xml_data.append("<class>") # Xml rep: <class>
self.check_for_value('class') # Xml rep: <keyword> class </keyword>
self.check_for_identifier() # Xml rep: <identifier> className </identifier>
CLASSES.append(self.token.value) # Add class name to list of classes.
self.check_for_value('{') # Xml rep: <symbol> { </symbol>
while self.tokenizer.next().value != "}":
self.token = self.tokenizer.advance()
if self.token.value in ['static', 'field']:
self.compile_class_var_dec() # Compile class variable declarations.
elif self.token.value in ['constructor', 'function', 'method']:
self.compile_subroutine_dec() # Compile class subroutine declarations.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</class>") # Xml rep: </class>
def compile_class_var_dec(self):
"""
Compile class variable declarations.
-------------------------------------------------------------
Rule => ('static' | 'field') type varName (',', varName)* ';'
-------------------------------------------------------------
"""
self.xml_data.append("<classVarDec>") # Xml rep: <classVarDec>
variable = Variable()
self.xml_data.append(self.token.__str__()) # Xml rep: <keyword> ('static' | 'field') </keyword>
variable.kind = self.token.value
self.check_for_type() # Xml rep: <keyword> type </keyword>
variable.type = self.token.value
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
variable.name = self.token.value
self.symbol_table.add(variable) # Add variable to class scope symbol table.
while self.tokenizer.next().value != ";":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
v = copy(variable)
v.name = self.token.value
self.symbol_table.add(v) # Add variable to class scope symbol table.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</classVarDec>") # Xml rep: </classVarDec>
def compile_subroutine_dec(self):
"""
Compile class subroutine declarations.
-------------------------------------------------------------------------------------------------------------------
Rule => ('constructor' | 'function' | 'method') ('void' | type) subroutineName '(' parameterList ')' subroutineBody
-------------------------------------------------------------------------------------------------------------------
"""
self.xml_data.append("<subroutineDec>") # Xml rep: <subroutineDec>
		self.xml_data.append(self.token.__str__())                 # Xml rep: <keyword> ('constructor' | 'function' | 'method') </keyword>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
SUBROUTINES.append(self.token.value) # Add subroutine name to subroutine list.
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_parameter_list() # Compile parameter list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.compile_subroutine_body() # Compile subroutine body.
self.xml_data.append("</subroutineDec>") # Xml rep: </subroutineDec>
def compile_parameter_list(self):
"""
Compile parameter list.
---------------------------------------------
Rule => ((type varName) (',' type varName)*)?
---------------------------------------------
"""
self.xml_data.append("<parameterList>") # Xml rep: <parameterList>
if self.tokenizer.next().value != ")":
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
while self.tokenizer.next().value == ",":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
self.xml_data.append("</parameterList>") # Xml rep: </parameterList>
def compile_subroutine_body(self):
"""
Compile subroutine body.
----------------------------------
Rule => '{' varDec* statements '}'
----------------------------------
"""
self.xml_data.append("<subroutineBody>") # Xml rep: <subroutineBody>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
while self.tokenizer.next().value == "var":
self.compile_var_dec() # Compile variable declarations.
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</subroutineBody>") # Xml rep: </subroutineBody>
def compile_var_dec(self):
"""
Compile variable declarations.
----------------------------------------------
Rule => 'var' type varName (',', varName)* ';'
----------------------------------------------
"""
self.xml_data.append("<varDec>") # Xml rep: <varDec>
self.check_for_value("var") # Xml rep: <keyword> var </keyword>
self.check_for_type() # Xml rep: <keyword> type </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
while self.tokenizer.next().value != ";":
self.check_for_value(",") # Xml rep: <symbol> ; </symbol>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</varDec>") # Xml rep: </varDec>
def compile_statements(self):
"""
Compile statements.
-----------------------------------------------------------------------------------
Rule => letStatement | ifStatement | whileStatement | doStatement | returnStatement
-----------------------------------------------------------------------------------
"""
self.xml_data.append("<statements>") # Xml rep: <statements>
while self.tokenizer.next().value != "}":
token = self.tokenizer.next().value
if token == 'let':
self.compile_let_statement() # Compile let statement.
elif token == 'while':
self.compile_while_statement() # Compile while statement.
elif token == 'if':
self.compile_if_statement() # Compile if statement.
elif token == 'do':
self.compile_do_statement() # Compile do statement.
elif token == 'return':
self.compile_return_statement() # Compile return statement.
self.xml_data.append("</statements>") # Xml rep: </statements>
def compile_let_statement(self):
"""
Compile let statement.
--------------------------------------------------------------
Rule => 'let' varName ('[' expression ']')? '=' expression ';'
--------------------------------------------------------------
"""
self.xml_data.append("<letStatement>") # Xml rep: <letStatement>
self.check_for_value("let") # Xml rep: <keyword> let </keyword>
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
var = self.symbol_table.find(self.token.value)
if self.tokenizer.next().value == '[':
self.check_for_value("[") # Xml rep: <symbol> [ </symbol>
self.compile_expression("]") # Compile expression.
self.check_for_value("]") # Xml rep: <symbol> ] </symbol>
self.check_for_value("=") # Xml rep: <symbol> = </symbol>
self.compile_expression(";") # Compile expression.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</letStatement>") # Xml rep: </letStatement>
def compile_while_statement(self):
"""
Compile while statement.
-----------------------------------------------------
Rule => 'while' '(' expression ')' '{' statements '}'
-----------------------------------------------------
"""
self.xml_data.append("<whileStatement>") # Xml rep: <whileStatement>
self.check_for_value("while") # Xml rep: <keyword> while </keyword>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
self.xml_data.append("</whileStatement>") # Xml rep: </whileStatement>
def compile_if_statement(self):
"""
Compile if statement.
-------------------------------------------------------------------------------
Rule => 'if' '(' expression ')' '{' statements '}' ('else' '{' statements '}')?
-------------------------------------------------------------------------------
"""
self.xml_data.append("<ifStatement>") # Xml rep: <ifStatement>
self.check_for_value("if") # Xml rep: <keyword> if </keyword>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.check_for_value("{") # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value("}") # Xml rep: <symbol> } </symbol>
if self.tokenizer.next().value == 'else':
self.check_for_value('else') # Xml rep: <keyword> else </keyword>
self.check_for_value('{') # Xml rep: <symbol> { </symbol>
self.compile_statements() # Compile statements.
self.check_for_value('}') # Xml rep: <symbol> } </symbol>
self.xml_data.append("</ifStatement>") # Xml rep: </ifStatement>
def compile_do_statement(self):
"""
Compile do statement.
-------------------------------
Rule => 'do' subroutineCall ';'
-------------------------------
"""
self.xml_data.append("<doStatement>") # Xml rep: <doStatement>
self.check_for_value("do") # Xml rep: <keword> do </keyword>
self.compile_subroutine_call() # Compile subroutine call.
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</doStatement>") # Xml rep: </doStatement>
def compile_return_statement(self):
"""
Compile return statement.
--------------------------------
Rule => 'return' expression? ';'
--------------------------------
"""
self.xml_data.append("<returnStatement>") # Xml rep: <returnStatement>
self.check_for_value("return") # Xml rep: <keword> return </keyword>
if self.tokenizer.next().value != ";":
self.compile_expression(';')
self.check_for_value(";") # Xml rep: <symbol> ; </symbol>
self.xml_data.append("</returnStatement>") # Xml rep: </returnStatement>
def compile_subroutine_call(self):
"""
Compile subroutine call.
---------------------------------------------------------------------------------------------------------------
Rule => subroutineName '(' expressionList ')' | (className | varName) '.' subroutineName '(' expressionList ')'
---------------------------------------------------------------------------------------------------------------
"""
self.xml_data.append("<subroutineCall>") # Xml rep: <subroutineCall>
self.check_for_identifier() # Xml rep: <identifier> subroutineName | (className | varName) </identifier>
if self.tokenizer.next().value == ".":
self.check_for_value(".") # Xml rep: <symbol> . </symbol>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.xml_data.append("</subroutineCall>") # Xml rep: </subroutineCall>
def compile_expression(self, *end):
"""
Compile expression.
-----------------------
Rule => term (op term)*
-----------------------
"""
self.xml_data.append("<expression>") # Xml rep:<expression>
self.compile_term() # Compile term.
while self.tokenizer.next().value not in end:
self.check_for_operator() # Xml rep: <symbol> operator </symbol>
self.compile_term() # Compile term.
self.xml_data.append("</expression>") # Xml rep: </expression>
def compile_term(self):
"""
Compile term.
----------------------------------------------------------------------------------
Rule => integerConstant | stringConstant | keywordConstant | unaryOp term |
varName | varName'[' expression ']' | subroutineCall | '(' expression ')'
----------------------------------------------------------------------------------
"""
self.xml_data.append("<term>") # Xml rep: <term>
if self.tokenizer.next().type in ["integerConstant", "stringConstant"] or self.tokenizer.next().value in KEYWORD_CONSANTS:
self.token = self.tokenizer.advance()
self.xml_data.append(self.token.__str__()) # Xml rep: <integerConstant | stringConstant | keyword> value </integerConstant | stringConstant | keyword>
elif self.tokenizer.next().value in UNARY_OP:
self.token = self.tokenizer.advance()
self.xml_data.append(self.token.__str__()) # Xml rep: <symbol> unaryOp </symbol>
self.compile_term() # Compile term.
elif self.tokenizer.next().value == "(":
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression(")") # Compile expression.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
else:
self.check_for_identifier() # Xml rep: <identifier> varName </identifier>
var = self.symbol_table.find(self.token.value)
if self.tokenizer.next().value == "[":
self.check_for_value("[") # Xml rep: <symbol> [ </symbol>
self.compile_expression("]") # Compile expression.
self.check_for_value("]") # Xml rep: <symbol> ] </symbol>
elif self.tokenizer.next().value == ".":
self.check_for_value(".") # Xml rep: <symbol> . </symbol>
self.check_for_identifier() # Xml rep: <identifier> subroutineName </identifier>
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
elif self.tokenizer.next().value == "(":
self.check_for_value("(") # Xml rep: <symbol> ( </symbol>
self.compile_expression_list() # Compile expression list.
self.check_for_value(")") # Xml rep: <symbol> ) </symbol>
self.xml_data.append("</term>") # Xml rep: </term>
def compile_expression_list(self):
"""
Compile expression list.
---------------------------------------
Rule => (expression (',' expression)*)?
---------------------------------------
"""
self.xml_data.append("<expressionList>") # Xml rep: <expressionList>
if self.tokenizer.next().value != ")":
self.compile_expression(",", ")") # Compile expression.
while self.tokenizer.next().value == ",":
self.check_for_value(",") # Xml rep: <symbol> , </symbol>
self.compile_expression(",", ")")
self.xml_data.append("</expressionList>") # Xml rep: </expressionList>
def export_xml(self, file_name):
""" Export code structure to file in xml format. """
with open("xml-export/{0}.structure.xml".format(file_name), "w") as xml_file:
for line in self.xml_data:
xml_file.write(line + "\n") | StarcoderdataPython |
3213628 | # coding=utf8
"""Resize
Common functions for resizing dimensions to fit, to crop, etc
"""
__author__ = "<NAME>"
__copyright__ = "OuroborosCoding"
__version__ = "1.0.0"
__email__ = "<EMAIL>"
__created__ = "2018-11-11"
def crop(w, h, bw, bh):
"""Crop
Makes sure one side fits and crops the other
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
dict
"""
# Init the return
dRet = {}
# Easier to work with floats
w = float(w)
h = float(h)
# If the image is already smaller, make it bigger
if w < bw or h < bh:
# Which is the side that needs to grow more?
if (bw / w) > (bh / h):
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
else:
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
# Else, make it smaller
else:
# Which is the side that needs to shrink less?
if (w / bw) > (h / bh):
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
else:
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
# Return the new width and height
return dRet
def fit(w, h, bw, bh):
"""Fit
Makes sure one side fits and makes the other smaller than necessary
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
		dict
"""
# Init the return
dRet = {}
# Easier to work with floats
w = float(w)
h = float(h)
# If the image is already smaller, make it bigger
if w < bw and h < bh:
# Figure out the larger side
if (bw / w) > (bh / h):
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
else:
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
# Else, make it smaller
else:
# Figure out the larger side
if (w / bw) > (h / bh):
dRet['w'] = bw
dRet['h'] = int(round(bw * (h / w)))
else:
dRet['w'] = int(round(bh * (w / h)))
dRet['h'] = bh
# Return the new width and height
return dRet
def region(w, h, bw, bh):
"""Region
Returns a new set of region points based on a current width and height and
the bounding box
Arguments:
w (int): The current width
h (int): The current height
bw (int): The boundary width
bh (int): The boundary height
Returns:
dict
"""
# Return
dRet = {}
# If the current width is larger than the bounds width
if w > bw:
dRet['x'] = int(round((w - bw) / 2.0))
dRet['y'] = 0
dRet['w'] = int(bw + round((w - bw) / 2.0))
dRet['h'] = bh
# Else if the current height is larger than the bounds height
else:
dRet['x'] = 0
dRet['y'] = int(round((h - bh) / 2.0))
dRet['w'] = bw
dRet['h'] = int(bh + round((h - bh) / 2.0))
# Return the region
return dRet
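
# A quick usage sketch of the three helpers above; the 1920x1080 source and the
# 400x400 bounding box are arbitrary example values, and the expected results
# are noted in the trailing comments.
if __name__ == '__main__':
	scaled = crop(1920, 1080, 400, 400)               # short side fills the box -> {'w': 711, 'h': 400}
	box = region(scaled['w'], scaled['h'], 400, 400)  # centred cut from the scaled image -> {'x': 156, 'y': 0, 'w': 556, 'h': 400}
	contained = fit(1920, 1080, 400, 400)             # both sides kept inside the box -> {'w': 400, 'h': 225}
	print(scaled, box, contained)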
| StarcoderdataPython |
1700067 | <reponame>mksh/k93s
"""Main k93 CLI module."""
import contextlib
import logging
import tempfile
import os
import yaml
import click
import k93s
import k93s.config
import k93s.provision
import k93s.vms
import k93s.utils
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def _with_config(ctx):
config_file = k93s.utils.ensure_config_file_location(ctx.obj['config'])
try:
config_contents = k93s.utils.read_config(config_file)
except FileNotFoundError:
logger.exception('Config %s does not exist.', config_file)
exit(5)
else:
ctx.obj['config_contents'] = config_contents
with tempfile.TemporaryDirectory() as tmpdirname:
yield tmpdirname
pass
@click.group()
@click.option('-f', '--config-file',
default=os.environ.setdefault('K_93_CONFIG', '.k93s.default.config'),
help='Config file location.')
@click.pass_context
def cli(ctx, config_file):
ctx.ensure_object(dict)
ctx.obj['config'] = config_file
@cli.command()
@click.pass_context
def config(ctx):
"""Create a new configuration file in specified location."""
config_file = k93s.utils.ensure_config_file_location(ctx.obj['config'])
_existconfig = os.path.exists(config_file)
if _existconfig and not click.confirm('A config file at {!r} already exists. '
' Do you want to re-create it?'.format(config_file)):
exit(4)
else:
if _existconfig:
os.unlink(config_file)
with open(config_file, 'w') as fl:
k93s.config.create_new_config_file()
yaml.dump({'k93s': k93s.config.k93s}, fl, default_flow_style=False)
@cli.command()
@click.pass_context
def spinup(ctx):
"""Create VMs for Kubernetes cluster without provisioning them."""
with _with_config(ctx) as tmpdirname:
k93s.vms.spinup(tmpdirname, **ctx.obj)
@cli.command()
@click.pass_context
def teardown(ctx):
"""Remove the VMs for current cluster."""
with _with_config(ctx) as tmpdirname:
if click.confirm('Do you really want to tear down k93s cluster, set up '
'with config {!s}'.format(ctx.obj['config'])):
k93s.vms.teardown(tmpdirname, **ctx.obj)
@cli.command()
@click.pass_context
def kubernetes(ctx):
"""Make sure VMs are set up, and provision cluster with Ansible."""
with _with_config(ctx) as tmpdirname:
k93s.vms.spinup(tmpdirname, **ctx.obj)
# New hosts may have been created, so re-read inventory
inventory_contents = k93s.vms.inventory(tmpdirname, **ctx.obj)
k93s.provision.ansible_kubernetes(inventory_contents,
ctx.obj['config_contents'],
tmpdirname)
@cli.command()
@click.pass_context
def kubectl(ctx):
"""Configure kubectl for current user to facilitate cluster."""
with _with_config(ctx) as tmpdirname:
inventory_contents = k93s.vms.inventory(tmpdirname, **ctx.obj)
k93s.provision.configure_kubectl(
inventory_contents,
tmpdirname,
click.confirm('REWRITE ./.kube/config ?'),
)
def main():
"""Bind CLI application logic."""
cli() # pragma: no cover
if __name__ == '__main__':
main() # pragma: no cover
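
# Rough command-line usage sketch; the console-script name "k93s" is an assumption
# based on the package name and is not defined in this module:
#   k93s -f ~/.k93s.yaml config    # write a fresh config file
#   k93s spinup                    # create the cluster VMs only
#   k93s kubernetes                # ensure VMs exist, then provision with Ansible
#   k93s kubectl                   # point the local kubectl at the new cluster
#   k93s teardown                  # destroy the cluster VMs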
| StarcoderdataPython |
3274760 | <filename>waitlist/migrations/0001_initial.py<gh_stars>10-100
# Generated by Django 2.0.1 on 2019-01-07 14:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('books', '0008_bookcopy_borrow_date'),
]
operations = [
migrations.CreateModel(
name='WaitlistItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_date', models.DateTimeField()),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Book')),
('library', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Library')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='waitlist_items', to=settings.AUTH_USER_MODEL)),
],
),
]
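
# For reference, a migration like this is applied with Django's standard command,
# assuming the app is registered under the "waitlist" label:
#   python manage.py migrate waitlist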
| StarcoderdataPython |
1642470 | # -*- coding: utf-8 -*-
"""
Zenoss jobs_router
"""
from zenossapi.routers import ZenossRouter
class JobsRouter(ZenossRouter):
"""
Class for interacting with the Zenoss device router
"""
def __init__(self, url, headers, ssl_verify):
super(JobsRouter, self).__init__(url, headers, ssl_verify, 'jobs_router', 'JobsRouter')
self.uuid = None
def __repr__(self):
if self.uuid:
identifier = self.uuid
else:
identifier = hex(id(self))
return '<{0} object at {1}>'.format(
type(self).__name__, identifier
)
def _abort_jobs_by_uuid(self, jobs):
"""
Aborts the jobs specified in the uuid list.
Arguments:
jobs (list): List of job uuids
Returns:
bool:
"""
jobs_abort_response = self._router_request(
self._make_request_data(
'abort',
dict(jobids=jobs),
)
)
return True
def _delete_jobs_by_uuid(self, jobs):
"""
Deletes the jobs specified in the uuid list.
Arguments:
jobs (list): List of jobs uuids
Returns:
list: List of uuids deleted
"""
deletes_response = self._router_request(
self._make_request_data(
'deleteJobs',
dict(
jobids=jobs
)
)
)
if deletes_response is None:
return []
else:
return deletes_response['deletedJobs']
def list_jobs(self, start=0, limit=50, sort='scheduled', dir='DESC'):
"""
List all Job Manager jobs, supports pagination.
Arguments:
start (int): Offset to start device list from, default 0
limit (int): The number of results to return, default 50
sort (str): Sort key for the list, default is 'scheduled'. Other
sort keys are 'started, 'finished', 'status', 'type' and 'user'
dir (str): Sort order, either 'ASC' or 'DESC', default is 'DESC'
Returns:
dict(int, dict(str, int, int, int, str, str, str, str, str)): ::
{
'total': (int) Total number of jobs,
'jobs': {
'description': (str) Job description,
'finished': (int) Time the job finished in timestamp format,
'scheduled': (int) Time the job was scheduled in timestamp format,
'started': (int) Time the job started in timestamp format,
'status': (str) Status of the job,
'type': (str) Job type,
'uid': (str) JobManager UID - /zport/dmd/JobManager,
'user': (str) User who scheduled the job,
'uuid': (str) UUID of the job,
}
}
"""
jobs_data = self._router_request(
self._make_request_data(
'getJobs',
dict(
start=start,
limit=limit,
sort=sort,
dir=dir,
page=0,
)
)
)
return dict(
total=jobs_data['totalCount'],
jobs=jobs_data['jobs'],
)
def get_jobs(self, start=0, limit=50, sort='scheduled', dir='ASC'):
"""
Get ZenossJob objects for Job Manager jobs. Supports pagination.
Arguments:
start (int): Offset to start device list from, default 0
limit (int): The number of results to return, default 50
sort (str): Sort key for the list, default is 'scheduled'. Other
sort keys are 'started, 'finished', 'status', 'type' and 'user'
dir (str): Sort order, either 'ASC' or 'DESC', default is 'ASC'
Returns:
list(ZenossJob):
"""
jobs_data = self.list_jobs(start=start, limit=limit, sort=sort, dir=dir)
jobs = []
for job in jobs_data['jobs']:
jobs.append(self.get_job(job['uuid']))
return jobs
def get_job(self, job):
"""
Get a ZenossJob object by the job's uuid
Arguments:
job (str): uuid of the job
Returns:
ZenossJob:
"""
job_data = self._router_request(
self._make_request_data(
'getInfo',
dict(jobid=job)
)
)
return ZenossJob(
self.api_url,
self.api_headers,
self.ssl_verify,
job_data['data']
)
class ZenossJob(JobsRouter):
"""
Class for Zenoss job objects
"""
def __init__(self, url, headers, ssl_verify, job_data):
super(ZenossJob, self).__init__(url, headers, ssl_verify)
self.description = job_data['description']
self.duration = job_data['duration']
self.errors = job_data['errors']
self.finished = job_data['finished']
self.id = job_data['id']
self.logfile = job_data['logfile']
self.meta_type = job_data['meta_type']
self.name = job_data['name']
self.scheduled = job_data['scheduled']
self.started = job_data['started']
self.status = job_data['status']
self.type = job_data['type']
self.uid = job_data['uid']
self.user = job_data['user']
self.uuid = job_data['uuid']
def abort(self):
"""
Abort the job.
Returns:
bool:
"""
return self._abort_jobs_by_uuid([self.uuid])
def delete(self):
"""
Delete the job.
Returns:
list: Job ID
"""
return self._delete_jobs_by_uuid([self.uuid])
def get_log(self):
"""
Get the log for the job.
Returns:
dict(str, bool, list): ::
{
'logfile': Filesystem path of the log file,
'maxLimit': True or False,
'content': Log file lines
}
"""
return self._router_request(
self._make_request_data(
'detail',
dict(
jobid=self.uuid,
)
)
)
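
# A minimal usage sketch; the URL and headers below are placeholders, and the real
# values depend on the target Zenoss instance and on its authentication scheme.
if __name__ == '__main__':
    router = JobsRouter(
        'https://zenoss.example.com/zport/dmd',  # placeholder API URL
        {'Content-Type': 'application/json'},    # placeholder headers (add auth as needed)
        True,                                    # ssl_verify
    )
    job_summary = router.list_jobs(limit=10)
    print('Total jobs: {0}'.format(job_summary['total']))
    for job in router.get_jobs(limit=5):
        print(job.id, job.status)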
| StarcoderdataPython |
3274367 | <filename>src/client.py
import time
import socket
import sys
from thread import *
from getpass import getpass
import os
from client_core import *
'''
Create Socket
'''
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Created socket for the client'
except socket.error:
print 'Failed to create socket for the client'
sys.exit()
'''
Resolve Hostname
'''
HOST = 'localhost'
PORT = 9999
try:
remote_ip = sys.argv[1]
print 'IP address of ' + HOST + ' on ip ' + remote_ip
except (IndexError, socket.error):
print 'Hostname could not be resolved. Exiting'
sys.exit()
'''
Connect to remote server
'''
sock.connect((remote_ip, PORT))
print 'Socket connected to ' + HOST + ' on ip ' + remote_ip + ' through port ' + str(PORT)
'''
Login user
'''
login_count = 0
reply = None
while reply != "valid":
usr = raw_input("Enter your username: ")
pwd = getpass("Enter your password: ")
print 'Sent username and password ' + usr + ' ' + pwd
sock.sendall(usr + '<>' + pwd)
reply = sock.recv(5)
# if successful login
if reply == 'valid':
print 'Login successful!'
print 'Welcome back ' + usr
break
# if successful login but another user is logged into account
elif reply == 'exist':
print 'Existing user is logged in!'
print 'Please contact us for tech support'
print ''
# if invalid authentication, try again
elif reply == 'nalid':
print 'We could not find your username / password combination'
print 'Please try again'
print ''
# if too many attempts, quit
elif reply == 'close':
print 'Too many attempts have been made'
print 'Exiting the app'
print ''
sock.close()
sys.exit()
# error case
else:
print 'Received: ' + reply
print 'Client Login: Invalid response from server'
print 'Exiting the app'
sock.close()
sys.exit()
'''
Log client "into" the server
'''
# generate client core for processing commands
core = Client_Core(usr, pwd, sock)
# start receiving thread
start_new_thread(core.receive_thread, (sock,))
# display count of unread messages on login
core.run_count()
# run app while logged in
while True:
time.sleep(1)
args = core.prompt()
if args != None:
core.run(args)
| StarcoderdataPython |
99842 | <reponame>ustutz/dataquest<gh_stars>1-10
class Script:
@staticmethod
def main():
cities = ["Albuquerque", "Anaheim", "Anchorage", "Arlington", "Atlanta", "Aurora", "Austin", "Bakersfield", "Baltimore", "Boston", "Buffalo", "Charlotte-Mecklenburg", "Cincinnati", "Cleveland", "Colorado Springs", "<NAME>", "Dallas", "Denver", "Detroit", "El Paso", "<NAME>", "<NAME>", "Fresno", "Greensboro", "Henderson", "Houston", "Indianapolis", "Jacksonville", "Jersey City", "Kansas City", "Las Vegas", "Lexington", "Lincoln", "Long Beach", "Los Angeles", "Louisville Metro", "Memphis", "Mesa", "Miami", "Milwaukee", "Minneapolis", "Mobile", "Nashville", "New Orleans", "New York", "Newark", "Oakland", "Oklahoma City", "Omaha", "Philadelphia", "Phoenix", "Pittsburgh", "Plano", "Portland", "Raleigh", "Riverside", "Sacramento", "San Antonio", "San Diego", "San Francisco", "San Jose", "Santa Ana", "Seattle", "St. Louis", "St. Paul", "Stockton", "Tampa", "Toledo", "Tucson", "Tulsa", "Virginia Beach", "Washington", "Wichita"]
first_alb = ((cities[0] if 0 < len(cities) else None) == "Albuquerque")
second_alb = ((cities[1] if 1 < len(cities) else None) == "Albuquerque")
first_last = ((cities[0] if 0 < len(cities) else None) == python_internal_ArrayImpl._get(cities, (len(cities) - 1)))
print(str(first_alb))
print(str(second_alb))
print(str(first_last))
class python_internal_ArrayImpl:
@staticmethod
def _get(x,idx):
if ((idx > -1) and ((idx < len(x)))):
return x[idx]
else:
return None
Script.main() | StarcoderdataPython |
77210 | import pathlib
from typing import Dict
from app_types import WordSeq
from word import Word
class Words:
words: list[Word]
def __init__(self, letter_groups: str) -> None:
def find_paths(letters: str) -> Dict[str, str]:
'Build dictionary of valid letter-to-letter transitions'
def other_group_letters(group: str) -> str:
return ''.join(gl for gl in grouped_letters if gl != group)
grouped_letters: list[str] = letters.split()
return {letter: other_group_letters(group)
for group in grouped_letters for letter in group}
def word_works(word: str) -> bool:
for i in range(len(word) - 1):
letter = word[i]
next_letter = word[i + 1]
if not (ok_next_letters := paths.get(letter)):
return False
if next_letter not in ok_next_letters:
return False
return True
paths: Dict[str, str] = find_paths(letter_groups)
all_words = pathlib.Path('resources/words.txt').read_text().strip().split('\n')
candidate_words = list(filter(word_works, all_words))
print(f'{len(candidate_words):,} candidate words loaded from list of {len(all_words):,} words')
self.words = list(map(Word.create, candidate_words))
self.words.sort(key=lambda word: word.num_unique_letters, reverse=True)
def best_words_for_needed_letters(self, needed_letters: set[str]) -> WordSeq:
def num_needed_letters(word: Word):
return len(word.unique_letters.intersection(needed_letters))
num_needed_letters_and_words: list[tuple[int, Word]] = [
(num_needed_letters(word), word) for word in self.words]
num_needed_letters_and_words.sort(reverse=True)
return [lw[1] for lw in num_needed_letters_and_words]
| StarcoderdataPython |
4818125 | from django.shortcuts import render
from django.http import HttpResponse
from .models import Post, Universities
import wikipediaapi
# from .wikiAPI import get_summary # Todo: import needs to be fixed
# Create your views here.
def index(request):
"""render the main page"""
return render(request,'rateMySchool/index.html')
def get_summary(name):
"""takes the title of university and returns a wiki summery"""
wiki_wiki = wikipediaapi.Wikipedia('en')
page_py = wiki_wiki.page(name)
if page_py.exists():
return page_py.summary
else:
return 'Wiki summary not found'
def matchRatings(data):
"""matches ratings data to lables"""
matchedData = []
lable = []
lable.append("5-star")
matchedData.append(data.count(5))
lable.append("4-star")
matchedData.append(data.count(4))
lable.append("3-star")
matchedData.append(data.count(3))
lable.append("2-star")
matchedData.append(data.count(2))
lable.append("1-star")
matchedData.append(data.count(1))
return lable, matchedData
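
# For illustration: with ratings data [5, 5, 4, 2] this returns
# (["5-star", "4-star", "3-star", "2-star", "1-star"], [2, 1, 0, 1, 0]),
# i.e. the (labels, counts) pair that the rating chart in college_rating expects.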
def Average(lst):
"""calculate average rating"""
if len(lst) > 0:
return sum(lst) / len(lst)
else:
return 0
def college_rating(request):
"""renders college rating page"""
# variables
data = ''
summary = ''
query_post = ''
average_rating = ''
# get university list for search recommendation
univeristies = Universities.objects.all()
graph_data = []
lable = []
if 'collegeQuery' in request.GET:
q = request.GET['collegeQuery']
crude_data = Universities.objects.filter(name__icontains=q)
if len(crude_data) != 0: # if the search succeeds
query_post = Post.objects.filter(ratedBody=crude_data[0])
# debug
# print(crude_data[0])
# print(query_post, len(query_post), "query post")
data = crude_data[0]
summary = get_summary(data)
# chart
queryPost = Post.objects.filter(ratedBody=crude_data[0]).order_by('-rate_stars')
for post in queryPost:
graph_data.append(post.rate_stars)
average_rating = Average(graph_data)
lable, graph_data = matchRatings(graph_data)
#print(graph_data, lable)
context = {
'posts': query_post,
'universities' : univeristies,
'queryUNI' : data,
'crudeQueryResult': data,
'wiki_summary': summary,
'graph_data': graph_data,
'lable': lable,
'average_rating': average_rating
}
return render(request, 'rateMySchool/collegeRating.html', context) | StarcoderdataPython |
159013 | <reponame>bidhata/EquationGroupLeaks
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
def GetDir(subdir=None):
import dsz
import os.path
resDir = dsz.env.Get('_LPDIR_RESOURCES')
if subdir != None and len(subdir) > 0:
resDir = resDir + '/%s' % subdir
return os.path.normpath(resDir)
def GetName(resName):
import dsz
from xml.dom.minidom import parse
if resName != None and len(resName) > 0:
xmlPath = GetDir(resName)
else:
xmlPath = GetDir('Dsz')
xmlPath = xmlPath + '/Names.xml'
doc = parse(xmlPath)
if dsz.script.IsLocal():
nodeName = 'Local'
else:
nodeName = 'Remote'
nodeList = doc.getElementsByTagName(nodeName)
return _getNodeText(nodeList[0])
def Open(filename, flags, subdir=None, project=None):
import dsz
import mcl.tasking.resource
import os.path
if filename == None or len(filename) == 0:
raise RuntimeError('Invalid filename specified')
file = None
if filename.find(':') == -1 and filename[0] != '/':
resPath = GetDir()
resDirs = ''
if project != None:
resDirs = project
elif dsz.env.Check('_RES_DIRS'):
resDirs = dsz.env.Get('_RES_DIRS')
archStr = ''
if flags & mcl.tasking.resource.OPEN_RES_FLAG_USE_ARCH:
envName = ''
if flags & mcl.tasking.resource.OPEN_RES_FLAG_USE_COMPILED:
envName = envName + '_COMPILED'
envName = envName + '_ARCH'
archStr = dsz.env.Get(envName)
osStr = ''
if flags & mcl.tasking.resource.OPEN_RES_FLAG_USE_OS:
envName = ''
if flags & mcl.tasking.resource.OPEN_RES_FLAG_USE_COMPILED:
envName = envName + '_COMPILED'
envName = envName + '_OS'
osStr = dsz.env.Get(envName)
libcStr = ''
if flags & mcl.tasking.resource.OPEN_RES_FLAG_USE_LIBC:
if osStr.lower() == 'linux':
majorVersion = dsz.env.Get('_CLIB_MAJOR_VERSION')
minorVersion = dsz.env.Get('_CLIB_MINOR_VERSION')
revVersion = dsz.env.Get('_CLIB_REVISION_VERSION')
libcStr = 'glibc%u.%u.%u' % (majorVersion, minorVersion, revVersion)
while len(resDirs) > 0:
loc = resDirs.find(';')
if loc == -1:
dir = resDirs
resDirs = ''
else:
dir = resDirs[0:loc]
resDirs = resDirs[loc + 1:]
fullPath = resPath + '/' + dir + '/'
if subdir != None and len(subdir) > 0:
fullPath = fullPath + subdir + '/'
if len(archStr) > 0:
fullPath = fullPath + archStr + '/'
if len(osStr) > 0:
fullPath = fullPath + osStr + '/'
if len(libcStr) > 0:
fullPath = fullPath + libcStr + '/'
fullPath = os.path.normpath(fullPath + filename)
try:
_f = os.open(fullPath, os.O_RDONLY | os.O_BINARY)
f = os.fdopen(_f, 'rb')
try:
openedFile = os.path.abspath(fullPath)
except:
openedFile = fullPath
return (
f, openedFile, dir)
except:
pass
try:
_f = os.open(filename, os.O_RDONLY | os.O_BINARY)
f = os.fdopen(_f, 'rb')
try:
openedFile = os.path.abspath(filename)
except:
openedFile = filename
return (
f, openedFile, None)
except:
pass
return (None, None, None)
def _getNodeText(element):
txt = ''
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
txt = txt + node.data
return txt | StarcoderdataPython |