id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
# Feeding-window configuration: meal id -> (start, end) wall-clock times (HH:MM).
meals_3meals = {
    '1': ('16:30', '19:30'),
    '2': ('22:30', '00:30'),
    '3': ('4:30', '5:30')
}
meals_6meals = {
    '1': ('16:30', '18:00'),
    '2': ('20:00', '20:30'),
    '3': ('22:30', '00:00'),
    '4': ('2:00', '2:30'),
    '5': ('4:30', '6:00'),
    '6': ('8:00', '8:30')
}
# Same windows widened by roughly a 15-minute buffer on each side.
meals_3meals_buffer = {
    '1': ('16:15', '19:58'),
    '2': ('22:15', '00:58'),
    '3': ('4:15', '5:58')
}
meals_6meals_buffer = {
    '1': ('16:15', '18:30'),
    '2': ('19:45', '20:57'),
    '3': ('22:15', '00:30'),
    '4': ('1:45', '2:57'),
    '5': ('4:15', '6:30'),
    '6': ('7:45', '8:57')
}
# glob.glob(pathname)
period = 'dd_period'
# NOTE(review): these Windows-style paths rely on '\s', '\l', '\m' NOT being
# recognised escape sequences (they stay literal); consider raw strings or
# os.path.join if these are ever edited.
save_path_light_dark = [f'csvs\shani\summary\\{period}\light_dark_agg\\avg_weeks\\',
                        f'csvs\shani\summary\\{period}\light_dark_agg\\avg_days\\',
                        f'csvs\shani\summary\\{period}\light_dark_agg\\avg_all_exp\\']
save_path_all_exp = [f'csvs\shani\summary\\{period}\\all_exp_agg\\avg_weeks\\',
                     f'csvs\shani\summary\\{period}\\all_exp_agg\\avg_days\\',
                     f'csvs\shani\summary\\{period}\\all_exp_agg\\avg_all_exp\\']
FILE_DATA_EXPRIMENTS = 'csvs\shani\modiInCal_format_SHANI_EXP_MODI_RESTRIC_PLUS_DD.csv'
FILE_DESIGN_EXPRIMENT = 'csvs\shani\modiInCal_format_your_Design_SHANI_EXP_MODI_RESTRIC_PLUS_DD.csv'
# Column -> aggregation function for the summary groupby operations.
# (AGGS used to be defined twice; only this second definition -- with 'kcal'
# and without 'kcal_hr' -- ever took effect at runtime, so the dead first
# copy was removed.)
AGGS = {
    'actual_foodupa': 'sum',
    'kcal': 'sum',
    'kcal_mean': 'mean',
    'actual_allmeters': 'mean',
    'bodymass': 'mean',
    'rq': 'mean',
    'actual_pedmeters': 'mean',
    'vco2': 'mean',
    'vo2': 'mean',
    'locomotion': 'mean',
    'actual_waterupa': 'sum'
}
| StarcoderdataPython |
5137654 | <reponame>IlyaFaer/GitHub-Scraper<gh_stars>10-100
"""Manual mocks of some Scraper classes."""
import github
from sheet import Sheet
from sheet_builder import SheetBuilder
import spreadsheet
import examples.fill_funcs_example
SPREADSHEET_ID = "ss_id"
class SheetBuilderMock(SheetBuilder):
    """SheetBuilder whose GitHub login is faked (no credentials needed)."""

    def _login_on_github(self):
        # Return an unauthenticated client instead of performing a login.
        return github.Github()
class SheetMock(Sheet):
    """Sheet whose constructor only sets fields, skipping backend calls."""

    def __init__(self, name, spreadsheet_id, id_=None):
        self.id = id_                # sheet id within the spreadsheet
        self.name = name             # sheet title
        self.ss_id = spreadsheet_id  # parent spreadsheet id
        self._config = None          # left unset; real Sheet fills this in
        self._builder = SheetBuilderMock(name)  # builder with faked GitHub login
class ConfigMock:
    """Hand-written mock for config module."""

    def __init__(self):
        # Minimal sheet layout: one sheet with a repo mapping, one empty.
        self.SHEETS = {"sheet1": {"repo_names": {}}, "sheet2": {}}
        self.TITLE = "MockTitle"
        self.__file__ = 0  # stands in for a real module's __file__ attribute
        self.fill_funcs = examples.fill_funcs_example
        self.ARCHIVE_SHEET = {}
class SpreadsheetMock(spreadsheet.Spreadsheet):
    """Hand-written mock for Spreadsheet objects.

    Overrides some methods to exclude backend calls.
    """

    def __init__(self, config, id_=None):
        # NOTE: id_ is accepted for signature compatibility, but the mock
        # always uses the module-level SPREADSHEET_ID.
        self._builders = {}
        self._columns = []
        self._config = config
        self._id = SPREADSHEET_ID
        self._last_config_update = -1
        self._ss_resource = None
        self._config_updated = True
        self._to_be_archived = {}
        self._archive = None
def return_module(module):
    """Stand-in for importlib.reload(): hands the module straight back."""
    return module
| StarcoderdataPython |
9753782 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Sections view"""
import tkinter as tk
import tkinter.ttk as ttk
from ._scrollbarview import ScrollbarView
class SectionsView(ScrollbarView):
    """ Aqua Browser Sections View """

    # Row tags: _cb_tree_select dispatches on these to pick the matching
    # controller callback.
    _TAG_PLUGGABLE_TYPE = 'PLUGGABLE_TYPE'
    _TAG_PLUGGABLE = 'PLUGGABLE'
    _TAG_PROBLEMS = 'PROBLEMS'
    _TAG_DEPENDS = 'DEPENDS'
    _TAG_DEPENDENCY = 'DEPENDENCY'

    def __init__(self, controller, parent, **options) -> None:
        super(SectionsView, self).__init__(parent, **options)
        self._controller = controller
        ttk.Style().configure("BrowseSectionsView.Treeview.Heading", font=(None, 12, 'bold'))
        self._tree = ttk.Treeview(self, style='BrowseSectionsView.Treeview', selectmode=tk.BROWSE)
        self._tree.heading('#0', text='Sections')
        self._tree.bind('<<TreeviewSelect>>', self._cb_tree_select)
        self.init_widgets(self._tree)

    def clear(self):
        """ clear view """
        for i in self._tree.get_children():
            # NOTE(review): Treeview.delete takes item ids as *args; passing
            # a one-element list relies on Tcl argument coercion -- the
            # conventional call would be delete(i). Confirm before changing.
            self._tree.delete([i])

    def populate(self, algos):
        """ populate view with pluggables """
        self.clear()
        root_identifier = None
        # One top-level row per pluggable type, one child per pluggable,
        # with optional 'problems'/'depends' rows beneath each pluggable.
        for pluggable_type, section_types in algos.items():
            identifier = self._tree.insert('',
                                           tk.END,
                                           text=pluggable_type,
                                           values=[''],
                                           tags=SectionsView._TAG_PLUGGABLE_TYPE)
            if root_identifier is None:
                root_identifier = identifier
            child_identifier = None
            for pluggable_name, pluggable_name_values in section_types.items():
                child_identifier = self._tree.insert(identifier,
                                                     tk.END,
                                                     text=pluggable_name,
                                                     values=[pluggable_type],
                                                     tags=SectionsView._TAG_PLUGGABLE)
                problems = pluggable_name_values['problems']
                if problems:
                    self._tree.insert(child_identifier,
                                      tk.END,
                                      text='problems',
                                      values=[pluggable_type, pluggable_name],
                                      tags=SectionsView._TAG_PROBLEMS)
                depends = pluggable_name_values['depends']
                if depends:
                    depends_identifier = self._tree.insert(child_identifier,
                                                           tk.END,
                                                           text='depends',
                                                           values=[pluggable_type, pluggable_name],
                                                           tags=SectionsView._TAG_DEPENDS)
                    for dependency in depends:
                        if 'pluggable_type' in dependency:
                            self._tree.insert(depends_identifier,
                                              tk.END,
                                              text=dependency['pluggable_type'],
                                              values=[pluggable_type, pluggable_name],
                                              tags=SectionsView._TAG_DEPENDENCY)
            # Scroll so the most recently added rows are visible.
            if child_identifier is not None:
                self._tree.see(child_identifier)
        if root_identifier is not None:
            self._tree.see(root_identifier)

    def has_selection(self):
        """ check if an entry is selected """
        return self._tree.selection()

    def _cb_tree_select(self, event):
        # Route the selection to the controller based on the row's tag.
        for item in self._tree.selection():
            # NOTE(review): option name 'tag' resolves to 'tags' only via
            # Tcl's unambiguous-abbreviation matching -- confirm.
            item_tag = self._tree.item(item, 'tag')[0]
            if item_tag == SectionsView._TAG_PLUGGABLE_TYPE:
                item_text = self._tree.item(item, 'text')
                self._controller.pluggable_type_select(item_text)
            elif item_tag == SectionsView._TAG_PLUGGABLE:
                item_text = self._tree.item(item, 'text')
                values = self._tree.item(item, 'values')
                self._controller.pluggable_schema_select(values[0], item_text)
            elif item_tag == SectionsView._TAG_PROBLEMS:
                values = self._tree.item(item, 'values')
                self._controller.pluggable_problems_select(values[0], values[1])
            elif item_tag == SectionsView._TAG_DEPENDS:
                values = self._tree.item(item, 'values')
                self._controller.pluggable_depends_select(values[0], values[1])
            elif item_tag == SectionsView._TAG_DEPENDENCY:
                item_text = self._tree.item(item, 'text')
                values = self._tree.item(item, 'values')
                self._controller.pluggable_dependency_select(values[0], values[1], item_text)
        return
| StarcoderdataPython |
5111474 | <gh_stars>0
from utils import *
def load_data():
    """Read the corpus named in sys.argv[1] and index it.

    Returns (data, cti, wti, tti): the indexed sequences plus the
    char/word/tag -> integer-index vocabularies.
    """
    data = []
    if KEEP_IDX:
        # Reuse the vocabularies saved by a previous run.
        cti = load_tkn_to_idx(sys.argv[1] + ".char_to_idx")
        wti = load_tkn_to_idx(sys.argv[1] + ".word_to_idx")
        tti = load_tkn_to_idx(sys.argv[1] + ".tag_to_idx")
    else:
        # Fresh vocabularies seeded with the special symbols.
        cti = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX, UNK: UNK_IDX}
        wti = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX, UNK: UNK_IDX}
        tti = {PAD: PAD_IDX, SOS: SOS_IDX, EOS: EOS_IDX}
    fo = open(sys.argv[1])
    if HRE:
        # Hierarchical mode: documents are separated by blank lines.
        # Documents are sorted by descending length and flattened, with a
        # None sentinel marking each document boundary.
        tmp = []
        txt = fo.read().strip().split("\n\n")
        for doc in txt:
            data.append([])
            for line in doc.split("\n"):
                x, y = load_line(line, cti, wti, tti)
                data[-1].append((x, y))
        for doc in sorted(data, key = lambda x: -len(x)):
            tmp.extend(doc)
            tmp.append(None)
        data = tmp[:-1]  # drop the trailing sentinel
    else:
        for line in fo:
            x, y = load_line(line, cti, wti, tti)
            data.append((x, y))
        data.sort(key = lambda x: -len(x[0])) # sort by source sequence length
    fo.close()
    return data, cti, wti, tti
def load_line(line, cti, wti, tti):
    """Index one line; may mutate the shared cti/wti/tti vocabularies.

    Returns (x, y): x holds one "c1+c2+...:widx" token per word, y the
    tag-index sequence (a single sentence-level tag in HRE mode).
    """
    x, y = [], []
    if HRE:
        # In hierarchical mode the whole line carries one tag after a tab.
        line, y = line.split("\t")
        if y not in tti:
            tti[y] = len(tti)
        y = [str(tti[y])]
    for w in line.split(" "):
        # Outside HRE mode each token is "word/TAG" (split on the last slash).
        w, tag = (w, None) if HRE else re.split("/(?=[^/]+$)", w)
        w0 = normalize(w) # for character embedding
        w1 = w0.lower() # for word embedding
        if not KEEP_IDX:
            # Grow the vocabularies as new symbols appear.
            for c in w0:
                if c not in cti:
                    cti[c] = len(cti)
            if w1 not in wti:
                wti[w1] = len(wti)
            if tag and tag not in tti:
                tti[tag] = len(tti)
        # NOTE(review): with KEEP_IDX, unseen chars/words raise KeyError
        # here rather than mapping to UNK -- confirm that is intended.
        x.append("+".join(str(cti[c]) for c in w0) + ":%d" % wti[w1])
        if tag:
            y.append(str(tti[tag]))
    return x, y
if __name__ == "__main__":
    if len(sys.argv) != 2:
        sys.exit("Usage: %s training_data" % sys.argv[0])
    # Build the indexed dataset and persist it next to the input file.
    data, cti, wti, tti = load_data()
    save_data(sys.argv[1] + ".csv", data)
    if not KEEP_IDX:
        # Only write vocabularies when they were (re)built this run.
        save_tkn_to_idx(sys.argv[1] + ".char_to_idx", cti)
        save_tkn_to_idx(sys.argv[1] + ".word_to_idx", wti)
        save_tkn_to_idx(sys.argv[1] + ".tag_to_idx", tti)
| StarcoderdataPython |
11358363 |
#import time, itertools,
import string
import os, os.path
#import sys, shutil
#import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#import operator
import matplotlib
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#try:
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
#except ImportError:
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
#from matplotlib.figure import Figure
#import numpy.ma as ma
#import matplotlib.colors as colors
#import matplotlib.cm as cm
#import matplotlib.mlab as mlab
#import pylab
#import pickle
#from fcns_math import *
from fcns_io import *
from fcns_ui import *
#from VisualizeAuxFcns import *
from SaveImagesForm import Ui_SaveImagesDialog
from SaveImagesBatchForm import Ui_SaveImagesBatchDialog
from fcns_compplots import *
#from quatcomp_plot_options import quatcompplotoptions
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
class saveimagesDialog(QDialog, Ui_SaveImagesDialog):
    """Dialog that saves selected plot images / text files into an ANA
    folder and records them in the .ana bookkeeping file."""

    def __init__(self, parent, anafolder, fomname, plateid_dict_list=[], code_dict_list=[], histplow=None, xyplotw=None, selectsamplebrowser=None, x_y_righty=['x', 'y', ''], repr_anaint_plots=1, filenamesearchlist=None):
        #filenamesearchlist is nested list, level 0 of filenamesearchlist is OR and level 1 is AND
        super(saveimagesDialog, self).__init__(parent)
        self.setupUi(self)
        self.parent=parent
        self.plateid_dict_list=plateid_dict_list
        self.code_dict_list=code_dict_list
        self.repr_anaint_plots=repr_anaint_plots
        # Saving into a zipped ANA is not supported.
        if '.zip' in anafolder:
            idialog=messageDialog(self, 'Cannot save to ANA because it is in a .zip ')
            idialog.exec_()
            self.reject()
            return
        fnl=[fn for fn in os.listdir(anafolder) if fn.endswith('.ana') and not fn.startswith('.')]
        if len(fnl)==0:
            idialog=messageDialog(self, 'Cannot save to ANA because no .ana in the folder')
            idialog.exec_()
            self.reject()
            return
        self.anafn=fnl[0]
        self.anafolder=anafolder
        QObject.connect(self.FilesTreeWidget, SIGNAL('itemDoubleClicked(QTreeWidgetItem*, int)'), self.editname)
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
        self.widgetTopLevelItems={}
        # One top-level tree item per category of saveable output.
        self.widgetkeys=['plate_id','code', 'xy', 'hist', 'select_samples_text']
        for k in self.widgetkeys:
            mainitem=QTreeWidgetItem([k], 0)
            self.FilesTreeWidget.addTopLevelItem(mainitem)
            mainitem.setExpanded(True)
            self.widgetTopLevelItems[k]=mainitem
        self.xyyname='-'.join([k for k in x_y_righty if len(k)>0])
        self.fomname=fomname
        # searchchecker: a filename matches if, for at least one AND-list
        # in filenamesearchlist, every search string occurs in it.
        if filenamesearchlist is None:
            searchchecker=lambda filen:True#not used in this instance
        else:
            searchchecker=lambda filen:True in [not (False in [searchstr in filen for searchstr in searchlist]) for searchlist in filenamesearchlist]
        self.widget_plow_dlist=[]
        # plate_id / code plots: one checkable row per dict entry.
        for widgk, val_dict_list in zip(self.widgetkeys[0:2], [self.plateid_dict_list, self.code_dict_list]):
            mainitem=self.widgetTopLevelItems[widgk]
            for (k, d) in val_dict_list:
                filen=self.filterchars('%s__%s-%s.png' %(widgk, k, self.fomname))
                s=filen+': python_visualizer_png_image'
                item=QTreeWidgetItem([s], 0)
                item.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
                if filenamesearchlist is None:
                    item.setCheckState(0, Qt.Checked if d['checked'] else Qt.Unchecked)
                else:
                    item.setCheckState(0, Qt.Checked if searchchecker(filen) else Qt.Unchecked)
                mainitem.addChild(item)
                d['item']=item
                self.widget_plow_dlist+=[d]
        # xy / hist plot widgets (only when provided by the caller).
        for widgk, plotw, lab in zip(self.widgetkeys[2:4], [xyplotw, histplow], [self.xyyname, self.fomname]):
            if plotw is None:
                continue
            mainitem=self.widgetTopLevelItems[widgk]
            d={'plotw':plotw}
            filen=self.filterchars('%s__%s.png' %(widgk, lab))
            s=filen+': python_visualizer_png_image'
            item=QTreeWidgetItem([s], 0)
            item.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
            if filenamesearchlist is None:
                item.setCheckState(0, Qt.Unchecked)
            else:
                item.setCheckState(0, Qt.Checked if searchchecker(filen) else Qt.Unchecked)
            mainitem.addChild(item)
            d['item']=item
            self.widget_plow_dlist+=[d]
        self.selectsamplesname=fomname
        self.widget_textbrowser_dlist=[]
        # Text-browser content saved as .txt (only when provided).
        for widgk, browser, lab in zip(self.widgetkeys[4:5], [selectsamplebrowser], [self.selectsamplesname]):
            if browser is None:
                continue
            mainitem=self.widgetTopLevelItems[widgk]
            d={'browser':browser}
            filen=self.filterchars('%s__%s.txt' %(widgk, lab))
            s=filen+': python_visualizer_txt'
            item=QTreeWidgetItem([s], 0)
            item.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
            if filenamesearchlist is None:
                item.setCheckState(0, Qt.Unchecked)
            else:
                item.setCheckState(0, Qt.Checked if searchchecker(filen) else Qt.Unchecked)
            mainitem.addChild(item)
            d['item']=item
            self.widget_textbrowser_dlist+=[d]
        self.newanapath=False

    def editname(self, item, column):
        # Double-click handler: let the user rename the target file while
        # keeping the '.png: <attr>' suffix intact.
        if item is None:
            # NOTE(review): `widget` is not defined in this scope, so this
            # branch would raise NameError -- probably meant
            # self.FilesTreeWidget.currentItem(). Confirm before fixing.
            item=widget.currentItem()
        s=str(item.text(column))
        st=s.partition('.png: ')
        v=st[0]
        keepstr=''.join(st[1:])
        ans=userinputcaller(self, inputs=[('filename', str, v)], title='Enter new filename', cancelallowed=True)
        if ans is None or ans[0].strip()==v:
            return
        ans=ans[0].strip()
        item.setText(column,''.join([ans, keepstr]))

    def filterchars(self, s):
        # Strip any character that is not filename-safe.
        valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
        return ''.join([c for c in s if c in valid_chars])

    def updateoptionsfrombatchidialog(self, batchidialog, lastbatchiteration=False):
        # Copy the batch dialog's options onto this per-ANA dialog.
        prependstr=str(batchidialog.prependfilenameLineEdit.text())
        combinedprependstr=self.filterchars(prependstr+str(self.prependfilenameLineEdit.text()))
        self.prependfilenameLineEdit.setText(combinedprependstr)
        self.overwriteCheckBox.setChecked(batchidialog.overwriteCheckBox.isChecked())
        self.epsCheckBox.setChecked(batchidialog.epsCheckBox.isChecked())
        if lastbatchiteration:#only want to convert to done on last image being batch-saved
            self.doneCheckBox.setChecked(batchidialog.doneCheckBox.isChecked())#for batch save, images saved in place and then box check in the end if convert to .done chosen

    def ExitRoutine(self):
        # Accept handler: write the checked images/text files and update
        # the .ana bookkeeping.
        overbool=self.overwriteCheckBox.isChecked()
        prependstr=self.filterchars(str(self.prependfilenameLineEdit.text()))
        oldp=os.path.join(self.anafolder, self.anafn)
        anadict=readana(oldp, erroruifcn=None, stringvalues=True, returnzipclass=False)#cannot be a .zip
        startingwithcopiedbool='copied' in os.path.split(self.anafolder)[1]
        if startingwithcopiedbool or self.doneCheckBox.isChecked():#must convert to .done if starting with .copied. allows .done to be edited which is bad practice
            if not os.path.split(self.anafolder)[1].count('.')>1:
                idialog=messageDialog(self, 'Cannot save because ANA folder has no extension')
                idialog.exec_()
                return
            if startingwithcopiedbool:#if modiyfing a .copied then need a new time stamp
                newanafn=timestampname()+'.ana'
                newanafolder=self.anafolder.rpartition('.')[0][:-15]+newanafn[:-4]+'.done'
                movebool=False
            else:
                newanafolder=self.anafolder.rpartition('.')[0]+'.done'#this reapleces .run with .done but more generally .anything with .done
                movebool=True
                newanafn=self.anafn
            saveana_tempfolder(None, self.anafolder, erroruifcn=None, skipana=True, anadict=None, movebool=movebool, savefolder=newanafolder, saveanafile=False)#move files if necessary but don't create .ana or .exp yet. Do this first so image files get put only into new folder
            self.newanapath=os.path.join(newanafolder, newanafn)
        else:#writing files and new ana into existing folder
            newanafn=self.anafn
            newanafolder=self.anafolder
        #images here
        lines=[]
        for d in self.widget_plow_dlist:
            if not bool(d['item'].checkState(0)):
                continue
            pngfn, garb, pngattr=str(d['item'].text(0)).partition(': ')
            pngfn=self.filterchars(prependstr+pngfn)
            existfns=os.listdir(newanafolder)
            fn_attr_list=[(pngfn, pngattr)]
            if self.epsCheckBox.isChecked():
                fn_attr_list+=[(pngfn.replace('png', 'eps'), pngattr.replace('png', 'eps'))]
            for fn, a in fn_attr_list:
                # Avoid clobbering an existing file unless overwrite is on:
                # append __2, __3, ... until the name is free.
                if (fn in existfns) and not overbool:
                    i=2
                    fnorig=fn
                    while fn in existfns:
                        fn=''.join([fnorig[:-4], '__%d' %i, fnorig[-4:]])
                        i+=1
                savep=os.path.join(newanafolder, fn)
                existfns+=[fn]
                d['plotw'].fig.savefig(savep)
                lines+=[(fn, a)]
        #txt here
        txtlines=[]
        for d in self.widget_textbrowser_dlist:
            if not bool(d['item'].checkState(0)):
                continue
            pngfn, garb, pngattr=str(d['item'].text(0)).partition(': ')
            pngfn=prependstr+pngfn
            existfns=os.listdir(newanafolder)
            fn_attr_list=[(pngfn, pngattr)]
            for fn, a in fn_attr_list:
                if (fn in existfns) and not overbool:
                    i=2
                    fnorig=fn
                    while fn in existfns:
                        fn=''.join([fnorig[:-4], '__%d' %i, fnorig[-4:]])
                        i+=1
                savep=os.path.join(newanafolder, fn)
                existfns+=[fn]
                with open(savep, mode='w') as f:
                    f.write(str(d['browser'].toPlainText()))
                txtlines+=[(fn, a)]
        # Register the written files under ana__N / files_multi_run and
        # rewrite the .ana file.
        if (len(lines)+len(txtlines))>0:
            da=anadict['ana__%d' %self.repr_anaint_plots]
            if not 'files_multi_run' in da.keys():
                da['files_multi_run']={}
            df=da['files_multi_run']
            if len(lines)>0:
                if not 'image_files' in df.keys():
                    df['image_files']={}
                d=df['image_files']
                for fn, a in lines:
                    d[fn]=a#if fn exists and was overwritten this will jdo nothing or update the attrstr
            if len(txtlines)>0:
                if not 'txt_files' in df.keys():
                    df['txt_files']={}
                d=df['txt_files']
                for fn, a in txtlines:
                    d[fn]=a#if fn exists and was overwritten this will jdo nothing or update the attrstr
            newp=os.path.join(newanafolder, newanafn)
            saveanafiles(newp, anadict=anadict, changeananame=True)#need to overwrite the name because may be a new anafolder/timestamp
class saveimagesbatchDialog(QDialog, Ui_SaveImagesBatchDialog):
    """Dialog that lets the user pick which combobox entries to include
    in a batch image save; the selection is exposed as
    self.selectcomboboxinds after accept."""

    def __init__(self, parent, comboind_strlist):
        super(saveimagesbatchDialog, self).__init__(parent)
        self.setupUi(self)
        self.parent=parent
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
        self.widgetTopLevelItems={}
        self.comboind_strlist=comboind_strlist
        # One checkable (initially checked) top-level row per combobox entry.
        for comboind, k in self.comboind_strlist:
            mainitem=QTreeWidgetItem([k], 0)
            mainitem.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
            mainitem.setCheckState(0, Qt.Checked)
            self.FilesTreeWidget.addTopLevelItem(mainitem)
            self.widgetTopLevelItems[k]={}
            self.widgetTopLevelItems[k]['item']=mainitem
            self.widgetTopLevelItems[k]['comboind']=comboind

    def ExitRoutine(self):
        # Collect the combobox indexes of all rows still checked.
        self.selectcomboboxinds=sorted([d['comboind'] for d in self.widgetTopLevelItems.values() if bool(d['item'].checkState(0))])
| StarcoderdataPython |
'''Read products until the user stops, then report:
the total spent on the purchase,
how many products cost more than R$1000,
and the name of the cheapest product.'''
contador = total = quant = preco_b = 0
nome_b = ''
while True:
    nome = str(input('Digite o nome do produto: '))
    preco = float(input('Digite o valor do produto: R$ '))
    # Re-prompt until a valid answer is given. (The original took
    # ...strip().upper()[0], which raised IndexError on empty input.)
    while True:
        resposta = str(input('Quer continuar?\n'
                             '[ S ] / [ N ] ')).strip().upper()
        if resposta and resposta[0] in 'SN':
            continuar = resposta[0]
            break
    total += preco
    contador += 1
    if preco > 1000:
        quant += 1
    # Track the cheapest product (the first one seeds the minimum).
    if contador == 1 or preco < preco_b:
        nome_b = nome
        preco_b = preco
    if continuar in 'N':
        break
print(f'O total gasto na compra foi R${total:.2f}')
print(f'{quant} produtos tem valor superior a R$ 1000')
print(f'O produto mais barato chama {nome_b} e custa R${preco_b:.2f}')
6454976 | <reponame>geraltofrivia/mytorch
"""
This file contains training loops, available as function calls.
## USAGE
Typically, a training loop will want a train, a predict and a eval function;
alongwith other args and data which dictate what the loop should train on, and for how long.
See the documentation of `simplest loop` to see what it'd look like.
"""
from tqdm import tqdm
from typing import Callable
# Local imports
from .utils.goodies import *
from . import dataiters
def simplest_loop(epochs: int,
                  data: dict,
                  opt: torch.optim,
                  loss_fn: torch.nn,
                  train_fn: Callable,
                  predict_fn: Callable,
                  device: Union[str, torch.device] = torch.device('cpu'),
                  data_fn: classmethod = dataiters.SimplestSampler,
                  eval_fn: Callable = default_eval) -> (list, list, list):
    """
    A fn which can be used to train a language model.

    The model doesn't need to be an nn.Module,
        but have an eval (optional), a train and a predict function.

    Data should be a dict like so:
        {"train":{"x":np.arr, "y":np.arr}, "valid":{"x":np.arr, "y":np.arr} }

    Train_fn must return both loss and y_pred

    :param epochs: number of epochs to train for
    :param data: a dict with "train" and "valid" keys, each consumable by data_fn
    :param device: torch device to create new tensor from data
    :param opt: optimizer
    :param loss_fn: loss function
    :param train_fn: function to call with x and y
    :param predict_fn: function to call with x (test)
    :param data_fn: a class to which we can pass X and Y, and get an iterator.
    :param eval_fn: (optional) function which when given pred and true, returns acc
    :return: traces (per-epoch train acc, valid acc, train loss)
    """
    train_loss = []
    train_acc = []
    valid_acc = []
    lrs = []  # NOTE(review): never appended to nor returned here

    # Epoch level
    for e in range(epochs):

        per_epoch_loss = []
        per_epoch_tr_acc = []

        # Train
        with Timer() as timer:

            # Make data
            trn_dl, val_dl = data_fn(data['train']), data_fn(data['valid'])

            for x, y in tqdm(trn_dl):
                opt.zero_grad()

                _x = torch.tensor(x, dtype=torch.long, device=device)
                _y = torch.tensor(y, dtype=torch.long, device=device)

                y_pred = train_fn(_x)
                loss = loss_fn(y_pred, _y)

                per_epoch_tr_acc.append(eval_fn(y_pred=y_pred, y_true=_y).item())
                per_epoch_loss.append(loss.item())

                loss.backward()
                opt.step()

        # Val
        with torch.no_grad():

            per_epoch_vl_acc = []
            for x, y in tqdm(val_dl):
                _x = torch.tensor(x, dtype=torch.long, device=device)
                _y = torch.tensor(y, dtype=torch.long, device=device)

                y_pred = predict_fn(_x)
                per_epoch_vl_acc.append(eval_fn(y_pred, _y).item())

        # Bookkeep
        train_acc.append(np.mean(per_epoch_tr_acc))
        train_loss.append(np.mean(per_epoch_loss))
        valid_acc.append(np.mean(per_epoch_vl_acc))

        print("Epoch: %(epo)03d | Loss: %(loss).5f | Tr_c: %(tracc)0.5f | Vl_c: %(vlacc)0.5f | Time: %(time).3f min"
              % {'epo': e,
                 'loss': float(np.mean(per_epoch_loss)),
                 'tracc': float(np.mean(per_epoch_tr_acc)),
                 'vlacc': float(np.mean(per_epoch_vl_acc)),
                 'time': timer.interval / 60.0})

    return train_acc, valid_acc, train_loss
def generic_loop(epochs: int,
                 data: dict,
                 device: Union[str, torch.device],
                 opt: torch.optim,
                 loss_fn: torch.nn,
                 model: torch.nn.Module,
                 train_fn: Callable,
                 predict_fn: Callable,
                 save: bool = False,
                 save_params: dict = None,
                 save_dir: Path = None,
                 save_above: float = -np.inf,
                 save_args: dict = None,
                 epoch_count: int = 0,
                 epoch_start_hook: Callable = None,
                 epoch_end_hook: Callable = None,
                 batch_start_hook: Callable = None,
                 batch_end_hook: Callable = None,
                 weight_decay: float = 0.0,
                 clip_grads_at: float = -1.0,
                 lr_schedule=None,
                 data_fn: classmethod = dataiters.SimplestSampler,
                 eval_fn: Callable = None,
                 notify: bool = False,
                 notify_key: str = None) -> (list, list, list, list):
    """
    A generic training loop, which based on diff hook fns (defined below), should handle anything given to it.

    The model need not be an nn.Module,
        but should have correctly wired forward and a predict function.

    # Data input
        Data should be a dict like so:
            {"train":{"x":np.arr, "y":np.arr}, "val":{"x":np.arr, "y":np.arr} }
        or more generally,
            {"train": something that can be thrown to data_fn, "valid": something that can be thrown to data_fn}

    # Saving Logic
        If the flag is enabled, give in the dir and it'll save traces and the model (and the model encoder)
        everytime training acc exceeds all prev ones.

        ## If you want to save diff parts of the model,
        Prepare save args like -
            save_args = {'torch_stuff': [tosave('model.torch', clf.state_dict()), tosave('model_enc.torch', clf.encoder.state_dict())]}
        and pass it to the model alongwith.
        If the arg is empty, it defaults to -
            save_args = {'torch_stuff': [tosave('model.torch', model.state_dict())]}

    :param epochs: number of epochs to train for
    :param data: data dict (structure specified above)
    :param device: torch device to init the tensors with
    :param opt: torch optimizer, with proper param_groups for better lr decay per laye
    :param loss_fn: torch.nn loss fn
    :param model: torch module needed for
        i: grad clipping
        ii: for calling eval() and train() (regarding dropout)
    :param train_fn: a function which takes x & y, returns loss and y_pred
    :param predict_fn: a fn which takes x and returns y_pred
    :param save: [OPTIONAL] bool which wants either doesn't save, or saves at best
    :param save_dir: [OPTIONAL] Path object to which we save stuff (based on save_best)
    :param save_params: [OPTIONAL] a dict of all the params used while running and training the model.
    :param save_above: [OPTIONAL] acts as threshold regarading model saving. If the current trn accuracy is less than this, won't.
    :param save_args: [OPTIONAL] reference to the model to be saved
    :param epoch_count: an int which is added with #epochs (for better representation of how many epochs have actually passed).
        You can use this for when you run the loop say 3 times, do something else and run it for another 10.
    :param epoch_start_hook: a fn that can be called @ start of every epoch (returns model, opt)
    :param epoch_end_hook: a fn that can be called @ end of every epoch (returns model, opt)
    :param batch_start_hook: a fn that can be called @ start of every batch (returns model, opt)
    :param batch_end_hook: a fn that can be called @ end of every batch (returns model, opt)
    :param weight_decay: a L2 ratio (as mentioned in (https://arxiv.org/pdf/1711.05101.pdf)
    :param clip_grads_at: in case you want gradients clipped, send the max val here
    :param lr_schedule: a schedule that is called @ every batch start.
    :param data_fn: a class to which we can pass X and Y, and get an iterator.
    :param eval_fn: (optional) function which when given pred and true, returns acc
    :param notify: (optional) flag which enables sending notifications to your phones once the loop is done.
    :param notify_key: (optional) the api key to which the notification is to be sent. You can give it here, or in a file (see README.md)
    :return: traces (train acc, valid acc, train loss, lrs)
    """
    train_loss = []
    train_acc = []
    val_acc = []
    lrs = []
    saved_info = {}

    # Epoch level
    for e in range(epoch_count, epochs + epoch_count):

        per_epoch_loss = []
        per_epoch_tr_acc = []

        # Train
        with Timer() as timer:

            # Enable dropouts
            model.train()

            if epoch_start_hook: epoch_start_hook()

            # Make data
            trn_dl, val_dl = data_fn(data['train']), data_fn(data['valid'])

            for x, y in tqdm(trn_dl):

                if batch_start_hook: batch_start_hook()
                opt.zero_grad()

                if lr_schedule: lrs.append(update_lr(opt, lr_schedule.get()))

                _x = torch.tensor(x, dtype=torch.long, device=device)
                _y = torch.tensor(y, dtype=torch.long, device=device)

                y_pred = train_fn(_x)
                loss = loss_fn(y_pred, _y)

                per_epoch_tr_acc.append(eval_fn(y_pred=y_pred, y_true=_y).item())
                per_epoch_loss.append(loss.item())

                loss.backward()

                if clip_grads_at > 0.0: torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grads_at)
                for group in opt.param_groups:
                    for param in group['params']:
                        # Decoupled weight decay (arXiv:1711.05101). The
                        # `alpha=` form replaces the deprecated
                        # Tensor.add(Number, Tensor) overload.
                        param.data = param.data.add(param.data, alpha=-weight_decay * group['lr'])

                opt.step()
                if batch_end_hook: batch_end_hook()

            if epoch_end_hook: epoch_end_hook()

        # Val
        with torch.no_grad():

            # Disable dropouts
            model.eval()

            per_epoch_vl_acc = []
            for x, y in tqdm(val_dl):
                _x = torch.tensor(x, dtype=torch.long, device=device)
                _y = torch.tensor(y, dtype=torch.long, device=device)

                y_pred = predict_fn(_x)
                per_epoch_vl_acc.append(eval_fn(y_pred, _y).item())

        # Bookkeeping
        train_acc.append(np.mean(per_epoch_tr_acc))
        train_loss.append(np.mean(per_epoch_loss))
        val_acc.append(np.mean(per_epoch_vl_acc))

        print("Epoch: %(epo)03d | Loss: %(loss).5f | Tr_c: %(tracc)0.5f | Vl_c: %(vlacc)0.5f | Time: %(time).3f min"
              % {'epo': e,
                 'loss': float(np.mean(per_epoch_loss)),
                 'tracc': float(np.mean(per_epoch_tr_acc)),
                 'vlacc': float(np.mean(per_epoch_vl_acc)),
                 'time': timer.interval / 60.0})

        # Save block (flag and condition)
        if save and train_acc[-1] >= save_above:

            # Update threshold
            save_above = train_acc[-1]

            # Adding epoch info along with options
            if save_params:
                save_params['epoch'] = e
            else:
                # Bugfix: this used to assign a misspelled `save_paras`,
                # so a None save_params was pickled unchanged.
                save_params = {'epoch': e}

            # Prepare save_args if none
            if not save_args:
                save_args = {'torch_stuff': [tosave('model.torch', model.state_dict())]}

            # Call save function and save.
            # Bugfix: the original condition was inverted
            # (None if 'torch_stuff' in save_args ...), so the model state
            # was never actually written to disk.
            mt_save(save_dir,
                    torch_stuff=save_args.get('torch_stuff'),
                    pickle_stuff=[
                        tosave('traces.pkl', [train_acc, val_acc, train_loss, lrs]),
                        tosave('unsup_options.pkl', save_params)])
            print(f"Model saved on Epoch {e} at {save_dir} because of highest training acc so far")

            # Log the saved thing
            saved_info['epoch'] = e
            saved_info['accuracy'] = train_acc[-1]
            saved_info['directory'] = save_dir

    if notify:
        if not saved_info:
            message_template = "Your model is done training."
            send_notification(data=saved_info, key=notify_key, message_template=message_template)
        else:
            send_notification(data=saved_info, key=notify_key)

    return train_acc, val_acc, train_loss, lrs
# Let's write hooks to mimic phase 2 data prep
def reset_hidden(model, **args):
    """Zero every hidden-state tensor of the model's encoder, in place."""
    for layer_states in model.encoder.hidden:
        for state in layer_states:
            state.data.zero_()


# epoch_start_hook_unsup_ft = partial(reset_hidden, model)
| StarcoderdataPython |
"""Tests for `{{ cookiecutter.pkg_name }}` package."""
# NOTE: this file is a cookiecutter/Jinja2 template, not plain Python;
# the {% ... %} blocks are resolved when the project is generated.
{% if cookiecutter.command_line_interface|lower == 'y' -%}
from typer.testing import CliRunner

from {{ cookiecutter.pkg_name }} import cli
{%- endif %}


{%- if cookiecutter.command_line_interface|lower == 'y' %}
class TestCLI:
    """Test the CLI."""

    runner = CliRunner()

    def test_main(self) -> None:
        """Test main call of CLI"""
        result = self.runner.invoke(cli.main)
        assert result.exit_code == 0
        assert "{{ cookiecutter.project_slug }}" in result.output

    def test_help(self) -> None:
        """Test --help call of CLI"""
        result = self.runner.invoke(cli.main, ["--help"])
        assert result.exit_code == 0
        # Exactly one line of the help output should mention --help.
        help_lines = list(
            filter(lambda line: "--help" in line, result.output.split("\n"))
        )
        assert len(help_lines) == 1
        help_text = help_lines[0]
        assert "Show this message and exit." in help_text
{%- endif %}
3277698 | import itertools
import numpy as np
from challenge import Challenge
class ChallengeSolution(Challenge):
    """Seven-segment display decoding (Advent of Code 2021, day 8)."""

    def __init__(self):
        # Initialise super
        super().__init__()
        # Define digit masks: segment membership per digit, segments a..g.
        self.digits = np.asarray([
            [True , True , True , False, True , True , True ], # 0
            [False, False, True , False, False, True , False], # 1
            [True , False, True , True , True , False, True ], # 2
            [True , False, True , True , False, True , True ], # 3
            [False, True , True , True , False, True , False], # 4
            [True , True , False, True , False, True , True ], # 5
            [True , True , False, True , True , True , True ], # 6
            [True , False, True , False, False, True , False], # 7
            [True , True , True , True , True , True , True ], # 8
            [True , True , True , True , False, True , True ], # 9
        ])
        # Length dict
        # 1 = len(2), 4 = len(4), 7 = len(3), 8 = len(7)
        self.fixed_lengths = {2, 3, 4, 7}

    ########################################################################
    #                              Load data                               #
    ########################################################################

    def load(self, path):
        """Parse each line into (signal patterns, output digits) as sets."""
        # Load data from path
        with open(path) as infile:
            data = infile.read().strip().split('\n')
        # Parse data
        for i, item in enumerate(data):
            crossed, target = item.split(' | ')
            data[i] = (
                [set(x) for x in crossed.split()],
                [set(x) for x in target .split()],
            )
        # Return data
        return data

    ########################################################################
    #                              Exercises                               #
    ########################################################################

    def part_1(self, data):
        """Count output digits whose segment count uniquely identifies them."""
        # Initialise result
        result = 0
        # Loop over data
        for crossed, target in data:
            result += sum([len(x) in self.fixed_lengths for x in target])
        # Return result
        return result

    def part_2(self, data):
        """Deduce each display's wiring and sum the decoded 4-digit outputs."""
        # Initialise result
        result = 0
        # Loop over all data
        for crossed, target in data:
            # Sort crossed by length: lengths are 2,3,4,5,5,5,6,6,6,7, so
            # 1, 7, 4 and 8 are pinned down by position; the rest are
            # deduced from segment overlaps with those.
            crossed = list(sorted(crossed, key=lambda x: len(x)))
            # Define each number in crossed
            one = crossed[0]
            four = crossed[2]
            seven = crossed[1]
            eight = crossed[9]
            three = [x for x in crossed[3:6] if len(x & one ) == 2][0]
            six = [x for x in crossed[6:9] if len(x & one ) == 1][0]
            zero = [x for x in crossed[6:9] if len(x | three) == 7 and x != six ][0]
            nine = [x for x in crossed[6:9] if len(x | three) != 7][0]
            two = [x for x in crossed[3:6] if len(x | nine ) == 7 and x != three][0]
            five = [x for x in crossed[3:6] if len(x | nine ) == 6 and x != three][0]
            # Define numbers
            crossed = [zero,one,two,three,four,five,six,seven,eight,nine]
            # Check where target equals crossed
            for i, x in enumerate(reversed(target)):
                result += pow(10, i) * crossed.index(x)
        # Return result
        return result

    def part_2_naive(self, data):
        # Disabled brute-force variant: returns immediately; the
        # permutation-search code below is unreachable and kept for
        # reference only.
        return 0
        # Get all possible permutations
        permutations = np.asarray([
            list(permutation) for permutation in itertools.permutations('abcdefg')
        ])
        result = 0
        from tqdm import tqdm
        # Loop over all data
        for crossed, target in tqdm(data):
            # Loop over all permutations
            for permutation in permutations:
                # Check if the observation represents at least one digit for all observations
                if all(
                    # Check if permutation is correct
                    (self.digits == np.isin(permutation, observation)).all(axis=-1).any()
                    # Loop over all observations
                    for observation in crossed
                ):
                    subresult = ''
                    for digit in target:
                        digit = np.argwhere((self.digits == np.isin(permutation, digit)).all(axis=-1))[0][0]
                        subresult += str(digit)
                    result += int(subresult)
                    # Stop checking for other permutations
                    break
        # Return result
        return result
| StarcoderdataPython |
11314035 | description = 'setup for the astrium chopper'
group = 'optional'
# Manual/virtual stand-ins for the astrium chopper hardware: a virtual
# speed motor plus manually entered water temperature, water flow and
# vacuum readings (each entry's ``description`` documents the device).
devices = dict(
    chopper_dru_rpm = device('nicos.devices.generic.VirtualMotor',
        description = 'Chopper speed control',
        abslimits = (0, 20000),
        unit = 'rpm',
        fmtstr = '%.0f',
        maxage = 35,
    ),
    chopper_watertemp = device('nicos.devices.generic.ManualMove',
        description = 'Chopper water temp',
        unit = 'degC',
        fmtstr = '%.3f',
        maxage = 35,
        abslimits = (0, 100),
    ),
    chopper_waterflow = device('nicos.devices.generic.ManualMove',
        description = 'Chopper water flow',
        unit = 'l/min',
        fmtstr = '%.3f',
        maxage = 35,
        abslimits = (0, 100),
    ),
    chopper_vacuum = device('nicos.devices.generic.ManualMove',
        description = 'Chopper vacuum pressure',
        unit = 'mbar',
        fmtstr = '%.3f',
        maxage = 35,
        abslimits = (0, 100),
    ),
)
# Per-channel manual devices: both chopper channels expose the same four
# quantities (gear, phase, parking position, speed) with identical
# settings, so they are generated in a nested loop instead of four
# copy-pasted blocks per channel.  Keys and descriptions are unchanged.
for i in range(1, 3):
    for quantity, label in [
        ('gear', 'gear'),
        ('phase', 'phase'),
        ('parkingpos', 'parking position'),
        ('speed', 'speed'),
    ]:
        devices['chopper_ch%i_%s' % (i, quantity)] = device('nicos.devices.generic.ManualMove',
            description = 'Chopper channel %i %s' % (i, label),
            fmtstr = '%.3f',
            abslimits = (0, 10),
            maxage = 35,
            unit = '',
        )
| StarcoderdataPython |
5169976 | #!/usr/bin/env python3
"""
Calculating gcd of given numbers
"""
from sys import argv
from divide import divide
from errorhandler import inputerror, zerodiv
def gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b``.

    Implements the Euclidean algorithm on top of :func:`divide`, which is
    expected to return the quotient/remainder pair ``[q, r]`` and to raise
    ``ZeroDivisionError`` for a zero divisor.
    """
    # The original issued one extra, discarded divide(a, b) call purely for
    # error checking; guarding the loop itself avoids that redundancy.
    try:
        # Euclid: replace (a, b) by (b, a mod b) until the remainder is 0.
        while True:
            _q, r = divide(a, b)
            if not r:
                break
            a, b = b, r
    except ZeroDivisionError:
        # Delegate error reporting; zerodiv() is presumably terminal --
        # TODO confirm it does not return.
        zerodiv("b")
    return b
if __name__ == "__main__":
    # Expect exactly two integer command line arguments: a and b.
    if len(argv) != 3:
        inputerror(2)
    try:
        a, b = int(argv[1]), int(argv[2])
    except ValueError:
        # Only conversion failures should trigger usage help; the previous
        # bare ``except`` also swallowed SystemExit/KeyboardInterrupt.
        inputerror(2)
    g = gcd(a, b)
    print("a = %d, b = %d, gcd(a,b) = %d" % (a, b, g))
| StarcoderdataPython |
396552 | <filename>Main.py
#coding = utf-8
import sys
import os
# When running as a frozen (PyInstaller) bundle, prepend the unpacked temp
# dir so bundled DLLs/plugins are found before system ones.
if hasattr(sys, 'frozen'):
    os.environ['PATH'] = sys._MEIPASS + ";" + os.environ['PATH']
from Ui_Main import Ui_MainWindow
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5 import QtCore
import downloadNeteaseMusiclib
import queue
import re
# Hand-off queues between the UI thread and the download worker threads:
# the UI puts the link/destination, the worker's run() gets them.
musicLink = queue.Queue(maxsize=20)
saveFolder = queue.Queue(maxsize=20)
class DownloadMusicWindow(QMainWindow):
    """Main window of the NetEase music downloader.

    Collects a song/playlist link and a destination folder from the UI,
    starts the matching worker thread and mirrors its progress messages in
    the active tab's info pane.
    """

    # Severity levels for the rolling HTML log shown in the info pane.
    INFO = 0
    ERROR = 1
    DEBUG = 2

    def __init__(self, Ui_MainWindow):
        # Rolling buffer of formatted log lines (cleared once it holds 9).
        self.bufferString = []
        self.debug = True
        self.ui = Ui_MainWindow
        super().__init__()

    def print_(self, type, s):
        """Append a formatted message of severity *type* to the log pane."""
        if self.bufferString.__len__() >= 9:
            self.bufferString.clear()
        if type == self.INFO:
            self.bufferString.append('<b>Info:</b> <span style="color: #00FF00">{}.</span>'.format(s))
        elif type == self.ERROR:
            self.bufferString.append('<b>Error:</b> <span style="color:#ff0000">{}</span>.'.format(s))
        elif type == self.DEBUG and self.debug == True:
            self.bufferString.append('<b>DEBUG:</b> {}.'.format(s))
        if self.ui.tabChoose.currentIndex() == 0:
            self.ui.informationOfSong.setText('<br>'.join(self.bufferString))
        else:
            self.ui.informationOfSongList.setText('<br>'.join(self.bufferString))

    def update_(self, type, s):
        """Replace the most recent log line (used for live progress updates)."""
        if self.bufferString.__len__() >= 9:
            self.bufferString.clear()
        # BUGFIX: pop() used to run unconditionally and raised IndexError
        # whenever the buffer was empty -- in particular right after the
        # clear() above.
        if self.bufferString:
            self.bufferString.pop()
        if type == self.INFO:
            self.bufferString.append('<b>Info:</b> <span style="color: #00FF00">{}</span>.'.format(s))
        elif type == self.ERROR:
            self.bufferString.append('<b>Error:</b> {}.'.format(s))
        elif type == self.DEBUG and self.debug == True:
            self.bufferString.append('<b>DEBUG:</b> {}.'.format(s))
        if self.ui.tabChoose.currentIndex() == 0:
            self.ui.informationOfSong.setText('<br>'.join(self.bufferString))
        else:
            self.ui.informationOfSongList.setText('<br>'.join(self.bufferString))

    def clear_(self):
        """Empty the log buffer and blank the active tab's pane."""
        self.bufferString.clear()
        if self.ui.tabChoose.currentIndex() == 0:
            self.ui.informationOfSong.setText('<br>'.join(self.bufferString))
        else:
            self.ui.informationOfSongList.setText('<br>'.join(self.bufferString))

    def exit(self):
        """Terminate the application."""
        sys.exit(0)

    def updata(self, precient, s):
        """Progress slot: move the bar to *precient* percent and log *s*."""
        self.ui.progressBar.setValue(precient)
        # "speed" messages overwrite the previous line instead of appending.
        if s.find('speed') == -1:
            self.print_(self.INFO, s)
        else:
            self.update_(self.INFO, s)

    def error(self, s):
        """Error slot: log *s* as an error."""
        self.print_(self.ERROR, s)

    def downloadClick(self):
        """Download-button handler: validate inputs and start a worker thread."""
        global musicLink
        global saveFolder
        self.ui.progressBar.setVisible(True)
        self.ui.progressBar.setValue(1)
        self.ui.informationOfSong.setFontPointSize(10)
        if self.ui.tabChoose.currentIndex() == 0:
            # Single-song tab.
            if self.ui.musicLinkOfSong.text() != '' and self.ui.saveFolderOfSong.text() != '':
                self.print_(
                    self.INFO, 'MusicLink > {}'.format(self.ui.musicLinkOfSong.text()))
                self.print_(
                    self.INFO, 'SaveFolder > {}'.format(self.ui.saveFolderOfSong.text()))
                self.clear_()
                if not (os.path.exists(self.ui.saveFolderOfSong.text())):
                    os.mkdir(self.ui.saveFolderOfSong.text())
                # Hand the job over to the worker through the module queues.
                musicLink.put(self.ui.musicLinkOfSong.text())
                saveFolder.put(self.ui.saveFolderOfSong.text())
                self.print_(self.INFO, '开始下载线程')
                self.Thread = downloadNeteaseMusicOneThread()
                self.Thread.signUpData.connect(self.updata)
                self.Thread.singError.connect(self.error)
                self.Thread.signClear.connect(self.clear_)
                self.Thread.start()
            else:
                self.error('请查看是否填写MusicLink或者SaveFolder')
        elif self.ui.tabChoose.currentIndex() == 1:
            # Playlist tab.
            if self.ui.musicLinkOfSongList.text() != '' and self.ui.saveFolderOfSongList.text() != '':
                self.print_(
                    self.INFO, 'MusicLink > {}'.format(self.ui.musicLinkOfSongList.text()))
                self.print_(
                    self.INFO, 'SaveFolder > {}'.format(self.ui.saveFolderOfSongList.text()))
                self.clear_()
                if not (os.path.exists(self.ui.saveFolderOfSongList.text())):
                    os.mkdir(self.ui.saveFolderOfSongList.text())
                musicLink.put(self.ui.musicLinkOfSongList.text())
                saveFolder.put(self.ui.saveFolderOfSongList.text())
                self.print_(self.INFO, '开始下载线程')
                self.Thread = downloadNeteaseMusicListThread()
                self.Thread.signUpData.connect(self.updata)
                self.Thread.singError.connect(self.error)
                self.Thread.signClear.connect(self.clear_)
                self.Thread.start()
            else:
                self.error('请查看是否填写MusicLink或者SaveFolder')
class downloadNeteaseMusicOneThread(QtCore.QThread):
    """Worker thread that downloads a single NetEase song.

    The link and destination folder are taken from the module-level
    queues; progress and errors are reported back via Qt signals so the
    UI thread stays responsive.
    """

    # (percent, message) progress updates for the UI.
    signUpData = QtCore.pyqtSignal(int, str)
    # Error text for the UI.
    singError = QtCore.pyqtSignal(str)
    # Ask the UI to clear its log pane.
    signClear = QtCore.pyqtSignal()

    def fetch_song_list(self, id) -> list:
        # Thin delegation; callers unpack the result as (name, song id list).
        return downloadNeteaseMusiclib.fetch_song_list(id)

    def download_music_by_id(self, saveFolder, information, updateFun) -> bool:
        # Thin delegation; updateFun receives (total length, bytes so far).
        return downloadNeteaseMusiclib.download_music_by_id(
            saveFolder, information, updateFun)

    def getInformationByID(self, id) -> dict:
        # Thin delegation; result exposes 'name', 'author' and 'album' keys.
        return downloadNeteaseMusiclib.getInformationByID(id)

    def fillInformation(self, saveFolder, information) -> bool:
        # Thin delegation; presumably writes the audio tags -- TODO confirm.
        return downloadNeteaseMusiclib.fillInformation(saveFolder, information)

    def updateFun(self, length, t):
        # Map byte progress t/length onto the 3%..83% span of the bar.
        self.signUpData.emit(3+int((t/length)*80),
                             'speed -> {} byte - {} byte'.format(t, length))

    def run(self):
        """Thread body: pull link+folder off the queues and download one song."""
        global saveFolder
        global musicLink
        _saveFloder = saveFolder.get()
        _musicLink = musicLink.get()
        if _saveFloder != '' and _musicLink != '':
            # The song id is the ``id`` query parameter of the share link.
            _id = re.findall(r'song\?id=(\d+)', _musicLink)[0]
            self.signUpData.emit(3, '成功获取id')
            information = self.getInformationByID(_id)
            self.signUpData.emit(9, '{}||{}||{}'.format(
                information['name'], information['author'], information['album']))
            self.signUpData.emit(10,'')
            self.download_music_by_id(
                _saveFloder, information, self.updateFun)
            self.signUpData.emit(80, '下载成功')
            self.fillInformation(_saveFloder, information)
            self.signUpData.emit(100, '写入Tag成功')
        self.quit()
class downloadNeteaseMusicListThread(downloadNeteaseMusicOneThread):
    """Worker thread that downloads every song of a NetEase playlist.

    Reuses the signals and delegation helpers of the single-song thread;
    only run() is overridden.
    """

    def run(self):
        # No ``global`` needed here: the queues are only read via .get(),
        # never rebound.
        _saveFloder = saveFolder.get()
        _musicLink = musicLink.get()
        if _saveFloder != '' and _musicLink != '':
            # Playlist id from the ``id`` query parameter of the share link.
            _id = re.findall(r'playlist\?id=(\d+)', _musicLink)[0]
            self.signUpData.emit(3, '成功获取歌单id')
            songName, songIdList = self.fetch_song_list(_id)
            # Tracks go into a sub-folder named after the playlist.
            if not os.path.exists(os.path.join(_saveFloder,songName)):
                os.mkdir(os.path.join(_saveFloder,songName))
            _saveFloder = os.path.join(_saveFloder,songName)
            for _ in songIdList:
                # Failures of one track must not abort the whole playlist.
                try:
                    self.signClear.emit()
                    self.signUpData.emit(7,'{}'.format(songName))
                    self.signUpData.emit(8,'{} in {}'.format(songIdList.index(_)+1,songIdList.__len__()))
                    information = self.getInformationByID(_)
                    self.signUpData.emit(9, '{}||{}||{}'.format(
                        information['name'], information['author'], information['album']))
                    self.signUpData.emit(10,'')
                    self.download_music_by_id(
                        _saveFloder, information, self.updateFun)
                    self.signUpData.emit(80, '下载成功')
                    self.fillInformation(_saveFloder, information)
                    self.signUpData.emit(100, '写入Tag成功')
                except Exception as e:
                    self.singError.emit(str(e))
if __name__ == "__main__":
    # Bootstrap Qt, wire the generated UI into the main window and run.
    qt_app = QApplication(sys.argv)
    main_ui = Ui_MainWindow()
    window = DownloadMusicWindow(main_ui)
    main_ui.setupUi(window)
    window.show()
    sys.exit(qt_app.exec_())
| StarcoderdataPython |
8064302 | <filename>manager/edgeap.py
import manager
import sys
import signal
import time
import argparse
def main():
# CLI options
argparser = argparse.ArgumentParser(description='EdgeAP management server')
argparser.add_argument('-c', '--config', type=str,
default="manager.conf",
help='configuration file (default is "manager.conf")')
args = argparser.parse_args()
config_file = args.config
# Create manager object
man_obj = manager.Manager(config_file)
# Signal handler
def handler(signal, frame):
print("Error: received signal ", signal, file=sys.stderr)
print("Shutting down...", file=sys.stderr)
man_obj.shutdown()
sys.exit(0)
# Register all catchable signals
catchable_sigs = set(signal.Signals) - {signal.SIGKILL, signal.SIGSTOP, signal.SIGWINCH}
for sig in catchable_sigs:
signal.signal(sig, handler)
# Start the manager servers
man_obj.start_request_server()
man_obj.start_shutdown_server()
# Join threads
for _, thread in man_obj.threads.items():
thread.join()
if __name__ == "__main__":
main()
| StarcoderdataPython |
4836897 | <reponame>rftafas/stdcores<filename>axis_demux/axis_demux_run.py
from os.path import join, dirname
import sys
import glob
# VUnit is a hard requirement: bail out with install instructions if missing.
try:
    from vunit import VUnit
except ImportError:
    # The previous bare ``except`` swallowed every exception (including
    # KeyboardInterrupt); only a failed import should trigger this path.
    # Also fixes the "intall" typo in the user-facing message.
    print("Please, install vunit_hdl with 'pip install vunit_hdl'")
    print("Also, make sure to have either GHDL or Modelsim installed.")
    exit()
# Resolve paths relative to this script's directory.
root = dirname(__file__)

vu = VUnit.from_argv()
vu.add_verification_components()

expert = vu.add_library("expert")
expert.add_source_files(join(root, "../dependencies/stdblocks/libraries/stdexpert/src/*.vhd"))

stdblocks = vu.add_library("stdblocks")
# Collect every stdblocks source, skipping testbenches ("_tb" files).
# The repeated list concatenations are folded into one loop; glob order
# (sync, ram, fifo, prbs, scheduler) is preserved.
# NOTE(review): these globs are relative to the current working directory,
# not to ``root`` -- confirm the script is always launched from its folder.
stdblocks_filelist = []
for lib_dir in ("sync_lib", "ram_lib", "fifo_lib", "prbs_lib", "scheduler_lib"):
    stdblocks_filelist += glob.glob("../dependencies/stdblocks/%s/*.vhd" % lib_dir)
for vhd_file in stdblocks_filelist:
    if "_tb" not in vhd_file:
        stdblocks.add_source_files(vhd_file)

stdcores = vu.add_library("stdcores")
stdcores.add_source_files(join(root, "./*.vhd"))

test_tb = stdcores.entity("axis_demux_tb")
test_tb.scan_tests_from_file(join(root, "axis_demux_tb.vhd"))

vu.main()
| StarcoderdataPython |
3595343 | <filename>example_tester.py
import json
from photo_dash import image
# Render the bundled example payload through photo_dash as a manual check.
with open('resources/example.json') as example_file:
    payload = json.load(example_file)

dash_image = image.DashImage(payload['module'], payload['title'], payload['sections'])
dash_image.create()
| StarcoderdataPython |
9616692 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 09:32:40 2020
@author: twguest
"""
###############################################################################
import sys
sys.path.append("/opt/WPG/") # LOCAL PATH
sys.path.append("/gpfs/exfel/data/user/guestt/WPG") # DESY MAXWELL PATH
sys.path.append("/opt/spb_model") # LOCAL PATH
sys.path.append("/gpfs/exfel/data/user/guestt/spb_model") # DESY MAXWELL PATH
###############################################################################
###############################################################################
import os
import numpy as np
import wpg.srwlib as srwlib
from wpg.wpg_uti_wf import calc_pulse_energy, get_axial_power_density, get_centroid, get_profile_1d
from felpy.model.core.wavefront import Wavefront
# Input directory holding the propagated NanoKB pulse wavefronts (hdf5).
indir = "/gpfs/exfel/data/group/spb-sfx/user/guestt/h5/NanoKB-Pulse/out/"
# Root output directory for all derived "legacy" analysis products.
directoryName = "/gpfs/exfel/data/group/spb-sfx/user/guestt/h5/NanoKB-Pulse/legacyData/"
def mkdir_p(dir):
    """Create directory *dir* (including missing parents) if needed.

    Mirrors ``mkdir -p``: an already-existing directory is not an error.
    """
    # os.mkdir() failed when a parent directory was missing and raced
    # against concurrent creators; makedirs(..., exist_ok=True) creates
    # the whole chain atomically with respect to the existence check.
    os.makedirs(dir, exist_ok=True)
def loadWavefront(fname):
    """Load the stored wavefront *fname* (hdf5) from the module input dir."""
    wavefront = Wavefront()
    wavefront.load_hdf5(indir + fname)
    return wavefront
def storeCentroid(wfr, fname):
    """Save the integrated and per-slice beam centroids of *wfr*."""
    integrated_dir = directoryName + "iCentroid/"
    sliced_dir = directoryName + "sCentroid/"
    mkdir_p(integrated_dir)
    mkdir_p(sliced_dir)

    integrated = get_centroid(wfr, mode='integrated')
    np.save(integrated_dir + fname, integrated)
    del integrated

    sliced = get_centroid(wfr, mode='pulse')
    np.save(sliced_dir + fname, sliced)
    del sliced

    print("Centroids Stored: {}".format(fname))
def storeSpectrum(wfr, fname):
    """Save the axial power density of *wfr* in time and frequency domains."""
    time_dir = directoryName + "tSpectrum/"
    freq_dir = directoryName + "fSpectrum/"
    mkdir_p(time_dir)
    mkdir_p(freq_dir)

    time_profile = get_axial_power_density(wfr, spectrum=False)
    np.save(time_dir + fname, time_profile)
    del time_profile

    freq_profile = get_axial_power_density(wfr, spectrum=True)
    np.save(freq_dir + fname, freq_profile)
    del freq_profile

    print("Spectrums Stored: {}".format(fname))
def storePhotonEnergy(wfr, fname):
    """Save pulse energy/photon count plus source-relative efficiencies."""
    energy_dir = directoryName + "pulseEnergy/"
    mkdir_p(energy_dir)
    efficiency_dir = directoryName + "systemEff/"
    mkdir_p(efficiency_dir)

    # Switch to the time representation for the energy calculation, then
    # restore the frequency representation afterwards.
    srwlib.srwl.SetRepresElecField(wfr._srwl_wf, 't')
    pulse_energy, photons_per_pulse = calc_pulse_energy(wfr)
    srwlib.srwl.SetRepresElecField(wfr._srwl_wf, 'f')

    # Efficiencies relative to the values recorded at the source.
    photon_efficiency = photons_per_pulse / wfr.custom_fields['source']['nPhotons']
    energy_efficiency = pulse_energy / wfr.custom_fields['source']['pulseEn']

    np.save(energy_dir + fname, [pulse_energy, photons_per_pulse])
    np.save(efficiency_dir + fname, [photon_efficiency, energy_efficiency])

    print("Photon Energy Saved: {}".format(fname))
def storeProfiles(wfr, fname):
    """Compute and save the 1D intensity profiles of *wfr*."""
    profDir = directoryName + "profiles/"
    mkdir_p(profDir)

    profile = get_profile_1d(wfr)
    np.save(profDir + fname, profile)

    # Consistency fix: every sibling store* helper logs the file name,
    # but this one used to log the output directory instead.
    print("1D Profiles Stored: {}".format(fname))
def integratedAnalysis(indir, outfile):
    """Collect centroid and pulse-energy stats for every file in *indir*.

    Each row is ``[filename, centroid, pulse energy, photons/pulse]``;
    the stacked array is written to *outfile*.

    NOTE(review): the ``indir`` parameter shadows the module-level
    ``indir`` that loadWavefront() reads -- the two must point to the
    same directory for the listing and the loads to agree.
    """
    data = []
    for f in os.listdir(indir):
        print("Processing: {}".format(f))
        d = []
        wfr = loadWavefront(f)
        cen = get_centroid(wfr, mode = 'integrated')
        # Presumably calc_pulse_energy expects the time representation --
        # switch to 't' first, restore 'f' afterwards (TODO confirm).
        srwlib.srwl.SetRepresElecField(wfr._srwl_wf, 't')
        pulseEn, photons_per_pulse = calc_pulse_energy(wfr)
        srwlib.srwl.SetRepresElecField(wfr._srwl_wf, 'f')
        # Each row is wrapped in its own single-element list ``d``.
        d.append([f, cen, pulseEn, photons_per_pulse])
        data.append(d)
    data = np.asarray(data)
    np.save(outfile, data)
def main(fname):
    """Run the full per-file analysis pipeline for one wavefront file."""
    mkdir_p(directoryName)
    wavefront = loadWavefront(fname)
    storeCentroid(wavefront, fname)
    storeSpectrum(wavefront, fname)
    storePhotonEnergy(wavefront, fname)
    storeProfiles(wavefront, fname)
if __name__ == '__main__':
    # NOTE(review): ``fname`` is never defined at module scope, so this call
    # raises NameError before the integrated analysis below can run --
    # confirm whether this per-file main() invocation should be removed or
    # given an explicit file name.
    main(fname)
    # Re-binds ``indir`` with the same path as the module-level constant.
    indir = "/gpfs/exfel/data/group/spb-sfx/user/guestt/h5/NanoKB-Pulse/out/"
    outfile = "/gpfs/exfel/data/group/spb-sfx/user/guestt/h5/NanoKB-Pulse/integratedEnergyAnalysis.npy"
    integratedAnalysis(indir, outfile)
| StarcoderdataPython |
3216113 | <reponame>yiguanxianyu/PiGIS<filename>ui/mainwindow.py
from PySide6.QtCore import Qt, QStringListModel
from PySide6.QtGui import QFont, QStandardItemModel, QStandardItem
from PySide6.QtWidgets import QMainWindow, QApplication, QSplitter, QWidget, QTabWidget, QListWidgetItem
# import pyqtgraph as pg
from ui import LayerTree, Graph, OptionsPage, AboutPage
from ui.raw import Ui_MainWindow
from project import PiGISProjectController
class MainWindow(QMainWindow):
    """Main window of PiGIS: layer tree, map canvas and layer settings.

    Most slots are thin delegations to the PiGISProjectController; the
    options/about dialogs are created lazily on first use.
    """

    def __init__(self):
        super().__init__()
        # Lazily-created dialogs (see show_options_page / show_about_page).
        self.__optionsPage = None
        self.__aboutPage = None
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.project = PiGISProjectController()

        # self.plot = pg.PlotWidget(enableAutoRange=True)
        # self.ui.graphView.addWidget(self.plot)
        # self.curve = self.plot.plot()

        layer_tree_widget = LayerTree()
        self.layerTree = layer_tree_widget.ui.treeView
        graph_widget = Graph()
        self.graphWidget = graph_widget.ui.graphicsView
        layer_setting_widget = QTabWidget()

        # Add the three panes to a horizontal splitter.
        main_horizontal_splitter = QSplitter(Qt.Horizontal)
        main_horizontal_splitter.addWidget(layer_tree_widget)
        main_horizontal_splitter.addWidget(graph_widget)
        main_horizontal_splitter.addWidget(layer_setting_widget)
        # The splitter must be placed in a layout to become visible.
        self.ui.mainLayout.addWidget(main_horizontal_splitter)

    def open_project(self):
        """Delegate to the project controller."""
        self.project.open_project()

    def new_project(self):
        """Delegate to the project controller."""
        self.project.new_project()

    def save_project(self):
        """Delegate to the project controller."""
        self.project.save_project()

    def save_project_as(self):
        """Delegate to the project controller."""
        self.project.save_project_as()

    def add_layer(self):
        """Delegate to the project controller."""
        self.project.add_layer()

    def copy_layer(self):
        """Duplicate the currently selected layer."""
        self.project.copy_current_layer()

    def switch_editing(self, mode):
        """Toggle edit mode on the currently selected layer."""
        self.project.currentLayer.editable = mode

    def show_options_page(self):
        """Lazily create the options dialog and show it application-modally."""
        if self.__optionsPage is None:
            self.__optionsPage = OptionsPage()
            self.__optionsPage.setWindowModality(Qt.ApplicationModal)
        self.__optionsPage.show()

    def show_about_page(self):
        """Lazily create the about dialog and show it."""
        if self.__aboutPage is None:
            self.__aboutPage = AboutPage()
        self.__aboutPage.show()

    @staticmethod
    def exit_app():
        """Quit the running Qt application."""
        QApplication.instance().quit()
| StarcoderdataPython |
4806590 | <reponame>KingaS03/global-divergences
import torch
from torch.autograd import grad
from divergences import kernel_divergence
from divergences import regularized_ot, hausdorff_divergence, sinkhorn_divergence
from divergences import regularized_ot_visualization, hausdorff_divergence_visualization, sinkhorn_divergence_visualization
def extract_point_cloud(I, affine, threshold=.001) :
    """Bitmap to point cloud.

    Args:
        I: D-dimensional intensity bitmap (Tensor), D in {2, 3}.
        affine: (D+1, D+1) homogeneous matrix mapping voxel indices to
            physical coordinates.
        threshold: intensities <= threshold are treated as background.
            Defaults to the historical hard-coded value of .001, so
            existing callers are unaffected.

    Returns:
        (ind, α_i, x_i): integer voxel indices (N, D), weights (N, 1)
        and physical positions (N, D) of the retained voxels.

    Raises:
        NotImplementedError: if D is not 2 or 3.
    """
    # Threshold, to extract the relevant indices ---------------------------------------
    ind = (I > threshold).nonzero()

    # Extract the weights --------------------------------------------------------------
    D = len(I.shape)
    if   D == 2 : α_i = I[ind[:,0], ind[:,1]]
    elif D == 3 : α_i = I[ind[:,0], ind[:,1], ind[:,2]]
    else : raise NotImplementedError()
    α_i = α_i * affine[0,0] * affine[1,1] # Lazy approximation of the determinant...
    # If we normalize the measures, it doesn't matter anyway.

    # Don't forget the changes of coordinates! -----------------------------------------
    M   = affine[:D,:D] ; off = affine[:D,D]
    x_i = ind.float() @ M.t() + off
    return ind, α_i.view(-1,1), x_i
def sparse_distance_bmp(params, A, B, affine_A, affine_B, normalize=True, info=False, action="measure") :
    """
    Takes as input two torch bitmaps (Tensors).
    Returns a cost and a gradient, encoded as a vector bitmap.

    Args :
        - A and B : two torch bitmaps (Tensors) of dimension D.
        - affine_A and affine_B : two matrices of size (D+1,D+1) (Tensors).
        - normalize : if True, both extracted measures are rescaled to unit mass.
        - info : forwarded as params["heatmaps"] to the divergence routine.
        - action : "measure" -> gradient wrt voxel positions only;
                   "image"   -> gradient wrt positions and weights (2D only).
    """
    D = len(A.shape) # dimension of the ambient space, =2 for slices or =3 for volumes

    # Convert both bitmaps to weighted point clouds.
    ind_A, α_i, x_i = extract_point_cloud(A, affine_A)
    ind_B, β_j, y_j = extract_point_cloud(B, affine_B)
    if normalize :
        α_i = α_i / α_i.sum()
        β_j = β_j / β_j.sum()

    x_i.requires_grad = True
    if action == "image" :
        α_i.requires_grad = True

    # Compute the distance between the *measures* A and B ------------------------------
    print("{:,}-by-{:,} KP: ".format(len(x_i), len(y_j)), end='')
    routines = {
        "kernel"                       : kernel_divergence,
        "regularized_ot"               : regularized_ot,
        "hausdorff"                    : hausdorff_divergence,
        "sinkhorn"                     : sinkhorn_divergence,
        "regularized_ot_visualization" : regularized_ot_visualization,
        "hausdorff_visualization"      : hausdorff_divergence_visualization,
        "sinkhorn_visualization"       : sinkhorn_divergence_visualization,
    }
    routine = routines[ params.get("formula", "hausdorff") ]
    params["heatmaps"] = info
    cost, heatmaps = routine( α_i,x_i, β_j,y_j, **params )

    if action == "image" :
        grad_a, grad_x = grad( cost, [α_i, x_i] ) # gradient wrt the voxels' positions and weights
    elif action == "measure" :
        grad_x = grad( cost, [x_i] )[0] # gradient wrt the voxels' positions

    # Point cloud to bitmap (grad_x) ---------------------------------------------------
    tensor = torch.cuda.FloatTensor if A.is_cuda else torch.FloatTensor
    # Using torch.zero(...).dtype(cuda.FloatTensor) would be inefficient...
    # Let's directly make a "malloc", before zero-ing in place
    grad_A = tensor( *(tuple(A.shape) + (D,)) )
    grad_A.zero_()

    if action == "measure" :
        if   D == 2 : grad_A[ind_A[:,0],ind_A[:,1],            :] = grad_x[:,:]
        elif D == 3 : grad_A[ind_A[:,0],ind_A[:,1],ind_A[:,2], :] = grad_x[:,:]
        else : raise NotImplementedError()
    elif action == "image" :
        if D == 2 :
            dim_0 = affine_A[0,0] ; print(dim_0)
            # Spread the positional gradient over the 2x2 voxel neighborhood.
            grad_A[ind_A[:,0]  ,ind_A[:,1]  , :] += .25 * dim_0 * grad_x[:,:]
            grad_A[ind_A[:,0]+1,ind_A[:,1]  , :] += .25 * dim_0 * grad_x[:,:]
            grad_A[ind_A[:,0]  ,ind_A[:,1]+1, :] += .25 * dim_0 * grad_x[:,:]
            grad_A[ind_A[:,0]+1,ind_A[:,1]+1, :] += .25 * dim_0 * grad_x[:,:]

            # BUGFIX: this used to read ``alpha_i``, an undefined name
            # (NameError at runtime); the weights tensor is ``α_i``.
            # NOTE(review): grad_a/α_i are (N,1) views while the indexed
            # slices below are (N,) -- confirm the intended broadcasting.
            grad_a = grad_a[:] * α_i[:]
            grad_A[ind_A[:,0]  ,ind_A[:,1]  , 0] -= .5*grad_a[:]
            grad_A[ind_A[:,0]+1,ind_A[:,1]  , 0] += .5*grad_a[:]
            grad_A[ind_A[:,0]  ,ind_A[:,1]+1, 0] -= .5*grad_a[:]
            grad_A[ind_A[:,0]+1,ind_A[:,1]+1, 0] += .5*grad_a[:]

            grad_A[ind_A[:,0]  ,ind_A[:,1]  , 1] -= .5*grad_a[:]
            grad_A[ind_A[:,0]  ,ind_A[:,1]+1, 1] += .5*grad_a[:]
            grad_A[ind_A[:,0]+1,ind_A[:,1]  , 1] -= .5*grad_a[:]
            grad_A[ind_A[:,0]+1,ind_A[:,1]+1, 1] += .5*grad_a[:]
            # (an ``if False:`` dead-code block that overwrote channels 0/1
            #  with grad_a has been removed)

    # N.B.: we return "PLUS gradient", i.e. "MINUS a descent direction".
    return cost, grad_A.detach(), heatmaps
| StarcoderdataPython |
3376718 | from numbers import Number
from math import sqrt, pow
from random import uniform
# Constants that can be modified to get different results.
# Current values have been arbitrarily chosen.
NB_CABLES = 2
MIN_LOAD = 0
MAX_LOAD = 150
class UNumber:
    """A scalar with first-order uncertainty: a mean and a variance.

    Arithmetic propagates the variance using the standard delta-method
    (Taylor expansion) approximations for sums and ratios of independent
    random variables.
    """

    def __init__(self, mean=0., variance=0.):
        self.mean = mean
        self.variance = variance

    def __add__(self, other):
        """Return self + other; variances of independent terms add."""
        if not isinstance(other, (UNumber, Number)):
            raise TypeError("UNumber can only be added with another UNumber or a Number."
                            "Actual type: %s" % type(other))
        if isinstance(other, Number):
            other = UNumber(other, 0)
        return UNumber(self.mean + other.mean, self.variance + other.variance)

    def __truediv__(self, other):
        """Return self / other with delta-method uncertainty propagation.

        E[X/Y] ~ ux/uy + ux*vy/uy^3 and Var[X/Y] ~ vx/uy^2 + ux^2*vy/uy^4.
        """
        if not isinstance(other, (UNumber, Number)):
            raise TypeError("UNumber can only be added with another UNumber or a Number."
                            "Actual type: %s" % type(other))
        if isinstance(other, Number):
            other = UNumber(other, 0)
        value = (self.mean / other.mean) + (self.mean * other.variance) / (pow(other.mean, 3))
        # BUGFIX: the first term previously divided by other.mean instead of
        # other.mean**2; the delta method requires Var[X] / mean(Y)^2.
        variance = (self.variance / pow(other.mean, 2)) + (pow(self.mean, 2) * other.variance) / (pow(other.mean, 4))
        return UNumber(value, variance)

    # BUGFIX: ``__div__`` is the Python 2 operator hook -- in Python 3 the
    # ``/`` operator only calls ``__truediv__`` and would have raised
    # TypeError.  Kept as an alias for any direct callers.
    __div__ = __truediv__

    def __str__(self):
        return "UNumber(%s, %s)" % (self.mean, self.variance)
class SmartGrid:
    """A collection of cables whose average load can be queried."""

    def __init__(self):
        self.cables = []

    def compute_avg_load(self):
        """Return the mean load over all cables as a UNumber."""
        total = UNumber(0, 0)
        for cable in self.cables:
            try:
                total += cable.load
            except AttributeError:
                raise AttributeError("%s is not an instance of Cable class but a instance of %s" % (cable, type(cable)))
        return total / len(self.cables)
class Cable:
    """A single grid cable, identified by ``id``, carrying an uncertain load."""

    def __init__(self, p_id, p_load=None):
        self.id = p_id
        # BUGFIX: the old default ``p_load=UNumber(0, 0)`` was a single
        # mutable instance shared by every Cable constructed without an
        # explicit load; build a fresh one per instance instead.
        self.load = UNumber(0, 0) if p_load is None else p_load

    def __str__(self):
        return "Cable(%s, %s)" % (self.id, self.load)
def main():
    """Build a grid of NB_CABLES randomly loaded cables and report its average load."""
    grid = SmartGrid()
    for cable_id in range(0, NB_CABLES):
        load = uniform(MIN_LOAD, MAX_LOAD)
        # Deviation drawn uniformly below sqrt(load / 2).
        max_deviation = sqrt(load / 2)
        deviation = uniform(0, max_deviation)
        grid.cables.append(Cable(cable_id, UNumber(load, deviation)))

    for cable in grid.cables:
        print("%s" % cable)

    grid_avg_load = grid.compute_avg_load()
    print("Gid load is equal to %s" % grid_avg_load)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
165722 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import WebDriverException
import time
import pandas as pd
from datetime import datetime
import platform
import credsPASSWORDS as credsPASSWORDS
import re
from sqlalchemy import engine, types
def sqlEngineMaker(modeIn):
    """Create a SQLAlchemy engine for the database selected by *modeIn*.

    Known modes: 'mysql', 'digitalOcean' and 'wordPressLocal'.  On macOS
    ('Darwin') the engine connects through a unix socket, elsewhere via
    host:port.
    """
    system = platform.system()

    # Pick credentials and the DBAPI driver for the requested backend.
    if modeIn == 'mysql':
        credentials = credsPASSWORDS.mySql
        connection_type = 'pymysql'
    elif modeIn == 'digitalOcean':
        credentials = credsPASSWORDS.digitalOcean
        connection_type = 'mysqlconnector'
    elif modeIn == 'wordPressLocal':
        credentials = credsPASSWORDS.wordPressLocal
        connection_type = 'pymysql'
    else:
        raise Exception("Don't support database: " + modeIn)

    # mysql+<driver>://<user>:<password>@<host>[:<port>]/<dbname>
    base = "mysql+" + connection_type + "://" + credentials['user'] + ":" + credentials['password'] + "@" + credentials['host']
    if system == 'Darwin':
        connection_string = base + "?unix_socket=" + credentials['socket']
    else:
        connection_string = base + ":" + credentials['port']
    return engine.create_engine(connection_string)
def runChrome():
    """Start a Chrome webdriver configured for scraping (fixed 1920x1080 window)."""
    chrome_options = Options()
    chrome_options.add_argument("--window-size=1920,1080")
    chrome_options.add_argument("--verbose")
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # chrome_options.add_argument("--headless")  # enable for headless runs

    # The bundled chromedriver binary depends on the host OS.
    current_os = platform.system()
    if current_os == 'Windows':
        service = Service('./scraper/chromedriver.exe')
        driver = webdriver.Chrome(options=chrome_options, service=service)
    elif current_os == 'Darwin':
        driver = webdriver.Chrome(options=chrome_options, executable_path='./scraper/chromedriver')
    else:
        raise Exception("Don't have an executable for: " + current_os)
    return driver
def loadPage(wd, url):
    """Open *url* in *wd* and keep expanding the LinkedIn job-result list.

    Scrolls to the bottom and clicks the "show more" button until the
    list holds as many entries as the advertised job count (capped at a
    few rounds while testing).
    """
    wd.get(url)
    no_of_jobs = int(wd.find_element(By.CSS_SELECTOR, 'h1>span').get_attribute('innerText'))

    def loaded_jobs():
        return len(wd.find_element(By.CLASS_NAME, 'jobs-search__results-list').find_elements(By.TAG_NAME, 'li'))

    # load the whole page with a combination of scrolling and clicking a "show more" button
    print(no_of_jobs)
    print(loaded_jobs())
    rounds = 2
    while loaded_jobs() < no_of_jobs and rounds < 4:  # rounds < 4 for testing - faster
        print(loaded_jobs())
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        rounds = rounds + 1
        try:
            wd.find_element(By.XPATH, '//*[@id="main-content"]/section[2]/button').click()
            time.sleep(4)
        except WebDriverException:
            pass
    time.sleep(4)
    return
def main():
    """Scrape LinkedIn job listings, collect per-job details and persist them.

    Pipeline: open the search URL, expand the result list, read the card
    metadata, click each card for its full description, then assemble a
    DataFrame and write it to a MySQL table.
    """
    wd = runChrome()
    urls = [
        'https://www.linkedin.com/jobs/search?keywords=Cybersecurity&location=Nashville%2C%20Tennessee%2C%20United%20States&geoId=105573479&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0'
        ,'https://www.google.com'
    ]
    url = urls[0]
    loadPage(wd,url)
    # now that everything is loaded, get info about the page
    # TODO make get job info function
    job_lists = wd.find_element(By.CLASS_NAME, 'jobs-search__results-list')
    jobs = job_lists.find_elements(By.TAG_NAME, 'li')  # return a list
    job_id = []
    job_urn = []
    job_title = []
    company_name = []
    location = []
    date = []
    job_link = []
    k = 0
    for job in jobs:
        # NOTE(review): caps the scrape at 2 jobs -- looks like leftover
        # test code; confirm before a production run.
        if k > 1:
            break
        # job_id0 = job.get_attribute('data-id') # don't think job id is a thing anymore
        job_id0 = k
        job_id.append(job_id0)
        # jar[k]=job_id0
        k = k + 1
        # URN id is the 4th ':'-separated field of the entity URN.
        job_urn0 = job.find_element(By.CSS_SELECTOR, 'div').get_attribute('data-entity-urn')
        job_urn_id = job_urn0.split(":")[3]
        job_urn.append(job_urn_id)
        # main-content > section.two-pane-serp-page__results-list > ul > li:nth-child(2) > div
        job_title0 = job.find_element(By.CSS_SELECTOR, 'div > div.base-search-card__info > h3').get_attribute(
            'innerText')
        job_title.append(job_title0)
        company_name0 = job.find_element(By.CSS_SELECTOR, 'div > div.base-search-card__info > h4 > a').get_attribute(
            'innerText')
        company_name.append(company_name0)
        location0 = job.find_element(By.CSS_SELECTOR, 'div > div.base-search-card__info > div > span').get_attribute(
            'innerText')
        location.append(location0)
        date0 = job.find_element(By.CSS_SELECTOR, 'div > div.base-search-card__info > div > time').get_attribute(
            'datetime')
        date.append(date0)
        job_link0 = job.find_element(By.CSS_SELECTOR, 'div > a').get_attribute('href')
        job_link.append(job_link0)
    # TODO make get descriptions function
    jd = []
    descriptions = []
    # len(jobs)
    for item in range(k):
        descriptions0 = {
            'ID': job_id[item],
            'URN': "",
            'Seniority level': "",
            'Employment type': "",
            'Job function': "",
            'Industries': "",
        }
        # Any selenium failure marks every detail field as 'error' instead
        # of aborting the whole scrape.
        try:
            # clicking job to view job details
            job_click_path = F'//*[@id="main-content"]/section[2]/ul/li[{item + 1}]/div/a'
            job_click_element = wd.find_element(By.XPATH, job_click_path)
            desc_urn_match = re.search("\d*(?=\?refId)", job_click_element.get_attribute('href'))
            desc_urn = desc_urn_match.group(0)
            descriptions0['URN'] = desc_urn
            job_click_element.click()
            time.sleep(5)
            detail_path = '/html/body/div[1]/div/section'
            detail_section = wd.find_element(By.XPATH, detail_path)
            show_more_click_path = 'div.decorated-job-posting__details > section.core-section-container.description > div > div > section > button.show-more-less-html__button.show-more-less-html__button--more'
            detail_section.find_element(By.CSS_SELECTOR, show_more_click_path).click()
            jd_path = 'show-more-less-html__markup'
            jd0 = wd.find_element(By.CLASS_NAME, jd_path).get_attribute('innerText')
            jd.append(jd0)
            # descriptions exist in a series of <li> containers. it's easier to just loop through that list
            description_class = 'description__job-criteria-list'
            description_element = wd.find_element(By.CLASS_NAME, description_class)
            item_class = 'description__job-criteria-item'
            description_items = description_element.find_elements(By.CLASS_NAME, item_class)
            item_description_class = 'description__job-criteria-text'
            item_name = 'description__job-criteria-subheader'
            for d_item in description_items:
                item0 = d_item.find_element(By.CLASS_NAME, item_description_class).get_attribute('innerText')
                d_item_name = d_item.find_element(By.CLASS_NAME, item_name).get_attribute('innerText')
                descriptions0[d_item_name] = item0
            descriptions.append(descriptions0)
        except WebDriverException:
            jd0 = 'error'
            jd.append(jd0)
            descriptions0['Seniority level'] = 'error'
            descriptions0['Employment type'] = 'error'
            descriptions0['Job function'] = 'error'
            descriptions0['Industries'] = 'error'
            descriptions.append(descriptions0)
    wd.close()
    job_data = pd.DataFrame({'ID': job_id,
                             'URN': job_urn,
                             'Date': date,
                             'Company': company_name,
                             'Title': job_title,
                             'Location': location,
                             'Description': jd,
                             'Link': job_link,
                             })
    description_data = pd.DataFrame.from_dict(data=descriptions)
    print(job_data)
    print(description_data)
    full_data = job_data.join(description_data, how='inner', on='ID', lsuffix='_left', rsuffix='_right')
    # TODO make output function
    # cleaning description column
    full_data['Description'] = full_data['Description'].str.replace('\n', ' ')
    filename = 'LinkedIn Job Data_Data Scientist' + datetime.now().strftime("%m%d%Y%H%M%S") + '.csv'
    #TODO parameterize output - csv, sql, both, etc
    # full_data.to_csv(filename, index=False, sep='|')
    # job_data.to_excel('LinkedIn Job Data_Data Scientist.xlsx', index = False)
    print(full_data)
    # NOTE(review): this types tuple has more entries than full_data has
    # columns after the suffixed join; zip() silently truncates -- confirm
    # the intended per-column SQL types.
    test_types = dict(
        zip(full_data.columns.tolist(), (types.VARCHAR(length=20), types.VARCHAR(length=20), types.VARCHAR(length=200)
                                         , types.VARCHAR(length=20), types.VARCHAR(length=200),
                                         types.VARCHAR(length=400), types.VARCHAR(length=400), types.TEXT(length=20000)
                                         , types.VARCHAR(length=400), types.VARCHAR(length=20),
                                         types.VARCHAR(length=200), types.VARCHAR(length=400),
                                         types.VARCHAR(length=400), types.VARCHAR(length=400),
                                         types.VARCHAR(length=400))))
    full_data = full_data.astype(str)
    mode='wordPressLocal'
    mysql_engine = sqlEngineMaker(mode)
    # Target table/schema depend on which database backend is selected.
    if mode == "mysql":
        output_table="jobs"
        output_schema="scraper"
    elif mode == "wordPressLocal":
        output_table="scraper_jobs"
        output_schema="local"
    full_data.to_sql(output_table, con=mysql_engine, schema=output_schema, if_exists='append', index=False, chunksize=None, dtype=test_types, method=None)
    quit() # windows hangs? chromedriver issue
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5190326 | <reponame>microsoft/semantic_parsing_with_constrained_lm<filename>src/semantic_parsing_with_constrained_lm/configs/lib/common.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from enum import Enum
from typing import Callable, List, Optional
from semantic_parsing_with_constrained_lm.datum import DatumSub, FullDatumSub
from semantic_parsing_with_constrained_lm.fewshot import (
GPT2TokenizerQuirks,
PromptBuilder,
ShuffleAndSampleChunks,
TopKSimilar,
TruncateTokenLength,
)
from semantic_parsing_with_constrained_lm.lm import (
AutoregressiveModel,
IncrementalLanguageModel,
Seq2SeqModel,
)
from semantic_parsing_with_constrained_lm.model import (
BeamSearchSemanticParser,
DatumProblemFactory,
FewShotLMProblemFactory,
IncrementalLMSimilarityFunction,
Seq2SeqProblemFactory,
)
from semantic_parsing_with_constrained_lm.search import PartialParse
class PromptOrder(Enum):
    """How retrieved training examples are ordered inside a few-shot prompt."""

    # Shuffle the training examples inside the prompt.
    Shuffle = 0
    # Put the best (most similar to test) training example earliest in the prompt.
    BestFirst = 1
    # Put the best training example at the end of the prompt.
    BestLast = 2
def make_semantic_parser(
    train_data: List[FullDatumSub],
    lm: AutoregressiveModel,
    use_gpt3: bool,
    global_max_steps: int,
    beam_size: int,
    partial_parse_builder: Callable[[DatumSub], PartialParse],
    max_steps_fn: Callable[[DatumSub], Optional[int]],
    prompt_order: PromptOrder = PromptOrder.Shuffle,
    # Settings for using autoregressive models in a few-shot in-context setting
    similarity_lm: Optional[IncrementalLanguageModel] = None,
    prompt_builder: Optional[PromptBuilder] = None,
    num_examples_per_prompt: int = 20,
) -> BeamSearchSemanticParser:
    """Construct a BeamSearchSemanticParser for the given language model.

    Two model families are supported:
    - IncrementalLanguageModel, used few-shot/in-context: when ``use_gpt3`` is
      True, each prompt is built from the ``num_examples_per_prompt`` training
      examples most similar to the test datum (ordered per ``prompt_order`` and
      truncated to the token budget); otherwise no training examples are used.
    - Seq2SeqModel, used directly as a fine-tuned parser.

    Args:
        train_data: Candidate in-context examples (only used when ``use_gpt3``).
        lm: The language model to decode with.
        use_gpt3: Whether to build few-shot prompts from ``train_data``.
        global_max_steps: Token budget reserved for the completion.
        beam_size: Beam width for decoding.
        partial_parse_builder: Creates the constrained-decoding state per datum.
        max_steps_fn: Per-datum cap on decoding steps (None for no cap).
        prompt_order: Ordering of retrieved examples inside the prompt.
        similarity_lm: Model scoring train/test similarity; defaults to ``lm``.
        prompt_builder: Prompt template; a default demo builder is used if None.
        num_examples_per_prompt: Number of training examples to retrieve.

    Raises:
        TypeError: if ``lm`` is neither an IncrementalLanguageModel nor a
            Seq2SeqModel. (Previously this case fell through and crashed later
            with a confusing NameError on ``problem_factory``.)
    """
    problem_factory: DatumProblemFactory
    if isinstance(lm, IncrementalLanguageModel):
        if prompt_builder is None:
            prompt_builder = PromptBuilder.for_demo(
                do_include_context=False, use_preamble=True
            )
        if similarity_lm is None:
            similarity_lm = lm
        if use_gpt3:
            if prompt_order == PromptOrder.Shuffle:
                # Retrieve the top-k most similar examples, then shuffle them.
                train_selectors = [
                    TopKSimilar(
                        IncrementalLMSimilarityFunction(similarity_lm),
                        k=num_examples_per_prompt,
                    ),
                    ShuffleAndSampleChunks(
                        num_samples=1,
                        num_per_sample=num_examples_per_prompt,
                        random_seed=0,
                    ),
                    TruncateTokenLength(
                        tokenizer=lm.tokenizer,
                        completion_length=global_max_steps,
                        prompt_builder=prompt_builder,
                    ),
                ]
            elif prompt_order == PromptOrder.BestFirst:
                train_selectors = [
                    TopKSimilar(
                        IncrementalLMSimilarityFunction(similarity_lm),
                        k=num_examples_per_prompt,
                    ),
                    TruncateTokenLength(
                        tokenizer=lm.tokenizer,
                        completion_length=global_max_steps,
                        prompt_builder=prompt_builder,
                    ),
                ]
            elif prompt_order == PromptOrder.BestLast:
                # best_first=False puts the most similar example last; reverse
                # truncation then drops the least similar examples first.
                train_selectors = [
                    TopKSimilar(
                        IncrementalLMSimilarityFunction(similarity_lm),
                        k=num_examples_per_prompt,
                        best_first=False,
                    ),
                    TruncateTokenLength(
                        tokenizer=lm.tokenizer,
                        completion_length=global_max_steps,
                        prompt_builder=prompt_builder,
                        reverse=True,
                    ),
                ]
        else:
            train_selectors = []
        tokenizer_quirks = GPT2TokenizerQuirks(lm.tokenizer)
        problem_factory = FewShotLMProblemFactory(
            train_data=train_data if use_gpt3 else [],
            train_selectors=train_selectors,
            prompt_builder=prompt_builder,
            incremental_lm=lm,
            partial_parse_builder=partial_parse_builder,
            tokenizer_quirks=tokenizer_quirks,
            length_normalization=0.7,
            top_k=beam_size,
        )
        finalizer = lambda tokens: tokenizer_quirks.postprocess_result(
            lm.tokenizer.decode(tokens, clean_up_tokenization_spaces=False)
        )
    elif isinstance(lm, Seq2SeqModel):
        problem_factory = Seq2SeqProblemFactory(
            seq2seq_model=lm,
            partial_parse_builder=partial_parse_builder,
            length_normalization=0.7,
            top_k=beam_size,
        )
        finalizer = lm.decode_output
    else:
        # Fail fast with a clear error instead of a NameError further down.
        raise TypeError(
            f"Unsupported model type {type(lm).__name__}; expected "
            "IncrementalLanguageModel or Seq2SeqModel"
        )
    return BeamSearchSemanticParser(
        problem_factory=problem_factory,
        tokenizer=lm.tokenizer,
        finalizer=finalizer,
        beam_size=beam_size,
        max_steps_fn=max_steps_fn,
    )
| StarcoderdataPython |
from flask_caching import Cache

# Cache backed by a local Redis instance (database 0).
config = dict(
    CACHE_TYPE='redis',
    CACHE_REDIS_URL='redis://localhost:6379/0',
)

cache = Cache(config=config)
| StarcoderdataPython |
8154671 | import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import function as tff
def energy_distance(f_sample, f_data):
    """Sorted-matching loss between sample and data features, plus gradients.

    Both inputs are lists of equally-sized 2-D arrays (chunks). Per feature
    column, the sorted sample values are matched against the sorted data
    values; the per-entry differences (scattered back to the sample's row
    order) serve as gradients, and the loss is their mean square.

    Returns:
        (loss, grads) where grads is re-split into the original chunks.
    """
    n_chunks = len(f_sample)
    samples = np.concatenate(f_sample)
    data = np.concatenate(f_data)
    grads = np.zeros_like(samples)
    for col in range(samples.shape[1]):
        s_order = np.argsort(samples[:, col])
        d_order = np.argsort(data[:, col])
        grads[s_order, col] = samples[s_order, col] - data[d_order, col]
    loss = np.mean(np.square(grads))
    return loss, np.split(grads, n_chunks, 0)
def int_shape(x):
    """Return the static shape of tensor `x` as a plain Python list."""
    static_shape = x.get_shape()
    return static_shape.as_list()
def weight_decay(params):
    """Sum of squared entries over all rank>=2 parameters (weight matrices).

    Biases and scalars (rank < 2) are excluded from the penalty.
    """
    total = 0.
    for param in params:
        if len(param.get_shape()) < 2:
            continue  # skip biases / scalars
        total = total + tf.reduce_sum(tf.square(param))
    return total
def adamax_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
    """Build a TF1 graph op performing one Adamax step on `params`.

    Args:
        params: list of tf.Variable to update.
        cost_or_grads: a scalar loss tensor, or a precomputed list of
            gradients (one per param).
        lr: learning rate.
        mom1: first-moment decay; 0 disables momentum entirely.
        mom2: decay for the exponentially weighted infinity norm.

    Returns:
        A grouped op applying all updates.

    NOTE(review): the second positional argument of tf.Variable is
    `trainable` in TF1, so the name strings below land there (truthy =>
    trainable optimizer state) instead of becoming variable names — looks
    unintended; verify against the rest of the training code.
    """
    updates = []
    if type(cost_or_grads) is not list:
        grads = tf.gradients(cost_or_grads, params)
    else:
        grads = cost_or_grads
    for p, g in zip(params, grads):
        # mg: exponentially weighted infinity norm (the "max" in Adamax).
        mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adamax_mg')
        if mom1>0:
            # v: first moment (momentum) accumulator.
            v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adamax_v')
            v_t = mom1*v + (1. - mom1)*g
            updates.append(v.assign(v_t))
        else:
            v_t = g
        # 1e-8 guards against division by zero in the norm.
        mg_t = tf.maximum(mom2*mg + 1e-8, tf.abs(g))
        g_t = v_t / mg_t
        p_t = p - lr * g_t
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    return tf.group(*updates)
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
    """Build a TF1 graph op performing one Adam step (with bias correction).

    Args:
        params: list of tf.Variable to update.
        cost_or_grads: a scalar loss tensor, or a precomputed list of
            gradients (one per param).
        lr: learning rate.
        mom1: first-moment (mean) decay; 0 disables momentum.
        mom2: second-moment (uncentered variance) decay.

    Returns:
        A grouped op applying all updates and incrementing the step counter.

    NOTE(review): tf.Variable's second positional argument is `trainable`
    in TF1, so 'adam_t' / the name strings below land there rather than
    naming the variables — verify this is intended.
    """
    updates = []
    if type(cost_or_grads) is not list:
        grads = tf.gradients(cost_or_grads, params)
    else:
        grads = cost_or_grads
    # t: global step counter used for bias correction, shared by all params.
    t = tf.Variable(1., 'adam_t')
    for p, g in zip(params, grads):
        # mg: second-moment accumulator; v: first-moment accumulator.
        mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_mg')
        if mom1>0:
            v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_v')
            v_t = mom1*v + (1. - mom1)*g
            # Bias-corrected first moment.
            v_hat = v_t / (1. - tf.pow(mom1,t))
            updates.append(v.assign(v_t))
        else:
            v_hat = g
        mg_t = mom2*mg + (1. - mom2)*tf.square(g)
        # Bias-corrected second moment.
        mg_hat = mg_t / (1. - tf.pow(mom2,t))
        g_t = v_hat / tf.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    updates.append(t.assign_add(1))
    return tf.group(*updates)
def nesterov_updates(params, cost_or_grads, lr=0.01, mom1=0.9):
    """Build a TF1 graph op performing one Nesterov-momentum SGD step.

    Args:
        params: list of tf.Variable to update.
        cost_or_grads: a scalar loss tensor, or a precomputed list of
            gradients (one per param).
        lr: learning rate.
        mom1: momentum coefficient.

    Returns:
        A grouped op applying all updates.

    NOTE(review): tf.Variable's second positional argument is `trainable`
    in TF1, so the name string below lands there — verify intended.
    """
    updates = []
    if type(cost_or_grads) is not list:
        grads = tf.gradients(cost_or_grads, params)
    else:
        grads = cost_or_grads
    for p, g in zip(params, grads):
        # v: velocity accumulator for this parameter.
        v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_nesterov_mom')
        v_new = mom1*v - lr*g
        # Nesterov look-ahead form of the parameter update.
        p_new = p - mom1*v + (1. + mom1)*v_new
        updates.append(p.assign(p_new))
        # Adding 0.*p_new creates a data dependency so the velocity assign
        # runs only after p has been updated (graph mode has no implicit order).
        updates.append(v.assign(v_new + 0.*p_new)) # to ensure it runs after updating p
    return tf.group(*updates)
def get_var_maybe_avg(var_name, ema, **kwargs):
    """Fetch a variable by name; if `ema` is given, return its moving average."""
    var = tf.get_variable(var_name, **kwargs)
    return var if ema is None else ema.average(var)
def get_name(layer_name, counters):
    """Return a unique '<layer_name>_<i>' name, bumping the per-name counter."""
    index = counters.get(layer_name, 0)
    counters[layer_name] = index + 1
    return f'{layer_name}_{index}'
# get params, using data based initialization & (optionally) weight normalization, and using moving averages
def get_params(layer_name, x=None, init=False, ema=None, use_W=True, use_g=True, use_b=True,
               f=tf.matmul, weight_norm=True, init_scale=1., filter_size=None, num_units=None, pre_activation=None):
    """Create or fetch the parameters (W, g, b) of a layer under `layer_name`.

    When `init` is True, performs data-dependent initialization: the layer is
    applied to `x` and g/b (or W directly) are assigned so the output has
    roughly zero mean and `init_scale` stddev. When False, returns existing
    variables, optionally swapped for their EMA shadows via `ema`.

    Args:
        layer_name: variable scope for the parameters.
        x: input tensor (or list of tensors to be concatenated on the last axis);
            only required when init=True.
        init: whether to perform data-dependent initialization.
        ema: optional tf.train.ExponentialMovingAverage to read shadows from.
        use_W/use_g/use_b: which parameters the layer needs.
        f: the layer's forward function, used for the init pass.
        weight_norm: whether W is the L2-normalized version of raw V.
        init_scale: target stddev of the initialized output.
        filter_size: spatial filter size for conv layers (None for dense).
        num_units: number of output units (defaults to input channels).
        pre_activation: 'celu'/'crelu' double the effective input channels.

    Returns:
        Dict with entries among 'W', 'g', 'b' (g only if there is no W).
    """
    params = {}
    with tf.variable_scope(layer_name):
        if init:
            if type(x) is list:
                xs = int_shape(x[0])
                # Concatenation on the last axis sums the channel counts.
                xs[-1] = np.sum([int_shape(xi)[-1] for xi in x])
            else:
                xs = int_shape(x)
            if num_units is None:
                num_units = xs[-1]
            # Normalize moments over all axes except the channel axis.
            norm_axes = [i for i in np.arange(len(xs) - 1)]
            # weights
            if use_W:
                nr_in = xs[-1]
                # 'celu'/'crelu' concatenate [x, -x], doubling input channels.
                if pre_activation in ['celu', 'crelu']:
                    nr_in *= 2
                if filter_size is not None:
                    V = tf.get_variable('V', filter_size + [nr_in, num_units], tf.float32,
                                        tf.random_normal_initializer(0, 0.05), trainable=True)
                else:
                    V = tf.get_variable('V', [nr_in, num_units], tf.float32,
                                        tf.random_normal_initializer(0, 0.05), trainable=True)
                if weight_norm:
                    # W is the direction; magnitude comes from g (if used).
                    W = tf.nn.l2_normalize(V, [i for i in np.arange(len(V.get_shape())-1)])
                else:
                    W = V
            # moments for normalization
            if use_W:
                x_init = f(x, W)
            else:
                x_init = x
            m_init, v_init = tf.nn.moments(x_init, norm_axes)
            # scale
            init_g = init_scale / tf.sqrt(v_init)
            if use_g:
                g = tf.get_variable('g', dtype=tf.float32, shape=num_units, initializer=tf.ones_initializer(), trainable=True)
                g = g.assign(init_g)
                if use_W:
                    W *= tf.reshape(g, [1]*(len(W.get_shape())-1)+[num_units])
                else: # g is used directly if there are no weights
                    params['g'] = g
                m_init *= init_g
            elif use_W and not weight_norm: # init is the same as when using weight norm
                W = V.assign(tf.reshape(init_g, [1]*(len(W.get_shape())-1) + [num_units]) * W)
                m_init *= init_g
            # (possibly) scaled weights
            if use_W:
                params['W'] = W
            # bias
            if use_b:
                b = tf.get_variable('b', dtype=tf.float32, shape=num_units, initializer=tf.zeros_initializer(), trainable=True)
                # Center the output: subtract the (rescaled) data mean.
                b = b.assign(-m_init)
                params['b'] = b
        else:
            # get variables, use the exponential moving average if provided
            if use_b:
                params['b'] = get_var_maybe_avg('b', ema)
            if use_g:
                g = get_var_maybe_avg('g', ema)
                if not use_W: # g is used directly if there are no weights
                    params['g'] = g
            if use_W:
                V = get_var_maybe_avg('V', ema)
                Vs = int_shape(V)
                if weight_norm:
                    W = tf.nn.l2_normalize(V, [i for i in np.arange(len(Vs)-1)])
                else:
                    W = V
                if use_g:
                    W *= tf.reshape(g, [1]*(len(Vs)-1) + [Vs[-1]])
                params['W'] = W
    return params
''' memory saving stuff '''
# Global registry of memoized layer functions, keyed by a name that encodes
# the layer type and its configuration (see _dense/_conv2d/global_avg_pool).
mem_funcs = {}
def apply_pre_activation(x, pre_activation, axis=3):
    """Concatenate input tensor(s) on `axis` and apply a pre-activation.

    Args:
        x: a tensor, or a tuple/list of tensors to concatenate.
        pre_activation: one of None, 'celu', 'crelu', 'elu', 'relu'.
            'celu'/'crelu' concatenate [xi, -xi] before the nonlinearity,
            doubling the channel count.
        axis: concatenation axis (3 = channels for NHWC conv features).

    Returns:
        The (possibly activated) concatenated tensor.

    Raises:
        ValueError: for an unknown `pre_activation`. (Previously this raised
        a bare string, which itself fails with a TypeError in Python 3.)
    """
    if type(x) is tuple:
        x = list(x)
    elif type(x) is not list:
        x = [x]
    if pre_activation is None:
        return tf.concat(x, axis)
    elif pre_activation == 'celu':
        return tf.nn.elu(tf.concat([xs for xi in x for xs in [xi, -xi]], axis))
    elif pre_activation == 'crelu':
        return tf.nn.relu(tf.concat([xs for xi in x for xs in [xi, -xi]], axis))
    elif pre_activation == 'elu':
        return tf.nn.elu(tf.concat(x, axis))
    elif pre_activation == 'relu':
        return tf.nn.relu(tf.concat(x, axis))
    else:
        raise ValueError('unsupported pre-activation: %r' % (pre_activation,))
def __dense(W, pre_activation, x):
    """Dense forward pass: pre-activate the (list of) inputs, then matmul with W."""
    activated = apply_pre_activation(x, pre_activation, 1)
    return tf.matmul(activated, W)
def __dense_grad(op, grad, pre_activation):
    """Custom gradient for __dense: re-run the forward op and backprop `grad`."""
    with tf.control_dependencies([grad]):
        W, x = op.inputs
        y = __dense(W, pre_activation, x)
        return tf.gradients(ys=y, xs=[W, x], grad_ys=grad)
def _dense(x, W, pre_activation=None, mem_funcs=mem_funcs):
    """Memoized dense layer: apply `pre_activation` then matmul with W.

    The forward function is cached in `mem_funcs` under a sanitized name so
    that each configuration is only built once. The @tff.Defun decoration
    (which re-ran the forward pass in the backward pass to save memory) is
    currently disabled.
    """
    # Encode the configuration into a Defun-safe function name.
    func_name = ('_dense'+str(pre_activation)).replace(' ','_').replace('[','_').replace(']','_').replace(',','_')
    if func_name in mem_funcs:
        my_func = mem_funcs[func_name]
    else:
        my_grad = lambda op,grad: __dense_grad(op, grad, pre_activation)
        #@tff.Defun(tf.float32, tf.float32, func_name=func_name, python_grad_func=my_grad)
        def my_func(W,x):
            return __dense(W, pre_activation, x)
        mem_funcs[func_name] = my_func
    x_out = my_func(W, x)
    # Restore the static shape (lost when going through a Defun).
    xs = int_shape(x)
    xs[-1] = int_shape(W)[-1]
    x_out.set_shape(xs)
    return x_out
def __list_conv2d(W, stride, pad, dilate, pre_activation, upsample, xs, *x_list):
    """Conv2d forward pass over a list of inputs (concatenated on channels).

    If `upsample`, inputs are first nearest-neighbor-resized to the target
    spatial size xs[1:3]. Dilation > 1 uses atrous convolution (in which
    case `stride` is ignored by tf.nn.atrous_conv2d).
    """
    if upsample:
        x_list = tf.image.resize_nearest_neighbor(tf.concat(x_list,3), [xs[1],xs[2]])
    x = apply_pre_activation(x_list, pre_activation, 3)
    if dilate>1:
        return tf.nn.atrous_conv2d(x, W, dilate, pad)
    else:
        return tf.nn.conv2d(x, W, [1]+stride+[1], pad)
def __list_conv2d_grad(op, grad, stride, pad, dilate, pre_activation, upsample, xs):
    """Custom gradient for __list_conv2d: recompute the forward pass and
    backprop `grad` through it (memory-saving trick for Defun)."""
    with tf.control_dependencies([grad]):
        # stop_gradient prevents gradient flow through the recomputed graph
        # back into the original op inputs.
        W = tf.stop_gradient(op.inputs[0])
        x_list = [tf.stop_gradient(x) for x in op.inputs[1:]]
        y = __list_conv2d(W, stride, pad, dilate, pre_activation, upsample, xs, *x_list)
        grads = tf.gradients(ys=y, xs=[W]+x_list, grad_ys=grad)
        return grads
def _conv2d(x, W, stride=[1,1], pad='SAME', dilate=1, pre_activation=None, upsample=False, mem_funcs=mem_funcs):
    """Memoized conv2d layer over one tensor or a list of tensors (NHWC).

    The forward function is cached in `mem_funcs` per configuration; the
    @tff.Defun memory-saving decoration is currently disabled. The output's
    static shape is recomputed from the last input, the filter, `stride`
    and `upsample`. (`stride` is a default list but is never mutated here.)
    """
    if type(x) is tuple:
        x = list(x)
    elif type(x) is not list:
        x = [x]
    xs = int_shape(x[-1])
    if upsample:
        # Output spatial size doubles before the convolution.
        xs[1] = int(2*xs[1])
        xs[2] = int(2*xs[2])
    # Encode the full configuration into a Defun-safe function name.
    func_name = ('_conv2d'+str(len(x))+'_'+str(stride)+'_'+str(pad)+'_'+str(dilate)+'_'+str(pre_activation)+'_'+str(upsample)).replace(' ','_').replace('[','_').replace(']','_').replace(',','_')
    if func_name in mem_funcs:
        my_func = mem_funcs[func_name]
    else:
        my_grad = lambda op, grad: __list_conv2d_grad(op, grad, stride, pad, dilate, pre_activation, upsample, xs)
        #@tff.Defun(*([tf.float32] * (len(x) + 1)), func_name=func_name, python_grad_func=my_grad)
        def my_func(W, *x_list):
            return __list_conv2d(W, stride, pad, dilate, pre_activation, upsample, xs, *x_list)
        mem_funcs[func_name] = my_func
    x_out = my_func(W, *x)
    # Restore the static shape: output channels from W, spatial dims / stride.
    xs[-1] = int_shape(W)[-1]
    xs[1] = int(xs[1]/stride[0])
    xs[2] = int(xs[2]/stride[1])
    x_out.set_shape(xs)
    return x_out
def __list_global_avg_pool(pre_activation, *x_list):
    """Average-pool the pre-activated channel-concat of inputs over H and W."""
    activated = apply_pre_activation(x_list, pre_activation, 3)
    return tf.reduce_mean(activated, [1,2])
def __list_global_avg_pool_grad(op, grad, pre_activation):
    """Custom gradient for __list_global_avg_pool: recompute the forward pass
    and backprop `grad` through it (memory-saving trick for Defun)."""
    with tf.control_dependencies([grad]):
        # stop_gradient isolates the recomputed graph from the original inputs.
        x_list = [tf.stop_gradient(x) for x in op.inputs]
        y = __list_global_avg_pool(pre_activation, *x_list)
        grads = tf.gradients(ys=y, xs=x_list, grad_ys=grad)
        return grads
def global_avg_pool(x, pre_activation='celu', mem_funcs=mem_funcs):
    """Memoized global average pooling over H, W for one tensor or a list.

    Inputs are channel-concatenated and pre-activated first; 'celu'/'crelu'
    double the output channel count. The forward function is cached in
    `mem_funcs`; the @tff.Defun decoration is currently disabled.
    """
    if type(x) is tuple:
        x = list(x)
    elif type(x) is not list:
        x = [x]
    n_in = int_shape(x[0])[0]
    c_in = np.sum([int_shape(xi)[-1] for xi in x])
    # Encode the configuration into a Defun-safe function name.
    func_name = ('global_avg_pool_'+str(len(x))+'_'+str(pre_activation)).replace(' ','_').replace('[','_').replace(']','_').replace(',','_')
    if func_name in mem_funcs:
        my_func = mem_funcs[func_name]
    else:
        my_grad = lambda op, grad: __list_global_avg_pool_grad(op, grad, pre_activation)
        #@tff.Defun(*([tf.float32] * len(x)), func_name=func_name, python_grad_func=my_grad)
        def my_func(*x_list):
            return __list_global_avg_pool(pre_activation, *x_list)
        mem_funcs[func_name] = my_func
    # 'celu'/'crelu' concatenate [x, -x], doubling the channels.
    if pre_activation in ['celu', 'crelu']:
        c_out = int(2*c_in)
    else:
        c_out = int(c_in)
    x_out = my_func(*x)
    x_out.set_shape([n_in, c_out])
    return x_out
''' layer definitions '''
@add_arg_scope
def dense(x, num_units, pre_activation='celu', init_scale=1., counters={}, init=False,
          ema=None, weight_norm=True, use_b=True, use_g=True, **kwargs):
    """Dense layer with (optional) weight norm and data-dependent init.

    `counters` deliberately uses a shared mutable default so repeated calls
    without an explicit dict produce uniquely numbered layer names — verify
    callers pass their own dict when building multiple independent graphs.
    """
    layer_name = get_name('dense', counters)
    f = lambda x, W: _dense(x, W, pre_activation)
    params = get_params(layer_name, x, init, ema, use_W=True, use_g=use_g, use_b=use_b, f=f,
                        weight_norm=weight_norm, init_scale=init_scale, num_units=num_units, pre_activation=pre_activation)
    x = f(x, params['W'])
    if use_b:
        x = tf.nn.bias_add(x, params['b'])
    return x
@add_arg_scope
def conv2d(x, num_filters, pre_activation='celu', filter_size=[3,3], stride=[1,1], pad='SAME', dilate=1, upsample=False,
           init_scale=1., counters={}, init=False, ema=None, weight_norm=True, use_b=True, use_g=True, **kwargs):
    """Conv2d layer (NHWC) with (optional) weight norm and data-dependent init.

    `counters` deliberately uses a shared mutable default for unique layer
    naming — verify callers pass their own dict per graph. `filter_size`
    and `stride` defaults are never mutated.
    """
    layer_name = get_name('conv2d', counters)
    f = lambda x,W: _conv2d(x, W, stride, pad, dilate, pre_activation, upsample)
    params = get_params(layer_name, x, init, ema, use_W=True, use_g=use_g, use_b=use_b, f=f,
                        weight_norm=weight_norm, init_scale=init_scale, filter_size=filter_size, num_units=num_filters, pre_activation=pre_activation)
    x = f(x, params['W'])
    if use_b:
        x = tf.nn.bias_add(x, params['b'])
    return x
| StarcoderdataPython |
6432782 | <reponame>varikakasandor/dissertation-balls-into-bins<filename>two_thinning/full_knowledge/RL/DeepSarsaRL/__init__.py
import two_thinning.full_knowledge.RL.DeepSarsaRL.train
| StarcoderdataPython |
__version__ = "1.0.0"
__author__ = "vcokltfre"
__license__ = "MIT"


def bolb() -> str:
    """Return the string "bolb"."""
    return "bolb"


__all__ = ("bolb",)
| StarcoderdataPython |
5062285 | # (c) Copyright 2018 Palantir Technologies Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import pytest
import json
from .conftest import TEST_CASES
def convert_to_snake_case(name):
    """Convert a camelCase/PascalCase identifier to snake_case."""
    # First split off capitalized words ("fooBarBaz" -> "foo_Bar_Baz"),
    # then split remaining lower/digit->upper boundaries, then lowercase.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
def load_test_cases():
    """Load the 'client' section of the shared test-case JSON file.

    Uses a context manager so the file handle is closed deterministically;
    the previous ``json.load(open(...))`` leaked the handle until GC.
    """
    with open(TEST_CASES) as f:
        return json.load(f)['client']
def generate_auto_deserialize_tests():
    """Flatten the autoDeserialize cases into parametrize tuples.

    Each tuple is (endpoint_name, method_name, index, case, should_pass);
    negative cases are indexed after the positives for the same endpoint.
    """
    test_cases = load_test_cases()
    all_cases = []
    for endpoint_name, test_kinds in test_cases['autoDeserialize'].items():
        method_name = convert_to_snake_case(endpoint_name)
        positives = test_kinds['positive']
        negatives = test_kinds['negative']
        for i, case in enumerate(positives):
            all_cases.append((endpoint_name, method_name, i, case, True))
        for i, case in enumerate(negatives):
            all_cases.append((endpoint_name, method_name, i + len(positives), case, False))
    return all_cases
def generate_param_tests(test_kind):
    """Flatten one service's cases into (endpoint, method, index, value) tuples."""
    result = []
    for endpoint_name, test_kinds in load_test_cases()[test_kind].items():
        method_name = convert_to_snake_case(endpoint_name)
        for i, value in enumerate(test_kinds):
            result.append((endpoint_name, method_name, i, value))
    return result
def run_test(should_pass, is_blacklisted, runnable):
    """Run `runnable` and check the expected outcome; skip blacklisted cases.

    A previous version inverted the expectation for blacklisted tests to
    verify they would have failed, but tests can fail in one environment
    and pass in another, so blacklisted cases are currently just skipped.
    """
    if is_blacklisted:
        return
    run_test_inner(runnable, should_pass)
def run_test_inner(runnable, should_succeed):
    """Run the test, raising an exception if it succeeded but shouldn't have or vice versa."""
    if should_succeed:
        runnable()
    else:
        # The `message=` kwarg was deprecated in pytest 4 and removed in
        # pytest 5; pytest.raises already fails on its own when no
        # exception is raised, so it is simply dropped.
        with pytest.raises(Exception):
            runnable()
@pytest.mark.parametrize('endpoint_name,method_name,index,case,should_pass', generate_auto_deserialize_tests())
def test_body(
        conjure_validation_server,
        test_black_list,
        body_service,
        confirm_service,
        endpoint_name,
        method_name,
        index,
        case,
        should_pass):
    """Round-trip each autoDeserialize case through the body service."""
    blacklist = test_black_list['autoDeserialize']
    blacklisted = endpoint_name in blacklist and case in blacklist[endpoint_name]
    if should_pass:
        run_test(True, blacklisted,
                 lambda: confirm_service.confirm(getattr(body_service, method_name)(index), endpoint_name, index))
    else:
        run_test(False, blacklisted, lambda: getattr(body_service, method_name)(index))
@pytest.mark.parametrize('endpoint_name,method_name,index,value', generate_param_tests('singleHeaderService'))
def test_header(conjure_validation_server, test_black_list, header_service, endpoint_name, method_name, index, value):
    """Send each header test value through the header service."""
    blacklist = test_black_list['singleHeaderService']
    blacklisted = endpoint_name in blacklist and value in blacklist[endpoint_name]
    run_test(True, blacklisted, lambda: getattr(header_service, method_name)(json.loads(value), index))
@pytest.mark.parametrize('endpoint_name,method_name,index,value', generate_param_tests('singlePathParamService'))
def test_path(conjure_validation_server, test_black_list, path_service, endpoint_name, method_name, index, value):
    """Send each path-parameter test value through the path service."""
    blacklist = test_black_list['singlePathParamService']
    blacklisted = endpoint_name in blacklist and value in blacklist[endpoint_name]
    run_test(True, blacklisted, lambda: getattr(path_service, method_name)(index, json.loads(value)))
@pytest.mark.parametrize('endpoint_name,method_name,index,value', generate_param_tests('singleQueryParamService'))
def test_query(conjure_validation_server, test_black_list, query_service, endpoint_name, method_name, index, value):
    """Send each query-parameter test value through the query service."""
    # NOTE(review): the blacklist key is 'singleQueryService' while the
    # parametrize source is 'singleQueryParamService' — presumably the
    # blacklist file uses a different key; verify against that file.
    blacklist = test_black_list['singleQueryService']
    blacklisted = endpoint_name in blacklist and value in blacklist[endpoint_name]
    run_test(True, blacklisted, lambda: getattr(query_service, method_name)(index, json.loads(value)))
11263151 | <filename>network/vis_detection.py
import cv2
import os
#image_dir = "/data/dataset/VAR/UCF-101/train"
#result_dir = "/data/dataset/ucf101/UCF-101_det_vis/"
#bbox_dir = "/data/dataset/UCF-101-result/UCF-101-20/"
#image_dir = "/data/dataset/something-somthing-v2/20bn-something-something-v2-frames-224/"
#result_dir = "/data/dataset/something-somthing-v2/20bn-something-something-224-20_det_vis/"
#bbox_dir = "/data/dataset/something-somthing-v2/20bn-something-something-224-20"
# Active dataset configuration: where the extracted frames live, where the
# visualisations go, and where the per-frame detection .txt files are read from.
image_dir = '/data/dataset/something-something-v2-lite/20bn-something-something-v2-frames/'
result_dir = '/data/dataset/something-something-v2-lite/20bn-something-something-det-vis'
bbox_dir = '/data/dataset/something-something-v2-lite/20bn-something-something-det'
# image_dir = '/data/dataset/volleyball/videos/'
# result_dir = '/data/dataset/volleyball/volleyball_det_vis'
# bbox_dir = '/data/dataset/volleyball/volleyball-20'
# for label in os.listdir(image_dir):
# if not os.path.exists(os.path.join(result_dir, label)):
# os.makedirs(os.path.join(result_dir, label))
# For every video (frame directory), draw the detected boxes on each frame
# and display them one at a time (press a key to advance).
for frames in os.listdir(os.path.join(image_dir)):
    if not os.path.exists(os.path.join(result_dir, frames)):
        os.makedirs(os.path.join(result_dir, frames))
    # print(frames)
    for img_name in os.listdir(os.path.join(image_dir, frames)):
        #print(img_name)
        result_path = os.path.join(result_dir, frames, img_name)
        im_file = os.path.join(image_dir, frames,img_name)
        print('im_file', im_file)
        img = cv2.imread(im_file)
        print(img.shape)
        # height/width are currently unused but kept for reference.
        height = img.shape[0]
        width = img.shape[1]
        # Detection file format (space separated): presumably
        # "<score-or-class> x1 y1 x2 y2" per line — verify against the detector.
        with open(os.path.join(bbox_dir, frames,img_name[:-4]+'_det.txt'),"r") as f:
            lines = f.readlines()
            for line in lines:
                bbox = [float(x) for x in line.strip().split(" ")]
                print(bbox)
                # Green rectangle from fields 1..4 (field 0 skipped).
                img = cv2.rectangle(img, (int(bbox[1]), int(bbox[2])), (int(bbox[3]), \
                    int(bbox[4])), (0, 204, 0), 2)
        print(result_path)
        # cv2.imwrite(result_path, img)
        cv2.imshow(frames+img_name,img)
        cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
281246 | <reponame>himanshudabas/spade
import time
import pytest
from aioxmpp import PresenceShow, PresenceState
from asynctest import Mock, CoroutineMock
from spade.agent import Agent
from spade.container import Container
from spade import quit_spade
@pytest.fixture(autouse=True)
def run_around_tests():
    """Ensure a running Container before each test; shut SPADE down after."""
    active_container = Container()
    if not active_container.is_running:
        # Re-initialize a container that a previous test stopped.
        active_container.__init__()
    yield  # the test itself runs here
    quit_spade()
class MockedConnectedAgent(Agent):
    """Agent whose XMPP connection machinery is mocked so tests stay offline."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the coroutines that would talk to an XMPP server.
        self._async_connect = CoroutineMock()
        self._async_register = CoroutineMock()
        # conn_coro is used as an async context manager elsewhere in spade.
        self.conn_coro = Mock()
        self.conn_coro.__aexit__ = CoroutineMock()
        self.stream = Mock()
def make_connected_agent(jid="fake@jid", password="<PASSWORD>"):
    """Create a MockedConnectedAgent with test-friendly defaults."""
    agent = MockedConnectedAgent(jid, password)
    return agent
class MockedPresenceConnectedAgent(Agent):
    """Mock-connected Agent that can preset its XMPP presence for tests."""

    def __init__(self,
                 available=None,
                 show=None,
                 status=None,
                 priority=0,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Avoid a shared mutable default for the status dict.
        if status is None:
            status = {}
        # Stub out the network-facing connection machinery.
        self._async_connect = CoroutineMock()
        self.conn_coro = Mock()
        self.conn_coro.__aexit__ = CoroutineMock()
        # Presence settings applied later by mock_presence().
        self.available = available
        self.show = show
        self.status = status
        self.priority = priority

    def mock_presence(self):
        """Push the stored presence settings into the agent's presence server."""
        # Fall back to sensible defaults when not explicitly configured.
        show = self.show if self.show is not None else PresenceShow.NONE
        available = self.available if self.available is not None else False
        state = PresenceState(available, show)
        self.presence.presenceserver.set_presence(state, self.status, self.priority)
def make_presence_connected_agent(jid="fake@jid", password="<PASSWORD>",
                                  available=None,
                                  show=None,
                                  status=None,
                                  priority=0):
    """Create a MockedPresenceConnectedAgent with test-friendly defaults."""
    if status is None:
        status = {}
    return MockedPresenceConnectedAgent(jid=jid, password=password,
                                        available=available,
                                        show=show,
                                        status=status,
                                        priority=priority)
def wait_for_behaviour_is_killed(behaviour, tries=500, sleep=0.01):
    """Poll until `behaviour.is_killed()` becomes True.

    Args:
        behaviour: object exposing an is_killed() -> bool method.
        tries: maximum number of polls before giving up.
        sleep: seconds to wait between polls.

    Raises:
        TimeoutError: if the behaviour is still alive after tries*sleep
            seconds. TimeoutError subclasses Exception, so existing callers
            catching Exception keep working; previously a bare Exception
            was raised.
    """
    for _ in range(tries):
        if behaviour.is_killed():
            return
        time.sleep(sleep)
    # One final check in case the kill happened during the last sleep.
    if not behaviour.is_killed():
        raise TimeoutError("Behaviour not finished")
| StarcoderdataPython |
12826160 | import os
from pathlib import Path
import yaml
from hlkit.syntax import MatchPattern, SyntaxDefinition
from hlkit.parse import ParseResult, ParseState
# Repository root (two levels above this test file) and its assets folder.
BASE_DIR = os.path.join(os.path.dirname(__file__), "..", "..")
ASSETS_DIR = os.path.abspath(os.path.join(BASE_DIR, "assets"))
class TestParseState(object):
    """Exercises ParseState against the bundled JSON sublime-syntax definition."""

    # Loaded fresh for every test by setup_method.
    syndef: SyntaxDefinition

    def setup_method(self, method):
        """Load the JSON syntax definition from the assets directory."""
        synfile = "Packages/JSON/JSON.sublime-syntax"
        full_path = Path(os.path.join(ASSETS_DIR, synfile))
        data = yaml.load(full_path.read_text(), yaml.FullLoader)
        self.syndef = SyntaxDefinition.load(data)

    def test_flatten(self):
        """The main context flattens includes into the expected match list."""
        state = ParseState(self.syndef)
        # MatchPatterns in main context:
        # - comments[*] | prototype | 3
        # - constant[*] | main -> value -> constant | 1
        # - number[*] | main -> value -> number | 2
        # - string[*] | main -> value -> string | 1
        # - array[*] | main -> value -> array | 1
        # - object[*] | main -> value -> object | 1
        matches = state.current_level.matches
        assert len(matches) == 9
        # comments[0]
        assert matches[0].match._regex == r"/\*\*(?!/)"
        # constant[0]
        assert matches[3].match._regex == r"\b(?:true|false|null)\b"
        # number[1]
        assert matches[5].match._regex == r"(-?)(0|[1-9]\d*)"
        # string[0]
        assert matches[6].match._regex == r'"'
        # array[0]
        assert matches[7].match._regex == r"\["
        # object[0]
        assert matches[8].match._regex == r"\{"

    def test_best_match(self):
        """find_best_match returns the earliest-starting applicable pattern."""
        state = ParseState(self.syndef)
        pattern, match = state.find_best_match(" [ \n")
        assert isinstance(pattern, MatchPattern)
        assert pattern.scope == "punctuation.section.sequence.begin.json"
        assert match.start() == 1
        # Nothing matches plain whitespace.
        pattern, match = state.find_best_match(" \n")
        assert pattern is None
        assert match is None
        pattern, match = state.find_best_match(r'"\n"')
        assert pattern.match._regex == r'"' # string[0]
        assert pattern.scope == "punctuation.definition.string.begin.json"
        # Inside a string, escapes take precedence.
        state.push_context(self.syndef["inside-string"])
        pattern, match = state.find_best_match(r"a\tc\n")
        assert pattern.scope == "constant.character.escape.json"
        assert match.group() == r"\t"

    def test_next_token(self):
        """A full line-comment line tokenizes into 3 scoped tokens."""
        state = ParseState(self.syndef)
        line = " // comment\n"
        result = state.parse_next_token(line)
        assert isinstance(result, ParseResult)
        assert len(line) == result.chars_count
        assert len(result.tokens) == 3
        assert result.tokens[0].text == " "
        assert result.tokens[0].scopes == ["source.json"]
        assert result.tokens[1].text == "//"
        assert result.tokens[1].scopes == [
            "source.json",
            "comment.line.double-slash.js",
            "punctuation.definition.comment.json",
        ]
        assert result.tokens[2].text == " comment\n"
        assert result.tokens[2].scopes == [
            "source.json",
            "comment.line.double-slash.js",
        ]

    def test_next_token2(self):
        """Step through a line token by token, tracking position and scopes."""
        state = ParseState(self.syndef)
        line, pos = "[12,// comment\n", 0
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 1 and result.tokens[0].text == "["
        assert result.tokens[0].scopes == [
            "source.json",
            "meta.sequence.json",
            "punctuation.section.sequence.begin.json",
        ]
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 3 and result.tokens[0].text == "12"
        assert result.tokens[0].scopes == [
            "source.json",
            "meta.sequence.json",
            "meta.number.integer.decimal.json",
            "constant.numeric.value.json",
        ]
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 4 and result.tokens[0].text == ","
        assert result.tokens[0].scopes == [
            "source.json",
            "meta.sequence.json",
            "punctuation.separator.sequence.json",
        ]
        # The trailing comment consumes the rest of the line in one step.
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == len(line)
        assert result.tokens[0].text == "//"
        assert result.tokens[0].scopes == [
            "source.json",
            "meta.sequence.json",
            "comment.line.double-slash.js",
            "punctuation.definition.comment.json",
        ]
        assert result.tokens[1].text == " comment\n"
        assert result.tokens[1].scopes == [
            "source.json",
            "meta.sequence.json",
            "comment.line.double-slash.js",
        ]
        # Second line: a string with an escape, then an illegal bare word.
        line, pos = ' "a\\tb", ab\n', 0
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 3
        assert result.tokens[0].text == " "
        assert result.tokens[1].text == '"'
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 6
        assert result.tokens[0].text == "a"
        assert result.tokens[1].text == r"\t"
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 8
        assert result.tokens[0].text == "b"
        assert result.tokens[1].text == '"'
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 9
        assert result.tokens[0].text == ","
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == 11
        assert result.tokens[0].text == " "
        assert result.tokens[1].text == "a"
        assert result.tokens[1].scopes == [
            "source.json",
            "meta.sequence.json",
            "invalid.illegal.expected-sequence-separator.json",
        ]
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert result.tokens[0].text == "b"
        assert result.tokens[0].scopes == [
            "source.json",
            "meta.sequence.json",
            "invalid.illegal.expected-sequence-separator.json",
        ]
        result = state.parse_next_token(line, start=pos)
        pos += result.chars_count
        assert pos == len(line)
        assert result.tokens[0].text == "\n"
        assert result.tokens[0].scopes == [
            "source.json",
            "meta.sequence.json",
        ]

    def test_push_context(self):
        """An opening bracket pushes the sequence context onto the stack."""
        state = ParseState(self.syndef)
        line = "["
        result = state.parse_next_token(line)
        assert result.tokens[0].text == "["
        assert state.level_stack[-1].current_ctx.meta_scope == "meta.sequence.json"
| StarcoderdataPython |
# Path of this file; passed to construct_check in the doctests below.
SOURCE_FILE = __file__
def in_order_traversal(t):
    """Yield the labels of t in an in-order (left, root, right) walk.

    Assumes every node has either zero or exactly two branches.

    >>> t = Tree(1, [Tree(2, [Tree(4), Tree(5, [Tree(6), Tree(7)])]), Tree(3)])
    >>> list(in_order_traversal(t))
    [4, 2, 6, 5, 7, 1, 3]
    """
    if t.is_leaf():
        yield t.label
        return
    left, right = t.branches
    yield from in_order_traversal(left)
    yield t.label
    yield from in_order_traversal(right)
def summation(n, term):
    """Return the sum of the first n terms of a sequence.

    >>> summation(5, lambda x: pow(x, 3))
    225
    """
    return sum(term(k) for k in range(1, n + 1))
def interleaved_sum(n, odd_term, even_term):
    """Compute odd_term(1) + even_term(2) + odd_term(3) + ..., up to n.

    Implemented recursively: no loops and no modulus, alternating which
    term function applies at each step.

    >>> # 1 + 2^2 + 3 + 4^2 + 5
    ... interleaved_sum(5, lambda x: x, lambda x: x*x)
    29
    >>> from construct_check import check
    >>> check(SOURCE_FILE, 'interleaved_sum', ['While', 'For', 'Mod']) # ban loops and %
    True
    """
    def accumulate(k, use_odd):
        if k > n:
            return 0
        term = odd_term if use_odd else even_term
        return term(k) + accumulate(k + 1, not use_odd)
    return accumulate(1, True)
def mutate_reverse(link):
    """Mutates the Link so that its elements are reversed.

    >>> link = Link(1)
    >>> mutate_reverse(link)
    >>> link
    Link(1)

    >>> link = Link(1, Link(2, Link(3)))
    >>> mutate_reverse(link)
    >>> link
    Link(3, Link(2, Link(1)))
    """
    # Collect every value front-to-back, then write them back in reverse
    # order; only the `first` fields change, the node structure does not.
    values = []
    node = link
    while node is not Link.empty:
        values.append(node.first)
        node = node.rest
    node = link
    while values:
        node.first = values.pop()
        node = node.rest
class Tree:
    """A tree with a label and a (possibly empty) list of branches.

    >>> t = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
    >>> t.label
    3
    >>> t.branches[0].label
    2
    >>> t.branches[1].is_leaf()
    True
    """

    def __init__(self, label, branches=()):
        # A tuple default replaces the original mutable default `[]`;
        # `list(branches)` below still gives each instance its own list,
        # so behavior is identical but the shared-default trap is gone.
        for b in branches:
            assert isinstance(b, Tree)
        self.label = label
        self.branches = list(branches)

    def is_leaf(self):
        """Return True when this node has no branches."""
        return not self.branches

    def map(self, fn):
        """
        Apply a function `fn` to each node in the tree and mutate the tree.

        >>> t1 = Tree(1)
        >>> t1.map(lambda x: x + 2)
        >>> t1.map(lambda x : x * 4)
        >>> t1.label
        12
        >>> t2 = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
        >>> t2.map(lambda x: x * x)
        >>> t2
        Tree(9, [Tree(4, [Tree(25)]), Tree(16)])
        """
        self.label = fn(self.label)
        for b in self.branches:
            b.map(fn)

    def __contains__(self, e):
        """
        Determine whether an element exists in the tree.

        >>> t1 = Tree(1)
        >>> 1 in t1
        True
        >>> 8 in t1
        False
        >>> t2 = Tree(3, [Tree(2, [Tree(5)]), Tree(4)])
        >>> 6 in t2
        False
        >>> 5 in t2
        True
        """
        if self.label == e:
            return True
        for b in self.branches:
            if e in b:
                return True
        return False

    def __repr__(self):
        # Branches are omitted entirely for leaves: Tree(1), not Tree(1, []).
        if self.branches:
            branch_str = ', ' + repr(self.branches)
        else:
            branch_str = ''
        return 'Tree({0}{1})'.format(self.label, branch_str)

    def __str__(self):
        # One label per line, indented two spaces per depth level.
        def print_tree(t, indent=0):
            tree_str = '  ' * indent + str(t.label) + "\n"
            for b in t.branches:
                tree_str += print_tree(b, indent + 1)
            return tree_str
        return print_tree(self).rstrip()
class Link:
    """A linked list.

    >>> s = Link(1)
    >>> s.first
    1
    >>> s.rest is Link.empty
    True
    >>> s = Link(2, Link(3, Link(4)))
    >>> s.first = 5
    >>> s.rest.first = 6
    >>> s.rest.rest = Link.empty
    >>> s # Displays the contents of repr(s)
    Link(5, Link(6))
    >>> s.rest = Link(7, Link(Link(8, Link(9))))
    >>> s
    Link(5, Link(7, Link(Link(8, Link(9)))))
    >>> print(s) # Prints str(s)
    <5 7 <8 9>>
    """
    # Sentinel shared by every list to mark "no more nodes".
    empty = ()

    def __init__(self, first, rest=empty):
        assert rest is Link.empty or isinstance(rest, Link)
        self.first = first
        self.rest = rest

    def __repr__(self):
        tail = "" if self.rest is Link.empty else ", " + repr(self.rest)
        return "Link({0}{1})".format(repr(self.first), tail)

    def __str__(self):
        # Gather each element's string, then join with single spaces.
        parts = []
        node = self
        while node is not Link.empty:
            parts.append(str(node.first))
            node = node.rest
        return "<" + " ".join(parts) + ">"
| StarcoderdataPython |
11210991 | <filename>equipment/framework/helpers.py
from importlib import import_module
from inspect import getfile
from pathlib import Path
from typing import TYPE_CHECKING, Any, NoReturn, Optional
from sys import exit as _exit, modules as _modules
from pprint import pformat
from equipment.framework.Exceptions.ContainerModuleNotFound import ContainerModuleNotFound
if TYPE_CHECKING:
from equipment.framework.App.Container import Container
def app(name: str = 'app.App.Container', autodiscover: bool = True) -> 'Container':
    """Resolve and return the application Container class.

    Looks up the module *name*; when it cannot be imported and
    *autodiscover* is enabled, falls back to the framework-provided
    container module. Raises ContainerModuleNotFound when the resolved
    object is not a module like the framework container.
    """
    fallback = module('equipment.framework.App.Container')
    target = module(name)
    if target is None and autodiscover:
        # Fall back to the framework container module.
        target = fallback
    if not isinstance(target, fallback.__class__):
        raise ContainerModuleNotFound(name)
    return target.Container
def base_path(join: Optional[str] = None, container: Optional['Container'] = None, rootfile: str = '.equipment') -> Path:
    """Return the project base directory, optionally joined with *join*.

    Walks upward from the file that defines *container* (the application
    container by default) until a directory containing *rootfile* is found.

    :param join: optional path fragment appended to the base directory
    :param container: object whose defining file anchors the search;
        defaults to the result of ``app()``
    :param rootfile: marker file name identifying the project root
    :raises FileNotFoundError: if no ancestor directory contains *rootfile*
        (the original loop would spin forever once it hit the filesystem
        root, where ``path.parent == path``)
    """
    if container is None:
        container = app()
    path = Path(getfile(container))
    while not path.joinpath(rootfile).exists():
        if path == path.parent:
            # Reached the filesystem root without finding the marker.
            raise FileNotFoundError(
                "marker file {!r} not found in any parent directory".format(rootfile))
        path = path.parent
    return path.absolute().joinpath(
        join if join is not None else ''
    )
def module(name: str, print_exception: bool = False) -> Any:
    """Return the module *name*, preferring an already-imported instance.

    Returns None when the lookup/import fails; the exception is echoed
    only when *print_exception* is True.
    """
    try:
        if name in _modules:
            return _modules[name]
        return import_module(name)
    except Exception as e:
        print_if(print_exception, e)
        return None
def raise_if(condition: bool, exception: Exception) -> None:
    """Raise *exception* when *condition* holds; otherwise do nothing."""
    if not condition:
        return
    raise exception
def raise_unless(condition: bool, exception: Exception) -> None:
    """Raise *exception* when *condition* does NOT hold; otherwise do nothing."""
    if condition:
        return
    raise exception
def print_if(condition: bool, *args, **kwargs) -> None:
    """Forward *args*/*kwargs* to print() only when *condition* holds."""
    if not condition:
        return
    print(*args, **kwargs)
def print_unless(condition: bool, *args, **kwargs) -> None:
    """Forward *args*/*kwargs* to print() only when *condition* does NOT hold."""
    if condition:
        return
    print(*args, **kwargs)
def dump(*args, **kwargs) -> None:
    """Pretty-print the given arguments to stdout.

    Bug fix: the original body called ``pformat(*args, **kwargs)`` and
    discarded its return value, so ``dump()`` produced no output at all.
    ``pformat`` returns the formatted string; it must be printed.
    """
    print(pformat(*args, **kwargs))
def dd(*args, **kwargs) -> NoReturn:
    """Dump the given arguments, then terminate the process ("dump and die").

    NOTE(review): ``_exit`` is ``sys.exit`` bound at import time; called
    with no argument it raises ``SystemExit(None)``, i.e. exit status 0.
    """
    dump(*args, **kwargs)
    _exit()
| StarcoderdataPython |
1759539 | # PiFrame weather.py
# Manages weather data as well as forecast for the "Weather" Extension
# Uses Open Weather API https://openweathermap.org/api
import requests, settings, json, datetime
# Request URL builders for the Open Weather API.
# PEP 8 (E731): prefer ``def`` over a lambda bound to a name, so the
# functions carry their own names in tracebacks and can be documented.
def currentWeatherRequestURL(zip, apiKey):
    """Return the current-conditions endpoint URL for *zip* / *apiKey*."""
    return "http://api.openweathermap.org/data/2.5/weather?zip=%s&appid=%s&units=imperial" % (zip, apiKey)


def forecastWeatherRequestURL(zip, apiKey):
    """Return the forecast endpoint URL for *zip* / *apiKey*."""
    return "http://api.openweathermap.org/data/2.5/forecast?zip=%s&appid=%s&units=imperial" % (zip, apiKey)


def weatherIconURL(iconCode):
    """Return the icon image URL for an Open Weather icon code."""
    return "http://openweathermap.org/img/wn/%s@2x.png" % (iconCode)
# WeatherResponse is a serializable response containing requested weather information
class WeatherResponse:
    """Aggregated weather payload: location, sun times, current conditions
    and forecast lists, serializable to JSON via toJSON()."""

    def __init__(self, location, sunrise, sunset, currentResponse, todayForecast, upcomingForecast):
        # Attribute names (not parameter names) define the JSON keys;
        # note `todaysForecast` / `upcomingForecasts` differ slightly
        # from the constructor parameters.
        fields = {
            "location": location,
            "sunrise": sunrise,
            "sunset": sunset,
            "currentResponse": currentResponse,
            "todaysForecast": todayForecast,
            "upcomingForecasts": upcomingForecast,
        }
        for attr, value in fields.items():
            setattr(self, attr, value)

    def toJSON(self):
        """Serialize this response (recursively, via __dict__) to JSON."""
        return json.dumps(self, default=lambda obj: vars(obj))
# WeatherResponseItem represents a single weather log
class WeatherResponseItem:
    """One weather reading: icon URL, epoch timestamp, rounded temperatures
    and humidity."""

    def __init__(self, iconURL, epochTime, temperature, minTemperature, maxTemperature, humidity):
        # round(x, 0) keeps the values as floats while dropping fractional
        # degrees (Python uses banker's rounding on .5 values).
        rounded = [round(t, 0) for t in (temperature, minTemperature, maxTemperature)]
        self.iconURL = iconURL
        self.temperature, self.minTemperature, self.maxTemperature = rounded
        self.humidity = humidity
        self.time = epochTime
# getWeatherResponseItemFromData is used to create a WeatherResponseItem object from a dictionary of weather data
# param :data: a dictionary of information from the API call
# param :timeStamp: the datetime that the weather information corresponds to
# return :WeatherResponseItem: the response item created with data
def getWeatherResponseItemFromData(data, timeStamp):
    main = data["main"]
    return WeatherResponseItem(
        weatherIconURL(data["weather"][0]["icon"]),
        int(timeStamp.timestamp()),
        main["temp"],
        main["temp_min"],
        main["temp_max"],
        main["humidity"],
    )
# getWeather queries the weather API for the client. By default, the current data is retrieved.
# param :includeForecast: a boolean value that indicates if forecast data should be included in the request
# param :settings: object providing `zip` and `apiKey` attributes
#     (NOTE(review): this parameter shadows the imported `settings` module)
# return :str: JSON string of a WeatherResponse, or an error marker
#     ('{"error": "API"}' / '{"error": "REQUEST"}') on failure
def getWeather(includeForecast, settings):
    # NOTE(review): `zip` shadows the builtin of the same name.
    zip = settings.zip
    apiKey = settings.apiKey
    # If API key is not set, let the user know
    if apiKey == None or apiKey == "":
        return '{"error": "API"}'
    url = currentWeatherRequestURL(zip, apiKey)
    response = requests.get(url, timeout=10)
    # Make sure request was completed
    if response.status_code != 200:
        return '{"error": "REQUEST"}'
    data = response.json()
    location = data["name"]
    sunset = data["sys"]["sunset"]
    sunrise = data["sys"]["sunrise"]
    # Naive local timestamp; used both for the current reading and to
    # detect day rollovers in the forecast loop below.
    timeStamp = datetime.datetime.now()
    current = getWeatherResponseItemFromData(data, timeStamp)
    todayForecast = []
    upcomingForecast = []
    if includeForecast:
        url = forecastWeatherRequestURL(zip, apiKey)
        response = requests.get(url, timeout=10)
        # If request wasn't completed, skip to end and return what we have
        if response.status_code == 200:
            data = response.json()
            currentDay = timeStamp.day
            entriesForCurrentDay = []
            for update in data["list"]:
                dt = datetime.datetime.fromtimestamp(update["dt"])
                dataDay = dt.day
                responseItem = getWeatherResponseItemFromData(update, dt)
                # Keep a list of weather for a given day
                entriesForCurrentDay.append(responseItem)
                # Should record forecasts for the next 24 hours
                # (8 entries x 3-hour intervals)
                if len(todayForecast) < 8:
                    todayForecast.append(responseItem)
                # Once we move to a new day add the normalized information to our upcomingForecast list
                # Note, only the next 4 full days are recorded, not including the current day
                # NOTE(review): the guard below allows up to 5 entries,
                # which does not match the "4 days" comment — confirm intent.
                if currentDay != dataDay and len(upcomingForecast) < 5:
                    if len(entriesForCurrentDay) == 8:
                        entryFromDaysForecast = parseAveragesForDaysForecast(entriesForCurrentDay)
                        upcomingForecast.append(entryFromDaysForecast)
                    entriesForCurrentDay = []
                    currentDay = dataDay
    # Return our results
    returnObj = WeatherResponse(location, sunrise, sunset, current, todayForecast, upcomingForecast)
    return returnObj.toJSON()
# parseAveragesForDaysForecast collapses all weather entries for a given day into one entry for the full day.
# This means taking the overall max and min temperatures, as well as the average temperature and humidity.
# return :WeatherResponseItem: The consolidated response item
def parseAveragesForDaysForecast(entriesForCurrentDay):
    """Collapse a day's forecast entries into a single WeatherResponseItem.

    Generalized from the original hard-coded divisor of 8 to any non-empty
    entry list; callers passing exactly 8 entries (as getWeather does)
    get byte-identical results.

    :raises ValueError: if entriesForCurrentDay is empty
    """
    if not entriesForCurrentDay:
        raise ValueError("entriesForCurrentDay must not be empty")
    count = len(entriesForCurrentDay)
    temp = 0
    humidity = 0
    max_temp = -1000
    min_temp = 1000
    # Timestamp of the consolidated entry is the day's first reading.
    time = entriesForCurrentDay[0].time
    for entry in entriesForCurrentDay:
        temp += entry.temperature
        humidity += entry.humidity
        max_temp = max(max_temp, entry.maxTemperature)
        min_temp = min(min_temp, entry.minTemperature)
    return WeatherResponseItem("", time, temp / count, min_temp, max_temp, humidity / count)
9681260 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""changes.ChangeStore unit tests"""
import datetime
import json
import unittest
import test_env # pylint: disable=W0611
from master.buildbucket import common, changestore
from master.unittests.deferred_resource_test import run_deferred
from mock import Mock
from twisted.internet import defer
class ChangeStoreTest(unittest.TestCase):
    """Unit tests for ``changestore.ChangeStore``.

    All buildbot interactions are mocked; each test verifies both the
    returned value and the exact calls made on the mocked buildbot.
    """
    buildbot = None
    # Sample buildbucket change payload shared by the tests below.
    buildbucket_change = {
        'id': '1',
        'author': {'email': '<EMAIL>'},
        'message': 'hello world',
        'revision': 'deadbeef',
        'branch': 'master',
        'create_ts': 1419206400000000,  # datetime.datetime(2014, 12, 22)
        'project': 'chromium',
        'repo_url': 'http://chromium.googlesource.com/chromium/src',
        'url': 'http://chromium.googlesource.com/chromium/src/+/deadbeef'
    }
    ssid = None

    def setUp(self):
        super(ChangeStoreTest, self).setUp()
        # Fully mocked buildbot: the change cache resolves to a mock change.
        self.buildbot = Mock()
        self.buildbot.change.number = 5
        self.buildbot.get_cache.return_value = self.buildbot.change_cache
        self.buildbot.change_cache.get.return_value = self.buildbot.change
        self.store = changestore.ChangeStore(self.buildbot)

    def test_find_change_in_db(self):
        """_find_change_in_db resolves a (revision, change_id) key via the db."""
        change = Mock()
        rev = 123
        change_id = 'abc'
        change.properties.getProperty.return_value = {'change_id': change_id}
        self.buildbot.get_change_by_id.return_value = change
        self.buildbot.find_changes_by_revision.return_value = [change.changeid]
        result = run_deferred(
            self.store._find_change_in_db((rev, change_id))
        )
        self.assertEqual(result, change)
        change.properties.getProperty.assert_any_call(common.INFO_PROPERTY)
        self.buildbot.find_changes_by_revision.assert_called_once_with(rev)
        self.buildbot.get_change_by_id.assert_called_once_with(change.changeid)

    def test_find_change(self):
        """_find_change delegates to the change cache keyed on (rev, id)."""
        m = Mock()
        change = self.store._find_change(m.revision, m.change_id)
        cache = self.buildbot.change_cache
        cache.get.assert_called_once_with((m.revision, m.change_id))
        self.assertEqual(change, cache.get.return_value)

    def test_get_change(self):
        """On a cache miss, get_change inserts the change into the db."""
        cache = self.buildbot.change_cache
        cache.get.return_value = None
        result = run_deferred(self.store.get_change(self.buildbucket_change))
        info = json.dumps({
            common.BUILDBUCKET_CHANGE_ID_PROPERTY: '1',
        }, sort_keys=True)
        self.buildbot.add_change_to_db.assert_called_once_with(
            author=self.buildbucket_change['author']['email'],
            files=[],
            comments=self.buildbucket_change['message'],
            revision=self.buildbucket_change['revision'],
            when_timestamp=datetime.datetime(2014, 12, 22),
            branch=self.buildbucket_change['branch'],
            category=common.CHANGE_CATEGORY,
            revlink=self.buildbucket_change['url'],
            properties={
                common.INFO_PROPERTY: (info, 'Change'),
            },
            repository=self.buildbucket_change.get('repo_url'),
            project=self.buildbucket_change.get('project'),
        )
        self.assertEqual(result, self.buildbot.get_change_by_id.return_value)

    def test_get_change_without_id(self):
        """Missing optional fields default to empty values in the db insert."""
        cache = self.buildbot.change_cache
        cache.get.return_value = None
        result = run_deferred(self.store.get_change({
            'author': {
                'email': '<EMAIL>',
            },
            'message': 'Hello world',
        }))
        expected_info = json.dumps({'change_id': None})
        self.buildbot.add_change_to_db.assert_called_once_with(
            author='<EMAIL>',
            files=[],
            comments='Hello world',
            revision='',
            when_timestamp=None,
            branch=None,
            category=common.CHANGE_CATEGORY,
            revlink='',
            properties={
                common.INFO_PROPERTY: (expected_info, 'Change'),
            },
            repository='',
            project='',
        )
        self.assertEqual(result, self.buildbot.get_change_by_id.return_value)

    def test_get_change_with_cached_value(self):
        """On a cache hit, get_change must not touch the database."""
        cache = self.buildbot.change_cache
        result = run_deferred(self.store.get_change(self.buildbucket_change))
        self.assertEqual(result, cache.get.return_value)
        self.assertFalse(self.buildbot.add_change_to_db.called)

    def test_get_source_stamp(self):
        """get_source_stamp inserts a source stamp built from cached changes."""
        result = run_deferred(
            self.store.get_source_stamp([self.buildbucket_change]))
        cache = self.buildbot.change_cache
        cache.get.assert_called_once_with(
            (self.buildbucket_change['revision'], self.buildbucket_change['id']))
        bb_change = cache.get.return_value
        self.buildbot.insert_source_stamp_to_db.assert_called_once_with(
            branch=bb_change.branch,
            revision=bb_change.revision,
            repository=bb_change.repository,
            project=bb_change.project,
            changeids=[bb_change.number],
        )
        self.assertEqual(
            result, self.buildbot.insert_source_stamp_to_db.return_value)

    def test_get_source_stamp_with_cache(self):
        """A populated ssid cache short-circuits both lookup and insertion."""
        ssid = Mock()
        ss_cache = {
            (self.buildbucket_change['id'],): ssid,
        }
        result = run_deferred(
            self.store.get_source_stamp([self.buildbucket_change], cache=ss_cache))
        self.assertFalse(self.buildbot.change_cache.get.called)
        self.assertFalse(self.buildbot.insert_source_stamp_to_db.called)
        self.assertEqual(result, ssid)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11314885 | <reponame>Ouranosinc/Magpie
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_magpie_api
----------------------------------
Tests for :mod:`magpie.api` module.
"""
import unittest
import mock
# NOTE: must be imported without 'from', otherwise the interface's test cases are also executed
import tests.interfaces as ti
from magpie.constants import get_constant
from magpie.models import UserGroupStatus, UserStatuses
from magpie.utils import CONTENT_TYPE_JSON
from tests import runner, utils
from tests.utils import TestVersion
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
class TestCase_MagpieAPI_NoAuth_Local(ti.Interface_MagpieAPI_NoAuth, unittest.TestCase):
    # pylint: disable=C0103,invalid-name
    """
    Test any operation that do not require any AuthN/AuthZ (``MAGPIE_ANONYMOUS_GROUP`` & ``MAGPIE_ANONYMOUS_USER``).

    Use a local Magpie test application.
    """
    __test__ = True

    @classmethod
    def setUpClass(cls):
        # Spin up the local test application; all actual test methods are
        # inherited from ti.Interface_MagpieAPI_NoAuth.
        cls.app = utils.get_test_magpie_app()
        cls.cookies = None  # force not logged in
        cls.version = utils.TestSetup.get_Version(cls)
        # note: admin credentials to setup data on test instance as needed, but not to be used for these tests
        cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
        cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
        # NOTE(review): "<PASSWORD>" is an anonymization artifact — the
        # original constant name was lost; confirm against upstream source.
        cls.pwd = get_constant("<PASSWORD>")
        cls.setup_admin()
        cls.test_user_name = get_constant("MAGPIE_TEST_USER", default_value="unittest-no-auth_api-user-local",
                                          raise_missing=False, raise_not_set=False)
        cls.test_group_name = get_constant("MAGPIE_TEST_GROUP", default_value="unittest-no-auth_api-group-local",
                                           raise_missing=False, raise_not_set=False)
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
class TestCase_MagpieAPI_UsersAuth_Local(ti.Interface_MagpieAPI_UsersAuth, unittest.TestCase):
    # pylint: disable=C0103,invalid-name
    """
    Test any operation that require logged AuthN/AuthZ, but lower than ``MAGPIE_ADMIN_GROUP``.

    Use a local Magpie test application.
    """
    __test__ = True

    @classmethod
    def setUpClass(cls):
        cls.app = utils.get_test_magpie_app()
        # admin login credentials for setup operations, use 'test' parameters for testing actual feature
        cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
        cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
        # NOTE(review): "<PASSWORD>" is an anonymization artifact — the
        # original constant name was lost; confirm against upstream source.
        cls.pwd = get_constant("<PASSWORD>")
        cls.cookies = None
        cls.version = utils.TestSetup.get_Version(cls)
        cls.setup_admin()
        cls.headers, cls.cookies = utils.check_or_try_login_user(cls, cls.usr, cls.pwd, use_ui_form_submit=True)
        cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
        assert cls.headers and cls.cookies, cls.require  # nosec
        cls.test_service_name = "unittest-user-auth-local_test-service"
        cls.test_service_type = "api"
        cls.test_resource_name = "unittest-user-auth-local_test-resource"
        cls.test_resource_type = "route"
        cls.test_group_name = "unittest-user-auth-local_test-group"
        cls.test_user_name = "unittest-user-auth-local_test-user-username"

    @runner.MAGPIE_TEST_USERS
    @runner.MAGPIE_TEST_GROUPS
    @runner.MAGPIE_TEST_REGISTRATION
    @utils.mocked_send_email
    def test_RegisterDiscoverableGroupWithTerms(self):
        """
        Non-admin logged user is allowed to request to join a group requiring terms and conditions acceptation.
        """
        terms = "Test terms and conditions."
        utils.TestSetup.delete_TestGroup(self)
        utils.TestSetup.create_TestGroup(self, override_discoverable=True, override_terms=terms)
        self.login_test_user()
        path = "/register/groups/{}".format(self.test_group_name)
        resp = utils.test_request(self, "POST", path, data={}, headers=self.test_headers, cookies=self.test_cookies)
        # 202 Accepted: registration recorded but membership is pending
        body = utils.check_response_basic_info(resp, 202, expected_method="POST")
        utils.check_val_is_in("group_name", body)
        utils.check_val_is_in("user_name", body)
        utils.check_val_is_in(body["group_name"], self.test_group_name)
        utils.check_val_is_in(body["user_name"], self.test_user_name)
        # validate as admin that user was not registered yet to the group,
        # since it requires terms and condition acceptation
        utils.check_or_try_logout_user(self)
        # NOTE(review): "<PASSWORD>" below is an anonymization artifact —
        # the original expression (likely cls/self admin password) was lost.
        utils.check_or_try_login_user(self, username=self.usr, password=<PASSWORD>)
        utils.TestSetup.check_UserGroupMembership(self, member=False,
                                                  override_headers=self.json_headers, override_cookies=self.cookies)
        # Check if the user's membership is pending
        path = "/users/{user_name}/groups?status={status}".format(user_name=self.test_user_name,
                                                                  status=UserGroupStatus.PENDING.value)
        resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
        body = utils.check_response_basic_info(resp, 200, expected_method="GET")
        utils.check_val_is_in("group_names", body)
        utils.check_val_type(body["group_names"], list)
        utils.check_val_is_in(self.test_group_name, body["group_names"])
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
@runner.MAGPIE_TEST_REGISTRATION
class TestCase_MagpieAPI_UsersAuth_Local_UserRegistration(ti.UserTestCase):
    # pylint: disable=C0103,invalid-name
    """
    Test any operation that require logged AuthN/AuthZ, but lower than ``MAGPIE_ADMIN_GROUP``.

    Use a local Magpie test application. Enables the User self-registration feature.
    """
    __test__ = True

    @classmethod
    def setUpClass(cls):
        # configuration employed for user registration tests
        settings = {
            "magpie.user_registration_enabled": True,
            "magpie.user_registered_enabled": True,
            "magpie.admin_approval_enabled": True,
            # NOTE(review): "<EMAIL>" is an anonymization artifact —
            # the original recipient address string was lost.
            "magpie.admin_approval_email_recipient": "<EMAIL>",
        }
        cls.app = utils.get_test_magpie_app(settings)
        cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
        cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
        cls.pwd = get_constant("MAGPIE_TEST_ADMIN_PASSWORD")
        cls.cookies = None
        cls.version = utils.TestSetup.get_Version(cls)
        cls.setup_admin()
        cls.headers, cls.cookies = utils.check_or_try_login_user(cls, cls.usr, cls.pwd, use_ui_form_submit=True)
        cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
        assert cls.headers and cls.cookies, cls.require  # nosec
        # don't bother with any test if not supported, must wait until here to get version from app
        utils.warn_version(cls, "User self-registration.", "3.13.0", skip=True)
        cls.test_group_name = "unittest-user-register-local_test-group"
        cls.test_user_name = "unittest-user-register-local_test-user-username"

    @runner.MAGPIE_TEST_USERS
    def test_GetPendingUsersList_Forbidden(self):
        """
        Non-admin logged user cannot list pending user registrations.
        """
        self.login_test_user()
        resp = utils.test_request(self, "GET", "/register/users", expect_errors=True,
                                  headers=self.test_headers, cookies=self.test_cookies)
        utils.check_response_basic_info(resp, 403)

    @runner.MAGPIE_TEST_USERS
    def test_DeletePendingUser_Forbidden(self):
        """
        Non-admin logged user cannot remove pending user registrations.
        """
        self.login_test_user()
        resp = utils.test_request(self, "DELETE", "/register/users/dont-care", expect_errors=True,
                                  headers=self.test_headers, cookies=self.test_cookies)
        utils.check_response_basic_info(resp, 403, expected_method="DELETE")
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
class TestCase_MagpieAPI_AdminAuth_Local(ti.Interface_MagpieAPI_AdminAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that require at least ``MAGPIE_ADMIN_GROUP`` AuthN/AuthZ.
Use a local Magpie test application.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.app = utils.get_test_magpie_app()
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
cls.pwd = get_constant("<PASSWORD>")
cls.cookies = None
cls.version = utils.TestSetup.get_Version(cls)
cls.setup_admin()
cls.headers, cls.cookies = utils.check_or_try_login_user(cls.app, cls.usr, cls.pwd, use_ui_form_submit=True)
cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
cls.login_admin()
cls.setup_test_values()
@runner.MAGPIE_TEST_GROUPS
def test_GetGroupUsers_Pending(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Create test user and request adding the user to test group, but leave him as 'pending'
utils.TestSetup.create_TestUser(self, accept_terms=False)
# Add admin user as an active member of test group
utils.TestSetup.assign_TestUserGroup(self, override_user_name=self.usr)
path = "/groups/{grp}/users?status={status}".format(grp=self.test_group_name,
status=UserGroupStatus.PENDING.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user_names", body)
utils.check_val_type(body["user_names"], list)
utils.check_all_equal(body["user_names"], {self.test_user_name}, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetGroupUsers_Active(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Create test user and request adding the user to test group, but leave him as 'pending'
utils.TestSetup.create_TestUser(self, accept_terms=False)
# Add admin user as an active member of test group
utils.TestSetup.assign_TestUserGroup(self, override_user_name=self.usr)
path = "/groups/{grp}/users?status={status}".format(grp=self.test_group_name,
status=UserGroupStatus.ACTIVE.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user_names", body)
utils.check_val_type(body["user_names"], list)
utils.check_all_equal(body["user_names"], {self.usr}, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetGroupUsers_Unspecified(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Create test user and request adding the user to test group, but leave him as 'pending'
utils.TestSetup.create_TestUser(self, accept_terms=False)
# Add admin user as an active member of test group
utils.TestSetup.assign_TestUserGroup(self, override_user_name=self.usr)
path = "/groups/{grp}/users".format(grp=self.test_group_name)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user_names", body)
utils.check_val_type(body["user_names"], list)
utils.check_all_equal(body["user_names"], {self.usr}, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetGroupUsers_All(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Create test user and request adding the user to test group, but leave him as 'pending'
utils.TestSetup.create_TestUser(self, accept_terms=False)
# Add admin user as an active member of test group
utils.TestSetup.assign_TestUserGroup(self, override_user_name=self.usr)
path = "/groups/{grp}/users?status={status}".format(grp=self.test_group_name,
status=UserGroupStatus.ALL.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user_names", body)
utils.check_val_type(body["user_names"], list)
utils.check_all_equal(body["user_names"], {self.usr, self.test_user_name}, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetUserInfo_PendingGroups(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Add user to users group
users_group = get_constant("MAGPIE_USERS_GROUP")
utils.TestSetup.create_TestUser(self, override_group_name=users_group)
# Check if user info displays no current pending group
path = "/users/{usr}".format(usr=self.test_user_name)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user", body)
utils.check_val_is_in("has_pending_group", body["user"])
utils.check_val_false(body["user"]["has_pending_group"])
# add user to test group and leave him as pending
utils.TestSetup.assign_TestUserGroup(self, accept_terms=False)
# Check if user info displays having a pending group
path = "/users/{usr}".format(usr=self.test_user_name)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user", body)
utils.check_val_is_in("has_pending_group", body["user"])
utils.check_val_true(body["user"]["has_pending_group"])
@runner.MAGPIE_TEST_GROUPS
def test_GetUserGroups_Pending(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Add user to users group and leave user pending on test group
users_group = get_constant("MAGPIE_USERS_GROUP")
utils.TestSetup.create_TestUser(self, override_group_name=users_group)
utils.TestSetup.assign_TestUserGroup(self, accept_terms=False)
path = "/users/{usr}/groups?status={status}".format(usr=self.test_user_name,
status=UserGroupStatus.PENDING.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("group_names", body)
utils.check_val_type(body["group_names"], list)
utils.check_all_equal(body["group_names"], {self.test_group_name}, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetUserGroups_Active(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Add user to users group and leave user pending on test group
users_group = get_constant("MAGPIE_USERS_GROUP")
utils.TestSetup.create_TestUser(self, override_group_name=users_group)
utils.TestSetup.assign_TestUserGroup(self, accept_terms=False)
path = "/users/{usr}/groups?status={status}".format(usr=self.test_user_name,
status=UserGroupStatus.ACTIVE.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("group_names", body)
utils.check_val_type(body["group_names"], list)
expected_active_groups = {users_group}
if TestVersion(self.version) >= TestVersion("1.4.0"):
expected_active_groups.add(get_constant("MAGPIE_ANONYMOUS_GROUP"))
utils.check_all_equal(body["group_names"], expected_active_groups, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetUserGroups_Unspecified(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Add user to users group and leave user pending on test group
users_group = get_constant("MAGPIE_USERS_GROUP")
utils.TestSetup.create_TestUser(self, override_group_name=users_group)
utils.TestSetup.assign_TestUserGroup(self, accept_terms=False)
path = "/users/{usr}/groups".format(usr=self.test_user_name)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("group_names", body)
utils.check_val_type(body["group_names"], list)
expected_active_groups = {users_group}
if TestVersion(self.version) >= TestVersion("1.4.0"):
expected_active_groups.add(get_constant("MAGPIE_ANONYMOUS_GROUP"))
utils.check_all_equal(body["group_names"], expected_active_groups, any_order=True)
@runner.MAGPIE_TEST_GROUPS
def test_GetUserGroups_All(self):
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Add user to users group and leave user pending on test group
users_group = get_constant("MAGPIE_USERS_GROUP")
utils.TestSetup.create_TestUser(self, override_group_name=users_group)
utils.TestSetup.assign_TestUserGroup(self, accept_terms=False)
path = "/users/{usr}/groups?status={status}".format(usr=self.test_user_name,
status=UserGroupStatus.ALL.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers,
cookies=self.cookies, expect_errors=True)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("group_names", body)
utils.check_val_type(body["group_names"], list)
expected_groups = {users_group, self.test_group_name}
if TestVersion(self.version) >= TestVersion("1.4.0"):
expected_groups.add(get_constant("MAGPIE_ANONYMOUS_GROUP"))
utils.check_all_equal(body["group_names"], expected_groups, any_order=True)
@runner.MAGPIE_TEST_GROUPS
@utils.mocked_send_email
def test_PostUserGroupWithTerms(self):
# First test adding an existing user to a group with terms
utils.TestSetup.create_TestUser(self, override_group_name=None)
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Request adding the user to test group
path = "/users/{usr}/groups".format(usr=self.test_user_name)
data = {"group_name": self.test_group_name}
resp = utils.test_request(self, "POST", path, json=data,
headers=self.json_headers, cookies=self.cookies)
utils.check_response_basic_info(resp, 202, expected_method="POST")
# User should not be added to group until terms are accepted
utils.TestSetup.check_UserGroupMembership(self, member=False)
# Now test adding a new user to a group with terms upon user creation
new_user_name = "new_usr_in_group_with_terms"
self.extra_user_names.add(new_user_name)
data = {
"user_name": new_user_name,
"password": <PASSWORD>,
"group_name": self.test_group_name,
"email": <EMAIL>".<EMAIL>(new_user_<EMAIL>)
}
resp = utils.test_request(self, "POST", "/users", json=data, headers=self.json_headers, cookies=self.cookies)
utils.check_response_basic_info(resp, 201, expected_method="POST")
utils.TestSetup.check_UserGroupMembership(self, override_user_name=new_user_name, member=False)
# Check if both user memberships are pending
path = "/groups/{grp}/users?status={status}".format(grp=self.test_group_name,
status=UserGroupStatus.PENDING.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user_names", body)
utils.check_val_type(body["user_names"], list)
utils.check_val_is_in(self.test_user_name, body["user_names"])
utils.check_val_is_in(new_user_name, body["user_names"])
@runner.MAGPIE_TEST_GROUPS
def test_PostUserGroupWithTerms_Fail(self):
# When the SMTP host is not configured, the T&C confirmation email cannot be
# sent: assigning a user to a group with terms must fail with HTTP 500 and
# must leave no membership behind (neither pending nor active).
utils.TestSetup.create_TestUser(self, override_group_name=None)
terms = "Test terms and conditions."
utils.TestSetup.create_TestGroup(self, override_terms=terms)
# Use empty settings dictionary, not assigning the MAGPIE_SMTP_HOST variable in the settings will
# trigger a fail when assigning the user to the group with terms
with utils.mocked_get_settings(settings={}):
with utils.mock_send_email("magpie.api.management.user.user_utils.send_email"):
path = "/users/{usr}/groups".format(usr=self.test_user_name)
data = {"group_name": self.test_group_name}
resp = utils.test_request(self, "POST", path, json=data, expect_errors=True,
headers=self.json_headers, cookies=self.cookies)
utils.check_response_basic_info(resp, 500, expected_method="POST")
# Check that the user membership has not been updated as pending or as active
path = "/groups/{grp}/users?status={status}".format(grp=self.test_group_name,
status=UserGroupStatus.ALL.value)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200, expected_method="GET")
utils.check_val_is_in("user_names", body)
utils.check_val_type(body["user_names"], list)
utils.check_val_not_in(self.test_user_name, body["user_names"])
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
@runner.MAGPIE_TEST_REGISTRATION
class TestCase_MagpieAPI_AdminAuth_Local_UserRegistration(ti.AdminTestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that require at least ``MAGPIE_ADMIN_GROUP`` AuthN/AuthZ.
Use a local Magpie test application. Enables the User self-registration feature.
"""
__test__ = True
@classmethod
def setUpClass(cls):
# configuration employed for user registration tests
# NOTE(review): 'magpie.user_registered_enabled' looks like a near-duplicate
# of 'magpie.user_registration_enabled' — possibly a typo; confirm against
# the settings actually read by the application.
settings = {
"magpie.user_registration_enabled": True,
"magpie.user_registered_enabled": True,
"magpie.admin_approval_enabled": True,
"magpie.admin_approval_email_recipient": "<EMAIL>",
}
# setup
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
cls.pwd = get_constant("MAGPIE_TEST_ADMIN_PASSWORD")
cls.app = utils.get_test_magpie_app(settings)
cls.version = utils.TestSetup.get_Version(cls, real_version=True)
cls.setup_admin()
cls.headers, cls.cookies = utils.check_or_try_login_user(cls.app, cls.usr, cls.pwd)
cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
cls.login_admin()
# don't bother with any test if not supported, must wait until here to get version from app
utils.warn_version(cls, "User self-registration.", "3.13.0", skip=True)
# Listing pending registrations returns exactly the one pending test user.
@runner.MAGPIE_TEST_USERS
@utils.mocked_send_email
def test_GetPendingUsersList(self):
utils.TestSetup.clear_PendingUsers(self)
test_user = "test-pending-user-listing"
utils.TestSetup.create_TestUser(self, override_user_name=test_user, pending=True)
resp = utils.test_request(self, "GET", "/register/users", headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp, 200)
utils.check_val_is_in("registrations", body)
utils.check_val_equal(len(body["registrations"]), 1)
utils.check_val_equal(body["registrations"][0], test_user)
# A single pending registration exposes its status and the confirm /
# approve / decline action URLs.
@runner.MAGPIE_TEST_USERS
@utils.mocked_send_email
def test_GetPendingUsersRegistration(self):
utils.TestSetup.clear_PendingUsers(self)
test_user = "test-pending-user-listing"
utils.TestSetup.create_TestUser(self, override_user_name=test_user, pending=True)
path = "/register/users/{}".format(test_user)
resp = utils.test_request(self, "GET", path, headers=self.json_headers, cookies=self.cookies)
body = utils.check_response_basic_info(resp)
utils.check_val_is_in("registration", body)
reg = body["registration"]
utils.check_val_is_in("user_name", reg)
utils.check_val_equal(reg["user_name"], test_user)
utils.check_val_is_in("status", reg)
utils.check_val_equal(reg["status"], UserStatuses.Pending.name)
utils.check_val_is_in("confirm_url", reg)
utils.check_val_is_in("approve_url", reg)
utils.check_val_is_in("decline_url", reg)
# Deleting a pending registration makes it unretrievable (404 afterwards).
@runner.MAGPIE_TEST_USERS
@utils.mocked_send_email
def test_DeletePendingUser(self):
utils.TestSetup.clear_PendingUsers(self)
test_user = "test-pending-user-listing"
utils.TestSetup.create_TestUser(self, override_user_name=test_user, pending=True)
path = "/register/users/{}".format(test_user)
resp = utils.test_request(self, "DELETE", path, headers=self.test_headers, cookies=self.test_cookies)
utils.check_response_basic_info(resp, 200)
resp = utils.test_request(self, "GET", path, expect_errors=True,
headers=self.test_headers, cookies=self.test_cookies)
utils.check_response_basic_info(resp, 404)
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_REMOTE
class TestCase_MagpieAPI_NoAuth_Remote(ti.Interface_MagpieAPI_NoAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that do not require any AuthN/AuthZ (``MAGPIE_ANONYMOUS_GROUP`` & ``MAGPIE_ANONYMOUS_USER``).
Use an already running remote bird server.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.url = get_constant("MAGPIE_TEST_REMOTE_SERVER_URL")
cls.cookies = None
cls.version = utils.TestSetup.get_Version(cls, real_version=True)
# note: admin credentials to setup data on test instance as needed, but not to be used for these tests
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
# NOTE(review): '<PASSWORD>PASSWORD' is an anonymization artifact; the
# original constant name (presumably MAGPIE_TEST_ADMIN_PASSWORD) was scrubbed.
cls.pwd = get_constant("<PASSWORD>PASSWORD")
cls.setup_admin()
cls.test_user_name = get_constant("MAGPIE_TEST_USER", default_value="unittest-no-auth_api-user-remote",
raise_missing=False, raise_not_set=False)
cls.test_group_name = get_constant("MAGPIE_TEST_GROUP", default_value="unittest-no-auth_api-group-remote",
raise_missing=False, raise_not_set=False)
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_REMOTE
class TestCase_MagpieAPI_UsersAuth_Remote(ti.Interface_MagpieAPI_UsersAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that require logged user AuthN/AuthZ, but lower than ``MAGPIE_ADMIN_GROUP``.
Use an already running remote bird server.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.url = get_constant("MAGPIE_TEST_REMOTE_SERVER_URL")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
# NOTE(review): '<PASSWORD>' is an anonymization artifact; the original
# constant name was scrubbed from this dataset copy.
cls.pwd = get_constant("<PASSWORD>")
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.cookies = None
cls.version = utils.TestSetup.get_Version(cls, real_version=True)
cls.setup_admin()
cls.headers, cls.cookies = utils.check_or_try_login_user(cls, cls.usr, cls.pwd, use_ui_form_submit=True)
cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
assert cls.headers and cls.cookies, cls.require # nosec
# fixed names for service/resource/group/user fixtures used by the tests
cls.test_service_name = "unittest-user-auth-remote_test-service"
cls.test_service_type = "api"
cls.test_resource_name = "unittest-user-auth-remote_test-resource"
cls.test_resource_type = "route"
cls.test_group_name = "unittest-user-auth-remote_test-group"
cls.test_user_name = "unittest-user-auth-remote_test-user-username"
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_REMOTE
class TestCase_MagpieAPI_AdminAuth_Remote(ti.Interface_MagpieAPI_AdminAuth, unittest.TestCase):
# pylint: disable=C0103,invalid-name
"""
Test any operation that require at least ``MAGPIE_ADMIN_GROUP`` AuthN/AuthZ.
Use an already running remote bird server.
"""
__test__ = True
@classmethod
def setUpClass(cls):
cls.grp = get_constant("MAGPIE_ADMIN_GROUP")
cls.usr = get_constant("MAGPIE_TEST_ADMIN_USERNAME")
# NOTE(review): '<PASSWORD>' is an anonymization artifact; the original
# constant name was scrubbed from this dataset copy.
cls.pwd = get_constant("<PASSWORD>")
cls.url = get_constant("MAGPIE_TEST_REMOTE_SERVER_URL")
cls.version = utils.TestSetup.get_Version(cls, real_version=True)
cls.setup_admin()
cls.headers, cls.cookies = utils.check_or_try_login_user(cls.url, cls.usr, cls.pwd)
cls.require = "cannot run tests without logged in user with '{}' permissions".format(cls.grp)
cls.login_admin()
cls.setup_test_values()
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
def test_magpie_homepage():
    """Validate the JSON landing page served at ``/`` when the UI is disabled.

    Patches ``get_constant`` so that ``MAGPIE_UI_ENABLED`` reports ``False``,
    forcing the API homepage (rather than the UI) to answer the root path,
    then checks that the expected metadata fields are present in the body.
    """
    from magpie.constants import get_constant as real_get_constant  # pylint: disable=W0404,reimported

    def patched_get_constant(*args, **kwargs):
        # report the UI as disabled; defer every other constant lookup
        if args[0] == "MAGPIE_UI_ENABLED":
            return False
        return real_get_constant(*args, **kwargs)

    with mock.patch("magpie.constants.get_constant", side_effect=patched_get_constant):
        with mock.patch("magpie.api.home.get_constant", side_effect=patched_get_constant):
            app = utils.get_test_magpie_app()
            resp = utils.test_request(app, "GET", "/")
            body = utils.check_response_basic_info(resp)
            for field in ["name", "title", "contact", "description", "documentation"]:
                utils.check_val_is_in(field, body)
            utils.check_val_is_in("magpie", body["name"])
@runner.MAGPIE_TEST_API
@runner.MAGPIE_TEST_LOCAL
@runner.MAGPIE_TEST_STATUS
def test_response_metadata():
"""
Validate that regardless of response type (success/error) and status-code, metadata details are added.
note: test only locally to avoid remote server side-effects and because mock cannot be done remotely
"""
# stand-in that makes schema generation blow up, used to force a 500 on /json
def raise_request(*_, **__):
raise TypeError()
app = utils.get_test_magpie_app()
# all paths below must be publicly accessible
# each entry: (expected status code, HTTP method, path, extra request kwargs)
for code, method, path, kwargs in [
(200, "GET", "/session", {}),
# FIXME: sort out 400 vs 422 everywhere (https://github.com/Ouranosinc/Magpie/issues/359)
# (400, "POST", "/signin", {"body": {}}), # missing credentials
(401, "GET", "/services", {}), # anonymous unauthorized
(404, "GET", "/random", {}),
(405, "POST", "/users/{}".format("MAGPIE_LOGGED_USER"), {"body": {}}),
(406, "GET", "/session", {"headers": {"Accept": "application/pdf"}}),
# 409: need connection to test conflict, no route available without so (other tests validates them though)
(422, "POST", "/signin", {"body": {"user_name": "!!!!"}}), # invalid format
(500, "GET", "/json", {}), # see mock
]:
with mock.patch("magpie.api.schemas.generate_api_schema", side_effect=raise_request):
# per-case headers override the JSON defaults, then are popped so they are
# not passed twice to test_request
headers = {"Accept": CONTENT_TYPE_JSON, "Content-Type": CONTENT_TYPE_JSON}
headers.update(kwargs.get("headers", {}))
kwargs.pop("headers", None)
resp = utils.test_request(app, method, path, expect_errors=True, headers=headers, **kwargs)
# following util check validates all expected request metadata in response body
utils.check_response_basic_info(resp, expected_code=code, expected_method=method)
if __name__ == "__main__":
# running the module directly executes the suite; propagate unittest's exit status
import sys
sys.exit(unittest.main())
| StarcoderdataPython |
4924522 | from yunionclient.common import base
class Kubecluster(base.ResourceBase):
# Thin resource wrapper for a kube cluster entity; all behaviour is
# inherited from yunionclient's ResourceBase.
pass
class KubeclusterManager(base.StandaloneManager):
# Manager exposing kube cluster operations through the standalone-manager
# plumbing; keyword/keyword_plural drive URL and payload naming.
resource_class = Kubecluster
keyword = 'kubecluster'
keyword_plural = 'kubeclusters'
# column headers used when rendering list output
_columns = ["Name", "Id", "Status", "Cluster_Type", "Cloudregion_Id", "Vpc_Id", "Resource_Type", "Cloud_Type", "Version", "Mode", "Provider", "Machines", "Sync_Status", "Sync_Message"]
| StarcoderdataPython |
3554686 | from time import sleep
from easyprocess import EasyProcess
from pykeyboard import PyKeyboard
from pyvirtualdisplay.smartdisplay import SmartDisplay
from discogui.imgutil import getbbox, grab
VISIBLE = 0
def test_zenity():
    """Smoke-test keyboard interaction with ``zenity --warning`` dialogs.

    Keys that activate the dialog's default button (Enter, space) should close
    it, leaving a blank screen (empty bounding box); an unrelated key ('x')
    should leave the dialog visible (non-empty bounding box).

    The original repeated the same open/tap/grab block four times inline; it
    is factored into one helper parameterized by key and expected outcome.
    """

    def _press_and_check(disp, keyboard, key, dialog_stays_open):
        # One dialog round-trip: open zenity, wait for it to render, send a
        # single key, give X a moment to process it, then inspect the screen.
        with EasyProcess(["zenity", "--warning"]):
            disp.waitgrab()
            keyboard.tap_key(key)
            sleep(0.1)  # wait for processing keyboard event
            if dialog_stays_open:
                assert getbbox(grab())
            else:
                assert not getbbox(grab())

    with SmartDisplay(visible=VISIBLE) as disp:
        k = PyKeyboard()
        _press_and_check(disp, k, k.enter_key, dialog_stays_open=False)
        _press_and_check(disp, k, k.enter_key, dialog_stays_open=False)
        _press_and_check(disp, k, " ", dialog_stays_open=False)
        # 'x' does not trigger the default button, so the dialog remains open
        _press_and_check(disp, k, "x", dialog_stays_open=True)
# def test_gcalctool1():
# with SmartDisplay() as disp:
# with EasyProcess(["gnome-calculator"]):
# disp.waitgrab()
# focus_wnd()
# k = PyKeyboard()
# k.press_keys([k.control_key, "q"])
# sleep(1)
# assert not getbbox(grab())
| StarcoderdataPython |
class Solution:
    """LeetCode 2037: minimum number of unit moves to seat every student.

    Sorting both lists and pairing them index-by-index is optimal; the answer
    is the sum of |seat - student| over the sorted pairing.
    O(n log n) time, O(n) space (sorting). Both methods sort their argument
    lists in place, as the original code did.
    """

    # NOTE: the original file defined two methods with the same name, so the
    # first (explicit-loop) version was silently shadowed by the second.
    # It is preserved under a distinct private name for reference.
    def _min_moves_to_seat_loop(self, seats: List[int], students: List[int]) -> int:
        """Explicit-loop variant of :meth:`minMovesToSeat` (same result)."""
        seats.sort()
        students.sort()
        res = 0
        for i in range(len(seats)):
            res += abs(seats[i] - students[i])
        return res

    def minMovesToSeat(self, seats: List[int], students: List[int]) -> int:
        """Return the minimum total moves so each student occupies a seat."""
        seats.sort()
        students.sort()
        return sum(abs(seat - student) for seat, student in zip(seats, students))
| StarcoderdataPython |
5046597 | <filename>src-electron/main-process/python-logic/scripts/message_protocol.py<gh_stars>0
import json
from enum import Enum
import requests
from os import path as osPath, remove as removeFile
from hashlib import sha256
from re import split
class Types(Enum):
# Message categories consumed by Message.message_builder:
# INTERNAL for app-internal messages, STT for transcribed user speech.
INTERNAL = 1
STT = 2
class Message:
# Builds outgoing JSON messages and masks profanity in speech-to-text
# output, using a word list cached on disk and refreshed from GitHub.
__blocked_words = []
# remote source of the profanity word list
__bw_src = 'https://raw.githubusercontent.com/RobertJGabriel/Google-profanity-wors/master/list.txt'
# local cached copy of the word list
__bw_local_path = 'db/Google-profanity-words/list.txt'
def __init__(self):
def __check_file_integrity(local_src, remote_src):
"""
:param local_src: local txt file stored in 'self.__bw_local_path'
:param remote_src: original txt file fetched from '__self.__bw_src'
:return: 'True' if the files match and 'False' if otherwise.
this Method compares original file's hash value (sha256) with current local file to verify it's integrity
"""
fetched_bw = str([i.decode('utf-8') for i in remote_src.splitlines()]).encode("utf-8")
return sha256(fetched_bw).hexdigest() == sha256(str(local_src.read().splitlines()).encode('utf-8')).hexdigest()
try:
with open(self.__bw_local_path) as (f):
resp = requests.get(self.__bw_src)
if resp.status_code == 200 and not __check_file_integrity(f, resp.content):
raise Exception('Corrupted File: {}'.format(self.__bw_local_path))
# NOTE(review): __check_file_integrity calls local_src.read(), consuming
# the file; when the integrity check ran, this second read() returns ''
# and __blocked_words ends up empty — the except branch then never
# repairs it because no exception is raised. Confirm and fix upstream.
self.__blocked_words = f.read().splitlines()
except Exception as e:
"""
if app reaches here, means that the local file stored in 'self.__bw_local_path' is corrupted.
it will attempt to download the original list, remove current corrupt file and replace it.
"""
print(str(e))
# fall back to the remote list and rewrite the local cache from scratch
self.__blocked_words = [i.decode('utf-8') for i in requests.get(self.__bw_src).content.splitlines()]
if osPath.exists(self.__bw_local_path):
removeFile(self.__bw_local_path)
with open(self.__bw_local_path, "x") as (f):
f.write('\n'.join(self.__blocked_words))
f.close()
def message_builder(self, msg_type, stt, keywords):
"""
Args:
:param msg_type: Types.STT for handling of user interaction and
Types.INTERNAL for other purposes
:param stt: (Speech to Text) user's speech converted to text
:param keywords: keywords extracted from user's speech
:return: an Stringified JSON object containing:
msg type, the generated text from user's speech and keywords extracted from it ("type","stt","keywords")
"""
if msg_type == Types.STT:
# NOTE(review): "type" is hard-coded to 2 instead of msg_type.value
msg = {
"type": 2,
"stt": self.__sanitize(stt),
"keywords": keywords
}
# elif msg_type == Types.INTERNAL:
else:
# NOTE(review): str(msg_type) yields e.g. 'Types.INTERNAL', not a
# number — inconsistent with the numeric "type" above; confirm the
# consumer's expectation.
msg = {
"type": str(msg_type),
"text": "RANDOM TEXT :|"
}
return json.dumps(msg, separators=(',', ':'))
def __sanitize(self, text):
"""
Args:
text: the text that needs to be filtered
"""
# split on whitespace, mask blocked words keeping first/last characters,
# then reassemble with single spaces
splitted = split('\s', text)
for index, word in enumerate(splitted):
if word in self.__blocked_words:
splitted[index] = word[:1] + '@#$!%&*#!?@!$@%^@~**'[:len(word) - 2] + word[-1:]
return ' '.join(splitted)
| StarcoderdataPython |
102407 | <gh_stars>0
import os
import pdf2image
from time import sleep
class EverythingForms:
# Converts per-student PDF files (one PDF per student, grouped by school
# subject) into folders of per-page JPEG images.
def __init__(self, pdf_path, img_path):
"""
:param pdf_path: path of the folder of PDFs named after the students
:param img_path: destination folder for the generated images
- Turns each page of a PDF into an image.
- The PDF file name becomes a folder.
(docstring translated from Portuguese)
"""
self.pdf_path = pdf_path
self.img_path = img_path
pass
def list_dir(self, dire, fullname=False, endswith='.pdf', sort=True):
"""
List files in ``dire`` whose name ends with ``endswith`` (either case).
:param dire: directory path to scan
:param fullname: True -> return full paths, False -> upper-cased names only
:param endswith: extension filter (checked upper- and lower-cased)
:param sort: natural sort (digit runs compared numerically) when True
:return: list of names or full paths
NOTE(review): besides listing, this method also renames matching files on
disk to their upper-cased form via os.renames — a surprising side effect
for a "list" method; indentation was stripped from this file, so confirm
which branch the rename belongs to in the original.
"""
import os
final = []
for file in os.listdir(dire):
if file.endswith(endswith.upper()) or file.endswith(endswith.lower()):
file_fim = f'{dire}\\{file}'
if not fullname:
final.append(self.just_rename2upper(file))
else:
final.append(file_fim)
os.renames(file_fim, f'{dire}\\{self.just_rename2upper(file)}')
# input(f'test {file_fim}')
if sort:
import re
final.sort(key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
# nice, sort by names
return final
else:
return final
def complete_name(self, name, pre, materia=None):
# Join a name under a prefix, optionally inserting the subject folder.
if not materia:
final = f'{pre}\\{name}'
# here the path already includes the subject  (translated)
else:
final = f'{pre}\\{materia}\\{name}'
# here it does not include the subject yet...  (translated)
return final
def transforma_pdf_em_img_por_materia(self, materia):
# "Transform PDFs into images by subject": for every PDF of the given
# subject, render each page and save it as out-<page>.jpg under
# <img_path>\<subject>\<student>\ (folders created as needed).
searched = materia
pdf_path = self.pdf_path
if pdf_path:
list_files = self.list_dir(self.complete_name(searched, pre=pdf_path), True)
else:
# no configured path: ask the user to pick the folder interactively
from tkinter import filedialog
dale = filedialog.askdirectory(initialdir=os.path.realpath(''.join((__file__.split('\\')[:-1]))))
list_files = self.list_dir(self.complete_name(searched, pre=dale), True)
# C:\Users\Silas\Desktop\projeto-mae\Atividades_outubro
volta = os.getcwd()
for file in list_files:
pages = pdf2image.convert_from_path(file)
print(file)
os.chdir(volta)
for e, page in enumerate(pages):
e_cont = e + 1
# dir_name = '../MATERIAS_CRIA_FILES'
dir_name = self.img_path
dir_name += '\\' + searched + '\\'
# student folder = PDF file name up to the first '-'
dir_name += file.split('\\')[-1].split('-')[0]
# create the nested output folders one level at a time
for folder in dir_name.split('\\'):
try:
os.chdir(folder)
except (FileNotFoundError):
os.mkdir(folder)
os.chdir(folder)
os.chdir(volta)
real = '\\'.join(os.path.realpath(__file__).split('\\')[:-1])
page.save(f'{real}\\{dir_name}\\out-{e_cont}.jpg', 'JPEG')
print(dir_name)
@classmethod
def just_rename2upper(cls, name=None):
# Upper-case a name and replace spaces with underscores.
# NOTE(review): '<NAME>' below is an anonymization artifact from the
# dataset this file was extracted from.
if name is None:
name = '<NAME>'
name = name.upper()
if ' ' in name:
name = '_'.join(name.split())
return name
# Script entry: convert the October activity PDFs into images, one subject
# at a time (subject names are in Portuguese).
rout = EverythingForms(r'H:\mae_projeto_backup\maeportifolios_desktop_etc\Atividades_outubro_final',
'MATERIAS_CRIA_FILES-13-11-2020')
materias = ['Português', 'História', 'Geografia e Ciências', 'Matemática']
# only the last subject of the list is processed in this run
for mater in materias[-1:]:
rout.transforma_pdf_em_img_por_materia(mater)
sleep(2.5)
# rout.transforma_pdf_em_img_por_materia('Português')
# rout.transforma_pdf_em_img_por_materia('História')
# rout.transforma_pdf_em_img_por_materia('Geografia e Ciências')
# rout.transforma_pdf_em_img_por_materia('Matemática')
| StarcoderdataPython |
6555197 | <reponame>dilr/Coramin
import pyomo.environ as pyo
from pyomo.core.base.var import _GeneralVarData
from pyomo.core.base.PyomoModel import ConcreteModel
from pyomo.opt import SolverStatus, TerminationCondition as TC
import warnings
from pyomo.common.collections import ComponentMap
from pyomo.solvers.plugins.solvers.GUROBI import GUROBISHELL
from pyomo.solvers.plugins.solvers.gurobi_direct import GurobiDirect
from pyomo.solvers.plugins.solvers.gurobi_persistent import GurobiPersistent
from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL
from pyomo.solvers.plugins.solvers.cplex_direct import CPLEXDirect
from pyomo.solvers.plugins.solvers.cplex_persistent import CPLEXPersistent
from pyomo.solvers.plugins.solvers.GLPK import GLPKSHELL
from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver
from pyomo.core.kernel.objective import minimize, maximize
import logging
import traceback
import numpy as np
import math
import time
from coramin.algorithms.ecp_bounder import ECPBounder
try:
import coramin.utils.mpi_utils as mpiu
mpi_available = True
except ImportError:
mpi_available = False
try:
from tqdm import tqdm
except ImportError:
pass
logger = logging.getLogger(__name__)
# Solver classes for which _single_solve reads the tightened bound from the
# results object (results.problem.lower_bound) instead of loading the variable
# value back from the model.
_mip_solver_types = {GUROBISHELL, GurobiDirect, GurobiPersistent, CPLEXSHELL, CPLEXDirect, CPLEXPersistent, GLPKSHELL,
ECPBounder}
# solver outcomes trusted enough to accept a tightened bound
_acceptable_termination_conditions = {TC.optimal, TC.globallyOptimal}
_acceptable_solver_status = {SolverStatus.ok}
class OBBTInfo(object):
    """Bookkeeping for an OBBT run.

    Tracks how many bound-tightening sub-problems exist in total, how many
    were actually attempted, and how many solved successfully.
    """

    def __init__(self):
        # counters are populated by perform_obbt(); None until a run starts
        self.total_num_problems = None
        self.num_problems_attempted = None
        self.num_successful_problems = None
def _bt_cleanup(model, solver, vardatalist, initial_var_values, deactivated_objectives, lower_bounds=None, upper_bounds=None):
"""
Cleanup the changes made to the model during bounds tightening.
Reactivate any deactivated objectives.
Remove an objective upper bound constraint if it was added.
If lower_bounds or upper_bounds is provided, update the bounds of the variables in self.vars_to_tighten.
Parameters
----------
model: pyo.ConcreteModel or pyo.Block
solver: pyomo solver object
vardatalist: list of pyo.Var
initial_var_values: ComponentMap
deactivated_objectives: list of pyo.Objective
lower_bounds: list of float
Only needed if you want to update the bounds of the variables. Should be in the same order as
self.vars_to_tighten.
upper_bounds: list of float
Only needed if you want to update the bounds of the variables. Should be in the same order as
self.vars_to_tighten.
"""
# restore the variable values captured by _bt_prep before OBBT started
for v in model.component_data_objects(ctype=pyo.Var, active=None, sort=True, descend_into=True):
v.value = initial_var_values[v]
# remove the obj upper bound constraint
using_persistent_solver = False
if isinstance(solver, PersistentSolver):
using_persistent_solver = True
if hasattr(model, '__objective_ineq'):
if using_persistent_solver:
solver.remove_constraint(model.__objective_ineq)
del model.__objective_ineq
# reactivate the objectives that we deactivated
for obj in deactivated_objectives:
obj.activate()
if using_persistent_solver:
solver.set_objective(obj)
# push tightened bounds back onto the variables; persistent solvers must be
# told explicitly that each variable changed (update_var)
if lower_bounds is not None and upper_bounds is not None:
for i, v in enumerate(vardatalist):
lb = lower_bounds[i]
ub = upper_bounds[i]
v.setlb(lb)
v.setub(ub)
if using_persistent_solver:
solver.update_var(v)
elif lower_bounds is not None:
for i, v in enumerate(vardatalist):
lb = lower_bounds[i]
v.setlb(lb)
if using_persistent_solver:
solver.update_var(v)
elif upper_bounds is not None:
for i, v in enumerate(vardatalist):
ub = upper_bounds[i]
v.setub(ub)
if using_persistent_solver:
solver.update_var(v)
def _single_solve(v, model, solver, vardatalist, lb_or_ub, obbt_info, reset=False):
"""
Solve one bound-tightening problem: minimize v (lb) or -v (ub) over the
model's constraints, and return the tightened bound for v. The returned
bound never loosens an existing finite bound; on failure the original
bound is kept (np.nan when no original bound exists).
"""
obbt_info.num_problems_attempted += 1
# solve for lower var bound
if lb_or_ub == 'lb':
model.__obj_bounds_tightening = pyo.Objective(expr=v, sense=pyo.minimize)
else:
assert lb_or_ub == 'ub'
# maximize v by minimizing -v; the objective value is negated below
model.__obj_bounds_tightening = pyo.Objective(expr=-v, sense=pyo.minimize)
if isinstance(solver, DirectOrPersistentSolver):
if isinstance(solver, PersistentSolver):
solver.set_objective(model.__obj_bounds_tightening)
if reset:
solver.reset()
results = solver.solve(tee=False, load_solutions=False, save_results=False)
else:
results = solver.solve(model, tee=False, load_solutions=False, save_results=False)
if ((results.solver.status in _acceptable_solver_status) and
(results.solver.termination_condition in _acceptable_termination_conditions)):
obbt_info.num_successful_problems += 1
if type(solver) in _mip_solver_types:
# MIP-capable interfaces report the bound on the results object
if lb_or_ub == 'lb':
new_bnd = results.problem.lower_bound
else:
new_bnd = -results.problem.lower_bound
else:
# otherwise load the variable value back from the solver
solver.load_vars([v])
new_bnd = pyo.value(v.value)
elif isinstance(solver, ECPBounder):
# ECPBounder may terminate "unsuccessfully" yet still provide a valid
# finite bound; accept it when available
if lb_or_ub == 'lb':
if results.problem.lower_bound is not None and math.isfinite(results.problem.lower_bound):
obbt_info.num_successful_problems += 1
new_bnd = results.problem.lower_bound
else:
new_bnd = None
msg = 'Warning: Bounds tightening for lb for var {0} was unsuccessful. Termination condition: {1}; The lb was not changed.'.format(
v, results.solver.termination_condition)
logger.warning(msg)
else:
if results.problem.upper_bound is not None and math.isfinite(results.problem.upper_bound):
obbt_info.num_successful_problems += 1
new_bnd = results.problem.upper_bound
else:
new_bnd = None
# NOTE(review): message says 'lb' even though this is the ub case
msg = 'Warning: Bounds tightening for lb for var {0} was unsuccessful. Termination condition: {1}; The lb was not changed.'.format(v, results.solver.termination_condition)
logger.warning(msg)
else:
new_bnd = None
# NOTE(review): message hard-codes 'lb' regardless of lb_or_ub
msg = 'Warning: Bounds tightening for lb for var {0} was unsuccessful. Termination condition: {1}; The lb was not changed.'.format(v, results.solver.termination_condition)
logger.warning(msg)
else:
# non-direct (shell) solver interface: solutions must be loaded from results
results = solver.solve(model, tee=False, load_solutions=False)
if ((results.solver.status in _acceptable_solver_status) and
(results.solver.termination_condition in _acceptable_termination_conditions)):
obbt_info.num_successful_problems += 1
if type(solver) in _mip_solver_types:
if lb_or_ub == 'lb':
new_bnd = results.problem.lower_bound
else:
new_bnd = -results.problem.lower_bound
else:
model.solutions.load_from(results)
new_bnd = pyo.value(v.value)
else:
new_bnd = None
# NOTE(review): message hard-codes 'lb' regardless of lb_or_ub
msg = 'Warning: Bounds tightening for lb for var {0} was unsuccessful. Termination condition: {1}; The lb was not changed.'.format(v, results.solver.termination_condition)
logger.warning(msg)
# never loosen an existing bound; fall back to the original on failure
if lb_or_ub == 'lb':
orig_lb = pyo.value(v.lb)
if new_bnd is None:
new_bnd = orig_lb
elif v.has_lb():
if new_bnd < orig_lb:
new_bnd = orig_lb
else:
orig_ub = pyo.value(v.ub)
if new_bnd is None:
new_bnd = orig_ub
elif v.has_ub():
if new_bnd > orig_ub:
new_bnd = orig_ub
if new_bnd is None:
# Need nan instead of None for MPI communication; This is appropriately handled in perform_obbt().
new_bnd = np.nan
# remove the objective function
del model.__obj_bounds_tightening
return new_bnd
def _tighten_bnds(model, solver, vardatalist, lb_or_ub, obbt_info, with_progress_bar=False, reset=False, time_limit=math.inf):
"""
Tighten the lower bounds of all variables in vardatalist (or self.vars_to_tighten if vardatalist is None).
Parameters
----------
model: pyo.ConcreteModel or pyo.Block
solver: pyomo solver object
vardatalist: list of _GeneralVarData
lb_or_ub: str
'lb' or 'ub'
time_limit: float
Returns
-------
new_bounds: list of float
"""
# solve for the new bounds
t0 = time.time()
new_bounds = list()
obbt_info.total_num_problems += len(vardatalist)
if with_progress_bar:
if lb_or_ub == 'lb':
bnd_str = 'LBs'
else:
bnd_str = 'UBs'
# under MPI, offset each rank's tqdm bar so they do not overwrite each other
if mpi_available:
tqdm_position = mpiu.MPI.COMM_WORLD.Get_rank()
else:
tqdm_position = 0
for v in tqdm(vardatalist, ncols=100, desc='OBBT '+bnd_str, leave=False, position=tqdm_position):
# once the time limit is hit, keep the variable's current bound
# (np.nan stands in for a missing bound, as in _single_solve)
if time.time() - t0 > time_limit:
if lb_or_ub == 'lb':
if v.lb is None:
new_bounds.append(np.nan)
else:
new_bounds.append(pyo.value(v.lb))
else:
if v.ub is None:
new_bounds.append(np.nan)
else:
new_bounds.append(pyo.value(v.ub))
else:
new_bnd = _single_solve(v=v, model=model, solver=solver,
vardatalist=vardatalist,
lb_or_ub=lb_or_ub,
obbt_info=obbt_info, reset=reset)
new_bounds.append(new_bnd)
else:
# same loop without the progress bar
for v in vardatalist:
if time.time() - t0 > time_limit:
if lb_or_ub == 'lb':
if v.lb is None:
new_bounds.append(np.nan)
else:
new_bounds.append(pyo.value(v.lb))
else:
if v.ub is None:
new_bounds.append(np.nan)
else:
new_bounds.append(pyo.value(v.ub))
else:
new_bnd = _single_solve(v=v, model=model, solver=solver,
vardatalist=vardatalist, lb_or_ub=lb_or_ub,
obbt_info=obbt_info, reset=reset)
new_bounds.append(new_bnd)
return new_bounds
def _bt_prep(model, solver, objective_bound=None):
"""
Prepare the model for bounds tightening.
Gather the variable values to load back in after bounds tightening.
Deactivate any active objectives.
If objective_ub is not None, then add a constraint forcing the objective to be less than objective_ub
Parameters
----------
model : pyo.ConcreteModel or pyo.Block
The model object that will be used for bounds tightening.
objective_bound : float
The objective value for the current best upper bound incumbent
Returns
-------
initial_var_values: ComponentMap
deactivated_objectives: list
"""
if isinstance(solver, PersistentSolver):
solver.set_instance(model)
# snapshot variable values so _bt_cleanup can restore them afterwards
initial_var_values = ComponentMap()
for v in model.component_data_objects(ctype=pyo.Var, active=None, sort=True, descend_into=True):
initial_var_values[v] = v.value
deactivated_objectives = list()
for obj in model.component_data_objects(pyo.Objective, active=True, sort=True, descend_into=True):
deactivated_objectives.append(obj)
obj.deactivate()
# add inequality bound on objective functions if required
# obj.expr <= objective_ub
if objective_bound is not None and math.isfinite(objective_bound):
if len(deactivated_objectives) != 1:
e = 'BoundsTightener: When providing objective_ub,' + \
' the model must have one and only one objective function.'
logger.error(e)
raise ValueError(e)
original_obj = deactivated_objectives[0]
# direction of the cut depends on the objective sense
if original_obj.sense == minimize:
model.__objective_ineq = \
pyo.Constraint(expr=original_obj.expr <= objective_bound)
else:
assert original_obj.sense == maximize
model.__objective_ineq = pyo.Constraint(expr=original_obj.expr >= objective_bound)
if isinstance(solver, PersistentSolver):
solver.add_constraint(model.__objective_ineq)
return initial_var_values, deactivated_objectives
def _build_vardatalist(model, varlist=None, warning_threshold=0):
"""
Convert a list of pyomo variables to a list of SimpleVar and _GeneralVarData. If varlist is none, builds a
list of all variables in the model. The new list is stored in the vars_to_tighten attribute.
Parameters
----------
model: ConcreteModel
varlist: None or list of pyo.Var
warning_threshold: float
The threshold below which a warning is raised when attempting to perform OBBT on variables whose
ub - lb < warning_threshold.
"""
vardatalist = None
# if the varlist is None, then assume we want all the active variables
# NOTE(review): contrary to the docstring, the all-variables case is not
# implemented yet and raises NotImplementedError.
if varlist is None:
raise NotImplementedError('Still need to do this.')
elif isinstance(varlist, pyo.Var):
# user provided a variable, not a list of variables. Let's work with it anyway
varlist = [varlist]
if vardatalist is None:
# expand any indexed components in the list to their
# component data objects
vardatalist = list()
for v in varlist:
if v.is_indexed():
vardatalist.extend(v.values())
else:
vardatalist.append(v)
# remove from vardatalist if the variable is fixed (maybe there is a better way to do this)
corrected_vardatalist = []
for v in vardatalist:
if not v.is_fixed():
# warn when an already-tight variable is about to be tightened;
# only checked when both bounds exist
if v.has_lb() and v.has_ub():
if v.ub - v.lb < warning_threshold:
e = 'Warning: Tightening a variable with ub - lb is less than {threshold}: {v}, lb: {lb}, ub: {ub}'.format(threshold=warning_threshold, v=v, lb=v.lb, ub=v.ub)
logger.warning(e)
warnings.warn(e)
corrected_vardatalist.append(v)
return corrected_vardatalist
def perform_obbt(model, solver, varlist=None, objective_bound=None, update_bounds=True, with_progress_bar=False,
                 direction='both', reset=False, time_limit=math.inf, parallel=True, collect_obbt_info=False,
                 warning_threshold=0):
    """
    Perform optimization-based bounds tighening on the variables in varlist subject to the constraints in model.
    Parameters
    ----------
    model: pyo.ConcreteModel or pyo.Block
        The model to be used for bounds tightening
    solver: pyomo solver object
        The solver to be used for bounds tightening.
    varlist: list of pyo.Var
        The variables for which OBBT should be performed. If varlist is None, then we attempt to automatically
        detect which variables need tightened.
    objective_bound: float
        A lower or upper bound on the objective. If this is not None, then a constraint will be added to the
        bounds tightening problems constraining the objective to be less than/greater than objective_bound.
    update_bounds: bool
        If True, then the variable bounds will be updated
    with_progress_bar: bool
    direction: str
        Options are 'both', 'lbs', or 'ubs'
    reset: bool
        This is an option specifically for use with persistent solvers. If reset
        is True, then any simplex warmstart will be discarded between solves.
    time_limit: float
        The maximum amount of time to be spent performing OBBT
    parallel: bool
        If True, then OBBT will automatically be performed in parallel if mpirun or mpiexec was used;
        If False, then OBBT will not run in parallel even if mpirun or mpiexec was used;
    warning_threshold: float
        The threshold below which a warning is issued when attempting to perform OBBT on variables whose
        ub - lb < warning_threshold.
    Returns
    -------
    lower_bounds: list of float
    upper_bounds: list of float
    obbt_info: OBBTInfo
    """
    obbt_info = OBBTInfo()
    obbt_info.total_num_problems = 0
    obbt_info.num_problems_attempted = 0
    obbt_info.num_successful_problems = 0
    t0 = time.time()
    # Prepare the model (save initial values, deactivate objectives, add the
    # optional objective-bound constraint) and collect the variables to tighten.
    initial_var_values, deactivated_objectives = _bt_prep(model=model, solver=solver, objective_bound=objective_bound)
    vardata_list = _build_vardatalist(model=model, varlist=varlist, warning_threshold=warning_threshold)
    # Under MPI, partition the variable list so each rank tightens a subset.
    if mpi_available and parallel:
        mpi_interface = mpiu.MPIInterface()
        alloc_map = mpiu.MPIAllocationMap(mpi_interface, len(vardata_list))
        local_vardata_list = alloc_map.local_list(vardata_list)
    else:
        local_vardata_list = vardata_list
    exc = None
    try:
        # Tighten lower bounds unless direction says to skip them.
        if direction in {'both', 'lbs'}:
            local_lower_bounds = _tighten_bnds(model=model, solver=solver,
                                               vardatalist=local_vardata_list,
                                               lb_or_ub='lb',
                                               obbt_info=obbt_info,
                                               with_progress_bar=with_progress_bar,
                                               reset=reset,
                                               time_limit=(time_limit - (time.time() - t0)))
        else:
            # Not tightening: report the existing bounds, using NaN to mark
            # "no bound" so the values survive the float64 MPI gather below.
            local_lower_bounds = list()
            for v in local_vardata_list:
                if v.lb is None:
                    local_lower_bounds.append(np.nan)
                else:
                    local_lower_bounds.append(pyo.value(v.lb))
        if direction in {'both', 'ubs'}:
            local_upper_bounds = _tighten_bnds(model=model, solver=solver,
                                               vardatalist=local_vardata_list,
                                               lb_or_ub='ub',
                                               obbt_info=obbt_info,
                                               with_progress_bar=with_progress_bar,
                                               reset=reset,
                                               time_limit=(time_limit - (time.time() - t0)))
        else:
            local_upper_bounds = list()
            for v in local_vardata_list:
                if v.ub is None:
                    local_upper_bounds.append(np.nan)
                else:
                    local_upper_bounds.append(pyo.value(v.ub))
        status = 1
        msg = None
    except Exception as err:
        # Defer raising: under MPI every rank must learn about the failure
        # first (see the Allgatherv below), otherwise ranks diverge.
        exc = err
        tb = traceback.format_exc()
        status = 0
        msg = str(tb)
    if mpi_available and parallel:
        # Share each rank's success/failure flag so all ranks raise together.
        local_status = np.array([status], dtype='i')
        global_status = np.array([0 for i in range(mpiu.MPI.COMM_WORLD.Get_size())], dtype='i')
        mpiu.MPI.COMM_WORLD.Allgatherv([local_status, mpiu.MPI.INT], [global_status, mpiu.MPI.INT])
        if not np.all(global_status):
            messages = mpi_interface.comm.allgather(msg)
            msg = None
            for m in messages:
                if m is not None:
                    msg = m
            logger.error('An error was raised in one or more processes:\n' + msg)
            raise mpiu.MPISyncError('An error was raised in one or more processes:\n' + msg)
    else:
        if status != 1:
            logger.error('An error was raised during OBBT:\n' + msg)
            raise exc
    if mpi_available and parallel:
        # Gather per-rank bound lists and problem counters onto every rank.
        global_lower = alloc_map.global_list_float64(local_lower_bounds)
        global_upper = alloc_map.global_list_float64(local_upper_bounds)
        obbt_info.total_num_problems = mpiu.MPI.COMM_WORLD.allreduce(obbt_info.total_num_problems)
        obbt_info.num_problems_attempted = mpiu.MPI.COMM_WORLD.allreduce(obbt_info.num_problems_attempted)
        obbt_info.num_successful_problems = mpiu.MPI.COMM_WORLD.allreduce(obbt_info.num_successful_problems)
    else:
        global_lower = local_lower_bounds
        global_upper = local_upper_bounds
    # Convert the NaN placeholders back to None ("no bound").
    tmp = list()
    for i in global_lower:
        if np.isnan(i):
            tmp.append(None)
        else:
            tmp.append(float(i))
    global_lower = tmp
    tmp = list()
    for i in global_upper:
        if np.isnan(i):
            tmp.append(None)
        else:
            tmp.append(float(i))
    global_upper = tmp
    _lower_bounds = None
    _upper_bounds = None
    if update_bounds:
        _lower_bounds = global_lower
        _upper_bounds = global_upper
    # Restore the model state and (optionally) apply the new bounds.
    _bt_cleanup(model=model, solver=solver, vardatalist=vardata_list, initial_var_values=initial_var_values,
                deactivated_objectives=deactivated_objectives, lower_bounds=_lower_bounds, upper_bounds=_upper_bounds)
    if collect_obbt_info:
        return global_lower, global_upper, obbt_info
    else:
        return global_lower, global_upper
| StarcoderdataPython |
3501059 | <filename>openai/api_resources/abstract/__init__.py
from __future__ import absolute_import, division, print_function
# flake8: noqa
from openai.api_resources.abstract.api_resource import APIResource
from openai.api_resources.abstract.singleton_api_resource import (
SingletonAPIResource,
)
from openai.api_resources.abstract.createable_api_resource import (
CreateableAPIResource,
)
from openai.api_resources.abstract.updateable_api_resource import (
UpdateableAPIResource,
)
from openai.api_resources.abstract.deletable_api_resource import (
DeletableAPIResource,
)
from openai.api_resources.abstract.listable_api_resource import (
ListableAPIResource,
)
from openai.api_resources.abstract.verify_mixin import VerifyMixin
from openai.api_resources.abstract.custom_method import custom_method
from openai.api_resources.abstract.nested_resource_class_methods import (
nested_resource_class_methods,
)
| StarcoderdataPython |
327606 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import subprocess
import tempfile
import logging
def run_command(command):
    """Run *command* with no stdin data; thin wrapper for the common case.

    Returns True on exit status 0, False otherwise.
    """
    return run_command_with_input_data(command)
def run_command_with_input_data(command, input_data = None):
    """Run *command* (argv list), optionally feeding *input_data* to stdin.

    Returns True when the process exits with status 0, False otherwise
    (including when spawning the process fails). Output is only logged.
    """
    try:
        logging.debug("About to execute %s", ' '.join(command))
        process = subprocess.Popen(command, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds = True)
        # communicate() drains stdout/stderr fully, avoiding pipe deadlocks.
        stdout, stderr = process.communicate(input=input_data)
        return_code = process.wait()
        if return_code != 0:
            logging.error('Failed running command %s :\n Return code : %d\n StdOut: %s\n StdErr: %s', ' '.join(command), return_code, stdout, stderr)
            return False
        elif logging.root.isEnabledFor(logging.DEBUG):
            logging.debug('Succeful running command %s :\n Return code : %d\n StdOut: %s\n StdErr: %s', ' '.join(command), return_code, stdout, stderr)
        return True
    except Exception, why:
        # Python 2 except syntax (this module is Python 2 code).
        logging.error('Failed running command %s : %s', ' '.join(command), why)
        return False
def run_command_with_input_files(command, input_filenames = []):
    """Run *command*, streaming the contents of *input_filenames* to its stdin.

    stdout/stderr are redirected to temporary files (instead of pipes) so
    large outputs cannot fill the pipe buffers while stdin is being written.
    Returns True on exit status 0, False otherwise.

    NOTE(review): the mutable default argument [] is shared across calls; it
    is only read here, so it is harmless, but a None default would be safer.
    """
    # We redirect the stdout and stderr to temporary files
    try:
        stdout_file = tempfile.TemporaryFile()
        stderr_file = tempfile.TemporaryFile()
    except Exception, why:
        logging.critical('Failed creating temporary files for stout and stderr : %s', why)
        return False
    try:
        logging.debug("About to execute %s", ' '.join(command))
        process = subprocess.Popen(command, bufsize = 1024 * 1024, shell=False, stdin=subprocess.PIPE, stdout=stdout_file, stderr=stderr_file, close_fds = True)
        # We write the content of the input files to the stdin of the process
        for input_filename in input_filenames:
            try:
                with open(input_filename, 'rb') as input_file:
                    logging.debug("Writing input file %s to process stdin", input_filename)
                    # NOTE(review): read() loads each file fully into memory;
                    # chunked writes would be safer for very large inputs.
                    process.stdin.write(input_file.read())
            except Exception, why:
                logging.error("Error writing input file %s to process stdin: %s", input_filename, why)
        # Close stdin so the child sees EOF, then wait for it to finish.
        process.stdin.close()
        # We wait for the process to terminate
        logging.debug("Waiting to terminate process for: %s", ' '.join(command))
        return_code = process.wait()
        # Read the stdout and stderr content
        stdout_file.seek(0)
        stdout = stdout_file.read()
        stdout_file.close()
        stderr_file.seek(0)
        stderr = stderr_file.read()
        stderr_file.close()
        if return_code != 0:
            logging.error('Failed running command %s :\n Return code : %d\n StdOut: %s\n StdErr: %s', ' '.join(command), return_code, stdout, stderr)
            return False
        elif logging.root.isEnabledFor(logging.DEBUG):
            logging.debug('Succeful running command %s :\n Return code : %d\n StdOut: %s\n StdErr: %s', ' '.join(command), return_code, stdout, stderr)
        return True
    except Exception, why:
        logging.error('Failed running command %s : %s', ' '.join(command), why)
        return False
| StarcoderdataPython |
3289405 | <reponame>ruanyangry/Spark-ML-study
# _*_ coding:utf-8 _*_
'''
GaussianMixture
'''
from pyspark.sql import SparkSession
from pyspark.ml.clustering import GaussianMixture
# Build (or reuse) a SparkSession for this example job.
spark = SparkSession.builder.appName("GaussianMixture").getOrCreate()
# Path to Spark's bundled MLlib sample data (cluster-specific location).
paths="/export/home/ry/spark-2.2.1-bin-hadoop2.7/data/mllib/"
data=spark.read.format("libsvm").load(paths+"sample_kmeans_data.txt")
# Fit a 2-component Gaussian mixture model to the sample data.
gmm=GaussianMixture().setK(2)
model=gmm.fit(data)
print("Gaussian: ")
# Display the fitted mean/covariance of each mixture component.
model.gaussiansDF.show()
| StarcoderdataPython |
3205230 | <reponame>todhm/wicarproject
from mongoengine import *
import json
class UserAgent(EmbeddedDocument):
    """Embedded document storing the parsed pieces of a request User-Agent."""
    browser = StringField()
    language = StringField()
    platform = StringField()
    string = StringField()  # the raw, unparsed User-Agent header value
    version = StringField()
class Tracking(Document):
    """MongoDB document recording one HTTP request/response for auditing.

    Stored in a capped collection ('max_documents' in meta) so old entries
    are discarded automatically once the cap is reached.
    """
    #session_key = models.CharField(max_length=40, null=True, blank=True, db_index=True)
    date_created = DateTimeField()
    host = StringField()
    path = StringField()
    query_params = StringField()
    ip = StringField()
    user = GenericReferenceField()
    user_agent = EmbeddedDocumentField(UserAgent)
    method = StringField()
    request_headers = ListField()
    request_body = BinaryField()
    status_code = IntField()
    response_headers = ListField()
    # Execution time in ms
    execution_time = IntField()
    # System hostname
    hostname = StringField()
    custom_data = DynamicField()
    meta = {
        'max_documents': 10**6, # 1 million
    }
    def user_repr(self):
        """Return the referenced user's id, or '-' when no user is attached."""
        if self._data['user']:
            if isinstance(self._data['user'], dict):
                # Lazy references come through as a dict holding a DBRef.
                return self._data['user']['_ref'].id
            else:
                return self.user.id
        else:
            return '-'
    def __unicode__(self):
        """One-line human-readable summary (Python 2 unicode protocol)."""
        return '{id} {date} {method} {user} {path}{query} {status} ({time} ms)'.format(
            id=self.id,
            date=self.date_created.strftime('%Y-%m-%d %H:%M:%S.%f'),
            method=self.method,
            user=self.user_repr(),
            path=self.path,
            query=self.query_params and '?%s' % self.query_params or '',
            status=self.status_code,
            time=self.execution_time)
    def debug(self):
        """Return a multi-line dump of the request and response for debugging."""
        ret = '%s %s%s%s\n' % (self.method, self.host, self.path, self.query_params and '?%s' % self.query_params or '')
        ret += 'REQUEST:\n'
        ret += self.format_headers(self.request_headers) + '\n'
        ret += '%s RESPONSE:\n' % self.status_code
        ret += self.format_headers(self.response_headers) + '\n'
        # NOTE(review): response_body is not declared as a field on this
        # document -- confirm it exists (possibly a legacy/removed field).
        ret += self.format_body(self.response_body)
        return ret
    def get_header(self, name, default=''):
        """Look up a request header by exact name, returning *default* if absent."""
        return { h[0]: h[1] for h in self.request_headers }.get(name, default)
    def replay(self):
        """Re-issue the recorded request against the current Flask app.

        Uses the test client, so nothing goes over the network; returns the
        Flask response object.
        """
        from flask import current_app
        client = current_app.test_client()
        # Make sure we don't send invalid cookies.
        client.cookie_jar.clear()
        full_path = self.path + ('?'+self.query_params if self.query_params else '')
        method_func = getattr(client, self.method.lower())
        return method_func(
            full_path,
            headers=self.request_headers,
            data=self.request_body,
            content_type=dict(self.request_headers)['Content-Type']
        )
    @staticmethod
    def format_body(inpt):
        """Format an HTTP body as JSON if possible, otherwise return string"""
        try:
            return json.dumps(json.loads(inpt.decode('utf8')), indent=4)
        except ValueError:
            return repr(inpt)
    @staticmethod
    def format_headers(headers):
        """Render (name, value) header pairs, truncating values over 100 chars."""
        return '\n'.join([' %s: %s' % (h[0], h[1] if len(h[1]) < 100 else '%s...' % h[1][:100]) for h in headers])
| StarcoderdataPython |
1862904 | <reponame>crcresearch/GOS
import numpy as np
import pandas as pd
from constants import MIN_POPULATION, POPULATION_SCALE
COUNTRY_COLS = ["Population", "GDP", "Unemployment", "Conflict",
"Fertility", "FullName", "neighbors"]
def csv_path(name):
    """Return the relative path of *name* inside the CSV data directory."""
    return "./data/{}".format(name)
def country_codes():
    """Read country names and ISO codes.

    keep_default_na=False preserves strings like 'NA' (Namibia) that pandas
    would otherwise parse as missing values.
    """
    table = pd.read_csv(csv_path("country-codes.csv"), usecols=[1, 3, 4],
                        index_col=2, keep_default_na=False)
    return table
def codemap():
    """Read the country-name-to-code mapping table (names_to_codes.csv)."""
    return pd.read_csv(csv_path("names_to_codes.csv"))
def population():
    """
    Read the population for each country, scaled by POPULATION_SCALE.
    """
    frame = pd.read_csv(csv_path("Population.csv"),
                        index_col=0,
                        dtype={"Population": np.uint32})
    frame["Population"] = (frame["Population"] * POPULATION_SCALE).astype("uint32")
    return frame
def gdp():
    """
    Read the GDP for each country into a single 'GDP' column.
    """
    frame = pd.read_csv(csv_path("UN_GDP.csv"), index_col=0, usecols=[0, 3],
                        dtype={"Value": np.float32})
    frame.columns = ["GDP"]
    return frame
def employment():
    """
    Read the unemployment figures for each country.
    """
    frame = pd.read_csv(csv_path("CIA_Unemployment.csv"), usecols=[1, 2], index_col=0)
    return frame
# Translation table for country names that do not match the ones used
# elsewhere in the data files (keys are the variants found in the raw CSVs,
# values are the canonical names). Some keys contain mis-encoded characters
# exactly as they appear in the source data -- do not "fix" them here.
# TODO: Use ISO3 codes for each country and map their names to their codes using a CSV.
alt_names = {
    "Russia (Soviet Union)": "Russia",
    "Hyderabad": "India",
    "North Yemen": "Yemen",
    "South Yemen": "Yemen",
    "North Vietnam": "Vietnam",
    "South Vietnam": "Vietnam",
    "Democratic Republic of Congo (Zaire)": "Democratic Republic of the Congo",
    "Sri Lanka (Ceylon)": "Sri Lanka",
    "Zimbabwe (Rhodesia)": "Zimbabwe",
    "Turkey/Ottoman Empire": "Turkey",
    "Yugoslavia (Serbia)": "Serbia",
    "Cote DÕIvoire": "Cote d'Ivoire",
    "Rumania": "Romania",
    "United States of America": "United States",
    "Myanmar (Burma)": "Myanmar",
    "DR Congo (Zaire)": "Democratic Republic of the Congo",
    "Yemen (North Yemen)": "Yemen",
    "Cambodia (Kampuchea)": "Cambodia",
    "Vietnam (North Vietnam)": "Vietnam",
    "Bosnia-Herzegovina": "Bosnia and Herzegovina",
    "Serbia (Yugoslavia)": "Serbia",
    "FYR": "Macedonia",
    "Madagascar (Malagasy)": "Madagascar",
    "Hyderabadh": "India",
    "Yemen, Rep.": "Yemen",
    "Venezuela, RB": "Venezuela",
    "Syrian Arab Republic": "Syria",
    "Slovak Republic": "Slovakia",
    "Russian Federation": "Russia",
    "Korea, Rep.": "South Korea",
    "Korea, Dem. People’s Rep.": "North Korea",
    "Macedonia, FYR": "Macedonia",
    "Lao PDR": "Laos",
    "Kyrgyz Republic": "Kyrgyzstan",
    "Iran, Islamic Rep.": "Iran"
}
def conflict():
    """
    Read and calculate conflict scores for each country.

    Score per conflict row: (Year - 1946) / 10 * IntensityLevel**2, i.e.
    recent, more intense conflicts weigh more; the per-country score is the
    sum over all conflicts the country was a location of.
    """
    conflict_csv = pd.read_csv(csv_path("ucdp-prio-acd-4-2016.csv"), usecols=[0, 1, 9, 10])
    # "Location" may list several countries per conflict; split and normalize
    # each name through the alt_names translation table.
    conflict_csv["Location"] = conflict_csv["Location"].str.split(', ')
    conflict_csv["Location"] = conflict_csv["Location"].map(
        lambda y: [alt_names[x].strip() if x in alt_names else x for x in y])
    conflict_csv["Conflict"] = ((conflict_csv["Year"] - 1946) / 10 *
                                conflict_csv["IntensityLevel"] ** 2)
    # Explode the location lists into one row per (country, score) pair,
    # then sum the scores per country.
    conflict_data = (pd.DataFrame(conflict_csv.Location.tolist(), index=conflict_csv.Conflict)
                     .stack().reset_index(level=1, drop=True)
                     .reset_index(name='Location')[['Location', 'Conflict']]
                     .groupby("Location").sum())
    return conflict_data
def neighbors():
    """
    Read the neighbors for each country.

    Returns a DataFrame with a 'Code' column and a 'neighbors' column whose
    values are lists of neighboring country codes.
    """
    neighbors_csv = pd.read_csv(csv_path("mledoze-countries.csv"), sep=';',
                                usecols=[4, 17])
    neighbors_csv.columns = ["Code", "neighbors"]
    neighbors_csv["neighbors"] = neighbors_csv["neighbors"].str.split(',')
    # Countries with no listed borders parse as NaN; replace with empty lists.
    for row in neighbors_csv.loc[neighbors_csv.neighbors.isnull(), 'neighbors'].index:
        neighbors_csv.at[row, 'neighbors'] = []
    # Island nations are a weird exception: give them hand-picked "neighbors"
    # (nearby countries) so the later neighbor-based imputation can work.
    neighbors_csv.loc[neighbors_csv.Code == "MDG", "neighbors"] = [["MOZ", "ZAF", "TZA"]]
    neighbors_csv.loc[neighbors_csv.Code == "TWN", "neighbors"] = [["CHN", "PHL"]]
    neighbors_csv.loc[neighbors_csv.Code == "AUS", "neighbors"] = [["NZL"]]
    neighbors_csv.loc[neighbors_csv.Code == "NZL", "neighbors"] = [["AUS"]]
    neighbors_csv.loc[neighbors_csv.Code == "JPN", "neighbors"] = [["TWN", "KOR", "PHL"]]
    neighbors_csv.loc[neighbors_csv.Code == "PHL", "neighbors"] = [["TWN", "KOR", "JPN"]]
    neighbors_csv.loc[neighbors_csv.Code == "PRI", "neighbors"] = [["DOM"]]
    neighbors_csv.loc[neighbors_csv.Code == "SGP", "neighbors"] = [["MYS", "IDN"]]
    neighbors_csv.loc[neighbors_csv.Code == "JAM", "neighbors"] = [["CUB", "DOM"]]
    return neighbors_csv
def fertility():
    """
    Read the fertility rate for each country.
    """
    frame = pd.read_csv(csv_path("attachment.csv"), usecols=[1, 7], index_col=0)
    frame.columns = ["Fertility"]
    return frame
def net_migration():
    """
    Read net migration for all countries from 2007-2012.
    """
    frame = pd.read_csv(csv_path("API_SM.POP.NETM_DS2_en_csv_v2.csv"),
                        usecols=[0, 56], header=2, index_col=0).dropna()
    frame.columns = ["Net Migration"]
    # Normalize country names through the alt_names translation table.
    frame.index = frame.index.map(lambda name: alt_names.get(name, name))
    return frame
def all(fill_nan=True):
    """
    Join all data into a single DataFrame indexed by country code.

    NOTE: this function shadows the builtin all(); the name is kept for
    backward compatibility with existing callers.

    Parameters
    ----------
    fill_nan : bool
        When True, missing Conflict/Unemployment/GDP/Fertility values are
        estimated from the mean of each country's neighbors.
    """
    df = population().merge(codemap(), left_index=True, right_on='Name')
    df = df[df.Population > MIN_POPULATION * POPULATION_SCALE]
    df.columns = ["Population", "Code", "FullName"]
    for g in map(lambda e: e().merge(codemap(), left_index=True, right_on='Name'),
                 [employment, conflict, fertility, gdp]):
        df = df.merge(g, on='Code', how='left')
    df = df.merge(neighbors(), how='left', on='Code')
    df = df.set_index(["Code"])
    df = df[COUNTRY_COLS]
    df = df.sort_values("FullName")
    if not fill_nan:
        return df
    # Some countries are missing data. We will guess this data using that of
    # neighboring countries. Two passes so a country whose neighbors were
    # also missing data can be filled from the first pass's estimates.
    missing_cols = ["Conflict", "Unemployment", "GDP", "Fertility"]
    for _ in range(2):
        for column in missing_cols:
            # .items() / .at replace Series.iteritems() and
            # DataFrame.set_value(), which were removed from pandas
            # (set_value in 1.0, iteritems in 2.0).
            for item, frame in df[df[column].isnull()]["neighbors"].items():
                df.at[item, column] = df[df.index.isin(frame)][column].mean()
    return df
if __name__ == "__main__":
    # Smoke test: print the combined country DataFrame when run as a script.
    print(all())
| StarcoderdataPython |
class Solution(object):
    """Count of Range Sum (LeetCode 327).

    Counts pairs (i, j), i <= j, whose subarray sum nums[i..j] lies in
    [lower, upper], via merge sort over prefix sums in O(n log n).
    """
    def countRangeSum(self, nums, lower, upper):
        """
        :type nums: List[int]
        :type lower: int
        :type upper: int
        :rtype: int
        """
        n = len(nums)
        # sums[i] is the sum of the first i elements of nums.
        sums = [0] * (n + 1)
        for i in range(0, n):
            sums[i + 1] = sums[i] + nums[i]
        return self.count_while_merge_sort(sums, 0, n + 1, lower, upper)
    def count_while_merge_sort(self, sums, start, end, lower, upper):
        """Sort sums[start:end) in place and return the number of pairs
        (i, j), i < j, with lower <= sums[j] - sums[i] <= upper."""
        if end - start <= 1:
            return 0
        # Floor division keeps indices integral on Python 3 as well;
        # the previous '/' produced a float there and broke indexing.
        mid = start + (end - start) // 2
        count = self.count_while_merge_sort(sums, start, mid, lower, upper) + \
            self.count_while_merge_sort(sums, mid, end, lower, upper)
        j = k = t = mid
        cache = [0] * (end - start)
        r = 0
        for i in range(start, mid):
            # k: first right-half index with sums[k] - sums[i] >= lower
            while k < end and sums[k] - sums[i] < lower:
                k += 1
            # j: first right-half index with sums[j] - sums[i] > upper
            while j < end and sums[j] - sums[i] <= upper:
                j += 1
            # Merge step: copy smaller right-half values ahead of sums[i].
            while t < end and sums[t] < sums[i]:
                cache[r] = sums[t]
                r += 1
                t += 1
            cache[r] = sums[i]
            count += j - k
            r += 1
        # Copy the merged prefix back into sums.
        j = 0
        for i in range(start, t):
            sums[i] = cache[j]
            j += 1
        return count
# Manual sanity check (Python 2 print statement); expected output: 3.
print Solution().countRangeSum([-2, 5, -1], -2, 2)
| StarcoderdataPython |
class Node:
    """Singly-linked-list node holding *data* and a *next* pointer."""
    __slots__ = 'next', 'data'
    def __init__(self, data):
        self.data = data
        self.next = None
class Queue:
    """First-in-first-out queue backed by a singly linked list.

    Operations:
      - enqueue(data) -- O(1): append data at the tail, returns new size
      - dequeue()     -- O(1): remove and return data from the head
      - to_list()     -- O(n): list representation, used for debugging
    """
    __slots__ = 'first', 'last', 'size'
    def __init__(self):
        self.first = None
        self.last = None
        self.size = 0
    def to_list(self):
        """
        Converts the Queue into a python native datatype list
        O(N)
        @return {list}
        """
        items = []
        node = self.first
        while node is not None:
            items.append(node.data)
            node = node.next
        return items
    def enqueue(self, data):
        """
        Enter data at the end of the queue
        O(1)
        @param data {*} data to enqueue
        @return {number} current size of queue
        """
        node = Node(data)
        if self.first is None:
            # Empty queue: the new node becomes the head as well.
            self.first = node
        else:
            self.last.next = node
        # In either case the new node is now the tail.
        self.last = node
        self.size += 1
        return self.size
    def dequeue(self):
        """
        Remove and return the data at the front of the queue
        O(1)
        @return {*} dequeued data, or None when the queue is empty
        """
        head = self.first
        if head is None:
            return None
        if head is self.last:
            # Removing the only node empties the queue entirely.
            self.last = None
        self.first = head.next
        head.next = None
        self.size -= 1
        return head.data
6466096 | <gh_stars>0
# Assignment 3: CSC 486 - Spring 2022
# Author: Dr. <NAME>
# The purpose of this assignment is to guide you through building
# some common contagion models from scratch and use them to
# understand some of the dynamics underlying the spread of a
# phenomenon through a network.
import matplotlib.pyplot as plt
import networkx as nx
import random
# A convenient function to create an undirected scale free graph.
def undirected_scale_free_graph(n):
    """
    Create an undirected scale free networkx graph.
    :param n: Number of nodes
    :return: A networkx graph
    """
    directed = nx.scale_free_graph(n)
    simple = nx.Graph()
    for (u, v) in directed.edges():
        simple.add_edge(u, v)
    del directed
    return simple
def all_affected(G):
    """
    Check the 'state' attribute of all vertices; return True only when
    every vertex is affected.
    :param G: A networkx graph
    :return: True if all state attributes are True, False otherwise
    """
    for node in G.nodes():
        if not G.nodes[node]['state']:
            return False
    return True
def num_affected(G):
    """
    Calculate the number of affected vertices on a graph.
    :param G: A networkx graph
    :return: The number of affected nodes
    """
    return [G.nodes[node]['state'] for node in G.nodes()].count(True)
def perc_affected(G):
    """
    Calculate the fraction of affected vertices on a graph.
    :param G: A networkx graph
    :return: The percentage of affected nodes
    """
    shaded = [G.nodes[node]['state'] for node in G.nodes()].count(True)
    return shaded / G.number_of_nodes()
def display_states(G):
    """
    Print all vertex 'state' attributes as a list.
    :param G: A networkx graph
    :return: None
    """
    states = [G.nodes[node]['state'] for node in G.nodes()]
    print(states)
def display_steps(G):
    """
    Print the number of steps until exposure for each vertex as a list.
    :param G: A networkx graph
    :return: None
    """
    steps = [G.nodes[node]['numsteps'] for node in G.nodes()]
    print(steps)
def plot_steps_histogram(G):
    """
    Plot a histogram displaying the number of vertices corresponding
    to each possible number of steps since exposure.
    :param G: A networkx graph
    :return: None
    """
    # One bar per possible step count; the range starts at -1, which
    # presumably marks never-exposed vertices -- confirm against
    # initialize_numsteps() once Task 2 is implemented.
    x = list(range(-1, G.number_of_nodes() + 1))
    y = []
    steps = [G.nodes[j]['numsteps'] for j in G.nodes()]
    for i in x:
        y.append(steps.count(i))
    plt.bar(x, y)
    plt.xlabel('Number of steps to exposure')
    plt.ylabel('Number of nodes')
    plt.show()
def initialize_states(G):
    """
    Initialize states for vertices in G.
    States can have values True (active), or False (inactive);
    every vertex starts inactive.
    :param G: A networkx Graph object to modify.
    :return: None - this function will modify the Graph in place,
    so when the function returns the changes will remain.
    """
    for node in G.nodes():
        G.nodes[node]['state'] = False
def initialize_numsteps(G):
    """
    Initialize counter to track when vertices in G were exposed.
    Values are integers representing the number of steps it took for
    the contagion to reach each vertex.
    :param G: A networkx Graph object to modify.
    :return: None - this function will modify the Graph in place,
    so when the function returns the changes will remain.
    """
    # TODO: Task 2
    # Replace 'pass' with your code
    # Hint: mirror initialize_states(); plot_steps_histogram() bins values
    # starting at -1, which presumably marks "not yet exposed" -- confirm
    # with the assignment spec.
    pass
def generate_structures_random(n, p):
    """
    Generate an Erdos-Renyi random network with initialized node attributes.
    :param n: Number of nodes
    :param p: Probability for each edge to be created
    :return: A networkx graph object
    """
    graph = nx.erdos_renyi_graph(n, p)
    initialize_states(graph)
    initialize_numsteps(graph)
    return graph
def generate_structures_smallworld(n, k, p):
    """
    Generate an Watts-Strogatz small world network.
    :param n: Number of nodes
    :param k: Number of immediate neighbors for each vertex
    :param p: Probability for each edge to be rewired
    :return: A networkx graph object
    """
    # TODO: Task 1
    # Hint: mirror generate_structures_random(), building the graph with
    # nx.watts_strogatz_graph(n, k, p) and initializing its attributes.
    pass
def generate_structures_scalefree(n):
    """
    Generate an Barbasi-Albert scale free network.
    :param n: Number of nodes
    :return: A networkx graph object
    """
    # TODO: Task 1
    # Hint: undirected_scale_free_graph(n) at the top of this file builds
    # the graph; remember to initialize states and numsteps afterwards.
    pass
def seed_diffusion_random(G, numnodes=1):
    """
    Seed a number of randomly selected vertices equal to numnodes
    to spread the contagion.
    :param G: A networkx graph
    :param numnodes: The number of nodes to affect
    :return: None, the graph will be modified in place
    """
    # TODO: Task 3
    nodes = list(G.nodes())
    # Randomly choose 'numnodes' vertices to initially affect.
    # Set the 'state' and 'numsteps' attributes of these vertices
    # appropriately.
    # Hint: random.sample(nodes, numnodes) picks distinct vertices.
def seed_diffusion_neighborhood(G):
    """
    Seed a randomly selected vertex and all its immediate
    neighbors as initial spreaders.
    :param G: A networkx graph object
    :return: None, the graph will be modified in place
    """
    # TODO: Task 3
    # Bug fix: random.randint(a, b) includes b, so the previous
    # randint(0, G.number_of_nodes()) could pick an id one past the last
    # vertex (node ids are 0..n-1 for the generators used in this file).
    # randrange() excludes its upper bound.
    i = random.randrange(G.number_of_nodes())
    neighborhood = [i] + list(G.neighbors(i))
    # Use the list 'neighborhood' to set the 'state' and 'numsteps'
    # attributes of a random neighborhood of vertices.
def update(G, numnbrs=1, stepnum=1):
    """
    Update the state of all vertices
    :param G: A networkx graph
    :param numnbrs: The minimum number of affected neighbors required
                    for a vertex to become affected
    :param stepnum: The simulation step number, used to record which
                    step a vertex was affected
    :return: None, the graph is updated in place
    """
    # TODO: Task 4
    # Iterate over all vertices and decide which need updates,
    # then update any 'state' and 'numsteps' properties as needed.
    # Hint: decide all updates from the current states first, then apply
    # them, so one step's changes don't cascade within the same step.
    pass
def simulation1(steps):
    """
    Driver function for random network simulations.
    :param steps: The number of steps to run the simulation.
    :return: None
    """
    # TODO: Task 5 (function body intentionally left to implement)
    # Create a random graph called G here
    # Then, seed its initially affected vertices
    # Iterate the appropriate number of times, updating the
    # graph each step.
    # Finally, print the percentage of affected vertices and
    # plot the histogram once your loop exits.
def simulation2(steps):
    """
    Driver function for small world network simulations.
    :param steps: The number of steps to run the simulation.
    :return: None
    """
    # TODO: Task 6
    # Create a small world graph called G here
    # Then, seed its initially affected vertices
    # Iterate the appropriate number of times, updating the
    # graph each step.
    # Finally, print the percentage of affected vertices and
    # plot the histogram once your loop exits.
    pass
def simulation3(steps):
    """
    Driver function for scale free network simulations.
    :param steps: The number of steps to run the simulation.
    :return: None
    """
    # TODO: Task 7 (function body intentionally left to implement)
    # Create a scale free graph called G here
    # Then, seed its initially affected vertices
    # Iterate the appropriate number of times, updating the
    # graph each step.
    # Finally, print the percentage of affected vertices and
    # plot the histogram once your loop exits.
def main():
    """Entry point: run the three simulations once they are implemented."""
    # Fill this in with calls to simulation1(), simulation2(), and
    # simulation3().
    pass
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11235005 | <reponame>AnujBrandy/AdsIdeaInQBO<filename>parameters_8001.py
# NOTE(review): hard-coded credential hash committed to source control --
# this should be loaded from an environment variable or a secrets store,
# and the committed value rotated.
password="<PASSWORD>(1<PASSWORD>,20,sha512)$b7be3eabb9e0f671$42805bad515a5f87e5b75c18f3abe6182f5c2545"
| StarcoderdataPython |
4818412 | <gh_stars>0
import os
def parseMegan(filename, prefix=""):
    '''Split a MEGAN_info file (exported from the MEGAN GUI) into one TSV
    file per category (TAX, INTERPRO2GO, etc.).

    Each output file is named "<prefix>_<category>.tsv" and starts with a
    header row built from the "@Names" line of the input.

    Robustness fixes vs. the original version:
    - blank lines inside the file are skipped (previously IndexError);
    - reaching EOF without the END_OF_DATA_TABLE sentinel terminates the
      loop cleanly (previously crashed or looped).
    '''
    output = {}
    data = ""
    with open(filename, "r") as f:
        while True:
            raw = f.readline()
            if raw == "":
                # EOF reached without the END_OF_DATA_TABLE sentinel.
                break
            line = raw.strip()
            if line == "END_OF_DATA_TABLE":
                break
            elif not line:
                # Skip blank lines.
                continue
            elif line.split("\t")[0] == "@Names":
                # "@Names<TAB>col1<TAB>..." -> header "CATEGORY\tNUM<TAB>col1..."
                data = line[6:]
                data = "CATEGORY\tNUM" + data
            elif line.startswith("@"):
                # Other @-prefixed metadata lines are ignored.
                continue
            else:
                # Data rows are grouped by their first tab-separated field.
                key = line.split("\t")[0]
                output.setdefault(key, []).append(line)
    for key, value in output.items():
        file = prefix + "_" + key + ".tsv"
        with open(file, "w") as newfile:
            newfile.write(data + "\n")
            newfile.write('\n'.join(value))
def interproscan_reformat(filename):
    ''' Reformat the INTERPROSCAN to GO mapping to be more consistent and easier for downstream analysis.

    Input lines look like "InterPro:<id> <name> > <go name> ; <go id>";
    lines starting with "!" are comments. Writes a 4-column TSV
    (interpro id, interpro name, go id, go name) to
    INTERPRO2GO_MAP_CLEANED.tsv in the current working directory.'''
    interpro_id_ls =[]
    interpro_name_ls = []
    go_id_ls = []
    go_name_ls = []
    interpro, go = "", ""
    with open(filename, "r") as f:
        for line in f.readlines():
            if line[0] == "!":
                # Comment line in the interpro2go file.
                continue
            else:
                interpro, go = line.split(" > ")
                # interpro processing: first token is "InterPro:<id>",
                # the rest of the tokens form the entry name.
                interpro = interpro.split()
                interpro_id = interpro[0].split(":")[1]
                interpro_id_ls.append(interpro_id.strip())
                interpro_name = " ".join(interpro[1:])
                interpro_name_ls.append(interpro_name.strip())
                # go processing: "<go name> ; <go id>"
                go_name, go_id = go.split(" ; ")
                go_id_ls.append(go_id.strip())
                go_name_ls.append(go_name.strip())
    newfile_name = "INTERPRO2GO_MAP_CLEANED.tsv"
    with open(newfile_name, "w") as newfile:
        for a,b,c,d in zip(interpro_id_ls,interpro_name_ls,go_id_ls,go_name_ls):
            newfile.write("\t".join([a,b,c,d])+"\n")
def interproscan_goatools(filename, output="interproscan_goatools.txt"):
    """Convert the cleaned INTERPRO2GO TSV into a goatools association file.

    Output format: one line per entry, "<id>\t<go1>;<go2>;...".
    line[0][3:] drops the first three characters of the id -- presumably
    the "IPR" prefix of InterPro accessions; confirm against the input file.
    """
    mapping_data = {}
    with open(filename, "r") as f:
        for line in f.readlines():
            line = line.split("\t")
            if line[0][3:] not in mapping_data.keys():
                mapping_data[line[0][3:]] = []
            # Column 2 holds the GO id in the cleaned mapping file.
            mapping_data[line[0][3:]].append(line[2])
    with open(output, "w") as out:
        for key, value in mapping_data.items():
            out.write(key+"\t"+";".join(value)+"\n")
def combine_bracken_output(filepath,level="P"):
    """Merge per-sample bracken kreport files into one taxa-by-sample TSV.

    Writes "bracken_combined.tsv" to the current working directory; missing
    taxa for a sample get "0".
    NOTE(review): the "_bracken_phylums.kreport" suffix is stripped from
    filenames regardless of *level* -- confirm this is intended for
    non-phylum levels.
    """
    file_list = os.listdir(filepath)
    main_dic = {}
    #read in all data
    for file in file_list:
        sample = file.replace("_bracken_phylums.kreport", "")
        main_dic[sample] = {}
        with open(os.path.join(filepath,file), "r") as f:
            lines = f.readlines()
            for line in lines:
                line = line.split("\t")
                # Column 3 is the rank code; keep only rows at the requested
                # level, mapping taxon name (col 5) -> count (col 1).
                if line[3] == level:
                    main_dic[sample][line[5].strip()] = line[1]
    all_taxa = set()
    #get unique taxas
    for key in main_dic.keys():
        all_taxa.update(list(main_dic[key].keys()))
    # Build the output row-wise: first row is the header, then one row per
    # taxon; each sample appends its column to every row.
    out = ["taxa"]
    out.extend(all_taxa)
    for key in main_dic.keys():
        out[0] += "\t" + key
        for i in range(1, len(out)):
            taxa = out[i].split("\t")[0]
            out[i] += "\t" + main_dic[key].get(taxa, "0")
    with open("bracken_combined.tsv", "w") as f:
        f.write("\n".join(out))
#interproscan_reformat("INTERPRO2GO_MAP.txt")
#parseMegan("daa2rma.megan", "rma")
#parseMegan("root_4m_info", prefix="root4m")
#parseMegan("bulk_4m_info", prefix="bulk4m")
#interproscan_goatools("INTERPRO2GO_MAP_CLEANED.tsv")
#combine_bracken_output("C:\\Users\\YZ\\Desktop\\FYP\\dip_metagenome\\results\\bracken_kreport",level="P")
| StarcoderdataPython |
8011969 | import numpy as np
import numba as nb
from numba import types, typed, typeof
from numba import jit
from numba.experimental import jitclass
from nrc_spifpy.spif import TIME_CHUNK
# The size of the metadata in a particle record
# Word 1 = Flag 2S
# Word 2 = word for h image metadata
# Word 3 = word for v image metadata
# Word 4 = word for particle count
# Word 5 = word for number of slices
METADATA_LENGTH = 5
# Offsets to find specific metadata in an image record
WORD_H_OFFSET = 1
WORD_V_OFFSET = 2
WORD_PC_OFFSET = 3
WORD_NUM_SLICE_OFFSET = 4
# Easier to define here than to have to flip 1 and 0 in the code
SHADED_VAL = 0
CLEAR_VAL = 1
# Useful datatypes
decoded_word_type = np.dtype([
("is_image_slice", "u2"),
("is_start_slice", "u2"),
("num_shaded", "u2"),
("num_clear", "u2")
])
class ImageMetadataContainer:
    """Plain data holder for the decoded metadata of one 2D-S particle frame."""
    def __init__(self):
        # Index of the frame's first word within the record buffer.
        self.buffer_idx = 0
        # Horizontal-array metadata (decoded from word 2).
        self.n_h = 0
        self.timing_h = 0
        self.mismatch_h = 0
        self.fifo_h = 0
        self.overload_h = 0
        # Vertical-array metadata (decoded from word 3).
        self.n_v = 0
        self.timing_v = 0
        self.mismatch_v = 0
        self.fifo_v = 0
        self.overload_v = 0
        # Frame-level counters (words 4 and 5).
        self.particle_count = 0
        self.num_slices = 0
        # Derived buffer positions of the h/v image data.
        self.h_start = 0
        self.h_end = 0
        self.v_start = 0
        self.v_end = 0
        self.frame_len = 0
        # Whether the whole frame fits inside the current buffer.
        self.image_in_buffer = 0
class ImageMetadataProcessor:
    """
    This is for words 2 in a particle frame
    NH (Word 2)
    -----------------------------------------------------------
    Bits 0–11 Number of horizontal words–Includes Timing Words if present
    Bit 12 – 1 = Timing Words not found
    Bit 13 – Timing Word mismatch
    Bit 14 — FIFO Empty (means the next particle was cut off)
    Bit 15 – The last two words of the horizontal data record are overload timing words
    NV (Word 3)
    -------------------------------------------------------------
    Bits 0 –11 Number of vertical words–Includes Timing Words if not same as the horizontal Timing Word and the TW were found.
    Bit 12 –1 = Timing Words not found
    Bit 13 –Timing Word mismatch
    Bit 14-FIFO Empty before timing word found
    Bit 15 –The last two words of the vertical data record are overload timing words
    """
    def __init__(self) -> None:
        pass
    def process_metadata(self, buffer_idx, buffer):
        """Decode the frame starting at *buffer_idx* into an
        ImageMetadataContainer (bit layout documented on the class)."""
        metadata = ImageMetadataContainer()
        metadata.buffer_idx = buffer_idx
        # Horizontal-channel metadata word (word 2 of the frame).
        metadata.n_h = num_words(buffer[buffer_idx + WORD_H_OFFSET])
        metadata.timing_h = timing_words_not_found(buffer[buffer_idx + WORD_H_OFFSET])
        metadata.mismatch_h = timing_word_mismatch(buffer[buffer_idx + WORD_H_OFFSET])
        metadata.fifo_h = fifo_empty(buffer[buffer_idx + WORD_H_OFFSET])
        metadata.overload_h = overload_timing_words_exist(buffer[buffer_idx + WORD_H_OFFSET])
        # Vertical-channel metadata word (word 3 of the frame).
        metadata.n_v = num_words(buffer[buffer_idx + WORD_V_OFFSET])
        metadata.timing_v = timing_words_not_found(buffer[buffer_idx + WORD_V_OFFSET])
        metadata.mismatch_v = timing_word_mismatch(buffer[buffer_idx + WORD_V_OFFSET])
        metadata.fifo_v = fifo_empty(buffer[buffer_idx + WORD_V_OFFSET])
        metadata.overload_v = overload_timing_words_exist(buffer[buffer_idx + WORD_V_OFFSET])
        metadata.particle_count = buffer[buffer_idx + WORD_PC_OFFSET]
        metadata.num_slices = buffer[buffer_idx + WORD_NUM_SLICE_OFFSET]
        # Derived positions: h data follows the metadata words, v data
        # follows the h data.
        metadata.h_start = metadata.buffer_idx + METADATA_LENGTH
        metadata.h_end = metadata.h_start + metadata.n_h
        metadata.v_start = metadata.buffer_idx + METADATA_LENGTH + metadata.n_h
        metadata.v_end = metadata.v_start + metadata.n_v
        metadata.frame_len = METADATA_LENGTH + metadata.n_h + metadata.n_v
        # True when the whole frame fits within the 2048-word buffer.
        metadata.image_in_buffer = (metadata.buffer_idx + metadata.frame_len) < 2048
        return metadata
@jit(nopython = True)
def num_words(word):
    """Return bits 0-11 of ``word``: the channel's data word count."""
    return word & 0x0FFF
@jit(nopython = True)
def timing_words_not_found(word):
    """Return bit 12 of ``word``: 1 when the timing words were not found."""
    return (word >> 12) & 1
@jit(nopython = True)
def timing_word_mismatch(word):
    """Return bit 13 of ``word``: 1 on a timing-word mismatch."""
    return (word >> 13) & 1
@jit(nopython = True)
def fifo_empty(word):
    """Return bit 14 of ``word``: 1 when the FIFO went empty (particle cut off)."""
    return (word >> 14) & 1
@jit(nopython = True)
def overload_timing_words_exist(word):
    """Return bit 15 of ``word``: 1 when the record ends in overload timing words."""
    return (word >> 15) & 1
class RawImageContainer:
    """Holds the raw (still run-length-encoded) words of both channels."""
    def __init__(self) -> None:
        # 16-bit words for the horizontal and vertical photodiode arrays.
        self.raw_image_h = np.array([], dtype=np.uint16)
        self.raw_image_v = np.array([], dtype=np.uint16)
class ImageTimewordContainer:
    """Upper/lower 16-bit halves of each channel's timeword (0 when absent)."""
    def __init__(self) -> None:
        self.timeword_h_upper = 0
        self.timeword_h_lower = 0
        self.timeword_v_upper = 0
        self.timeword_v_lower = 0
class RawImageExtractor:
    """Slices the raw H/V image words and their trailing timewords out of a buffer."""

    def __init__(self) -> None:
        self.raw_image_container = RawImageContainer()
        self.image_timeword_container = ImageTimewordContainer()

    def extract_raw_images(self, metadata, buffer):
        """Return a RawImageContainer holding both channels' image words.

        A timing flag of 0 means the channel's timing words were found, in
        which case its last two words are timewords (not image data) and are
        dropped here.
        """
        self.raw_image_container = RawImageContainer()
        channel_h = buffer[metadata.h_start:metadata.h_end]
        channel_v = buffer[metadata.v_start:metadata.v_end]
        if metadata.timing_h == 0:
            channel_h = channel_h[:-2]
        if metadata.timing_v == 0:
            channel_v = channel_v[:-2]
        self.raw_image_container.raw_image_h = channel_h
        self.raw_image_container.raw_image_v = channel_v
        return self.raw_image_container

    def extract_image_timewords(self, metadata, buffer):
        """Return an ImageTimewordContainer with each channel's trailing
        timeword pair (upper word first); left at zero when absent."""
        self.image_timeword_container = ImageTimewordContainer()
        channel_h = buffer[metadata.h_start:metadata.h_end]
        channel_v = buffer[metadata.v_start:metadata.v_end]
        if (metadata.timing_h == 0) and (len(channel_h) >= 2):
            self.image_timeword_container.timeword_h_upper = channel_h[-2]
            self.image_timeword_container.timeword_h_lower = channel_h[-1]
        if (metadata.timing_v == 0) and (len(channel_v) >= 2):
            self.image_timeword_container.timeword_v_upper = channel_v[-2]
            self.image_timeword_container.timeword_v_lower = channel_v[-1]
        return self.image_timeword_container
class DecodedImageContainer:
    """Holds both channels' decoded run-length records (decoded_word_type)."""
    def __init__(self) -> None:
        self.decoded_image_h = np.empty(0, dtype = decoded_word_type)
        self.decoded_image_v = np.empty(0, dtype = decoded_word_type)
class RawImageDecoder:
    """Decodes raw 16-bit channel words into structured run-length records."""

    def __init__(self) -> None:
        pass

    def decode_dual_channel_images(self, raw_image_container):
        """Decode both channels of ``raw_image_container`` into a new container."""
        decoded = DecodedImageContainer()
        decoded.decoded_image_h = decode_image(raw_image_container.raw_image_h)
        decoded.decoded_image_v = decode_image(raw_image_container.raw_image_v)
        return decoded
@jit
def decode_image(encoded_image):
    """Decode 16-bit words into (flag, run-length) records; keep image words only.

    Ordinary image words (bit 15 clear) pack: bit 14 = start-of-slice flag,
    bits 7-13 = shaded-pixel run length, bits 0-6 = clear-pixel run length.
    Two sentinel values encode whole 128-pixel slices.
    """
    decoded_image = np.zeros(len(encoded_image), dtype = decoded_word_type)
    for i, word in enumerate(encoded_image):
        if word == 0x7fff:
            # Sentinel: an entire slice of 128 clear pixels.
            decoded_image['is_image_slice'][i] = 1
            decoded_image['is_start_slice'][i] = 1
            decoded_image['num_clear'][i] = 128
            decoded_image['num_shaded'][i] = 0
        elif word == 0x4000:
            # Sentinel: an entire slice of 128 shaded pixels.
            decoded_image['is_image_slice'][i] = 1
            decoded_image['is_start_slice'][i] = 1
            decoded_image['num_clear'][i] = 0
            decoded_image['num_shaded'][i] = 128
        else:
            # Bit 15 set marks a non-image (e.g. timing/control) word.
            decoded_image['is_image_slice'][i] = ((word & 2**15) >> 15) == 0
            decoded_image['is_start_slice'][i] = (word & 2**14) >> 14
            decoded_image['num_shaded'][i] = (word & 0b0011111110000000) >> 7
            decoded_image['num_clear'][i] = (word & 0b0000000001111111)
    # Drop non-image words before returning.
    valid_image_words = decoded_image['is_image_slice'] == True
    return decoded_image[valid_image_words]
class DecompressedImageContainer:
    """Holds both channels' flat decompressed pixel arrays (0/1 per pixel)."""
    def __init__(self) -> None:
        self.decompressed_image_h = np.array([], np.uint8)
        self.decompressed_image_v = np.array([], np.uint8)
class DecodedImageDecompressor:
    """Expands decoded run-length records into flat per-pixel arrays."""

    def __init__(self) -> None:
        pass

    def decompress_image(self, decoded_image_container):
        """Decompress both channels into a DecompressedImageContainer."""
        result = DecompressedImageContainer()
        result.decompressed_image_h = self.decompress_single_channel_image(
            decoded_image_container.decoded_image_h
        )
        result.decompressed_image_v = self.decompress_single_channel_image(
            decoded_image_container.decoded_image_v
        )
        return result

    def decompress_single_channel_image(self, decoded_image):
        """Decompress one channel; an empty channel stays an empty list."""
        if len(decoded_image) == 0:
            return []
        return decompress_complete_image(decoded_image)
@jit(nopython = True)
def get_complete_image_slice_inds(start_slice_flags):
    """Group word indices by image slice.

    Each start-of-slice flag bumps the running cumulative sum, so every word
    gets a slice id; the indices belonging to each id are collected into one
    array per slice and returned as a list of index arrays.
    """
    image_slice_id = np.cumsum(start_slice_flags)
    image_slice_inds = []
    for i in np.unique(image_slice_id):
        image_slice_inds.append(
            np.ravel(
                np.argwhere(image_slice_id == i)
            )
        )
    return image_slice_inds
@jit(nopython = True)
def decompress_complete_image(decoded_image):
    """Expand run-length-encoded slice records into a flat 0/1 pixel array.

    Returns a uint8 array whose length is a multiple of 128 (the array
    width); SHADED_VAL marks shaded pixels, CLEAR_VAL clear ones.
    """
    image_slice_inds = get_complete_image_slice_inds(decoded_image['is_start_slice'])
    # ``[int(x) for x in range(0)]`` is a numba-friendly way to build an
    # *empty typed* int list: nopython mode can infer the element type from
    # the comprehension where a bare [] would be untyped.
    image_slices = [int(x) for x in range(0)]
    #breakpoint()
    for i, slice_collection in enumerate(image_slice_inds):
        image_slice = [int(x) for x in range(0)]
        for slice_idx in slice_collection:
            """
            Be sure to append clear, THEN shaded
            Order matters very much here
            """
            image_slice += [CLEAR_VAL]*decoded_image['num_clear'][slice_idx]
            image_slice += [SHADED_VAL]*decoded_image['num_shaded'][slice_idx]
        # Add some clear bits to any incomplete slices
        if len(image_slice) < 128:
            image_slice += [CLEAR_VAL] * (128 - len(image_slice))
        # NOTE -> This is a TEMPORARY fix to a very strange encountered problem
        # (over-long slices are padded up to the next multiple of 128).
        # To be sorted out later.
        if len(image_slice) > 128:
            image_slice += [CLEAR_VAL] * int(128*np.ceil(len(image_slice)/128) - len(image_slice) )
        image_slices += image_slice
    return np.array(image_slices, dtype=np.uint8)
class AssembledImageRecordContainer:
    """Column-oriented record of all images in one buffer.

    Every attribute is a parallel array with one entry per image, except
    ``image`` which holds all images' pixels concatenated.
    """
    def __init__(self) -> None:
        # Critical variables for the SPIF format
        self.buffer_id = np.array([], np.int32)
        self.image_sec = np.array([], np.int32)
        self.image_ns = np.array([], np.int32)
        self.image_len = np.array([], np.int32)
        self.image = np.array([], dtype = np.uint8)
        # Auxiliaries not necessary for the format
        self.num_words = np.array([], dtype = np.uint16)
        self.timing_flag = np.array([], dtype = np.uint16)
        self.mismatch_flag = np.array([], dtype = np.uint16)
        self.fifo_flag = np.array([], dtype = np.uint16)
        self.overload_flag = np.array([], dtype = np.uint16)
        self.particle_count = np.array([], dtype = np.uint16)
        self.num_slices = np.array([], dtype = np.uint16)
        self.timeword_upper = np.array([], dtype = np.uint16)
        self.timeword_lower = np.array([], dtype = np.uint16)
        self.tas = np.array([], dtype = np.float32)
class ImageRecordAssembler:
    """Assembles per-buffer metadata, timewords, and decompressed pixels into
    two column-oriented AssembledImageRecordContainer objects (H and V)."""
    def __init__(self) -> None:
        pass
    def assemble_images(
        self,
        buffer_id,
        buffer_sec,
        buffer_ns,
        metadata_containers,
        timeword_containers,
        decompressed_image_containers,
        housekeeping
    ):
        """Build the H and V containers for one buffer.

        The three ``*_containers`` sequences are parallel, one entry per
        image; ``housekeeping`` supplies the latest true airspeed reading.
        An empty buffer still gets buffer info and TAS columns.
        """
        image_container_h = AssembledImageRecordContainer()
        image_container_v = AssembledImageRecordContainer()
        image_container_h, image_container_v = self.set_buffer_info(
            buffer_id = buffer_id,
            buffer_sec = buffer_sec,
            buffer_ns = buffer_ns,
            num_images = len(metadata_containers),
            image_container_h = image_container_h,
            image_container_v = image_container_v
        )
        image_container_h, image_container_v = self.set_image_tas(
            housekeeping = housekeeping,
            num_images = len(metadata_containers),
            image_container_h = image_container_h,
            image_container_v = image_container_v
        )
        if len(metadata_containers) > 0:
            image_container_h, image_container_v = self.set_images(
                image_container_h,
                image_container_v,
                decompressed_image_containers
            )
            image_container_h, image_container_v = self.set_auxiliaries(
                metadata_containers,
                timeword_containers,
                image_container_h,
                image_container_v
            )
        return image_container_h, image_container_v
    def set_buffer_info(self, buffer_id, buffer_sec, buffer_ns, num_images, image_container_h, image_container_v):
        """Stamp every image with the buffer's id and timestamp."""
        image_container_h.buffer_id = np.zeros(num_images, dtype = np.int32) + buffer_id
        image_container_h.image_sec = np.zeros(num_images, dtype = np.int32) + buffer_sec
        image_container_h.image_ns = np.zeros(num_images, dtype = np.int32) + buffer_ns
        image_container_v.buffer_id = np.zeros(num_images, dtype = np.int32) + buffer_id
        image_container_v.image_sec = np.zeros(num_images, dtype = np.int32) + buffer_sec
        image_container_v.image_ns = np.zeros(num_images, dtype = np.int32) + buffer_ns
        return image_container_h, image_container_v
    def set_image_tas(self, housekeeping, num_images, image_container_h, image_container_v):
        """Assign the most recent complete-packet TAS to every image."""
        # Only complete housekeeping packets carry a usable TAS value.
        housekeeping = housekeeping[ housekeeping['incomplete_packet'] == 0 ]
        if len(housekeeping) == 0:
            # No valid packet: fill with NaN (note: float64 here vs float32 below).
            tas = np.zeros(num_images)*np.nan
        else:
            tas = np.zeros(num_images, dtype = np.float32) + housekeeping['tas'][-1]
        image_container_h.tas = tas
        image_container_v.tas = tas
        return image_container_h, image_container_v
    def set_images(
        self,
        image_container_h,
        image_container_v,
        decompressed_image_containers
    ):
        """Store per-image lengths (in 128-pixel slices) and the flat pixels."""
        # Images are 128 pixels wide, so length in slices = pixel count / 128.
        image_container_h.image_len = np.array(
            [len(x.decompressed_image_h) / 128 for x in decompressed_image_containers],
            dtype = np.int32
        )
        image_container_v.image_len = np.array(
            [len(x.decompressed_image_v) / 128 for x in decompressed_image_containers],
            dtype = np.int32
        )
        image_container_h.image = np.concatenate(
            [x.decompressed_image_h for x in decompressed_image_containers]
        )
        image_container_v.image = np.concatenate(
            [x.decompressed_image_v for x in decompressed_image_containers]
        )
        return image_container_h, image_container_v
    def set_auxiliaries(
        self,
        metadata,
        timewords,
        image_container_h,
        image_container_v
    ):
        """Copy per-channel flags, counters, and timewords into the containers."""
        image_container_h.num_words = np.array([x.n_h for x in metadata], dtype = np.uint16)
        image_container_h.timing_flag = np.array([x.timing_h for x in metadata], dtype = np.uint16)
        image_container_h.mismatch_flag = np.array([x.mismatch_h for x in metadata], dtype = np.uint16)
        image_container_h.fifo_flag = np.array([x.fifo_h for x in metadata], dtype = np.uint16)
        image_container_h.overload_flag = np.array([x.overload_h for x in metadata], dtype = np.uint16)
        image_container_h.particle_count = np.array([x.particle_count for x in metadata], dtype = np.uint16)
        image_container_h.num_slices = np.array([x.num_slices for x in metadata], dtype = np.uint16)
        image_container_h.timeword_upper = np.array([x.timeword_h_upper for x in timewords], dtype = np.uint16)
        image_container_h.timeword_lower = np.array([x.timeword_h_lower for x in timewords], dtype = np.uint16)
        image_container_v.num_words = np.array([x.n_v for x in metadata], dtype = np.uint16)
        image_container_v.timing_flag = np.array([x.timing_v for x in metadata], dtype = np.uint16)
        image_container_v.mismatch_flag = np.array([x.mismatch_v for x in metadata], dtype = np.uint16)
        image_container_v.fifo_flag = np.array([x.fifo_v for x in metadata], dtype = np.uint16)
        image_container_v.overload_flag = np.array([x.overload_v for x in metadata], dtype = np.uint16)
        image_container_v.particle_count = np.array([x.particle_count for x in metadata], dtype = np.uint16)
        image_container_v.num_slices = np.array([x.num_slices for x in metadata], dtype = np.uint16)
        image_container_v.timeword_upper = np.array([x.timeword_v_upper for x in timewords], dtype = np.uint16)
        image_container_v.timeword_lower = np.array([x.timeword_v_lower for x in timewords], dtype = np.uint16)
        return image_container_h, image_container_v
# A utility method to define the extra image related variables in a netcdf file
def add_auxiliary_core_variables(spiffile, inst_name):
    """Create the auxiliary per-image variables in ``inst_name``'s 'core' group.

    Each variable is one-dimensional over 'Images' and chunked by TIME_CHUNK.
    The variable set and all attribute strings are identical to the original
    ten hand-written create_variable calls, just driven from a table.

    Parameters
    ----------
    spiffile : SPIF file wrapper exposing ``rootgrp`` and ``create_variable``.
    inst_name : str, name of the instrument group inside the file.
    """
    coregrp = spiffile.rootgrp[inst_name]['core']

    # (name, netCDF datatype, long_name, units) for each auxiliary variable.
    # NOTE(review): 'overload_flag' uses units 'binary' while the other flags
    # use 'boolean' -- kept as-is to preserve the written metadata exactly.
    aux_variables = [
        ('num_words', 'u2', 'Number of data words present in image', 'counts'),
        ('timing_flag', 'u2', 'Timing flag for image', 'boolean'),
        ('mismatch_flag', 'u2', 'Mismatch flag for image', 'boolean'),
        ('fifo_flag', 'u2', 'FIFO flag for image', 'boolean'),
        ('overload_flag', 'u2', 'Overload flag for image', 'binary'),
        ('particle_count', 'u2', 'Number of particles detected in image', 'counts'),
        ('num_slices', 'u2', 'Number of slices detected in the image', 'counts'),
        ('timeword_upper', 'u2', 'Upper 16 bits of timeword for image', 'clock ticks'),
        ('timeword_lower', 'u2', 'Lower 16 bits of timeword for image', 'clock ticks'),
        ('tas', 'f', 'True airspeed as recorded by probe', 'm/s'),
    ]

    for var_name, datatype, long_name, units in aux_variables:
        spiffile.create_variable(
            coregrp,
            var_name,
            datatype,
            ('Images',),
            {
                'long_name': long_name,
                'units': units
            },
            chunksizes=(TIME_CHUNK,)
        )
4920107 | <filename>heat/core/memory.py<gh_stars>0
import numpy as np
import torch
from . import dndarray
__all__ = ["copy", "sanitize_memory_layout"]
def copy(a):
    """
    Return an array copy of the given object.
    Parameters
    ----------
    a : ht.DNDarray
        Input data to be copied.
    Returns
    -------
    copied : ht.DNDarray
        A copy of the original
    """
    if not isinstance(a, dndarray.DNDarray):
        raise TypeError("input needs to be a tensor")
    # Clone the process-local torch tensor, then re-wrap it with the
    # original's distribution metadata (shape, dtype, split, device, comm).
    cloned_tensor = a._DNDarray__array.clone()
    return dndarray.DNDarray(cloned_tensor, a.shape, a.dtype, a.split, a.device, a.comm)
def sanitize_memory_layout(x, order="C"):
    """
    Return the given object with memory layout as defined below.
    Parameters
    -----------
    x: torch.tensor
        Input data
    order: str, optional.
        Default is 'C' as in C-like (row-major) memory layout. The array is stored first dimension first (rows first if ndim=2).
        Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored last dimension first (columns first if ndim=2).
    """
    if x.ndim < 2:
        # do nothing
        return x
    dims = list(range(x.ndim))
    stride = list(x.stride())
    # Non-increasing strides: the first dimension varies slowest -> row-major.
    row_major = all(np.diff(stride) <= 0)
    # Non-decreasing strides: the last dimension varies slowest -> column-major.
    column_major = all(np.diff(stride) >= 0)
    if (order == "C" and row_major) or (order == "F" and column_major):
        # do nothing
        return x
    if (order == "C" and column_major) or (order == "F" and row_major):
        # Requested layout is the opposite of the current one: materialize the
        # permuted data contiguously, then re-attach the original shape with
        # reversed strides so the logical view is unchanged.
        dims = tuple(reversed(dims))
        y = torch.empty_like(x)
        permutation = x.permute(dims).contiguous()
        y = y.set_(
            permutation.storage(),
            x.storage_offset(),
            x.shape,
            tuple(reversed(permutation.stride())),
        )
    if order == "K":
        raise NotImplementedError(
            "Internal usage of torch.clone() means losing original memory layout for now. \n Please specify order='C' for row-major, order='F' for column-major layout."
        )
    # NOTE(review): an unrecognized ``order`` (anything other than "C"/"F"/"K")
    # reaches this return with ``y`` unbound -> UnboundLocalError.
    return y
| StarcoderdataPython |
8096778 | <gh_stars>0
from flask import Flask, request, redirect, url_for, flash
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask.ext.modular_auth import AuthManager, SessionBasedAuthProvider, current_authenticated_entity
def unauthorized_callback():
    """Handle access to a protected resource that the current entity may not use.

    Anonymous visitors are sent to the login page (with a return URL);
    authenticated-but-unauthorized users are flashed a warning and sent home.
    """
    if not current_authenticated_entity.is_authenticated:
        return redirect(url_for('login', next=request.url))
    flash('You are not authorized to access this resource!', 'warning')
    return redirect(url_for('index'))
def setup_auth(user_loader):
    """Register a session-based auth provider built around ``user_loader``.

    The provider is stored on the module-level ``app`` (so other code can
    reach it) and registered with the global ``auth_manager``.
    """
    app.session_provider = SessionBasedAuthProvider(user_loader)
    auth_manager.register_auth_provider(app.session_provider)
# Application bootstrap: Flask app, SQLAlchemy database, auth manager, Bootstrap.
app = Flask(__name__)
db = SQLAlchemy(app)
auth_manager = AuthManager(app, unauthorized_callback=unauthorized_callback)
# NOTE(review): DEBUG is enabled -- development configuration only.
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///simple_blog.db'
# NOTE(review): the secret key below is a redacted placeholder ('\<KEY>');
# replace it with a real random secret before any deployment.
app.config['SECRET_KEY'] = b'\<KEY>'
Bootstrap(app)
| StarcoderdataPython |
3268296 | <gh_stars>0
#!/usr/bin/env python
#coding=utf8
import tornado.ioloop
import tornado.web
import httplib
import md5
import urllib
import random
import time
from tornado.escape import json_decode
from apps_info_setting import apps_info
# Valid demo ids, formatted "<chapter>_<section>"; ThreeJSHandler_demo maps an
# id to the template "template/demo_<chapter>_<section>.html".
apps = [
    "01_41","01_51","01_61","01_71","01_81",
    "02_11","02_12","02_13","02_211","02_212",
    "02_22","02_31","02_32",
    "03_21","03_22","03_23","03_24","03_31","03_32","03_33",
    "04_21","04_22","04_23","04_24","04_25",
    "04_31","04_32","04_33","04_41","04_42",
    "05_111","05_112","05_113","05_114","05_121","05_122",
    "05_123","05_124","05_125","05_126",
    "06_11","06_21","06_31","06_32","06_33","06_34",
    "06_41","06_51",
    "07_11","07_12","07_21","07_31","07_32","07_33",
    "07_41","07_42","07_51","07_52","07_61",
    "08_11","08_12","08_13","08_14","08_15","08_1611","08_1612",
    "08_1613","08_1631","08_1632","08_1633","08_1634","08_1635",
    "08_1636","08_1637","08_1641","08_1651",
    "09_11","09_12","09_13","09_21","09_22","09_23",
    "09_24","09_25","09_31","09_32","09_33","09_41",
    "09_42","09_43",
    "10_111","10_112","10_113","10_12","10_13",
    "10_14","10_15","10_16","10_211","10_212",
    "10_22","10_231","10_232","10_241",
    "11_11","11_12","11_13","11_21","11_231",
    "11_232","11_233","11_31",
    "12_11","12_21","12_31","12_41","12_51","12_61",
]
class ThreeJSListHandler(tornado.web.RequestHandler):
    """Serves the index page listing all available three.js demos."""
    def get(self):
        # Expose the demo metadata and the id list to the template.
        self.apps_info = apps_info
        self.apps = apps
        self.render("template/threejs_list.html")
# Temporary endpoint (临时接口)
class ThreeJSHandler_learn(tornado.web.RequestHandler):
    """Serves the hand-written 'shoes' learning/demo page."""
    def get(self):
        # Current timestamp exposed to the template -- presumably used for
        # cache-busting of JS assets; TODO confirm against template/shoes.html.
        self.js_time = time.time()
        self.render("template/shoes.html")
# Section rewritten in CoffeeScript (coffee重写部分)
class ThreeJSHandler_demo(tornado.web.RequestHandler):
    """Serves one three.js demo page addressed by a "<chapter>_<section>" id."""
    def get(self, app):
        # Unknown ids get a short JSON notice instead of a template.
        if app not in apps:
            self.finish({"info":"no demos"})
            return
        chapter, section = app.split("_")
        self.js_time = time.time()
        self.render("template/demo_%s_%s.html" % (chapter, section))
| StarcoderdataPython |
9780303 | <reponame>k88097/Switch-Fightstick<filename>example/Fossil.py
from NXController import Controller
import time
# Revive-fossil bot: restores fossils repeatedly until the requested number
# of Pokemon has been produced.
#
# BUGFIX: in the original script the while-loop ran *before* ``def Fossil``
# executed, so the first iteration raised NameError; the function is now
# defined before the loop that calls it.
def Fossil():
    """復活化石 (revive fossil): press the button sequence for one restoration.

    Each entry is the wait (in seconds) passed to ``Controller.A``; the
    longer 5s/2s waits cover the in-game dialogue animations.
    """
    for wait in (1, 1, 1, 1, 1, 5, 1, 1, 1, 5, 2, 2):
        ctr.A(wait)
ctr = Controller()
count = 0
goal = int(input("目標幾隻:"))
print("{}開始{}".format("=" * 10, "=" * 10))
while count < goal:
    count += 1
    print("目前第{}隻,剩餘{}隻達到目標。".format(count, goal - count))
    Fossil()
print("已達目標數量,共{}隻。".format(goal))
| StarcoderdataPython |
1877627 | from typing import Any, Dict, List, Optional
class Node:
    """Common base type for File and Folder; exists only for static typing."""
class Contents:
    """Stand-in for the object returned by ``Path.open`` in tests and helpers.

    Stores a list of lines and joins them on read, so it behaves like a file
    handle that can also be used as a context manager.
    """

    def __init__(self, contents=None):
        self.contents = contents

    def read(self):
        """Return the stored lines joined with newlines, like a whole-file read."""
        return "\n".join(self.contents)

    def __enter__(self):
        # ``with Contents(...) as handle`` hands back the object itself.
        return self

    def __exit__(self, *exc_info):
        # Nothing to release for an in-memory fake.
        pass
class File(Node):
    """Simulated File node, mimicking the subset of the ``Path`` API needed
    to build and render a fake directory tree."""

    EMPTY_CONTENTS = "This file is empty, mode was {mode}"

    def __init__(
        self, path, basename="", replacements: Optional[List[Dict[str, str]]] = None
    ):
        self._path = ""
        self.name = ""
        self._rename(path)
        self.contents: Optional[List[str]] = None
        self.replacements = replacements
        # A name without a dot has no suffix at all.
        pieces = self.name.split(".")
        self.suffix = pieces[-1] if len(pieces) > 1 else None
        self.basename = basename

    def _rename(self, path):
        self._path = path
        self.name = path

    def as_posix(self):
        """Not really posix, but useful to generate links in the markdown result"""
        if self.basename == "":
            return self.name
        return f"{self.basename}/{self.name}"

    def open(self, mode="meh"):
        """Simulates a content open; empty files report the mode they were opened with."""
        if self.contents is None:
            return Contents([self.EMPTY_CONTENTS.format(mode=mode)])
        return Contents(self.contents)

    def set_contents(self, contents):
        """Store ``contents`` as stripped lines; returns self for chaining."""
        self.contents = contents.strip().split("\n")
        return self

    def set_replacements(self, replacements):
        """Store replacement rules; returns self for chaining."""
        self.replacements = replacements
        return self

    @staticmethod
    def is_dir():
        """Files are never directories."""
        return False

    def __repr__(self):
        return f"{self.name}({self.contents})"

    def __eq__(self, other):
        # Equality by rendered representation, matching Folder's convention.
        return str(self) == str(other)
class Folder(Node):
    """Simulated Folder node, mimicking the subset of the ``Path`` API needed
    to build and render a fake directory tree."""

    def __init__(self, path, contents: List[Any] = None, depth=0, basename=""):
        self._depth = depth
        self._path = ""
        self._basename = basename
        self.basename = ""
        self.name = ""
        self._rename(path)
        # Keep the caller's list object when given (children are shared).
        self._contents = contents if contents is not None else []
        self.suffix = ""

    def _rename(self, path):
        self._path = path
        self.name = path
        # The folder's basename accumulates its ancestors' path.
        self.basename = path if self._basename == "" else self._basename + "/" + path

    def __repr__(self):
        return f"{self.name}[{self.iterdir()}]"

    def as_posix(self):
        "Returns a kind of _as_posix, used to generate basenames and links"
        return self.basename

    def append_to_contents(self, node: Node):
        """Add a file or folder to this folder's children."""
        self._contents.append(node)

    def prune(self):
        """Recursively drop empty sub-folders; returns self for chaining."""
        for child in self._contents:
            if child.is_dir():
                child.prune()
        self._contents = [
            child
            for child in self._contents
            if not (child.is_dir() and child.is_empty())
        ]
        return self

    def is_empty(self):
        """True when the folder has no children."""
        return not self._contents

    @staticmethod
    def is_dir():
        """Folders are always directories."""
        return True

    def iterdir(self):
        """Return the children list so callers can iterate over it."""
        return self._contents

    def __eq__(self, other):
        # Equality by rendered representation, matching File's convention.
        return str(self) == str(other)
SPACE = " "
BRANCH = "│ "
TEE = "├── "
LST = "└── "
def tree(base: Folder, prefix: str = ""):
    """From https://stackoverflow.com/a/59109706 by https://twitter.com/aaronchall

    Yields the rendered directory tree one text line at a time.
    """
    children = list(base.iterdir())
    for position, child in enumerate(children, start=1):
        # Every child but the last hangs off a tee; the last closes the branch.
        connector = LST if position == len(children) else TEE
        yield prefix + connector + child.name
        if child.is_dir():
            deeper = SPACE if connector == LST else BRANCH
            yield from tree(child, prefix=prefix + deeper)
def url(path):
    """Render *path* as Markdown: inline code for folders, an anchor link for files."""
    # GitHub-style anchor: posix path with separators/dots stripped, lowercased.
    anchor = path.as_posix().replace("/", "").replace(".", "").lower()
    if path.is_dir():
        return f"`{path.name}`"
    return f"[`{path.name}`](#{anchor})"
def tree_links(base: Folder, prefix: str = ""):
    """Generate a table of contents based on the tree itself"""
    for child in base.iterdir():
        # Every entry is a Markdown bullet; nesting is expressed via prefix.
        yield prefix + "- " + url(child)
        if child.is_dir():
            yield from tree_links(child, prefix=prefix + SPACE)
| StarcoderdataPython |
127933 | import unittest
import numpy as np
from pax import core, plugin
from pax.datastructure import Event, Peak
class TestPosRecMaxPMT(unittest.TestCase):
    """Unit tests for pax's PosRecMaxPMT position-reconstruction plugin."""
    def setUp(self):
        # Build a minimal pax processor that loads only the plugin under test.
        self.pax = core.Processor(config_names='XENON100', just_testing=True, config_dict={'pax': {
            'plugin_group_names': ['test'],
            'test': 'MaxPMT.PosRecMaxPMT'}})
        self.plugin = self.pax.get_plugin_by_name('PosRecMaxPMT')
    def tearDown(self):
        delattr(self, 'pax')
        delattr(self, 'plugin')
    @staticmethod
    def example_event(channels_with_something, area_per_channel=1):
        """Build an event with one S2 peak whose hitpattern covers the given channels."""
        # 243 = number of PMT channels in the XENON100 configuration used here.
        bla = np.zeros(243)
        bla[np.array(channels_with_something)] = area_per_channel
        e = Event.empty_event()
        e.peaks.append(Peak({'left': 5,
                             'right': 9,
                             'type': 'S2',
                             'detector': 'tpc',
                             'area_per_channel': bla}))
        return e
    def test_get_maxpmt_plugin(self):
        """The plugin loads and is the expected transform class."""
        self.assertIsInstance(self.plugin, plugin.TransformPlugin)
        self.assertEqual(self.plugin.__class__.__name__, 'PosRecMaxPMT')
    def test_posrec(self):
        """Test a hitpattern of all ones and one 2 (at PMT 42)"""
        ch = 42  # Could test more locations, little point
        hitp = np.ones(len(self.plugin.config['channels_top']))
        hitp[ch] = 2
        e = self.example_event(channels_with_something=self.plugin.config['channels_top'],
                               area_per_channel=hitp)
        e = self.plugin.transform_event(e)
        self.assertIsInstance(e, Event)
        self.assertEqual(len(e.peaks), 1)
        self.assertEqual(len(e.S2s()), 1)
        self.assertEqual(len(e.peaks[0].reconstructed_positions), 1)
        rp = e.peaks[0].reconstructed_positions[0]
        self.assertEqual(rp.algorithm, self.plugin.name)
        # The reconstructed position must be the location of the loudest PMT.
        self.assertEqual(rp.x, self.plugin.config['pmts'][ch]['position']['x'])
        self.assertEqual(rp.y, self.plugin.config['pmts'][ch]['position']['y'])
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6511931 | <reponame>kaitai-io/formats-kaitai-io.github.io
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import zlib
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Swf(KaitaiStruct):
"""SWF files are used by Adobe Flash (AKA Shockwave Flash, Macromedia
Flash) to encode rich interactive multimedia content and are,
essentially, a container for special bytecode instructions to play
back that content. In early 2000s, it was dominant rich multimedia
web format (.swf files were integrated into web pages and played
back with a browser plugin), but its usage largely declined in
2010s, as HTML5 and performant browser-native solutions
(i.e. JavaScript engines and graphical approaches, such as WebGL)
emerged.
There are a lot of versions of SWF (~36), format is somewhat
documented by Adobe.
.. seealso::
Source - https://www.adobe.com/content/dam/acom/en/devnet/pdf/swf-file-format-spec.pdf
"""
class Compressions(Enum):
zlib = 67
none = 70
lzma = 90
class TagType(Enum):
end_of_file = 0
place_object = 4
remove_object = 5
set_background_color = 9
define_sound = 14
place_object2 = 26
remove_object2 = 28
frame_label = 43
export_assets = 56
script_limits = 65
file_attributes = 69
place_object3 = 70
symbol_class = 76
metadata = 77
define_scaling_grid = 78
do_abc = 82
define_scene_and_frame_label_data = 86
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.compression = KaitaiStream.resolve_enum(Swf.Compressions, self._io.read_u1())
self.signature = self._io.read_bytes(2)
if not self.signature == b"\x57\x53":
raise kaitaistruct.ValidationNotEqualError(b"\x57\x53", self.signature, self._io, u"/seq/1")
self.version = self._io.read_u1()
self.len_file = self._io.read_u4le()
if self.compression == Swf.Compressions.none:
self._raw_plain_body = self._io.read_bytes_full()
_io__raw_plain_body = KaitaiStream(BytesIO(self._raw_plain_body))
self.plain_body = Swf.SwfBody(_io__raw_plain_body, self, self._root)
if self.compression == Swf.Compressions.zlib:
self._raw__raw_zlib_body = self._io.read_bytes_full()
self._raw_zlib_body = zlib.decompress(self._raw__raw_zlib_body)
_io__raw_zlib_body = KaitaiStream(BytesIO(self._raw_zlib_body))
self.zlib_body = Swf.SwfBody(_io__raw_zlib_body, self, self._root)
class Rgb(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.r = self._io.read_u1()
self.g = self._io.read_u1()
self.b = self._io.read_u1()
class DoAbcBody(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.flags = self._io.read_u4le()
self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self.abcdata = self._io.read_bytes_full()
class SwfBody(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.rect = Swf.Rect(self._io, self, self._root)
self.frame_rate = self._io.read_u2le()
self.frame_count = self._io.read_u2le()
if self._root.version >= 8:
self.file_attributes_tag = Swf.Tag(self._io, self, self._root)
self.tags = []
i = 0
while not self._io.is_eof():
self.tags.append(Swf.Tag(self._io, self, self._root))
i += 1
class Rect(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.b1 = self._io.read_u1()
self.skip = self._io.read_bytes(self.num_bytes)
@property
def num_bits(self):
if hasattr(self, '_m_num_bits'):
return self._m_num_bits if hasattr(self, '_m_num_bits') else None
self._m_num_bits = (self.b1 >> 3)
return self._m_num_bits if hasattr(self, '_m_num_bits') else None
@property
def num_bytes(self):
if hasattr(self, '_m_num_bytes'):
return self._m_num_bytes if hasattr(self, '_m_num_bytes') else None
self._m_num_bytes = (((self.num_bits * 4) - 3) + 7) // 8
return self._m_num_bytes if hasattr(self, '_m_num_bytes') else None
class Tag(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.record_header = Swf.RecordHeader(self._io, self, self._root)
_on = self.record_header.tag_type
if _on == Swf.TagType.define_sound:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.DefineSoundBody(_io__raw_tag_body, self, self._root)
elif _on == Swf.TagType.set_background_color:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.Rgb(_io__raw_tag_body, self, self._root)
elif _on == Swf.TagType.script_limits:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.ScriptLimitsBody(_io__raw_tag_body, self, self._root)
elif _on == Swf.TagType.do_abc:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.DoAbcBody(_io__raw_tag_body, self, self._root)
elif _on == Swf.TagType.export_assets:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.SymbolClassBody(_io__raw_tag_body, self, self._root)
elif _on == Swf.TagType.symbol_class:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.SymbolClassBody(_io__raw_tag_body, self, self._root)
else:
self.tag_body = self._io.read_bytes(self.record_header.len)
class SymbolClassBody(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.num_symbols = self._io.read_u2le()
self.symbols = [None] * (self.num_symbols)
for i in range(self.num_symbols):
self.symbols[i] = Swf.SymbolClassBody.Symbol(self._io, self, self._root)
class Symbol(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.tag = self._io.read_u2le()
self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
    class DefineSoundBody(KaitaiStruct):
        """Body of the DefineSound tag: sound id plus a packed byte of
        format/rate/width/channel flags, then the sample count.
        """
        class SamplingRates(Enum):
            rate_5_5_khz = 0
            rate_11_khz = 1
            rate_22_khz = 2
            rate_44_khz = 3
        class Bps(Enum):
            sound_8_bit = 0
            sound_16_bit = 1
        class Channels(Enum):
            mono = 0
            stereo = 1
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()
        def _read(self):
            self.id = self._io.read_u2le()
            # The next four fields are bit-packed MSB-first into one byte:
            # 4 bits format, 2 bits rate, 1 bit sample width, 1 bit channels.
            self.format = self._io.read_bits_int_be(4)
            self.sampling_rate = KaitaiStream.resolve_enum(Swf.DefineSoundBody.SamplingRates, self._io.read_bits_int_be(2))
            self.bits_per_sample = KaitaiStream.resolve_enum(Swf.DefineSoundBody.Bps, self._io.read_bits_int_be(1))
            self.num_channels = KaitaiStream.resolve_enum(Swf.DefineSoundBody.Channels, self._io.read_bits_int_be(1))
            # Re-align to a byte boundary before reading the sample count.
            self._io.align_to_byte()
            self.num_samples = self._io.read_u4le()
class RecordHeader(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.tag_code_and_length = self._io.read_u2le()
if self.small_len == 63:
self.big_len = self._io.read_s4le()
@property
def tag_type(self):
if hasattr(self, '_m_tag_type'):
return self._m_tag_type if hasattr(self, '_m_tag_type') else None
self._m_tag_type = KaitaiStream.resolve_enum(Swf.TagType, (self.tag_code_and_length >> 6))
return self._m_tag_type if hasattr(self, '_m_tag_type') else None
@property
def small_len(self):
if hasattr(self, '_m_small_len'):
return self._m_small_len if hasattr(self, '_m_small_len') else None
self._m_small_len = (self.tag_code_and_length & 63)
return self._m_small_len if hasattr(self, '_m_small_len') else None
@property
def len(self):
if hasattr(self, '_m_len'):
return self._m_len if hasattr(self, '_m_len') else None
self._m_len = (self.big_len if self.small_len == 63 else self.small_len)
return self._m_len if hasattr(self, '_m_len') else None
    class ScriptLimitsBody(KaitaiStruct):
        """Body of the ScriptLimits tag: ActionScript recursion-depth and
        timeout settings, both stored as u2 little-endian.
        """
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._read()
        def _read(self):
            self.max_recursion_depth = self._io.read_u2le()
            self.script_timeout_seconds = self._io.read_u2le()
| StarcoderdataPython |
5118189 | <filename>test/test_copy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import pytest
from snowflake.sqlalchemy import (
AWSBucket,
AzureContainer,
CopyFormatter,
CopyIntoStorage,
CSVFormatter,
ExternalStage,
JSONFormatter,
PARQUETFormatter,
)
from sqlalchemy import Column, Integer, MetaData, Sequence, String, Table
from sqlalchemy.sql import select, text
def test_external_stage(sql_compiler):
    """Check the ExternalStage helpers and its compiled @namespace.name/path form."""
    # The static helpers add the trailing-dot / leading-slash separators.
    assert ExternalStage.prepare_namespace("something") == "something."
    assert ExternalStage.prepare_path("prefix") == "/prefix"
    # Fully specified stage: namespace, name and path all appear.
    full_stage = ExternalStage(name="name", path="prefix/path", namespace="namespace")
    assert sql_compiler(full_stage) == "@namespace.name/prefix/path"
    # Omitting namespace and path leaves just the bare stage reference.
    bare_stage = ExternalStage(name="name", path=None, namespace=None)
    assert sql_compiler(bare_stage) == "@name"
def test_copy_into_location(engine_testaccount, sql_compiler):
    """Compile COPY INTO statements for S3/Azure targets (and back into a
    table) and check the emitted SQL; then submit them to the test account
    expecting controlled failures (see NOTE near the bottom).
    """
    meta = MetaData()
    conn = engine_testaccount.connect()
    food_items = Table("python_tests_foods", meta,
                       Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
                       Column('name', String),
                       Column('quantity', Integer))
    meta.create_all(engine_testaccount)
    # table -> S3 bucket with KMS encryption, CSV format
    copy_stmt_1 = CopyIntoStorage(from_=food_items,
                                  into=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
                                      '1234abcd-12ab-34cd-56ef-1234567890ab'),
                                  formatter=CSVFormatter().record_delimiter('|').escape(None).null_if(['null', 'Null']))
    assert (sql_compiler(copy_stmt_1) == "COPY INTO 's3://backup' FROM python_tests_foods FILE_FORMAT=(TYPE=csv "
                                         "ESCAPE=None NULL_IF=('null', 'Null') RECORD_DELIMITER='|') ENCRYPTION="
                                         "(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
    copy_stmt_2 = CopyIntoStorage(from_=select([food_items]).where(food_items.c.id == 1),  # Test sub-query
                                  into=AWSBucket.from_uri('s3://backup').credentials(
                                      aws_role='some_iam_role').encryption_aws_sse_s3(),
                                  formatter=JSONFormatter().file_extension('json').compression('zstd'))
    assert (sql_compiler(copy_stmt_2) == "COPY INTO 's3://backup' FROM (SELECT python_tests_foods.id, "
                                         "python_tests_foods.name, python_tests_foods.quantity FROM python_tests_foods "
                                         "WHERE python_tests_foods.id = 1) FILE_FORMAT=(TYPE=json COMPRESSION='zstd' "
                                         "FILE_EXTENSION='json') CREDENTIALS=(AWS_ROLE='some_iam_role') "
                                         "ENCRYPTION=(TYPE='AWS_SSE_S3')")
    # table -> Azure container with SAS token, Parquet format
    copy_stmt_3 = CopyIntoStorage(from_=food_items,
                                  into=AzureContainer.from_uri(
                                      'azure://snowflake.blob.core.windows.net/snowpile/backup'
                                  ).credentials('token'),
                                  formatter=PARQUETFormatter().snappy_compression(True))
    assert (sql_compiler(copy_stmt_3) == "COPY INTO 'azure://snowflake.blob.core.windows.net/snowpile/backup' "
                                         "FROM python_tests_foods FILE_FORMAT=(TYPE=parquet SNAPPY_COMPRESSION=true) "
                                         "CREDENTIALS=(AZURE_SAS_TOKEN='token')")
    # mutating the statement afterwards adds the MAX_FILE_SIZE option
    copy_stmt_3.maxfilesize(50000000)
    assert (sql_compiler(copy_stmt_3) == "COPY INTO 'azure://snowflake.blob.core.windows.net/snowpile/backup' "
                                         "FROM python_tests_foods FILE_FORMAT=(TYPE=parquet SNAPPY_COMPRESSION=true) "
                                         "MAX_FILE_SIZE = 50000000 "
                                         "CREDENTIALS=(AZURE_SAS_TOKEN='token')")
    # reverse direction: S3 bucket -> table
    copy_stmt_4 = CopyIntoStorage(from_=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
        '1234abcd-12ab-34cd-56ef-1234567890ab'),
                                  into=food_items,
                                  formatter=CSVFormatter().record_delimiter('|').escape(None).null_if(['null', 'Null']))
    assert (sql_compiler(copy_stmt_4) == "COPY INTO python_tests_foods FROM 's3://backup' FILE_FORMAT=(TYPE=csv "
                                         "ESCAPE=None NULL_IF=('null', 'Null') RECORD_DELIMITER='|') ENCRYPTION="
                                         "(KMS_KEY_ID='<KEY>' TYPE='AWS_SSE_KMS')")
    copy_stmt_5 = CopyIntoStorage(from_=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
        '1234abcd-12ab-34cd-56ef-1234567890ab'),
                                  into=food_items,
                                  formatter=CSVFormatter().field_delimiter(','))
    assert (sql_compiler(copy_stmt_5) == "COPY INTO python_tests_foods FROM 's3://backup' FILE_FORMAT=(TYPE=csv "
                                         "FIELD_DELIMITER=',') ENCRYPTION="
                                         "(KMS_KEY_ID='<KEY>' TYPE='AWS_SSE_KMS')")
    # named external stages as the COPY INTO target
    copy_stmt_6 = CopyIntoStorage(from_=food_items, into=ExternalStage(name="stage_name"), formatter=CSVFormatter())
    assert sql_compiler(copy_stmt_6) == "COPY INTO @stage_name FROM python_tests_foods FILE_FORMAT=(TYPE=csv)"
    copy_stmt_7 = CopyIntoStorage(from_=food_items, into=ExternalStage(name="stage_name", path="prefix/file", namespace="name"), formatter=CSVFormatter())
    assert sql_compiler(copy_stmt_7) == "COPY INTO @name.stage_name/prefix/file FROM python_tests_foods FILE_FORMAT=(TYPE=csv)"
    # NOTE Other than expect known compiled text, submit it to RegressionTests environment and expect them to fail, but
    # because of the right reasons
    try:
        acceptable_exc_reasons = {'Failure using stage area',
                                  'AWS_ROLE credentials are not allowed for this account.',
                                  'AWS_ROLE credentials are invalid'}
        for stmnt in (copy_stmt_1, copy_stmt_2, copy_stmt_3, copy_stmt_4):
            with pytest.raises(Exception) as exc:
                conn.execute(stmnt)
            if not any(map(lambda reason: reason in str(exc) or reason in str(exc.value), acceptable_exc_reasons)):
                raise Exception("Not acceptable exception: {} {}".format(str(exc), str(exc.value)))
    finally:
        # always release the connection and drop the scratch table
        conn.close()
        food_items.drop(engine_testaccount)
def test_copy_into_storage_csv_extended(sql_compiler):
    """
    This test compiles the SQL to read CSV data from a stage and insert it into a
    table.
    The CSV formatting statements are inserted inline, i.e. no explicit SQL definition
    of that format is necessary.
    The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
    executed before. This way, the COPY INTO statement does not need to know any
    security details (credentials or tokens)
    """
    # target table definition (NB: this could be omitted for the test, since the
    # SQL statement copies the whole CSV and assumes the target structure matches)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define a CSV formatter
    formatter = (
        CSVFormatter()
        .compression("AUTO")
        .field_delimiter(",")
        .record_delimiter(r"\n")
        .field_optionally_enclosed_by(None)
        .escape(None)
        .escape_unenclosed_field(r"\134")
        .date_format("AUTO")
        .null_if([r"\N"])
        .skip_header(1)
        .trim_space(False)
        .error_on_column_count_mismatch(True)
    )
    # define CopyInto object; reads all CSV data (=> pattern) from
    # the sub-path "testdata" beneath the root stage
    copy_into = CopyIntoStorage(
        from_=ExternalStage.from_parent_stage(root_stage, "testdata"),
        into=target_table,
        formatter=formatter
    )
    # copy options are plain key/value strings rendered after the FILE_FORMAT clause
    copy_into.copy_options = {"pattern": "'.*csv'", "force": "TRUE"}
    # check that the result is as expected
    result = sql_compiler(copy_into)
    expected = (
        r"COPY INTO TEST_IMPORT "
        r"FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata "
        r"FILE_FORMAT=(TYPE=csv COMPRESSION='auto' DATE_FORMAT='AUTO' "
        r"ERROR_ON_COLUMN_COUNT_MISMATCH=True ESCAPE=None "
        r"ESCAPE_UNENCLOSED_FIELD='\134' FIELD_DELIMITER=',' "
        r"FIELD_OPTIONALLY_ENCLOSED_BY=None NULL_IF=('\N') RECORD_DELIMITER='\n' "
        r"SKIP_HEADER=1 TRIM_SPACE=False) force = TRUE pattern = '.*csv'"
    )
    assert result == expected
def test_copy_into_storage_parquet_named_format(sql_compiler):
    """
    This test compiles the SQL to read Parquet data from a stage and insert it into a
    table. The source file is accessed using a SELECT statement.
    The Parquet formatting definitions are defined in a named format which was
    explicitly created before.
    The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
    executed before. This way, the COPY INTO statement does not need to know any
    security details (credentials or tokens)
    """
    # target table definition (NB: this could be omitted for the test, as long as
    # the statement is not executed)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define the SELECT statement to access the source file.
    # we could probably define source table metadata and use SQLAlchemy Column objects
    # instead of texts, but this seems to be the easiest way.
    sel_statement = select(
        text("$1:COL1::number"),
        text("$1:COL2::varchar")
    ).select_from(
        ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet")
    )
    # use an existing source format.
    formatter = CopyFormatter(format_name="parquet_file_format")
    # setup CopyInto object
    copy_into = CopyIntoStorage(
        from_=sel_statement,
        into=target_table,
        formatter=formatter
    )
    copy_into.copy_options = {"force": "TRUE"}
    # compile and check the result
    result = sql_compiler(copy_into)
    expected = (
        "COPY INTO TEST_IMPORT "
        "FROM (SELECT $1:COL1::number, $1:COL2::varchar "
        "FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet) "
        "FILE_FORMAT=(format_name = parquet_file_format) force = TRUE"
    )
    assert result == expected
def test_copy_into_storage_parquet_files(sql_compiler):
    """
    This test compiles the SQL to read Parquet data from a stage and insert it into a
    table. The source file is accessed using a SELECT statement.
    The Parquet formatting definitions are defined in a named format which was
    explicitly created before. The format is specified as a property of the stage,
    not the CopyInto object.
    The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
    executed before. This way, the COPY INTO statement does not need to know any
    security details (credentials or tokens).
    The FORCE option is set using the corresponding function in CopyInto.
    The FILES option is set to choose the files to upload
    """
    # target table definition (NB: this could be omitted for the test, as long as
    # the statement is not executed)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define the SELECT statement to access the source file.
    # we could probably define source table metadata and use SQLAlchemy Column objects
    # instead of texts, but this seems to be the easiest way.
    sel_statement = select(
        text("$1:COL1::number"),
        text("$1:COL2::varchar")
    ).select_from(
        ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet", file_format="parquet_file_format")
    )
    # setup CopyInto object; note the fluent force()/files() modifiers
    copy_into = CopyIntoStorage(
        from_=sel_statement,
        into=target_table,
    ).force(True).files(["foo.txt", "bar.txt"])
    # compile and check the result
    result = sql_compiler(copy_into)
    expected = (
        "COPY INTO TEST_IMPORT "
        "FROM (SELECT $1:COL1::number, $1:COL2::varchar "
        "FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet "
        "(file_format => parquet_file_format)) FILES = ('foo.txt','bar.txt') "
        "FORCE = true"
    )
    assert result == expected
def test_copy_into_storage_parquet_pattern(sql_compiler):
    """
    This test compiles the SQL to read Parquet data from a stage and insert it into a
    table. The source file is accessed using a SELECT statement.
    The Parquet formatting definitions are defined in a named format which was
    explicitly created before. The format is specified as a property of the stage,
    not the CopyInto object.
    The Stage is a named stage, i.e. we assume that a CREATE STAGE statement was
    executed before. This way, the COPY INTO statement does not need to know any
    security details (credentials or tokens).
    The FORCE option is set using the corresponding function in CopyInto.
    The PATTERN option is set to choose multiple files
    """
    # target table definition (NB: this could be omitted for the test, as long as
    # the statement is not executed)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define the SELECT statement to access the source file.
    # we could probably define source table metadata and use SQLAlchemy Column objects
    # instead of texts, but this seems to be the easiest way.
    sel_statement = select(
        text("$1:COL1::number"),
        text("$1:COL2::varchar")
    ).select_from(
        ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet", file_format="parquet_file_format")
    )
    # setup CopyInto object; pattern() selects files by regex on the stage
    copy_into = CopyIntoStorage(
        from_=sel_statement,
        into=target_table,
    ).force(True).pattern("'.*csv'")
    # compile and check the result
    result = sql_compiler(copy_into)
    expected = (
        "COPY INTO TEST_IMPORT "
        "FROM (SELECT $1:COL1::number, $1:COL2::varchar "
        "FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet "
        "(file_format => parquet_file_format)) FORCE = true PATTERN = '.*csv'"
    )
    assert result == expected
| StarcoderdataPython |
1932405 | <reponame>stadham/mudpy
import rooms
# ANSI escape sequences (bright/bold variants) used to colourize text sent
# to connected clients; "reset" restores the default terminal attributes.
color = {
    "black": u"\u001b[30;1m",
    "red": u"\u001b[31;1m",
    "green": u"\u001b[32;1m",
    "yellow": u"\u001b[33;1m",
    "blue": u"\u001b[34;1m",
    "magenta": u"\u001b[35;1m",
    "cyan": u"\u001b[36;1m",
    "white": u"\u001b[37;1m",
    "reset": u"\u001b[0m"
}
class Fists(object):
    """Placeholder weapon used when the character has nothing equipped.

    Assigned to ``character.equipped_weapon`` by the unequip command so
    the rest of the code can always rely on a weapon-like object (with
    ``name``, ``description`` and ``power``) being present.
    """

    def __init__(self):
        self.name = "Fists"
        self.description = "wimpy looking fists"
        self.power = 1
class Naked(object):
    """Placeholder armor used when the character has no armor equipped.

    Mirrors :class:`Fists` for the armor slot: a zero-defense object so
    armor attributes can always be read without special-casing.
    """

    def __init__(self):
        self.name = "Naked"
        self.description = "absolutely nothing"
        self.defense = 0
class Character(object):
    """
    Creates the generic attributes for our characters. Stores all of the
    variables such as name, current room, and authentication status. Also,
    contains all of the necessary methods that the character will need.
    """
    def __init__(self):
        # Creates generic attributes for the character
        self.name = "unknown"
        self.menu_level = 0
        # NOTE(review): initialized to a room *name* string, but get_status()
        # reads self.room.name -- presumably replaced with a room object
        # (see the rooms module import) before get_status is called; confirm.
        self.room = "Tavern"
        self.authenticated = False
        self.muted_players = []
        self.afk_status = False
        self.gold = 0
        self.level = 1
        self.exp = 0
        self.inventory = []
        # start unarmed/unarmored via the placeholder item objects
        self.equipped_weapon = Fists()
        self.equipped_armor = Naked()
    def __str__(self):
        # Define the default string representation of the warrior class
        return ("%s is a lvl %d %s with %d health.") % (self.name,self.level,
                                                        self.type,self.health)
    def equip(self,item):
        # equips either a weapon or armor and updates the power/def
        # NOTE(review): relies on self.base_power / self.base_defense, which
        # only some subclasses (e.g. Warrior) define -- confirm all callers.
        if item.equip == "weapon":
            # adds the weapon object to the characters equipped_weapon attrib
            self.equipped_weapon = item
            self.power = self.base_power + self.equipped_weapon.power
        elif item.equip == "armor":
            # adds the armor object to the characters equipped_armor attrib
            self.equipped_armor = item
            self.defense = self.base_defense + self.equipped_armor.defense
    def get_items(self):
        # Iterate through the characters items and display them to the console.
        # NOTE(review): assumes each inventory entry has .displayName and
        # .quantity attributes -- verify against the item classes in use.
        inventoryList = [] # create an empty list to hold each item
        # handles if the inventory is empty
        if not self.inventory:
            return ["nothing but air"]
        else:
            # iterates through the list which holds dictionary items
            for item in self.inventory:
                # append each dictionary key which is a string of the item
                inventoryList.append(("%s[%d]") % (item.displayName,item.quantity))
        # return a string of the items list separated by a comma and a space
        return inventoryList
    def get_status(self):
        # determine the offset lengths for all left-side dynamic numbers
        a = len(str(self.health)) + len(str(self.max_health))
        b = len(str(self.exp))
        c = len(str(self.base_power)) + len(str(self.equipped_weapon.power)) + len(str(self.power))
        d = len(str(self.base_defense)) + len(str(self.equipped_armor.defense)) + len(str(self.defense))
        # multipled spaces and subtracted them by the dynamic length of the left-side Variables
        # this ensures everything stays nicely formatted. Returns a list of strings
        status_screen = ["********************************************************************************",
                         " Name : {0}{1}{2}".format(color["yellow"],self.name,color["reset"]),
                         " Gold : {0}{1}{2}".format(color["yellow"],self.gold,color["reset"]),
                         " Level : {0}{1}{2} Class : {3}{4}{5} Current Room : {6}{7}{8}".format(color["yellow"],
        self.level,color["reset"],color["yellow"],self.type,color["reset"],color["yellow"],self.room.name,color["reset"]),
                         "********************************************************************************",
                         (" Health : {0}{1}{2}/{3}{4}{5}" + " "*(37-a) + "Weapon: {6}{7}{8}").format(color["red"],
        self.health,color["reset"],color["yellow"],self.max_health,color["reset"],color["red"],self.equipped_weapon.name,color["reset"]),
                         (" Experience : {0}{1}{2}" + " "*(38-b) + "Armor : {3}{4}{5}").format(color["yellow"],
        self.exp,color["reset"],color["red"],self.equipped_armor.name,color["reset"]),
                         "********************************************************************************",
                         (" Attack Power : {0}({1}{2}{3})/{4}" + " "*(33-c) + "Critical : {5}{6}{7}").format(self.base_power,
        color["blue"],self.equipped_weapon.power,color["reset"],self.power,color["blue"],self.critical,color["reset"]),
                         (" Defense : {0}({1}{2}{3})/{4}" + " "*(33-d) + "Crit % : {5}{6}{7}").format(self.base_defense,
        color["blue"],self.equipped_armor.defense,color["reset"],self.defense,color["blue"],self.crit_chance,color["reset"]),
                         "********************************************************************************"]
        return status_screen
class Warrior(Character):
    """
    Creates the Warrior class which inherits all of the properties from the
    Character class.
    """
    def __init__(self):
        # Generate Warrior specific attributes
        super(Warrior,self).__init__()
        self.type = "Warrior"
        # base_* stats are required by Character.equip when re-equipping
        self.base_power = 5
        # effective power includes the currently equipped weapon (Fists = 1)
        self.power = self.base_power + self.equipped_weapon.power
        self.health = 100
        self.max_health = 100
        self.base_defense = .9
        # effective defense includes the currently equipped armor (Naked = 0)
        self.defense = self.base_defense + self.equipped_armor.defense
        self.evade_chance = 20
        self.magic = 1
        self.critical = 1.2
        self.crit_chance = 10
        self.spells = {}
class Mage(Character):
    """
    Creates the Mage class which inherits all of the properties from the
    Character class.

    Fix: the mage previously stored its starting weapon as a plain dict
    (``{"Staff": 2}``) and never defined ``base_power``/``base_defense``,
    which broke ``Character.equip`` and ``Character.get_status`` -- both
    expect a weapon *object* with ``name``/``power`` attributes plus the
    ``base_*`` stats, as established by ``Fists`` and ``Warrior``.  The
    staff is now a proper weapon object and the stats follow the same
    base + equipment pattern as ``Warrior``.
    """

    class Staff(object):
        """Default mage weapon, mirroring the Fists/Naked starter-item style."""

        def __init__(self):
            self.name = "Staff"
            self.description = "a plain wooden staff"
            self.power = 2
            self.equip = "weapon"  # tells Character.equip which slot to use

    def __init__(self):
        # Generate Mage specific attributes
        super(Mage, self).__init__()
        self.type = "Mage"
        self.base_power = 1
        self.health = 90
        self.max_health = 90
        self.base_defense = .97
        # effective defense includes the currently equipped armor (Naked = 0)
        self.defense = self.base_defense + self.equipped_armor.defense
        self.evade_chance = 15
        self.magic = 5
        self.critical = 1.2
        self.crit_chance = 8
        # equip() sets equipped_weapon and power = base_power + weapon.power
        self.equip(Mage.Staff())
        self.spells = {"Spark": 10, "Storm": 12}
class Rogue(Character):
    """
    Creates the Rogue class which inherits all of the properties from the
    Character class.
    """
    def __init__(self):
        #Generate Rogue specific attributes
        super(Rogue,self).__init__()
        self.type = "Rogue"
        self.power = 3
        self.health = 100
        self.max_health = 100
        self.defense = .95
        self.evade_chance = 10
        self.magic = 1
        self.critical = 1.5
        self.crit_chance = 6
        # NOTE(review): equipped_weapon is a plain dict here, but
        # Character.get_status/equip expect an object with .name/.power
        # (cf. Fists and Warrior); base_power/base_defense are also not
        # defined, so Character.equip would fail -- confirm intent.
        self.equipped_weapon = {"Daggers": 8}
        self.spells = {}
class Daemon(Character):
    """
    Creates the Daemon class which inherits all of the properties from the
    Character class.
    """
    def __init__(self):
        #Generate Daemon specific attributes
        super(Daemon,self).__init__()
        self.type = "Daemon"
        self.power = 6
        self.health = 80
        self.max_health = 80
        self.defense = .99
        self.evade_chance = 13
        self.magic = 3
        self.critical = 1.7
        self.crit_chance = 12
        # NOTE(review): equipped_weapon is a plain dict here, but
        # Character.get_status/equip expect an object with .name/.power
        # (cf. Fists and Warrior); base_power/base_defense are also not
        # defined, so Character.equip would fail -- confirm intent.
        self.equipped_weapon = {"Reaper": 11}
        self.spells = {"Hell Fire": 2}
11386813 | <reponame>OasisLMF/OasisLMF
__all__ = [
'oasis_log',
'read_log_config',
'set_rotating_logger'
]
"""
Logging utils.
"""
import inspect
import logging
import os
import time
from functools import wraps
from logging.handlers import RotatingFileHandler
def getargspec(func):
    """Return the argument spec of *func*.

    Prefers ``inspect.getfullargspec`` (Python 3) and falls back to the
    legacy ``inspect.getargspec`` on interpreters that lack it.
    """
    spec_fn = getattr(inspect, 'getfullargspec', None)
    if spec_fn is None:
        spec_fn = inspect.getargspec
    return spec_fn(func)
def set_rotating_logger(
    log_file_path=None,
    log_level=logging.INFO,
    max_file_size=10**7,
    max_backups=5
):
    """Attach a size-rotating file handler to the root logger.

    :param log_file_path: Path of the log file; parent directories are
        created if needed. Defaults to a path based on the *caller's*
        source file. (Previously the default was ``inspect.stack()[1][1]``
        evaluated in the function signature, i.e. once at import time,
        which froze the importing module's path into the default; it is
        now resolved on each call.)
    :param log_level: Level to set on the root logger.
    :param max_file_size: Maximum file size in bytes before rollover.
    :param max_backups: Number of rolled-over backup files to keep.
    """
    if log_file_path is None:
        # Resolve lazily so the default reflects the actual caller.
        log_file_path = inspect.stack()[1][1]
    _log_fp = os.path.abspath(log_file_path)
    log_dir = os.path.dirname(_log_fp)
    if log_dir:
        # exist_ok avoids a race with other processes creating the dir.
        os.makedirs(log_dir, exist_ok=True)
    handler = RotatingFileHandler(
        _log_fp,
        maxBytes=max_file_size,
        backupCount=max_backups
    )
    # Attach the formatter before registering so no record is emitted
    # unformatted.
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(levelname)s - %(message)s"))
    logging.getLogger().setLevel(log_level)
    logging.getLogger().addHandler(handler)
def read_log_config(config_parser):
    """
    Read an Oasis standard logging config and attach a rotating file
    handler to the root logger.

    :param config_parser: mapping (e.g. a ConfigParser section) providing
        ``LOG_FILE``, ``LOG_LEVEL``, ``LOG_MAX_SIZE_IN_BYTES`` and
        ``LOG_BACKUP_COUNT``.
    """
    log_file = config_parser['LOG_FILE']
    log_level = config_parser['LOG_LEVEL']
    log_max_size_in_bytes = int(config_parser['LOG_MAX_SIZE_IN_BYTES'])
    log_backup_count = int(config_parser['LOG_BACKUP_COUNT'])
    log_dir = os.path.dirname(log_file)
    # dirname is '' for a bare filename, and os.makedirs('') raises;
    # exist_ok also avoids a race with concurrent directory creation.
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    handler = RotatingFileHandler(
        log_file, maxBytes=log_max_size_in_bytes,
        backupCount=log_backup_count)
    # Attach the formatter before registering the handler.
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(levelname)s - %(message)s"))
    logging.getLogger().setLevel(log_level)
    logging.getLogger().addHandler(handler)
def oasis_log(*args, **kwargs):
    """
    Decorator that logs the entry, exit and execution time.

    Usable both bare (``@oasis_log``) and called (``@oasis_log()``); the
    trailing if/else distinguishes the two forms. Decorator-level kwargs
    are currently accepted but unused.
    """
    # NOTE: the root logger is captured once at decoration time.
    logger = logging.getLogger()
    def actual_oasis_log(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            func_name = func.__name__
            caller_module_name = func.__globals__.get('__name__')
            # __init__ calls are demoted to debug to reduce log noise
            if func_name == '__init__':
                logger.debug("RUNNING: {}.{}".format(
                    caller_module_name, func_name))
            else:
                logger.info("RUNNING: {}.{}".format(
                    caller_module_name, func_name))
            # zip truncates, so extra positional args are handled below
            args_name = getargspec(func)[0]
            args_dict = dict(zip(args_name, args))
            for key, value in args_dict.items():
                if key == "self":
                    continue
                logger.debug("    {} == {}".format(key, value))
            # positional args beyond the named parameters (e.g. *args)
            if len(args) > len(args_name):
                for i in range(len(args_name), len(args)):
                    logger.debug("    {}".format(args[i]))
            for key, value in kwargs.items():
                logger.debug("    {} == {}".format(key, value))
            start = time.time()
            result = func(*args, **kwargs)
            end = time.time()
            # Only log timestamps on functions which took longer than 10ms
            if (end-start) > 0.01:
                logger.info(
                    "COMPLETED: {}.{} in {}s".format(
                        caller_module_name, func_name, round(end - start, 2)))
            else:
                logger.debug(
                    "COMPLETED: {}.{} in {}s".format(
                        caller_module_name, func_name, round(end - start, 2)))
            return result
        return wrapper
    # bare-decorator form: the decorated function arrives as the only arg
    if len(args) == 1 and callable(args[0]):
        return actual_oasis_log(args[0])
    else:
        return actual_oasis_log
| StarcoderdataPython |
6475588 | """ Helper utils to compose objects as blobs of html
"""
import yattag
from . import util
from .recipe import QuantizedIngredient
def close(content, tag, **kwargs):
    """Wrap *content* in an HTML element *tag* with the given attributes.

    ``class_`` and ``klass`` are accepted as aliases for the reserved word
    ``class`` (``klass`` wins if both are given). Previously the alias key
    was left in ``kwargs`` as well, emitting an invalid duplicate attribute
    (e.g. ``<div klass="x" class="x">``); the alias is now removed. A tag
    with no attributes also no longer contains a stray space (``<em>``
    instead of ``<em >``).
    """
    for alias in ('class_', 'klass'):
        if alias in kwargs:
            kwargs['class'] = kwargs.pop(alias)
    attributes = ' '.join('{}="{}"'.format(k, v) for k, v in kwargs.items())
    if attributes:
        return '<{0} {2}>{1}</{0}>'.format(tag, content, attributes)
    return '<{0}>{1}</{0}>'.format(tag, content)

def em(content, **kwargs):
    """Wrap content in an <em> element."""
    return close(content, 'em', **kwargs)

def small(content, **kwargs):
    """Wrap content in a <small> element."""
    return close(content, 'small', **kwargs)

def sup(content, **kwargs):
    """Wrap content in a <sup> element."""
    return close(content, 'sup', **kwargs)

def small_br(content, **kwargs):
    """Wrap content plus a trailing <br> in a <small> element."""
    return small(content+'<br>', **kwargs)

def wrap_link(link, content, **kwargs):
    """Wrap content in an anchor pointing at *link*.

    ``kwargs`` is kept for signature compatibility but ignored: the format
    string has no named fields, so extra keyword arguments to ``str.format``
    were always discarded.
    """
    return '<a href={}>{}</a>'.format(link, content)
def recipe_as_html(recipe, display_opts, order_link=None, condense_ingredients=False, fancy=True, convert_to=None):
    """ use yattag lib to build an html blob contained in a div for the recipe

    :param recipe: the recipe object to render (converted in place when
        convert_to is given)
    :param display_opts: option flags/values read here: origin, prices,
        markup, prep_line, info, variants, examples
    :param order_link: when given, the outer element becomes an <a> with
        this href instead of a <div>
    :param condense_ingredients: render ingredients inline instead of a list
    :param fancy: enables the card styling, glass image and price float
    :param convert_to: optional unit passed to recipe.convert()
    """
    doc, tag, text, line = yattag.Doc().ttl()
    # map of glass names to their SVG image paths
    glassware = {
        "cocktail": "/static/glassware/coupe.svg",
        "martini": "/static/glassware/martini.svg",
        "highball": "/static/glassware/highball.svg",
        "collins": "/static/glassware/collins.svg",
        "hurricane": "/static/glassware/highball.svg",
        "rocks": "/static/glassware/rocks.svg",
        "copper mug": "/static/glassware/mule_mug.svg",
        "tiki": "/static/glassware/rocks.svg",
        "flute": "/static/glassware/flute.svg",
        "glencairn": "/static/glassware/glencairn.svg",
        "mug": "/static/glassware/irish_coffee_mug.svg",
        "white wine": "/static/glassware/white_wine_kentfield.svg",
        "red wine": "/static/glassware/red_wine_kentfield.svg",
        "shooter": "/static/glassware/shooter.svg",
        "shot": "/static/glassware/shot.svg",
    }
    if convert_to:
        recipe.convert(convert_to)
    main_tag = 'div'
    extra_kwargs = {"klass": "card card-body h-100"}
    if fancy:
        extra_kwargs['klass'] += " shadow-sm"
    if order_link:
        main_tag = 'a'
        extra_kwargs['href'] = order_link
    with tag(main_tag, id=recipe.name, **extra_kwargs):
        # embed glass image in name line
        name_line = []
        recipe_name = recipe.name
        # originals get a superscript asterisk marker after the name
        if display_opts.origin and 'schubar original' in recipe.origin.lower():
            recipe_name += sup('*')
        # attempt hack for keeping text aligned right of image when wrapping
        if fancy:
            name_line.append('<div class="clearfix" style="position:relative;">')
            name_line.append('<img src={} style="height:2.2em; float:left;">'.format(glassware.get(recipe.glass)))
            name_line.append(close(recipe_name, 'span', style="position:absolute;bottom:0;"))
        else:
            name_line.append(close(recipe_name, 'span'))
        if display_opts.prices and recipe.max_cost:
            price = util.calculate_price(recipe.max_cost, display_opts.markup)
            # nbsp keeps the price glued to the name in fancy mode
            price = '&{};{}{}'.format('nbsp' if fancy else 'mdash', sup('$'), price)
            if fancy:
                name_line.append(close(price, 'p', style="float:right"))
            else:
                name_line.append(price)
        if fancy:
            name_line.append("</div><!-- recipe name text -->")
            name_line = close(''.join(name_line), 'h4', class_="card-title",
                              style="margin-left:-0.35em;") # tweak to the left
        else:
            name_line = close(''.join(name_line), 'h3')
        doc.asis(name_line)
        if display_opts.prep_line:
            doc.asis(small_br(recipe.prep_line(extended=True, caps=False)))
        if display_opts.info and recipe.info:
            doc.asis(small_br(em(recipe.info)))
        if condense_ingredients:
            # single comma-separated line instead of a <ul>
            ingredients = ', '.join([str(ingredient.specifier) for ingredient in recipe.ingredients
                if isinstance(ingredient, QuantizedIngredient)])
            doc.asis(ingredients+'<br>')
        else:
            with tag('ul', id='ingredients'):
                for item in recipe.ingredients:
                    line('li', item.str(), type="none")
        if display_opts.variants:
            if condense_ingredients:
                # also need these to not be indented
                for variant in recipe.variants:
                    doc.asis(small(em(variant)))
            else:
                with tag('ul', id='variants'):
                    for variant in recipe.variants:
                        with tag('small'):
                            with tag('li', type="none"):
                                line('em', variant)
        if display_opts.examples and recipe.examples:# and recipe.name != 'The Cocktail':
            # special display for recipe with examples
            # TODO pull out bitters into supplemental list
            if display_opts.prices:
                for e in sorted(recipe.examples, key=lambda x: x.cost):
                    # "A Dram" gets an extra markup bump
                    markup = 1.1+display_opts.markup if recipe.name == "A Dram" else display_opts.markup
                    fields = {
                        'cost': util.calculate_price(e.cost, markup),
                        'abv': e.abv,
                        'kinds': e.kinds
                    }
                    doc.asis(small_br("${cost:>3.0f} | {abv:.1f}% | {kinds}".format(**fields)))
            else:
                for e in recipe.examples:
                    doc.asis(small_br("${cost:.2f} | {abv:.2f}% | {std_drinks:.2f} | {kinds}".format(**e._asdict())))
    return str(doc.getvalue())
def as_table(objects, headings, cells, formatters, outer_div="", table_id="", table_cls="table", thead_cls="", tbody_cls=""):
    """ Generate HTML table where objects are instances of db.Models
    headings, cells, formatters are three lists of equal length,
    where headings are the table headings, cells are the attributes to put in those cells,
    and formatters are a list of formatter callables to apply to the arguments
    table_cls, thead_cls, tbody_cls are class tags to apply to those elements
    """
    doc, tag, text, line = yattag.Doc().ttl()
    with tag('div', klass=outer_div):
        with tag('table', klass=table_cls, id=table_id):
            with tag('thead', klass=thead_cls):
                with tag('tr'):
                    for heading in headings:
                        line('th', heading, scope="col")
            with tag('tbody', klass=tbody_cls):
                for obj in objects:
                    with tag('tr'):
                        for cell, formatter in zip(cells, formatters):
                            # NOTE: a previous try/except UnicodeEncodeError
                            # re-ran this identical expression in the handler
                            # (which would raise again), so the guard was a
                            # no-op and has been removed.
                            doc.asis(close(formatter(getattr(obj, cell)), 'td'))
    return str(doc.getvalue())
def users_as_table(users):
    """Render the given users as a compact responsive HTML table."""
    columns = [
        ("ID", "id", str),
        ("Email", "email", str),
        ("First", "first_name", str),
        ("Last", "last_name", str),
        ("Nickname", "nickname", str),
        ("Logins", "login_count", str),
        ("Last", "last_login_at", str),
        ("Confirmed", "confirmed_at", str),
        ("Roles", "get_role_names", lambda x: x()),
        ("Orders", "orders", len),
    ]
    headings = [heading for heading, _, _ in columns]
    cells = [cell for _, cell, _ in columns]
    formatters = [fmt for _, _, fmt in columns]
    return as_table(users, headings, cells, formatters, outer_div="table-responsive-sm", table_cls="table table-sm")
def yes_no(b):
    """Map a truthy value to 'yes' and a falsy one to 'no'."""
    return ('no', 'yes')[bool(b)]
def orders_as_table(orders):
    """Render the given orders as a compact responsive HTML table."""
    columns = [
        ("ID", "id", str),
        ("Timestamp", "timestamp", str),
        ("Confirmed", "confirmed", yes_no),
        ("User ID", "user_id", str),
        ("Bar ID", "bar_id", str),
        ("Recipe", "recipe_name", str),
    ]
    headings = [heading for heading, _, _ in columns]
    cells = [cell for _, cell, _ in columns]
    formatters = [fmt for _, _, fmt in columns]
    return as_table(orders, headings, cells, formatters, outer_div="table-responsive-sm", table_cls="table table-sm")
def bars_as_table(bars):
    """Render the given bars as a compact responsive HTML table."""
    columns = [
        ("ID", "id", str),
        ("Name", "name", str),
        ("CName", "cname", str),
        ("Total Orders", "orders", len),
    ]
    headings = [heading for heading, _, _ in columns]
    cells = [cell for _, cell, _ in columns]
    formatters = [fmt for _, _, fmt in columns]
    return as_table(bars, headings, cells, formatters, outer_div="table-responsive-sm", table_cls="table table-sm")
| StarcoderdataPython |
1631424 | <filename>sdk/metricsadvisor/azure-ai-metricsadvisor/tests/async_tests/test_hooks_async.py
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from devtools_testutils import AzureTestCase
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.metricsadvisor.models import (
EmailNotificationHook,
WebNotificationHook,
)
from base_testcase_async import TestMetricsAdvisorAdministrationClientBaseAsync
class TestMetricsAdvisorAdministrationClientAsync(TestMetricsAdvisorAdministrationClientBaseAsync):
    """Async live tests for notification-hook CRUD on the admin client.

    Every test creates its hook(s) inside ``try`` and deletes them in the
    matching ``finally`` so the test account is left clean even on failure.

    Fix: ``test_update_web_hook_by_resetting_properties`` contained the bare
    token ``password=<PASSWORD>`` (a credential-scrubber artifact), which is a
    syntax error; it is restored to ``password=None`` to match the other
    reset-to-null properties in that call.
    """

    @AzureTestCase.await_prepared_test
    async def test_create_email_hook(self):
        email_hook_name = self.create_random_name("testemailhookasync")
        async with self.admin_client:
            try:
                email_hook = await self.admin_client.create_hook(
                    hook=EmailNotificationHook(
                        name=email_hook_name,
                        emails_to_alert=["<EMAIL>"],
                        description="my email hook",
                        external_link="external link"
                    )
                )
                self.assertIsNotNone(email_hook.id)
                self.assertIsNotNone(email_hook.name)
                self.assertIsNotNone(email_hook.admins)
                self.assertEqual(email_hook.emails_to_alert, ["<EMAIL>"])
                self.assertEqual(email_hook.description, "my email hook")
                self.assertEqual(email_hook.external_link, "external link")
                self.assertEqual(email_hook.hook_type, "Email")
            finally:
                await self.admin_client.delete_hook(email_hook.id)
                with self.assertRaises(ResourceNotFoundError):
                    await self.admin_client.get_hook(email_hook.id)

    @AzureTestCase.await_prepared_test
    async def test_create_web_hook(self):
        web_hook_name = self.create_random_name("testwebhookasync")
        async with self.admin_client:
            try:
                web_hook = await self.admin_client.create_hook(
                    hook=WebNotificationHook(
                        name=web_hook_name,
                        endpoint="https://httpbin.org/post",
                        description="my web hook",
                        external_link="external link"
                    )
                )
                self.assertIsNotNone(web_hook.id)
                self.assertIsNotNone(web_hook.name)
                self.assertIsNotNone(web_hook.admins)
                self.assertEqual(web_hook.endpoint, "https://httpbin.org/post")
                self.assertEqual(web_hook.description, "my web hook")
                self.assertEqual(web_hook.external_link, "external link")
                self.assertEqual(web_hook.hook_type, "Webhook")
            finally:
                await self.admin_client.delete_hook(web_hook.id)
                with self.assertRaises(ResourceNotFoundError):
                    await self.admin_client.get_hook(web_hook.id)

    @AzureTestCase.await_prepared_test
    async def test_list_hooks(self):
        async with self.admin_client:
            hooks = self.admin_client.list_hooks()
            hooks_list = []
            async for hook in hooks:
                hooks_list.append(hook)
            assert len(hooks_list) > 0

    @AzureTestCase.await_prepared_test
    async def test_update_email_hook_with_model(self):
        name = self.create_random_name("testwebhook")
        async with self.admin_client:
            try:
                hook = await self._create_email_hook_for_update(name)
                hook.name = "update"
                hook.description = "update"
                hook.external_link = "update"
                hook.emails_to_alert = ["<EMAIL>"]
                await self.admin_client.update_hook(hook)
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "update")
                self.assertEqual(updated.description, "update")
                self.assertEqual(updated.external_link, "update")
                self.assertEqual(updated.emails_to_alert, ["<EMAIL>"])
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_email_hook_with_kwargs(self):
        name = self.create_random_name("testhook")
        async with self.admin_client:
            try:
                hook = await self._create_email_hook_for_update(name)
                await self.admin_client.update_hook(
                    hook.id,
                    hook_type="Email",
                    name="update",
                    description="update",
                    external_link="update",
                    emails_to_alert=["<EMAIL>"]
                )
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "update")
                self.assertEqual(updated.description, "update")
                self.assertEqual(updated.external_link, "update")
                self.assertEqual(updated.emails_to_alert, ["<EMAIL>"])
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_email_hook_with_model_and_kwargs(self):
        # kwargs take precedence over the model's attribute values.
        name = self.create_random_name("testhook")
        async with self.admin_client:
            try:
                hook = await self._create_email_hook_for_update(name)
                hook.name = "don't update me"
                hook.description = "don't update me"
                hook.emails_to_alert = []
                await self.admin_client.update_hook(
                    hook,
                    hook_type="Email",
                    name="update",
                    description="update",
                    external_link="update",
                    emails_to_alert=["<EMAIL>"]
                )
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "update")
                self.assertEqual(updated.description, "update")
                self.assertEqual(updated.external_link, "update")
                self.assertEqual(updated.emails_to_alert, ["<EMAIL>"])
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_email_hook_by_resetting_properties(self):
        name = self.create_random_name("testhook")
        async with self.admin_client:
            try:
                hook = await self._create_email_hook_for_update(name)
                await self.admin_client.update_hook(
                    hook.id,
                    hook_type="Email",
                    name="reset",
                    description=None,
                    external_link=None,
                )
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "reset")

                # sending null, but not clearing properties
                # self.assertEqual(updated.description, "")
                # self.assertEqual(updated.external_link, "")
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_web_hook_with_model(self):
        name = self.create_random_name("testwebhook")
        async with self.admin_client:
            try:
                hook = await self._create_web_hook_for_update(name)
                hook.name = "update"
                hook.description = "update"
                hook.external_link = "update"
                hook.username = "myusername"
                hook.password = "password"
                await self.admin_client.update_hook(hook)
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "update")
                self.assertEqual(updated.description, "update")
                self.assertEqual(updated.external_link, "update")
                self.assertEqual(updated.username, "myusername")
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_web_hook_with_kwargs(self):
        name = self.create_random_name("testwebhook")
        async with self.admin_client:
            try:
                hook = await self._create_web_hook_for_update(name)
                await self.admin_client.update_hook(
                    hook.id,
                    hook_type="Web",
                    endpoint="https://httpbin.org/post",
                    name="update",
                    description="update",
                    external_link="update",
                    username="myusername",
                    password="password"
                )
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "update")
                self.assertEqual(updated.description, "update")
                self.assertEqual(updated.external_link, "update")
                self.assertEqual(updated.username, "myusername")
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_web_hook_with_model_and_kwargs(self):
        # description is set only on the model; kwargs override the rest.
        name = self.create_random_name("testwebhook")
        async with self.admin_client:
            try:
                hook = await self._create_web_hook_for_update(name)
                hook.name = "don't update me"
                hook.description = "updateMe"
                hook.username = "don't update me"
                hook.password = "<PASSWORD>"
                hook.endpoint = "don't update me"
                await self.admin_client.update_hook(
                    hook,
                    hook_type="Web",
                    endpoint="https://httpbin.org/post",
                    name="update",
                    external_link="update",
                    username="myusername",
                    password="password"
                )
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "update")
                self.assertEqual(updated.description, "updateMe")
                self.assertEqual(updated.external_link, "update")
                self.assertEqual(updated.username, "myusername")
            finally:
                await self.admin_client.delete_hook(hook.id)

    @AzureTestCase.await_prepared_test
    async def test_update_web_hook_by_resetting_properties(self):
        name = self.create_random_name("testhook")
        async with self.admin_client:
            try:
                hook = await self._create_web_hook_for_update(name)
                await self.admin_client.update_hook(
                    hook.id,
                    hook_type="Web",
                    name="reset",
                    description=None,
                    endpoint="https://httpbin.org/post",
                    external_link=None,
                    username="myusername",
                    # NOTE(review): restored from a scrubbed token that broke the
                    # syntax; None matches the other reset-to-null properties, and
                    # the service is expected to return "" afterwards — confirm.
                    password=None
                )
                updated = await self.admin_client.get_hook(hook.id)
                self.assertEqual(updated.name, "reset")
                self.assertEqual(updated.password, "")

                # sending null, but not clearing properties
                # self.assertEqual(updated.description, "")
                # self.assertEqual(updated.external_link, "")
            finally:
                await self.admin_client.delete_hook(hook.id)
| StarcoderdataPython |
3450281 | <reponame>nalderto/otter-grader<gh_stars>0
"""
Runs Otter on Gradescope with the configurations specified below
"""
import os
import subprocess
from otter.generate.run_autograder import main as run_autograder
# NOTE: this file is a Jinja template — the {{ ... }} placeholders are
# substituted when Otter generates the autograder archive, so this module is
# not importable/runnable until it has been rendered.
config = {
    "score_threshold": {{ threshold }},
    "points_possible": {{ points }},
    "show_stdout_on_release": {{ show_stdout }},
    "show_hidden_tests_on_release": {{ show_hidden }},
    "seed": {{ seed }},
    "grade_from_log": {{ grade_from_log }},
    "serialized_variables": {{ serialized_variables }},
    "public_multiplier": {{ public_multiplier }},
    "token": {% if token %}'{{ token }}'{% else %}None{% endif %},
    "course_id": '{{ course_id }}',
    "assignment_id": '{{ assignment_id }}',
    "filtering": {{ filtering }},
    "pagebreaks": {{ pagebreaks }},
    "debug": False,
    "autograder_dir": '{{ autograder_dir }}',
    "lang": '{{ lang }}',
}

# Entry point used inside the Gradescope container.
if __name__ == "__main__":
    run_autograder(config)
| StarcoderdataPython |
206993 | <reponame>jihyungSong/plugin-azure-power-state<filename>src/spaceone/inventory/model/virtual_machine.py
import logging
from schematics import Model
from schematics.types import ModelType, StringType, ListType, DictType
from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse
_LOGGER = logging.getLogger(__name__)
class Compute(Model):
    """Compute-level identity and lifecycle state embedded in a Server document."""

    instance_id = StringType()
    instance_state = StringType(choices=('STARTING', 'RUNNING', 'STOPPING', 'STOPPED', 'DEALLOCATING', 'DEALLOCATED'))
class PowerState(Model):
    """Coarse power status: RUNNING, STOPPED, or UNHEALTHY for anything else."""

    status = StringType(choices=('RUNNING', 'STOPPED', 'UNHEALTHY'))
class Server(Model):
    """Server document combining compute identity with an optional power state."""

    compute = ModelType(Compute)
    # Omitted from serialized output when unset (serialize_when_none=False).
    power_state = ModelType(PowerState, serialize_when_none=False)

    def reference(self):
        """Return the reference dict keyed by the VM's instance id (used for matching)."""
        return {
            "resource_id": self.compute.instance_id,
        }
class VirtualMachineResource(CloudServiceResource):
    """Cloud-service resource wrapper fixing group/type to Compute/VirtualMachine."""

    cloud_service_group = StringType(default='Compute')
    cloud_service_type = StringType(default='VirtualMachine')
    data = ModelType(Server)
class VirtualMachineResponse(CloudServiceResponse):
    """Response envelope; servers are matched by their reference.resource_id."""

    match_rules = DictType(ListType(StringType), default={'1': ['reference.resource_id']})
    resource_type = StringType(default='inventory.Server')
    resource = ModelType(VirtualMachineResource)
| StarcoderdataPython |
3575265 | from __future__ import annotations
import functools
from typing import Any, Union, Tuple, Callable, Type, cast, TypeVar
FuncSig = TypeVar("FuncSig", bound=Callable)
class MissingValue:
def __repr__(self) -> str:
return type(self).__name__
missing = MissingValue()
def _set_value_ignoring_exceptions(exception_types: Union[Type[Exception], Tuple[Type[Exception]]] = Exception) -> Callable[[FuncSig], FuncSig]:
def decorator(func: FuncSig) -> FuncSig:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
instance = args[0]
if instance._value_ is not missing:
try:
instance._value_ = func(*args, **kwargs)
except exception_types:
instance._value_ = missing
return instance
return cast(FuncSig, wrapper)
return decorator
class Maybe:
    """
    A class which serves as a pseudo-implementation of null-aware operators in python. Provides null-aware item access, null-aware attribute access, null-aware chained method calls,
    and can be combined with all arithmetic and bitwise operators
    """

    def __init__(self, val: Any) -> None:
        # None is normalized to the `missing` sentinel; every chained
        # operation below mutates self._value_ in place.
        self._value_ = val if val is not None else missing

    def __repr__(self) -> str:
        return f"{type(self).__name__}({repr(self._value_)})"

    def __bool__(self) -> bool:
        # Truthy while the chain is still valid (value not missing).
        return self._value_ is not missing

    def __getattr__(self, name: str) -> Maybe:
        # Null-aware attribute access: a failed lookup marks the value missing,
        # except for IPython's dunder-probe attributes (so repr display in a
        # notebook doesn't invalidate the chain). Note the `return` in
        # `finally` deliberately swallows any in-flight exception.
        try:
            self._value_ = getattr(self._value_, name)
        except AttributeError:
            if not (name.startswith("_") and "ipython" in name.lower()):
                self._value_ = missing
        finally:
            return self

    # Null-aware indexing: KeyError/IndexError collapse the value to missing.
    @_set_value_ignoring_exceptions((KeyError, IndexError))
    def __getitem__(self, key: str) -> Maybe:
        return self._value_[key]

    # Null-aware call: TypeError (e.g. value not callable) collapses to missing.
    @_set_value_ignoring_exceptions(TypeError)
    def __call__(self, *args: Any, **kwargs: Any) -> Maybe:
        return self._value_(*args, **kwargs)

    # Arithmetic and bitwise operators: each folds the result back into this
    # Maybe, or marks it missing if the operands don't support the operation
    # (TypeError). Reflected variants cover `other <op> Maybe`.
    @_set_value_ignoring_exceptions(TypeError)
    def __add__(self, other: Any) -> Maybe:
        return self._value_ + other

    @_set_value_ignoring_exceptions(TypeError)
    def __radd__(self, other: Any) -> Maybe:
        return other + self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __sub__(self, other: Any) -> Maybe:
        return self._value_ - other

    @_set_value_ignoring_exceptions(TypeError)
    def __rsub__(self, other: Any) -> Maybe:
        return other - self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __mul__(self, other: Any) -> Maybe:
        return self._value_ * other

    @_set_value_ignoring_exceptions(TypeError)
    def __rmul__(self, other: Any) -> Maybe:
        return other * self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __truediv__(self, other: Any) -> Maybe:
        return self._value_ / other

    @_set_value_ignoring_exceptions(TypeError)
    def __rtruediv__(self, other: Any) -> Maybe:
        return other / self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __floordiv__(self, other: Any) -> Maybe:
        return self._value_ // other

    @_set_value_ignoring_exceptions(TypeError)
    def __rfloordiv__(self, other: Any) -> Maybe:
        return other // self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __mod__(self, other: Any) -> Maybe:
        return self._value_ % other

    @_set_value_ignoring_exceptions(TypeError)
    def __rmod__(self, other: Any) -> Maybe:
        return other % self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __and__(self, other: Any) -> Maybe:
        return self._value_ & other

    @_set_value_ignoring_exceptions(TypeError)
    def __rand__(self, other: Any) -> Maybe:
        return other & self._value_

    @_set_value_ignoring_exceptions(TypeError)
    def __or__(self, other: Any) -> Maybe:
        return self._value_ | other

    @_set_value_ignoring_exceptions(TypeError)
    def __ror__(self, other: Any) -> Maybe:
        return other | self._value_

    def else_(self, alternative: Any) -> Any:
        """Return the currently held value if the original was not None and all operations so far on the Maybe construct have been valid, otherwise return the alternative."""
        return self._value_ if self._value_ is not missing else alternative
| StarcoderdataPython |
4951571 | <filename>sdk/python/pulumi_newrelic/get_entity.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetEntityResult',
'AwaitableGetEntityResult',
'get_entity',
]
@pulumi.output_type
class GetEntityResult:
    """
    A collection of values returned by getEntity.
    """
    # NOTE: auto-generated by the Pulumi Terraform Bridge (tfgen) — see the
    # file header. The repetitive isinstance validation below is emitted by
    # the generator; change the generator, not this file.
    def __init__(__self__, account_id=None, application_id=None, domain=None, guid=None, id=None, ignore_case=None, name=None, serving_apm_application_id=None, tag=None, type=None):
        if account_id and not isinstance(account_id, int):
            raise TypeError("Expected argument 'account_id' to be a int")
        pulumi.set(__self__, "account_id", account_id)
        if application_id and not isinstance(application_id, int):
            raise TypeError("Expected argument 'application_id' to be a int")
        pulumi.set(__self__, "application_id", application_id)
        if domain and not isinstance(domain, str):
            raise TypeError("Expected argument 'domain' to be a str")
        pulumi.set(__self__, "domain", domain)
        if guid and not isinstance(guid, str):
            raise TypeError("Expected argument 'guid' to be a str")
        pulumi.set(__self__, "guid", guid)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ignore_case and not isinstance(ignore_case, bool):
            raise TypeError("Expected argument 'ignore_case' to be a bool")
        pulumi.set(__self__, "ignore_case", ignore_case)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if serving_apm_application_id and not isinstance(serving_apm_application_id, int):
            raise TypeError("Expected argument 'serving_apm_application_id' to be a int")
        pulumi.set(__self__, "serving_apm_application_id", serving_apm_application_id)
        if tag and not isinstance(tag, dict):
            raise TypeError("Expected argument 'tag' to be a dict")
        pulumi.set(__self__, "tag", tag)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> int:
        """
        The New Relic account ID associated with this entity.
        """
        return pulumi.get(self, "account_id")

    @property
    @pulumi.getter(name="applicationId")
    def application_id(self) -> int:
        """
        The domain-specific application ID of the entity. Only returned for APM and Browser applications.
        """
        return pulumi.get(self, "application_id")

    @property
    @pulumi.getter
    def domain(self) -> str:
        return pulumi.get(self, "domain")

    @property
    @pulumi.getter
    def guid(self) -> str:
        """
        The unique GUID of the entity.
        """
        return pulumi.get(self, "guid")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ignoreCase")
    def ignore_case(self) -> Optional[bool]:
        return pulumi.get(self, "ignore_case")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="servingApmApplicationId")
    def serving_apm_application_id(self) -> int:
        return pulumi.get(self, "serving_apm_application_id")

    @property
    @pulumi.getter
    def tag(self) -> Optional['outputs.GetEntityTagResult']:
        return pulumi.get(self, "tag")

    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
class AwaitableGetEntityResult(GetEntityResult):
    """Awaitable wrapper so the (already-resolved) result works in async code.

    The generator body never yields (`if False`), so awaiting it immediately
    returns a plain GetEntityResult copy — standard tfgen boilerplate.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetEntityResult(
            account_id=self.account_id,
            application_id=self.application_id,
            domain=self.domain,
            guid=self.guid,
            id=self.id,
            ignore_case=self.ignore_case,
            name=self.name,
            serving_apm_application_id=self.serving_apm_application_id,
            tag=self.tag,
            type=self.type)
def get_entity(domain: Optional[str] = None,
               ignore_case: Optional[bool] = None,
               name: Optional[str] = None,
               tag: Optional[pulumi.InputType['GetEntityTagArgs']] = None,
               type: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEntityResult:
    """
    Use this data source to get information about a specific entity in New Relic One that already exists.

    :param str domain: The entity's domain. Valid values are APM, BROWSER, INFRA, MOBILE, SYNTH, and VIZ. If not specified, all domains are searched.
    :param bool ignore_case: Ignore case of the `name` when searching for the entity. Defaults to false.
    :param str name: The name of the entity in New Relic One. The first entity matching this name for the given search parameters will be returned.
    :param str type: The entity's type. Valid values are APPLICATION, DASHBOARD, HOST, MONITOR, and WORKLOAD.
    """
    # Marshal arguments into the provider-call payload.
    __args__ = dict()
    __args__['domain'] = domain
    __args__['ignoreCase'] = ignore_case
    __args__['name'] = name
    __args__['tag'] = tag
    __args__['type'] = type
    # Default the invoke options and pin the provider plugin version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; .value is the resolved output record.
    __ret__ = pulumi.runtime.invoke('newrelic:index/getEntity:getEntity', __args__, opts=opts, typ=GetEntityResult).value

    return AwaitableGetEntityResult(
        account_id=__ret__.account_id,
        application_id=__ret__.application_id,
        domain=__ret__.domain,
        guid=__ret__.guid,
        id=__ret__.id,
        ignore_case=__ret__.ignore_case,
        name=__ret__.name,
        serving_apm_application_id=__ret__.serving_apm_application_id,
        tag=__ret__.tag,
        type=__ret__.type)
| StarcoderdataPython |
5159779 | #Current key bindings:
#left/right arrow: change direction (forward/backword, respectively) and make a step in that direction
#space bar: start/stop the animation
#from Scientific.IO.NetCDF import NetCDFFile as Dataset
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from threading import Timer
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from netCDF4 import Dataset
# Animation state shared between the key handler and update() (module globals).
i=0  # current simulation timestep index
surf=None  # handle of the current 3-D surface plot
# direction of playback: 1 = forward, 0 = backward
direction=1
# timestep stride applied per frame
simstep=20
# animation flag: True while the Timer-driven loop should keep running
animating=False
stopped=True  # True once the timer loop has fully halted
#User interaction
def key_press_handler(event):
    """Handle matplotlib key events.

    right/left arrows set the playback direction; the space bar toggles the
    animation and (re)starts the update loop only if the previous timer loop
    has fully stopped, to avoid two concurrent timer chains.
    """
    global direction
    global animating
    global stopped
    already_animating = animating
    print('press', event.key)
    if (event.key=='right'):
        print("right")
        direction=1
    elif (event.key=='left'):
        print("left")
        direction=0
    elif (event.key==' '):
        animating=not animating
        # Kick off the loop only when it was off AND the old loop has stopped.
        if (not already_animating and stopped): update()
def get_data(filename):
    """Read the "data" variable from a NetCDF4 file and return it as a list.

    NOTE(review): assumes the file contains a 2-D field sized to the 100x100
    meshgrid used in update() — confirm against the simulator output.
    """
    data = []
    print("Reading from " + filename)
    ds = Dataset(filename, "r", format="NETCDF4")
    #if (ds.iteration!=it): print("ERROR: requested it: " + str(it) + "; read it: " + str(ds.iteration))
    data[:] = ds.variables["data"]
    ds.close()
    return data
#last=None
def update():
    """Advance one animation frame: step the index, load data, redraw the surface.

    Returns (xx, yy, data) for the initial module-level call; returns None
    when the index clamps at 0 (NOTE(review): the module-level unpacking
    `xx,yy,data = update()` would fail if that happened on the first call).
    """
    #print('press', event.key)
    global i
    global fig
    global surf
    global last
    global simstep
    global direction
    global animating
    global stopped
    # direction==1 -> i += simstep; direction==0 -> i -= simstep
    #i = i + (direction)*simstep - (1-direction)*simstep
    i = i + 2*simstep*direction - simstep
    if (i<=0):
        i=0
        return
    #create x,y data (size of plate)
    x = np.arange(0, 100)
    y = np.arange(0, 100)
    #last = solver.compute(i, last);
    data = get_data("../output/data_" + str(i))
    #data = last.data
    print("update i:%i" % i)
    xx, yy = np.meshgrid(x, y)
    #print data
    # Redraw from scratch each frame: clear axes, relabel, replot.
    ax.clear()
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Temperature')
    #make the plot (fixed color scale 0..20 so frames are comparable)
    surf = ax.plot_surface(xx, yy, data, cmap=cm.coolwarm, vmin=0, vmax=20)
    #source box (cache/simulator) — currently unused (ax.text call commented out)
    ctxt = "CACHE"
    ccolor = "green"
    fcolor = "black"
    #ax.text(-10, 8, 27, ctxt, bbox=dict(facecolor=ccolor, alpha=0.5, boxstyle='round,pad=1'), color=fcolor, fontweight='bold', fontsize=12, verticalalignment='center')
    ax.set_autoscale_on(False)
    ax.set_zlim(0,20)
    #fig.canvas.draw()
    plt.draw()
    # Chain the next frame via a short one-shot timer while animating.
    if (animating):
        Timer(0.001, update).start()
        stopped=False
    else: stopped=True
    # Only the first (module-level) caller consumes this return value.
    return (xx, yy, data)
# init plot: one 3-D axes reused by every frame
fig = plt.figure()
# NOTE(review): fig.gca(projection=...) is deprecated in newer matplotlib;
# fig.add_subplot(projection='3d') is the modern equivalent — confirm version.
ax = fig.gca(projection='3d')
# Labels
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Temperature')
ax.zaxis.set_major_locator(LinearLocator(10))
#surf = ax.plot_surface(xx, yy, data)
# Draw the first frame; update() also sets the global `surf`.
xx,yy,data = update()
# install key handlers
fig.canvas.mpl_connect('key_press_event', key_press_handler)
surf.set_clim(vmin=0, vmax=20)
# colorbar
fig.colorbar(surf, shrink=0.5, aspect=5)
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
# show window (blocks until closed)
plt.show()
| StarcoderdataPython |
6430021 | <reponame>pradeep-charism/bigdata-analytics-ml<filename>prediction/test_bband.py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import yfinance as yf
from talib import RSI, BBANDS
# Backtest window and instrument.
start = '2022-01-22'
end = '2022-04-21'
symbol = 'TSLA'
max_holding = 100
# NOTE(review): API key committed in source — move it to an environment
# variable or config file and rotate the exposed key.
price = web.DataReader(name=symbol, data_source='quandl', start=start, end=end, api_key='-L1XxfzbhH1Zch7QzZ-y')
# price = yf.download("TSLA", start="2022-01-06", end="2022-04-21", interval="1d")
print(price)
# Quandl returns newest-first; reverse to chronological order.
price = price.iloc[::-1]
price = price.dropna()
close = price['Close'].values
# up, mid, low = BBANDS(close, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0)
# NOTE(review): `rsi` is computed but never stored on `price`, yet the
# plotting code below reads price['RSI'] — it will raise a KeyError.
rsi = RSI(close, timeperiod=14)
print("RSI (first 10 elements)\n", rsi[14:24])
def bbp(price):
    """Bollinger %B: position of the adjusted close within the bands (0..1).

    NOTE(review): the bands are computed from the module-level `close` array,
    not from the `price` argument, so the parameter only supplies the
    numerator — and it mixes 'AdjClose' with the 'Close' used for the bands.
    Also, this helper is never called anywhere in the script. Confirm intent.
    """
    up, mid, low = BBANDS(close, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0)
    bbp = (price['AdjClose'] - low) / (up - low)
    return bbp
# NOTE(review): this whole section cannot run as-is: `index` is never defined
# (presumably price.index), and the columns 'RSI', 'BBP', 'BB_up', 'BB_low'
# are never assigned onto `price`. Also `alpha='0.3'` passes a string where
# matplotlib expects a float. Flagged rather than changed.
holdings = pd.DataFrame(index=price.index, data={'Holdings': np.array([np.nan] * index.shape[0])})
# Enter a full position when oversold below the lower band; exit when
# overbought above the upper band; carry the position forward otherwise.
holdings.loc[((price['RSI'] < 30) & (price['BBP'] < 0)), 'Holdings'] = max_holding
holdings.loc[((price['RSI'] > 70) & (price['BBP'] > 1)), 'Holdings'] = 0
holdings.ffill(inplace=True)
holdings.fillna(0, inplace=True)
# A positive diff is a buy order, a negative one a sell.
holdings['Order'] = holdings.diff()
holdings.dropna(inplace=True)
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, sharex=True, figsize=(12, 8))
ax0.plot(index, price['AdjClose'], label='AdjClose')
ax0.set_xlabel('Date')
ax0.set_ylabel('AdjClose')
ax0.grid()
# Mark buys in green and sells in red on the price panel.
for day, holding in holdings.iterrows():
    order = holding['Order']
    if order > 0:
        ax0.scatter(x=day, y=price.loc[day, 'AdjClose'], color='green')
    elif order < 0:
        ax0.scatter(x=day, y=price.loc[day, 'AdjClose'], color='red')
ax1.plot(index, price['RSI'], label='RSI')
ax1.fill_between(index, y1=30, y2=70, color='#adccff', alpha='0.3')
ax1.set_xlabel('Date')
ax1.set_ylabel('RSI')
ax1.grid()
ax2.plot(index, price['BB_up'], label='BB_up')
ax2.plot(index, price['AdjClose'], label='AdjClose')
ax2.plot(index, price['BB_low'], label='BB_low')
ax2.fill_between(index, y1=price['BB_low'], y2=price['BB_up'], color='#adccff', alpha='0.3')
ax2.set_xlabel('Date')
ax2.set_ylabel('Bollinger Bands')
ax2.grid()
fig.tight_layout()
plt.show()
| StarcoderdataPython |
8106084 | <gh_stars>1-10
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# create/xindice_create.py
#
# Oct/16/2012
#
# ------------------------------------------------------------------
import os
import sys
import pycurl
#
sys.path.append ("/var/www/data_base/common/python_common")
#
from xml_manipulate import dict_to_xml_proc
from text_manipulate import dict_append_proc
#
from curl_get import curl_put_proc
#
# --------------------------------------------------------------------
def data_prepare_proc ():
    """Build the sample dataset of Japanese cities (Aomori prefecture).

    Each dict_append_proc call adds one record keyed 't027x' with the city
    name, a number, and a date string — field semantics are defined by
    dict_append_proc in text_manipulate (NOTE: confirm the number's meaning).
    """
    dict_aa = {}
    dict_aa = dict_append_proc (dict_aa,'t0271',u'青森',59171,'2003-4-30')
    dict_aa = dict_append_proc (dict_aa,'t0272',u'弘前',47235,'2003-5-10')
    dict_aa = dict_append_proc (dict_aa,'t0273',u'八戸',26754,'2003-6-14')
    dict_aa = dict_append_proc (dict_aa,'t0274',u'三沢',83672,'2003-9-9')
    dict_aa = dict_append_proc (dict_aa,'t0275',u'黒石',42391,'2003-8-4')
    dict_aa = dict_append_proc (dict_aa,'t0276',u'むつ',35187,'2003-1-21')
    dict_aa = dict_append_proc (dict_aa,'t0277',u'五所川原',81246,'2003-7-23')
    dict_aa = dict_append_proc (dict_aa,'t0278',u'十和田',24784,'2003-10-26')
    dict_aa = dict_append_proc (dict_aa,'t0279',u'平川',75829,'2003-12-15')
    #
    return dict_aa
#
#
# --------------------------------------------------------------------
# Target Xindice collection URL (host:port is environment-specific).
url_base = 'http://cddn007:8888/xindice/db/'
url_sub = 'cities/cities'
url_target = url_base + url_sub
#
# "開始" = "start" (runtime string left untouched)
print ("*** 開始 ***")
#
# Build the sample data, serialize it to XML, and PUT it to the server.
dict_aa = data_prepare_proc ()
out_str = dict_to_xml_proc (dict_aa)
#
curl_put_proc (url_target,out_str.encode('utf-8'))
#
# "終了" = "finished" (runtime string left untouched)
print ("*** 終了 ***")
# ------------------------------------------------------------------
| StarcoderdataPython |
6581755 | from requests import Session as RequestsSession
from ISEApi import logger
class Session(RequestsSession):
    """requests.Session variant for the ISE API: prefixes a base URL onto every
    relative request path and defaults the content-type/accept headers to JSON."""

    def __init__(self, base_url):
        self.base_url = base_url
        super().__init__()

    def _set_content_type(self, request):
        """Default the content-type and accept headers to JSON when absent or empty."""
        for header_name in ('content-type', 'accept'):
            if not request.headers.get(header_name):
                request.headers[header_name] = 'application/json'
        logger.debug(request.headers)

    def _prepend_base_url(self, request):
        """Turn the request's relative URL into an absolute one using base_url."""
        request.url = self.base_url + request.url
        logger.debug(request.url)

    def prepare_request(self, request):
        """Intercept the outgoing request to fix up its headers and URL."""
        self._set_content_type(request)
        self._prepend_base_url(request)
        logger.debug('Request data: {}'.format(request.data))
        return super().prepare_request(request)
| StarcoderdataPython |
85096 | """
This package contains all objects managing Tunneling and Routing Connections..
- KNXIPInterface is the overall managing class.
- GatewayScanner searches for available KNX/IP devices in the local network.
- Routing uses UDP/Multicast to communicate with KNX/IP device.
- Tunnelling uses UDP packets and builds a static tunnel with KNX/IP device.
"""
# flake8: noqa
from .connect import Connect
from .connectionstate import ConnectionState
from .const import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from .disconnect import Disconnect
from .gateway_scanner import GatewayScanFilter, GatewayScanner
from .knxip_interface import ConnectionConfig, ConnectionType, KNXIPInterface
from .request_response import RequestResponse
from .routing import Routing
from .tunnel import Tunnel
from .tunnelling import Tunnelling
from .udp_client import UDPClient
| StarcoderdataPython |
4986400 | <filename>draw_qrcode.py
from qrcode import coded_msg
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
MSG = "MATHSDISCRETES"  # default payload encoded into the QR code
MODE = "0010"  # mode indicator bits (0010 = alphanumeric — confirm against coded_msg)
MAX = 19*8  # data capacity in bits (19 codewords — matches a version-1 symbol)
# presumably the precomputed error-correction codewords — TODO confirm vs coded_msg
CORRECTION = [211, 212, 181, 2, 31, 139, 106]
def draw_pattern(qrcode, x, y):
    """Draw a 7x7 finder pattern with its top-left corner at (x, y).

    An outlined 7x7 ring plus the solid 3x3 centre square. The original drew
    the identical outer rectangle twice in a row; drawing is idempotent, so
    the duplicate call was redundant and has been removed.
    """
    draw = ImageDraw.Draw(qrcode)
    # Outer 7x7 black border ring.
    draw.rectangle(
        [(x, y), (x+6, y+6)], outline="black", width=1)
    # Solid 3x3 centre square.
    draw.rectangle(
        [(x+2, y+2), (x+4, y+4)], outline="black", fill="black", width=1)
def draw_timing(qrcode):
    """Draw the alternating black/white timing patterns along row 6 and column 6."""
    for offset in range(7, 13):
        shade = (255, 255, 255) if offset % 2 else (0, 0, 0)
        qrcode.putpixel((offset, 6), shade)
        qrcode.putpixel((6, offset), shade)
qrcode.putpixel((6, i), color)
def draw_dark_module(qrcode):
    """Place the mandatory dark module at (8, 4*version + 9); version is 1 here."""
    version = 1
    qrcode.putpixel((8, 4 * version + 9), (0, 0, 0))
def zigzag(col, start, end):
    """Return zigzag module coordinates for a two-module-wide column.

    Rows are visited from start to end inclusive, in either direction; for
    each row the right module (col) is emitted before the left one (col - 1).
    """
    step = 1 if start <= end else -1
    coords = []
    for row in range(start, end + step, step):
        coords.append((col, row))
        coords.append((col - 1, row))
    return coords
def apply_mask(bit, mask, row, column):
    """Apply QR data-mask *mask* (0-7) to *bit* at (row, column).

    The bit is flipped when the mask's predicate holds at that position.
    A mask of -1 (or any value outside 0-7) leaves the bit unchanged.
    """
    predicates = {
        0: lambda r, c: (r + c) % 2 == 0,
        1: lambda r, c: r % 2 == 0,
        2: lambda r, c: c % 3 == 0,
        3: lambda r, c: (r + c) % 3 == 0,
        4: lambda r, c: (r // 2 + c // 3) % 2 == 0,
        5: lambda r, c: (r * c) % 2 + (r * c) % 3 == 0,
        6: lambda r, c: ((r * c) % 2 + (r * c) % 3) % 2 == 0,
        7: lambda r, c: ((r + c) % 2 + (r * c) % 3) % 2 == 0,
    }
    flip = predicates.get(mask)
    if flip is not None and flip(row, column):
        return 1 if bit == 0 else 0
    return bit
def draw_column(data, qrcode, col, start, end, mask):
    """Paint one two-module-wide zigzag column of data bits and return the count used.

    Cells listed in modules_coords keep their raw (unmasked) bit — presumably
    a reserved region near the bottom-right; TODO confirm against the layout.
    NOTE(review): if `coords` were empty, `i` would be unbound at `return i+1`.
    """
    coords = zigzag(col, start, end)
    modules_coords = zigzag(20, 20, 14) + [(20, 13)]
    for i, p in enumerate(coords):
        # p is (x, y); apply_mask expects (row, col) = (p[1], p[0]).
        value = apply_mask(int(data[i]), mask, p[1], p[0]) if (
            p[0], p[1]) not in modules_coords else int(data[i])
        color = (0, 0, 0) if value == 1 else (255, 255, 255)
        qrcode.putpixel(p, color)
    # Number of bits consumed from `data`.
    return i+1
def draw_data(msg, qrcode, mask):
    """Encode *msg* (via coded_msg) and paint the bitstream column by column.

    `z` tracks how many bits have been consumed so far; each draw_column call
    takes the remaining slice and returns how many bits it painted. The column
    order follows the QR zigzag traversal from bottom-right to bottom-left.
    """
    z = 0
    raw = coded_msg(msg, MODE, MAX, CORRECTION)
    # coded_msg returns space-separated groups; strip to a flat bitstring.
    coded = raw.replace(" ", "")
    z += draw_column(coded, qrcode, 20, 20, 9, mask)
    z += draw_column(coded[z:], qrcode, 18, 9, 20, mask)
    z += draw_column(coded[z:], qrcode, 16, 20, 9, mask)
    z += draw_column(coded[z:], qrcode, 14, 9, 20, mask)
    z += draw_column(coded[z:], qrcode, 12, 20, 7, mask)
    z += draw_column(coded[z:], qrcode, 12, 5, 0, mask)
    z += draw_column(coded[z:], qrcode, 10, 0, 5, mask)
    z += draw_column(coded[z:], qrcode, 10, 7, 20, mask)
    z += draw_column(coded[z:], qrcode, 8, 12, 9, mask)
    z += draw_column(coded[z:], qrcode, 5, 9, 12, mask)
    z += draw_column(coded[z:], qrcode, 3, 12, 9, mask)
    z += draw_column(coded[z:], qrcode, 1, 9, 12, mask)
def draw_infos(qrcode, mask):
    """Write both copies of the 15-bit format information for the chosen mask.

    A mask of -1 means "no mask selected": nothing is drawn.
    """
    if mask == -1:
        return
    format_strings = ["111011111000100", "111001011110011", "111110110101010", "111100010011101",
                      "110011000101111", "110001100011000", "110110001000001", "110100101110110"]
    bits = format_strings[mask]
    # First copy wraps around the top-left finder pattern, skipping the
    # timing row/column at index 6.
    first_copy = [(i, 8) for i in range(8) if i != 6]
    first_copy += [(8, i) for i in range(8, -1, -1) if i != 6]
    # Second copy sits beside the bottom-left and below the top-right finders.
    second_copy = [(8, i) for i in range(20, 13, -1)]
    second_copy += [(i, 8) for i in range(13, 21)]
    for placement in (first_copy, second_copy):
        for bit, coord in zip(bits, placement):
            qrcode.putpixel(coord, (0, 0, 0) if bit == "1" else (255, 255, 255))
def draw_qrcode(filename="qrcode", msg=MSG, mask=-1, size=250):
    """Render the 21x21 QR code for *msg*, upscale to *size*, save and return it.

    Bug fix: the data stage previously passed the global MSG instead of the
    *msg* argument, so custom messages were silently ignored.
    """
    qrcode = Image.new('RGB', (21, 21), "white")
    # Finder patterns in three corners of a version-1 (21x21) code.
    draw_pattern(qrcode, 0, 0)
    draw_pattern(qrcode, 21 - 1 - 6, 0)
    draw_pattern(qrcode, 0, 21 - 1 - 6)
    draw_timing(qrcode)
    draw_dark_module(qrcode)
    draw_data(msg, qrcode, mask)  # was draw_data(MSG, ...): ignored the parameter
    draw_infos(qrcode, mask)
    new_img = qrcode.resize((size, size))
    new_img.save("images/{}.png".format(filename))
    return new_img
# Render the QR code once per mask pattern (plus "no mask") and collect the images.
imgs = []
for i in range(-1, 8):
    img = draw_qrcode(filename="qrcode_mask{}".format(i), mask=i, size=250)
    imgs.append(img)
# Assemble a labelled contact sheet of all nine variants (labels are in French).
final = Image.new('RGB', (860, 200), "white")
draw = ImageDraw.Draw(final)
for i, img in enumerate(imgs):
    new_img = img.resize((80, 80))
    final.paste(new_img, (30+i*90, 36))
    text = "masque {}".format(i-1) if i != 0 else "Aucun\nmasque"
    draw.text((45+i*90, 130), text, (0, 0, 0),
              font=ImageFont.truetype("arial.ttf"))
final.save("images/final.png")
| StarcoderdataPython |
6544717 | <filename>settings.py
### GAME SETTINGS ###
# IMPORT LIBRARIES
# IMPORT GAME FILES
# WINDOW SETTINGS
TITLE = "Battler V1"  # window caption
WIDTH = 500   # window width in pixels
HEIGHT = 500  # window height in pixels
FPS = 60      # target frame rate
#COLOURS (RGB tuples)
WHITE = (255, 250, 250)  # NOTE: slightly off pure white ("snow")
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
#FONT SETTINGS
FONT_NAME = "arial"  # looked up by name, presumably via pygame.font -- TODO confirm
5112484 | <filename>Workflow.py
from pathlib import Path
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans, DBSCAN
from sklearn.preprocessing import StandardScaler, RobustScaler, PowerTransformer, Normalizer, FunctionTransformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import silhouette_samples
import matplotlib.pyplot as plt
import seaborn
import numpy as np
import pandas as pd
from math import pi
import utils as utils
import feature_utils as cu
import utils as utils
import os
def df_np_df(func):
    """Decorator: pass a DataFrame to *func* as an ndarray, rebuild a DataFrame.

    *func* must accept (self, ndarray, ...) and return (ndarray, meta).  The
    wrapper converts the incoming DataFrame to numpy, calls *func*, asserts the
    column count is unchanged, and re-attaches the original column labels.

    Improvement: uses functools.wraps so the wrapped function keeps its
    __name__/__doc__ (the original wrapper clobbered them).
    """
    from functools import wraps

    @wraps(func)
    def convert_call_reconvert_df(self, df, *args, **kwargs):
        nparray = df.to_numpy()
        nparray, meta = func(self, nparray, *args, **kwargs)
        assert nparray.shape[1] == len(df.columns)
        return pd.DataFrame(nparray, columns=df.columns), meta
    return convert_call_reconvert_df
class Workflow:
    """Configurable clustering pipeline for session feature data.

    Pipeline stages -- each gated by a boolean flag set in __init__ -- are:
    filtering, histograms, log-transform, scaling, normalization, correlation
    heatmap, scree plot, PCA, clustering (KMeans/DBSCAN), and silhouette /
    scatter / radar plots, with CSV + metadata export along the way.
    """

    # Default hyper-parameters for the scikit-learn stages.
    DEFAULT_SCALE = "robust"
    DEFAULT_PCA = 2
    DEFAULT_CLUSTERS = [3]
    DEFAULT_CLUSTER_METHOD = "KMeans"

    def __init__(self, init_df, import_meta, filter_options=None, base_output_dir=None, nested_folder_output=True):
        """Store the input frame/metadata and initialize all stage flags and defaults."""
        self.further_filter_query_list = None
        self._df = init_df
        self._df_import_meta = import_meta
        self.filter_options = filter_options
        self._nested_folder_output: bool = nested_folder_output
        self._base_output_dir = base_output_dir
        # flags
        self.verbose = True
        # steps: toggle individual pipeline stages on/off
        self.pre_histogram = True
        self.do_logtransform = True
        self.do_scaling = True
        self.do_normalization = False
        self.post_histogram = True
        self.plot_correlation = True
        self.do_PCA = True
        self.plot_scree = True
        self.do_clustering = True
        self.plot_cluster_scatter = True
        self.plot_silhouettes = True
        self.plot_radars = True
        # scikit-learn stage parameters
        self.outlier_method = None
        self.scaling_method = Workflow.DEFAULT_SCALE
        self.normalization_method = 'Normalizer'
        self.pca_dimension_count = Workflow.DEFAULT_PCA
        self.clustering_method = Workflow.DEFAULT_CLUSTER_METHOD
        self.clustering_counts = Workflow.DEFAULT_CLUSTERS
        self.clustering_count = self.clustering_counts[0]
        # viz: cluster label -> RGB color; -1 (e.g. DBSCAN noise) is dark grey
        self.color_dict = {i: v for i, v in enumerate(plt.cm.get_cmap('tab10').colors)}
        self.color_dict.update({10 + i: plt.cm.get_cmap('tab20').colors[2 * i + 1] for i in range(10)})
        self.color_dict[-1] = (.2, .2, .2)
        self.feature_names = None

    def clustering_abbrev(self):
        """Return a short run tag such as 'z3pca2k4' used in output paths."""
        # Fixed: was `self.clustering_method is "KMeans"` -- identity comparison
        # against a string literal is implementation-dependent; use equality.
        cluster_abbrev = 'k' if self.clustering_method == "KMeans" else self.clustering_method
        return f'z{self.filter_options.zthresh}pca{self.pca_dimension_count}{cluster_abbrev}{self.clustering_count}'

    def get_base_output_dir(self):
        """Return (and create) the run-level output directory."""
        if self._base_output_dir:
            save_dir = self._base_output_dir
        else:
            logtransform = '_logtransform' if self.do_logtransform else ''
            clustering_suffix = '' if self._nested_folder_output else '_' + self.clustering_abbrev()
            suffix = f'{logtransform}{clustering_suffix}'
            save_dir = os.path.join('Results', self.filter_options.game.lower().capitalize(), self.filter_options.name + suffix)
        Path(save_dir).mkdir(parents=True, exist_ok=True)
        return save_dir

    def get_cluster_output_dir(self):
        """Return (and create) the per-clustering-run output directory."""
        if not self._nested_folder_output:
            save_dir = self.get_base_output_dir()
        else:
            save_dir = os.path.join(self.get_base_output_dir(), self.clustering_abbrev())
        Path(save_dir).mkdir(parents=True, exist_ok=True)
        return save_dir

    def get_filename(self):
        """Stub: intended to build an output file name; currently always None."""
        return None  # some_string

    def query(self, df, query_list):
        """Apply each pandas query string in order; return (filtered df, metadata lines)."""
        meta = []
        for q in query_list:
            df = df.query(q)
            outstr = f'Query: {q}, output_shape: {df.shape}'
            meta.append(outstr)
        return df, meta

    @staticmethod
    def Histogram(df: pd.DataFrame, num_bins: int = None, title: str = None, log_scale=True, save=False, save_loc=None):
        """Plot one histogram per column (optionally log-scaled y); save as '<title>.png'."""
        title = title or 'Histograms'
        num_rows = len(df.index)
        num_bins = num_bins or min(25, num_rows)
        axes = df.plot(kind='hist', subplots=True, figsize=(20, 5), bins=num_bins,
                       title=title, layout=(1, len(df.columns)), color='k', sharex=False,
                       sharey=True, logy=log_scale, bottom=1)
        if save:
            savepath = os.path.join(save_loc, f'{title}.png')
            plt.savefig(savepath)
            plt.close()
    # TODO: Graph is part cut off, i think there might be some stuff hardcoded.

    @staticmethod
    def Correlations(df, heat_range=0.3, save=False, save_loc=None):
        """Draw an annotated correlation heatmap of df's columns; save as 'Correlations.png'."""
        plt.figure()
        seaborn.set(style="ticks")
        corr = df.corr()
        g = seaborn.heatmap(corr, vmax=heat_range, center=0,
                            square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True, fmt='.2f', cmap='coolwarm')
        seaborn.despine()
        g.figure.set_size_inches(14, 10)
        title = 'Correlations'
        if save:
            savepath = os.path.join(save_loc, f'{title}.png')
            g.figure.savefig(savepath)

    @staticmethod
    def LogTransformed(df):
        """Return (log1p-transformed copy of df, metadata lines)."""
        meta = []
        nparray = df.to_numpy()
        nparray = np.log1p(nparray)
        meta.append('LogTransform using np.long1p')
        return pd.DataFrame(nparray, columns=df.columns), meta

    @staticmethod
    def Scaled(df, scaling_method: str = DEFAULT_SCALE):
        """Scale df's columns with Standard or Robust scaling; return (df, metadata).

        Bug fix: the comparison is now case-insensitive.  Previously the
        branches only matched "Standard"/"Robust", so the class default
        "robust" fell through both and `scaler` was referenced unbound
        (NameError).  Unknown methods now raise ValueError explicitly.
        """
        meta = []
        nparray = df.to_numpy()
        method = scaling_method.lower()
        if method == "standard":
            scaler = StandardScaler()
        elif method == "robust":
            scaler = RobustScaler()
        else:
            raise ValueError(f"Unknown scaling method: {scaling_method!r}")
        meta.append(f'Scaled with scikitlearn {scaler}')
        nparray = scaler.fit_transform(nparray)
        return pd.DataFrame(nparray, columns=df.columns), meta

    @staticmethod
    def Normalized(df):
        """L2-normalize each row with sklearn's Normalizer; return (df, metadata)."""
        meta = []
        nparray = df.to_numpy()
        normalizer = Normalizer()
        meta.append(f'Normalized with scikitlearn {normalizer}')
        nparray = normalizer.fit_transform(nparray)
        return pd.DataFrame(nparray, columns=df.columns), meta

    @staticmethod
    def PCA(df, dimension_count: int = DEFAULT_PCA):
        """Project df onto its first *dimension_count* principal components.

        Note: the PCA() call below resolves to sklearn.decomposition.PCA at
        module level, not to this static method.
        """
        meta = []
        nparray = df.to_numpy()
        pca = PCA(n_components=dimension_count)
        meta.append(f'PCA df calculated with scikitlearn {pca}')
        nparray = pca.fit_transform(nparray)
        PCA_names = [f"PCA_{i}" for i in range(dimension_count)]
        return pd.DataFrame(nparray, columns=PCA_names), meta

    @staticmethod
    def PlotScree(df, save=False, save_loc=None):
        """Scree plot of normalized squared singular values; save as 'Scree Plot.png'."""
        nparray = df.to_numpy()
        U, S, V = np.linalg.svd(nparray)
        eigvals = S ** 2 / np.sum(S ** 2)
        fig = plt.figure(figsize=(8, 5))
        singular_vals = np.arange(nparray.shape[1]) + 1
        plt.plot(singular_vals, eigvals, 'ro-', linewidth=2)
        title = 'Scree Plot'
        plt.title(title)
        plt.xlabel('Principal Component')
        plt.ylabel('Eigenvalue')
        if save:
            savepath = os.path.join(save_loc, f'{title}.png')
            plt.savefig(savepath)
            plt.close()
        return

    @staticmethod
    def Cluster(df, cluster_count: int = DEFAULT_CLUSTERS[0], clustering_method=DEFAULT_CLUSTER_METHOD):
        """Cluster df's rows; return (labels, metadata). Unknown methods return ([], meta)."""
        meta = []
        nparray = df.to_numpy()
        if clustering_method == "KMeans":
            clusterer = KMeans(n_clusters=cluster_count)
            # For future, include calculated distances.
            # In the future, this will let us find centers:
            # distances = clusterer.transform(nparray)
        elif clustering_method == "DBSCAN":
            clusterer = DBSCAN(eps=0.3, min_samples=10)
        else:
            return [], meta
        labels = clusterer.fit_predict(nparray)
        meta.append(f'Labels calculated via clusterer: {clusterer}')
        return labels, meta

    @staticmethod
    def PlotSilhouettes(dimension_data: pd.DataFrame, labels: pd.DataFrame, title=None, clustering_abbrev=None, save=False, save_loc=None):
        """Plot sorted per-cluster silhouette values with the average marked."""
        np_dimensions = dimension_data.to_numpy()
        silhouette_vals = silhouette_samples(np_dimensions, labels)
        # Silhouette plot: one horizontal band per cluster.
        fig, ax1 = plt.subplots(1, 1)
        fig.set_size_inches(18, 7)
        y_lower, y_upper = 0, 0
        for i, cluster in enumerate(np.unique(labels)):
            cluster_silhouette_vals = silhouette_vals[labels == cluster]
            cluster_silhouette_vals.sort()
            y_upper += len(cluster_silhouette_vals)
            ax1.barh(range(y_lower, y_upper), cluster_silhouette_vals, edgecolor='none', height=1)
            ax1.text(-0.03, (y_lower + y_upper) / 2, str(i + 1))
            y_lower += len(cluster_silhouette_vals)
        # Get the average silhouette score and plot it
        avg_score = np.mean(silhouette_vals)
        ax1.axvline(avg_score, linestyle='--', linewidth=2, color='green')
        ax1.set_yticks([])
        ax1.set_xlim([-0.1, 1])
        ax1.set_xlabel('Silhouette coefficient values')
        ax1.set_ylabel('Cluster labels')
        title = title or f'Silhouettes {clustering_abbrev} Avg={int(avg_score*100)}%'
        ax1.set_title(title, y=1.02)
        if save:
            savepath = os.path.join(save_loc, f'{title}.png')
            plt.savefig(savepath)
            plt.close()
        return

    @staticmethod
    def scatter(df, labels, color_dict, title='Scatter', save=False, save_loc=None):
        """Pairwise scatter matrix of df's columns, points colored by cluster label."""
        num_cols = len(df.columns)
        color_array = [color_dict[c] for c in labels]
        fig, axs = plt.subplots(num_cols, num_cols, figsize=(30, 30))
        for x in range(num_cols):
            for y in range(num_cols):
                axs[x, y].scatter(df.iloc[:, x], df.iloc[:, y], c=color_array)
                axs[x, y].set_xlabel(df.columns[x])
                axs[x, y].set_ylabel(df.columns[y])
        if save:
            savepath = os.path.join(save_loc, f'{title}.png')
            plt.savefig(savepath)
            plt.close()

    @staticmethod
    def radar_from_cluster_csv(csv_path, optionsgroup, savedir=None):
        """Load a previously saved cluster CSV and regenerate the radar charts."""
        # Lakeland exports use a two-level index.
        index_col = [0, 1] if optionsgroup.game.upper() == 'LAKELAND' else 0
        df = pd.read_csv(csv_path, index_col=index_col, comment='#')
        labels = list(df['label'].to_numpy())
        df = df.drop('label', axis=1)
        w = Workflow(init_df=df, import_meta="", filter_options=optionsgroup)
        w.radarCharts(df, labels, savedir=savedir)

    def radarCharts(self, df, labels, save=True, savedir=None):
        """Draw per-cluster radar charts of z-score / %mean / %std summaries.

        Bug fix: *labels* is coerced to an ndarray so plain Python lists (as
        passed by radar_from_cluster_csv) index df element-wise instead of
        evaluating `list == int` to a scalar False.
        """
        categories = self.filter_options.finalfeats_readable
        description_df = df.describe()
        summary_df = pd.DataFrame(columns=description_df.columns)
        clusters = set(labels)
        labels = np.asarray(labels)
        cluster_dict = {}
        for c in clusters:
            cluster_dict[c] = df[labels == c]
            cluster_df = cluster_dict[c].describe()
            summary_df.loc[f'C{c}_zscore', :] = (cluster_df.loc['mean', :] - description_df.loc['mean', :]) / description_df.loc[
                'std', :]
            summary_df.loc[f'C{c}_%mean', :] = (cluster_df.loc['mean', :] / description_df.loc['mean', :]) * 100
            summary_df.loc[f'C{c}_%std', :] = (cluster_df.loc['std', :] / description_df.loc['std', :]) * 100
        # truncate to two decimal places
        summary_df = summary_df.apply(lambda x: (x * 100) // 1 * .01)

        def make_spider(color, i):
            # NOTE(review): assumes cluster labels are exactly 0..k-1 (KMeans);
            # a DBSCAN noise label of -1 would raise KeyError here -- confirm.
            offset = .25 * pi
            # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
            angles = [n / float(N) * 2 * pi + offset for n in range(N)]
            angles += angles[:1]
            ax = plt.subplot(nrows, ncols, i + 1, polar=True)
            plt.xticks(angles[:-1], categories, color='grey', size=12)
            ax.set_rlabel_position(0)
            if var == 'zscore':
                plt.yticks([-2, -1, 0, 1, 2], color="grey", size=7)
                plt.ylim(-2, 2)
            elif '%' in var:
                plt.yticks(range(0, 1000, 100), color="grey", size=7)
                plt.ylim(0, 400)
            values = list(tdf.iloc[i, :])
            values += values[:1]
            ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')
            ax.fill(angles, values, color=color, alpha=0.4)
            plt.title(f'Cluster {i} (n={len(cluster_dict[i])})', size=11, color=color, y=1.1)

        # One figure per summary statistic, one polar subplot per cluster.
        for var in ['zscore', '%mean', '%std']:
            tdf = summary_df.loc[[idx for idx in summary_df.index if var in idx], :]
            if not categories:
                categories = list(tdf.columns)
            N = len(categories)
            num_groups = len(tdf.index)
            nrows = 1
            ncols = num_groups
            fig = plt.figure(figsize=(20, 5))
            fig.suptitle(f'{var} Radar Charts')
            for i in range(num_groups):
                make_spider(self.color_dict[i], i)
            fig.subplots_adjust(wspace=0.4)
            if save:
                plt.savefig(os.path.join(self.get_cluster_output_dir(), f'radar_{var}.png'))
            plt.close()

    def save_csv_and_meta(self, df, meta_list, save_dir, csv_name, meta_name=None, permissions='w+'):
        """Write df as CSV/TSV with metadata embedded as '#' header lines,
        plus a sibling '<name>_meta.txt'.  Returns (None, [])."""
        if csv_name.endswith(('.tsv', '.csv')):
            extension = csv_name[-4:]
            csv_name = csv_name[:-4]
        else:
            extension = '.csv'
        separator = '\t' if extension == '.tsv' else ','
        meta_name = meta_name or csv_name + '_meta.txt'
        meta_text = 'Metadata:\n' + '\n'.join(meta_list)
        with open(os.path.join(save_dir, meta_name), permissions) as f:
            f.write(meta_text)
        with open(os.path.join(save_dir, csv_name) + extension, permissions) as f:
            for l in meta_text.splitlines():
                f.write(f'# {l}\n')
            f.write('\n')
            df.to_csv(f, sep=separator)
        return None, []

    def RunWorkflow(self):
        """Execute all enabled pipeline stages in order; return (processed df, metadata).

        Cleanup: removed a leftover debug block that printed the frame head
        and called df.sample(n=26) -- the sample raised ValueError on frames
        with fewer than 26 rows.  Bare `except:` clauses in the interactive
        prompts are narrowed to ValueError.
        """
        def requestPCADims():
            # Interactive fallback when pca_dimension_count is unset.
            inp = input('pca dims? ')
            try:
                self.pca_dimension_count = int(inp.strip())
            except ValueError:
                pass

        def requestClusterCount():
            # Interactive fallback when clustering_counts is unset.
            inp = input('k? ')
            try:
                self.clustering_counts = [int(inp.strip())]
            except ValueError:
                pass

        if self.verbose:
            print('Starting workflow.')
            print('Saving to:', self.get_base_output_dir())
        original_df, meta = cu.full_filter(df=self._df, import_meta=self._df_import_meta, options=self.filter_options, outpath=self.get_base_output_dir())
        if self.further_filter_query_list is not None:
            original_df, md = self.query(original_df, self.further_filter_query_list)
            meta.extend(md)
        self.save_csv_and_meta(original_df, meta, self.get_base_output_dir(), 'filtered_data')
        working_df = original_df.copy()
        # Preprocessing - LogTransform, Scaling, Normalization #
        if self.pre_histogram:
            Workflow.Histogram(working_df, title='Raw Histogram', save=True, save_loc=self.get_base_output_dir())
        if self.do_logtransform:
            working_df, md = Workflow.LogTransformed(working_df)
            meta.extend(md)
        if self.do_scaling:
            working_df, md = Workflow.Scaled(working_df, self.scaling_method)
            meta.extend(md)
        if self.do_normalization:
            working_df, md = Workflow.Normalized(working_df)
            meta.extend(md)
        if self.post_histogram:
            Workflow.Histogram(working_df, title='Preprocessed Histogram', save=True, save_loc=self.get_base_output_dir())
        if self.plot_correlation:
            Workflow.Correlations(working_df, save=True, save_loc=self.get_base_output_dir())
        # scree and PCA
        if self.plot_scree:
            Workflow.PlotScree(working_df, save=True, save_loc=self.get_base_output_dir())
        if self.do_PCA:
            while self.pca_dimension_count is None:
                requestPCADims()
            if self.verbose:
                print('Starting PCA.')
            pca_df, md = Workflow.PCA(working_df, self.pca_dimension_count)
            meta.extend(md)
            cluster_df = pca_df
            meta.append('Cluster on PCA dims')
        else:
            cluster_df = working_df
            meta.append('Cluster on non-PCA dims')
        # silhouette and clustering
        if self.do_clustering:
            while self.clustering_counts is None:
                requestClusterCount()
            for cluster_count in self.clustering_counts:
                self.clustering_count = cluster_count
                if self.verbose:
                    print(f'Starting clustering k={self.clustering_count}')
                labels, md = Workflow.Cluster(cluster_df, clustering_method=self.clustering_method, cluster_count=cluster_count)
                meta.extend(md)
                if self.plot_silhouettes:
                    Workflow.PlotSilhouettes(cluster_df, labels, save=True, save_loc=self.get_base_output_dir())
                if self.plot_cluster_scatter:
                    # NOTE(review): the pca_df scatters assume do_PCA was enabled;
                    # with do_PCA=False pca_df is unbound -- confirm intended usage.
                    Workflow.scatter(pca_df, [0] * len(labels), color_dict=self.color_dict, title='PCA No Label Scatter', save=True, save_loc=self.get_base_output_dir())
                    Workflow.scatter(working_df, labels, color_dict=self.color_dict, title='Preprocessed Scatter', save=True, save_loc=self.get_base_output_dir())
                    Workflow.scatter(pca_df, labels, color_dict=self.color_dict, title=f'PCA Scatter', save=True, save_loc=self.get_base_output_dir())
                    Workflow.scatter(original_df, labels, color_dict=self.color_dict, title='Raw Scatter', save=True, save_loc=self.get_base_output_dir())
                if self.plot_radars:
                    self.radarCharts(original_df, labels)
                self.save_csv_and_meta(cluster_df, meta, self.get_cluster_output_dir(), 'data_clustered_on')
                original_df['label'] = labels
                self.save_csv_and_meta(original_df, meta, self.get_cluster_output_dir(), 'clusters')
                original_df = original_df.drop('label', axis=1)
        return working_df, meta
def add_cluster_features_to_df(pipeline, df, data):
    """Fit *pipeline* on *data*, then append each row's PCA offset from its
    cluster centroid to *df* as 'PCA1 Offset' / 'PCA2 Offset' columns.

    Assumes the pipeline's final step is a KMeans-like clusterer exposing
    predict() and cluster_centers_, and that the preceding steps project to
    at least 2 dimensions -- TODO confirm with callers.
    """
    pipeline.fit(data)
    PCA_dims = pipeline[:-1].transform(data)  # all steps except the clusterer
    clustering = pipeline[-1]
    labels = clustering.predict(PCA_dims)
    distances = []
    for a, l in zip(PCA_dims, labels):
        b = clustering.cluster_centers_[l]
        distances.append(a - b)  # signed offset from the assigned centroid
    labels = labels  # no-op; kept as in the original
    df['PCA1 Offset'] = np.array(distances)[:, 0]
    df['PCA2 Offset'] = np.array(distances)[:, 1]
# def main():
# utils.init_path()
# filter_options = cu.options.lakeland_actions_lvl01
# output_foler = r'G:\My Drive\Field Day\Research and Writing Projects\2020 CHI Play - Lakeland Clustering\Jupyter\Results\Lakeland\test'
# df_getter = cu.getLakelandDecJanLogDF
# w = Workflow(filter_options=filter_options, base_output_dir=output_foler)
# w.RunWorkflow(get_df_func=df_getter)
# if __name__ == '__main__':
# main()
| StarcoderdataPython |
6628146 | # -*- coding: utf-8 -*-
from robotkernel.builders import build_suite
TEST_SUITE = """\
*** Settings ***
Library Collections
*** Keywords ***
Head
[Arguments] ${list}
${value}= Get from list ${list} 0
[Return] ${value}
*** Tasks ***
Get head
${array}= Create list 1 2 3 4 5
${head}= Head ${array}
Should be equal ${head} 1
"""
def test_string():
    """build_suite should parse the plain-text suite into 1 keyword and 1 task."""
    suite = build_suite(TEST_SUITE, {})
    assert len(suite.resource.keywords) == 1
    assert len(suite.tests) == 1
| StarcoderdataPython |
3382775 | <reponame>cjw296/mush
from unittest import TestCase
from testfixtures import ShouldRaise
from mush import Context
from .compat import PY32
class TheType(object):
    """Minimal fixture type with a stable repr, keyed into Context by the tests."""
    def __repr__(self):
        return '<TheType obj>'
class TestContext(TestCase):
    """Behavioural tests for mush.Context: add/get keyed by type, repr,
    duplicate-type clashes, None handling, and iteration."""

    def test_simple(self):
        # add() keys the object by its own type; repr/str show the mapping.
        obj = TheType()
        context = Context()
        context.add(obj)
        self.assertTrue(context.get(TheType) is obj)
        self.assertEqual(
            repr(context),
            "<Context: {<class 'mush.tests.test_context.TheType'>: <TheType obj>}>"
        )
        self.assertEqual(
            str(context),
            "<Context: {<class 'mush.tests.test_context.TheType'>: <TheType obj>}>"
        )
    def test_explicit_type(self):
        # add() accepts an explicit type key different from the object's type.
        class T2(object): pass
        obj = TheType()
        context = Context()
        context.add(obj, T2)
        self.assertTrue(context.get(T2) is obj)
        # Qualified-name formatting of nested classes differs across versions.
        if PY32:
            expected = ("<Context: {"
                        "<class 'mush.tests.test_context.TestContext."
                        "test_explicit_type.<locals>.T2'>: "
                        "<TheType obj>}>")
        else:
            expected = ("<Context: {<class 'mush.tests.test_context.T2'>:"
                        " <TheType obj>}>")
        self.assertEqual(repr(context), expected)
        self.assertEqual(str(context), expected)
    def test_clash(self):
        # Registering a second object under an already-present type raises.
        obj1 = TheType()
        obj2 = TheType()
        context = Context()
        context.add(obj1)
        with ShouldRaise(ValueError('Context already contains TheType')):
            context.add(obj2)
    def test_missing(self):
        # get() on an unregistered type raises KeyError.
        context = Context()
        with ShouldRaise(KeyError('No TheType in context')):
            context.get(TheType)
    def test_iter(self):
        # check state is preserved
        # NOTE(review): each tuple() only yields the element appended since the
        # last iteration, so iterating appears to drain req_objs -- confirm.
        context = Context()
        context.req_objs.append(1)
        self.assertEqual(tuple(context), (1, ))
        context.req_objs.append(2)
        self.assertEqual(tuple(context), (2, ))
    def test_add_none(self):
        # None without an explicit type is rejected (its type would be useless).
        context = Context()
        with ShouldRaise(ValueError('Cannot add None to context')):
            context.add(None)
    def test_add_none_with_type(self):
        # None is allowed when the key type is given explicitly.
        context = Context()
        context.add(None, TheType)
        self.assertTrue(context.get(TheType) is None)
    def test_old_style_class(self):
        class Type(): pass
        obj = Type()
        context = Context()
        context.add(obj)
        self.assertTrue(context.get(Type) is obj)
    def test_old_style_class_explicit(self):
        class Type(): pass
        obj = object()
        context = Context()
        context.add(obj, Type)
        self.assertTrue(context.get(Type) is obj)
    def test_get_nonetype(self):
        # Looking up NoneType on an empty context yields None rather than raising.
        self.assertTrue(Context().get(type(None)) is None)
5053944 | <filename>setup.py
# -*- coding: utf-8 -*-
import os
import sys
__DIR__ = os.path.abspath(os.path.dirname(__file__))
import codecs
from setuptools import setup
from setuptools.command.test import test as TestCommand
import demeter
def read(filename):
    """Read and return `filename` in root dir of project and return string."""
    # Use a context manager so the file handle is closed promptly instead of
    # leaking until garbage collection (the original never closed it).
    with codecs.open(os.path.join(__DIR__, filename), 'r') as f:
        return f.read()
# Dependencies and long description are read from files next to setup.py.
install_requires = read("requirements.txt").split()
long_description = read('README.rst')
setup(
    name="Demeter",
    version=demeter.__version__,
    url='https://github.com/shemic/demeter',
    license='MIT License',
    author='Rabin',
    author_email='<EMAIL>',
    description=('A simple framework based on Tornado'),
    long_description=long_description,
    packages=['demeter'],
    install_requires = install_requires,
    #tests_require=['pytest'],
    #cmdclass = {'test': install},
    include_package_data=True,
    package_data = {},
    data_files=[
        # Populate this with any files config files etc.
    ],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
    ]
)
3231456 |
class Solution(object):
    def distributeCandies(self, candyType):
        """Return the max number of distinct candy types obtainable when
        eating exactly half of the candies.

        :type candyType: List[int]
        :rtype: int
        """
        half = len(candyType) // 2
        distinct = len(set(candyType))
        # Either every pick can be a new type, or we run out of picks first.
        return distinct if distinct < half else half
3527762 | import turtle as t
from turtle import *
import random as r
import time
# Base length that scales the whole drawing.
n = 100.0
speed("fastest")
screensize(bg='black')
# Move up and draw the five-pointed star that tops the tree.
left(90)
forward(3 * n)
color("orange", "yellow")
begin_fill()
left(126)
for i in range(5):
    forward(n / 5)
    right(144)
    forward(n / 5)
    left(72)
end_fill()
right(126)
def drawlight():
    """Randomly draw a tree light at the current turtle position.

    ~1/31 chance each of a large tomato light or a small orange light;
    otherwise just reset the pen color to the branch green.
    """
    if r.randint(0, 30) == 0:
        color('tomato')
        circle(6)
    elif r.randint(0, 30) == 1:
        color('orange')
        circle(3)
    else:
        color('dark green')
# Return down the trunk line in branch color before recursing into the tree.
color("dark green")
backward(n * 4.8)
def tree(d, s):
    """Recursively draw the tree: depth *d*, branch length *s*.

    Each branch spawns a longer continuation (d-1, 0.8*s) and two shorter
    side branches (d-3, 0.5*s), sprinkling lights via drawlight(), then
    backtracks so the turtle returns to the branch origin.
    """
    if d <= 0: return
    forward(s)
    tree(d - 1, s * .8)
    right(120)
    tree(d - 3, s * .5)
    drawlight()
    right(120)
    tree(d - 3, s * .5)
    right(120)
    backward(s)
# Draw the tree itself, then scatter 200 random ornaments around it.
tree(15, n)
backward(n / 2)
for i in range(200):
    # Random offset from the current position; pen up while travelling.
    a = 200 - 400 * r.random()
    b = 10 - 20 * r.random()
    up()
    forward(b)
    left(90)
    forward(a)
    down()
    if r.randint(0, 1) == 0:
        color('tomato')
    else:
        color('wheat')
    circle(2)
    # Retrace the offset so errors don't accumulate.
    up()
    backward(a)
    right(90)
    backward(b)
t.color("dark red", "red")
t.write("Merry Christmas", align="center", font=("Comic Sans MS", 40, "bold"))
def drawsnow():
    """Scatter 200 white asterisk-shaped snowflakes of random size/position."""
    t.ht()  # hide the turtle cursor
    t.pensize(2)
    for i in range(200):
        t.pencolor("white")
        t.pu()
        t.setx(r.randint(-350, 350))
        t.sety(r.randint(-100, 350))
        t.pd()
        dens = 6  # number of flake arms
        snowsize = r.randint(1, 10)
        for j in range(dens):
            t.fd(int(snowsize))
            t.backward(int(snowsize))
            t.right(int(360 / dens))
# Finish the scene with snow, then hand control to the turtle event loop.
drawsnow()
t.done()
6440026 | <gh_stars>0
"""
The MIT License (MIT)
Copyright (c) 2015 Red Hat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.mgb
"""
from __future__ import print_function
import docker
import json
import logging
import os
import re
import tarfile
import tempfile
import time
# Prefer the legacy docker.Client API; fall back to APIClient (docker-py >= 2.x),
# honouring DOCKER_HOST for the daemon endpoint.
# NOTE(review): the bare except also swallows unrelated failures (e.g.
# connection errors); presumably AttributeError/TypeError is what is expected
# here -- confirm before narrowing.
try:
    d = docker.Client(version="1.22")
except:
    base_url=os.getenv('DOCKER_HOST', 'unix://var/run/docker.sock')
    d = docker.APIClient(base_url=base_url, version="1.22")
class ExecException(Exception):
    """Raised when a command executed inside a container fails or times out."""

    def __init__(self, message, output=None):
        """Keep the captured command *output* (if any) alongside the message."""
        self.output = output
        super(ExecException, self).__init__(message)
class Container(object):
    """
    Object representing a docker test container, it is used in tests.

    Supports the context-manager protocol: entering starts the container,
    exiting stops it and optionally removes its image.
    """
    def __init__(self, image_id, name=None, remove_image=False, output_dir="output", save_output=True, volumes=None, entrypoint=None, **kwargs):
        """Record container configuration; nothing is started until start().

        Extra volumes and environment entries may be injected through the
        CTF_DOCKER_VOLUMES ("out:in:z,out2:in2:z") and CTF_DOCKER_ENV
        ("foo=bar,env=baz") environment variables.
        """
        self.image_id = image_id
        self.container = None
        self.name = name
        self.ip_address = None
        self.output_dir = output_dir
        self.save_output = save_output
        # NOTE: this boolean attribute shadows the remove_image() method on
        # instances; __exit__ works around that below.
        self.remove_image = remove_image
        self.kwargs = kwargs
        self.logging = logging.getLogger("dock.middleware.container")
        self.running = False
        self.volumes = volumes
        self.environ = {}
        self.entrypoint = entrypoint
        # get volumes from env (CTF_DOCKER_VOLUMES=out:in:z,out2:in2:z)
        try:
            if "CTF_DOCKER_VOLUMES" in os.environ:
                # Fixed: was `else None`, which crashed with None.extend()
                # whenever explicit volumes were combined with the env var.
                self.volumes = [] if self.volumes is None else self.volumes
                self.volumes.extend(os.environ["CTF_DOCKER_VOLUMES"].split(','))
        except Exception as e:
            self.logging.error("Cannot parse CTF_DOCKER_VOLUME variable %s", e)
        # get env from env (CTF_DOCKER_ENV="foo=bar,env=baz")
        try:
            if "CTF_DOCKER_ENV" in os.environ:
                for variable in os.environ["CTF_DOCKER_ENV"].split(','):
                    name, value = variable.split('=', 1)
                    self.environ.update({name: value})
        except Exception as e:
            self.logging.error("Cannot parse CTF_DOCKER_ENV variable, %s", e)

    def __enter__(self):
        self.start(**self.kwargs)
        return self  # allow `with Container(...) as c:` (previously returned None)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        if self.remove_image:
            # Fixed: `self.remove_image` is the boolean flag (it shadows the
            # method), so `self.remove_image()` raised TypeError; call the
            # method through the class instead.
            Container.remove_image(self)

    def start(self, **kwargs):
        """ Starts a detached container for selected image """
        self._create_container(**kwargs)
        self.logging.debug("Starting container '%s'..." % self.container.get('Id'))
        d.start(container=self.container)
        self.running = True
        self.ip_address = self.inspect()['NetworkSettings']['IPAddress']

    def _remove_container(self, number=1):
        """Remove the container, retrying up to three times with a cooldown."""
        self.logging.info("Removing container '%s', %s try..." % (self.container['Id'], number))
        try:
            d.remove_container(self.container)
            self.logging.info("Container '%s' removed", self.container['Id'])
        except Exception:
            self.logging.info("Removing container '%s' failed" % self.container['Id'])
            if number > 3:
                raise
            # Give 20 more seconds for the devices to cool down
            time.sleep(20)
            self._remove_container(number + 1)

    def stop(self):
        """
        Stops (and removes) selected container.
        Additionally saves the STDOUT output to a `container_output` file for later investigation.
        """
        if self.running and self.save_output:
            if self.name:
                self.name = "%s_%s" % (self.name, self.container.get('Id'))
            else:
                self.name = self.container.get('Id')
            # Keep only word characters/spaces in the file name.
            filename = "".join([c for c in self.name if re.match(r'[\w\ ]', c)]).replace(" ", "_")
            out_path = self.output_dir + "/" + filename + ".txt"
            if not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            with open(out_path, 'w') as f:
                print(d.logs(container=self.container.get('Id'), stream=False), file=f)
        if self.container:
            self.logging.debug("Removing container '%s'" % self.container['Id'])
            # Kill only running container
            if self.inspect()['State']['Running']:
                d.kill(container=self.container)
            self.running = False
            self._remove_container()
            self.container = None

    def startWithCommand(self, **kwargs):
        """ Starts a detached container for selected image with a custom command"""
        self._create_container(tty=True, **kwargs)
        self.logging.debug("Starting container '%s'..." % self.container.get('Id'))
        d.start(self.container)
        self.running = True
        self.ip_address = self.inspect()['NetworkSettings']['IPAddress']

    def execute(self, cmd, detach=False):
        """ executes cmd in container and return its output """
        inst = d.exec_create(container=self.container, cmd=cmd)
        if detach:
            d.exec_start(inst, detach)
            return None
        output = d.exec_start(inst, detach=detach)
        retcode = d.exec_inspect(inst)['ExitCode']
        count = 0
        # Poll for up to ~15s until the exec reports an exit code.
        while retcode is None:
            count += 1
            retcode = d.exec_inspect(inst)['ExitCode']
            time.sleep(1)
            if count > 15:
                raise ExecException("Command %s timed out, output: %s" % (cmd, output))
        # Fixed: was `retcode is not 0` -- identity comparison on an int.
        if retcode != 0:
            raise ExecException("Command %s failed to execute, return code: %s" % (cmd, retcode), output)
        return output

    def inspect(self):
        """Return the docker inspect dict for the container (None if not created)."""
        if self.container:
            return d.inspect_container(container=self.container.get('Id'))

    def get_output(self, history=True):
        """Return the container's log output, falling back to attach()."""
        try:
            return d.logs(container=self.container)
        except Exception:
            return d.attach(container=self.container, stream=False, logs=history)

    def remove_image(self, force=False):
        """Remove the container's image.

        Note: instance attribute `remove_image` (a bool) shadows this method;
        call it as Container.remove_image(instance) when needed.
        """
        self.logging.info("Removing image %s" % self.image_id)
        d.remove_image(image=self.image_id, force=force)

    def copy_file_to_container(self, src_file, dest_folder):
        """Copy a local file into *dest_folder* inside the container."""
        if not os.path.isabs(src_file):
            src_file = os.path.abspath(os.path.join(os.getcwd(), src_file))
        # The Docker library needs tar bytes to put_archive
        with tempfile.NamedTemporaryFile() as f:
            with tarfile.open(fileobj=f, mode='w') as t:
                t.add(src_file, arcname=os.path.basename(src_file), recursive=False)
            f.seek(0)
            d.put_archive(
                container=self.container['Id'],
                path=dest_folder,
                data=f.read())

    def _create_container(self, **kwargs):
        """ Creates a detached container for selected image """
        if self.running:
            self.logging.debug("Container is running")
            return
        volume_mount_points = None
        host_args = {}
        if self.volumes:
            volume_mount_points = []
            for volume in self.volumes:
                volume_mount_points.append(volume.split(":")[0])
            host_args['binds'] = self.volumes
        # update kwargs with env override
        kwargs_env = kwargs["environment"] if "environment" in kwargs else {}
        kwargs_env.update(self.environ)
        kwargs.update(dict(environment=kwargs_env))
        # 'env_json' is an environment dict packed into JSON, possibly supplied by
        # steps like 'container is started with args'
        if "env_json" in kwargs:
            env = json.loads(kwargs["env_json"])
            kwargs_env = kwargs["environment"] if "environment" in kwargs else {}
            kwargs_env.update(env)
            kwargs.update(dict(environment=kwargs_env))
            del kwargs["env_json"]
        self.logging.debug("Creating container from image '%s'..." % self.image_id)
        # we need to split kwargs to the args with belongs to create_host_config and
        # create_container - be aware - this moved to differnet place for new docker
        # python API
        host_c_args_names = docker.utils.utils.create_host_config.__code__.co_varnames
        host_c_args_names = list(host_c_args_names) + ['cpu_quota', 'cpu_period', 'mem_limit']
        for arg in host_c_args_names:
            if arg in kwargs:
                host_args[arg] = kwargs.pop(arg)
                try:
                    host_args[arg] = int(host_args[arg])
                except (TypeError, ValueError):
                    # non-numeric host args (e.g. mem_limit="512m") stay as-is
                    pass
        self.container = d.create_container(image=self.image_id,
                                            detach=True,
                                            entrypoint=self.entrypoint,
                                            volumes=volume_mount_points,
                                            host_config=d.create_host_config(**host_args),
                                            **kwargs)
139275 | import graphene
from ...invoice import models
from ..core.types import Job, ModelObjectType
from ..meta.types import ObjectWithMetadata
class Invoice(ModelObjectType):
    # GraphQL type exposing the Django `invoice.models.Invoice` model.
    # Scalar fields mirrored from the model:
    number = graphene.String()
    external_url = graphene.String()
    created_at = graphene.DateTime(required=True)
    updated_at = graphene.DateTime(required=True)
    message = graphene.String()
    url = graphene.String(description="URL to download an invoice.")

    class Meta:
        # Exposes Job status fields, metadata, and a relay node id.
        description = "Represents an Invoice."
        interfaces = [ObjectWithMetadata, Job, graphene.relay.Node]
        model = models.Invoice
| StarcoderdataPython |
11264998 | import pytest
from networkx import symmetric_difference
from src import HierarchicalGraph
# Each entry: (input strings, expected superstring length for the trivial
# construction) — consumed by test_trivial_solution below.
trivial_data = [
    (
        [
            'abc',
            'bcd',
            'cde',
        ],
        5,
    ),
    (
        [
            'cde',
            'bcd',
            'abc',
        ],
        9,
    ),
    (
        [
            'abcde',
            'dedef',
            'fabc',
        ],
        11,
    )
]
@pytest.mark.parametrize('strings,expected', trivial_data)
def test_trivial_solution(strings, expected):
    """Trivial construction yields a superstring of the expected length containing every input."""
    graph = HierarchicalGraph(strings)
    graph.construct_trivial_graph()
    superstring = graph.to_string()
    assert len(superstring) == expected
    for needle in strings:
        assert needle in superstring
# Each entry: (input strings, expected superstring length for the greedy
# construction) — consumed by test_greedy_solution below.
greedy_data = [
    (
        [
            'abc',
            'bcd',
            'cde',
        ],
        5,
    ),
    (
        [
            'cde',
            'bcd',
            'abc',
        ],
        5,
    ),
    (
        [
            'abcde',
            'dedef',
            'fabc',
        ],
        9,
    ),
    (
        [
            'GTCCC',
            'TGCCA',
            'CCCGA',
            'ATGCC',
            'CCGAA',
        ],
        13,
    ),
]
@pytest.mark.parametrize('strings,expected', greedy_data)
def test_greedy_solution(strings, expected):
    """Greedy construction yields a superstring of the expected length containing every input."""
    graph = HierarchicalGraph(strings)
    graph.construct_greedy_graph()
    superstring = graph.to_string()
    assert len(superstring) == expected
    for needle in strings:
        assert needle in superstring
# Each entry: (input strings, exact expected superstring).  The same strings
# appear in two different orders — the greedy result must be identical.
deterministic_data = [
    (
        [
            'ccaeae',
            'eaeaea',
            'aeaecc',
        ],
        'eaeaeaccaeaecc',
    ),
    (
        [
            'ccaeae',
            'aeaecc',
            'eaeaea',
        ],
        'eaeaeaccaeaecc',
    ),
]
@pytest.mark.parametrize('strings,expected', deterministic_data)
def test_gha_is_deterministic_to_order(strings, expected):
    """The greedy algorithm produces the same superstring regardless of input order."""
    graph = HierarchicalGraph(strings)
    graph.construct_greedy_graph()
    assert graph.to_string() == expected
# Input-string lists for test_collapsed_greedy_solution below (no expected
# value — the test checks a structural invariant of the resulting graph).
collapsing_data = [
    [
        'abc',
        'bcd',
        'cde',
    ],
    [
        'cde',
        'bcd',
        'abc',
    ],
    [
        'abcde',
        'dedef',
        'fabc',
    ],
    [
        'GAA',
        'TGG',
        'GGA',
    ],
]
@pytest.mark.parametrize('strings', collapsing_data)
def test_collapsed_greedy_solution(strings):
    """Doubling and collapsing a greedy graph leaves its edge set unchanged: CA(GHA) == GHA."""
    graph = HierarchicalGraph(strings)
    graph.construct_greedy_graph()
    before = graph.graph.copy()
    graph.double_and_collapse()
    assert len(symmetric_difference(graph.graph, before).edges()) == 0
| StarcoderdataPython |
8078648 | import math
# Reads three segment lengths from the user and classifies the triangle
# they form (equilateral / isosceles / scalene), or reports that no
# triangle can be formed.
lado_a = float(input('Digite o comprimento de uma reta:'))
lado_b = float(input('Digite o comprimento de outra reta:'))
lado_c = float(input('Digite o comprimento de uma terceira reta:'))

# Triangle inequality, checked for every pair of sides.
forma_triangulo = (
    abs(lado_b - lado_c) < lado_a < (lado_b + lado_c)
    and abs(lado_a - lado_c) < lado_b < (lado_a + lado_c)
    and abs(lado_a - lado_b) < lado_c < (lado_a + lado_b)
)

if not forma_triangulo:
    print('Nao formam um triangulo')
elif lado_a == lado_b == lado_c:
    print('Este e um triangulo equilatero')
elif lado_a == lado_b or lado_a == lado_c or lado_b == lado_c:
    print('Este e um triangulo isosceles')
else:
    print('Este e um triangulo escaleno')
1736611 | import esgfpid
import logging
import sys
import datetime
# Interactive guard: the demo requires a pre-provisioned AMQP exchange.
input('Make sure you have an exchange "test123" ready, including a queue and the required bindings (see inside this script). Ok? (press any key)')
if not len(sys.argv) == 4:
    print('Please call with <host> <user> <password>!')
    exit(999)
# Please fill in functioning values!
# AMQP broker connection parameters, taken from the command line.
HOST = sys.argv[1]
USER = sys.argv[2]
PW = sys.argv[3]
VHOST = 'esgf-pid'
AMQP_PORT = 5672
SSL = False
EXCH = 'test123'
# This exchange needs to have bindings to a queue using these routing keys:
# 2114100.HASH.fresh.publi-ds-repli
# 2114100.HASH.fresh.publi-file-repli
# 2114100.HASH.fresh.unpubli-allvers
# 2114100.HASH.fresh.unpubli-onevers
# PREFIX.HASH.fresh.preflightcheck
# UNROUTABLE.UNROUTABLE.fresh.UNROUTABLE
# 2114100.HASH.fresh.datacart <----- mandatory!
# 2114100.HASH.fresh.errata-add
# 2114100.HASH.fresh.errata-rem
# Dummy values (publication placeholders; only a few are actually used below)
pid_prefix = '21.14100'
pid_data_node = 'our.data.node.test'
thredds_service_path = '/my/thredds/path/'
test_publication = True
datasetName = 'myDatasetName'
versionNumber = '20209999'
is_replica = True
file_name = 'myFunnyFile.nc'
trackingID = '%s/123456789999' % pid_prefix
checksum = 'checki'
file_size = '999'
publishPath = '/my/publish/path'
checksumType = 'MB99'
fileVersion = '2020999'
# Configure logging
# Root logger at DEBUG to stdout; pika's chatter capped at INFO.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
pikalogger = logging.getLogger('pika')
pikalogger.setLevel(logging.INFO)
# File for error and warn
filename = './log_datacart.log'
handler = logging.FileHandler(filename=filename)
handler.setFormatter(formatter)
handler.setLevel(logging.WARN)
root.addHandler(handler)
LOGGER = logging.getLogger(__name__)
LOGGER.warning('________________________________________________________')
LOGGER.warning('___________ STARTING SCRIPT: DATA CART! ________________')
LOGGER.warning('___________ %s ___________________________' % datetime.datetime.now().strftime('%Y-%m-%d_%H_%M'))
# Create credentials
# (This does not connect)
creds = dict(
    url=HOST,
    user=USER,
    password=PW,
    vhost=VHOST,
    port=AMQP_PORT,
    ssl_enabled=SSL)
# Create a connector
# (This does not connect)
pid_connector = esgfpid.Connector(
    handle_prefix=pid_prefix,
    messaging_service_exchange_name=EXCH,
    messaging_service_credentials=[creds], # list of dicts
    data_node=pid_data_node,
    thredds_service_path=thredds_service_path,
    test_publication=test_publication)
# Get a pid:
# (the handle string itself is not used further in this demo)
pid = pid_connector.make_handle_from_drsid_and_versionnumber(
    drs_id=datasetName,
    version_number=versionNumber)
# Open the connection, send messages, close
pid_connector.start_messaging_thread()
pid_connector.create_data_cart_pid(dict(
    mydrs1 = 'mypid1',
    mydrs2 = 'mypid2'))
pid_connector.finish_messaging_thread()
print('Check log for errors (none expected)')
tmp = 'Routing Key:\t"2114100.HASH.fresh.datacart"\nContent:\t"{"handle": "hdl:...", "message_timestamp": "...", "data_cart_content": {"mydrs1": "mypid1", "mydrs2": "mypid2"}, "operation": "shopping_cart", [...]'
print('Check queue for one new message:\n%s' % tmp)
LOGGER.warning('___________ DONE _______________________________________')
def is_number(string):
    """Return True if *string* can be parsed as a float, False otherwise."""
    try:
        float(string)
    except ValueError:
        return False
    return True
def continue_calculations():
    """Ask whether to keep calculating; re-prompt until 'y' (True) or 'n' (False)."""
    while True:
        answer = input(msg_5)
        if answer == "y":
            return True
        if answer == "n":
            return False
def is_one_digit(number):
    """Return True when *number* is an integer-valued number strictly between -10 and 10."""
    # The chained comparison and the modulo test each yield a bool,
    # so returning the expression directly is equivalent to the old
    # if/return-True/return-False pattern.
    return -10 < number < 10 and number % 1 == 0
def check(x, y, oper):
    """Print a teasing message when the operands make the problem trivially easy."""
    remarks = ""
    if is_one_digit(x) and is_one_digit(y):
        remarks += msg_6
    if (x == 1 or y == 1) and oper == "*":
        remarks += msg_7
    if (x == 0 or y == 0) and oper in ["*", "+", "-"]:
        remarks += msg_8
    if remarks:
        print(msg_9 + remarks)
def choice_confirm(res):
    """Confirm that *res* should be stored in memory.

    Multi-digit results are stored without questions; one-digit results
    must survive three increasingly mocking "are you sure?" prompts.
    Returns True when the value should be stored, False on any 'n'.
    """
    if not is_one_digit(res):
        return True
    # Same three prompts as before, asked in order; any 'n' aborts,
    # any answer other than 'y'/'n' re-asks the same prompt.
    for prompt in (msg_10, msg_11, msg_12):
        while True:
            confirmation = input(prompt)
            if confirmation == "n":
                return False
            if confirmation == "y":
                break
    return True
# User-facing messages for the snarky calculator loop below.
msg_0 = "Enter an equation"
msg_1 = "Do you even know what numbers are? Stay focused!"
msg_2 = "Yes ... an interesting math operation. You've slept through all classes, haven't you?"
msg_3 = "Yeah... division by zero. Smart move..."
msg_4 = "Do you want to store the result? (y / n):"
msg_5 = "Do you want to continue calculations? (y / n):"
msg_6 = " ... lazy"
msg_7 = " ... very lazy"
msg_8 = " ... very, very lazy"
msg_9 = "You are"
msg_10 = "Are you sure? It is only one digit! (y / n)"
msg_11 = "Don't be silly! It's just one number! Add to the memory? (y / n)"
msg_12 = "Last chance! Do you really want to embarrass yourself? (y / n)"
operations = ["+", "-", "*", "/"]
memory = 0.0
# Main read-eval loop.  Expects input of the form "<operand> <op> <operand>",
# where either operand may be the letter M (the stored memory value).
# NOTE(review): input with fewer than three whitespace-separated tokens
# raises IndexError at calc[2] below — confirm whether malformed input
# should be handled more gracefully.
while True:
    calc = input(msg_0)
    calc = calc.split()
    if calc[0] == "M":
        calc[0] = memory
    if calc[2] == "M":
        calc[2] = memory
    if not is_number(calc[0]):
        print(msg_1)
        continue
    if not is_number(calc[2]):
        print(msg_1)
        continue
    if calc[1] not in operations:
        print(msg_2)
        continue
    # Tease the user about trivial operand combinations before computing.
    check(float(calc[0]), float(calc[2]), calc[1])
    if calc[1] == "+":
        result = float(calc[0]) + float(calc[2])
    elif calc[1] == "-":
        result = float(calc[0]) - float(calc[2])
    elif calc[1] == "*":
        result = float(calc[0]) * float(calc[2])
    elif calc[1] == "/" and float(calc[2]) != 0:
        result = float(calc[0]) / float(calc[2])
    else:
        # Only reachable for division by zero (operator validity was
        # already checked above).
        print(msg_3)
        continue
    print(result)
    # Optionally store the result; one-digit results need extra confirmation.
    while True:
        choice = input(msg_4)
        if choice == "y":
            if choice_confirm(result):
                memory = result
        elif not choice == "n":
            continue
        break
    if continue_calculations():
        continue
    break
| StarcoderdataPython |
8172956 | <filename>src/call_cmd.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
import subprocess
import os, sys
class ExitException(Exception):
    """Raised to unwind to the top level with a process exit code.

    The exit code is available as ``retcode`` and, via the Exception base
    class, through ``args`` / ``str()``.
    """

    def __init__(self, retcode):
        # Initialise the Exception base too, so str(exc) carries the code
        # instead of being empty.
        super().__init__(retcode)
        self.retcode = retcode
def raise_exit(retcode=1, err_msg="Error"):
    """Print *err_msg*, then abort by raising ExitException(retcode)."""
    print(err_msg)
    raise ExitException(retcode)
def re_if_not(res, err_msg):
    """Abort with *err_msg* (via raise_exit, default retcode 1) unless *res* is truthy."""
    if not res:
        raise_exit(err_msg=err_msg)
def call_cmd(cmd, cwd=None, err_msg=None, env=None):
    """Run *cmd* via subprocess.call and abort (raise_exit) on a non-zero exit code."""
    # On Windows we want a plain CreateProcess() call, without any
    # cmd.exe /c "" wrapping; elsewhere go through the shell.
    use_shell = sys.platform != 'win32'
    retcode = subprocess.call(cmd, cwd=cwd, shell=use_shell, env=env)
    if retcode:
        message = err_msg if err_msg else 'command failed: %s' % cmd
        raise_exit(retcode, message)
def make_call_in_dst(dst):
    """Return a call_cmd wrapper that runs commands inside *dst* (or a subdirectory of it)."""
    def call_in_dst(cmd, rel_cwd=None):
        target = os.path.join(dst, rel_cwd) if rel_cwd else dst
        call_cmd(cmd, cwd=target)
    return call_in_dst
def popen_output(cmd, cwd=None, shell=True):
    """Run *cmd* and return its captured stdout as bytes."""
    process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, shell=shell)
    stdout, _ = process.communicate()
    return stdout
| StarcoderdataPython |
9625167 | <filename>Calculus methods/1 part/lab3/lab.py
import numpy as np
import matplotlib.pyplot as plt
import pylab
import math
def find_y_in_x(X, Y, x):
    """Evaluate the polynomial interpolating the points (X, Y) at *x*.

    Uses Neville's scheme, vectorised over numpy arrays.  Generalised to
    accept any numeric sequences (lists, tuples, arrays); X values must be
    pairwise distinct.  The inputs are not modified.
    """
    X = np.asarray(X, dtype=float)
    y = np.array(Y, dtype=float)  # working copy of the ordinates
    m = len(X)
    for k in range(1, m):
        # One Neville sweep: combine neighbouring estimates of order k-1
        # into estimates of order k.
        y[0:m - k] = ((x - X[k:m]) * y[0:m - k]
                      + (X[0:m - k] - x) * y[1:m - k + 1]) / (X[0:m - k] - X[k:m])
    return y[0]
def main():
    """Interpolate sin(x)^2 through five nodes, plot it, and print the error at pi/5."""
    X = np.array([0,math.pi/6,math.pi/4,math.pi/3,math.pi/2])
    Y = np.sin(X)**2
    x_in = math.pi/5
    y_true = np.sin(x_in)**2
    # Dense evaluation grid for the interpolant curve.
    xnew = np.linspace(np.min(X),np.max(X),100)
    ynew = [find_y_in_x(X,Y,i) for i in xnew]
    plt.plot(X,Y,'o',xnew,ynew)
    plt.grid(True)
    y_in = find_y_in_x(X,Y,x_in)
    # Interpolation error at the probe point (expected to be small).
    print(y_in-y_true)
    pylab.show()
main()
1834487 | # Helper Functions
# Function checkCommas: checks if word has comma at front or at last or at both if true then return front,word and last
def checkCommas(word):
    """Split one leading and one trailing ',' or '.' off *word*.

    Returns (leading, stripped_word, trailing); each punctuation slot is
    the empty string when absent.  Words of length <= 1 are returned
    unchanged.
    """
    leading = ""
    trailing = ""
    if len(word) > 1:
        if word.endswith(",") or word.endswith("."):
            trailing = word[-1]
            word = word[:-1]
        if word.startswith(",") or word.startswith("."):
            leading = word[0]
            word = word[1:]
    return leading, word, trailing
# Function getRules: define all the rules of written english here
def getRules():
    """Return the written-English conversion rules.

    Three rule groups:
      * "Numbers": spoken number word -> integer value
      * "Tuples":  multiplier word (single/double/...) -> repetition count
      * "General": spelled-out abbreviation ("P M") -> joined form ("PM")
    """
    rules = {"Numbers": {
        "zero": 0,
        "one": 1,
        "two": 2,
        "three": 3,
        "four": 4,
        "five": 5,
        "six": 6,
        "seven": 7,
        "eight": 8,
        "nine": 9,
        "ten": 10,
        "eleven": 11,
        "twelve": 12,
        "thirteen": 13,
        # "forteen" is a misspelling kept for backward compatibility;
        # the correct spelling is recognised as well.
        "forteen": 14,
        "fourteen": 14,
        "fifteen": 15,
        "sixteen": 16,
        "seventeen": 17,
        "eighteen": 18,
        "nineteen": 19,
        "twenty": 20,
        "thirty": 30,
        "forty": 40,
        "fifty": 50,
        "sixty": 60,
        "seventy": 70,
        "eighty": 80,
        "ninety": 90,
        "hundred": 100,
        "thousand": 1000
    },
        "Tuples": {
        "single": 1,
        "double": 2,
        "triple": 3,
        "quadruple": 4,
        "quintuple": 5,
        "sextuple": 6,
        "septuple": 7,
        "octuple": 8,
        "nonuple": 9,
        "decuple": 10
    },
        "General": {
        "C M": "CM",
        "P M": "PM",
        "D M": "DM",
        "A M": "AM"
    }
    }
    return rules
# MAIN CLASS for the logic: spEngtoWritEng
class spEngtoWritEng:
    """Converts spoken-English constructs in a paragraph to written English.

    Handles three patterns (rules come from getRules()):
      * "<number word> dollar(s)"  -> "$<digits>"
      * "<tuple word> <letter>"    -> the letter repeated ("Triple A" -> "AAA")
      * spelled-out abbreviations  -> joined ("P M" -> "PM")
    """

    def __init__(self):
        self.rules = getRules()
        self.paragraph = ""       # raw user input
        self.convertedPara = ""   # accumulated converted output

    # to get input from user in the form of paragraph
    def getInput(self):
        """Read the paragraph from stdin; reject empty input."""
        self.paragraph = input("\nEnter spoken english:\n\t")
        if not self.paragraph:
            raise ValueError("Error: You entered nothing.")

    # to print the output after converting to written english
    def printOutput(self):
        """Print the converted paragraph."""
        print("\nConverted Written English Paragraph: \n\n \"" +
              self.convertedPara+"\"")

    # main function to convert spoken to written english
    def convert(self):
        """Walk the words, rewriting recognised two-word patterns; others pass through."""
        # splitting paragraph into individual words
        words_of_para = self.paragraph.split()
        numbers = self.rules['Numbers']
        tuples = self.rules['Tuples']
        general = self.rules['General']
        i = 0
        no_of_words = len(words_of_para)
        while i < no_of_words:
            # start/last: punctuation stripped off the current word.
            start, word, last = checkCommas(words_of_para[i])
            if i+1 != no_of_words:
                # when word is of the form e.g.: two
                front_n, next_word, last_n = checkCommas(words_of_para[i+1])
                # checking dollar
                if word.lower() in numbers.keys() and (next_word.lower() == 'dollars' or next_word.lower() == 'dollar'):
                    # NOTE(review): punctuation attached to the 'dollars'
                    # token (front_n/last_n) is dropped here — confirm intended.
                    self.convertedPara = self.convertedPara+" " + \
                        start+"$"+str(numbers[word.lower()])+last
                    i = i+2
                elif word.lower() in tuples.keys() and len(next_word) == 1:
                    # when word is of form Triple A
                    self.convertedPara = self.convertedPara+" " + \
                        front_n+(next_word*tuples[word.lower()])+last_n
                    i = i+2
                elif (word+" "+next_word) in general.keys():
                    # if word is of form P M or C M
                    self.convertedPara = self.convertedPara+" "+start+word+next_word+last_n
                    i = i+2
                else:
                    # No pattern matched: copy the original token verbatim.
                    self.convertedPara = self.convertedPara + \
                        " "+words_of_para[i]
                    i = i+1
            else:
                # Last word of the paragraph: nothing to pair with.
                self.convertedPara = self.convertedPara+" "+words_of_para[i]
                i = i+1
# main function
def convert_sp_to_wr():
    """Drive one spoken-to-written conversion: read input, convert, print the result."""
    engine = spEngtoWritEng()
    engine.getInput()
    engine.convert()
    engine.printOutput()
| StarcoderdataPython |
3398270 | <filename>Example-University-System/university.py<gh_stars>0
class College:
def __init__(self, **kwargs):
'''
**kwargs is the keyworded arguments
'''
self.name = kwargs['name']
self.id = kwargs['id']
# Initialize an empty college.
self.professors = {}
self.staff_members = {}
self.courses = {}
def add_student(self, student):
self.students.add(student)
def add_professor(self, prof):
self.professors.add(prof)
def add_staff_members(self, staff):
self.staff_members.add(staff)
def add_courses(self, course):
self.courses.add(course)
def cal_expenses(self):
'''As an Example all of the costs are salaries'''
return sum(prof.salary for prof in self.professors) + sum(staff.salary for staff in self.staff_members)
def cal_revenue(self):
return sum(course.tuition for course in self.courses) + sum(staff.salary for staff in self.staff_members)
class University:
    """A university that aggregates financial figures over its colleges."""

    def __init__(self, colleges):
        self.colleges = colleges

    def cal_expenses(self):
        """Total expenses across all colleges."""
        total = 0
        for college in self.colleges:
            total += college.cal_expenses()
        return total

    def cal_revenue(self):
        """Total revenue across all colleges."""
        total = 0
        for college in self.colleges:
            total += college.cal_revenue()
        return total
| StarcoderdataPython |
1895616 | <reponame>nfd/atj2127decrypt
import os
import sys
import difflib
def hexdump(b):
    """Yield classic hex-dump lines for *b*: 16 hex bytes, then the printable characters."""
    for offset in range(0, len(b), 16):
        row = b[offset:offset + 16]
        hexpart = ' '.join('%02x' % (byte,) for byte in row)
        charpart = ''.join(chr(byte) if 0x20 <= byte <= 0x7e else '.' for byte in row)
        yield '%-48s %s' % (hexpart, charpart)
def hexcompare(expected, actual):
    """Print a unified diff of the hex dumps of two byte strings."""
    expected_lines = list(hexdump(expected))
    actual_lines = list(hexdump(actual))
    diff = difflib.unified_diff(expected_lines, actual_lines,
                                fromfile='expected', tofile='but got')
    for line in diff:
        print(line)
class MockUSBDevice:
    """Stand-in for a USB device that replays captured packets from disk.

    Packets live in numbered files matching *packet_template* (a %-style
    path template).  write() asserts the outgoing data matches the next
    captured packet; read() returns the next captured packet as the
    device's response.
    """

    def __init__(self, packet_template, num_packets, skip_header=0, max_size=None):
        # skip_header: bytes stripped from the front of each capture file.
        self.packet_template = packet_template
        self.next_packet = 1
        self.num_packets = num_packets
        self.skip_header = skip_header
        self.max_size = max_size # prior to skip_header

    @property
    def finished(self):
        """True once every captured packet has been consumed."""
        return self.next_packet == self.num_packets + 1

    def log(self, txt):
        print(txt)

    def trim(self, data):
        """Truncate *data* to max_size (accounting for skip_header), if a limit is set."""
        if self.max_size is None:
            return data
        else:
            return data[:self.max_size - self.skip_header]

    def get_next_packet(self):
        """Load the next capture file (minus skip_header) and advance the counter."""
        assert not self.finished
        pathname = self.packet_template % (self.next_packet)
        self.next_packet += 1

        with open(pathname, 'rb') as h:
            return h.read()[self.skip_header:]

    def write(self, endpoint, data):
        """Assert that *data* matches the next captured packet; dump a diff otherwise."""
        self.log('Write, %d, %d bytes' % (endpoint, len(data)))
        expected = self.get_next_packet()
        data = self.trim(data)
        if expected == data:
            self.log(' packet %d OK' % (self.next_packet - 1))
        else:
            # When the capture file is exactly max_size long, the mismatch
            # may just be truncation of the capture rather than a real bug.
            may_be_trimmed = self.max_size is not None and len(expected) == self.max_size - self.skip_header
            self.log(' packet %d mismatch (%s %d bytes)' % (self.next_packet - 1,
                'possibly-trimmed,' if may_be_trimmed else 'expected', len(expected)))
            hexcompare(expected, data)
            raise Exception()

    def read(self, endpoint, max_size):
        """Return the next captured packet, failing if it exceeds *max_size*."""
        expected = self.get_next_packet()
        self.log('Read packet %x, max %d, actual %d' % (endpoint, max_size, len(expected)))
        if len(expected) > max_size:
            raise Exception()
        self.log(' packet %d OK' % (self.next_packet - 1))
        return expected
| StarcoderdataPython |
5143839 | from typing import Any, Dict, Optional, Union
from tartiflette import Scalar
from tartiflette.constants import UNDEFINED_VALUE
from tartiflette.language.ast import IntValueNode
from tartiflette.utils.values import is_integer
# 32-bit signed integer bounds; the GraphQL Int scalar is limited to this range.
_MIN_INT = -2_147_483_648
_MAX_INT = 2_147_483_647
class ScalarInt:
    """
    Built-in scalar which handle int values.
    """

    def coerce_output(self, value: Any) -> int:
        """
        Coerce the resolved value for output.
        :param value: value to coerce
        :type value: Any
        :return: the coerced value
        :rtype: int
        """
        # pylint: disable=no-self-use
        # bool is a subclass of int: pass booleans through as 0/1.
        if isinstance(value, bool):
            return int(value)

        try:
            result = value
            if value and isinstance(value, str):
                float_value = float(value)
                result = int(float_value)
                # Reject numeric strings with a fractional part (e.g. "1.5").
                if result != float_value:
                    raise ValueError()
            if not is_integer(result):
                raise ValueError()
        except Exception:  # pylint: disable=broad-except
            raise TypeError(
                f"Int cannot represent non-integer value: < {value} >."
            )

        # GraphQL Int must fit in a 32-bit signed integer.
        if not _MIN_INT <= result <= _MAX_INT:
            raise TypeError(
                "Int cannot represent non 32-bit signed integer value: "
                f"< {value} >."
            )
        return result

    def coerce_input(self, value: Any) -> int:
        """
        Coerce the user input from variable value.
        :param value: value to coerce
        :type value: Any
        :return: the coerced value
        :rtype: int
        """
        # pylint: disable=no-self-use
        # ¯\_(ツ)_/¯ booleans are int: `assert isinstance(True, int) is True`
        if not is_integer(value):
            raise TypeError(
                f"Int cannot represent non-integer value: < {value} >."
            )
        if not _MIN_INT <= value <= _MAX_INT:
            raise TypeError(
                "Int cannot represent non 32-bit signed integer value: "
                f"< {value} >."
            )
        return int(value)

    def parse_literal(self, ast: "Node") -> Union[int, "UNDEFINED_VALUE"]:
        """
        Coerce the input value from an AST node.
        :param ast: AST node to coerce
        :type ast: Node
        :return: the coerced value
        :rtype: Union[int, UNDEFINED_VALUE]
        """
        # pylint: disable=no-self-use
        # Anything other than an integer literal is undefined for this scalar.
        if not isinstance(ast, IntValueNode):
            return UNDEFINED_VALUE

        try:
            value = int(ast.value)
            if _MIN_INT <= value <= _MAX_INT:
                return value
        except Exception:  # pylint: disable=broad-except
            pass
        # Out-of-range or unparsable literal.
        return UNDEFINED_VALUE
def bake(schema_name: str, config: Optional[Dict[str, Any]] = None) -> str:
    """
    Links the scalar to the appropriate schema and returns the SDL related
    to the scalar.
    :param schema_name: schema name to link with
    :param config: configuration of the scalar
    :type schema_name: str
    :type config: Optional[Dict[str, Any]]
    :return: the SDL related to the scalar
    :rtype: str
    """
    # pylint: disable=unused-argument
    # Register the Python implementation under the "Int" name, then return
    # the SDL snippet declaring the scalar in the schema language.
    Scalar("Int", schema_name=schema_name)(ScalarInt())
    return '''
    """The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1."""
    scalar Int
    '''
| StarcoderdataPython |
3547665 | <gh_stars>1-10
import warnings
import torch
import torch.nn as nn
try:
from mmcv.ops import RoIAlign, RoIPool
except (ImportError, ModuleNotFoundError):
warnings.warn('Please install mmcv-full to use RoIAlign and RoIPool')
try:
import mmdet # noqa
from mmdet.models import ROI_EXTRACTORS
except (ImportError, ModuleNotFoundError):
warnings.warn('Please install mmdet to use ROI_EXTRACTORS')
class SingleRoIExtractor3D(nn.Module):
    """Extract RoI features from a single level feature map.

    Args:
        roi_layer_type (str): Specify the RoI layer type. Default: 'RoIAlign'.
        featmap_stride (int): Strides of input feature maps. Default: 16.
        output_size (int | tuple): Size or (Height, Width). Default: 16.
        sampling_ratio (int): number of inputs samples to take for each
            output sample. 0 to take samples densely for current models.
            Default: 0.
        pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
            Default: 'avg'.
        aligned (bool): if False, use the legacy implementation in
            MMDetection. If True, align the results more perfectly.
            Default: True.
        with_temporal_pool (bool): if True, avgpool the temporal dim.
            Default: True.
        with_global (bool): if True, concatenate the RoI feature with global
            feature. Default: False.

    Note that sampling_ratio, pool_mode, aligned only apply when roi_layer_type
    is set as RoIAlign.
    """

    def __init__(self,
                 roi_layer_type='RoIAlign',
                 featmap_stride=16,
                 output_size=16,
                 sampling_ratio=0,
                 pool_mode='avg',
                 aligned=True,
                 with_temporal_pool=True,
                 with_global=False):
        super().__init__()
        self.roi_layer_type = roi_layer_type
        assert self.roi_layer_type in ['RoIPool', 'RoIAlign']
        self.featmap_stride = featmap_stride
        # RoI coordinates are in input-image space; scale them down to
        # feature-map space.
        self.spatial_scale = 1. / self.featmap_stride

        self.output_size = output_size
        self.sampling_ratio = sampling_ratio
        self.pool_mode = pool_mode
        self.aligned = aligned

        self.with_temporal_pool = with_temporal_pool
        self.with_global = with_global

        if self.roi_layer_type == 'RoIPool':
            self.roi_layer = RoIPool(self.output_size, self.spatial_scale)
        else:
            self.roi_layer = RoIAlign(
                self.output_size,
                self.spatial_scale,
                sampling_ratio=self.sampling_ratio,
                pool_mode=self.pool_mode,
                aligned=self.aligned)
        # Used only when with_global is True, to pool whole-frame context.
        self.global_pool = nn.AdaptiveAvgPool2d(self.output_size)

    def init_weights(self):
        # No learnable parameters to initialise.
        pass

    # The shape of feat is N, C, T, H, W
    def forward(self, feat, rois):
        """Extract per-RoI features for every temporal slice of *feat*."""
        if not isinstance(feat, tuple):
            feat = (feat, )

        # Multiple feature levels can only be fused after temporal pooling.
        if len(feat) >= 2:
            assert self.with_temporal_pool
        if self.with_temporal_pool:
            # Average over the temporal dim (kept as size 1).
            feat = [torch.mean(x, 2, keepdim=True) for x in feat]
        # Fuse levels along the channel dim.
        # NOTE(review): 'axis' is a numpy-style alias accepted by torch.cat;
        # 'dim' is the canonical keyword.
        feat = torch.cat(feat, axis=1)

        roi_feats = []
        for t in range(feat.size(2)):
            frame_feat = feat[:, :, t].contiguous()
            roi_feat = self.roi_layer(frame_feat, rois)
            if self.with_global:
                # Concatenate each RoI feature with the pooled feature of
                # the frame (sample) it came from; rois[:, 0] is the batch
                # index of each RoI.
                global_feat = self.global_pool(frame_feat.contiguous())
                inds = rois[:, 0].type(torch.int64)
                global_feat = global_feat[inds]
                roi_feat = torch.cat([roi_feat, global_feat], dim=1)
                roi_feat = roi_feat.contiguous()
            roi_feats.append(roi_feat)

        # Stack per-timestep features back into a temporal dimension.
        return torch.stack(roi_feats, dim=2)
# Register with mmdet's ROI_EXTRACTORS registry only if the mmdet import at
# the top of the file succeeded ('mmdet' then exists in the module namespace).
if 'mmdet' in dir():
    ROI_EXTRACTORS.register_module()(SingleRoIExtractor3D)
| StarcoderdataPython |
4959058 | <filename>aioqiwi/core/tooling/datetime.py
import datetime
import typing
class DatetimeModule:
    """Helpers for formatting/parsing datetimes in the Qiwi API format."""

    # Moscow city default timezone offset, substituted into the format.
    TZD = "03:00"

    # Qiwi API datetime format; '{}' is filled in with TZD.
    DATETIME_FMT = "%Y-%m-%dT%H:%M:%S+{}"

    @property
    def datetime_fmt(self):
        """Get datetime format string with qiwi TZD"""
        return self.DATETIME_FMT.format(self.TZD)

    def parse_date_string(self, dt: str) -> datetime.datetime:
        """Get datetime with qiwi TZD format from string"""
        return datetime.datetime.strptime(dt, self.datetime_fmt)

    def parse_datetime(self, dt: datetime.datetime):
        """Format *dt* using the Qiwi datetime format."""
        return dt.strftime(self.datetime_fmt)

    def check_and_parse_datetime(
        self, dt: typing.Optional[typing.Union[str, datetime.datetime]]
    ) -> typing.Optional[str]:
        """Pass strings through unchanged, format datetimes, map anything else to None."""
        if isinstance(dt, str):
            return dt
        if isinstance(dt, datetime.datetime):
            return self.parse_datetime(dt)
        return None
| StarcoderdataPython |
1626161 | # Generated by Django 2.2.1 on 2019-07-09 04:04
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
    """Adds User.display_name and relaxes User.icon to an optional StdImageField."""

    dependencies = [
        ('authentication', '0011_auto_20190625_1458'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='display_name',
            field=models.CharField(default='', max_length=100, verbose_name='表示名'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='icon',
            # NOTE(review): 'media//icons/' contains a double slash —
            # confirm the intended upload path.
            field=stdimage.models.StdImageField(blank=True, null=True, upload_to='media//icons/'),
        ),
    ]
| StarcoderdataPython |
3213299 | <gh_stars>10-100
from django.db import migrations
from django.utils import timezone
def update_login(apps, schema_editor):
    """Data migration: stamp every existing user with last_login = now."""
    UserModel = apps.get_model("users", "User")
    UserModel.objects.all().update(last_login=timezone.now())
class Migration(migrations.Migration):
    """Runs the update_login data migration after users.0025."""

    dependencies = [
        ("users", "0025_user_phone_number"),
    ]

    operations = [
        migrations.RunPython(update_login),
    ]
| StarcoderdataPython |
4836614 | <filename>graphics/VTK-7.0.0/Examples/Infovis/Python/streaming_statistics_pyqt.py
#!/usr/bin/env python
from __future__ import print_function
from vtk import *
import os.path
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
data_dir = VTK_DATA_ROOT + "/Data/Infovis/SQLite/"
# NOTE(review): both fallbacks below assign the exact same path as above,
# so they never change anything — likely residue from an example that
# originally probed several candidate locations.
if not os.path.exists(data_dir):
    data_dir = VTK_DATA_ROOT + "/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
    data_dir = VTK_DATA_ROOT + "/Data/Infovis/SQLite/"
sqlite_file = data_dir + "temperatures.db"
# I'm sure there's a better way then these global vars
# Cursor state for the pseudo-streaming source below.
currentRow = 0;
numberOfRows = 1;
done = False;
# Programmable filter that re-emits its input table a few rows at a time,
# simulating a streaming data source.
psuedoStreamingData = vtkProgrammableFilter()

def streamData():
    """Execute callback: copy the next batch of rows from input to output."""
    global done
    global currentRow
    input = psuedoStreamingData.GetInput()
    output = psuedoStreamingData.GetOutput()

    # Copy just the columns names/types
    output.GetRowData().CopyStructure(input.GetRowData())

    # Loop through all the input data and grab the next bunch of rows
    startRow = currentRow
    endRow = startRow + numberOfRows
    if (endRow >= input.GetNumberOfRows()):
        endRow = input.GetNumberOfRows()
        done = True;
    print("streaming: ", startRow, "-", endRow)
    for i in range(startRow, endRow):
        output.InsertNextRow(input.GetRow(i))
    currentRow = endRow;

psuedoStreamingData.SetExecuteMethod(streamData)
class Timer(QObject):
    """Qt timer that pushes one batch through the streaming pipeline every 100 ms."""

    def __init__(self, parent=None):
        super(Timer, self).__init__(parent)

        # Setup the data streaming timer
        self.timer = QTimer()
        QObject.connect(self.timer, SIGNAL("timeout()"), self.update)
        self.timer.start(100)

    def update(self):
        """Timer tick: quit once streaming is finished, otherwise push a batch and print stats."""
        if (done):
            quit();
        psuedoStreamingData.Modified() # Is there a way to avoid this?
        psuedoStreamingData.GetExecutive().Push()
        printStats()
def printStats():
    """Dump the streaming statistics output (primary and derived tables)."""
    sStats = ss.GetOutputDataObject( 1 )
    sPrimary = sStats.GetBlock( 0 )
    sDerived = sStats.GetBlock( 1 )
    sPrimary.Dump( 15 )
    sDerived.Dump( 15 )
if __name__ == "__main__":
    """ Main entry point of this python script """
    # Set up streaming executive
    streamingExec = vtkThreadedStreamingPipeline()
    vtkAlgorithm.SetDefaultExecutivePrototype(streamingExec)
    streamingExec.FastDelete()
    vtkThreadedStreamingPipeline.SetAutoPropagatePush(True)

    # Pull the table from the database
    databaseToTable = vtkSQLDatabaseTableSource()
    databaseToTable.SetURL("sqlite://" + sqlite_file)
    databaseToTable.SetQuery("select * from main_tbl")

    # Hook up the database to the streaming data filter
    psuedoStreamingData.SetInputConnection(databaseToTable.GetOutputPort())

    # Calculate offline(non-streaming) descriptive statistics
    # (for comparison against the streaming results printed later).
    print("# Calculate offline descriptive statistics:")
    ds = vtkDescriptiveStatistics()
    ds.SetInputConnection(databaseToTable.GetOutputPort())
    ds.AddColumn("Temp1")
    ds.AddColumn("Temp2")
    ds.Update()
    dStats = ds.GetOutputDataObject( 1 )
    dPrimary = dStats.GetBlock( 0 )
    dDerived = dStats.GetBlock( 1 )
    dPrimary.Dump( 15 )
    dDerived.Dump( 15 )

    # Stats filter to place 'into' the streaming filter
    inter = vtkDescriptiveStatistics()
    inter.AddColumn("Temp1")
    inter.AddColumn("Temp2")

    # Calculate online(streaming) descriptive statistics
    print("# Calculate online descriptive statistics:")
    ss = vtkStreamingStatistics()
    ss.SetStatisticsAlgorithm(inter)
    ss.SetInputConnection(psuedoStreamingData.GetOutputPort())

    # Spin up the timer
    app = QApplication(sys.argv)
    stream = Timer()
    sys.exit(app.exec_())
| StarcoderdataPython |
4835658 | from typing import Any, Optional
import psycopg2
from pypandas_sql.dbconnector.db_connector import DBConnector
from utils import config_helper, credential_helper, filepath_helper
# Key under which resolved credentials are stored in connection_attr.
__CREDENTIALS__ = 'credentials'
# SQLAlchemy dialect+driver name used for Redshift connections.
__ENGINE_NAME__ = 'redshift+psycopg2'
class RedshiftConnector(DBConnector):
    """DBConnector for Amazon Redshift, configured from the project's Redshift config file."""

    def __init__(self) -> None:
        config_path = filepath_helper.get_redshift_config_path()
        connection_attr = config_helper.read_redshift_config_file(config_path)
        credentials = credential_helper.get_redshift_credentials(connection_attr)
        # Stash the resolved credentials alongside the raw config values so
        # get_uri/get_connection can reach them.
        connection_attr[__CREDENTIALS__] = credentials
        super(RedshiftConnector, self).__init__(engine_name=__ENGINE_NAME__, connection_attr=connection_attr)

    def get_uri(self, schema: Optional[str]) -> str:
        """Build a SQLAlchemy-style connection URI for *schema* (must be non-empty)."""
        assert schema is not None and len(schema) > 0
        credentials = self.connection_attr[__CREDENTIALS__]
        host = config_helper.get_redshift_host(self.connection_attr)
        port = config_helper.get_redshift_port(self.connection_attr)
        return f'{self.engine_name}://{credentials.user}:{credentials.password}@{host}:{port}/{schema}'

    def get_connection(self, schema: Optional[str]) -> Any:
        """Open a raw psycopg2 connection to *schema* (must be non-empty)."""
        assert schema is not None and len(schema) > 0
        credentials = self.connection_attr[__CREDENTIALS__]
        return psycopg2.connect(dbname=schema,
                                host=config_helper.get_redshift_host(self.connection_attr),
                                port=config_helper.get_redshift_port(self.connection_attr),
                                user=credentials.user,
                                password=credentials.password)
| StarcoderdataPython |
9771384 | from setuptools import setup
# Packaging metadata for the 'deviantart' DeviantArt API wrapper.
# NOTE(review): author="<NAME>" and author_email="<EMAIL>" are anonymised
# placeholders — restore real values before publishing.
setup(
    name="deviantart",
    version="0.1.5",
    description="A Python wrapper for the DeviantArt API",
    url="https://github.com/neighbordog/deviantart",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    packages=["deviantart"],
    install_requires=[
        "sanction"
    ]
)
| StarcoderdataPython |
113869 | # Generated by Django 2.1.7 on 2019-06-03 05:58
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the events app: creates the Event model."""

    initial = True

    dependencies = [
        ('contacts', '0003_merge_20190214_1427'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, verbose_name='Event')),
                ('event_type',
                 models.CharField(choices=[('Recurring', 'Recurring'), ('Non-Recurring', 'Non-Recurring')],
                                  max_length=20)),
                ('status', models.CharField(blank=True,
                                            choices=[('Planned', 'Planned'), ('Held', 'Held'), ('Not Held', 'Not Held'),
                                                     ('Not Started', 'Not Started'), ('Started', 'Started'),
                                                     ('Completed', 'Completed'), ('Canceled', 'Canceled'),
                                                     ('Deferred', 'Deferred')], max_length=64, null=True)),
                ('start_date', models.DateField(default=None)),
                ('start_time', models.TimeField(default=None)),
                ('end_date', models.DateField(default=None)),
                ('end_time', models.TimeField(blank=True, default=None, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('is_active', models.BooleanField(default=True)),
                ('disabled', models.BooleanField(default=False)),
                ('assigned_to',
                 models.ManyToManyField(blank=True, related_name='event_assigned', to=settings.AUTH_USER_MODEL)),
                ('contacts', models.ManyToManyField(blank=True, related_name='event_contact', to='contacts.Contact')),
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,
                                                 related_name='event_created_by_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.