id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1799982 | from octopus.arch.wasm.cfg import WasmCFG
# Eos smart contract == wasm module
class EosCFG(WasmCFG):
    """Control-flow graph for an EOS smart contract.

    An EOS smart contract is a plain WASM module, so the base WasmCFG
    does all of the real work; this subclass only fixes EOS-flavoured
    defaults (output filename).
    """

    def __init__(self, module_bytecode):
        # Fix: use super() consistently with visualize_instrs_per_funcs
        # instead of the explicit WasmCFG.__init__ call.
        super().__init__(module_bytecode=module_bytecode)

    def visualize_instrs_per_funcs(self, show=True, save=True,
                                   out_filename="eos_func_analytic.png",
                                   fontsize=8):
        """Plot instruction counts per function; see WasmCFG for details."""
        super().visualize_instrs_per_funcs(show, save, out_filename, fontsize)
| StarcoderdataPython |
3205764 | import os
import PIL
from rect import Rect
_imageFolder = ''
def setImageFolder(path):
    '''
    Set the module-wide image folder path.

    Simplifies path definitions for SpriteSheets and SheetImages:
    SheetImage filenames are later resolved relative to this folder.
    '''
    global _imageFolder
    _imageFolder = path
class CssProp:
    """A CSS selector paired with a pixel offset, used during CSS generation."""

    def __init__(self, selector, pos=(0, 0)):
        # selector: CSS selector string; pos: (x, y) background offset in pixels.
        self.selector = selector
        self.pos = pos
class SheetImage:
    '''
    Object representing properties of an image which should be placed into a
    spritesheet; also represents properties for CSS generation.

    attributes:
        filename - image filename ('' when constructed from an Image object)
        path     - full path to the image file ('' when constructed from an Image)
        image    - PIL.Image.Image object
        margin   - (top, right, bottom, left) reserved space in the sheet
        cssProp  - list of CssProp instances this image is used in
        color, background, repeat - counterparts of the same CSS properties
    '''

    # Maps a CSS 'background-repeat' keyword to (repeat-x, repeat-y) flags.
    _repeatDict = {
        'no-repeat': (False, False),
        'repeat-x': (True, False),
        'repeat-y': (False, True),
        'repeat': (True, True),
    }

    def __init__(self, filename=None, image=None, margin=(0, 0, 0, 0), pos=(0, 0),
                 color=None, background=None, usedInCss=None, repeat='no-repeat'):
        '''
        image can be a filename or a PIL.Image object (exactly one of the two).

        pos - shift of the image in pixels from the top-left corner (not
              including margin); used only for CSS generation and background
              positioning.
        margin - top, right, bottom, left margin values; reserves free space
              around the image in the sprite sheet. Image size + margin +
              abs(image position) should be greater than the containing div size.
        color, repeat - counterparts of the same CSS properties.

        Raises ValueError if both or neither of filename/image are given, or
        if usedInCss has an unsupported type.
        '''
        # NOTE(review): 'pos' is accepted and documented but never stored on
        # self — confirm whether positioning moved entirely to CssProp.pos.
        # Fix: the original raised the undefined name 'ArgumentError'
        # (a NameError at runtime); ValueError is the correct builtin.
        if filename and image:
            raise ValueError('only filename or image argument can be provided')
        if not filename and not image:
            raise ValueError('filename or image argument has to be provided')
        if image:
            assert isinstance(image, PIL.Image.Image), 'other image types are not supported'
            self.filename = ''
            self.path = ''
            self.image = image
        elif filename:
            self.filename = filename
            self.path = os.path.join(_imageFolder, self.filename)
            self.image = PIL.Image.open(self.path)
        self._setCssProp(usedInCss)
        self.margin = margin
        # CSS properties
        self.color = color
        self.background = background
        self.repeat = repeat

    def _setCssProp(self, usedIn):
        """Normalize usedIn (None | str | CssProp | iterable of CssProp) to a list."""
        if usedIn is None:
            self.cssProp = []
            return
        if isinstance(usedIn, str):
            self.cssProp = [CssProp(usedIn)]
        elif isinstance(usedIn, CssProp):
            self.cssProp = [usedIn]
        else:
            if not hasattr(usedIn, '__iter__'):
                # Fix: the original message contained a '%s' placeholder that
                # was never filled in; interpolate the offending type.
                raise ValueError('usedIn has invalid type %s, it can be str, '
                                 'CssProp or list of CssProp' % type(usedIn))
            self.cssProp = list(usedIn)

    def getInnerPos(self, outerPos):
        """Translate an outer (sheet) position to the image's inner position."""
        outerX, outerY = outerPos
        return (
            outerX + self.marginLeft,
            outerY + self.marginTop
        )

    def getOuterRect(self):
        """Rect of the image including its reserved margins."""
        r = Rect()
        r.size = (
            self.marginLeft + self.image.size[0] + self.marginRight,
            self.marginTop + self.image.size[1] + self.marginBottom
        )
        return r

    def getRepeats(self):
        """Return (repeat-x, repeat-y) flags for this image's CSS repeat mode."""
        return self._repeatDict[self.repeat]

    @property
    def marginTop(self): return self.margin[0]
    @property
    def marginRight(self): return self.margin[1]
    @property
    def marginBottom(self): return self.margin[2]
    @property
    def marginLeft(self): return self.margin[3]
| StarcoderdataPython |
197910 | from astropy.io import fits
from astropy import units as u
import sys, os, pickle
# this version switches to filfinder2D (from filfind_class::fil_finder_2D)
gitpaths=['/Users/remy/lustre/naasc/users/rindebet/github/lmc-alma-analysis/']
for gitpath in gitpaths:
if not gitpath in sys.path:
sys.path.insert(0,gitpath)
from fil_finder import FilFinder2D
import pylab as pl
pl.ion()
import numpy as np
from scipy.signal import savgol_filter
import pdb
label="GMC1_12CO"
mom8file = "GMC1_12CO_12m.maximum.fits"
mom8file = "GMC1_12CO_12m7mTPF.maximum.fits"
cubefile = "GMC1_12CO_12m.image.fits"
#mom0file = "GMC1_12CO_12m.integrated.fits" # use mom0pm5 instead?
distpc=5e4
#def run_filfinder(label='mycloud', cubefile=None, mom8file=None, mom0file=None, redo=False,
#distpc=5e4):
plotfiledir = label+".plots" # NO TRAILING / !
filpropfile = label+".filprops.pkl" # phys props
# initial fil finding in mom8 image:
fits_hdu = fits.open(mom8file)[0]
bmwidth=np.sqrt(fits_hdu.header['BMIN']*fits_hdu.header['BMAJ']) *3600 # arcsec
# setting beamwidth will have no effect if a fits header is given
# beamwidth=bmwidth*u.arcsec,
# it turns out it uses sigma not FWHM so later, nbeam=#sigma=1.6pix here
fils=FilFinder2D(fits_hdu, distance=distpc*u.pc)
# 30Dor: glob=72, flatten=100
# GMC1 1st time: 50, 85
fils.save_name=plotfiledir
if not os.path.exists(plotfiledir):
os.mkdir(plotfiledir)
redo=True
# give radial profiles more breathing room.
fils.skeleton_pad_size=5
fils.preprocess_image(flatten_percent=90)
# 30dor: adapt=75, size=800
# GMC1 first time: 50,500
# can lower glob_thresh, get more small things, then lower adapt_thresh to
# make them break up more, and finally let size_thresh kill them
fils.create_mask(verbose=True, border_masking=False, adapt_thresh=55*u.pix, size_thresh=300*u.pix**2, use_existing_mask=False,save_png=True,smooth_size=2*u.pix, glob_thresh=50)
#Adaptive thresholding patch is larger than 40pixels. Regridding has been disabld.
# warnings.warn("Adaptive thresholding patch is larger than 40"
print "calling medskel"
fils.medskel(verbose=True,save_png=True)
print "getting cube"
cube=fits.getdata(cubefile)[0]
cube[np.isnan(cube)]=0
print "calling analyze_skeletons"
# 30dor:
# fils.branch_thresh=200 # typical lengths are 1-100
# raise from 10 to 30 to elminate more little things
# fils.analyze_skeletons(verbose=True, save_png=True, skel_thresh=10,cubefile=cubefile)
# mc3: double filter instead, with branch_thresh=30; branch_thresh says it overrides all previous settings
fils.analyze_skeletons(verbose=True, save_png=True, prune_criteria="length",max_iter=1,
#cubefile=cubefile,
branch_nbeam_lengths=5,nbeam_lengths=3, skel_thresh=10)
#relintens_thresh=0.1, branch_thresh=30,
pl.clf()
pl.imshow(fils.skeleton,origin="bottom")
pl.xlim(200,850)
pl.ylim(100,650)
pl.draw()
import pdb
pdb.set_trace()
fils.analyze_skeletons(verbose=True, save_png=True, cubefile=cubefile, branch_thresh=30)
# the labeled filament arrays are left separated even though the branches
# have been pruned, so we need to recalculate the labelled_filament_arrays
# pix_identify is supposed to do that;
# fils.filament_arrays comes from make_final_skeletons
if False:
if redo:
pl.clf()
vmin = np.percentile(fils.flat_img[np.isfinite(fils.flat_img)], 20)
vmax = np.percentile(fils.flat_img[np.isfinite(fils.flat_img)], 99)
pl.imshow(fils.flat_img, interpolation=None, origin="lower",
vmin=vmin, vmax=vmax*1.5, cmap='jet')
pl.contour(fils.skeleton_nopad, colors='r')
offs=fils.array_offsets
for i in range(len(offs)):
pl.text(0.5*(offs[i][0][1]+offs[i][1][1]),0.5*(offs[i][0][0]+offs[i][1][0]),str(i),color='orange')
pl.savefig(plotfiledir+"/"+plotfiledir+".skel.png")
#if rerun:
pl.clf()
for n in range(len(fils.branch_properties['length_2d'])):
z=np.where(np.array(fils.branch_properties['length'][n])>0.5)[0]
pl.plot(np.array(fils.branch_properties['length_2d'][n])[z],
np.array(fils.branch_properties['length'][n])[z]/
np.array(fils.branch_properties['length_2d'][n])[z],'o',label=str(n))
pl.xlabel("2D length")
pl.ylabel("3D/2D length")
pl.legend(prop={"size":8})
pl.xscale("log")
pl.yscale("log")
pl.savefig(plotfiledir+"/"+plotfiledir+".lengths_3d_rat2d.png")
# find_widths calculates the 2d distance transform, and uses self.image,
# self.imgscale
fils.image=fits.getdata(mom0file)
fils.save_name=plotfiledir+".mom0wid"
fils.find_widths(verbose=True,save_png=True,pad_to_distance=0.25,max_distance=0.5)
mom0width=fils.width_fits.copy()
pl.clf()
vmin = np.percentile(fils.image[np.isfinite(fils.image)], 20)
vmax = np.percentile(fils.image[np.isfinite(fils.image)], 99)
pl.imshow(fils.image, interpolation=None, origin="lower",
vmin=vmin, vmax=vmax*1.5, cmap='jet')
pl.contour(fils.skeleton_nopad, colors='r')
offs=fils.array_offsets
for i in range(len(offs)):
pl.text(0.5*(offs[i][0][1]+offs[i][1][1]),0.5*(offs[i][0][0]+offs[i][1][0]),str(i),color='orange')
pl.savefig(plotfiledir+"/"+plotfiledir+".int.skel.png")
# widths and orientations on the mom8 directly:
fils.image=fits.getdata(mom8file)
fils.save_name=plotfiledir+".mom8wid"
fils.find_widths(verbose=True,save_png=True,pad_to_distance=0.25,max_distance=0.5)
mom8width=fils.width_fits.copy()
pl.clf()
x=mom0width['Parameters'][:,1]
dx=mom0width['Errors'][:,1]
y=mom8width['Parameters'][:,1]
dy=mom8width['Errors'][:,1]
pl.plot(x,y,'yo')
#ok= [0,4,6,12,13,15,18,20,21,22,25,27,28,30,31,34]
#good=[1,5,9,10,14,16,26,32,33,35]
#pl.plot(x[good],y[good],'ro')
#pl.plot(x[ok],y[ok],'mo')
#for i in good:
# if not (np.isnan(x[i]) or np.isnan(y[i])):
# pl.text(x[i],y[i],str(i))
pl.savefig(plotfiledir+"/"+plotfiledir+".widths.mom8.mom0.png")
pl.clf()
x=mom0width['Parameters'][:,1]
dx=mom0width['Errors'][:,1]
y=mom8width['Parameters'][:,1]/x
dy=y*np.sqrt( (mom8width['Errors'][:,1]/mom8width['Parameters'][:,1])**2+
(mom0width['Errors'][:,1]/mom0width['Parameters'][:,1])**2 )
pl.errorbar(x,y,xerr=dx,yerr=dy,fmt='o')
pl.xlabel("width of mom0")
pl.ylabel("mom8 width / mom0 width")
pl.plot(pl.xlim(),[1,1],'k',linestyle='dotted')
pl.savefig(plotfiledir+"/"+plotfiledir+".widtherrs.mom8.mom0.png")
# return fils
if __name__ == "__main__":
    # Fix: 'sys.pargv' was a typo for 'sys.argv'.
    # NOTE(review): run_filfinder is commented out above, so this entry point
    # raises NameError until that function wrapper is restored — confirm intent.
    run_filfinder(label=sys.argv[1], cubefile=sys.argv[2], mom0file=sys.argv[3],
                  mom8file=sys.argv[4])
| StarcoderdataPython |
4821034 | import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from enum import Enum
from collections import deque, defaultdict
class PDN:
    """Discrete-time model of a power delivery network (series RLC fed by VDC).

    Each call to get_curr advances a second-order difference equation by one
    clock period and returns the new output voltage.
    """

    def __init__(self, L, C, R, VDC, CLK):
        self.L = L
        self.C = C
        self.R = R
        self.VDC = VDC
        self.CLK = CLK
        # Two voltage taps and one current tap of history drive the recurrence.
        self.vout_2_cycle_ago = VDC
        self.vout_1_cycle_ago = VDC
        self.iout_1_cycle_ago = 0

    def get_curr(self, current):
        """Step the model one clock cycle with load `current`; return vout."""
        ts = 1 / self.CLK
        LmulC = self.L * self.C
        LdivR = self.L / self.R
        # Same term order as the derivation; do not reassociate (float identity).
        vout = (self.VDC * ts ** 2 / LmulC
                + self.vout_1_cycle_ago * (2 - ts / LdivR)
                + self.vout_2_cycle_ago * (ts / LdivR - 1 - ts ** 2 / LmulC)
                - current * self.R * ts ** 2 / LmulC
                - (1 / self.C) * ts * (current - self.iout_1_cycle_ago))
        # Shift the history window forward by one cycle.
        self.vout_2_cycle_ago = self.vout_1_cycle_ago
        self.vout_1_cycle_ago = vout
        self.iout_1_cycle_ago = current
        return vout
class Events(Enum):
    """Microarchitectural event codes parsed from the stat dump (1-based)."""
    NO_EVENT = 1
    BRANCH_T = 2
    BRANCH_NT = 3
    BRANCH_MP = 4
    FETCH = 5
    TLB_STALL = 6
    ICACHE_STALL = 7
    COMMIT_BLOCK = 8
    IQ_FULL = 9
    LSQ_FULL = 10
    LOAD_EX = 11
    LOAD_WB = 12
    LOAD_CFETCH = 13
    STORE_EXECUTE = 14
    STORE_WB = 15
    INSTR_DISPATCH = 16
    INSTR_ISSUE = 17
    INSTR_EXECUTE = 18
    INSTR_COMMIT = 19
    MEM_MP = 20
    EMPTY_EVENT = 21
    DUMMY_EVENT2 = 22
    # from other stats
    DCACHE_MISS = 23
    ICACHE_MISS = 24
    L2_MISS = 25
    # Fix: was 16, which silently made TLB_MISS an Enum *alias* of
    # INSTR_DISPATCH (duplicate Enum values create aliases). 26 continues
    # the 23/24/25 sequence, matching the TLB_MISS slot in Cycle_Dump.event_map.
    TLB_MISS = 26
class Cycle_Dump:
    """Incremental parser for a per-cycle stat dump stream.

    Reads lines from the open file object `stats` and accumulates the
    per-cycle fields (cycle counter, supply current/voltage, anchor PC)
    plus a deduplicated list of event codes seen in the current cycle.
    Stat lines are dispatched to the method named after the stat's last
    dotted component (see parseCycle).
    """

    # NOTE(review): this map is 0-based while the Events enum above is
    # 1-based — confirm which numbering the stat dump actually uses.
    event_map = {
        #from new_events_stats
        0:'NO_EVENT',
        1:'BRANCH_T',
        2:'BRANCH_NT',
        3:'BRANCH_MP',
        4:'FETCH',
        5:'TLB_STALL',
        6:'ICACHE_STALL',
        7:'COMMIT_BLOCK',
        8:'IQ_FULL',
        9:'LSQ_FULL',
        10:'LOAD_EX',
        11:'LOAD_WB',
        12:'LOAD_CFETCH',
        13:'STORE_EXECUTE',
        14:'STORE_WB',
        15:'INSTR_DISPATCH',
        16:'INSTR_ISSUE',
        17:'INSTR_EXECUTE',
        18:'INSTR_COMMIT',
        19:'MEM_MP',
        20:'EMPTY_EVENT',
        21:'DUMMY_EVENT2',
        #from other stats
        22:'DCACHE_MISS',
        23:'ICACHE_MISS',
        24:'L2_MISS',
        25:'TLB_MISS',
    }
    # Event codes ignored when collecting new events for a cycle.
    new_events_blacklist = {
        0:'NO_EVENT',
        20:'EMPTY_EVENT',
        21:'DUMMY_EVENT2',
    }

    def __init__(self, stats):
        """Prime the parser on `stats` (open file): skip the two header lines
        and zero all running counters."""
        self.ve_count = 0
        self.action_count = 0
        self.stats = stats
        # Skip the two-line header of the dump.
        self.stats.readline()
        self.stats.readline()
        self.new_events_var = [] #list of new events the current cycle
        self.new_events_prev_var = [] #list of new events the previous cycle of the cycle dump
        self.table_index_count = 0
        self.cycle = None
        self.supply_curr = None
        self.supply_volt = None
        self.supply_volt_prev = None
        self.anchorPC_var = None
        self.numCycles_var = None
        # Monotonic high-water marks used by overall_misses to detect increases.
        self.branchMispredicts_count = 0
        self.memOrderViolationEvents_count = 0
        self.DcacheMisses_count = 0
        self.IcacheMisses_count = 0
        self.TLBcacheMisses_count = 0
        self.L2cacheMisses_count = 0
        keys = self.event_map.keys()
        # Per-event-code occurrence totals across the whole run.
        self.event_count = {k: 0 for k in keys}
        self.EOF = False

    def reset(self):
        """Fold this cycle's events into the running totals and clear
        all per-cycle state (call between cycles)."""
        for e in self.new_events_var:
            self.event_count[e] += 1
        self.new_events_var = [] #list of new events the current cycle
        self.new_events_prev_var = [] #list of new events the previous cycle of the cycle dump
        self.table_index_count = 0
        self.cycle = None
        self.supply_curr = None
        self.supply_volt = None
        self.supply_volt_prev = None
        self.anchorPC_var = None
        self.numCycles_var = None
        return

    def new_events(self,line):
        """Record the event code on a 'new_events' stat line, skipping
        blacklisted codes and duplicates within the cycle."""
        linespl = line.split()
        event = int(linespl[1])
        if (not event in self.new_events_blacklist.keys()) and (event not in self.new_events_var):
            self.new_events_var.append(event)
        return

    # # def new_events_prev(self,line):
    # #     linespl = line.split()
    # #     event = int(linespl[1])
    # #     if event != 20:
    # #         self.event_count[event] += 1
    # #         self.new_events_prev_var.append(event)
    #     return

    def counter(self, line):
        """Parse the cycle counter from a 'counter' stat line."""
        linespl = line.split()
        self.cycle = int(linespl[1])
        return

    def supply_current(self, line):
        """Parse the supply current value from its stat line."""
        linespl = line.split()
        self.supply_curr = float(linespl[1])
        return

    def supply_voltage(self, line):
        """Parse the supply voltage value from its stat line."""
        linespl = line.split()
        self.supply_volt = float(linespl[1])
        return

    def anchorPC(self, line):
        """Parse the anchor PC (stored as a hex string) from its stat line."""
        linespl = line.split()
        self.anchorPC_var = hex(int(linespl[1]))
        return

    def numCycles(self,line):
        """Parse the cumulative cycle count from its stat line."""
        linespl = line.split()
        self.numCycles_var = int(linespl[1])
        return

    # def branchMispredicts(self,line):
    #     linespl = line.split()
    #     val = int(linespl[1])
    #     if val > self.branchMispredicts_count:
    #         self.branchMispredicts_count = val
    #         self.new_events_var.append(3) #normally enum but its 2am
    # def memOrderViolationEvents(self,line):
    #     linespl = line.split()
    #     val = int(linespl[1])
    #     if val > self.memOrderViolationEvents_count:
    #         self.memOrderViolationEvents_count = val
    #         self.new_events_var.append(8) #normally enum but its 2am

    def overall_misses(self,line):
        """Translate increases in cumulative cache-miss counters into
        synthetic event codes (22-25, matching event_map)."""
        linespl = line.split()
        val = int(linespl[1])
        # Cache name is the second-to-last dotted component of the stat path.
        cache = line.split()[0].split('.')[-2]
        if (cache == 'l2'):
            if val > self.L2cacheMisses_count:
                self.L2cacheMisses_count = val
                self.new_events_var.append(24) #normally enum but its 2am
        if (cache == 'dcache'):
            if val > self.DcacheMisses_count:
                self.DcacheMisses_count = val
                self.new_events_var.append(22) #normally enum but its 2am
        if (cache == 'icache'):
            if val > self.IcacheMisses_count:
                self.IcacheMisses_count = val
                self.new_events_var.append(23) #normally enum but its 2am
        if (cache == 'itb_walker_cache' or cache == 'dtb_walker_cache'):
            if val > self.TLBcacheMisses_count:
                self.TLBcacheMisses_count = val
                self.new_events_var.append(25) #normally enum but its 2am

    def parseCycle(self):
        """Consume one cycle's worth of stat lines.

        Returns True at end of file, False at the end of a cycle block.
        """
        while(True):
            line = self.stats.readline()
            if not line:
                return True
            #end of 1 cycle of stat dump
            # A line with no alphabetic characters is a cycle separator.
            elif (not line.upper().isupper()):
                # Skip the 4-line separator block.
                for _ in range(4):
                    self.stats.readline()
                # NOTE(review): `line` is unchanged here, so this check can
                # never trigger — presumably the skipped lines were meant to
                # be tested; confirm.
                if not line:
                    return True
                return False
            else:
                #one cycle worth of stat dumps
                # Dispatch on the stat's last dotted component, e.g.
                # 'system.cpu.supply_voltage: 1.2' -> self.supply_voltage(line).
                stat_name = line.split()[0].split('.')[-1].split(':')[0]
                func = getattr(self, stat_name, False)
                if func:
                    func(line)

    def dump(self):
        """Print a human-readable summary of the current cycle's fields."""
        print('******* CYCLE: ',self.cycle,'*********')
        print('SUPPLY CURRENT: ', self.supply_curr)
        print('SUPPLY VOLTAGE: ', self.supply_volt)
        #print('SUPPLY VOLTAGE_prev: ', self.supply_volt_prev)
        print('ANCHOR PC: ', self.anchorPC_var)
        #print("EVENTS: ", [event_map[e] for e in self.new_events_var])
        # print("New Events : ", " ".join([event_map[i] for i in self.new_events_var]) )
        print("***********************************")
def accuracy(action,VE,LEAD_TIME_CAP):
    """Histogram actions by lead time relative to voltage emergencies (VE).

    action, VE: parallel boolean-ish sequences indexed by cycle.
    bins[j]     counts VEs preceded by an action j cycles earlier (hit).
    act_bins[j] counts actions that fired j cycles before a VE (or unmatched).
    Returns [xvar, hits%, false_neg%, false_pos_x, false_pos%] for plotting.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    both scan-back loops are placed under `if ve:` — verify against the
    original source.
    """
    bins = dict()
    act_bins = dict()
    for i,ve in enumerate(VE):
        if ve:
            # Scan backwards for the nearest action within the lead-time cap.
            for j in range(0,LEAD_TIME_CAP):
                if i-j < 0: break
                if action[i-j]:
                    if j in bins.keys(): bins[j] += 1
                    else: bins[j] = 1
                    break
            # Count every action back to the previous VE (false-positive side).
            for j in range(0,LEAD_TIME_CAP):
                if i-j < 0 or (VE[i-j] and j>0): break
                if action[i-j]:
                    if j in act_bins.keys(): act_bins[j] += 1
                    else: act_bins[j] = 1
    # print(bins)
    # print(act_bins)
    xvar = [0]
    hits = [0]
    false_neg = [100]
    running_sum = 0
    VE_count = sum(VE)
    # Cumulative hit rate / residual miss rate per lead-time bucket.
    for key in sorted(bins.keys()):
        running_sum += bins[key]
        false_neg.append(100*(VE_count - running_sum) / VE_count)
        xvar.append(key)
        hits.append(100 * running_sum / VE_count)
    # print(hits)
    # print(xvar)
    false_pos_x = [0]
    false_pos = [100]
    action_count = sum(action)
    running_sum = 0
    # Cumulative false-positive rate per lead-time bucket.
    for k, v in sorted(act_bins.items()):
        running_sum += v
        false_pos.append(100*(action_count - running_sum) / action_count)
        false_pos_x.append(k)
    # Pad both curves to a common x-extent so they can be plotted together.
    if (xvar[-1] < false_pos_x[-1]):
        xvar.append(false_pos_x[-1])
        hits.append(hits[-1])
        false_neg.append(false_neg[-1])
    if (xvar[-1] > false_pos_x[-1]):
        false_pos_x.append(xvar[-1])
        false_pos.append(false_pos[-1])
    # print(false_neg)
    # print(false_pos)
    return [xvar,hits,false_neg,false_pos_x,false_pos]
TEST_LIST_spec=[
#"400.perlbench", NO BINARIES
"401.bzip2",
"403.gcc",
"410.bwaves",
#"416.gamess", NO BINARIES
"429.mcf",
"433.milc",
"434.zeusmp",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
"447.dealII",
"450.soplex",
"453.povray",
"454.calculix",
"456.hmmer",
"458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
"470.lbm",
"471.omnetpp",
"473.astar",
# "481.wrf", \
# "482.sphinx3", \
# "983.xalancbmk", \
# "998.specrand", \
# "999.specrand" \
]
TEST_LIST_mi = [
"basicmath",
"bitcnts",
"blowfish_decrypt",
"blowfish_encrypt",
"qsort",
"susan_smooth",
# "susan_edge",
# "susan_corner",
"dijkstra",
"rijndael_decrypt",
# "rijndael_encrypt",
"sha",
"crc",
"fft",
"ffti",
"toast",
"untoast"
]
| StarcoderdataPython |
107713 | """List view for model news."""
from django.views.generic import ListView
from news.models import News
# Create your views here.
class NewsListView(ListView):
    """List view for the News model.

    Renders 'list.html' with News objects exposed to the template as
    'post_list', paginated three per page.
    """
    model = News
    template_name = "list.html"
    context_object_name = "post_list"
    paginate_by = 3
| StarcoderdataPython |
1692296 | import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import resnet50
class DummyBackbone(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, num_classes: int):
super().__init__()
self.hidden = nn.Linear(input_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, num_classes)
self.act = nn.ReLU()
def forward(self, x, y):
x = x.view(x.size(0), -1)
x = self.act(self.hidden(x))
y_hat = self.act(self.fc(x))
if y is None:
return (y_hat,)
loss = F.cross_entropy(y_hat, y)
return (loss, y_hat)
class ResnetBackbone(nn.Module):
    """ResNet-50 feature extractor with a two-layer classification head.

    forward(x, y) returns (logits,) when y is None, otherwise (loss, logits).
    """

    def __init__(self, hidden_dim: int, num_classes: int, freeze_resnet: bool = False):
        super().__init__()
        # NOTE(review): pretrained=True downloads weights at construction time
        # (and is deprecated in newer torchvision) — confirm this is intended.
        self.resnet = resnet50(pretrained=True)
        if freeze_resnet:
            # Freeze the backbone so only the custom head trains.
            for param in self.resnet.parameters():
                param.requires_grad = False
        # Pool -> flatten -> BN/dropout-regularized 2-layer classifier.
        self.head = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.BatchNorm1d(self.resnet.fc.in_features),
            nn.Dropout(0.5),
            nn.Linear(in_features=self.resnet.fc.in_features, out_features=hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Dropout(0.5),
            nn.Linear(in_features=hidden_dim, out_features=num_classes),
        )
        # ResNet without its original avgpool/fc, followed by the custom head.
        self.model = nn.Sequential(
            nn.Sequential(*list(self.resnet.children())[:-2]),
            self.head
        )
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x, y):
        y_hat = self.model(x)
        if y is None:
            return (y_hat,)
        # NOTE(review): logits are cached on the module only on the supervised
        # path — confirm downstream code relies on self.y_hat.
        self.y_hat = y_hat
        loss = self.criterion(y_hat, y)
        return (loss, y_hat)
| StarcoderdataPython |
42749 | import functools
import inspect
import pandas as pd
import awkward1 as ak
from .series import AwkwardSeries
from .dtype import AwkardType
# All top-level awkward functions, proxied dynamically via __getattr__ below.
funcs = [n for n in dir(ak) if inspect.isfunction(getattr(ak, n))]
@pd.api.extensions.register_series_accessor("ak")
class AwkwardAccessor:
    """`.ak` accessor exposing awkward-array operations on pandas Series."""

    def __init__(self, pandas_obj):
        if not self._validate(pandas_obj):
            # NOTE(review): bare AttributeError with no message — consider
            # naming the incompatible object/dtype.
            raise AttributeError("ak accessor called on incompatible data")
        self._obj = pandas_obj
        self._arr = None

    @property
    def arr(self):
        """Lazily coerce the wrapped object to an AwkwardSeries (cached)."""
        if self._arr is None:
            if isinstance(self._obj, AwkwardSeries):
                self._arr = self._obj
            elif isinstance(self._obj.dtype, AwkardType) and isinstance(self._obj, pd.Series):
                # this is a pandas Series that contains an Awkward
                self._arr = self._obj.values
            elif isinstance(self._obj.dtype, AwkardType):
                # a dask series - figure out what to do here
                raise NotImplementedError
            else:
                # this recreates series, possibly by iteration
                self._arr = AwkwardSeries(self._obj)
        return self._arr

    @staticmethod
    def _validate(*_):
        # Accept everything; incompatible data surfaces later in `arr`.
        return True

    def to_arrow(self):
        """Convert the underlying awkward array to an Arrow array."""
        return self.arr.data.to_arrow()

    def cartesian(self, other, **kwargs):
        """ak.cartesian of this series with `other` (Series or awkward array)."""
        if isinstance(other, AwkwardSeries):
            other = other.data
        return AwkwardSeries(ak.cartesian([self.arr.data, other], **kwargs))

    def __getattr__(self, item):
        from .series import AwkwardSeries
        # replace with concrete implementations of all top-level ak functions
        if item not in funcs:
            raise AttributeError
        func = getattr(ak, item)
        @functools.wraps(func)
        def f(*others, **kwargs):
            # Unwrap any AwkwardSeries-like arguments to their raw ak.Array.
            others = [other.data if isinstance(getattr(other, "data", None), ak.Array) else other
                      for other in others]
            ak_arr = func(self.arr.data, *others, **kwargs)
            # TODO: special case to carry over index and name information where output
            # is similar to input, e.g., has same length
            if isinstance(ak_arr, ak.Array):
                # TODO: perhaps special case here if the output can be represented
                # as a regular num/cupy array
                return AwkwardSeries(ak_arr)
            return ak_arr
        return f
| StarcoderdataPython |
4820755 | <filename>tests/demo.py<gh_stars>1-10
#!/usr/bin/env python3
"""
python module exporting some dummy functions for use in testing replacement.py
(c) 2018 <NAME>
"""
from io import StringIO
def ret_kwargs(**kwargs):
    """Return the received keyword arguments as a plain dictionary."""
    return kwargs
def ret_a_dict(existing):
    """Return {'secret': 42} merged with *existing*; existing wins on clash."""
    merged = {'secret': 42}
    merged.update(existing or {})
    return merged
def ret_a_stream():
    """Return a StringIO containing two numbered greeting lines."""
    stream = StringIO()
    stream.write('1. hello\n')
    stream.write('2. world\n')
    return stream
def ret_a_list(an_arg):
    """Return [42, "meaning"] extended with *an_arg* (treated as [] when falsy)."""
    tail = an_arg or []
    return [42, "meaning"] + tail
class aClass:
    """Holder for a staticmethod, used to test static-call replacement."""

    @staticmethod
    def invented_list():
        """Return a fixed three-word greeting list."""
        return ['hello', 'from', 'staticmethod']
| StarcoderdataPython |
194384 | #!/usr/bin/env python
atoms = ['*char_', 'eps', '-int_']
infix_ops = ['>>', '|']
unary_ops = ['-']
atoms_4 = []
atom_parts = [None, None, None, None]
for i in range(len(atoms)):
atom_parts[0] = atoms[i]
for j in range(len(atoms)):
atom_parts[1] = atoms[j]
for k in range(len(atoms)):
atom_parts[2] = atoms[k]
for l in range(len(atoms)):
atom_parts[3] = atoms[l]
new_atoms = [atom_parts[0], atom_parts[1], atom_parts[2], atom_parts[3]]
# No more than two in a row.
if new_atoms[0] == new_atoms[1] and new_atoms[1] == new_atoms[2]:
new_atoms = new_atoms[1:]
if new_atoms[0] == new_atoms[1] and new_atoms[1] == new_atoms[2]:
new_atoms = new_atoms[1:]
atoms_4.append(tuple(new_atoms))
atoms_3 = []
atom_parts = [None, None, None]
for i in range(len(atoms)):
atom_parts[0] = atoms[i]
for j in range(len(atoms)):
atom_parts[1] = atoms[j]
for k in range(len(atoms)):
atom_parts[2] = atoms[k]
new_atoms = [atom_parts[0], atom_parts[1], atom_parts[2]]
# No more than two in a row.
if new_atoms[0] == new_atoms[1] and new_atoms[1] == new_atoms[2]:
new_atoms = new_atoms[1:]
atoms_3.append(tuple(new_atoms))
atoms_2 = []
atom_parts = [None, None]
for i in range(len(atoms)):
atom_parts[0] = atoms[i]
for j in range(len(atoms)):
atom_parts[1] = atoms[j]
new_atoms = [atom_parts[0], atom_parts[1]]
atoms_2.append(tuple(new_atoms))
atoms_1 = []
atom_parts = [None]
for i in range(len(atoms)):
atom_parts[0] = atoms[i]
new_atoms = [atom_parts[0]]
atoms_1.append(tuple(new_atoms))
all_atoms = list(set(atoms_1 + atoms_2 + atoms_3 + atoms_4))
def add_infix_op(t, op):
    """Join the items of *t* with the infix operator *op*, wrapped in parens."""
    return '(' + (' ' + op + ' ').join(t) + ')'
def atom_type(a):
    """Map a parser atom spelling to its C++ attribute type ('nope' == none)."""
    mapping = {
        '*char_': 'std::vector<char>',
        'eps': 'nope',
        '-int_': 'optional<int>',
        "char_('z')": 'char',
    }
    return mapping[a]
seq_dump = False
or_dump = False
def seq_fold(x, y):
    # Fold step for a `>>` sequence: x is the list of attribute types so far,
    # y is the next type. Adjacent compatible types collapse into vectors.
    # Python 2 file: `print` statements and `cond and a or b` ternaries.
    if y == 'nope':
        # eps contributes no attribute.
        if seq_dump:
            print '0 return', x
        return x
    # 'bar'/'foo' are sentinels that can never match a real type name.
    y_unwrapped_optional = y.startswith('optional<') and y[len('optional<'):-1] or 'bar'
    if x[-1] == y or x[-1] == y_unwrapped_optional:
        if x[-1].startswith('std::vector<'):
            if seq_dump:
                print '1 return', x
            return x
        if seq_dump:
            print '2 return', x[:-1] + ['std::vector<{}>'.format(y)]
        # Two equal adjacent types collapse into a vector of that type.
        final_y = x[-1] == y and y or y_unwrapped_optional
        return x[:-1] + ['std::vector<{}>'.format(final_y)]
    # x[-1] is a vector, and y is the vector's value_type
    x_back_vector_t = x[-1].startswith('std::vector<') and x[-1][len('std::vector<'):-1] or 'foo'
    if x_back_vector_t == y or x_back_vector_t == y_unwrapped_optional:
        if seq_dump:
            print ' ',x_back_vector_t, y_unwrapped_optional
            print '3 return',x
        return x
    # y is a vector, and x[-1] is the vector's value_type
    y_vector_t = y.startswith('std::vector<') and y[len('std::vector<'):-1] or 'foo'
    x_back_unwrapped_optional = x[-1].startswith('optional<') and x[-1][len('optional<'):-1] or 'bar'
    if y_vector_t == x[-1] or y_vector_t == x_back_unwrapped_optional:
        if seq_dump:
            print ' ',y_vector_t, x_back_unwrapped_optional
            print '4 return',x[:-1] + [y]
        return x[:-1] + [y]
    if x[-1] == 'nope':
        # A leading eps placeholder is replaced by the first real type.
        if seq_dump:
            print '5 return', x[:-1] + [y]
        return x[:-1] + [y]
    if seq_dump:
        print '6 return',x + [y]
    return x + [y]
def or_fold(x, y):
    """Fold step for `|` alternatives.

    x is [distinct_types_so_far, saw_eps]; y is the next branch's type.
    An eps branch ('nope') makes the whole alternative optional; duplicate
    branch types are dropped.
    """
    types_so_far, saw_eps = x
    if y == 'nope':
        return [types_so_far, True]
    if y in types_so_far:
        return x
    return [types_so_far + [y], saw_eps]
def seq_of(t):
    # Compute the fused C++ attribute type of a `>>` sequence of atoms (or of
    # already-computed type names). Python 2: `map` returns a list and both
    # `print` and the `reduce` builtin are available.
    if t[0] not in atoms:
        types = t
    else:
        types = map(atom_type, t)
    if seq_dump:
        print types
    # Left-fold adjacent types with seq_fold, seeding with the first type.
    folded = reduce(seq_fold, types[1:], [types[0]])
    if len(folded) == 1:
        return folded[0]
    return 'tuple<{}>'.format(', '.join(folded))
def or_of(t):
    # Compute the C++ attribute type of a `|` alternative of atoms (or of
    # already-computed type names). Python 2 semantics (list-returning map,
    # print statement, reduce builtin).
    if t[0] not in atoms:
        types = t
    else:
        types = map(atom_type, t)
    if or_dump:
        print types
    # folded == [distinct branch types, whether an eps branch was seen].
    folded = reduce(or_fold, types, [[], False])
    if len(folded[0]) == 0:
        return 'nope'
    if len(folded[0]) == 1:
        retval = folded[0][0]
        if retval.startswith('optional<'):
            # Already optional — eps adds nothing more.
            return retval
    else:
        retval = 'variant<{}>'.format(', '.join(folded[0]))
    # An eps branch wraps the whole alternative in optional<>.
    if folded[1]:
        retval = 'optional<{}>'.format(retval)
    return retval
def type_of(t, op):
    """Attribute type of `t` combined with `op`: seq_of for '>>', else or_of."""
    return seq_of(t) if op == '>>' else or_of(t)
def optional_of(op_token, type_str):
    """Wrap type_str in optional<> when op_token is unary '-' and wrapping
    is meaningful (not already optional, not the no-attribute marker)."""
    needs_wrap = (op_token == '-'
                  and not type_str.startswith('optional<')
                  and type_str != 'nope')
    return 'optional<{}>'.format(type_str) if needs_wrap else type_str
def atom_str(a):
    """Map a parser atom spelling to a sample input string it matches."""
    mapping = {
        '*char_': 'cb',
        'eps': '',
        '-int_': '3',
        "char_('z')": 'z',
    }
    return mapping[a]
def seq_str(t):
    """Sample input string for a `>>` sequence: the branches' strings
    concatenated (atoms are mapped through atom_str first)."""
    strs = t if t[0] not in atoms else map(atom_str, t)
    return ''.join(strs)
def or_str(t):
    """Sample input string for a `|` alternative: the first branch's string
    (atoms are mapped through atom_str)."""
    if t[0] not in atoms:
        return t[0]
    return atom_str(t[0])
def str_of(t, op):
    """Sample input string for `t` combined with `op`: seq_str for '>>',
    otherwise or_str."""
    return seq_str(t) if op == '>>' else or_str(t)
all_exprs = []
all_types = []
all_strs = []
i = 0
for first in all_atoms:
for second in all_atoms:
first_opt = (i % 3) == 0 and '-' or ''
second_opt = (i % 4) == 0 and '-' or ''
all_exprs.append(first_opt + add_infix_op(first, '>>') + ' | ' +
second_opt + add_infix_op(second, '>>'))
all_types.append(or_of((optional_of(first_opt, type_of(first, '>>')),
optional_of(second_opt, type_of(second, '>>')))))
all_strs.append(or_str((str_of(first, '>>'), str_of(second, '>>'))))
i += 1
first_opt = (i % 3) == 0 and '-' or ''
second_opt = (i % 4) == 0 and '-' or ''
all_exprs.append(first_opt + add_infix_op(first, '|') + ' >> ' +
second_opt + add_infix_op(second, '|'))
all_types.append(seq_of((optional_of(first_opt, type_of(first, '|')),
optional_of(second_opt, type_of(second, '|')))))
all_strs.append(seq_str((str_of(first, '|'), str_of(second, '|'))))
i += 1
def type_to_result(type_):
    """C++ result type of parse(): bool when the parser has no attribute
    ('nope'), optional<T> otherwise."""
    return 'bool' if type_ == 'nope' else 'optional<{}>'.format(type_)
def type_to_fail_result(type_):
    # Attribute type once the parser is extended with `>> repeat(Inf)[int_]`
    # (the always-failing suffix used by the generated tests): flatten an
    # existing tuple<...> and append std::vector<int>, re-fusing via seq_of.
    if type_.startswith('tuple<'):
        # NOTE(review): naive split on ', ' would mis-split nested templates
        # containing ', ' — confirm generated tuples never nest that way.
        elements = type_[len('tuple<'):-1].split(', ') + ['std::vector<int>']
        return seq_of(elements)
    return seq_of((type_, 'std::vector<int>'))
all_checks = []
for expr_,type_,str_ in zip(all_exprs, all_types, all_strs):
check = '''\
{{
constexpr auto parser = {0};
using attr_t = decltype(parse(g_first, g_last, parser));
BOOST_MPL_ASSERT((is_same<attr_t, {1}>));
std::string const str = "{2}";
auto first = str.begin();
auto const last = str.end();
attr_t const attr = parse(first, last, parser);
EXPECT_TRUE(attr);
constexpr auto fail_parser = parser >> repeat(Inf)[int_];
first = str.begin();
auto const fail_attr = parse(first, last, fail_parser);
EXPECT_FALSE(fail_attr);
'''.format(expr_, type_to_result(type_), str_)
if type_to_result(type_) != 'bool':
check += '''\
{
decltype(parse(first, last, fail_parser)) attr;
auto const copy = attr;
EXPECT_FALSE(parse(first, last, fail_parser, attr));
EXPECT_EQ(attr, copy);
}
'''
check += '''\
}
'''
all_checks.append(check)
checks_per_file = 100
checks_per_test = 5
file_prefix = '''\
// WARNING! This file is generated.
// Copyright (C) 2018 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/parser/parser.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/type_traits/is_same.hpp>
#include <gtest/gtest.h>
using namespace boost::parser;
using boost::is_same;
using std::optional;
using std::variant;
using boost::hana::tuple;
char const g_chars[] = "";
auto g_first = std::begin(g_chars);
auto const g_last = std::end(g_chars);
'''
test_prefix = '''\
TEST(parser, generated_{:03}_{:03})
{{
'''
test_suffix = '''\
}
'''
i = 0
while i < len(all_checks):
file_index = i / checks_per_file
f = open('generated_parsers_{:03}.cpp'.format(file_index), 'w')
lo = i
hi = min(i + checks_per_file, len(all_checks))
test_index = 0
f.write(file_prefix)
for j in range(lo, hi):
if test_index != 0 and j % checks_per_test == 0:
f.write(test_suffix)
if j % checks_per_test == 0:
f.write(test_prefix.format(file_index, test_index))
test_index += 1
f.write(all_checks[j])
f.write(test_suffix)
i += checks_per_file
| StarcoderdataPython |
1608477 | <filename>src/cpp_Obj/conditionalObject.py
from cpp_Obj.varObject import VarObject
from cpp_Obj.builtinObject import BuiltinObject
from cpp_Obj.returnObject import ReturnObject
import cpp_Obj.loopObject
class ConditionalObject:
def __init__(self, source_ast, nesting_count):
self.exec_str = ""
self.ast = source_ast['conditional_statement']
self.nesting_count = nesting_count
def transpile(self):
keyword = ""
condition = ""
for ast in self.ast:
try: keyword = ast['keyword']
except: pass
try: condition = ast['condition']
except: pass
try: scope = ast['scope']
except: pass
if keyword != "else":
self.exec_str += keyword + "(" + condition + ") {\n" + self.transpile_scope(scope, self.nesting_count, 2) + "\t" * (self.nesting_count - 1) + "}" + "\n"
else:
self.exec_str += "else {\n" + self.transpile_scope(scope, self.nesting_count, 1) + "\t" * (self.nesting_count - 1) + "}" + "\n"
return self.exec_str
def transpile_scope(self, body_ast, nesting_count, items):
body_exec_string = ""
# Loop through each ast item in the body dictionary
for ast in body_ast:
# This will parse variable declerations within the body
if self.check_ast('variable_declaration', ast):
var_obj = VarObject(ast)
transpile = var_obj.transpile()
if self.should_dedent_trailing(ast, self.ast, items):
body_exec_string += (" " * (nesting_count - 1)) + transpile + "\n"
else:
body_exec_string += (" " * nesting_count) + transpile + "\n"
# This will parse built-in within the body
if self.check_ast('builtin_function', ast):
gen_builtin = BuiltinObject(ast)
transpile = gen_builtin.transpile()
if self.should_dedent_trailing(ast, self.ast, items):
body_exec_string += (" " * (nesting_count - 1)) + transpile[0] + "\n"
else:
body_exec_string += (" " * nesting_count) + transpile[0] + "\n"
# This will parse nested conditional statement within the body
if self.check_ast('conditional_statement', ast):
# Increase nesting count because this is a condition statement inside a conditional statement
# Only increase nest count if needed
if self.should_increment_nest_count(ast, self.ast):
nesting_count += 1
# Create conditional statement exec string
condition_obj = ConditionalObject(ast, nesting_count)
# The second nested statament only needs 1 indent not 2
if nesting_count == 2:
# Add the content of conditional statement with correct indentation
body_exec_string += " " + condition_obj.transpile()
else:
# Add the content of conditional statement with correct indentation
body_exec_string += (" " * (nesting_count - 1)) + condition_obj.transpile()
# This will parse nested conditional statement within the body
if self.check_ast('loop', ast):
# Increase nesting count because this is a condition statement inside a conditional statement
# Only increase nest count if needed
if self.should_increment_nest_count(ast, self.ast):
nesting_count += 1
# Create conditional statement exec string
loop_obj = cpp_Obj.loopObject.LoopObject(ast, nesting_count)
body_exec_string += (" " * (nesting_count - 1)) + loop_obj.transpile()
if self.check_ast('return', ast):
gen_return = ReturnObject(ast)
transpile = gen_return.transpile()
if self.should_dedent_trailing(ast, self.ast, items):
body_exec_string += (" " * (nesting_count - 1)) + transpile + "\n"
else:
body_exec_string += (" " * nesting_count) + transpile + "\n"
return body_exec_string
def check_ast(self, astName, ast):
    """Return True when *astName* is a key present in *ast* with a usable
    value, otherwise False.

    Fixes two defects in the original: (1) a truthy-but-falsy value (e.g.
    ``None``) fell through and returned an implicit ``None`` instead of
    ``False``; (2) a bare ``except`` swallowed every exception, not just
    missing-key/unsubscriptable errors.
    """
    try:
        # An empty list still counts as present: when called from
        # should_dedent_trailing the key can map to [] because the node was
        # already consumed, and treating it as missing mis-indents the
        # trailing code by one level.
        if ast[astName] == []:
            return True
        return bool(ast[astName])
    except (KeyError, TypeError, IndexError):
        # Key absent, or *ast* is not subscriptable: not this node type.
        return False
def should_dedent_trailing(self, ast, full_ast, items):
    """Decide whether *ast*, which trails a conditional block, should be
    dedented one level.

    Walks the innermost scope of *full_ast*; once a 'ConditionalStatement'
    node has been seen, any later node equal to *ast* counts as trailing
    code and True is returned.

    NOTE(review): *items* is accepted but never used here -- confirm callers
    before removing it.
    NOTE(review): this checks the key 'ConditionalStatement' while the body
    parser tests 'conditional_statement' -- verify which casing the AST
    actually produces.
    """
    new_ast = full_ast[len(full_ast) - 1]['scope']
    # This flag records whether a conditional statement has been seen yet.
    dedent_flag = False
    # Loop through the body ASTs; when a conditional statement is found,
    # set the dedent flag to True.
    for x in new_ast:
        # When a conditional statement AST is found, set dedent to True.
        if self.check_ast('ConditionalStatement', x):
            dedent_flag = True
        # *ast* appears at or after a conditional statement: dedent it.
        if ast == x and dedent_flag == True:
            return True
    return False
def should_increment_nest_count(self, ast, full_ast):
    """Should increment nest count

    This method will check if the ast item being checked is outside a conditional statement e.g.
    if a == 11 {
        if name == "<NAME>" {
            print "Not it";
        }
        if 1 != 2 { <--- This is the statement that should not be nested more
            print "Yo"
        }
    }

    args:
        ast (list) : The ConditionalStatement ast we are looking for
        full_ast (list) : The full ast being parsed

    return:
        True : If the nesting should increase by 1
        False : If the nesting should not be increased
    """
    # Counts of the number of statements in that one scope
    statement_counts = 0
    # Loops through the body to count the number of conditional statements
    for x in full_ast[len(full_ast) - 1]['scope']:
        # If a statement is found then increment statement count variable value by 1
        if self.check_ast('ConditionalStatement', x): statement_counts += 1
        # If the statement being checked is the one found then break
        if ast == x: break
    # More than one statement already seen at this level: *ast* is a sibling,
    # not a child, so the nesting must NOT be increased.
    if statement_counts > 1:
        return False
    # First statement at this level: nesting should increase by 1.
    else:
        return True
1689341 | <filename>GMX_TEST/GXG/test_all_dip.py
from pmx import *
# Build an Ala-X-Ala tripeptide PDB file for each of the 20 standard
# amino-acid one-letter codes, exactly as the original loop did.
residue_codes = [
    'A', 'G', 'I', 'L', 'P',
    'V', 'F', 'W', 'Y', 'D',
    'E', 'R', 'H', 'K', 'S',
    'T', 'C', 'M', 'N', 'Q',
]
print(len(residue_codes))
for num, code in enumerate(residue_codes, start=1):
    # Progress counter, one line per residue.
    print(num)
    chain = Chain().create("A%sA" % code)
    chain.write("DIP_%s.pdb" % code)
| StarcoderdataPython |
70948 | <filename>notion_scholar/config.py<gh_stars>0
import shutil
import keyring # https://askubuntu.com/a/881212 Solve issues of keyring with WSL
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Optional
from pathlib import Path
from configparser import ConfigParser
from platformdirs import user_config_dir
from notion_scholar.utilities import NotionScholarError
class ConfigError(NotionScholarError):
    """A config exception class for notion-scholar.

    Base class for configuration-related errors, derived from the package's
    root NotionScholarError.
    """
def get_token() -> Optional[str]:
    """Return the token stored in the system keyring, or None when absent.

    NOTE(review): the service name "<PASSWORD>" looks like a redaction
    placeholder from data scrubbing -- confirm the real service name.
    """
    return keyring.get_password("<PASSWORD>", "token")
def add_to_config(section_option_value_list: List[Tuple[str, str, Any]]) -> None:
    """Persist (section, option, value) triples into the user config file.

    Missing sections are created on the fly; existing options are
    overwritten. The whole file is rewritten at the end.
    """
    config_dir = Path(user_config_dir(appname="notion-scholar"))
    config_file = config_dir.joinpath('config').with_suffix('.ini')

    # Make sure the folder and an (empty) config file exist before reading.
    config_dir.mkdir(parents=True, exist_ok=True)
    if not config_file.is_file():
        config_file.touch()

    # Load the current contents so unrelated entries survive the rewrite.
    parser = ConfigParser()
    parser.read(config_file)

    # Apply every requested change, creating sections as needed.
    for section, option, value in section_option_value_list:
        if not parser.has_section(section):
            parser.add_section(section)
        parser.set(section, option, value)

    # Write everything back.
    with open(config_file, 'w') as fh:
        parser.write(fh)
def get_config() -> Dict[str, Any]:
    """Load the persisted configuration as a flat {option: value} dict.

    Returns an empty dict when no config file exists yet. Section names are
    discarded; a duplicate option name in a later section wins.
    """
    config_file = Path(user_config_dir(appname="notion-scholar")).joinpath('config').with_suffix('.ini')
    if not config_file.is_file():
        return {}
    parser = ConfigParser()
    parser.read(config_file)
    # Flatten every section into one mapping.
    return {
        option: value
        for section in parser.sections()
        for option, value in parser[section].items()
    }
def setup(
    token: Optional[str],
    database_url: Optional[str],
    bib_file_path: Optional[str],
    save: Optional[bool],
) -> None:
    """Store credentials and preferences; arguments left as None are skipped.

    The token goes into the system keyring; everything else is appended to
    the ini config via add_to_config().
    """
    if token is not None:
        keyring.set_password("<PASSWORD>", "token", token)

    entries = []
    if bib_file_path is not None:
        # Refuse to record a path that does not point at an existing file.
        if not Path(bib_file_path).is_file():
            print(f'The file "{bib_file_path}" does not exist, it will not be added to the config file.')
        else:
            entries.append(('paths', 'bib_file_path', bib_file_path))
    if database_url is not None:
        entries.append(('notion_api', 'database_url', database_url))
    if save is not None:
        entries.append(('preferences', 'save_to_bib_file', str(save)))
    add_to_config(entries)
def inspect() -> None:
    """Print the current notion-scholar configuration to stdout."""
    config_file = Path(user_config_dir(appname="notion-scholar")).joinpath('config').with_suffix('.ini')

    print(f'\nconfig_file_path: {str(config_file)}')
    print(f'config_file_exist: {config_file.exists()}')
    print(f'token: {get_token()}')

    # Echo only the user-facing options.
    displayed_keys = ('database_url', 'save_to_bib_file', 'bib_file_path')
    for key, value in get_config().items():
        if key in displayed_keys:
            print(f'{key}: {value}')
    print()
def clear() -> None:
    """Remove the configuration directory and the stored keyring token.

    Safe to call repeatedly: the directory removal already ignores errors,
    and the keyring deletion is now guarded the same way.
    """
    directory_path = Path(user_config_dir(appname="notion-scholar"))
    shutil.rmtree(directory_path, ignore_errors=True)
    # keyring.delete_password raises PasswordDeleteError when nothing is
    # stored, which made a second clear() crash; only delete when a token
    # actually exists so clear() stays idempotent like rmtree above.
    if get_token() is not None:
        keyring.delete_password("<PASSWORD>", "token")
| StarcoderdataPython |
3225877 | # -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import utils
from models.BaseModel import GeneralModel
from helpers.KGReader import KGReader
class CFKG(GeneralModel):
    """CFKG: collaborative filtering with a knowledge graph, TransE-style.

    Users and entities share one embedding table (users first); the "buy"
    interaction between a user and an item is modeled as relation id 0
    alongside the knowledge-graph relations. Scoring follows TransE:
    -(head + relation - tail)^2 summed over the embedding dimension.
    """
    reader = 'KGReader'

    @staticmethod
    def parse_model_args(parser):
        """Add CFKG-specific CLI arguments, then defer to GeneralModel."""
        parser.add_argument('--emb_size', type=int, default=64,
                            help='Size of embedding vectors.')
        parser.add_argument('--margin', type=float, default=0,
                            help='Margin in hinge loss.')
        return GeneralModel.parse_model_args(parser)

    def __init__(self, args, corpus: KGReader):
        # Capture sizes before the base class builds the parameters.
        self.emb_size = args.emb_size
        self.margin = args.margin
        self.relation_num = corpus.n_relations
        self.entity_num = corpus.n_entities
        super().__init__(args, corpus)

    def _define_params(self):
        """Create embedding tables and the margin ranking loss."""
        self.e_embeddings = nn.Embedding(self.user_num + self.entity_num, self.emb_size)
        # ↑ user and entity embeddings, user first
        self.r_embeddings = nn.Embedding(self.relation_num, self.emb_size)
        # ↑ relation embedding: 0 is used for "buy" between users and items
        self.loss_function = nn.MarginRankingLoss(margin=self.margin)

    def forward(self, feed_dict):
        """Score (head, relation, tail) triples; higher is more plausible."""
        self.check_list = []
        head_ids = feed_dict['head_id']  # [batch_size, -1]
        tail_ids = feed_dict['tail_id']  # [batch_size, -1]
        relation_ids = feed_dict['relation_id']  # [batch_size, -1]

        head_vectors = self.e_embeddings(head_ids)
        tail_vectors = self.e_embeddings(tail_ids)
        relation_vectors = self.r_embeddings(relation_ids)
        # Negative squared TransE distance: larger prediction = closer triple.
        prediction = -((head_vectors + relation_vectors - tail_vectors)**2).sum(-1)
        return {'prediction': prediction.view(feed_dict['batch_size'], -1)}

    def loss(self, out_dict):
        """Hinge loss pairing columns 0-1 (positives) with columns 2-3 (negatives)."""
        predictions = out_dict['prediction']
        batch_size = predictions.shape[0]
        pos_pred, neg_pred = predictions[:, :2].flatten(), predictions[:, 2:].flatten()
        # target = 1 means pos_pred should rank above neg_pred.
        target = torch.from_numpy(np.ones(batch_size * 2, dtype=np.float32)).to(self.device)
        loss = self.loss_function(pos_pred, neg_pred, target)
        return loss

    class Dataset(GeneralModel.Dataset):
        def _prepare(self):
            """For training, merge user-item interactions (relation 0) into the KG triples."""
            if self.phase == 'train':
                interaction_df = pd.DataFrame({
                    'head': self.data['user_id'],
                    'tail': self.data['item_id'],
                    'relation': np.zeros_like(self.data['user_id'])  # "buy" relation
                })
                self.data = utils.df_to_dict(pd.concat((self.corpus.relation_df, interaction_df), axis=0))
                # Negative-sample slots, filled per epoch in actions_before_epoch.
                self.neg_heads = np.zeros(len(self), dtype=int)
                self.neg_tails = np.zeros(len(self), dtype=int)
            super()._prepare()

        def _get_feed_dict(self, index):
            """Build one sample: [pos, pos, corrupted-tail, corrupted-head] for
            training, or [target, negatives...] for evaluation."""
            if self.phase == 'train':
                head, tail = self.data['head'][index], self.data['tail'][index]
                relation = self.data['relation'][index]
                head_id = np.array([head, head, head, self.neg_heads[index]])
                tail_id = np.array([tail, tail, self.neg_tails[index], tail])
                relation_id = np.array([relation] * 4)
                if relation > 0:  # head is not a user
                    head_id = head_id + self.corpus.n_users
            else:
                target_item = self.data['item_id'][index]
                neg_items = self.neg_items[index]
                tail_id = np.concatenate([[target_item], neg_items])
                head_id = self.data['user_id'][index] * np.ones_like(tail_id)
                relation_id = np.zeros_like(tail_id)
            # Shift tails past the user id range of the shared table.
            tail_id += self.corpus.n_users  # tail must not be a user
            feed_dict = {'head_id': head_id, 'tail_id': tail_id, 'relation_id': relation_id}
            return feed_dict

        def actions_before_epoch(self):
            """Resample corrupted heads/tails for every training triple.

            For interaction triples (relation 0) negatives must avoid items the
            user clicked; for KG triples they must avoid known triples.
            """
            for i in range(len(self)):
                head, tail, relation = self.data['head'][i], self.data['tail'][i], self.data['relation'][i]
                self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
                if relation == 0:
                    self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
                    while self.neg_tails[i] in self.corpus.user_clicked_set[head]:
                        self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
                    while tail in self.corpus.user_clicked_set[self.neg_heads[i]]:
                        self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
                else:
                    self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
                    while (head, relation, self.neg_tails[i]) in self.corpus.triplet_set:
                        self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
                    while (self.neg_heads[i], relation, tail) in self.corpus.triplet_set:
                        self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
| StarcoderdataPython |
24725 | <reponame>LaudateCorpus1/oci-python-sdk<gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DetectedLanguage(object):
    """
    Language detected in a document.
    """
    # NOTE: this class follows the auto-generated OCI SDK model pattern:
    # enum-like string constants, swagger_types/attribute_map metadata, and
    # property accessors for each field.

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "ENG"
    LANGUAGE_CODE_ENG = "ENG"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "CES"
    LANGUAGE_CODE_CES = "CES"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "DAN"
    LANGUAGE_CODE_DAN = "DAN"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "NLD"
    LANGUAGE_CODE_NLD = "NLD"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "FIN"
    LANGUAGE_CODE_FIN = "FIN"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "FRA"
    LANGUAGE_CODE_FRA = "FRA"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "DEU"
    LANGUAGE_CODE_DEU = "DEU"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "ELL"
    LANGUAGE_CODE_ELL = "ELL"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "HUN"
    LANGUAGE_CODE_HUN = "HUN"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "ITA"
    LANGUAGE_CODE_ITA = "ITA"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "NOR"
    LANGUAGE_CODE_NOR = "NOR"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "POL"
    LANGUAGE_CODE_POL = "POL"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "POR"
    LANGUAGE_CODE_POR = "POR"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "RON"
    LANGUAGE_CODE_RON = "RON"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "RUS"
    LANGUAGE_CODE_RUS = "RUS"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "SLK"
    LANGUAGE_CODE_SLK = "SLK"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "SPA"
    LANGUAGE_CODE_SPA = "SPA"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "SWE"
    LANGUAGE_CODE_SWE = "SWE"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "TUR"
    LANGUAGE_CODE_TUR = "TUR"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "ARA"
    LANGUAGE_CODE_ARA = "ARA"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "CHI_SIM"
    LANGUAGE_CODE_CHI_SIM = "CHI_SIM"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "HIN"
    LANGUAGE_CODE_HIN = "HIN"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "JPN"
    LANGUAGE_CODE_JPN = "JPN"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "KOR"
    LANGUAGE_CODE_KOR = "KOR"

    #: A constant which can be used with the language_code property of a DetectedLanguage.
    #: This constant has a value of "OTHERS"
    LANGUAGE_CODE_OTHERS = "OTHERS"

    def __init__(self, **kwargs):
        """
        Initializes a new DetectedLanguage object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param language_code:
            The value to assign to the language_code property of this DetectedLanguage.
            Allowed values for this property are: "ENG", "CES", "DAN", "NLD", "FIN", "FRA", "DEU", "ELL", "HUN", "ITA", "NOR", "POL", "POR", "RON", "RUS", "SLK", "SPA", "SWE", "TUR", "ARA", "CHI_SIM", "HIN", "JPN", "KOR", "OTHERS", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type language_code: str

        :param confidence:
            The value to assign to the confidence property of this DetectedLanguage.
        :type confidence: float

        """
        # Metadata used by the SDK for (de)serialization.
        self.swagger_types = {
            'language_code': 'str',
            'confidence': 'float'
        }

        self.attribute_map = {
            'language_code': 'languageCode',
            'confidence': 'confidence'
        }

        self._language_code = None
        self._confidence = None

    @property
    def language_code(self):
        """
        **[Required]** Gets the language_code of this DetectedLanguage.
        Language of the document, abbreviated according to ISO 639-2.

        Allowed values for this property are: "ENG", "CES", "DAN", "NLD", "FIN", "FRA", "DEU", "ELL", "HUN", "ITA", "NOR", "POL", "POR", "RON", "RUS", "SLK", "SPA", "SWE", "TUR", "ARA", "CHI_SIM", "HIN", "JPN", "KOR", "OTHERS", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The language_code of this DetectedLanguage.
        :rtype: str
        """
        return self._language_code

    @language_code.setter
    def language_code(self, language_code):
        """
        Sets the language_code of this DetectedLanguage.
        Language of the document, abbreviated according to ISO 639-2.

        :param language_code: The language_code of this DetectedLanguage.
        :type: str
        """
        allowed_values = ["ENG", "CES", "DAN", "NLD", "FIN", "FRA", "DEU", "ELL", "HUN", "ITA", "NOR", "POL", "POR", "RON", "RUS", "SLK", "SPA", "SWE", "TUR", "ARA", "CHI_SIM", "HIN", "JPN", "KOR", "OTHERS"]
        # Unknown service values are normalized rather than rejected.
        if not value_allowed_none_or_none_sentinel(language_code, allowed_values):
            language_code = 'UNKNOWN_ENUM_VALUE'
        self._language_code = language_code

    @property
    def confidence(self):
        """
        **[Required]** Gets the confidence of this DetectedLanguage.
        Confidence score between 0 to 1.

        :return: The confidence of this DetectedLanguage.
        :rtype: float
        """
        return self._confidence

    @confidence.setter
    def confidence(self, confidence):
        """
        Sets the confidence of this DetectedLanguage.
        Confidence score between 0 to 1.

        :param confidence: The confidence of this DetectedLanguage.
        :type: float
        """
        self._confidence = confidence

    def __repr__(self):
        # Human-readable dump of all attributes, via the shared OCI helper.
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Value equality: compare the full attribute dictionaries.
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Defined explicitly for Python 2 compatibility.
        return not self == other
| StarcoderdataPython |
3264764 | import os
import sys
import time
import json
import base64
import urllib2
from alice.utility import *
from alice.utility import LOG as L
from alice.script import testcase
class TestCase(testcase.TestCase_Base):
    """Concrete test case; inherits all behaviour from TestCase_Base unchanged."""
    pass
| StarcoderdataPython |
3282197 | from __future__ import with_statement
from cgi import parse_qs
from cStringIO import StringIO
import functools
import logging
import re
from urlparse import urlparse
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse, resolve
import django.db.models.signals
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, Http404
from oauth.oauth import OAuthConsumer, OAuthToken
import simplejson as json
from templateresponse import TemplateResponse
import typepad
import typepad.api
import typepadapp.signals
from typepadapp.models import Asset, Favorite, Photo, User
import makeaface.context_processors
from makeaface.models import Lastface, Favoriteface
# Module-level logger, named after this module.
log = logging.getLogger(__name__)

# Cache lifetime used for per-user data, in seconds.
ONE_DAY = 86400
def oops(fn):
    """Decorator: log any exception raised by *fn* and turn it into a
    plain-text HTTP 400 response instead of a 500.

    The wrapped view's metadata is preserved via functools.wraps.
    """
    @functools.wraps(fn)
    def hoops(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        # `except Exception, exc` is Python 2-only syntax; `as` works on
        # Python 2.6+ and 3.x alike.
        except Exception as exc:
            log.exception(exc)
            return HttpResponse('%s: %s' % (type(exc).__name__, str(exc)), status=400, content_type='text/plain')
    return hoops
# Grid cell dimensions in pixels. NOTE(review): not referenced elsewhere in
# this module's visible code -- presumably consumed by templates; confirm
# before removing.
CELL_WIDTH = 150
CELL_PAD = 4
def next_box_loc():
    """Generate the placements of 1x1 boxes beyond the 3x3 box in the upper
    left of the page.

    Yields dicts with 'row', 'file' (column) and 'rowlast' (True on the last
    cell of a row). The first three rows hold three cells each; every row
    after that holds six.
    """
    row = 0
    while True:
        # Rows 0-2 sit beside the big box, so only 3 cells fit; from row 3
        # onward the full 6-cell width is available.
        cells_in_row = 3 if row < 3 else 6
        for col in range(cells_in_row):
            yield {
                'row': row,
                'file': col,
                'rowlast': col == cells_in_row - 1,
            }
        row += 1
def home(request, page=None, template=None):
    """Render the homepage, or one 50-event page of it when *page* is given.

    Picks a mobile or desktop template based on the user agent unless
    *template* is supplied explicitly.
    """
    authed = request.user.is_authenticated()
    elsewhere = None
    with typepad.client.batch_request():
        events = request.group.events
        if page is not None:
            page = int(page)
            # 50 events per page; TypePad start indexes are 1-based.
            start_index = 50 * (page - 1) + 1
            events = events.filter(max_results=50, start_index=start_index)
        else:
            page = 0
        if authed:
            elsewhere = request.user.elsewhere_accounts
    if authed:
        # Reduce the account list to a single service name for the template.
        elsewhere = sharing_for_elsewhere(elsewhere)
    user_agent = request.META['HTTP_USER_AGENT']
    log.debug('User agent is %r', user_agent)
    if template is None:
        mobile_context = makeaface.context_processors.mobile(request)
        template = 'mobileface/home.html' if mobile_context['mobile'] else 'makeaface/home.html'
    return TemplateResponse(request, template, {
        'events': events,
        'next_box_loc': next_box_loc(),
        'share': elsewhere,
        'next_page': page + 1,
        'prev_page': page - 1,
    })
def facejson(request, page=None):
    """JSON feed of the home page events, including per-photo favorites.

    Reuses home() with a JSON template purely to obtain its template
    context, then serializes the events with pagination links.
    """
    resp = home(request, page, 'faces.json')
    events = resp.template_context['events']

    events_data = list()
    for event in events:
        about_event = event.__dict__  # NOTE(review): unused -- confirm and remove.
        obj = event.object
        if obj is None:
            continue
        author = obj.author
        author_data = {
            'xid': author.xid,
            'displayName': author.display_name,
        }
        event_data = {
            'xid': obj.xid,
            'url': obj.image_link.url,
            'published': obj.published.replace(microsecond=0).isoformat(),
            'author': author_data,
        }
        if obj.favorite_count > 0:
            xid = obj.xid
            # Fetch the favorites and each favoriter's recorded last face in
            # one batched request.
            with typepad.client.batch_request():
                favs = Asset.get_by_url_id(xid).favorites
                favfaces = dict()
                for face in Favoriteface.objects.all().filter(favorited=xid):
                    favfaces[face.favoriter] = Asset.get_by_url_id(face.lastface)
            favs_data = list()
            for fav in favs:
                author = fav.author
                fav_data = {
                    'xid': author.xid,
                    'displayName': author.display_name,
                    'favorited': fav.published.replace(microsecond=0).isoformat(),
                }
                # Attach the favoriter's face at favoriting time, if recorded.
                if author.xid in favfaces:
                    face = favfaces[author.xid]
                    face_data = {
                        'xid': face.xid,
                        'url': face.image_link.url,
                        'published': face.published.replace(microsecond=0).isoformat(),
                    }
                    fav_data['face'] = face_data
                favs_data.append(fav_data)
            event_data['favorites'] = favs_data
        events_data.append(event_data)

    # Pagination: always emit 'next'; 'prev' only past the first page.
    next_page = resp.template_context['next_page']
    next_url = reverse('facejson', kwargs={'page': next_page})
    next_url = request.build_absolute_uri(next_url)
    data = {
        'events': events_data,
        'next': next_url,
    }
    prev_page = resp.template_context['prev_page']
    if prev_page > 0:
        prev_url = reverse('facejson', kwargs={'page': prev_page})
        prev_url = request.build_absolute_uri(prev_url)
        data['prev'] = prev_url

    jsontext = json.dumps(data, sort_keys=True, indent=4)
    return HttpResponse(jsontext, content_type='application/json')
@oops
def asset_meta(request, fresh=False):
    """Return, as JSON, which of the POSTed asset ids the current user has
    favorited. Results are cached per user for a day unless *fresh* is set.
    """
    if not request.user.is_authenticated():
        return HttpResponse('silly rabbit, asset_meta is for authenticated users',
                            status=400, content_type='text/plain')
    user_id = request.user.xid
    cache_key = 'favorites:%s' % user_id
    favs = None if fresh else cache.get(cache_key)
    if favs is None:
        log.debug("Oops, going to server for %s's asset_meta", request.user.preferred_username)
        fav_objs = {}
        # POSTed ids look like 'asset-<xid>'.
        html_ids = request.POST.getlist('asset_id')
        with typepad.client.batch_request():
            for html_id in html_ids:
                assert html_id.startswith('asset-')
                xid = html_id[6:]
                # HEAD request: we only need existence, not the body.
                fav_objs[html_id] = Favorite.head_by_user_asset(user_id, xid)
        favs = list(html_id for html_id, fav_obj in fav_objs.items()
                    if fav_obj.found())
        if not fresh:
            cache.set(cache_key, favs, ONE_DAY)
    else:
        log.debug('Yay, returning asset_meta for %s from cache', request.user.preferred_username)
    favs = dict((html_id, {"favorite": True}) for html_id in favs)
    return HttpResponse(json.dumps(favs), content_type='application/json')
def authorize(request):
    """Wrap typepadapp's OAuth authorize view, flashing 'signedin' on success.

    NOTE(review): only typepadapp.signals is imported in this module's
    visible imports; verify typepadapp.views.auth is made importable
    elsewhere, otherwise this raises AttributeError at runtime.
    """
    resp = typepadapp.views.auth.authorize(request)
    if isinstance(resp, HttpResponseRedirect):
        request.flash['signedin'] = True
    return resp
def noob(request):
    """Flag the session as freshly signed in, then redirect to the homepage."""
    request.flash['signedin'] = True
    return HttpResponseRedirect(reverse('home'))
def sharing_for_elsewhere(ew):
    """Map a user's elsewhere accounts to the preferred sharing service name.

    Facebook takes priority over Twitter; with neither linked we fall back
    to plain TypePad sharing.
    """
    linked_domains = set(account.domain for account in ew)
    if 'facebook.com' in linked_domains:
        return 'Facebook'
    if 'twitter.com' in linked_domains:
        return 'Twitter'
    return 'TypePad'
def photo(request, xid, template=None):
    """Render a single photo page with its favorites and the viewer's state."""
    # Ask this up front to get the user object outside of batches.
    authed = request.user.is_authenticated()
    lastface, elsewhere = None, None
    with typepad.client.batch_request():
        photo = Asset.get_by_url_id(xid)
        favs = photo.favorites
        # Map favoriter xid -> the face they had on record when favoriting.
        favfaces = dict()
        for face in Favoriteface.objects.all().filter(favorited=xid):
            favfaces[face.favoriter] = Asset.get_by_url_id(face.lastface)
        if authed:
            elsewhere = request.user.elsewhere_accounts
            try:
                lastface_mod = Lastface.objects.get(owner=request.user.xid)
            except Lastface.DoesNotExist:
                pass
            else:
                lastface = Asset.get_by_url_id(lastface_mod.face)
    userfav = None
    if authed:
        elsewhere = sharing_for_elsewhere(elsewhere)
        # Get the Favorite in a separate batch so we can handle if it fails.
        try:
            with typepad.client.batch_request():
                userfav = Favorite.get_by_user_asset(request.user.xid, xid)
        except Favorite.NotFound:
            userfav = None
    # Annotate the favorites with the last faces, so we get them naturally in their loop.
    for fav in favs:
        try:
            fav.lastface = favfaces[fav.author.xid]
        except KeyError:
            pass
    if template is None:
        # Pick the mobile or desktop template based on the user agent.
        mobile_context = makeaface.context_processors.mobile(request)
        template = 'mobileface/photo.html' if mobile_context['mobile'] else 'makeaface/photo.html'
    return TemplateResponse(request, template, {
        'photo': photo,
        'favorites': favs,
        'user_favorite': userfav,
        'lastface': lastface,
        'share': elsewhere,
    })

# Marker consumed by oembed() to confirm a resolved URL is a photo page.
photo.is_photo_view = True
def oembed(request):
    """oEmbed endpoint for photo pages (JSON format only).

    Resolves the requested URL back to our photo view, then returns an
    oEmbed 'photo' payload sized to fit maxwidth/maxheight (default 500).
    """
    # Ask this up front to get the user object outside of batches.
    authed = request.user.is_authenticated()
    url = request.GET['url']
    urlparts = urlparse(url)
    view, args, kwargs = resolve(urlparts.path)
    if not getattr(view, 'is_photo_view', False):
        return HttpResponseNotFound('not a photo url', content_type='text/plain')
    if request.GET.get('format', 'json') != 'json':
        return HttpResponse('unsupported format :(', status=501, content_type='text/plain')
    xid = kwargs['xid']

    # Query-string values arrive as strings; convert to int before comparing.
    # The old code compared them lexicographically ("100" < "99") and then
    # compared a str against the int 150 below.
    maxwidth = request.GET.get('maxwidth')
    maxheight = request.GET.get('maxheight')
    maxwidth = int(maxwidth) if maxwidth else None
    maxheight = int(maxheight) if maxheight else None
    if maxwidth and maxheight:
        size = min(maxwidth, maxheight)
    elif maxwidth:
        size = maxwidth
    elif maxheight:
        size = maxheight
    else:
        size = 500

    typepad.client.batch_request()
    photo = Asset.get_by_url_id(xid)
    try:
        typepad.client.complete_batch()
    except Asset.NotFound:
        return HttpResponseNotFound('no such photo', content_type='text/plain')

    photo_url = photo.image_link.square(size).url
    data = {
        'type': 'photo',
        'version': '1.0',
        'title': "%s's face" % photo.author.display_name,
        'author_name': photo.author.display_name,
        'provider_name': 'Make A Face',
        'provider_url': 'http://make-a-face.org/',
        'url': photo_url,
        'width': size,
        'height': size,
    }
    # Only offer a separate thumbnail when the main image is bigger than one.
    if size > 150:
        data.update({
            'thumbnail_url': photo.image_link.square(150).url,
            'thumbnail_width': 150,
            'thumbnail_height': 150,
        })
    # NOTE(review): 'application/json+javascript' is a nonstandard MIME type;
    # confirm consumers before changing it to 'application/json'.
    return HttpResponse(json.dumps(data), content_type='application/json+javascript')
@oops
def upload_photo(request):
    """Accept a raw image POST, publish it to the TypePad group, record it
    as the poster's last face, and return the 150px-square image URL.
    """
    if request.method != 'POST':
        return HttpResponse('POST required at this url', status=400, content_type='text/plain')
    content_type = request.META['CONTENT_TYPE']
    assert content_type.startswith('image/')
    bodyfile = StringIO(request.raw_post_data)
    # Derive the browser-upload target path from the group's photo endpoint.
    target_url = request.group.photo_assets._location
    target_parts = urlparse(target_url)
    target_path = target_parts.path.replace('.json', '')
    log.debug('Using %r as target URL', target_path)
    asset = Asset()
    asset.title = "a face"
    resp, content = typepad.api.browser_upload.upload(asset, bodyfile,
        content_type=content_type, redirect_to='http://example.com/',
        target_url=target_path, post_type='photo')
    # A successful browser upload answers with a redirect.
    if resp.status != 302:
        log.debug('%d response from typepad: %s', resp.status, content)
    assert resp.status == 302
    typepadapp.signals.asset_created.send(sender=asset, instance=asset,
        group=request.group, parent=request.group.photo_assets)
    if 'location' not in resp:
        log.debug('No Location in response, only %r', resp.keys())
    # The new asset's URL is carried in the redirect's query string.
    loc = resp['location']
    loc_parts = parse_qs(urlparse(loc).query)
    if 'asset_url' not in loc_parts:
        log.warning('New location was %r', loc)
        log.warning('Original response/content were %r, %r', resp, content)
    loc = loc_parts['asset_url'][0]
    log.debug('LOCATION IS A %s %r', type(loc).__name__, loc)
    with typepad.client.batch_request():
        asset = Asset.get(loc)
        image_url = asset.image_link.square(150).url
    # Save the photo as a new last face for the poster.
    Lastface(owner=request.user.xid, face=asset.xid).save()
    # Flash doodad needs a 200, not a redirect.
    return HttpResponse(image_url, content_type='text/plain')
@oops
def favorite(request):
    """Create or remove the current user's favorite on a photo.

    POST with action=favorite (default) to add, anything else to remove.
    """
    if request.method != 'POST':
        return HttpResponse('POST required at this url', status=400, content_type='text/plain')
    action = request.POST.get('action', 'favorite')
    asset_id = request.POST.get('asset_id', '')
    try:
        # Exactly one XID must be embedded in the posted value. Unpacking a
        # findall() result of any other length raises ValueError -- not
        # TypeError, which the old handler caught and therefore never fired,
        # turning malformed ids into 500s instead of 404s.
        (asset_id,) = re.findall(r'6a\w+', asset_id)
    except ValueError:
        raise Http404
    if action == 'favorite':
        with typepad.client.batch_request():
            asset = Asset.get_by_url_id(asset_id)
        fav = Favorite()
        fav.in_reply_to = asset.asset_ref
        request.user.favorites.post(fav)
        typepadapp.signals.favorite_created.send(sender=fav, instance=fav, parent=asset,
            group=request.group)
        # Save the user's last face when favorited.
        try:
            last = Lastface.objects.get(owner=request.user.xid)
        except Lastface.DoesNotExist:
            pass
        else:
            Favoriteface(favoriter=request.user.xid, favorited=asset_id,
                lastface=last.face).save()
    else:
        # Getting the xid will do a batch, so don't do it inside our other batch.
        xid = request.user.xid
        with typepad.client.batch_request():
            asset = Asset.get_by_url_id(asset_id)
            fav = Favorite.get_by_user_asset(xid, asset_id)
        fav.delete()
        typepadapp.signals.favorite_deleted.send(sender=fav, instance=fav,
            parent=asset, group=request.group)
    return HttpResponse('OK', content_type='text/plain')
def uncache_favorites(sender, instance, **kwargs):
    """Signal handler: drop the cached favorites list for the favorite's author."""
    cache_key = 'favorites:%s' % instance.author.xid
    cache.delete(cache_key)

# Invalidate whenever a favorite is added or removed.
typepadapp.signals.favorite_created.connect(uncache_favorites)
typepadapp.signals.favorite_deleted.connect(uncache_favorites)
def uncache_lastface(sender, instance, **kwargs):
    """Signal handler: drop the cached last face for the Lastface's owner."""
    cache_key = 'lastface:%s' % instance.owner
    cache.delete(cache_key)

# Invalidate whenever a Lastface row is saved or deleted.
django.db.models.signals.post_save.connect(uncache_lastface, sender=Lastface)
django.db.models.signals.post_delete.connect(uncache_lastface, sender=Lastface)
@oops
def flag(request):
    """Record a flag against a photo; delete the photo after three distinct
    flaggers. Flagger lists live in the cache for a day.
    """
    if request.method != 'POST':
        return HttpResponse('POST required at this url', status=400, content_type='text/plain')
    action = request.POST.get('action', 'flag')
    asset_id = request.POST.get('asset_id', '')
    try:
        # Unpacking a findall() result of length != 1 raises ValueError,
        # not the TypeError the old handler caught (so the 404 never fired).
        (asset_id,) = re.findall(r'6a\w+', asset_id)
    except ValueError:
        raise Http404
    cache_key = 'flag:%s' % asset_id
    if action != 'flag':
        return HttpResponse('Only flag action is supported at this url', status=400, content_type='text/plain')
    # YAY UNATOMIC OPERATIONS (read-modify-write on the cache; racy by design)
    flaggers = cache.get(cache_key)
    if not flaggers:
        log.debug('No flaggers for %r yet, making a new list', asset_id)
        flaggers = []
    elif request.user.xid in flaggers:
        log.debug('%r re-flagged %r (ignored)', request.user.xid, asset_id)
        return HttpResponse('OK (though you already flagged it)', content_type='text/plain')
    flaggers.append(request.user.xid)
    if len(flaggers) >= 3:
        log.debug('%r was the last straw for %r! Deleting!', request.user.xid, asset_id)
        with typepad.client.batch_request():
            # Re-authenticate the client with the superuser credentials that can delete that.
            typepad.client.clear_credentials()
            backend = urlparse(settings.BACKEND_URL)
            csr = OAuthConsumer(settings.OAUTH_CONSUMER_KEY, settings.OAUTH_CONSUMER_SECRET)
            token = OAuthToken(settings.OAUTH_SUPERUSER_KEY, settings.OAUTH_SUPERUSER_SECRET)
            typepad.client.add_credentials(csr, token, domain=backend[1])
            asset = Asset.get_by_url_id(asset_id)
        asset.delete()
        typepadapp.signals.asset_deleted.send(sender=asset, instance=asset,
            group=request.group)
        del asset  # lose our reference to it
        log.debug('BALEETED')
        cache.delete(cache_key)
        log.debug('Emptied flaggers for %r now that it is deleted', asset_id)
        return HttpResponse('BALEETED', content_type='text/plain')
    else:
        cache.set(cache_key, flaggers, ONE_DAY)
        log.debug('Flaggers for %r are now %r', asset_id, flaggers)
        return HttpResponse('OK', content_type='text/plain')
def cull_old_lastfavfaces(sender, instance, group, **kwargs):
    """Signal handler: when an asset is deleted, remove any Lastface or
    Favoriteface rows that still reference it as a face."""
    Lastface.objects.filter(face=instance.xid).delete()
    Favoriteface.objects.filter(lastface=instance.xid).delete()

typepadapp.signals.asset_deleted.connect(cull_old_lastfavfaces)
@oops
def delete(request):
    """Delete a photo on the owner's behalf; responds 204 on success."""
    if request.method != 'POST':
        return HttpResponse('POST required at this url', status=400, content_type='text/plain')
    action = request.POST.get('action', 'delete')
    asset_id = request.POST.get('asset_id', '')
    try:
        # Unpacking a findall() result of length != 1 raises ValueError,
        # not the TypeError the old handler caught (so the 404 never fired).
        (asset_id,) = re.findall(r'6a\w+', asset_id)
    except ValueError:
        raise Http404
    if action == 'delete':
        # Getting the xid will do a batch, so don't do it inside our other batch.
        xid = request.user.xid
        with typepad.client.batch_request():
            asset = Asset.get_by_url_id(asset_id)
        asset.delete()
        typepadapp.signals.asset_deleted.send(sender=asset, instance=asset,
            group=request.group)
    return HttpResponse('', status=204)
@oops
def lastface(request, xid, spec):
    """Redirect to the TypePad image for user *xid*'s last face, at size *spec*.

    Falls back to the user's avatar when no Lastface is recorded; 404s when
    the avatar is just a default userpic. Resolved face ids are cached.
    """
    cache_key = 'lastface:%s' % xid
    face = cache.get(cache_key)
    if face is None:
        try:
            face = Lastface.objects.get(owner=xid)
        except Lastface.DoesNotExist:
            # Get that person's userpic.
            with typepad.client.batch_request():
                user = User.get_by_url_id(xid)
            face = user.avatar_link.by_width(200).url
            if 'default-userpics' in face:
                return HttpResponseNotFound('no such face', content_type='text/plain')
            # Reduce the avatar URL to its bare asset id.
            face = face.rsplit('/', 1)[-1].split('-', 1)[0]
        else:
            face = face.face
        cache.set(cache_key, face)
    url = 'http://a0.typepad.com/%s-%s' % (face, spec)
    return HttpResponseRedirect(url)
def facegrid(request):
    """Render a grid of photo assets from the group's latest ~150 events."""
    with typepad.client.batch_request():
        # Three subrequests in one batch: events 1-50, 51-100 and 101-150.
        pages = (
            request.group.events,
            request.group.events.filter(start_index=51, max_results=50),
            request.group.events.filter(start_index=101, max_results=50),
        )
    photos = [
        event.object
        for page in pages
        for event in page
        if event.object is not None and type(event.object) is Photo
    ]
    return TemplateResponse(request, 'makeaface/grid.html', {'photos': photos})
def faq(request):
    """Render the static FAQ page."""
    return TemplateResponse(request, 'makeaface/faq.html', {})
def mobile_photo(request):
    # Placeholder endpoint: the mobile upload flow is not implemented yet.
    raise NotImplementedError
def error(request):
    # Deliberately raises so the error-handling middleware/page can be
    # exercised from a URL.
    raise NotImplementedError
| StarcoderdataPython |
import pytest
from billy.utils.search import google_book_search
class TestGoogleBookSearch(object):
    """Tests for billy.utils.search.google_book_search."""

    def test_search_returns_200(self, mock):
        """Ensure a basic search returns a 200 request"""
        response = google_book_search("<NAME>")
        assert response["status"] == 200

    def test_search_body_returns_dict(self, mock):
        """Ensure we're getting a JSON dict back from google_book_search()"""
        response = google_book_search("<NAME>")
        # Exact-type check kept on purpose (subclasses would not count).
        assert type(response["body"]) is dict
| StarcoderdataPython |
1697802 | from flask import Blueprint, current_app, request, jsonify
from .model import City
from .serializer import CitySchema
from requests import get, post
import json
from datetime import date, datetime
from sqlalchemy import desc, func
from unidecode import unidecode
# Blueprint grouping the Climatempo city/forecast endpoints; presumably
# registered on the app by the application factory elsewhere in the project.
bp_citys = Blueprint('citys', __name__)
@bp_citys.route('/cidade/<id_da_cidade>/', methods=['GET'])
def register(id_da_cidade):
    """Fetch the Climatempo daily forecast for a city and persist each day.

    Queries the Climatempo web service for *id_da_cidade*, stores one City
    row per forecast day, and echoes the raw API payload with HTTP 201.
    """
    # NOTE(review): '<KEY>' is a placeholder for a hard-coded API token —
    # move the real token to configuration/environment, never source control.
    token_id = '15?token=<KEY>'
    url_endpoint = 'http://apiadvisor.climatempo.com.br/api/v1/forecast/locale/' + str(id_da_cidade) + '/days/' + token_id
    r = get(url_endpoint)
    data_json = r.json()

    # Hoisted out of the loop; this also avoids an UnboundLocalError at the
    # return statement when the API reports no forecast days.
    schema = CitySchema()
    for i in data_json['data']:
        dt = i["date_br"].split('/')
        date_br = date(day=int(dt[0]),
                       month=int(dt[1]),
                       year=int(dt[2])).strftime('%d/%m/%Y')
        dados = {
            "name": data_json["name"],
            "state": str(data_json["state"]),
            "country": data_json["country"],
            "date": str(date_br),
            "precipitation": str(i["rain"]["precipitation"]),
            'probability': str(i["rain"]["probability"]),
            "max_": str(i["temperature"]["max"]),
            "min_": str(i["temperature"]["min"])
        }
        # BUG FIX: load the dict directly. The old str(dados).replace("'", '"')
        # + json.loads round-trip produced invalid JSON whenever any value
        # contained a quote character.
        result = schema.load(dados)
        current_app.db.session.add(result)
        current_app.db.session.commit()
    return schema.jsonify(data_json), 201
@bp_citys.route('/analise/<start>/<end>/', methods=['GET'])
def analise(start, end):
    """Analyse stored forecasts between *start* and *end* (``d-m-Y``).

    Returns a JSON list combining the hottest-city entries and the per-city
    average precipitation over the requested window.
    """
    start_parts = str(start).split('-')
    end_parts = str(end).split('-')

    # BUG FIX: the start year was hard-coded to 2020; use the parsed year so
    # ranges outside 2020 are honoured.
    start = date(day=int(start_parts[0]),
                 month=int(start_parts[1]),
                 year=int(start_parts[2])).strftime('%d/%m/%Y')
    end = date(day=int(end_parts[0]),
               month=int(end_parts[1]),
               year=int(end_parts[2])).strftime('%d/%m/%Y')

    # NOTE(review): City.date is stored as a 'dd/mm/YYYY' string, so these
    # comparisons are lexicographic, not chronological — consider storing a
    # real date column.
    data = City.query.filter(City.date <= end).filter(City.date >= start)
    data = data.order_by(desc(City.name))

    temperature = max_temperature(data)
    precipitation = avg_precipitation(data)
    return jsonify(temperature + precipitation)
def max_temperature(data):
    """Return the cities holding the highest maximum temperature in *data*.

    *data* is an iterable/query of rows exposing ``.name`` and ``.max_``.
    Output: list of ``{"maior temperatura": {<ascii name>: <temp>}}`` dicts
    with duplicates collapsed, or an error dict on failure.
    """
    try:
        res_list = []
        temps = [int(row.max_) for row in data]
        if temps:
            # BUG FIX: the old scan was seeded with 0, so data sets whose
            # temperatures were all negative reported no winner at all.
            max_temp = max(temps)
            seen = []
            for row in data:
                if int(row.max_) == max_temp:
                    entry = {unidecode(str(row.name)): int(row.max_)}
                    # Collapse repeated (name, temp) pairs.
                    if entry not in seen:
                        seen.append(entry)
            res_list = [{"maior temperatura": entry} for entry in seen]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        res_list = {
            'excecao': 'Delculpe tivemos alguma problema ao aexecutar o metodo max_temperature'
        }
    return res_list
def avg_precipitation(data):
    """Compute the average precipitation for each contiguous city group.

    *data* is an iterable/query of rows ordered by city name, exposing
    ``.name`` and ``.precipitation``.  Returns a list of
    ``{"cidade com maior precipitacao": "<city> - <average>"}`` dicts, or an
    error dict on failure.

    NOTE(review): the key text says "maior precipitacao" (highest) but the
    function reports the average for *every* city; wording kept for API
    compatibility.
    """
    try:
        city = ''
        sum_total = 0
        count = 0
        list_precipitation = []
        for i in data:
            if city != '' and city != i.name:
                # City changed: flush the finished group before starting anew.
                list_precipitation.append({
                    "cidade com maior precipitacao": str(city) + ' - ' + str((sum_total / count))
                })
                sum_total = 0
                count = 0
            city = i.name
            sum_total = sum_total + int(i.precipitation)
            count = count + 1
        # BUG FIX: the old loop only emitted the final group when its last row
        # also continued an existing group; a city whose single row ended the
        # data set was silently dropped.
        if count:
            list_precipitation.append({
                "cidade com maior precipitacao": str(city) + ' - ' + str((sum_total / count))
            })
    except Exception:
        # Narrowed from a bare `except:`.
        list_precipitation = {
            'excecao': 'Delculpe, tivemos algum problema ao executar o metodo avg_precipitation'
        }
    return list_precipitation
1795610 | <filename>DFS_BFS/backtracking/combination_sum.py
"""
Leetcode 39.
Combination Sum
经典回溯。DFS,递归即可。
"""
from typing import List


class Solution:
    """Leetcode 39 — Combination Sum, solved with classic DFS backtracking.

    Candidates may be reused; restricting the recursive pool to ``pool[i:]``
    keeps combinations non-decreasing by index and therefore duplicate-free.
    (The ``from typing import List`` above fixes a latent NameError: the
    annotations were evaluated at class-creation time without it.)
    """

    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        ans: List[List[int]] = []

        def dfs(remaining: int, pool: List[int], path: List[int]) -> None:
            if remaining == 0:
                ans.append(path)
                # BUG FIX: stop immediately once the target is met; the old
                # code kept recursing after recording a solution, relying on
                # positive candidates to terminate and wasting work.
                return
            if remaining < 0:
                return
            for i in range(len(pool)):
                dfs(remaining - pool[i], pool[i:], path + [pool[i]])

        dfs(target, candidates, [])
        return ans
| StarcoderdataPython |
1733062 | # -*- coding: utf-8 -*-
# Author <NAME>@<EMAIL>
def extract_missing_tiles():
    """Compare the full tile list against already-downloaded tiles and write
    download URLs for the missing ones to ``missing_tile_urls.csv``.

    Returns the list of missing-tile URLs (empty when the sanity check on
    the matched/missing partition fails).
    """
    file_all = 'tuiles_swissimages_nord_vaudois.csv'
    # Alternate data sets (kept for reference):
    #file_existing = 'ortho.csv'
    #file_existing = 'lidar.csv'
    file_existing = 'mnt.csv'

    with open(file_existing) as fe, open(file_all) as fa:
        all_rows = [row.strip('\n') for row in fa.readlines()]
        existing_text = fe.read()

    # Tile ids use '_' in the master list but '-' in the download names.
    matchings = [row for row in all_rows if row.replace('_', '-') in existing_text]
    missings = [row.replace('_', '-') for row in all_rows
                if row.replace('_', '-') not in existing_text]

    # Alternate URL templates (kept for reference):
    #template = "https://data.geo.admin.ch/ch.swisstopo.swissimage-dop10/swissimage-dop10_2020_{}/swissimage-dop10_2020_{}_0.1_2056.tif"
    #template = "https://data.geo.admin.ch/ch.swisstopo.swisssurface3d/swisssurface3d_2015_{}/swisssurface3d_2015_{}_2056_5728.las.zip"
    template = "https://data.geo.admin.ch/ch.swisstopo.swissalti3d/swissalti3d_2019_{}/swissalti3d_2019_{}_0.5_2056_5728.tif"

    missing_urls = []
    if len(missings) + len(matchings) != len(all_rows):
        print("Error: sums of missing + matchings doesnt equals the length of the input data set!")
    else:
        print("Success!")
        missing_urls = [template.format(tile, tile) for tile in missings]

    with open('missing_tile_urls.csv', 'w') as f:
        for url in missing_urls:
            f.write(f"{url}\n")
    return missing_urls
| StarcoderdataPython |
3324690 | #!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2016 <NAME>
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import cv2
from deepgaze.color_detection import MultiBackProjectionColorDetector
# Load the scene image we want to filter.
img = cv2.imread('tiger.jpg')

# Collect the model templates (pre-cropped images); sub-frames of `img`
# could be used instead, e.g. img[225:275, 625:675].
template_paths = ['model_1.jpg', 'model_2.jpg', 'model_3.jpg',
                  'model_4.jpg', 'model_5.jpg']
template_list = [cv2.imread(path) for path in template_paths]

# Deepgaze multi-template colour detector: back-projection, convolution and
# masking are all applied by a single returnFiltered() call.
my_back_detector = MultiBackProjectionColorDetector()
my_back_detector.setTemplateList(template_list)

img_filtered = my_back_detector.returnFiltered(img,
                                               morph_opening=True, blur=True,
                                               kernel_size=3, iterations=1)
cv2.imwrite("result.jpg", img_filtered)
| StarcoderdataPython |
# NOTE(review): this flag's consumer is not visible in this file —
# presumably a resume-from-checkpoint switch; confirm at its import sites.
RESUME = False
| StarcoderdataPython |
3369338 | <filename>src/soda/sodac.py
#!/usr/bin/python3
import argparse
import logging
import os
import sys
import textx
from haoda import util
from soda import core, grammar
from soda.codegen.frt import core as frt
from soda.codegen.intel import opencl as iocl
from soda.codegen.xilinx import opencl as xocl
from soda.model import xilinx as model
from soda.optimization import args as opt_args
logging.basicConfig(level=logging.WARNING,
format='%(levelname)s:%(name)s:%(lineno)d: %(message)s')
logger = logging.getLogger().getChild(os.path.basename(__file__))
def main():
  """Parse CLI arguments, parse the SODA DSL source, and drive the backends.

  Builds a `core.Stencil` from the parsed model (command-line flags override
  directives embedded in the source), emits Xilinx OpenCL, Intel OpenCL and
  FRT code, and optionally runs resource/performance estimation.
  """
  parser = argparse.ArgumentParser(
      prog='sodac',
      description='Stencil with Optimized Dataflow Architecture '
      '(SODA) compiler')
  parser.add_argument('--verbose', '-v',
                      action='count',
                      dest='verbose',
                      help='increase verbosity')
  parser.add_argument('--quiet', '-q',
                      action='count',
                      dest='quiet',
                      help='decrease verbosity')
  parser.add_argument('--recursion-limit',
                      type=int,
                      dest='recursion_limit',
                      help='override Python recursion limit')
  parser.add_argument('--burst-width',
                      type=int,
                      dest='burst_width',
                      help='override burst width')
  parser.add_argument('--unroll-factor',
                      type=int,
                      metavar='UNROLL_FACTOR',
                      dest='unroll_factor',
                      help='override unroll factor')
  parser.add_argument('--replication-factor',
                      type=int,
                      metavar='REPLICATION_FACTOR',
                      dest='replication_factor',
                      help='override replication factor')
  parser.add_argument('--tile-size',
                      type=int,
                      nargs='+',
                      metavar='TILE_SIZE',
                      dest='tile_size',
                      help='override tile size; '
                           '0 means no overriding on that dimension')
  parser.add_argument('--dram-in',
                      type=str,
                      dest='dram_in',
                      help='override DRAM configuration for input')
  parser.add_argument('--dram-out',
                      type=str,
                      dest='dram_out',
                      help='override DRAM configuration for output')
  parser.add_argument('--iterate',
                      type=int,
                      metavar='#ITERATION',
                      dest='iterate',
                      help='override iterate directive; '
                           'repeat execution multiple times iteratively')
  parser.add_argument('--border',
                      type=str,
                      metavar='(ignore|preserve)',
                      dest='border',
                      help='override border handling strategy')
  parser.add_argument('--cluster',
                      type=str,
                      metavar='(none|fine|coarse|full)',
                      dest='cluster',
                      help='module clustering level, `none` generates '
                           'standalone compute / forward modules, `fine` '
                           'fuses forwarders into compute modules, `coarse` '
                           'fuses each stage together, `full` fuses '
                           'everything together')
  parser.add_argument(type=str,
                      dest='soda_src',
                      metavar='file',
                      help='soda source code')

  xocl.add_arguments(parser.add_argument_group('Xilinx OpenCL backend'))
  iocl.add_arguments(parser.add_argument_group('Intel OpenCL backend'))
  frt.add_arguments(parser.add_argument_group('FPGA runtime backend'))
  opt_args.add_arguments(parser.add_argument_group('SODA optimizations'))

  parser.add_argument('--model-file',
                      type=str,
                      dest='model_file',
                      metavar='file',
                      help='resource model specified as json file')
  parser.add_argument('--estimation-file',
                      type=str,
                      dest='estimation_file',
                      metavar='file',
                      help='report resource and performance estimation as '
                           'json file')

  args = parser.parse_args()

  # -v / -q shift the root logger level in steps of 10, clamped to
  # [DEBUG, CRITICAL].
  verbose = 0 if args.verbose is None else args.verbose
  quiet = 0 if args.quiet is None else args.quiet
  logging_level = (quiet-verbose)*10+logging.getLogger().getEffectiveLevel()
  if logging_level > logging.CRITICAL:
    logging_level = logging.CRITICAL
  if logging_level < logging.DEBUG:
    logging_level = logging.DEBUG
  logging.getLogger().setLevel(logging_level)
  logger.info('set log level to %s', logging.getLevelName(logging_level))
  # TODO: check tile size

  # Only allow raising the recursion limit; lowering it is ignored.
  if args.recursion_limit is not None:
    sys_recursion_limit = sys.getrecursionlimit()
    if sys_recursion_limit > args.recursion_limit:
      logger.warning(
          'Python system recursion limit (%d) > specified value (%d); '
          'the latter will be ignored', sys_recursion_limit, args.recursion_limit)
    else:
      sys.setrecursionlimit(args.recursion_limit)
      logger.warning('Python recursion limit is set to %d',
                     sys.getrecursionlimit())

  soda_mm = textx.metamodel_from_str(grammar.GRAMMAR, classes=grammar.CLASSES)
  logger.info('build metamodel')
  try:
    if args.soda_src == '-':
      soda_file_name = sys.stdin.name
      soda_model = soda_mm.model_from_str(sys.stdin.read())
    else:
      with open(args.soda_src, 'r') as soda_file:
        soda_model = soda_mm.model_from_str(soda_file.read())
        soda_file_name = soda_file.name
    logger.info('%s parsed as soda file', soda_file_name)
    logger.debug('soda program parsed:\n %s',
                 str(soda_model).replace('\n', '\n '))

    # Per-dimension tile size: CLI values override the source directive; a
    # CLI value of 0 keeps the directive. The innermost dimension is untiled.
    tile_size = []
    for dim in range(soda_model.dim-1):
      if (args.tile_size is not None and
          dim < len(args.tile_size) and
          args.tile_size[dim] > 0):
        tile_size.append(args.tile_size[dim])
      else:
        tile_size.append(soda_model.tile_size[dim])
    tile_size.append(0)

    # Replication implies unrolling by the same factor.
    if args.replication_factor is None:
      if args.unroll_factor is not None:
        unroll_factor = args.unroll_factor
      else:
        unroll_factor = soda_model.unroll_factor
      replication_factor = 1
    else:
      unroll_factor = args.replication_factor
      replication_factor = args.replication_factor

    stencil = core.Stencil(
        burst_width=args.burst_width if args.burst_width is not None
                    else soda_model.burst_width,
        border=args.border if args.border is not None
               else soda_model.border,
        iterate=args.iterate if args.iterate is not None
                else soda_model.iterate,
        cluster=args.cluster if args.cluster is not None
                else soda_model.cluster,
        dram_in=args.dram_in,
        dram_out=args.dram_out,
        app_name=soda_model.app_name,
        input_stmts=soda_model.input_stmts,
        param_stmts=soda_model.param_stmts,
        local_stmts=soda_model.local_stmts,
        output_stmts=soda_model.output_stmts,
        dim=soda_model.dim,
        tile_size=tile_size,
        unroll_factor=unroll_factor,
        replication_factor=replication_factor,
        optimizations=opt_args.get_kwargs(args),
    )
    logger.debug('stencil obtained: %s', stencil)

    xocl.print_code(stencil, args, parser)
    iocl.print_code(stencil, args)
    frt.print_code(stencil, args)

    if args.estimation_file is not None:
      # Derive the model file from the source name when not given explicitly.
      if args.model_file is None:
        if args.soda_src.endswith('.soda'):
          model_file = args.soda_src[:-len('.soda')]+'_model.json'
        else:
          logger.fatal('cannot find resource model file')
          sys.exit(1)
      else:
        model_file = args.model_file

      def print_estimation(model_fp, estimation_fp):
        # model.print_estimation presumably expects open file objects for
        # both the resource model (input) and the report (output), matching
        # how the original code opened them.
        model.print_estimation(stencil, model_fp, estimation_fp)

      # BUG FIX: the previous code defined print_estimation twice — calling
      # the outer definition merely re-defined the inner one and returned, so
      # the estimation report was never produced; it also attempted to run
      # the estimation once per file-resolution branch. Resolve '-' to
      # stdin/stdout, open real paths, and run exactly once.
      model_fp = sys.stdin if model_file == '-' else open(model_file)
      try:
        if args.estimation_file == '-':
          print_estimation(model_fp, sys.stdout)
        else:
          with open(args.estimation_file, 'w') as estimation_fp:
            print_estimation(model_fp, estimation_fp)
      finally:
        if model_fp is not sys.stdin:
          model_fp.close()

  except textx.exceptions.TextXSyntaxError as e:
    logger.error(e)
    sys.exit(1)
  except util.SemanticError as e:
    logger.error(e)
    sys.exit(1)
  except util.SemanticWarn as w:
    logger.warning(w)


if __name__ == '__main__':
  main()
| StarcoderdataPython |
1743753 | from networkx.algorithms.bipartite import hopcroft_karp_matching
from networkx.algorithms.cycles import find_cycle
from networkx.algorithms.matching import is_perfect_matching
from networkx.exception import NetworkXNoCycle
from networkx import DiGraph, connected_components, weakly_connected_components, is_directed
from more_itertools import peekable
from networkx import get_node_attributes
import argparse
def enum_perfect_matching(g):
    """Enumerate perfect matchings of bipartite graph *g* (Uno's algorithm).

    NOTE(review): this entry point looks unfinished. `matches` is collected
    but never returned, and `match` (a plain dict from Hopcroft-Karp) is
    handed to enum_perfect_matching_iter, which calls DiGraph methods such
    as `.remove_edge()` on it and will raise AttributeError as soon as a
    cycle is found. The commented-out build_d() call below suggests the
    intent was to convert the dict into a matching DiGraph first; fix this
    together with enum_perfect_matching_iter.
    """
    match = maximum_matching_all(g)
    matches = [match]
    # m, d = build_d(g, match)
    if is_perfect_matching(g, match):
        enum_perfect_matching_iter(matches, g, match)
    else:
        print("No perfect matching found!")
def enum_perfect_matching_iter(matches, g, m):
    """Recursive core of the perfect-matching enumeration.

    matches -- accumulator list (mutated in place)
    g       -- directed bipartite graph still under consideration
    m       -- current matching

    NOTE(review): `m` is used both as a build_d() argument (which expects a
    dict with .items()) and with DiGraph edge methods (.remove_edge /
    .add_edge) — one of the two must be wrong. Also, the freshly derived
    matching `m_prime` is never appended to `matches`, so no new matchings
    are ever recorded. Fix together with enum_perfect_matching.
    """
    # Step 1
    if not peekable(g.edges()):
        return
    # Step 2 Find a cycle in G
    _, d = build_d(g, m)
    cycle = find_cycle_in_dgm(d)
    if cycle:
        # Step 3 - Choose edge e from the cycle obtained
        # Step 4 - Find a cycle containing e via DFS
        # It is already done as we picked e from the cycle.
        # Step 5 - Exchange edges to generate new M'
        m_prime = m.copy()
        e_start = cycle[0]
        s = cycle[0]
        e_end = 0
        # to detect if we need to add or delete this edge
        flip = 0
        # to detect if it is the first time to visit the start
        init = 0
        # define the precursor
        temp = s
        # Step 5: Exchange edges along the cycle and output
        # obtained maximum M'
        for x in cycle:
            if x == s and init == 0:
                init = 1
                continue
            if flip == 0:
                if init == 1:
                    e_end = x
                    init = 2
                m_prime.remove_edge(temp, x)
                flip = 1
            else:
                m_prime.add_edge(x, temp)
                flip = 0
            temp = x
        # Pre-requisite for Step 6 and 7
        g_plus = construct_g_plus(g, e_start, e_end)
        g_minus = construct_g_minus(g, e_start, e_end)
        # Step 6 and 7
        enum_perfect_matching_iter(matches, g_plus, m)
        enum_perfect_matching_iter(matches, g_minus, m_prime)
    else:
        return
def enum_maximum_matching(g):
    """Enumerate all maximum matchings of bipartite graph *g*.

    Returns a list of dict matchings (hopcroft_karp_matching form), one per
    maximum matching found.
    """
    initial = maximum_matching_all(g)
    m, d = build_d(g, initial)
    matches = [m]

    # The recursion works on a directed view; orient an undirected input.
    working_graph = g if g.is_directed() else build_g(g)
    enum_maximum_matching_iter(matches, working_graph, m, d)

    # Each collected matching is a DiGraph; convert back to dict form.
    return [maximum_matching_all(match_graph) for match_graph in matches]
def enum_maximum_matching_iter(matches, g, m, d):
    """Recursive core of Uno's maximum-matching enumeration.

    matches -- accumulator of matching DiGraphs (mutated in place)
    g       -- the directed bipartite graph still under consideration
    m       -- the current maximum matching as a DiGraph
    d       -- the auxiliary digraph D(G, M), in which directed cycles
               correspond to M-alternating cycles of g
    """
    # If there are no edges in G
    if not peekable(g.edges()) or not peekable(d.edges()):
        print("D(G, M) or G has no edges!")
        return
    else:
        # Step 2 Find a cycle in D(G, M)
        cycle = find_cycle_in_dgm(d)
        if cycle:
            # Step 3 - Choose edge e from the cycle obtained
            # Step 4 - Find a cycle containing e via DFS
            # It is already done as we picked e from the cycle.
            # Step 5 - Exchange edges to generate new M'
            m_prime = m.copy()
            e_start = cycle[0]
            s = cycle[0]
            e_end = 0
            # to detect if we need to add or delete this edge
            flip = 0
            # to detect if it is the first time to visit the start
            init = 0
            # define the precursor
            temp = s
            # Step 5: Exchange edges along the cycle and output
            # obtained maximum M'
            for x in cycle:
                if x == s and init == 0:
                    init = 1
                    continue
                if flip == 0:
                    if init == 1:
                        # The first removed edge (e_start, e_end) is the
                        # pivot edge e for Steps 6/7.
                        e_end = x
                        init = 2
                    m_prime.remove_edge(temp, x)
                    flip = 1
                else:
                    m_prime.add_edge(x, temp)
                    flip = 0
                temp = x
            # Pre-requisite for Step 6 and 7
            g_plus = construct_g_plus(g, e_start, e_end)
            g_minus = construct_g_minus(g, e_start, e_end)
            # m is temporarily mutated to derive d_plus, then restored.
            m.remove_edge(e_start, e_end)
            d_plus = construct_d_from_gm2(g_plus, m)
            m.add_edge(e_start, e_end)
            d_minus = construct_d_from_gm2(g_minus, m_prime)
            # Step 6 and 7
            enum_maximum_matching_iter(matches, g_plus, m, d_plus)
            enum_maximum_matching_iter(matches, g_minus, m_prime, d_minus)
        else:
            # Step 8: no alternating cycle — look for a length-2 alternating
            # path (free vertex v, matched neighbour w) to derive M'.
            nodes = list(g.nodes())
            pair = {key: float("inf") for key in nodes}
            for v in nodes:
                for w in m.successors(v):
                    pair[v] = w
                    pair[w] = v
            for v in nodes:
                if pair[v] == float("inf"):
                    # if v is in the left side
                    for w in g.successors(v):
                        if pair[w] != float("inf"):
                            m_prime = m.copy()
                            m_prime.add_edge(v, w)
                            m_prime.remove_edge(pair[w], w)
                            matches.append(m_prime)
                            g_plus = construct_g_plus(g, v, w)
                            g_minus = construct_g_minus(g, v, w)
                            d_plus = construct_d_from_gm2(g_plus, m_prime)
                            d_minus = construct_d_from_gm2(g_minus, m)
                            enum_maximum_matching_iter(matches, g_plus, m_prime, d_plus)
                            enum_maximum_matching_iter(matches, g_minus, m, d_minus)
                            return
                    # if v is in the right side
                    for w in d.successors(v):
                        if pair[w] != float("inf"):
                            m_prime = m.copy()
                            m_prime.add_edge(w, v)
                            m_prime.remove_edge(w, pair[w])
                            matches.append(m_prime)
                            g_plus = construct_g_plus(g, w, v)
                            d_plus = construct_d_from_gm2(g_plus, m_prime)
                            g_minus = construct_g_minus(g, w, v)
                            d_minus = construct_d_from_gm2(g_minus, m)
                            enum_maximum_matching_iter(matches, g_plus, m_prime, d_plus)
                            enum_maximum_matching_iter(matches, g_minus, m, d_minus)
                            return
# -----------------------------Helper functions--------------------------
def maximum_matching_all(bipartite_graph):
    """Run Hopcroft-Karp on every connected component and merge the results
    into a single matching dict (hopcroft_karp_matching requires connected
    input)."""
    if is_directed(bipartite_graph):
        components = weakly_connected_components(bipartite_graph)
    else:
        components = connected_components(bipartite_graph)
    combined = dict()
    for component in components:
        subgraph = bipartite_graph.subgraph(component)
        combined.update(hopcroft_karp_matching(subgraph))
    return combined
def build_g(undirected_graph):
    """Orient an undirected bipartite graph: every edge points from the
    side-0 ('biparite' == 0) endpoint to the side-1 endpoint.

    Note: the attribute key is spelled 'biparite' project-wide; keep it.
    """
    directed = DiGraph()
    for node, attrs in undirected_graph.nodes(data=True):
        # Normalize the side marker to exactly 0 or 1, as the original did.
        directed.add_node(node, biparite=0 if attrs['biparite'] == 0 else 1)
    side = get_node_attributes(undirected_graph, 'biparite')
    for u, v in undirected_graph.edges():
        if side[u] == 0:
            directed.add_edge(u, v)
    return directed
# d - same as g but points the other way
# m - stores matches
def build_d(g, match):
    """Build the matching DiGraph `m` and auxiliary digraph `d` from `g` and
    a dict matching.

    In `d`, matched edges point left(0) -> right(1) and unmatched edges
    point right -> left, so directed cycles in `d` are M-alternating cycles.

    NOTE(review): in the matched case with data[source] == 1, the edge is
    added to `d` but never to `m`; this is only safe if every edge of `g` is
    presented left->right (as build_g guarantees) — confirm for other
    callers.
    """
    d = DiGraph()
    m = DiGraph()
    for node, data in g.nodes(data=True):
        d.add_node(node, biparite=data['biparite'])
        m.add_node(node, biparite=data['biparite'])
    # The matching dict contains both directions (u->v and v->u).
    m_edges = []
    for s, t in match.items():
        m_edges.append((s, t))
    # `data` is re-bound here to the side-marker map (shadows the loop var).
    data = get_node_attributes(g, 'biparite')
    for source, target in g.edges():
        if (target, source) in m_edges or (source, target) in m_edges:
            if data[source] == 0:
                d.add_edge(source, target)
                m.add_edge(source, target)
            else:
                d.add_edge(target, source)
        else:
            if data[source] == 0:
                d.add_edge(target, source)
            else:
                d.add_edge(source, target)
    return m, d
def find_cycle_in_dgm(d):
    """Return one cycle of *d* as an ordered node list with the start node
    repeated at the end, or None when *d* is acyclic."""
    for start in d.nodes():
        try:
            edges = find_cycle(d, source=start, orientation=None)
        except NetworkXNoCycle:
            continue
        path = []
        for u, v in edges:
            if u not in path:
                path.append(u)
            if v not in path:
                path.append(v)
        # Close the walk by repeating the starting node.
        path.append(start)
        return path
    return None
def construct_g_minus(g, e_start, e_end):
    """Return a copy of *g* with the single edge (e_start, e_end) removed
    (the 'G-' branch: matchings that avoid edge e)."""
    reduced = g.copy()
    reduced.remove_edge(e_start, e_end)
    return reduced
def construct_g_plus(g, e_start, e_end):
    """Return a copy of *g* for the 'G+' branch (matchings that contain edge
    e = (e_start, e_end)): every out-edge of e_start and every other in-edge
    of e_end is removed, effectively committing both endpoints to e."""
    g_plus = g.copy()
    for succ in g.successors(e_start):
        g_plus.remove_edge(e_start, succ)
    for pred in g.predecessors(e_end):
        if pred != e_start:
            g_plus.remove_edge(pred, e_end)
    return g_plus
def construct_d_from_gm2(g_plus, m_prime):
    """Rebuild the auxiliary digraph D(G, M): keep matched edges as-is and
    flip the direction of every edge not in the matching *m_prime*."""
    d = g_plus.copy()
    for u, v in g_plus.edges():
        if not m_prime.has_edge(u, v):
            # Same add-then-remove order as before (safe for self-loops).
            d.add_edge(v, u)
            d.remove_edge(u, v)
    return d
# Follows same format as the Java code
# The main advantage, Java requires nodes to be integers
# This version doesn't need the nodes to be integers!
def read_graph(graph_file):
    """Parse a space-separated edge-list file into a directed bipartite
    graph; left-column nodes get biparite=0, right-column nodes biparite=1.

    The first two lines (node and edge counts) are skipped — NetworkX does
    not need them.
    """
    parsed = DiGraph()
    print("Opening Graph: " + graph_file)
    with open(graph_file, 'r') as fd:
        next(fd)
        next(fd)
        for line in fd:
            left, right = line.strip().split(' ')
            parsed.add_node(left, biparite=0)
            parsed.add_node(right, biparite=1)
            parsed.add_edge(left, right)
    return parsed
# Contains pre-configured answers
def read_answers():
    """Load pre-configured answers from 'answers.csv'.

    NOTE(review): this looks unfinished — the loop calls line.strip() and
    discards the result, so the returned dict is always empty. Confirm the
    intended CSV format and populate `answers` accordingly.
    """
    answers = dict()
    with open('answers.csv', 'r') as fd:
        for line in fd:
            line.strip()
    return answers
if __name__ == '__main__':
    # CLI entry point: read a bipartite edge-list file and print every
    # maximum matching found.
    parser = argparse.ArgumentParser(prog='A python program that provided a bipartite graph can compute all '
                                          'maximum matches')
    parser.add_argument('--input', '-i', dest='graph', action='store',
                        help="Input bipartite graph to get all maximum matches", type=str)
    args = parser.parse_args()
    graph = read_graph(args.graph)
    for max_match in enum_maximum_matching(graph):
        print(max_match)
| StarcoderdataPython |
1748581 | import torch
class Net(torch.nn.Module):
    """Tiny MLP: Linear(1->2) -> BatchNorm1d(2) -> Dropout(0.5) -> Linear(2->1).

    Submodules are registered in the original order so parameter iteration
    (and hence optimizer behaviour) is unchanged.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.linear1 = torch.nn.Linear(1, 2)
        self.dropout = torch.nn.Dropout(0.5)
        self.batchnorm = torch.nn.BatchNorm1d(2)
        self.linear2 = torch.nn.Linear(2, 1)

    def forward(self, x):
        hidden = self.linear1(x)
        hidden = self.batchnorm(hidden)
        hidden = self.dropout(hidden)
        return self.linear2(hidden)
def train_func(model, iterations):
    """Run *iterations* SGD steps on random 2x1 inputs, minimizing the sum
    of squared outputs. Handles models returning a tensor, a tuple (first
    element used) or a dict (the "logits" entry used)."""
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    for _ in range(iterations):
        optimizer.zero_grad()
        raw = model(torch.randn(2, 1))
        if isinstance(raw, tuple):
            logits = raw[0]
        elif isinstance(raw, dict):
            logits = raw["logits"]
        else:
            logits = raw
        (logits ** 2).sum().backward()
        optimizer.step()
def not_all_same(samples):
    """Return True if at least one tensor in *samples* differs (per
    ``Tensor.equal``) from the first one; False for empty input.

    Uses a generator instead of materializing a list, so the scan
    short-circuits on the first differing tensor.
    """
    return not all(s.equal(samples[0]) for s in samples)
| StarcoderdataPython |
1613523 | <reponame>t0930198/OAI_nb_IoT
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.0 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * <EMAIL>
# */
# \file case01.py
# \brief test case 01 for OAI: compilations
# \author <NAME>
# \date 2014
# \version 0.1
# @ingroup _test
import log
import openair
import core
# Substrings that mark a failed `make` in the captured build output.
makerr1 = '***'
makerr2 = 'Error 1'
# Test case 101: compile the OAI rel8 PHY simulators (dlsim, ulsim) and log
# per-test pass/fail. NOTE: this file is Python 2 (`except E, e:` syntax).
def execute(oai, user, pw, host,logfile,logdir,debug):
    # `rv` is 1 on overall success, 0 if any compilation failed.
    case = '101'
    rv = 1;
    oai.send('cd $OPENAIR1_DIR;')
    oai.send('cd SIMULATION/LTE_PHY;')

    try:
        log.start()
        test = '01'
        name = 'Compile oai.rel8.phy.dlsim.make'
        conf = 'make dlsim' # PERFECT_CE=1 # for perfect channel estimation
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for dlsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
        oai.send('make clean; make cleanall;')
        oai.send('rm -f ./dlsim.rel8.'+host)
        # send_expect_false fails the test if the make-error marker appears
        # in the output within the 1500 s timeout.
        oai.send_expect_false('make dlsim -j4' + tee, makerr1, 1500)
        oai.send('cp ./dlsim ./dlsim.rel8.'+host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile,trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    try:
        log.start()
        test = '02'
        name = 'Compile oai.rel8.phy.ulsim.make'
        conf = 'make ulsim'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ulsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
        oai.send('make cleanall;')
        oai.send('rm -f ./ulsim.rel8.'+host)
        oai.send_expect_false('make ulsim -j4' + tee, makerr1, 1500)
        oai.send('cp ./ulsim ./ulsim.rel8.'+host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile,trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    return rv
| StarcoderdataPython |
14394 | import json
import os
def qald(in_folder, out_folder):
    """Split the QALD-7 question sets into train/dev/test text files.

    The official training set is split 75/25 into train/dev; the official
    test set is written as-is. One question string per line.
    """
    train = json.load(open(os.path.join(in_folder, "qald-7-train-en-wikidata.json")))
    test = json.load(open(os.path.join(in_folder, "qald-7-test-en-wikidata-withoutanswers.json")))

    train_q = [q["string"] for qs in train["questions"] for q in qs["question"]]
    test_q = [q["string"] for qs in test["questions"] for q in qs["question"]]

    split_idx = int(len(train_q) * 0.75)
    dev_q = train_q[split_idx:]
    train_q = train_q[:split_idx]

    for questions, split in zip([train_q, dev_q, test_q], ["train", "dev", "test"]):
        os.makedirs(os.path.join(out_folder, split), exist_ok=True)
        with open(os.path.join(out_folder, split, "qald-7.txt"), "w", encoding="utf-8") as f:
            for q in questions:
                f.write(q + "\n")
def websqp(in_folder, out_folder):
    """Split the WebQSP question sets into train/dev/test text files.

    The official training set is split 75/25 into train/dev; the official
    test set is written as-is. One raw question per line.
    """
    train = json.load(open(os.path.join(in_folder, "WebQSP.train.json"), encoding="utf-8"))
    test = json.load(open(os.path.join(in_folder, "WebQSP.test.json"), encoding="utf-8"))

    train_q = [q["RawQuestion"] for q in train["Questions"]]
    test_q = [q["RawQuestion"] for q in test["Questions"]]

    split_idx = int(len(train_q) * 0.75)
    dev_q = train_q[split_idx:]
    train_q = train_q[:split_idx]

    for questions, split in zip([train_q, dev_q, test_q], ["train", "dev", "test"]):
        os.makedirs(os.path.join(out_folder, split), exist_ok=True)
        with open(os.path.join(out_folder, split, "webqsp.txt"), "w", encoding="utf-8") as f:
            for q in questions:
                f.write(q + "\n")
if __name__ == "__main__":
    # NOTE(review): developer-machine absolute paths; parameterize before
    # running anywhere else.
    qald(r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa\qald", r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa")
    websqp(r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa\WebQSP\data", r"C:\Users\Gregor\Documents\Programming\square-skill-selector\data\kbqa")
78159 | # -*- coding: utf-8 -*-
from odoo.tests import common
import odoo
# Base64-encoded 1x1 GIF: the smallest valid image payload, used as binary
# test data throughout this module's tests.
GIF = b"R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs="
class test_ir_http_mimetype(common.TransactionCase):
    """Checks that ir.http.binary_content() reports the right Content-Type
    for attachments and binary fields, and that attachment access rules and
    access tokens are enforced."""

    def test_ir_http_mimetype_attachment(self):
        """ Test mimetype for attachment """
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'Test mimetype gif',
            'datas_fname': 'file.gif'})

        status, headers, content = self.env['ir.http'].binary_content(
            id=attachment.id,
            mimetype=None,
            default_mimetype='application/octet-stream',
            env=self.env
        )
        mimetype = dict(headers).get('Content-Type')
        self.assertEqual(mimetype, 'image/gif')

    def test_ir_http_mimetype_attachment_name(self):
        """ Test mimetype for attachment with bad name"""
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'Test mimetype gif with png name',
            'datas_fname': 'file.png'})

        status, headers, content = self.env['ir.http'].binary_content(
            id=attachment.id,
            mimetype=None,
            default_mimetype='application/octet-stream',
            env=self.env
        )
        mimetype = dict(headers).get('Content-Type')
        # The filename extension wins over the actual content here.
        # TODO: fix and change it in master, should be image/gif
        self.assertEqual(mimetype, 'image/png')

    def test_ir_http_mimetype_basic_field(self):
        """ Test mimetype for classic field """
        partner = self.env['res.partner'].create({
            'image': GIF,
            'name': 'Test mimetype basic field',
        })

        status, headers, content = self.env['ir.http'].binary_content(
            model='res.partner',
            id=partner.id,
            field='image',
            default_mimetype='application/octet-stream',
            env=self.env
        )
        mimetype = dict(headers).get('Content-Type')
        self.assertEqual(mimetype, 'image/gif')

    def test_ir_http_mimetype_computed_field(self):
        """ Test mimetype for computed field wich resize picture"""
        prop = self.env['ir.property'].create({
            'fields_id': self.env['ir.model.fields'].search([], limit=1).id,
            'name': "Property binary",
            'value_binary': GIF,
            'type': 'binary',
        })

        resized = odoo.tools.image_get_resized_images(prop.value_binary, return_big=True, avoid_resize_medium=True)['image_small']
        # Simul computed field which resize and that is not attachement=True (E.G. on product)
        prop.write({'value_binary': resized})

        status, headers, content = self.env['ir.http'].binary_content(
            model='ir.property',
            id=prop.id,
            field='value_binary',
            default_mimetype='application/octet-stream',
            env=self.env
        )
        mimetype = dict(headers).get('Content-Type')
        self.assertEqual(mimetype, 'image/gif')

    def test_ir_http_attachment_access(self):
        """ Test attachment access with and without access token """
        public_user = self.env.ref('base.public_user')
        attachment = self.env['ir.attachment'].create({
            'datas': GIF,
            'name': 'Test valid access token with image',
            'datas_fname': 'image.gif'
        })

        # All requests are made as the public user; only the kwargs vary.
        defaults = {
            'id': attachment.id,
            'default_mimetype': 'image/gif',
            'env': public_user.sudo(public_user.id).env,
        }

        def test_access(**kwargs):
            status, _, _ = self.env['ir.http'].binary_content(
                **dict(defaults, **kwargs)
            )
            return status

        status = test_access()
        self.assertEqual(status, 403, "no access")

        status = test_access(access_token=u'Secret')
        self.assertEqual(status, 403,
            "no access if access token for attachment without access token")

        attachment.access_token = u'Secret'
        status = test_access(access_token=u'Secret')
        self.assertEqual(status, 200, "access for correct access token")

        status = test_access(access_token=u'Wrong')
        self.assertEqual(status, 403, "no access for wrong access token")

        attachment.public = True
        status = test_access()
        self.assertEqual(status, 200, "access for attachment with access")

        status = test_access(access_token=u'Wrong')
        self.assertEqual(status, 403,
            "no access for wrong access token for attachment with access")

        attachment.unlink()
        status = test_access()
        self.assertEqual(status, 404, "no access for deleted attachment")

        status = test_access(access_token=u'Secret')
        self.assertEqual(status, 404,
            "no access with access token for deleted attachment")
| StarcoderdataPython |
4802612 | from graphene import ObjectType, Float, String, Int, Field, DateTime, List, __version__, Schema
class TestGraphene:
    """Benchmark case that validates payloads by executing a graphene query."""
    # Identification reported by the benchmark harness.
    package = "graphene"
    version = __version__

    def __init__(self, allow_extra):
        # Build the GraphQL schema once; validate() then only executes a query.
        class Location(ObjectType):
            latitude = Float()
            longitude = Float()

        class Skill(ObjectType):
            subject = String()
            subject_id = Int()
            category = String()
            qual_level = String()
            qual_level_id = Int()
            qual_level_ranking = Float()

        class Model(ObjectType):
            id = Int()
            client_name = String()
            sort_index = Float()
            client_phone = String()
            location = Field(Location)
            contractor = Int()
            upstream_http_referrer = String()
            grecaptcha_response = String()
            last_updated = DateTime()
            skills = List(Skill)

        class Query(ObjectType):
            model = Field(Model)

            @staticmethod
            def resolve_model(root, _context):
                # The raw input dict is applied directly as the Model fields.
                return Model(**root)

        self.allow_extra = allow_extra  # unused
        self.schema = Schema(Query)

    def validate(self, data):
        """Run the benchmark query against *data*.

        Returns ``(True, result)`` on success, ``(False, error_text)`` when
        the query reported errors.
        """
        # NOTE(review): this unpacks execute() into (result, errors); newer
        # graphene versions return an ExecutionResult object that is not
        # unpackable -- confirm the pinned graphene version.
        result, errors = self.schema.execute(
            source="""{
            model {
              id,
              clientName,
              sortIndex,
              clientPhone,
              location {
                latitude,
                longitude
              },
              contractor,
              upstreamHttpReferrer,
              grecaptchaResponse,
              # lastUpdated,
              skills {
                subject,
                subjectId,
                category,
                qualLevel,
                qualLevelId,
                qualLevelRanking
              }
            }
          }""",
            root_value=data,
        )
        if errors:
            return False, str(errors)
        return True, result
| StarcoderdataPython |
1617172 | """Support for Extreme SLX."""
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class ExtremeSlxSSH(CiscoSSHConnection):
    """Netmiko driver for Extreme SLX devices."""

    def enable(self, *args, **kwargs):
        """Extreme SLX has no enable mode; intentionally a no-op."""

    def exit_enable_mode(self, *args, **kwargs):
        """Extreme SLX has no enable mode; intentionally a no-op."""

    def special_login_handler(self, delay_factor=1):
        """Send a return and pause briefly so the device settles after login."""
        factor = self.select_delay_factor(delay_factor)
        self.write_channel(self.RETURN)
        time.sleep(factor)

    def save_config(
        self,
        cmd="copy running-config startup-config",
        confirm=True,
        confirm_response="y",
    ):
        """Persist the running configuration on Extreme SLX."""
        return super().save_config(
            cmd=cmd, confirm=confirm, confirm_response=confirm_response
        )
| StarcoderdataPython |
3220838 | #! /bin/env python
# utility to add GSSAuthName, NFSv4Name, NFSv4RemoteGroup to ldap
# given userid will lookup uid/gidNumber.
# intentionally a little backwards in requiring GSS name because it is useless
# in our context to have a name mapping without the GSS attribute
# LDAP object doesn't require GSS attr but does require NFSv4RemoteUser
# simple class to share config init and some common calls among utils
from ldaputil import LdapUtil
import argparse
from argparse import RawDescriptionHelpFormatter
import sys
import os
import re
# Command-line interface definition. Argument order below is user-visible
# in the generated --help output.
parser = argparse.ArgumentParser(description='Utility to add GSSAuthName, NFSv4Name, NFSv4RemoteGroup to ldap')
# Default config file lives next to this script.
def_config = os.path.dirname(os.path.realpath(sys.argv[0])) + '/ldaputil.conf'
parser.add_argument('-c', '--config',
                    default=def_config,
                    help='Optional path to config file (default {0})'.format(def_config))
# --delete and --list are mutually exclusive actions; giving neither flag
# means "add a new mapping".
action = parser.add_mutually_exclusive_group()
action.add_argument('-d', '--delete',
                    action='store_true',
                    help='Delete specified principal/group/remote user mapping (depending on args)')
action.add_argument('-l', '--list',
                    action='store_true',
                    help='List mappings for user argument or all users if none specified')
parser.add_argument('-g', '--group',
                    action='store_true',
                    help='Assign or list NFSv4 group mapping to id argument (default is to assign Kerberos principal set by mapping arg)')
parser.add_argument('-u', '--user',
                    default=None,
                    help='NFSv4 user mapping (<EMAIL>). If not provided for a Kerberos mapping then the lowercased mapping value is used.')
# Positional arguments are optional at parse time; combinations are
# validated manually after parse_args() below.
parser.add_argument('id',
                    nargs='?',
                    help='User or group name string matching configured uid_attr or gid_attr in LDAP directory')
parser.add_argument('mapping',
                    help='Kerberos principal or NFSv4 remote group to be added to id',
                    nargs='?')
args = parser.parse_args()
ldap = LdapUtil(args.config)
question = lambda q: raw_input(q).lower().strip()[0] == "y" or sys.exit(0)
if not (args.delete or args.mapping or args.list):
parser.error('Argument "mapping" required')
if not (args.list or args.id):
parser.error('Argument "id" required')
if args.list and args.id == None:
args.id = '*'
if args.group:
idnumbers = ldap.get_group_attr(args.id)
if not idnumbers:
print 'LDAP => Group {0} was not found in {1}'.format(args.id,ldap.ldap_groupdn)
sys.exit(1)
else:
idnumbers = ldap.get_user_attr(args.id)
if not idnumbers:
print 'LDAP => User {0} was not found in {1}'.format(args.id,ldap.ldap_userdn)
sys.exit(1)
if not args.user and not args.list and not args.delete:
print 'Using {0} as NFSv4 user mapping'.format(args.mapping.lower())
# Apply the requested action (list / delete / add) to every directory entry
# that matched the id argument.
for d in idnumbers:
    # get a list of entries matching arguments to confirm delete
    if args.list or args.delete:
        op = 'Showing'
        if args.group:
            entries = ldap.get_group_mappings(d['gidnumber'], args.mapping)
        else:
            entries = ldap.get_user_mappings(d['uidnumber'], args.mapping, args.user)
        # Nothing to show for this entry in list mode: move on silently.
        if args.list and len(entries) == 0:
            continue
        if args.delete:
            op = 'Deleted'
            # Deleting more than one entry at once requires confirmation;
            # question() exits the program on anything but "y...".
            if len(entries) > 1:
                ldap.format_group_entries(entries) if args.group else ldap.format_user_entries(entries)
                question('This will delete multiple entries, confirm delete (y/n): ')
            if args.group:
                entries = ldap.delete_group_mappings(d['gidnumber'], args.mapping)
            else:
                entries = ldap.delete_user_mapping(d['uidnumber'], args.mapping, args.user)
    elif not args.list:
        # Default action: add a new mapping.
        op = 'New'
        if args.group:
            entries = ldap.add_group_mapping(d['gidnumber'], args.mapping)
        else:
            entries = ldap.add_user_mapping(d['uidnumber'], d['gidnumber'], args.mapping, args.user)
    # No entries affected: report why and stop (delete of a missing mapping
    # vs. adding a mapping that already exists).
    if len(entries) == 0:
        if args.delete:
            print 'LDAP => Mapping does not exist'
        else:
            print 'LDAP => Mapping exists'
        sys.exit(0)
    # Pretty-print whatever was shown/added/deleted for this entry.
    if args.group:
        print 'LDAP => {0} mappings for {1}:'.format(op, d['gid'])
        ldap.format_group_entries(entries)
    else:
        print 'LDAP => {0} mappings for {1}:'.format(op, d['uid'])
        ldap.format_user_entries(entries)
| StarcoderdataPython |
70227 | <gh_stars>1-10
# encoding: utf-8
'''Tests regarding the smisk.mvc module
'''
| StarcoderdataPython |
1630271 | <reponame>RideGreg/LeetCode
class Solution(object):
    def reachingPoints(self, sx, sy, tx, ty):
        """Return True if (tx, ty) is reachable from (sx, sy).

        The only moves are (x, y) -> (x, x + y) and (x, y) -> (x + y, y).
        Work backwards from the target: the larger target coordinate was
        necessarily produced by repeatedly adding the smaller one, so it can
        be reduced with a single modulo step. Runs in O(log(max(tx, ty)))
        time and O(1) space, replacing the previous DP which allocated a
        (tx+1) x (ty+1) table and could not handle large targets.
        """
        while tx >= sx and ty >= sy:
            if tx == ty:
                # (k, k) has no valid predecessor, so we must already be
                # at the start.
                break
            if tx > ty:
                if ty > sy:
                    tx %= ty
                else:
                    # ty is pinned at sy: the remaining horizontal gap must
                    # be closed by whole (x + y, y) steps of size ty.
                    return (tx - sx) % ty == 0
            else:
                if tx > sx:
                    ty %= tx
                else:
                    # tx is pinned at sx: symmetric case on the y axis.
                    return (ty - sy) % tx == 0
        return tx == sx and ty == sy
# Quick manual checks (parenthesized form prints identically on py2 and py3).
print(Solution().reachingPoints(1, 1, 3, 5))
print(Solution().reachingPoints(1, 1, 2, 2))
print(Solution().reachingPoints(1, 1, 1, 1))
| StarcoderdataPython |
3390210 | <gh_stars>0
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from embyapi.api_client import ApiClient
class SyncServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Store the client used for all HTTP calls, creating a default one if needed."""
    self.api_client = api_client if api_client is not None else ApiClient()
def delete_sync_by_targetid_items(self, target_id, **kwargs):  # noqa: E501
    """Cancels items from a sync target.

    Requires authentication as user. Convenience wrapper around
    ``delete_sync_by_targetid_items_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :param str target_id: TargetId (required)
    :return: None
    """
    # This wrapper always wants just the payload, never (data, status, headers).
    kwargs['_return_http_data_only'] = True
    return self.delete_sync_by_targetid_items_with_http_info(target_id, **kwargs)  # noqa: E501
def delete_sync_by_targetid_items_with_http_info(self, target_id, **kwargs):  # noqa: E501
    """Cancels items from a sync target (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :param str target_id: TargetId (required)
    :return: None
    """
    # Keyword arguments accepted besides the documented API parameter.
    accepted = ('target_id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {'target_id': target_id}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_sync_by_targetid_items" % key
            )
        params[key] = val
    if params['target_id'] is None:
        raise ValueError("Missing the required parameter `target_id` when calling `delete_sync_by_targetid_items`")  # noqa: E501
    return self.api_client.call_api(
        '/Sync/{TargetId}/Items', 'DELETE',
        {'TargetId': params['target_id']},
        [],
        {},
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_sync_jobitems_by_id(self, id, **kwargs):  # noqa: E501
    """Cancels a sync job item.

    Requires authentication as user. Convenience wrapper around
    ``delete_sync_jobitems_by_id_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :param str id: Id (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    return self.delete_sync_jobitems_by_id_with_http_info(id, **kwargs)  # noqa: E501
def delete_sync_jobitems_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Cancels a sync job item (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :param str id: Id (required)
    :return: None
    """
    accepted = ('id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {'id': id}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_sync_jobitems_by_id" % key
            )
        params[key] = val
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `delete_sync_jobitems_by_id`")  # noqa: E501
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}', 'DELETE',
        {'Id': params['id']},
        [],
        {},
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_sync_jobs_by_id(self, id, **kwargs):  # noqa: E501
    """Cancels a sync job.

    Requires authentication as user. Convenience wrapper around
    ``delete_sync_jobs_by_id_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :param str id: Id (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    return self.delete_sync_jobs_by_id_with_http_info(id, **kwargs)  # noqa: E501
def delete_sync_jobs_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Cancels a sync job (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :param str id: Id (required)
    :return: None
    """
    accepted = ('id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {'id': id}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_sync_jobs_by_id" % key
            )
        params[key] = val
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `delete_sync_jobs_by_id`")  # noqa: E501
    return self.api_client.call_api(
        '/Sync/Jobs/{Id}', 'DELETE',
        {'Id': params['id']},
        [],
        {},
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_items_ready(self, target_id, **kwargs):  # noqa: E501
    """Gets ready to download sync items.

    Requires authentication as user. Convenience wrapper around
    ``get_sync_items_ready_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :param str target_id: TargetId (required)
    :return: list[SyncModelSyncedItem]
    """
    kwargs['_return_http_data_only'] = True
    return self.get_sync_items_ready_with_http_info(target_id, **kwargs)  # noqa: E501
def get_sync_items_ready_with_http_info(self, target_id, **kwargs):  # noqa: E501
    """Gets ready to download sync items (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :param str target_id: TargetId (required)
    :return: list[SyncModelSyncedItem]
    """
    accepted = ('target_id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {'target_id': target_id}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_items_ready" % key
            )
        params[key] = val
    if params['target_id'] is None:
        raise ValueError("Missing the required parameter `target_id` when calling `get_sync_items_ready`")  # noqa: E501
    # Tell the server which representations this client can parse.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml'])
    }
    return self.api_client.call_api(
        '/Sync/Items/Ready', 'GET',
        {},
        [('TargetId', params['target_id'])],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[SyncModelSyncedItem]',
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_jobitems(self, **kwargs):  # noqa: E501
    """Gets sync job items.

    Requires authentication as user. Convenience wrapper around
    ``get_sync_jobitems_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :return: QueryResultSyncModelSyncJobItem
    """
    kwargs['_return_http_data_only'] = True
    return self.get_sync_jobitems_with_http_info(**kwargs)  # noqa: E501
def get_sync_jobitems_with_http_info(self, **kwargs):  # noqa: E501
    """Gets sync job items (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :return: QueryResultSyncModelSyncJobItem
    """
    accepted = ('async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_jobitems" % key
            )
        params[key] = val
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml'])
    }
    return self.api_client.call_api(
        '/Sync/JobItems', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='QueryResultSyncModelSyncJobItem',
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_jobitems_by_id_additionalfiles(self, id, name, **kwargs):  # noqa: E501
    """Gets a sync job item file.

    Requires authentication as user. Convenience wrapper around
    ``get_sync_jobitems_by_id_additionalfiles_with_http_info``; pass
    ``async_req=True`` to receive the request thread instead of the data.

    :param async_req bool
    :param str id: Id (required)
    :param str name: Name (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    return self.get_sync_jobitems_by_id_additionalfiles_with_http_info(id, name, **kwargs)  # noqa: E501
def get_sync_jobitems_by_id_additionalfiles_with_http_info(self, id, name, **kwargs):  # noqa: E501
    """Gets a sync job item file (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :param str id: Id (required)
    :param str name: Name (required)
    :return: None
    """
    accepted = ('id', 'name', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {'id': id, 'name': name}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_jobitems_by_id_additionalfiles" % key
            )
        params[key] = val
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `get_sync_jobitems_by_id_additionalfiles`")  # noqa: E501
    if params['name'] is None:
        raise ValueError("Missing the required parameter `name` when calling `get_sync_jobitems_by_id_additionalfiles`")  # noqa: E501
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}/AdditionalFiles', 'GET',
        {'Id': params['id']},
        [('Name', params['name'])],
        {},
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_jobitems_by_id_file(self, id, **kwargs):  # noqa: E501
    """Gets a sync job item file.

    Requires authentication as user. Convenience wrapper around
    ``get_sync_jobitems_by_id_file_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :param str id: Id (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    return self.get_sync_jobitems_by_id_file_with_http_info(id, **kwargs)  # noqa: E501
def get_sync_jobitems_by_id_file_with_http_info(self, id, **kwargs):  # noqa: E501
    """Gets a sync job item file (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :param str id: Id (required)
    :return: None
    """
    accepted = ('id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {'id': id}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_jobitems_by_id_file" % key
            )
        params[key] = val
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `get_sync_jobitems_by_id_file`")  # noqa: E501
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}/File', 'GET',
        {'Id': params['id']},
        [],
        {},
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_jobs(self, **kwargs):  # noqa: E501
    """Gets sync jobs.

    Requires authentication as user. Convenience wrapper around
    ``get_sync_jobs_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :return: QueryResultSyncSyncJob
    """
    kwargs['_return_http_data_only'] = True
    return self.get_sync_jobs_with_http_info(**kwargs)  # noqa: E501
def get_sync_jobs_with_http_info(self, **kwargs):  # noqa: E501
    """Gets sync jobs (raw HTTP variant).

    Requires authentication as user.

    :param async_req bool
    :return: QueryResultSyncSyncJob
    """
    accepted = ('async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_jobs" % key
            )
        params[key] = val
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml'])
    }
    return self.api_client.call_api(
        '/Sync/Jobs', 'GET',
        {},
        [],
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='QueryResultSyncSyncJob',
        auth_settings=['apikeyauth', 'embyauth'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_jobs_by_id(self, id, **kwargs):  # noqa: E501
    """Gets a sync job.

    Requires authentication as user. Convenience wrapper around
    ``get_sync_jobs_by_id_with_http_info``; pass ``async_req=True``
    to receive the request thread instead of the decoded data.

    :param async_req bool
    :param str id: Id (required)
    :return: SyncSyncJob
    """
    kwargs['_return_http_data_only'] = True
    return self.get_sync_jobs_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_sync_jobs_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Gets a sync job.  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `get_sync_jobs_by_id`.

    :param async_req bool
    :param str id: Id (required)
    :return: SyncSyncJob
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_jobs_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' is mandatory for this endpoint.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `get_sync_jobs_by_id`")  # noqa: E501
    path_params = {'Id': params['id']} if 'id' in params else {}
    header_params = {
        # Representations this client is able to decode.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/Sync/Jobs/{Id}', 'GET',
        path_params,
        [],  # no query string parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='SyncSyncJob',  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_options(self, user_id, **kwargs):  # noqa: E501
    """Gets a list of available sync targets.  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `get_sync_options_with_http_info` that yields only the
    deserialized body.  Pass ``async_req=True`` to receive the request
    thread instead of the decoded result.

    :param async_req bool
    :param str user_id: UserId (required)
    :param str item_ids: ItemIds
    :param str parent_id: ParentId
    :param str target_id: TargetId
    :param str category: Category
    :return: SyncModelSyncDialogOptions
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.get_sync_options_with_http_info(user_id, **kwargs)  # noqa: E501
def get_sync_options_with_http_info(self, user_id, **kwargs):  # noqa: E501
    """Gets a list of available sync targets.  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `get_sync_options`.

    :param async_req bool
    :param str user_id: UserId (required)
    :param str item_ids: ItemIds
    :param str parent_id: ParentId
    :param str target_id: TargetId
    :param str category: Category
    :return: SyncModelSyncDialogOptions
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['user_id', 'item_ids', 'parent_id', 'target_id',
                  'category', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_options" % key
            )
        params[key] = val
    del params['kwargs']
    # 'user_id' is mandatory for this endpoint.
    if params.get('user_id') is None:
        raise ValueError("Missing the required parameter `user_id` when calling `get_sync_options`")  # noqa: E501
    # Map python argument names onto their wire names, keeping only the
    # parameters that were actually supplied.
    query_params = [(wire_name, params[py_name]) for py_name, wire_name in (
        ('user_id', 'UserId'),
        ('item_ids', 'ItemIds'),
        ('parent_id', 'ParentId'),
        ('target_id', 'TargetId'),
        ('category', 'Category'),
    ) if py_name in params]
    header_params = {
        # Representations this client is able to decode.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/Sync/Options', 'GET',
        {},  # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='SyncModelSyncDialogOptions',  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_sync_targets(self, user_id, **kwargs):  # noqa: E501
    """Gets a list of available sync targets.  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `get_sync_targets_with_http_info` that yields only the
    deserialized body.  Pass ``async_req=True`` to receive the request
    thread instead of the decoded result.

    :param async_req bool
    :param str user_id: UserId (required)
    :return: list[SyncSyncTarget]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.get_sync_targets_with_http_info(user_id, **kwargs)  # noqa: E501
def get_sync_targets_with_http_info(self, user_id, **kwargs):  # noqa: E501
    """Gets a list of available sync targets.  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `get_sync_targets`.

    :param async_req bool
    :param str user_id: UserId (required)
    :return: list[SyncSyncTarget]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['user_id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_sync_targets" % key
            )
        params[key] = val
    del params['kwargs']
    # 'user_id' is mandatory for this endpoint.
    if params.get('user_id') is None:
        raise ValueError("Missing the required parameter `user_id` when calling `get_sync_targets`")  # noqa: E501
    query_params = [('UserId', params['user_id'])] if 'user_id' in params else []
    header_params = {
        # Representations this client is able to decode.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/Sync/Targets', 'GET',
        {},  # no path parameters
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[SyncSyncTarget]',  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_by_itemid_status(self, body, item_id, **kwargs):  # noqa: E501
    """Gets sync status for an item.  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_by_itemid_status_with_http_info` that yields only the
    deserialized body.  Pass ``async_req=True`` to receive the request
    thread instead of the decoded result.

    :param async_req bool
    :param SyncModelSyncedItemProgress body: SyncedItemProgress: (required)
    :param str item_id: (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_by_itemid_status_with_http_info(body, item_id, **kwargs)  # noqa: E501
def post_sync_by_itemid_status_with_http_info(self, body, item_id, **kwargs):  # noqa: E501
    """Gets sync status for an item.  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_by_itemid_status`.

    :param async_req bool
    :param SyncModelSyncedItemProgress body: SyncedItemProgress: (required)
    :param str item_id: (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['body', 'item_id', 'async_req',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_by_itemid_status" % key
            )
        params[key] = val
    del params['kwargs']
    # Both the progress payload and the item id are mandatory.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `post_sync_by_itemid_status`")  # noqa: E501
    if params.get('item_id') is None:
        raise ValueError("Missing the required parameter `item_id` when calling `post_sync_by_itemid_status`")  # noqa: E501
    path_params = {'ItemId': params['item_id']} if 'item_id' in params else {}
    body_params = params['body'] if 'body' in params else None
    header_params = {
        # Payload encodings the server accepts for this endpoint.
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/xml']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/Sync/{ItemId}/Status', 'POST',
        path_params,
        [],  # no query string parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_data(self, body, **kwargs):  # noqa: E501
    """Syncs data between device and server  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_data_with_http_info` that yields only the deserialized
    body.  Pass ``async_req=True`` to receive the request thread
    instead of the decoded result.

    :param async_req bool
    :param SyncModelSyncDataRequest body: SyncDataRequest: (required)
    :return: SyncModelSyncDataResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_data_with_http_info(body, **kwargs)  # noqa: E501
def post_sync_data_with_http_info(self, body, **kwargs):  # noqa: E501
    """Syncs data between device and server  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_data`.

    :param async_req bool
    :param SyncModelSyncDataRequest body: SyncDataRequest: (required)
    :return: SyncModelSyncDataResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_data" % key
            )
        params[key] = val
    del params['kwargs']
    # The sync-data request payload is mandatory.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `post_sync_data`")  # noqa: E501
    body_params = params['body'] if 'body' in params else None
    header_params = {
        # Representations this client decodes / payload encodings the
        # server accepts.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/xml']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/Sync/Data', 'POST',
        {},  # no path parameters
        [],  # no query string parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='SyncModelSyncDataResponse',  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_items_cancel(self, **kwargs):  # noqa: E501
    """Cancels items from a sync target  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_items_cancel_with_http_info` that yields only the
    deserialized body.  Pass ``async_req=True`` to receive the request
    thread instead of the decoded result.

    :param async_req bool
    :param str item_ids: ItemIds
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_items_cancel_with_http_info(**kwargs)  # noqa: E501
def post_sync_items_cancel_with_http_info(self, **kwargs):  # noqa: E501
    """Cancels items from a sync target  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_items_cancel`.

    :param async_req bool
    :param str item_ids: ItemIds
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['item_ids', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_items_cancel" % key
            )
        params[key] = val
    del params['kwargs']
    # 'item_ids' is optional; include it only when supplied.
    query_params = [('ItemIds', params['item_ids'])] if 'item_ids' in params else []
    return self.api_client.call_api(
        '/Sync/Items/Cancel', 'POST',
        {},  # no path parameters
        query_params,
        {},  # no extra headers for this endpoint
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_jobitems_by_id_enable(self, id, **kwargs):  # noqa: E501
    """Enables a cancelled or queued sync job item  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_jobitems_by_id_enable_with_http_info` that yields only
    the deserialized body.  Pass ``async_req=True`` to receive the
    request thread instead of the decoded result.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_jobitems_by_id_enable_with_http_info(id, **kwargs)  # noqa: E501
def post_sync_jobitems_by_id_enable_with_http_info(self, id, **kwargs):  # noqa: E501
    """Enables a cancelled or queued sync job item  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_jobitems_by_id_enable`.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_jobitems_by_id_enable" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' is mandatory for this endpoint.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `post_sync_jobitems_by_id_enable`")  # noqa: E501
    path_params = {'Id': params['id']} if 'id' in params else {}
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}/Enable', 'POST',
        path_params,
        [],  # no query string parameters
        {},  # no extra headers for this endpoint
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_jobitems_by_id_markforremoval(self, id, **kwargs):  # noqa: E501
    """Marks a job item for removal  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_jobitems_by_id_markforremoval_with_http_info` that
    yields only the deserialized body.  Pass ``async_req=True`` to
    receive the request thread instead of the decoded result.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_jobitems_by_id_markforremoval_with_http_info(id, **kwargs)  # noqa: E501
def post_sync_jobitems_by_id_markforremoval_with_http_info(self, id, **kwargs):  # noqa: E501
    """Marks a job item for removal  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_jobitems_by_id_markforremoval`.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_jobitems_by_id_markforremoval" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' is mandatory for this endpoint.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `post_sync_jobitems_by_id_markforremoval`")  # noqa: E501
    path_params = {'Id': params['id']} if 'id' in params else {}
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}/MarkForRemoval', 'POST',
        path_params,
        [],  # no query string parameters
        {},  # no extra headers for this endpoint
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_jobitems_by_id_transferred(self, id, **kwargs):  # noqa: E501
    """Reports that a sync job item has successfully been transferred.  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_jobitems_by_id_transferred_with_http_info` that yields
    only the deserialized body.  Pass ``async_req=True`` to receive
    the request thread instead of the decoded result.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_jobitems_by_id_transferred_with_http_info(id, **kwargs)  # noqa: E501
def post_sync_jobitems_by_id_transferred_with_http_info(self, id, **kwargs):  # noqa: E501
    """Reports that a sync job item has successfully been transferred.  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_jobitems_by_id_transferred`.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_jobitems_by_id_transferred" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' is mandatory for this endpoint.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `post_sync_jobitems_by_id_transferred`")  # noqa: E501
    path_params = {'Id': params['id']} if 'id' in params else {}
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}/Transferred', 'POST',
        path_params,
        [],  # no query string parameters
        {},  # no extra headers for this endpoint
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_jobitems_by_id_unmarkforremoval(self, id, **kwargs):  # noqa: E501
    """Unmarks a job item for removal  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_jobitems_by_id_unmarkforremoval_with_http_info` that
    yields only the deserialized body.  Pass ``async_req=True`` to
    receive the request thread instead of the decoded result.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_jobitems_by_id_unmarkforremoval_with_http_info(id, **kwargs)  # noqa: E501
def post_sync_jobitems_by_id_unmarkforremoval_with_http_info(self, id, **kwargs):  # noqa: E501
    """Unmarks a job item for removal  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_jobitems_by_id_unmarkforremoval`.

    :param async_req bool
    :param str id: Id (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_jobitems_by_id_unmarkforremoval" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' is mandatory for this endpoint.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `post_sync_jobitems_by_id_unmarkforremoval`")  # noqa: E501
    path_params = {'Id': params['id']} if 'id' in params else {}
    return self.api_client.call_api(
        '/Sync/JobItems/{Id}/UnmarkForRemoval', 'POST',
        path_params,
        [],  # no query string parameters
        {},  # no extra headers for this endpoint
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_jobs(self, body, **kwargs):  # noqa: E501
    """Gets sync jobs.  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_jobs_with_http_info` that yields only the deserialized
    body.  Pass ``async_req=True`` to receive the request thread
    instead of the decoded result.

    :param async_req bool
    :param SyncModelSyncJobRequest body: SyncJobRequest: (required)
    :return: SyncModelSyncJobCreationResult
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_jobs_with_http_info(body, **kwargs)  # noqa: E501
def post_sync_jobs_with_http_info(self, body, **kwargs):  # noqa: E501
    """Gets sync jobs.  # noqa: E501

    Requires authentication as user.  Full-HTTP-response variant of
    `post_sync_jobs`.

    :param async_req bool
    :param SyncModelSyncJobRequest body: SyncJobRequest: (required)
    :return: SyncModelSyncJobCreationResult
        If the method is called asynchronously, returns the request
        thread.
    """
    # Endpoint parameters plus the per-request control knobs shared by
    # every generated method.
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    # Fail fast on unknown keyword arguments (most likely typos).
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_sync_jobs" % key
            )
        params[key] = val
    del params['kwargs']
    # The sync-job request payload is mandatory.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `post_sync_jobs`")  # noqa: E501
    body_params = params['body'] if 'body' in params else None
    header_params = {
        # Representations this client decodes / payload encodings the
        # server accepts.
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/xml']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/Sync/Jobs', 'POST',
        {},  # no path parameters
        [],  # no query string parameters
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='SyncModelSyncJobCreationResult',  # noqa: E501
        auth_settings=['apikeyauth', 'embyauth'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def post_sync_jobs_by_id(self, body, id, **kwargs):  # noqa: E501
    """Updates a sync job.  # noqa: E501

    Requires authentication as user.  Convenience variant of
    `post_sync_jobs_by_id_with_http_info` that yields only the
    deserialized body.  Pass ``async_req=True`` to receive the request
    thread instead of the decoded result.

    :param async_req bool
    :param SyncSyncJob body: SyncJob: (required)
    :param int id: (required)
    :return: None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Callers of this helper never need HTTP status or headers.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests alike funnel through the same call: the
    # async flavour simply hands back the thread object unchanged.
    return self.post_sync_jobs_by_id_with_http_info(body, id, **kwargs)  # noqa: E501
    def post_sync_jobs_by_id_with_http_info(self, body, id, **kwargs):  # noqa: E501
        """Updates a sync job.  # noqa: E501

        Requires authentication as user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.post_sync_jobs_by_id_with_http_info(body, id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param SyncSyncJob body: SyncJob: (required)
        :param int id: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body', 'id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is neither a declared API
        # parameter nor one of the generic transport options above.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_sync_jobs_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `post_sync_jobs_by_id`")  # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `post_sync_jobs_by_id`")  # noqa: E501
        collection_formats = {}
        # 'Id' is interpolated into the URL template '/Sync/Jobs/{Id}'.
        path_params = {}
        if 'id' in params:
            path_params['Id'] = params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/xml'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apikeyauth', 'embyauth']  # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared ApiClient instance.
        return self.api_client.call_api(
            '/Sync/Jobs/{Id}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def post_sync_offlineactions(self, body, **kwargs): # noqa: E501
"""Reports an action that occurred while offline. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_sync_offlineactions(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[UsersUserAction] body: List`1: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_sync_offlineactions_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.post_sync_offlineactions_with_http_info(body, **kwargs) # noqa: E501
return data
    def post_sync_offlineactions_with_http_info(self, body, **kwargs):  # noqa: E501
        """Reports an action that occurred while offline.  # noqa: E501

        Requires authentication as user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.post_sync_offlineactions_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param list[UsersUserAction] body: List`1: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument that is neither a declared API
        # parameter nor one of the generic transport options above.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_sync_offlineactions" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `post_sync_offlineactions`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json', 'application/xml'])  # noqa: E501
        # Authentication setting
        auth_settings = ['apikeyauth', 'embyauth']  # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared ApiClient instance.
        return self.api_client.call_api(
            '/Sync/OfflineActions', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| StarcoderdataPython |
3378553 | <reponame>developbiao/pythonbasics
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Demo: sorting a list of (name, score) tuples with a key function.
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]


def by_name(t):
    """Sort key: the student's name (first tuple element)."""
    return t[0]


def by_score(t):
    """Sort key: the student's score (second tuple element)."""
    return t[1]


# Ascending sort by score; reverse=False is the default, kept explicit here.
L2 = sorted(L, key=by_score, reverse=False)
print(L2)
| StarcoderdataPython |
3247821 | <filename>tests/test_create_oxog_intervals.py
"""Tests the ``gdc_filtration_tools.tools.create_oxog_intervals`` module.
"""
import os
import tempfile
import unittest

import pysam

from gdc_filtration_tools.__main__ import main
from gdc_filtration_tools.tools.create_oxog_intervals import create_oxog_intervals
from tests.utils import captured_output, cleanup_files, get_test_data_path
class TestCreateOxogIntervals(unittest.TestCase):
    """Tests for ``create_oxog_intervals``, called directly and via the CLI."""

    def _assert_output(self, fn, stderr, expected):
        """Assert the interval file *fn* contains exactly *expected* lines and
        that the run logged the standard tool messages; return the raw stderr
        text for further CLI-specific checks."""
        found = []
        with open(fn, "rt") as fh:
            for line in fh:
                found.append(line.rstrip("\r\n"))
        self.assertEqual(len(found), len(expected))
        self.assertEqual(found, expected)
        serr = stderr.getvalue()
        self.assertTrue(
            "Extracts interval-file for Broad OxoG metrics from VCF." in serr
        )
        # The fixture VCF has two records.
        self.assertTrue("Processed 2 records" in serr)
        return serr

    def test_create_oxog_intervals(self):
        ivcf = get_test_data_path("test.vcf")
        (fd, fn) = tempfile.mkstemp()
        # mkstemp returns an open OS-level descriptor; close it immediately
        # to avoid leaking it (the tool reopens the path itself).
        os.close(fd)
        try:
            with captured_output() as (_, stderr):
                create_oxog_intervals(ivcf, fn)
            self._assert_output(fn, stderr, ["chr1:1", "chr2:1"])
        finally:
            cleanup_files(fn)

    def test_cli(self):
        ivcf = get_test_data_path("test.vcf")
        (fd, fn) = tempfile.mkstemp()
        os.close(fd)  # see note in test_create_oxog_intervals
        try:
            with captured_output() as (_, stderr):
                main(args=["create-oxog-intervals", ivcf, fn])
            serr = self._assert_output(fn, stderr, ["chr1:1", "chr2:1"])
            # The CLI wraps the tool with its own loggers at start and end.
            serr = [i for i in serr.split("\n") if i.rstrip("\r\n")]
            self.assertTrue("gdc_filtration_tools.create_oxog_intervals" in serr[0])
            self.assertTrue("gdc_filtration_tools.main" in serr[-1])
        finally:
            cleanup_files(fn)
| StarcoderdataPython |
3284939 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EntityReference(Model):
    """A reference to another Data Factory entity.

    :param type: The type of this referenced entity. Possible values include:
     'IntegrationRuntimeReference', 'LinkedServiceReference'
    :type type: str or
     ~azure.mgmt.datafactory.models.IntegrationRuntimeEntityReferenceType
    :param reference_name: The name of this referenced entity.
    :type reference_name: str
    """

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'reference_name': {'key': 'referenceName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EntityReference, self).__init__(**kwargs)
        # Both attributes default to None when not supplied.
        self.type = kwargs.get('type')
        self.reference_name = kwargs.get('reference_name')
| StarcoderdataPython |
85655 | import os
import uuid
import re
import mimetypes
from django.shortcuts import render
from .forms import FormUserCreation, FormLogin, FormJobPost, FormApply, FormUploadImage, FormUploadResume, FormApplicantsInfo
from django.http import HttpResponse, JsonResponse
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
from .models import UserAccount, JobPost, JobPostActivity, UserProfile
from django.shortcuts import redirect
from wsgiref.util import FileWrapper
from django.core import serializers
# Shortcuts to settings used when building filesystem paths and the absolute
# links embedded in activation/application emails.
BASE_DIR = settings.BASE_DIR
HOST_NAME = settings.HOST_NAME
# Create your views here.
def template(request):
    """
    Blank Template

    Renders an empty page shell for a logged-in user.
    NOTE(review): non-GET requests fall through and return None, which
    Django rejects -- confirm this view is only ever linked via GET.
    :param request:
    :return:
    """
    data = {"header_name": "Blank_Template",
            'email': request.session["email"]}
    if request.method == "GET":
        return render(request, 'pages/blank.html', data)
def sign_in(request, msg="Start your session!"):
    """
    Sign In function

    GET clears any stale session keys and shows the login form; POST
    validates the credentials and routes the user to the applicant or
    organization profile page depending on ``user_type``.
    :param request:
    :param msg: status message shown above the form
    :return:
    """
    data = {"header_name": "Sign In",
            "message": msg,
            "form": FormLogin}
    if request.method == "GET":
        # Drop any leftover session state from a previous login.
        try:
            del request.session['email']
            del request.session['name']
            del request.session['user_type']
            del request.session['info_saved']
        except Exception as ex:
            pass
        return render(request, 'pages/sign-in.html', data)
    elif request.method == "POST":
        form = FormLogin(request.POST)
        if form.is_valid():
            email = request.POST.get("email")
            password = request.POST.get("password")
            try:
                user_account_obj = UserAccount.objects.get(email=email)
                # NOTE(review): the password is compared in plain text and
                # account_activated is compared as the string "True" --
                # confirm against the UserAccount model and hashing policy.
                if user_account_obj.password == password and user_account_obj.account_activated=="True":
                    request.session['email'] = email
                    request.session['name'] = user_account_obj.user_full_name
                    # Strip the 'portal' prefix so the stored image path is
                    # usable as a URL.
                    img_url = re.sub(r'portal', '', str(user_account_obj.user_image))
                    request.session['profile_img'] = img_url
                    request.session['user_type'] = user_account_obj.user_type
                    request.session['info_saved'] = user_account_obj.info_saved
                    # user_type "1" = applicant, anything else = organization.
                    if user_account_obj.user_type == "1":
                        return redirect("applicant")
                    else:
                        return redirect("org_user")
                else:
                    data["message"] = "Login Failed"
            except Exception as ex:
                data["message"] = "Login Failed [%s]" % ex
        return render(request, 'pages/sign-in.html', data)
def sign_up(request):
    """
    User registration

    Creates the account, stores a random activation code on it, and emails
    a confirmation link; the account stays inactive until the link is used
    (see :func:`email_activation`).
    :param request:
    :return:
    """
    data = {"header_name": "Sign Up"}
    # Drop any leftover session state before registering.
    try:
        del request.session['email']
        del request.session['name']
        del request.session['user_type']
    except Exception as ex:
        pass
    data["form"] = FormUserCreation
    if request.method == "POST":
        form = FormUserCreation(request.POST)
        if form.is_valid():
            email = request.POST.get("email")
            # Duplicate check: bail out early if the address is registered.
            try:
                user_account_obj = UserAccount.objects.get(email=email)
                if user_account_obj.email:
                    data["message"] = "User exist!"
                    return render(request, "pages/sign-up.html", data)
            except:
                pass
            try:
                result = form.save()
                if result:
                    # Random token the emailed activation link must echo back.
                    activation = uuid.uuid4().hex
                    link = "http://" + HOST_NAME + "/portal/email_activation/" + activation + "/" + email
                    context = {"confirm_link": link}
                    message = render_to_string('email_templates/confirm_email.html', context)
                    # save uuid in db
                    user_account_obj = UserAccount.objects.get(email=email)
                    user_account_obj.activation_code = activation
                    user_account_obj.save()
                    send_mail(subject="Confirmation Email from HireBob",
                              message="",
                              from_email=settings.EMAIL_HOST_USER,
                              recipient_list=[email],
                              html_message=message)
                else:
                    raise Exception("Failed to save data")
                data["message"] = "Email send for account activation!"
            except Exception as ex:
                data["message"] = ex
            # NOTE(review): returning from 'finally' suppresses any in-flight
            # exception -- confirm this is intentional.
            finally:
                return render(request, "pages/sign-up.html", data)
        else:
            return HttpResponse("Form is not valid")
    else:
        return render(request, 'pages/sign-up.html', data)
def email_activation(request, activation, email):
    """
    Email validation and account activation

    Activates the account for *email* when *activation* matches the code
    stored at sign-up, then sends the user to the login page. Unknown
    addresses and mismatched codes also land on the login page instead of
    raising a server error.
    :param request:
    :param activation: activation code from the emailed link
    :param email: account email address
    :return: redirect to the sign-in view
    """
    if request.method == "GET":
        try:
            user_account_obj = UserAccount.objects.get(email=email)
        except UserAccount.DoesNotExist:
            # Unknown address: don't leak information, just show the login page.
            return redirect('sign_in')
        if user_account_obj.activation_code == activation:
            user_account_obj.account_activated = True
            user_account_obj.save()
    # Always end up on the login page (the original returned None for
    # non-GET requests, which Django rejects).
    return redirect('sign_in')
def logout(request):
    """
    Clear login session and redirect to login page.

    Each key is removed independently: the original chained ``del`` calls in
    a single try block, so a missing 'message' key aborted the remaining
    deletions and left stale session state behind.
    """
    for key in ('message', 'email', 'name', 'user_type'):
        request.session.pop(key, None)
    return redirect('sign_in')
def organization(request):
    """Render the organization profile page from the current session.

    Exposes the image/resume upload forms and the info-update form
    pre-filled with the logged-in email.
    """
    data = {"header_name": "Organization Page",
            'email': request.session["email"],
            'name': request.session["name"],
            'profile_img': request.session["profile_img"],
            'info_saved': request.session["info_saved"],
            'imgform': FormUploadImage,
            'resumeform': FormUploadResume,
            'update_info': FormApplicantsInfo(initial={'email': request.session["email"]})}
    # Surface one-shot status messages left by other views (e.g. update_info).
    if "message" in request.session:
        data["message"] = request.session["message"]
    return render(request, 'pages/profile_org.html', data)
def post_job(request):
    """
    Show the organization's posted jobs and handle new job submissions.

    GET lists existing posts; POST validates and saves a new JobPost.
    NOTE(review): the job list is queried before a POST is saved, so a
    newly published job only appears after a refresh -- confirm intended.
    """
    data = {"header_name": "Post Jobs",
            'email': request.session["email"],
            'info_saved': request.session["info_saved"],
            'form': FormJobPost(initial={'posted_by_email': request.session["email"]}),
            'name': request.session["name"],
            'profile_img': request.session["profile_img"]}
    job_post_obj = JobPost.objects.filter(posted_by_email=request.session["email"])
    data["data"] = job_post_obj
    if request.method == "POST":
        form = FormJobPost(request.POST)
        if form.is_valid():
            form.save()
            data["message"] = "Job published "
        else:
            data["message"] = "Failed to publish job"
    else:
        data["message"] = "Publish Jobs"
    return render(request, 'pages/post_job.html', data)
def applicant(request):
    """Render the applicant profile page from the current session."""
    data = {"header_name": "Organization Page",
            'email': request.session["email"],
            'name': request.session["name"],
            'info_saved': request.session["info_saved"],
            'profile_img': request.session["profile_img"],
            'imgform': FormUploadImage,
            'resumeform': FormUploadResume,
            'update_info': FormApplicantsInfo(initial={'email': request.session["email"]})}
    try:
        # Surface one-shot status messages left by other views.
        if "message" in request.session:
            data["message"] = request.session["message"]
    except Exception as ex:
        # Session unavailable/expired: force a fresh login.
        return redirect('sign_in')
    return render(request, 'pages/profile_applicant.html', data)
def show_jobs(request):
    """List all jobs the current user has not yet applied to."""
    context = {"header_name": "Organization Page",
               'email': request.session["email"],
               'info_saved': request.session["info_saved"],
               'name': request.session["name"],
               'profile_img': request.session["profile_img"]}
    if request.method == "GET":
        # Exclude every post that already has an application from this user.
        applied_ids = [activity.post_id
                       for activity in JobPostActivity.objects.filter(email=request.session["email"])]
        context["data"] = JobPost.objects.exclude(post_id__in=applied_ids)
    return render(request, 'pages/jobs.html', context)
def applied_jobs(request):
    """List all jobs the current user has already applied to."""
    context = {"header_name": "Organization Page",
               'email': request.session["email"],
               'info_saved': request.session["info_saved"],
               'name': request.session["name"],
               'profile_img': request.session["profile_img"]}
    if request.method == "GET":
        # Keep only the posts that have an application from this user.
        applied_ids = [activity.post_id
                       for activity in JobPostActivity.objects.filter(email=request.session["email"])]
        context["data"] = JobPost.objects.filter(post_id__in=applied_ids)
    return render(request, 'pages/jobs.html', context)
def applicants_list(request):
    """Render the organization's applicant-overview page."""
    session = request.session
    context = {"header_name": "applicants list",
               'email': session["email"],
               'info_saved': session["info_saved"],
               'name': session["name"],
               'profile_img': session["profile_img"]}
    return render(request, 'pages/applicants_list_org.html', context)
def job_details(request, email, id):
    """
    Show one job post with an application form.

    Marks the page as already-applied when *email* has an activity record
    for post *id*, so the template can hide the apply button.
    :param email: applicant email (from the URL)
    :param id: JobPost primary key
    """
    data = {"header_name": "applicants list",
            'email': request.session["email"],
            'info_saved': request.session["info_saved"],
            'name': request.session["name"],
            'applied': False,
            'profile_img': request.session["profile_img"]}
    job_post_obj = JobPost.objects.filter(post_id=id)
    data["data"] = job_post_obj
    # Pre-fill the hidden fields the apply() view relies on.
    data["form"] = FormApply(initial={'email': request.session["email"],
                                      'post_id': id,
                                      'to_email': job_post_obj[0].posted_by_email,
                                      'job_title': job_post_obj[0].job_title})
    try:
        job_activity_obj = JobPostActivity.objects.filter(post_id=id).filter(email=email)
        if job_activity_obj:
            data["applied"] = True
    except Exception as ex:
        pass
    return render(request, 'pages/job_details.html', data)
def apply(request):
    """
    Record a job application and notify both parties by email.

    Saves a JobPostActivity row, mails the organization a link to the
    applicant's profile with the cover letter, mails the applicant a
    confirmation, then returns to the job-details page.
    """
    form = FormApply(request.POST)
    id = request.POST.get("post_id")
    if form.is_valid():
        job_title = request.POST.get("job_title")
        cover_letter = request.POST.get("cover_letter")
        email = request.session["email"]
        to_email = request.POST.get("to_email")
        # Link the organization can follow to view the applicant's profile.
        link = "http://" + HOST_NAME + "/portal/user_profile/" + request.session["email"]
        org_context = {"job_link": link, "cover_letter": cover_letter, "title": "Application for %s " % job_title}
        applicant_context = {"title": "Application for %s " % job_title,
                             "email": to_email}
        form.save()
        org_message = render_to_string('email_templates/apply_job.html', org_context)
        applicant_message = render_to_string('email_templates/job_applied.html', applicant_context)
        # Celery implementation to send mail
        # NOTE(review): despite the comment above, both mails are currently
        # sent synchronously inside the request cycle.
        send_mail(subject="Applied for job %s " % job_title,
                  message="",
                  from_email=settings.EMAIL_HOST_USER,
                  recipient_list=[to_email],
                  html_message=org_message)
        send_mail(subject="Application has been sent to Organization",
                  message="",
                  from_email=settings.EMAIL_HOST_USER,
                  recipient_list=[email],
                  html_message=applicant_message)
    # NOTE(review): if the form is invalid, 'email' is never bound and the
    # line below raises NameError -- confirm the intended failure handling.
    return redirect('job_details', email, id)
def upload_profile(request):
    """Save an uploaded profile image and refresh the session thumbnail."""
    # Applicants and organizations return to different profile pages.
    destination = 'applicant' if request.session['user_type'] == "1" else 'org_user'
    form = FormUploadImage(request.POST, request.FILES)
    if form.is_valid():
        account = UserAccount.objects.get(email=request.session["email"])
        account.user_image = form.cleaned_data["user_image"]
        account.save()
        # Strip the 'portal' prefix so the stored path is servable as a URL.
        request.session['profile_img'] = re.sub(r'portal', '', str(account.user_image))
    return redirect(destination)
def upload_resume(request):
    """Attach an uploaded resume file to the current applicant's account."""
    form = FormUploadResume(request.POST, request.FILES)
    if form.is_valid():
        account = UserAccount.objects.get(email=request.session["email"])
        account.resume = form.cleaned_data["resume"]
        account.save()
    # Valid or not, the applicant lands back on their profile page.
    return redirect('applicant')
def download_resume(request, email):
    """
    Stream the stored resume of *email* back as a Word attachment.

    Falls back to the applicant profile page when the file is missing or
    unreadable.
    """
    user_acc_obj = UserAccount.objects.get(email=email)
    filename = str(user_acc_obj.resume)
    download_filename = email + "_" + "resume"
    try:
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(filename, "rb") as fh:
            data = fh.read()
        response = HttpResponse(data, content_type='application/vnd.ms-word')
        response['Content-Disposition'] = 'attachment; filename=%s' % download_filename
        response['Content-Length'] = os.path.getsize(filename)
        return response
    except Exception as ex:
        # Missing/unreadable file: send the user back to their profile.
        return redirect('applicant')
def job_status(request, id):
    """List all applications (JobPostActivity rows) for job post *id*."""
    data = {"header_name": "applicants list",
            'email': request.session["email"],
            'info_saved': request.session["info_saved"],
            'name': request.session["name"],
            'applied': False,
            'profile_img': request.session["profile_img"]}
    job_activity = JobPostActivity.objects.filter(post_id=id)
    data["data"] = job_activity
    return render(request, "pages/job_status.html", data)
def user_profile(request, email):
    """
    Public profile page for *email*.

    Viewable both by the profile owner and by organizations reviewing
    applicants; anonymous viewers render with an empty user_type.
    """
    data = {"header_name": "applicants list",
            'applied': False}
    try:
        data['user_type'] = request.session['user_type']
        data['email'] = request.session['email']
        data['info_saved'] = request.session["info_saved"]
        data['name'] = request.session["name"]
        data['profile_img'] = request.session["profile_img"]
    except Exception as ex:
        # Anonymous viewer: no session -- render with minimal context.
        data['user_type'] = ""
        pass
    try:
        user_acc_obj = UserAccount.objects.get(email=email)
        user_profile_obj = UserProfile.objects.get(email=email)
        data["data"] = user_acc_obj
        data["user_info"] = user_profile_obj
        img_url = re.sub(r'portal', '', str(user_acc_obj.user_image))
        data['img'] = img_url
        return render(request, "pages/user_profile.html", data)
    except Exception as ex:
        # BUG FIX: was redirect('sing_in'), a typo that raised NoReverseMatch.
        return redirect('sign_in')
def update_info(request):
    """
    Create or update the extended profile (UserProfile) for the posted email.

    Tries an update first; if no profile row exists yet, the except branch
    creates one. Either way the account is flagged as having saved its info
    (info_saved = "2") before redirecting back to the profile page.
    """
    # Redirect target depends on the account kind ("1" = applicant).
    if request.session['user_type'] == "1":
        out = 'applicant'
    else:
        out = 'org_user'
    gender = request.POST.get("gender")
    email = request.POST.get("email")
    gmail = request.POST.get("gmail")
    linkedin = request.POST.get("linkedin")
    skype_id = request.POST.get("skype_id")
    about_me = request.POST.get("about_me")
    address = request.POST.get("address")
    birthday = request.POST.get("birthday")
    job_title = request.POST.get("job_title")
    location = request.POST.get("location")
    user_acc_obj = UserAccount.objects.get(email=email)
    try:
        # Existing profile: update every field in place.
        user_profile = UserProfile.objects.get(email=email)
        user_profile.gmail = gmail
        user_profile.gender = gender
        user_profile.linkedin = linkedin
        user_profile.skype_id = skype_id
        user_profile.about_me = about_me
        user_profile.address = address
        user_profile.birthday = birthday
        user_profile.job_title = job_title
        user_profile.location = location
        user_profile.save()
        request.session["message"] = "Information Updated"
    except Exception as ex:
        # No profile yet (UserProfile.DoesNotExist expected): create one.
        user_profile = UserProfile.objects.create(email=email,
                                                  gender=gender,
                                                  gmail=gmail,
                                                  linkedin=linkedin,
                                                  skype_id=skype_id,
                                                  about_me=about_me,
                                                  address=address,
                                                  birthday=birthday,
                                                  job_title=job_title,
                                                  location=location)
        request.session["message"] = "Information added"
    finally:
        # Mark the account as having completed its profile info.
        user_acc_obj.info_saved = "2"
        user_acc_obj.save()
        request.session["info_saved"] = "2"
    return redirect(out)
def update_status(request, email, id, status):
    """
    Change an application's status and email the applicant about it.

    :param email: applicant whose application is updated
    :param id: job post id
    :param status: new status string stored on the JobPostActivity row
    """
    try:
        user_profile = JobPostActivity.objects.filter(email=email).filter(post_id=id).first()
        user_profile.status = status
        user_profile.save()
    except Exception as ex:
        # first() returned None (no matching application): skip the update;
        # the notification mail below is still sent.
        pass
    context = {
        "title": "Job Status changed",
        "status": status,
        "email": request.session["email"]}
    message = render_to_string('email_templates/status_change.html', context)
    send_mail(subject="Job status changed to %s " % status,
              message="",
              from_email=settings.EMAIL_HOST_USER,
              recipient_list=[email],
              html_message=message)
    return redirect('job_status', id)
| StarcoderdataPython |
1618103 | import os
import logging
from flask import Flask, g, request, jsonify, render_template, abort
import requests
import captain
import json
import random
import string
import shutil
import time
from combine.graph.type_graph import TypeGraph
logging.basicConfig(level=logging.INFO)
LOG_LVL = logging.INFO
logger = logging.getLogger(__name__)
app = Flask(__name__)
upload_fname = 'local_uploads'
# Resolve the uploads directory relative to this file so the app works
# regardless of the current working directory.
parent_path = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-1])
UPLOAD_DIR = os.path.join(parent_path, upload_fname)
#UPLOAD_DIR = 'local_uploads'
def get_random(size=4):
    """Return a random string of *size* lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(size))
@app.route('/fast', methods=['GET', 'POST'])
def fast_labeling():
    """
    One-shot labeling flow. GET shows the upload form; POST takes an
    uploaded CSV, asks a ssspotter instance for the subject column, kicks
    off labeling via captain, polls the combine instances until the table
    shows up, then renders the top-ranked concept label.
    NOTE(review): the polling loop has no timeout -- if no combine instance
    ever lists the table, the request blocks forever.
    """
    import pandas as pd
    max_slice_size = 10
    if request.method=='GET':
        return render_template('fast_labeling.html')
    else:
        # Persist the upload under a randomized name to avoid collisions.
        uploaded_file = request.files['table']
        tname = uploaded_file.filename
        ext = uploaded_file.filename[-4:]
        fname = "fast_" + get_random() + ext
        fdir = os.path.join(UPLOAD_DIR, fname)
        uploaded_file.save(fdir)
        # To start the services: python captain.py up --services ssspotter elect score combine
        df = pd.read_csv(fdir)
        total_num_slices = 1
        slice_size = min(max_slice_size, df.shape[0])
        # Re-serialize the first slice as tab-separated rows for ssspotter.
        rows = []
        for row_items in df[0:slice_size].values.tolist():
            row_items_s = [str(s) for s in row_items]
            row = "\t".join(row_items_s)
            rows.append(row)
        file_content = "\n".join(rows)
        files = {'table': (fname, file_content)}
        data = {
            'technique': 'most_distinct',
            'callback': '',
            'slice': 0,
            'total': 1,
        }
        # Ask the first available spotter for the subject column id.
        spotters_ports = captain.get_ports('ssspotter')
        port = spotters_ports[0]
        spotter_url = "http://127.0.0.1:"+str(port)+"/spot"
        r = requests.post(spotter_url, files=files, data=data)
        if r.status_code != 200:
            logger.error("error: "+str(r.content))
            return "error: "+str(r.content)
        else:
            col_id = r.json()['subject_col_id']
            captain.label_files(files=[fdir], slice_size=max_slice_size, cols=[col_id], sample=str(max_slice_size))
            # Poll every combine instance until our table appears.
            ports_combine = captain.get_ports(service="combine")
            pairs = []
            while pairs == []:
                time.sleep(1)
                for p in ports_combine:
                    url = "http://127.0.0.1:" + p + "/list"
                    print("url: " + url)
                    response = requests.get(url)
                    print(response.json())
                    apples = response.json()["apples"]
                    for apple in apples:
                        if apple['table'] == fname:
                            pairs.append({'url': url, 'apple': apple, 'port': p})
                    # pairs.append((url, apple))
                    # combines[url] = response.json()["apples"]
                    # apples += response.json()["apples"]
            print("pairs: ")
            print(pairs)
            if len(pairs) == 0:
                return "No processed files in any combine instance"
            else:
                #################
                # Fetch and score the type graph of the first match.
                apple_id = pairs[0]['apple']['id']
                port = pairs[0]['port']
                m = pairs[0]['apple']['m']
                graph_dir = get_graph(port=str(port), apple_id=str(apple_id))
                if graph_dir:
                    print("graph_dir is found: " + str(graph_dir))
                    alpha = 0.01
                    fsid = 3
                    print("graph_dir before get labels: " + graph_dir)
                    g, labels = get_labels_from_graph(graph_dir=graph_dir, m=m, alpha=alpha, fsid=fsid)
                    return render_template('fast_labeling.html', concept=labels[0], col_id=col_id)
                #################
            return 'Action'
@app.route('/')
def hello_world():
    """Landing page linking to the list of combine servers."""
    return 'Hello World! This is the Captain! <br> <a href="/list">List Combine servers</a>'
@app.route('/list')
def show_combine_list():
    """Aggregate the processed tables ("apples") from every running combine
    instance and render them as one list."""
    ports_combine = captain.get_ports(service="combine")
    pairs = []
    for p in ports_combine:
        url = "http://127.0.0.1:"+p+"/list"
        print("url: "+url)
        response = requests.get(url)
        print(response.json())
        apples = response.json()["apples"]
        for apple in apples:
            # Remember which instance served each apple so links can point
            # back to the right port.
            pairs.append({'url': url, 'apple': apple, 'port': p})
        # pairs.append((url, apple))
        # combines[url] = response.json()["apples"]
        # apples += response.json()["apples"]
    print("pairs: ")
    print(pairs)
    if len(pairs) == 0:
        return "No processed files in any combine instance"
    return render_template('list_apples.html', pairs=pairs)
@app.route('/get_label', methods=["GET"])
def get_label():
    """
    Fetch a stored type graph from a combine instance, score it, and render
    the candidate-labels page.

    Query params: port (combine port), id (apple id), m (graph size
    parameter), optional alpha (coverage weight) and fsid (scoring
    function id).
    """
    port = request.values.get('port')
    apple_id = request.values.get('id')
    m = int(request.values.get('m'))
    graph_dir = get_graph(port=port, apple_id=apple_id)
    if graph_dir:
        print("graph_dir is found: "+str(graph_dir))
        # Defaults mirror the values used by the /fast flow.
        alpha = 0.01
        alpha_passed = request.args.get("alpha")
        if alpha_passed is not None:
            alpha = float(alpha_passed)
        fsid = 3
        fsid_passed = request.args.get("fsid")
        if fsid_passed is not None:
            fsid = int(fsid_passed)
        print("graph_dir before get labels: "+graph_dir)
        g, labels = get_labels_from_graph(graph_dir=graph_dir, m=m, alpha=alpha, fsid=fsid)
        # return render(request, 'ent_ann_recompute.html',
        #               {'anns': eanns, 'alpha': alpha, 'network': 'network',
        #                'highlights': results[:3], 'nodes': get_nodes(g), 'fsid': fsid,
        #                'edges': annotator.get_edges(graph), 'results': results, 'selected': entity_ann.id})
        return render_template('labels.html', labels=labels, network='network', highlights=labels[:3],
                               nodes=get_nodes(g), fsid=fsid, edges=g.get_edges(), results=labels,
                               port=port, apple_id=apple_id, m=m, alpha=alpha)
    logger.error("No graph")
    abort(500)
def get_labels_from_graph(graph_dir, m, alpha, fsid):
    """
    Load a serialized TypeGraph from *graph_dir*, score it, and return the
    graph plus the ranked label titles.

    :param graph_dir: path to a JSON file written by :func:`get_graph`
    :param m: graph size parameter forwarded to TypeGraph.load
    :param alpha: coverage weight used in scoring
    :param fsid: scoring function id
    :return: (graph, list of label title strings)
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(graph_dir, 'r') as f:
        j = json.loads(f.read())
    g = TypeGraph()
    g.load(j, m)
    g.set_score_for_graph(coverage_weight=alpha, m=m, fsid=fsid)
    return g, [n.title for n in g.get_scores()]
def get_nodes(graph):
    """Resolve each cached type key of *graph* through its index and return
    the resulting node objects, preserving cache order."""
    nodes = []
    for type_key in graph.cache:
        nodes.append(graph.index[type_key])
    return nodes
def get_graph(port, apple_id):
    """
    Download the serialized type graph for *apple_id* from the combine
    instance at *port* and store it under UPLOAD_DIR.

    :return: path to the downloaded JSON file, or None on HTTP failure.
    """
    url = "http://127.0.0.1:" + port + "/get_graph?id="+apple_id
    # result = request.get(url)
    print("get graph from url: "+url)
    r = requests.get(url, stream=True)
    # Random suffix keeps concurrent downloads of the same apple apart.
    dest_path = os.path.join(UPLOAD_DIR, str(apple_id)+"__"+get_random(12)+"_graph.json")
    # dest_path = os.path.join(UPLOAD_DIR, str(apple_id)+"_graph.json")
    if r.status_code == 200:
        with open(dest_path, 'wb') as f:
            # Ask urllib3 to gunzip/inflate before copying raw bytes.
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        return dest_path
    else:
        print(r.content)
        return None
if __name__ == '__main__':
    # Allow the listen port to be injected via the environment (captain
    # starts several service instances); otherwise use Flask's default port.
    if 'port' in os.environ:
        app.run(debug=True, host='0.0.0.0', port=int(os.environ['port']))
    else:
        app.run(debug=True, host='0.0.0.0')
| StarcoderdataPython |
1788364 | """Add future kline 1min
Revision ID: 9c2dcdde9
Revises: <PASSWORD>
Create Date: 2017-02-23 02:58:32.227072
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '2<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the 1-minute futures candlestick (OHLCV) table."""
    op.create_table(
        'future_kline_1min',
        sa.Column('timestamp', sa.Integer, primary_key=True),
        sa.Column('open', sa.Float, nullable=False),
        sa.Column('high', sa.Float, nullable=False),
        sa.Column('low', sa.Float, nullable=False),
        sa.Column('close', sa.Float, nullable=False),
        sa.Column('volume', sa.Integer, nullable=False),
        sa.Column('volume_btc', sa.Float, nullable=False)
    )
def downgrade():
    """Drop the table created by :func:`upgrade`."""
    op.drop_table('future_kline_1min')
| StarcoderdataPython |
1750521 | <filename>cogs/memes/imagine.py
import io
from typing import cast
from PIL import Image, ImageDraw, ImageFont
from ditto import CONFIG as BOT_CONFIG
import discord
from discord.ext import commands
from discord.utils import MISSING
from ditto.config import CONFIG
CONFIG = BOT_CONFIG.EXTENSIONS[__name__]

# Base template image; font paths come from the extension config.
IMAGE = "res/imagine.png"
TITLE_FONT = CONFIG.TITLE_FONT
BYLINE_FONT = CONFIG.BYLINE_FONT

# Canvas size of the template image, in pixels.
IMAGE_WIDTH = 2048
IMAGE_HEIGHT = 1024

# Layout boxes: the title occupies the upper region, the byline the strip
# below it. Offsets are (x, y) from the top-left; bounds are (width, height).
TITLE_OFFSET = (128, 192)
TITLE_BOUND = (IMAGE_WIDTH - (TITLE_OFFSET[0] * 2), IMAGE_HEIGHT - 224 - TITLE_OFFSET[1])
BYLINE_OFFSET = (TITLE_OFFSET[0] * 2, TITLE_OFFSET[1] + TITLE_BOUND[1])
BYLINE_BOUND = (IMAGE_WIDTH - (BYLINE_OFFSET[0] * 2), 192)

# RGB white used for all rendered text.
WHITE = (255, 255, 255)
def draw_text(
    draw: ImageDraw.ImageDraw,
    text: str,
    font_name: str,
    colour: tuple[int, int, int],
    bounds: tuple[int, int],
    offsets: tuple[int, int],
    max_font_size: int,
    line_height: float = 1,
) -> None:
    """Render *text* centred inside the box *bounds* placed at *offsets*.

    Starts at *max_font_size* and shrinks the font one point at a time
    until the whole text fits both horizontally and vertically. The height
    of one line is measured with a FULL BLOCK glyph and scaled by
    *line_height*; each line is then drawn individually, centred on x.
    NOTE(review): assumes *text* has at least one non-empty line -- an
    all-empty input would raise IndexError below; confirm callers ensure this.
    """
    font_size = max_font_size
    lines = text.split("\n")
    # Drop trailing empty lines so they don't count toward the height.
    while not lines[-1]:
        lines.pop(-1)
    # Calculate font size
    while True:
        font = ImageFont.truetype(font_name, font_size)
        text_width, _ = cast(tuple[int, int], draw.textsize(text, font=font))
        _, _line_height = cast(tuple[int, int], draw.textsize("\N{FULL BLOCK}", font=font))
        text_height = int(_line_height * line_height * len(lines))
        if text_width < bounds[0] and text_height < bounds[1]:
            break
        font_size -= 1
    # Calculate Starting Y position (vertical centring inside the box).
    y_pos = offsets[1] + (bounds[1] - text_height) // 2
    # Draw text, one line at a time, each centred horizontally.
    for line in lines:
        line_width, _ = draw.textsize(line, font=font)  # type: ignore
        x_pos = offsets[0] + (bounds[0] - line_width) // 2
        draw.text((x_pos, y_pos), line, colour, font=font)
        y_pos += int(_line_height * line_height)
class Imagine(commands.Cog):
    """Cog exposing the 'imagine' meme-image command."""

    @commands.command(name="imagine")
    async def timecard(self, ctx, *, text: commands.clean_content(fix_channel_mentions=True) = "a place\nfor friends and communities"):  # type: ignore
        """Imagine."""
        async with ctx.typing():
            # Load image
            image = Image.open(IMAGE)
            draw = cast(ImageDraw.ImageDraw, ImageDraw.Draw(image))
            # First input line becomes the title (prefixed with IMAGINE);
            # the optional second line becomes the byline.
            title, _, byline = str(text).upper().partition("\n")
            if "\n" in byline:
                raise commands.BadArgument("Too many lines in input.")
            title = f"IMAGINE\n{title.strip()}"
            byline = byline.strip()
            draw_text(draw, title, TITLE_FONT, WHITE, TITLE_BOUND, TITLE_OFFSET, 300, 0.95)
            if byline:
                draw_text(draw, byline, BYLINE_FONT, WHITE, BYLINE_BOUND, BYLINE_OFFSET, 100)
            # Send the rendered image from memory -- nothing is written to disk.
            out_fp = io.BytesIO()
            image.save(out_fp, "PNG")
            out_fp.seek(0)
            await ctx.send(file=discord.File(out_fp, "imagine.png"))
def setup(bot: commands.Bot):
    """Extension entry point: register the Imagine cog on *bot*."""
    bot.add_cog(Imagine(bot))
| StarcoderdataPython |
3392563 | """Constants"""
BASE_URL = "http://www.wienerlinien.at/ogd_realtime/monitor?rbl={}"
DEPARTURES = {
"1st": {"key": 0, "name": "{} 1st departure"},
"2nd": {"key": 1, "name": "{} 2nd departure"},
"3rd": {"key": 2, "name": "{} 3rd departure"},
"4th": {"key": 3, "name": "{} 4th departure"},
}
| StarcoderdataPython |
1794457 | <filename>scripts/get_nltk_usage.py
from module_dependencies import Module
import pickle
import json
# One-off analysis step, kept for reference: build the Module object from
# source and cache it as a pickle (slow, hence commented out).
# module = Module("nltk", count="all", lazy=False)
# with open("app/static/data/nltk_module.pickle", "wb") as f:
#     pickle.dump(module, f)

# Load the cached module analysis.
# NOTE(review): pickle.load executes arbitrary code from the file; safe only
# because this pickle is produced locally by the commented-out step above.
with open("app/static/data/nltk_module.pickle", "rb") as f:
    module = pickle.load(f)
print(module.n_uses())

# Export cumulative usage counts as JSON for the web app to serve.
usage = module.usage(cumulative=True)
with open("app/static/data/nltk_usage.json", "w") as f:
    json.dump(usage, f)

# Leftover debugging / plotting experiments.
# params = module.plot(show=False, transparant=True)
# breakpoint()
| StarcoderdataPython |
4804429 | <gh_stars>0
import sqlite3
import random
conn = sqlite3.connect('pokemon.db')
c = conn.cursor()
# Natures ("xingge"), grouped by the stat each nature boosts; the paired
# index drawn below picks one boosted and one reduced stat.
nature_attack_plus = ['Hardy', 'Lonely', 'Adamant', 'Naughty', 'Brave']
nature_defense_plus = ['Bold', 'Docile', 'Impish', 'Lax', 'Relaxed']
nature_sp_plus = ['Modest', 'Mild', 'Bashful', 'Rash', 'Quiet']
nature_spdef_plus = ['Calm', 'Gentle', 'Careful', 'Quirky', 'Sassy']
speed_plus = ['Timid', 'Hasty', 'Jolly', 'Naive', 'Serious']
nature_list = [nature_attack_plus, nature_defense_plus, nature_sp_plus, nature_spdef_plus, speed_plus]

pokemon_name = 'blissey'
# NOTE(review): SQL is built with % interpolation; fine for this hard-coded
# name, but use parameterized queries if pokemon_name ever comes from input.
c.execute("SELECT * FROM Pokemon_basic_data WHERE name = '%s'" % pokemon_name)
id, name, height, weight, base_exp, species, url, type = c.fetchone()
print id, name, height, weight, base_exp, species, url, type
c.execute("SELECT * FROM Pokemon_combat_data WHERE name = '%s'" % pokemon_name)
id, name, move, hp_base, attack_base, defense_base, special_attack_base, special_deffense_base, speed_base, hp_effort, attack_effort, defense_effort, special_attack_effort, special_defense_effort, speed_effort = c.fetchone()

# Individual values (IVs), rolled 0-31 per stat below.
individual = {'hp': 0, 'attack': 0, 'defense': 0, 'special_attack': 0, 'special_defense': 0, 'speed': 0}
# try to build one pokemon
stat_lvl = [0, 0, 0, 0, 0, 0]
star = 5
for key in individual:
    individual[key] = random.randint(0, 31)
lvl = 100
# Pick (boosted-group, reduced-group) nature indices.
nature_count = (random.randint(0, 4), random.randint(0, 4))
print nature_count
nature = nature_list[nature_count[0]][nature_count[1]]
# Standard main-series stat formulas (HP differs from the other five).
hp = (2*hp_base+individual['hp']+hp_effort/4)*lvl/100.0+lvl+10
attack = (2*attack_base+individual['attack']+attack_effort/4)*lvl/100+5
defense = (2*defense_base+individual['defense']+defense_effort/4)*lvl/100+5
special_attack = (2*special_attack_base+individual['special_attack']+special_attack_effort/4)*lvl/100+5
special_defense = (2*special_deffense_base+individual['special_defense']+special_defense_effort/4)*lvl/100+5
speed = (2*speed_base+individual['speed']+speed_effort/4)*lvl/100+5
# nature modification
modified_stat = [attack, defense, special_attack, special_defense, speed]
modified_stat[nature_count[0]] = modified_stat[nature_count[0]]*1.1
modified_stat[nature_count[1]] = modified_stat[nature_count[1]]*0.9
stats = [hp]
for stat in modified_stat:
    stats.append(stat)
# star modification
for i in range(0, 6):
    # NOTE(review): ``^`` is bitwise XOR in Python, so this XORs the scaled
    # stat with ``star`` — almost certainly ``**`` (power) or ``*`` was
    # intended. Left as-is; confirm intent before changing behavior.
    stats[i] = int(stats[i]*1.2)^star
# stat lvl
for i in range(0, 6):
    if i == 0:
        stats[i] += stats[i]*0.01*stat_lvl[i] + stat_lvl[i]*2
    else:
        # NOTE(review): ``+ +`` is a harmless unary plus but looks like a typo.
        stats[i] += stats[i]*0.01*stat_lvl[i] + + stat_lvl[i]*1
print stats
| StarcoderdataPython |
# Read the number of segments and the friend's starting coordinate.
n, x0 = map(int, input().split())

# Collect each segment's endpoints in sorted order.
lows, highs = [], []
for _ in range(n):
    lo, hi = sorted(map(int, input().split()))
    lows.append(lo)
    highs.append(hi)

# The common intersection of all segments is [lo_max, hi_min].
lo_max = max(lows)
hi_min = min(highs)

if lo_max > hi_min:
    # The segments share no common point.
    print(-1)
elif lo_max <= x0 <= hi_min:
    # Already inside the intersection: no travel needed.
    print(0)
else:
    # Walk to the nearest end of the intersection.
    print(min(abs(lo_max - x0), abs(hi_min - x0)))
| StarcoderdataPython |
3269267 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
from ext.N2DChecker import MainScript
import argparse
# Instantiate the bot and parse the Telegram token from the command line.
bt = MainScript()
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-t', type=str, help="token")
args = parser.parse_args()
# NOTE(review): if -t is omitted this becomes the literal string "None";
# login will then fail at runtime rather than here.
token = str(args.t)
bt.login(token)
# Blocks here, polling for updates until interrupted (Ctrl-C).
bt.start_pulling()
| StarcoderdataPython |
161823 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-07 08:38
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated migration: restore the default manager on Restriction."""

    dependencies = [
        ('protector', '0002_auto_20160607_0827'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='restriction',
            managers=[
                ('objects', django.db.models.manager.Manager()),
            ],
        ),
    ]
| StarcoderdataPython |
135111 | # coding: utf-8
import pprint
import re
import six
class FreeResourceDetail:
    """Auto-generated Huawei Cloud SDK model describing one free-resource
    quota entry (remaining amount, reuse cycle, validity window, unit).

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Attributes masked as "****" by to_dict(); none for this model.
    sensitive_list = []

    openapi_types = {
        'free_resource_id': 'str',
        'free_resource_type_name': 'str',
        'quota_reuse_cycle': 'int',
        'quota_reuse_cycle_type': 'int',
        'usage_type_name': 'str',
        'start_time': 'str',
        'end_time': 'str',
        'amount': 'float',
        'original_amount': 'float',
        'measure_id': 'int'
    }

    attribute_map = {
        'free_resource_id': 'free_resource_id',
        'free_resource_type_name': 'free_resource_type_name',
        'quota_reuse_cycle': 'quota_reuse_cycle',
        'quota_reuse_cycle_type': 'quota_reuse_cycle_type',
        'usage_type_name': 'usage_type_name',
        'start_time': 'start_time',
        'end_time': 'end_time',
        'amount': 'amount',
        'original_amount': 'original_amount',
        'measure_id': 'measure_id'
    }

    def __init__(self, free_resource_id=None, free_resource_type_name=None, quota_reuse_cycle=None, quota_reuse_cycle_type=None, usage_type_name=None, start_time=None, end_time=None, amount=None, original_amount=None, measure_id=None):
        """FreeResourceDetail - a model defined in huaweicloud sdk"""
        self._free_resource_id = None
        self._free_resource_type_name = None
        self._quota_reuse_cycle = None
        self._quota_reuse_cycle_type = None
        self._usage_type_name = None
        self._start_time = None
        self._end_time = None
        self._amount = None
        self._original_amount = None
        self._measure_id = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided; the property
        # setters below perform the actual assignment.
        if free_resource_id is not None:
            self.free_resource_id = free_resource_id
        if free_resource_type_name is not None:
            self.free_resource_type_name = free_resource_type_name
        if quota_reuse_cycle is not None:
            self.quota_reuse_cycle = quota_reuse_cycle
        if quota_reuse_cycle_type is not None:
            self.quota_reuse_cycle_type = quota_reuse_cycle_type
        if usage_type_name is not None:
            self.usage_type_name = usage_type_name
        if start_time is not None:
            self.start_time = start_time
        if end_time is not None:
            self.end_time = end_time
        if amount is not None:
            self.amount = amount
        if original_amount is not None:
            self.original_amount = original_amount
        if measure_id is not None:
            self.measure_id = measure_id

    @property
    def free_resource_id(self):
        """Gets the free_resource_id of this FreeResourceDetail.

        |Parameter name: resource package ID| |Constraints and description: resource package ID|

        :return: The free_resource_id of this FreeResourceDetail.
        :rtype: str
        """
        return self._free_resource_id

    @free_resource_id.setter
    def free_resource_id(self, free_resource_id):
        """Sets the free_resource_id of this FreeResourceDetail.

        |Parameter name: resource package ID| |Constraints and description: resource package ID|

        :param free_resource_id: The free_resource_id of this FreeResourceDetail.
        :type: str
        """
        self._free_resource_id = free_resource_id

    @property
    def free_resource_type_name(self):
        """Gets the free_resource_type_name of this FreeResourceDetail.

        |Parameter name: free resource type name| |Constraints and description: free resource type name|

        :return: The free_resource_type_name of this FreeResourceDetail.
        :rtype: str
        """
        return self._free_resource_type_name

    @free_resource_type_name.setter
    def free_resource_type_name(self, free_resource_type_name):
        """Sets the free_resource_type_name of this FreeResourceDetail.

        |Parameter name: free resource type name| |Constraints and description: free resource type name|

        :param free_resource_type_name: The free_resource_type_name of this FreeResourceDetail.
        :type: str
        """
        self._free_resource_type_name = free_resource_type_name

    @property
    def quota_reuse_cycle(self):
        """Gets the quota_reuse_cycle of this FreeResourceDetail.

        |Parameter name: reuse cycle| |Constraints and description: reuse cycle|

        :return: The quota_reuse_cycle of this FreeResourceDetail.
        :rtype: int
        """
        return self._quota_reuse_cycle

    @quota_reuse_cycle.setter
    def quota_reuse_cycle(self, quota_reuse_cycle):
        """Sets the quota_reuse_cycle of this FreeResourceDetail.

        |Parameter name: reuse cycle| |Constraints and description: reuse cycle|

        :param quota_reuse_cycle: The quota_reuse_cycle of this FreeResourceDetail.
        :type: int
        """
        self._quota_reuse_cycle = quota_reuse_cycle

    @property
    def quota_reuse_cycle_type(self):
        """Gets the quota_reuse_cycle_type of this FreeResourceDetail.

        |Parameter name: reset cycle category| |Constraints and description: reset cycle category|

        :return: The quota_reuse_cycle_type of this FreeResourceDetail.
        :rtype: int
        """
        return self._quota_reuse_cycle_type

    @quota_reuse_cycle_type.setter
    def quota_reuse_cycle_type(self, quota_reuse_cycle_type):
        """Sets the quota_reuse_cycle_type of this FreeResourceDetail.

        |Parameter name: reset cycle category| |Constraints and description: reset cycle category|

        :param quota_reuse_cycle_type: The quota_reuse_cycle_type of this FreeResourceDetail.
        :type: int
        """
        self._quota_reuse_cycle_type = quota_reuse_cycle_type

    @property
    def usage_type_name(self):
        """Gets the usage_type_name of this FreeResourceDetail.

        |Parameter name: usage type name| |Constraints and description: usage type name|

        :return: The usage_type_name of this FreeResourceDetail.
        :rtype: str
        """
        return self._usage_type_name

    @usage_type_name.setter
    def usage_type_name(self, usage_type_name):
        """Sets the usage_type_name of this FreeResourceDetail.

        |Parameter name: usage type name| |Constraints and description: usage type name|

        :param usage_type_name: The usage_type_name of this FreeResourceDetail.
        :type: str
        """
        self._usage_type_name = usage_type_name

    @property
    def start_time(self):
        """Gets the start_time of this FreeResourceDetail.

        |Parameter name: start time| |Constraints and description: start time|

        :return: The start_time of this FreeResourceDetail.
        :rtype: str
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        """Sets the start_time of this FreeResourceDetail.

        |Parameter name: start time| |Constraints and description: start time|

        :param start_time: The start_time of this FreeResourceDetail.
        :type: str
        """
        self._start_time = start_time

    @property
    def end_time(self):
        """Gets the end_time of this FreeResourceDetail.

        |Parameter name: end time| |Constraints and description: end time|

        :return: The end_time of this FreeResourceDetail.
        :rtype: str
        """
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        """Sets the end_time of this FreeResourceDetail.

        |Parameter name: end time| |Constraints and description: end time|

        :param end_time: The end_time of this FreeResourceDetail.
        :type: str
        """
        self._end_time = end_time

    @property
    def amount(self):
        """Gets the amount of this FreeResourceDetail.

        |Parameter name: remaining free resource quota| |Constraints and description: remaining free resource quota|

        :return: The amount of this FreeResourceDetail.
        :rtype: float
        """
        return self._amount

    @amount.setter
    def amount(self, amount):
        """Sets the amount of this FreeResourceDetail.

        |Parameter name: remaining free resource quota| |Constraints and description: remaining free resource quota|

        :param amount: The amount of this FreeResourceDetail.
        :type: float
        """
        self._amount = amount

    @property
    def original_amount(self):
        """Gets the original_amount of this FreeResourceDetail.

        |Parameter name: original free resource quota| |Constraints and description: original free resource quota|

        :return: The original_amount of this FreeResourceDetail.
        :rtype: float
        """
        return self._original_amount

    @original_amount.setter
    def original_amount(self, original_amount):
        """Sets the original_amount of this FreeResourceDetail.

        |Parameter name: original free resource quota| |Constraints and description: original free resource quota|

        :param original_amount: The original_amount of this FreeResourceDetail.
        :type: float
        """
        self._original_amount = original_amount

    @property
    def measure_id(self):
        """Gets the measure_id of this FreeResourceDetail.

        |Parameter name: measurement unit| |Constraints and description: measurement unit|

        :return: The measure_id of this FreeResourceDetail.
        :rtype: int
        """
        return self._measure_id

    @measure_id.setter
    def measure_id(self, measure_id):
        """Sets the measure_id of this FreeResourceDetail.

        |Parameter name: measurement unit| |Constraints and description: measurement unit|

        :param measure_id: The measure_id of this FreeResourceDetail.
        :type: int
        """
        self._measure_id = measure_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models;
        # attributes listed in sensitive_list are masked.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FreeResourceDetail):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
1717397 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Setup of package."""
import glob
import os
from setuptools import setup, find_packages
from typing import Dict
PACKAGE_NAME = "tac"

# Read version/author metadata from tac/__version__.py without importing
# the package (which may have unmet dependencies at install time).
here = os.path.abspath(os.path.dirname(__file__))
about = {}  # type: Dict[str, str]
with open(
    os.path.join(here, PACKAGE_NAME, "__version__.py"), "r", encoding="utf-8"
) as f:
    exec(f.read(), about)

with open("README.md", "r", encoding="utf-8") as f:
    readme = f.read()

# Optional extras: `pip install tac[gui]` pulls in the web UI dependencies.
extras = {"gui": ["flask", "flask_restful", "wtforms"]}

setup(
    name=about["__title__"],
    description=about["__description__"],
    version=about["__version__"],
    author=about["__author__"],
    url=about["__url__"],
    long_description=readme,
    packages=find_packages(include=["tac*"]),
    classifiers=[
        "Environment :: Console",
        "Environment :: Web Environment",
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS",
        "Operating System :: Microsoft",
        "Operating System :: Unix",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Communications",
        "Topic :: Internet",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development",
        "Topic :: System",
    ],
    install_requires=[
        "aea==0.1.4",
        "oef",
        "colorlog",  # TODO 'oef' dependency, to be fixed.
        "numpy",
        "python-dateutil",
        "visdom",
    ],
    tests_require=["tox"],
    extras_require=extras,
    zip_safe=False,
    include_package_data=True,
    # Ship the sandbox, templates, scripts and simulation assets alongside
    # the package so the CLI tools can locate them after installation.
    data_files=[
        (
            "sandbox",
            ["sandbox/docker-compose.yml", "sandbox/config.json", "sandbox/.env"]
            + glob.glob("sandbox/*.py")
            + glob.glob("sandbox/*.sh"),
        ),
        ("templates/v1", glob.glob("templates/v1/*.py")),
        ("scripts/oef", glob.glob("scripts/oef/*.json")),
        ("simulation/v1", glob.glob("simulation/v1/*")),
        (
            "oef_search_pluto_scripts",
            glob.glob("oef_search_pluto_scripts/*.py")
            + glob.glob("oef_search_pluto_scripts/*.json"),
        ),
    ],
    license=about["__license__"],
)
| StarcoderdataPython |
78351 | from django.urls import path
from . import views
# Namespace for reversing URLs, e.g. reverse('tickets:detail', args=[id]).
app_name = 'tickets'

urlpatterns = [
    path('', views.ticket_list, name='list'),
    path('detail/<int:id>', views.ticket_detail, name='detail'),
    path('create/', views.ticket_create, name='create'),
    path('edit/<int:id>', views.ticket_edit, name='edit'),
    path('assign/<int:id>', views.ticket_assign, name='assign'),
    path('history/<int:id>', views.ticket_history, name='history'),
]
| StarcoderdataPython |
1742683 | from flask import Flask, request, jsonify, json, render_template
from get_path import get_path
import os
import shelve
app = Flask(__name__)
# Location of the shelve database file; the process fails fast at import
# time if DB_PATH is not set.
db_path = os.environ['DB_PATH']


@app.route('/path', methods=['GET'])
def path():
    """Return the routed path for the JSON-encoded ``waypoints`` query param."""
    waypoints = request.args.get('waypoints')
    waypoints = json.loads(waypoints)
    path = get_path(waypoints)
    return jsonify(path)


@app.route('/ride/<string:ride_id>', methods=['GET', 'PUT'])
def route(ride_id):
    """Store (PUT) or fetch (GET) a ride document keyed by *ride_id*."""
    if request.method == 'PUT':
        db = shelve.open(db_path)
        db[ride_id] = request.get_json()
        db.close()
        return 'ok'
    elif request.method == 'GET':
        # NOTE(review): an unknown ride_id raises KeyError here, which Flask
        # turns into a 500; a 404 would be friendlier.
        db = shelve.open(db_path)
        r = db[ride_id]
        db.close()
        return jsonify(r)


@app.route('/ride/<string:ride_id>/view', methods=['GET'])
def view_ride(ride_id):
    """Render the HTML viewer page for one ride."""
    return render_template('viewride.html', ride_id=ride_id)


@app.route('/')
def main():
    """Render the main map page."""
    return render_template('gtfsspatial.html')
| StarcoderdataPython |
1647461 | <filename>tests/r/test_bomregions2012.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.bomregions2012 import bomregions2012
def test_bomregions2012():
    """Test module bomregions2012.py.

    Downloads bomregions2012.csv into a temporary directory and checks that
    the extracted data has 113 rows and 22 columns.
    """
    test_path = tempfile.mkdtemp()
    x_train, metadata = bomregions2012(test_path)
    try:
        assert x_train.shape == (113, 22)
    except Exception:
        # Clean up the download directory on failure, then re-raise.
        # The original used a bare ``except:`` and called ``raise()`` —
        # which raises an empty tuple and crashes with TypeError instead
        # of propagating the AssertionError; a bare ``raise`` re-raises
        # the current exception.
        shutil.rmtree(test_path)
        raise
| StarcoderdataPython |
3241322 | <reponame>astamminger/aiida_core<gh_stars>0
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from flask_cache import Cache
from aiida.restapi.api import app
from aiida.restapi.common.config import cache_config
# Would be nice to be able to specify here what has to be cached or not!
# Probably this is not doable because cached and memoize only work as decorators.
# Module-level cache instance shared by the REST API endpoints; behavior is
# controlled by cache_config.
cache = Cache(app, config=cache_config)
| StarcoderdataPython |
121728 | <reponame>bionanek/ByTheWay-API<gh_stars>0
from django.contrib.auth.models import User
from django.db import models
class LogoUpload(models.Model):
    """An uploaded logo file, owned by the user who uploaded it."""
    created = models.DateTimeField(auto_now_add=True)
    owner = models.ForeignKey(User, to_field='id', on_delete=models.CASCADE)
    datafile = models.FileField()
class Tag(models.Model):
    """A short free-form label that can be attached to companies."""
    name = models.CharField(max_length=20)

    def __str__(self):
        return self.name
class CompanyType(models.Model):
    """A unique category a company belongs to (one type per company)."""
    name = models.CharField(max_length=50, blank=False, unique=True)
class Company(models.Model):
    """A company with a map position, optional logo, tags and a type."""
    name = models.CharField(max_length=100, blank=False, unique=True)
    description = models.TextField()
    logo = models.ForeignKey(LogoUpload, related_name="company_logo", blank=True, default=None, null=True, on_delete=models.CASCADE)
    # Geographic position (latitude/longitude) used for map placement.
    pos_lat = models.FloatField(default=10.0)
    pos_lon = models.FloatField(default=10.0)
    tags = models.ManyToManyField(Tag, related_name="company_tags")
    type = models.ForeignKey(CompanyType, related_name="company_type", to_field="id", on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name
| StarcoderdataPython |
150051 | import json
def readConfig():
    """Load and return the JSON configuration from ``conf.json``.

    The file is resolved relative to the current working directory.

    Returns:
        The deserialized configuration (typically a dict).

    Raises:
        FileNotFoundError: if ``conf.json`` does not exist.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    # json.load reads straight from the file object — no need to slurp the
    # text into memory first; the original also confusingly rebound the
    # file handle name to the parsed result.
    with open('conf.json', encoding='utf-8') as conf_file:
        return json.load(conf_file)
| StarcoderdataPython |
3374945 | <reponame>salliewalecka/sparkling-water
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ai.h2o.sparkling.ml.params.H2OSharedTreeParams import H2OSharedTreeParams
from ai.h2o.sparkling.ml.params.H2OTypeConverters import H2OTypeConverters
from pyspark.ml.param import *
class H2ODRFParams(H2OSharedTreeParams):
    """Spark ML params specific to H2O Distributed Random Forest (DRF),
    layered on top of the shared tree parameters."""

    ##
    # Param definitions
    ##
    binomialDoubleTrees = Param(
        Params._dummy(),
        "binomialDoubleTrees",
        "In case of binary classification, build 2 times more trees (one per class) - can lead "
        "to higher accuracy.",
        H2OTypeConverters.toBoolean())

    mtries = Param(
        Params._dummy(),
        "mtries",
        "Number of variables randomly sampled as candidates at each split. If set to -1, defaults "
        "to sqrt{p} for classification and p/3 for regression (where p is the # of predictors",
        H2OTypeConverters.toInt())

    ##
    # Getters
    ##
    def getBinomialDoubleTrees(self):
        return self.getOrDefault(self.binomialDoubleTrees)

    def getMtries(self):
        return self.getOrDefault(self.mtries)

    ##
    # Setters
    ##
    def setBinomialDoubleTrees(self, value):
        return self._set(binomialDoubleTrees=value)

    def setMtries(self, value):
        return self._set(mtries=value)
| StarcoderdataPython |
14567 | import logging
import os
import random
import time
import datetime
import sys
import math
from screen import Screen
from scorer import Scorer
from trigger import Trigger
from psychopy import core, event, sound
from psychopy.hardware import keyboard
from pupil_labs import PupilCore
from datalog import Datalog
from config.configSample import CONF
#########################################################################
######################################
# Initialize screen, logger and inputs
logging.basicConfig(
    level=CONF["loggingLevel"],
    format='%(asctime)s-%(levelname)s-%(message)s',
)  # This is a log for debugging the script, and prints messages to the terminal

# needs to be first, so that if it doesn't succeed, it doesn't freeze everything
eyetracker = PupilCore(ip=CONF["pupillometry"]
                       ["ip"], port=CONF["pupillometry"]["port"], shouldRecord=CONF["recordEyetracking"])

# Serial-port trigger box for marking events in the EEG/physiology recording.
trigger = Trigger(CONF["trigger"]["serial_device"],
                  CONF["sendTriggers"], CONF["trigger"]["labels"])

screen = Screen(CONF)

# Output files are grouped per participant/session and dated.
datalog = Datalog(OUTPUT_FOLDER=os.path.join(
    'output', CONF["participant"] + "_" + CONF["session"],
    datetime.datetime.now().strftime("%Y-%m-%d")), CONF=CONF)  # This is for saving data

kb = keyboard.Keyboard()

mainClock = core.MonotonicClock()  # starts clock for timestamping events

alarm = sound.Sound(os.path.join('sounds', CONF["instructions"]["alarm"]),
                    stereo=True)

questionnaireReminder = sound.Sound(os.path.join(
    'sounds', CONF["instructions"]["questionnaireReminder"]), stereo=True)

scorer = Scorer()

logging.info('Initialization completed')
#########################################################################
def quitExperimentIf(shouldQuit):
    """Cleanly abort the experiment when *shouldQuit* is True.

    Sends a quit trigger, prints the score, stops the eyetracker recording,
    resets the trigger box, and exits the process with status 2.
    """
    if shouldQuit:
        trigger.send("Quit")
        scorer.getScore()
        logging.info('quit experiment')
        eyetracker.stop_recording()
        trigger.reset()
        sys.exit(2)
def onFlip(stimName, logName):
    """Send trigger on flip, reset the keyboard clock, and save a timestamp.

    Intended to be scheduled on a screen flip so that the trigger,
    reaction-time clock and datalog entry all align with stimulus onset.
    """
    trigger.send(stimName)
    kb.clock.reset()  # this starts the keyboard clock as soon as stimulus appears
    datalog[logName] = mainClock.getTime()
##############
# Introduction
##############

# Display overview of session
screen.show_overview()
core.wait(CONF["timing"]["overview"])

# Optionally, display instructions; 'q' during the instructions aborts.
if CONF["showInstructions"]:
    screen.show_instructions()
    key = event.waitKeys()
    quitExperimentIf(key[0] == 'q')

# Start the eyetracker recording, filed under participant/session/task.
eyetracker.start_recording(os.path.join(
    CONF["participant"], CONF["session"], CONF["task"]["name"]))

# Blank screen for initial rest
screen.show_blank()
logging.info('Starting blank period')
trigger.send("StartBlank")
core.wait(CONF["timing"]["rest"])
trigger.send("EndBlank")

# Cue start of the experiment
screen.show_cue("START")
trigger.send("Start")
core.wait(CONF["timing"]["cue"])

#################
# Main experiment
#################

# customize — sample trial: log a trial id, mark it on the eyetracker,
# and record the current pupil diameter.
datalog["trialID"] = trigger.sendTriggerId()
eyetracker.send_trigger("Stim", {"id": 1, "condition": "sample"})
datalog["pupilSize"] = eyetracker.getPupildiameter()

# save data to file
datalog.flush()

############
# Conclusion
############

# End main experiment
screen.show_cue("DONE!")
trigger.send("End")
core.wait(CONF["timing"]["cue"])

# Blank screen for final rest
screen.show_blank()
logging.info('Starting blank period')
trigger.send("StartBlank")
core.wait(CONF["timing"]["rest"])
trigger.send("EndBlank")

logging.info('Finished')
scorer.getScore()
trigger.reset()
eyetracker.stop_recording()

# Remind the participant to fill in the questionnaire before closing.
questionnaireReminder.play()
core.wait(2)
1664198 | # -*- coding: utf-8 -*-
import pygame
import math
import datetime
import time
from core.weapon import *
# hero class - player
class HeroSprite(pygame.sprite.Sprite):
    """Animated player sprite.

    Loads nine walking frames per facing direction plus one attack frame
    per direction. Each ``move_*`` call advances the walk animation by one
    frame and shifts the sprite by a quarter pixel; the four directions
    previously duplicated this logic verbatim, so it now lives in ``_step``.
    """

    def __init__(self, screen, x, y):
        super(HeroSprite, self).__init__()
        self.screen = screen
        # Nine walking frames per direction.
        self.left, self.right, self.up, self.down = [], [], [], []
        for i in range(9):
            self.left.append(pygame.image.load("./images/hero/hero-left_{}.png".format(i+1)))
            self.right.append(pygame.image.load("./images/hero/hero-right_{}.png".format(i+1)))
            self.down.append(pygame.image.load("./images/hero/hero-down_{}.png".format(i+1)))
            self.up.append(pygame.image.load("./images/hero/hero-up_{}.png".format(i+1)))
        # One attack frame per direction, ordered (left, right, up, down).
        self.attack_img = ()
        for i in ['left', 'right', 'up', 'down']:
            self.attack_img += (pygame.image.load("./images/hero/hero-attack-{}.png".format(i)),)
        self.index = 0
        self.image = self.right[self.index]
        self.x, self.y, self.face = x, y, 'right'
        self.rect = pygame.Rect(self.x, self.y, 16, 16)

    def attack(self):
        """Show the attack frame matching the current facing direction."""
        frame_index = {'left': 0, 'right': 1, 'up': 2, 'down': 3}.get(self.face)
        # Like the original if/elif chain, do nothing for an unknown face.
        if frame_index is not None:
            self.image = self.attack_img[frame_index]

    def _step(self, face, frames, dx, dy):
        """Advance the walk animation one frame and move by (dx, dy).

        Shared implementation of the four ``move_*`` methods: updates the
        facing direction, cycles through *frames*, and shifts both the
        collision rect and the float position.
        """
        self.face = face
        self.index = (self.index + 1) % len(frames)
        self.image = frames[self.index]
        self.rect = pygame.Rect(self.x + dx, self.y + dy, 16, 16)
        self.x += dx
        self.y += dy

    def move_left(self):
        self._step('left', self.left, -0.25, 0)

    def move_right(self):
        self._step('right', self.right, 0.25, 0)

    def move_down(self):
        self._step('down', self.down, 0, 0.25)

    def move_up(self):
        self._step('up', self.up, 0, -0.25)

    def update(self):
        super(HeroSprite, self).update()
class Hero(pygame.sprite.Group):
    def __init__(self, world, x, y, lives):
        # The Hero group wraps a single HeroSprite and mirrors some of its
        # state (position, facing, rect center) for convenience.
        self.world = world
        self.screen = self.world.screen
        self.hero_sprite = HeroSprite(self.screen, x, y)
        self.x, self.y = self.hero_sprite.x, self.hero_sprite.y
        self.face, self.centerx, self.centery = 'right', self.hero_sprite.rect.centerx, self.hero_sprite.rect.centery
        # HUD images and sound effects.
        self.heart_img = pygame.image.load('./images/hero/heart.png')
        self.star_img = pygame.image.load('./images/hero/star.png')
        self.enemy_img = pygame.image.load("./images/enemies/enemy-down_3.png")
        self.sound_collect = pygame.mixer.Sound("./sounds/hero/collect.ogg")
        self.sound_kill_enemy = pygame.mixer.Sound("./sounds/hero/kill_enemy.ogg")
        self.sound_die = pygame.mixer.Sound("./sounds/hero/die.ogg")
        self.sound_game_over = pygame.mixer.Sound("./sounds/level/game_over.ogg")
        self.sound_collect.set_volume(0.025)
        self.sound_kill_enemy.set_volume(0.025)
        # Gameplay counters: remaining lives, collected stars, kills, stamina.
        self.lives, self.stars, self.enemy_score, self.stamina = lives, 0, 0, 100
        self.weapon = Weapon(self.screen, self)
        # Fonts for the small HUD text and the score digits.
        self.ui = pygame.font.SysFont("monaco", 15)
        self.ui_score = pygame.font.SysFont("monaco", 24)
        super(Hero, self).__init__(self.hero_sprite)
    def drawing(self):
        # Draw one heart icon per remaining life in the top-right corner.
        # NOTE(review): indentation was lost in this copy of the file; the HUD
        # elements below are drawn unconditionally here — confirm against the
        # original whether any belong under the lives check.
        if self.lives > 0:
            for i in range(self.lives):
                self.screen.blit(self.heart_img, [620-(i*20), 4])
        # Star and enemy icons with their running counters.
        self.screen.blit(self.star_img, [540, 3])
        self.screen.blit(self.enemy_img, [484, 0])
        ui_stars_score = self.ui_score.render("{}".format(int(self.stars)), 3, (255, 255, 255))
        ui_enemy_score = self.ui_score.render("{}".format(int(self.enemy_score)), 3, (255, 255, 255))
        self.screen.blit(ui_stars_score, [558, 4])
        self.screen.blit(ui_enemy_score, [520, 4])
        self.weapon.draw()
        # Footer copyright line with the current year.
        cyear = datetime.datetime.now().year
        copyright = self.ui.render("Copyright (c) %s by zhzhussupovkz" % cyear, 3, (255, 255, 255))
        self.screen.blit(copyright, [240, 620])
    def update(self):
        # While alive: mirror the sprite's state, handle attack input,
        # constrain movement to the level's roads and resolve interactions.
        if self.lives > 0:
            self.face = self.hero_sprite.face
            self.x, self.y = self.hero_sprite.x, self.hero_sprite.y
            self.centerx, self.centery = self.hero_sprite.rect.centerx, self.hero_sprite.rect.centery
            key = pygame.key.get_pressed()
            if key[pygame.K_SPACE]:
                self.attack()
            self.weapon.update()
            self.walk_on_horizontal_roads()
            self.walk_on_vertical_roads()
            self.collect_stars()
            self.add_injury_to_enemies()
        else:
            # Out of lives: flag game over and pause the world.
            self.world.level.game_over = True
            self.sound_game_over.play()
            self.world.pause = True
        super(Hero, self).update()
    # Thin delegates: the Hero group forwards movement to its single sprite.
    def move_left(self):
        self.hero_sprite.move_left()

    def move_right(self):
        self.hero_sprite.move_right()

    def move_down(self):
        self.hero_sprite.move_down()

    def move_up(self):
        self.hero_sprite.move_up()
def walk_on_horizontal_roads(self):
key = pygame.key.get_pressed()
if self.world.level.num == 1:
if (self.x >= 48 and self.x <= 570 and self.y >= 264 and self.y <= 274):
if key[pygame.K_RIGHT]:
if self.x <= 566:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 49:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 266:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 271:
self.move_down()
if (self.x >= 262 and self.x <= 577 and self.y >= 396 and self.y <= 402):
if key[pygame.K_RIGHT]:
if self.x <= 572:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 264:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 397:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 399:
self.move_down()
if (self.x >= 266 and self.x <= 600 and self.y >= 556 and self.y <= 560):
if key[pygame.K_RIGHT]:
if self.x <= 598:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 268:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 557:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 559:
self.move_down()
if self.world.level.num == 2:
if (self.x >= 24 and self.x <= 570 and self.y >= 58 and self.y <= 66):
if key[pygame.K_RIGHT]:
if self.x <= 566:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 25:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 60:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 63:
self.move_down()
if (self.x >= 104 and self.x <= 573 and self.y >= 524 and self.y <= 530):
if key[pygame.K_RIGHT]:
if self.x <= 569:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 105:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 526:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 528:
self.move_down()
if (self.x >= 568 and self.x <= 620 and self.y >= 570 and self.y <= 573):
if key[pygame.K_RIGHT]:
if self.x <= 616:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 570:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 571:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 572:
self.move_down()
if (self.x >= 44 and self.x <= 104 and self.y >= 460 and self.y <= 464):
if key[pygame.K_RIGHT]:
if self.x <= 103:
self.move_right()
elif key[pygame.K_LEFT]:
if self.x >= 45:
self.move_left()
elif key[pygame.K_UP]:
if self.y >= 461:
self.move_up()
elif key[pygame.K_DOWN]:
if self.y <= 463:
self.move_down()
def walk_on_vertical_roads(self):
    """Constrain arrow-key movement to the vertical road strips of the
    current level.

    Each road is a bounding box plus per-direction travel limits:
    (x_min, x_max, y_min, y_max, right_lim, left_lim, up_lim, down_lim).
    The limits stop the hero just before leaving the strip.
    """
    roads_per_level = {
        1: [
            (566, 575, 270, 400, 570, 569, 271, 396),
            (262, 269, 398, 560, 266, 263, 399, 557),
        ],
        2: [
            (103, 106, 62, 532, 105, 104, 63, 528),
            (566, 570, 61, 575, 569, 567, 62, 571),
        ],
    }
    key = pygame.key.get_pressed()
    for road in roads_per_level.get(self.world.level.num, []):
        x_min, x_max, y_min, y_max, right_lim, left_lim, up_lim, down_lim = road
        if x_min <= self.x <= x_max and y_min <= self.y <= y_max:
            if key[pygame.K_RIGHT]:
                if self.x <= right_lim:
                    self.move_right()
            elif key[pygame.K_LEFT]:
                if self.x >= left_lim:
                    self.move_left()
            elif key[pygame.K_UP]:
                if self.y >= up_lim:
                    self.move_up()
            elif key[pygame.K_DOWN]:
                if self.y <= down_lim:
                    self.move_down()
def attack(self):
    """Play the hero's strike animation and arm the weapon so the next
    collision pass can register hits."""
    self.hero_sprite.attack()
    self.weapon.drawing = True
def add_injury_to_enemies(self):
    """Resolve combat between the hero and every enemy for this frame.

    While the hero's weapon is out, any enemy within striking distance
    (4px between weapon and enemy centres) is killed. Otherwise enemies
    may hurt the hero (by contact or by their weapon), clashing weapons
    cancel each other, and enemies that see the hero start attacking.

    Bug fix: the kill branch used to pop enemies from the list while
    iterating it, which silently skipped the enemy right after each kill.
    We now iterate over a copy.
    """
    def distance(ax, ay, bx, by):
        return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2)

    if self.weapon.drawing:
        for enemy in list(self.world.level.enemies):
            if distance(self.weapon.centerx, self.weapon.centery,
                        enemy.centerx, enemy.centery) <= 4:
                self.weapon.drawing = False
                self.sound_kill_enemy.play()
                self.world.level.enemies.remove(enemy)
                self.enemy_score += 1
    else:
        for enemy in self.world.level.enemies:
            d_hero = distance(self.centerx, self.centery,
                              enemy.centerx, enemy.centery)
            d_hero_fire = distance(self.centerx, self.centery,
                                   enemy.weapon.centerx, enemy.weapon.centery)
            d_weapons = distance(self.weapon.centerx, self.weapon.centery,
                                 enemy.weapon.centerx, enemy.weapon.centery)
            if d_hero <= 25:
                self.add_injury()
            if d_hero_fire <= 25:
                self.add_injury()
            if d_weapons <= 20:
                # Weapons clash: both strikes are cancelled.
                self.weapon.drawing = False
                enemy.weapon.drawing = False
            if self.hero_see_enemy(enemy):
                enemy.attack()
            else:
                enemy.weapon.drawing = False
def hero_see_enemy(self, enemy):
    """Return True when the hero stands in the enemy's facing direction:
    strictly less than 128px ahead of the enemy and at most 20px off-axis."""
    off_x = self.centerx - enemy.centerx
    off_y = self.centery - enemy.centery
    if enemy.face == 'left':
        ahead, lateral = -off_x, abs(off_y)
    elif enemy.face == 'right':
        ahead, lateral = off_x, abs(off_y)
    elif enemy.face == 'up':
        ahead, lateral = -off_y, abs(off_x)
    elif enemy.face == 'down':
        ahead, lateral = off_y, abs(off_x)
    else:
        return False
    return 0 < ahead < 128 and lateral <= 20
def collect_stars(self):
    """Pick up every star within 16px of the hero's centre.

    Bug fix: the original popped stars from the list while iterating it,
    which skipped the star immediately after each collected one; iterate
    over a copy instead.
    """
    for star in list(self.world.level.stars):
        d = math.sqrt((self.centerx - star.x) ** 2 + (self.centery - star.y) ** 2)
        if d <= 16:
            self.sound_collect.play()
            self.world.level.stars.remove(star)
            self.stars += 1
def add_injury(self):
    """Knock the hero back opposite to his facing direction, subtract 10
    stamina, and trigger a respawn when stamina is exhausted (while lives
    remain)."""
    knockback = {'left': (8, 0), 'right': (-8, 0), 'up': (0, 8), 'down': (0, -8)}
    dx, dy = knockback.get(self.face, (0, 0))
    self.x += dx
    self.y += dy
    self.stamina -= 10
    if self.stamina <= 0 and self.lives > 0:
        self.sound_die.play()
        time.sleep(3)
        self.stamina = 0
        self.reboot()
def reboot(self):
    """Respawn the hero at the level's start point with one life fewer.

    Fix: fetch the start point once instead of calling the generator twice
    (the duplicate call was redundant and would double any side effects).
    """
    self.lives -= 1
    start = self.world.level.generator.start_point()
    self.__init__(self.world, start[0], start[1], self.lives)
| StarcoderdataPython |
141650 | <reponame>ml4wireless/gr-echo
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# 2019 09 13
#
# Copyright 2018 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
import time
import uuid
import pmt
from gnuradio import gr
from EchoPacketWrapper import EchoPacketWrapper
class echo_packet_unwrapper(gr.basic_block):
    """Message-based GNU Radio block that strips the Echo packet framing
    (CFO and correlation headers) from a received frame PDU and republishes
    only the body symbols.

    Consumes PDUs on the "frame" port and emits PDUs on the "body" port;
    the PDU metadata (car) is forwarded untouched.
    """
    def __init__(self, samps_per_symb, beta_rrc, cfo_samps, cfo_freqs, corr_reps):
        """
        Inputs:
        :param samps_per_symb: number of samples per symbol sent over the air
        :param beta_rrc: bandwidth expansion parameter for RRC filter
        :param cfo_samps: integer number of samples for the CFO correction header portion
        :param cfo_freqs: list of frequencies present in the CFO correction header, Hz
                          positive values only: freqs are added as cosines to mirror in
                          negative frequency portion
        :param corr_reps: integer number of repetitions for correlation header Golay
                          sequences
        """
        gr.basic_block.__init__(self,
                                name="echo_packet_unwrapper",
                                in_sig=None,
                                out_sig=None)
        # Helper that knows the over-the-air framing layout.
        self.wrapper = EchoPacketWrapper(samps_per_symb=samps_per_symb, beta_rrc=beta_rrc,
                                         cfo_samps=cfo_samps, cfo_freqs=cfo_freqs,
                                         corr_repetitions=corr_reps)
        # NOTE(review): chan0 looks like a unit-impulse channel estimate but is
        # not referenced elsewhere in this block -- confirm before removing.
        self.chan0 = np.zeros((128,), dtype=np.complex64)
        self.chan0[64] = 1.0 + 0.0j
        # Message ports: frames in, unwrapped bodies out.
        self.port_in_id = pmt.intern("frame")
        self.port_out_id = pmt.intern("body")
        self.message_port_register_in(self.port_in_id)
        self.message_port_register_out(self.port_out_id)
        self.set_msg_handler(self.port_in_id, self.handle_frame)
        # Packet counter plus a short unique id used to tag log lines.
        self.npackets = 0
        self.uuid = uuid.uuid4()
        self.uuid_str = str(self.uuid)[-6:]
        self.logger = gr.logger("log_debug")
        self.logger.set_level("DEBUG")

    def handle_frame(self, pdu):
        """Unwrap one frame PDU and publish its body, timing the operation."""
        t0 = time.time()
        self.npackets += 1
        tags = pmt.car(pdu)  # metadata dictionary, forwarded unchanged
        data = pmt.to_python(pmt.cdr(pdu))
        assert type(data) is np.ndarray
        assert type(data[0]) is np.complex64
        body, _ = self.wrapper.unwrap(data, do_plot=False)
        body = body.astype(np.complex64)
        self.message_port_pub(self.port_out_id,
                              pmt.cons(tags, pmt.to_pmt(body)))
        t1 = time.time()
        self.logger.debug("packet unwrap {} handled {} symbols in {} seconds".format(
            self.uuid_str, data.size, t1 - t0))
| StarcoderdataPython |
3366063 | <filename>predict.py
import random
import torch
from torch.autograd import Variable
from train_util import variable_from_sentence
class ModelPredictor(object):
    """Greedy decoder around a trained seq2seq encoder/decoder pair."""

    def __init__(self, encoder, decoder, input_lang, output_lang, max_length):
        # encoder / decoder: trained recurrent modules.
        # input_lang / output_lang: vocabularies exposing word2index / index2word.
        # max_length: cap on both encoder input slots and decoded output length.
        self.encoder = encoder
        self.decoder = decoder
        self.input_lang = input_lang
        self.output_lang = output_lang
        self.max_length = max_length

    def evaluate(self, sentence):
        """Greedy-decode *sentence*; return the output token list (EOS excluded).

        NOTE(review): torch.autograd.Variable is the legacy (pre-0.4) API;
        plain tensors would work on modern torch -- confirm target version.
        """
        SOS_token = self.input_lang.word2index["SOS"]
        EOS_token = self.input_lang.word2index["EOS"]
        input_variable = variable_from_sentence(self.input_lang, sentence, self.max_length)
        input_length = input_variable.size()[0]
        encoder_hidden = self.encoder.init_hidden()
        # One encoder output slot per (padded) input position; unused slots stay zero.
        encoder_outputs = Variable(torch.zeros(self.max_length, self.encoder.hidden_size))
        for ei in range(input_length):
            encoder_output, encoder_hidden = self.encoder(input_variable[ei],
                                                          encoder_hidden)
            encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
        decoder_input = Variable(torch.LongTensor([[SOS_token]]))  # SOS
        decoder_hidden = encoder_hidden
        decoded_words = []
        for di in range(self.max_length):
            decoder_output, decoder_hidden, decoder_attention = self.decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            # Greedy choice: the highest-scoring token becomes the next input.
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            if ni == EOS_token:
                break
            else:
                decoded_words.append(self.output_lang.index2word[ni])
            decoder_input = Variable(torch.LongTensor([[ni]]))
        return decoded_words

    def evaluate_randomly(self, pairs, n=10):
        """Decode *n* random (source, target) pairs, printing each prediction
        and the exact-match accuracy."""
        match = 0
        for i in range(n):
            pair = random.choice(pairs)
            print('>', pair[0])
            print('=', pair[1])
            output_words = self.evaluate(pair[0])
            output_sentence = ' '.join(output_words)
            print('<', output_sentence)
            print('')
            if pair[1] == output_sentence:
                match += 1
        print("accuracy: ", (match / n) * 100, "%")

    def predict_sentence(self, sentence):
        """Return the decoded translation of *sentence* as a single string."""
        return ' '.join(self.evaluate(sentence))
| StarcoderdataPython |
195332 | <gh_stars>0
import inspect
from functools import wraps
from inspect import signature
from types import FunctionType
from typing import Callable, Generic, Optional, Type, Union
from apischema.typing import get_type_hints
from apischema.utils import PREFIX, T, get_origin_or_type2
MethodOrProperty = Union[Callable, property]
def _method_location(method: MethodOrProperty) -> Optional[Type]:
    """Best-effort lookup of the object (usually a class) in which *method*
    was declared, by walking its ``__qualname__`` through the function's
    module globals. Returns None when the qualname root is not present in
    those globals."""
    if isinstance(method, property):
        assert method.fget is not None
        method = method.fget
    # Unwind functools.wraps-style wrappers to reach the original function.
    while hasattr(method, "__wrapped__"):
        method = method.__wrapped__  # type: ignore
    assert isinstance(method, FunctionType)
    # Drop the trailing function name; what remains is the dotted path of the
    # enclosing scopes, rooted at a module-global name.
    global_name, *class_path = method.__qualname__.split(".")[:-1]
    if global_name not in method.__globals__:
        return None
    location = method.__globals__[global_name]
    for attr in class_path:
        if hasattr(location, attr):
            location = getattr(location, attr)
        else:
            # e.g. "<locals>" for methods defined inside another function;
            # stop and return the deepest object reached.
            break
    return location
def is_method(method: MethodOrProperty) -> bool:
    """Return whether the function/property is declared inside a class."""
    if isinstance(method, property):
        # A property counts when its getter does.
        return method.fget is not None and is_method(method.fget)
    if not isinstance(method, FunctionType):
        return False
    if method.__name__ == method.__qualname__:
        # Qualname equals name only for module-level functions.
        return False
    if not isinstance(_method_location(method), (type, type(None))):
        return False
    return next(iter(inspect.signature(method).parameters), None) == "self"
def method_class(method: MethodOrProperty) -> Optional[Type]:
    """Return the class in which *method* was declared, or None."""
    location = _method_location(method)
    if isinstance(location, type):
        return location
    return None
# Marker attribute set on every wrapper produced by method_wrapper(), so a
# method is never wrapped twice.
METHOD_WRAPPER_ATTR = f"{PREFIX}method_wrapper"


def method_wrapper(method: MethodOrProperty, name: Optional[str] = None) -> Callable:
    """Wrap *method* (function or property) into a plain function that
    re-dispatches through ``getattr(self, name)`` at call time, so subclass
    overrides are honored."""
    if isinstance(method, property):
        assert method.fget is not None
        name = name or method.fget.__name__

        @wraps(method.fget)
        def wrapper(self):
            # Property access: no call parentheses.
            return getattr(self, name)

    else:
        if hasattr(method, METHOD_WRAPPER_ATTR):
            # Already a wrapper produced here: return it unchanged.
            return method
        name = name or method.__name__
        if list(signature(method).parameters) == ["self"]:

            @wraps(method)
            def wrapper(self):
                return getattr(self, name)()

        else:

            @wraps(method)
            def wrapper(self, *args, **kwargs):
                return getattr(self, name)(*args, **kwargs)

    setattr(wrapper, METHOD_WRAPPER_ATTR, True)
    return wrapper
class MethodWrapper(Generic[T]):
    """Descriptor that holds a method (or property) and re-installs the raw
    attribute on the owner class at class-creation time via ``__set_name__``."""

    def __init__(self, method: T):
        self._method = method

    # property-style chaining: delegate getter/setter/deleter to the wrapped
    # property, returning self so decorators keep stacking on the wrapper.
    def getter(self, func):
        self._method = self._method.getter(func)
        return self

    def setter(self, func):
        self._method = self._method.setter(func)
        return self

    def deleter(self, func):
        self._method = self._method.deleter(func)
        return self

    def __set_name__(self, owner, name):
        # Replace this wrapper with the underlying method on the class.
        setattr(owner, name, self._method)

    def __call__(self, *args, **kwargs):
        # Only reachable when the wrapper was never installed on a class.
        raise RuntimeError("Method __set_name__ has not been called")
def method_registerer(
    arg: Optional[Callable],
    owner: Optional[Type],
    register: Callable[[Callable, Type, str], None],
):
    """Shared implementation for method-registering decorators.

    When the owning class cannot be determined yet (decorating a method
    inside a class body), returns a descriptor that defers registration to
    ``__set_name__``. Otherwise registers immediately, inferring the owner
    from the method's declaring class or from the annotation of its first
    parameter. Supports both bare (@deco) and parametrized (@deco(...))
    usage via *arg*.
    """
    def decorator(method: MethodOrProperty):
        if owner is None and is_method(method) and method_class(method) is None:
            # Inside a class body: the owner is unknown until class creation.
            class Descriptor(MethodWrapper[MethodOrProperty]):
                def __set_name__(self, owner, name):
                    super().__set_name__(owner, name)
                    register(method_wrapper(method), owner, name)

            return Descriptor(method)
        else:
            owner2 = owner
            if is_method(method):
                if owner2 is None:
                    owner2 = method_class(method)
                method = method_wrapper(method)
            if owner2 is None:
                # Fall back to the first parameter's type annotation.
                try:
                    hints = get_type_hints(method)
                    owner2 = get_origin_or_type2(hints[next(iter(hints))])
                except (KeyError, StopIteration):
                    raise TypeError("First parameter of method must be typed") from None
            assert not isinstance(method, property)
            register(method, owner2, method.__name__)
            return method

    return decorator if arg is None else decorator(arg)
| StarcoderdataPython |
1782923 | import socket
class PyCololight:
    """UDP driver for a Cololight lamp.

    Every command is a hex string: a fixed prefix, a config/effect header and
    a short payload (brightness byte, RGB triple or a 4-byte effect
    descriptor), encoded to bytes and sent to the device over UDP.
    """

    # Shared hex fragments of every command.
    COMMAND_PREFIX = "535a30300000000000"
    COMMAND_CONFIG = "20000000000000000000000000000000000100000000000000000004010301c"
    COMMAND_EFFECT = "23000000000000000000000000000000000100000000000000000004010602ff"

    # Per colour scheme: the decimal offset of its first colour in the
    # device palette, and the ordered colour names ('' = unused slot).
    CUSTOM_EFFECT_COLOURS = {
        "Breath": {
            "decimal": 128,
            "colours": (
                "Red, Green, Blue",
                "Rainbow",
                "Green",
                "Azure",
                "Blue",
                "Purple",
                "Red",
                "Orange",
                "Yellow",
                "White",
                "Green, Blue",
            ),
        },
        "Shadow": {
            "decimal": 139,
            "colours": (
                "Red, Yellow",
                "Red, Green",
                "Red, Blue",
                "Green, Yellow",
                "Green, Azure",
                "Green, Blue",
                "Blue, Azure",
                "Blue, Purple",
                "Yellow, White",
                "Red, White",
                "Green, White",
                "Azure, White",
                "Blue, White",
                "Purple, White",
            ),
        },
        "Flash": {
            "decimal": 153,
            "colours": (
                "Red, Green, Blue",
                "Rainbow",
                "Green",
                "Azure",
                "Blue",
                "Purple",
                "Red",
                "Orange",
                "Yellow",
                "White",
            ),
        },
        "Flicker": {
            "decimal": 163,
            "colours": (
                "Red, Green, Blue",
                "Rainbow",
                "Green",
                "Azure",
                "Blue",
                "Purple",
                "Red",
                "Orange",
                "Yellow",
                "White",
            ),
        },
        "Scene": {
            "decimal": 173,
            "colours": (
                "Birthday",
                "Girlfriends",
                "Friends",
                "Workmates",
                "Family",
                "Lover",
            ),
        },
        "Mood": {
            "decimal": 179,
            "colours": (
                "Red",
                "Orange",
                "Yellow",
                "Green",
                "Grass",
                "Azure",
                "Blue",
                "Pink",
                "Gold",
                "Color",
                "True Color",
            ),
        },
        "Selected": {
            # Empty strings are placeholder slots with no selectable colour.
            "decimal": 191,
            "colours": ("Savasana", "", "Sunrise", "", "Unicorns"),
        },
    }

    # (mode byte, sub-mode byte) pairs, indexed by 1-based mode number.
    CUSTOM_EFFECT_MODES = [
        ("01", "00"),
        ("02", "00"),
        ("05", "10"),
        ("05", "30"),
        ("05", "40"),
        ("05", "50"),
        ("05", "70"),
        ("05", "80"),
        ("05", "90"),
        ("05", "a0"),
        ("05", "b0"),
        ("05", "c0"),
        ("05", "00"),
        ("05", "20"),
        ("05", "30"),
        ("06", "00"),
        ("06", "10"),
        ("06", "20"),
        ("06", "30"),
        ("06", "50"),
        ("05", "f0"),
        ("05", "10"),
        ("05", "40"),
        ("05", "50"),
        ("06", "60"),
        ("06", "70"),
        ("06", "80"),
    ]

    def __init__(self, host, port=8900):
        # host/port: network address of the lamp (UDP).
        self.host = host
        self.port = port
        # Cached local view of the lamp state; only updated by our setters.
        self._on = False
        self._brightness = None
        self._colour = None
        self._effect = None
        # Built-in effects: name -> 4-byte effect descriptor (hex).
        self._effects = {
            "80s Club": "049a0000",
            "Cherry Blossom": "04940800",
            "Cocktail Parade": "05bd0690",
            "Instagrammer": "03bc0190",
            "Pensieve": "04c40600",
            "Savasana": "04970400",
            "Sunrise": "01c10a00",
            "The Circus": "04810130",
            "Unicorns": "049a0e00",
            "Christmas": "068b0900",
            "Rainbow Flow": "03810690",
            "Music Mode": "07bd0990",
        }
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def _send(self, command):
        """Send raw command bytes to the device over UDP."""
        self._sock.sendto(command, (self.host, self.port))

    def _cycle_speed_hex(self, cycle_speed, mode):
        """Map a 1-32 cycle speed to the device's speed byte.

        Raises CycleSpeedException when out of range. Mode 2 only supports
        three speeds, encoded differently from the other modes.
        """
        if not 1 <= cycle_speed <= 32:
            raise CycleSpeedException
        if mode in [2]:
            # Mode 2 only has speeds 1, 2, 3, which are mapped differently to other modes
            cycle_speed_values = [3, 11, 19]
            cycle_speed_value = cycle_speed_values[min(3, cycle_speed) - 1]
        else:
            # Other modes: speed 1 -> 32, speed 32 -> 1 (inverted scale).
            cycle_speed_value = list(reversed(range(33)))[cycle_speed - 1]
        cycle_speed_hex = "{:02x}".format(cycle_speed_value)
        return cycle_speed_hex

    def _colour_hex(self, colour_scheme, colour, mode):
        """Resolve (scheme, colour) to the device palette byte.

        Raises ColourSchemeException / ColourException for unknown input.
        Some modes address the palette 128 positions lower.
        """
        if colour_scheme not in self.custom_effect_colour_schemes():
            raise ColourSchemeException
        if colour not in self.custom_effect_colour_scheme_colours(colour_scheme):
            raise ColourException
        starting_decimal = self.CUSTOM_EFFECT_COLOURS[colour_scheme]["decimal"]
        colour_key = self.CUSTOM_EFFECT_COLOURS[colour_scheme]["colours"].index(colour)
        if mode in [13, 14, 15, 22, 23, 24]:
            # These modes have a lower starting decimal of 128
            starting_decimal = starting_decimal - 128
        colour_decimal = starting_decimal + colour_key
        colour_hex = "{:02x}".format(colour_decimal)
        return colour_hex

    def _mode_hex(self, mode):
        """Return the (mode byte, sub-mode byte) pair for a 1-based mode.

        Raises ModeExecption when the mode index is out of range.
        """
        if not 1 <= mode <= len(self.CUSTOM_EFFECT_MODES):
            raise ModeExecption
        return self.CUSTOM_EFFECT_MODES[mode - 1]

    @property
    def on(self):
        # Last known power state (not read back from the device).
        return self._on

    @on.setter
    def on(self, brightness):
        """Truthy *brightness* switches the lamp on at that brightness;
        0/False sends the off command."""
        if brightness:
            self._on = True
            self.brightness = brightness
        else:
            self._on = False
            # "e1e" is the power-off payload.
            command = bytes.fromhex(
                "{}{}{}".format(self.COMMAND_PREFIX, self.COMMAND_CONFIG, "e1e")
            )
            self._send(command)

    @property
    def brightness(self):
        return self._brightness

    @brightness.setter
    def brightness(self, brightness):
        # Payload is "f" followed by the brightness byte.
        brightness_prefix = "f"
        command = bytes.fromhex(
            "{}{}{}{:02x}".format(
                self.COMMAND_PREFIX,
                self.COMMAND_CONFIG,
                brightness_prefix,
                int(brightness),
            )
        )
        self._brightness = brightness
        self._send(command)

    @property
    def colour(self):
        return self._colour

    @colour.setter
    def colour(self, colour):
        """Set a static colour; *colour* is an (r, g, b) triple of 0-255 ints."""
        colour_prefix = "00"
        command = bytes.fromhex(
            "{}{}{}{:02x}{:02x}{:02x}".format(
                self.COMMAND_PREFIX, self.COMMAND_EFFECT, colour_prefix, *colour
            )
        )
        self._colour = colour
        self._send(command)

    @property
    def effect(self):
        return self._effect

    @effect.setter
    def effect(self, effect):
        """Activate a named effect; raises KeyError for unknown names."""
        command = bytes.fromhex(
            "{}{}{}".format(
                self.COMMAND_PREFIX,
                self.COMMAND_EFFECT,
                self._effects[effect],
            )
        )
        self._effect = effect
        self._send(command)

    @property
    def effects(self):
        """Names of all available (built-in plus custom) effects."""
        return list(self._effects.keys())

    def add_custom_effect(self, name, colour_scheme, colour, cycle_speed, mode):
        """Register a custom effect under *name*, built from a colour scheme,
        a colour within it, a cycle speed (1-32) and a mode (1-27)."""
        cycle_speed_hex = self._cycle_speed_hex(int(cycle_speed), int(mode))
        colour_hex = self._colour_hex(colour_scheme, colour, int(mode))
        mode_hex = self._mode_hex(int(mode))
        if mode in [2]:
            # Mode 2 has bytes arranged differently to other modes
            custom_effect_hex = (
                f"{mode_hex[0]}{cycle_speed_hex}{colour_hex}{mode_hex[1]}"
            )
        else:
            custom_effect_hex = (
                f"{mode_hex[0]}{colour_hex}{cycle_speed_hex}{mode_hex[1]}"
            )
        self._effects[name] = custom_effect_hex

    def custom_effect_colour_schemes(self):
        """Names of the supported colour schemes."""
        return list(self.CUSTOM_EFFECT_COLOURS.keys())

    def custom_effect_colour_scheme_colours(self, colour_scheme):
        """Selectable colour names of *colour_scheme* (placeholders removed)."""
        return list(filter(None, self.CUSTOM_EFFECT_COLOURS[colour_scheme]["colours"]))
class ColourSchemeException(Exception):
    """Raised when an unknown custom-effect colour scheme is requested."""
    pass


class ColourException(Exception):
    """Raised when a colour is not part of the chosen colour scheme."""
    pass


class CycleSpeedException(Exception):
    """Raised when a cycle speed is outside the supported 1-32 range."""
    pass


# NOTE: misspelling ("Execption") kept -- callers catch this exact name.
class ModeExecption(Exception):
    """Raised when a custom-effect mode index is out of range."""
    pass
20954 | <filename>src/wildfires/cache/same_call.py
# -*- coding: utf-8 -*-
"""Decorator guaranteeing uniform function calls."""
from inspect import Parameter, signature
def extract_uniform_args_kwargs(f, *args, ignore=None, **kwargs):
    """Normalize a call to *f* into a canonical (args, kwargs) pair.

    Positional-capable parameters (positional-only, positional-or-keyword
    and *args) are returned positionally in signature order; keyword-only
    parameters and **kwargs entries are returned as keywords. Defaults are
    applied, so equivalent calls yield identical representations.

    Args:
        f (callable): Function being called.
        *args, **kwargs: Function arguments.
        ignore (None or iterable of str): Parameter names whose values are
            dropped from the output entirely.

    Returns:
        args, kwargs: Standardised representation of the given arguments.
    """
    ignored = set() if ignore is None else ignore
    sig = signature(f)
    kind_of = {param.name: param.kind for param in sig.parameters.values()}
    bound = sig.bind(*args, **kwargs)
    bound.apply_defaults()

    positional_kinds = (
        Parameter.POSITIONAL_ONLY,
        Parameter.POSITIONAL_OR_KEYWORD,
    )
    new_args = []
    new_kwargs = {}
    # bound.arguments preserves signature order, so positional values are
    # appended in the right order before any keyword-only values appear.
    for name, value in bound.arguments.items():
        if name in ignored:
            continue
        kind = kind_of[name]
        if kind is Parameter.VAR_POSITIONAL:
            new_args.extend(value)
        elif kind in positional_kinds:
            new_args.append(value)
        elif kind is Parameter.VAR_KEYWORD:
            new_kwargs.update(value)
        else:
            assert kind is Parameter.KEYWORD_ONLY
            new_kwargs[name] = value
    return new_args, new_kwargs
| StarcoderdataPython |
3240134 | #!/usr/bin/env python3
from collections import defaultdict
import sys
# func_dict = {func_name: (start_line, end_line, lines)}
# def get_lines(filename='battleship.go'):
# with open(filename) as in_file:
# return [line.strip() for line in in_file]
def funcs(filename='battleship.go'):
    """Scan a Go source file and return a call map.

    Returns {func_name + '()-style key': tuple of names of file-local
    functions whose name appears in its body}.

    NOTE(review): detection is purely textual (substring match on
    "name("), so names mentioned in comments or strings also count.
    """
    # pass one: record the [start_line, end_line] span of every func.
    with open(filename) as in_file:
        lines = []  # all lines in in_file
        func_dict = {}  # key = function name, value = [start_line, end_line]
        curr_func = ''  # will contain the current function with a trailing '('
        for i, line in enumerate(in_file):
            line = line.replace('(h helloHandler) ', '')  # special case!!!
            lines.append(line)
            if line.lstrip().startswith('func'):
                if curr_func:
                    func_dict[curr_func].append(i - 1)  # record end_line
                    print(i, curr_func)
                curr_func = line.partition(' ')[-1].partition('(')[0] + '('
                func_dict[curr_func] = [i + 1]  # record start_line
        print(sorted(func_dict))
        # assert False, 'Dude.'
        assert func_dict, 'No functions were found on the first pass!'
        func_dict[curr_func].append(i)  # record end_line
        # print(func_dict, '\n')
    # pass two: substring-search each body for the other function names.
    print(len(lines), len(func_dict))
    for curr_func, start_end_lines in func_dict.items():
        # print(curr_func, start_end_lines)
        func_body = '\n'.join(lines[start_end_lines[0]:start_end_lines[1]])
        func_dict[curr_func] = []  # now record any functions that are called
        for called_func in func_dict:
            if called_func in func_body:
                func_dict[curr_func].append(called_func + ')')
    return {func + ')': tuple(called) for func, called in func_dict.items()}
# --- script body: print the call graph, then its reverse mapping ---
print('=' * 25)
filename = sys.argv[1] if sys.argv[1:] else 'battleship.go'
d = funcs(filename)
for func in sorted(d):
    called = d[func]
    # NODE = calls something; LEAF = calls nothing in this file.
    print('{}: {}: {}'.format('NODE' if called else 'LEAF', func, called))

print('\nReversing...')
r = defaultdict(list)
for key, values in d.items():
    for value in values:
        r[value].append(key)
for func in sorted(r):
    print('{} is called by {}'.format(func, r[func]))
| StarcoderdataPython |
3308273 | <reponame>manoj153/firstRPIworkshop
import RPi.GPIO as GPIO
# Raspberry Pi GPIO setup: physical (BOARD) pin 40 drives the LED.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(40, GPIO.OUT)
GPIO.setwarnings(False)

#You can import any required modules here

#This can be anything you want
moduleName = "turnON"

#All of the words must be heard in order for this module to be executed
commandWords = ["turn", "on"]
def execute(command):
    """Turn the LED on by driving GPIO pin 40 high.

    :param command: the spoken phrase that triggered this module (unused).
    """
    #Write anything you want to be executed when the commandWords are heard
    #The 'command' parameter is the command you speak
    print("turning the LED ON, It's not magic, Said: Manojkumar")
    GPIO.output(40, GPIO.HIGH)
    return
| StarcoderdataPython |
45696 | <reponame>ryosuke0825/atcoder_python
# Sum every i in 1..n whose decimal digit sum lies in [a, b].
n, a, b = map(int, input().split())
total = 0
for i in range(1, n + 1):
    digit_sum = sum(int(digit) for digit in str(i))
    if a <= digit_sum <= b:
        total += i
print(total)
| StarcoderdataPython |
3358580 | #!/usr/bin/python
# Python 2 micro-benchmark: throughput of scipy root finding, maximisation
# and numerical integration on a LogNormal(0, 1) log-density.
from scipy.optimize import *
from scipy.integrate import *

import distributions
import mytime

ln = distributions.LogNormal (0, 1)

# Root finding on the derivative of the log-pdf, bracketed in [0.1, 25].
N = 100000
tmr = mytime.timeit()
for i in xrange(N):
    root = brenth (ln.dx_lpdf, 0.1, 25)
print "root was %.10f" % root
elapsed = tmr.total("Time for %d root-findings" % N)
print "Roots per second = %.4f" % (N / elapsed)

# The maximum of the log-pdf sits where its derivative crosses zero.
N = 10000
tmr = mytime.timeit()
for i in xrange(N):
    the_max = bisect (ln.dx_lpdf, 0.1,25)
print "max was %.10f" % the_max
elapsed = tmr.total("Time for %d maxes" % N)
print "Maxes per second = %.4f" % (N / elapsed)

# Quadrature of the log-pdf over [0.1, 3]; quad returns (value, abserr).
N = 10000
tmr = mytime.timeit()
for i in xrange(N):
    integral = quad (ln.lpdf, 0.1, 3)
print "integral was %.10f" % integral[0]
elapsed = tmr.total("Time for %d integrations" % N)
print "Integrations per second = %.4f" % (N / elapsed)
| StarcoderdataPython |
3361287 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This file contains the tests for the generic text parser."""
import unittest
import pyparsing
from plaso.parsers import text_parser
from tests.parsers import test_lib
class PyparsingConstantsTest(test_lib.ParserTestCase):
    """Tests the PyparsingConstants text parser."""

    def testConstants(self):
        """Tests parsing with constants."""
        # Month abbreviations must be exactly three letters.
        with self.assertRaises(pyparsing.ParseException):
            text_parser.PyparsingConstants.MONTH.parseString('MMo')
        with self.assertRaises(pyparsing.ParseException):
            text_parser.PyparsingConstants.MONTH.parseString('M')
        # With parseAll a full month name must not match the 3-letter token.
        with self.assertRaises(pyparsing.ParseException):
            text_parser.PyparsingConstants.MONTH.parseString('March', parseAll=True)
        self.assertTrue(text_parser.PyparsingConstants.MONTH.parseString('Jan'))
        line = '# This is a comment.'
        parsed_line = text_parser.PyparsingConstants.COMMENT_LINE_HASH.parseString(
            line)
        # The grammar yields the '#' marker plus the comment text.
        self.assertEqual(parsed_line[-1], 'This is a comment.')
        self.assertEqual(len(parsed_line), 2)

    def testConstantIPv4(self):
        """Tests parsing with the IPV4_ADDRESS constant."""
        self.assertTrue(
            text_parser.PyparsingConstants.IPV4_ADDRESS.parseString(
                '192.168.127.12'))
        self.assertTrue(
            text_parser.PyparsingConstants.IPV4_ADDRESS.parseString(
                '255.254.23.1'))
        self.assertTrue(
            text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('1.1.34.2'))
        # Malformed addresses must be rejected.
        with self.assertRaises(pyparsing.ParseException):
            text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('a.1.34.258')
        with self.assertRaises(pyparsing.ParseException):
            text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('.34.258')
        with self.assertRaises(pyparsing.ParseException):
            text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('34.258')
class PyparsingSingleLineTextParserTest(unittest.TestCase):
    """Tests for the single line PyParsing-based text parser."""

    # pylint: disable=protected-access

    def testIsText(self):
        """Tests the _IsText function."""
        parser = text_parser.PyparsingSingleLineTextParser()

        bytes_in = b'this is My Weird ASCII and non whatever string.'
        self.assertTrue(parser._IsText(bytes_in))

        # NOTE(review): this one is a (unicode) str, not bytes -- presumably
        # intentional to exercise the str path of _IsText; confirm.
        bytes_in = 'Plaso Síar Og Raðar Þessu'
        self.assertTrue(parser._IsText(bytes_in))

        bytes_in = b'\x01\\62LSO\xFF'
        self.assertFalse(parser._IsText(bytes_in))

        # UTF-16-like and NUL-terminated ASCII still count as text.
        bytes_in = b'T\x00h\x00i\x00s\x00\x20\x00'
        self.assertTrue(parser._IsText(bytes_in))

        bytes_in = b'Ascii\x00'
        self.assertTrue(parser._IsText(bytes_in))

        bytes_in = b'Ascii Open then...\x00\x99\x23'
        self.assertFalse(parser._IsText(bytes_in))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3384381 | import tensorflow as tf
# TF1: turn on eager execution; on TF2 (or when already enabled) the call
# is unavailable/raises, which is fine to ignore. Use `except Exception`
# rather than a bare `except:` so SystemExit/KeyboardInterrupt still
# propagate.
try:
    tf.enable_eager_execution()
except Exception:
    pass
from tf_supervised_inference.distributions import ImproperMultivariateNormal
import numpy as np
class ImproperMultivariateNormalTest(tf.test.TestCase):
    """Unit tests for ImproperMultivariateNormal (flat prior updates)."""

    def setUp(self):
        # Fix both TF and NumPy seeds so the golden values below are stable.
        tf.set_random_seed(42)
        np.random.seed(42)

    def test_init(self):
        """A zero-precision (improper/flat) prior stores its parameters."""
        patient = ImproperMultivariateNormal(
            tf.zeros([2, 1]), tf.zeros([2, 2]))
        self.assertAllEqual(patient.means, tf.zeros([2, 1]))
        self.assertAllEqual(patient.precision, tf.zeros([2, 2]))

    def test_next(self):
        """Updating the flat prior with data matches the golden posterior
        values recorded under the fixed seeds above."""
        patient = ImproperMultivariateNormal(
            tf.zeros([2, 1]), tf.zeros([2, 2]))

        num_features = 2
        x = np.random.normal(0, 1, [10, num_features]).astype('float32')
        y = np.random.normal(0, 1, [10, 1]).astype('float32')
        # Sufficient statistics for a Bayesian linear-regression update.
        weighted_feature_sums = x.T @ y
        empirical_precision = x.T @ x

        patient = patient.next(weighted_feature_sums, empirical_precision)
        self.assertAllClose(patient.means, [[-0.225088], [0.107223]])
        self.assertAllClose(patient.precision,
                            [[8.534351, 3.616732], [3.616732, 9.563951]])
if __name__ == '__main__':
tf.test.main()
| StarcoderdataPython |
29513 | <filename>openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py<gh_stars>10-100
from pyblish import api
import openpype.api as pype
class IntegrateVersionUpWorkfile(api.ContextPlugin):
    """Save as new workfile version"""

    # Runs after the other integrators so the saved file reflects the publish.
    order = api.IntegratorOrder + 10.1
    label = "Version-up Workfile"
    hosts = ["hiero"]
    optional = True
    active = True

    def process(self, context):
        """Compute the next version of the current workfile path and save
        the active Hiero project there."""
        project = context.data["activeProject"]
        path = context.data.get("currentFile")
        new_path = pype.version_up(path)
        if project:
            project.saveAs(new_path)
            self.log.info("Project workfile was versioned up")
| StarcoderdataPython |
78598 | import sys
import random
def printList(a):
    """Render the items of *a* separated by single spaces.

    Keeps the trailing space produced by the original formatting.
    """
    return ''.join('%s ' % item for item in a)
def swap(a, p1, p2):
    """Exchange a[p1] and a[p2] in place; returns the same list."""
    a[p1], a[p2] = a[p2], a[p1]
    return a
def Partition(a, p, q):
    """Lomuto partition of a[p..q] (inclusive) around the pivot a[q].

    Elements <= pivot are moved to the front of the range; returns the
    pivot's final index.
    """
    pivot = a[q]
    boundary = p - 1  # last index of the <= pivot region
    for scan in range(p, q):
        if a[scan] <= pivot:
            boundary = boundary + 1
            swap(a, boundary, scan)
    swap(a, boundary + 1, q)
    return boundary + 1
def RandomPartition(a, p, q):
    """Partition a[p..q] around a uniformly chosen pivot.

    Bug fix: the original swapped the random element into position *p*,
    but Partition always pivots on a[q] -- so the pivot was never actually
    randomized (sorted input stayed worst-case O(n^2)). It also drew r
    from range(p, q), excluding q itself. Swap the random element into q
    and include q in the draw so every element can be the pivot.
    """
    r = random.randrange(p, q + 1)
    swap(a, r, q)
    return Partition(a, p, q)
def QuickSort(a, p, q):
    """In-place randomized quicksort of a[p..q] (inclusive bounds)."""
    if p >= q:
        return
    pivot_index = RandomPartition(a, p, q)
    QuickSort(a, p, pivot_index - 1)
    QuickSort(a, pivot_index + 1, q)
# --- script entry point ---
# Read all of stdin into one whitespace-separated blob of integers.
strm = ''
num = sys.stdin.readlines()
for item in num:
    strm += item
a = [int(x) for x in strm.split()]
#to pop first line input which is nothing but size of list
length = a.pop(0)
QuickSort(a, 0, length - 1)
print(printList(a))
3240944 | <filename>PyTrackX/__init__.py
from .module import *
__version__ = '0.7' | StarcoderdataPython |
3384718 | <reponame>yousong/python_yunionsdk
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
import time
import urllib
import logging
import hashlib
import mmap
from yunionclient.common import exceptions
from yunionclient.openstack.common import importutils
# Decorator for cli-args
# Decorator for cli-args
def arg(*args, **kwargs):
    """Record one argparse-style argument spec on the decorated function.

    Specs accumulate on func.arguments; inserting at the front compensates
    for decorators being applied bottom-up, so declaration order is kept.
    """
    def _decorator(func):
        arguments = func.__dict__.setdefault('arguments', [])
        arguments.insert(0, (args, kwargs))
        return func
    return _decorator
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of quoted choices."""
    return ', '.join("'%s'" % choice for choice in l)
def get_value_ignorecase(dictobj, key):
    """Return the value whose key matches *key* case-insensitively, else None."""
    target = key.lower()
    for name in dictobj.keys():
        if name.lower() == target:
            return dictobj[name]
    return None
def get_attribute_ignorecase(obj, key):
    """Return the attribute of *obj* whose name matches *key*
    case-insensitively, or None when no attribute matches.

    Bug fix: the original wrote ``key().lower()``, calling the string *key*
    as a function and raising TypeError on every invocation.
    """
    target = key.lower()
    for name in obj.__dict__:
        if name.lower() == target:
            return getattr(obj, name, None)
    return None
def print_list(data, fields=None, formatters={}):
    """Print resources as an ASCII table (Python 2 module).

    :param data: either a plain list of items, or a
                 (items, total, limit, offset) tuple for paginated results
    :param fields: column titles; derived from the items' keys when empty
    :param formatters: optional {field: callable(item)} cell renderers.
                       NOTE(review): mutable default is never mutated here,
                       but a None default would be safer.
    """
    if isinstance(data, list):
        objs = data
        if len(objs) > 1:
            title = 'Total: %d' % len(objs)
        else:
            title = None
    else:
        (objs, total, limit, offset) = data
        if limit > 0:
            # Derive page count and current page from the pagination triple.
            pages = int(total)/limit
            if pages*limit < total:
                pages += 1
            page = (offset/limit) + 1
            title = 'Total: %d Pages: %d Limit: %d Offset: %d Page: %d' % \
                (int(total), pages, limit, offset, page)
        else:
            title = 'Total: %d' % len(objs)
    if fields is None or len(fields) == 0:
        # No explicit columns: collect every key seen in any item.
        fields = []
        for o in objs:
            for k in o.keys():
                k = k.upper()
                if k not in fields:
                    fields.append(k)
    import prettytable
    pt = prettytable.PrettyTable(fields, caching=False)
    pt.align = 'l'
    from yunionclient.common.base import ResourceBase
    # Track which columns actually carry data so empty ones can be hidden.
    data_fields_tbl = {}
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                field_name = field.lower().replace(' ', '_')
                if isinstance(o, ResourceBase):
                    o = o.to_dict()
                if isinstance(o, dict):
                    data = get_value_ignorecase(o, field_name)
                else:
                    data = get_attribute_ignorecase(o, field_name)
                if data is None:
                    data = ''
                elif field not in data_fields_tbl:
                    data_fields_tbl[field] = True
                row.append(data)
        pt.add_row(row)
    data_fields = [f for f in fields if f in data_fields_tbl]
    print pt.get_string(fields=data_fields)  # sortby=fields[0])
    if title is not None:
        print '****', title, '****'
def pretty_value(val):
    """Recursively render *val* as a compact one-line string: dicts as
    '{k:v,...}', lists as '[a,b]', scalars via str() (Python 2 module)."""
    ret = ''
    if isinstance(val, dict):
        for k, v in val.iteritems():  # Python 2 dict API
            if len(ret) > 0:
                ret += ','
            ret += '%s:%s' % (k, pretty_value(v))
        ret = '{%s}' % ret
    elif isinstance(val, list):
        for k in val:
            if len(ret) > 0:
                ret += ','
            ret += '%s' % (pretty_value(k))
        ret = '[%s]' % ret
    else:
        ret = '%s' % val
    return ret
def truncate(val, vlen):
    """Return *val* unchanged when it fits in *vlen* characters; otherwise
    return the first *vlen* characters followed by '...'.

    Bug fix: the original compared with ``<``, so a string of exactly
    *vlen* characters was "truncated" into a *longer* string (itself plus
    '...').
    """
    if len(val) <= vlen:
        return val
    return val[:vlen] + '...'
def print_dict(d):
    """Print one resource as a two-column Property/Value table
    (Python 2 module).

    Accepts a ResourceBase, a dict, or any object -- in the last case the
    public, non-callable attributes are shown.
    """
    import prettytable
    pt = prettytable.PrettyTable(['Property', 'Value'], caching=False)
    pt.aligns = ['l', 'l']
    from yunionclient.common.base import ResourceBase
    if isinstance(d, ResourceBase):
        d = d.to_dict()
    elif not isinstance(d, dict):
        # Plain object: expose its public, non-callable attributes.
        dd = {}
        for k in d.__dict__.keys():
            if k[0] != '_':
                v = getattr(d, k)
                if not callable(v):
                    dd[k] = v
        d = dd
    for k, v in d.iteritems():
        v = pretty_value(v)
        row = [k, v]
        pt.add_row(row)
    print pt.get_string(sortby='Property')
def find_resource(manager, name_or_id):
    """Helper for the _find_* methods.

    Resolution order: integer id, then UUID, then name. Raises
    CommandError when nothing matches.
    """
    # first try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id))
    except exceptions.NotFound:
        pass
    # now try to get entity as uuid
    try:
        uuid.UUID(str(name_or_id))
        return manager.get(name_or_id)
    except (ValueError, exceptions.NotFound):
        pass
    # finally try to find entity by name
    try:
        return manager.find(name=name_or_id)
    except exceptions.NotFound:
        msg = "No %s with a name or ID of '%s' exists." % \
            (manager.resource_class.__name__.lower(), name_or_id)
        raise exceptions.CommandError(msg)
def skip_authentication(f):
    """Decorator marking *f* as callable without prior authentication."""
    setattr(f, 'require_authentication', False)
    return f
def is_authentication_required(f):
    """Return True unless *f* was marked with the skip_authentication
    decorator (missing attribute defaults to 'required')."""
    return getattr(f, 'require_authentication', True)
def string_to_bool(arg):
    """Interpret 't'/'true'/'yes'/'1' (any case, padded) as True."""
    normalized = arg.strip().lower()
    return normalized in ('t', 'true', 'yes', '1')
def env(*vars, **kwargs):
    """Return the first non-empty environment variable among *vars*.

    Falls back to kwargs['default'] (or '') when none is set.
    """
    for name in vars:
        val = os.environ.get(name)
        if val:
            return val
    return kwargs.get('default', '')
def import_module(submodule=None):
    """Import the yunionclient package, or one of its submodules."""
    name = 'yunionclient.%s' % submodule if submodule else 'yunionclient'
    return importutils.import_module(name)
def timestr_2_epoch(time_str, zone_index=8):
    """Convert a local 'YYYY-mm-dd HH:MM:SS' string to an epoch value
    shifted forward by *zone_index* hours (default UTC+8)."""
    parsed = time.strptime(time_str, '%Y-%m-%d %H:%M:%S')
    return int(time.mktime(parsed)) + zone_index * 3600
def confirm(prompt=""):
    """Prompt the user and return True for an empty, 'Y' or 'y' answer.

    Python 2 only (raw_input). Returns None on any other answer or when
    reading input fails (the error is logged).
    """
    try:
        c = raw_input(prompt).strip()
        a = ['', 'Y', 'y']  # pressing Enter counts as confirmation
        if c in a:
            return True
    except Exception as e:
        logging.error(e)
def urlencode(data):
    """URL-encode a dict, expanding list values into repeated keys.

    Key/value pairs are sorted by key so the output is deterministic.
    None values are dropped. Python 2 only (cmp-based sort,
    dict.keys()[0], urllib.urlencode).
    """
    assert(isinstance(data, dict))
    kw_list = []
    for k in data.keys():
        if data[k] is not None:
            if isinstance(data[k], list):
                # one single-pair dict per list element -> repeated key
                for v in data[k]:
                    kw_list.append({k: v})
            else:
                kw_list.append({k: data[k]})
    kw_list = sorted(kw_list, lambda x, y: cmp(x.keys()[0], y.keys()[0]))
    return '&'.join(map(urllib.urlencode, kw_list))
def import_dsa_private_key(str):
    """Parse a PEM-encoded DSA private key into a PyCrypto DSA key.

    NOTE: the parameter shadows the builtin 'str'. Python 2 only
    (.decode("base64")).
    """
    from Crypto.Util import asn1
    from Crypto.PublicKey import DSA
    seq2 = asn1.DerSequence()
    # strip the PEM header/footer lines, then base64-decode the body
    data = "\n".join(str.strip().split("\n")[1:-1]).decode("base64")
    seq2.decode(data)
    # DER sequence after the version field: p, q, g, y, x
    p, q, g, y, x = seq2[1:]
    key2 = DSA.construct((y, g, p, q, x))
    return key2
def export_dsa_public_key(key):
    """Serialize a PyCrypto DSA key into OpenSSH 'ssh-dss ...' text.

    Python 2 only (byte-string indexing via ord/chr).
    """
    import struct
    import binascii
    from Crypto.Util.number import long_to_bytes
    tup1 = [long_to_bytes(x) for x in (key.p, key.q, key.g, key.y)]
    def func(x):
        # prepend a zero byte when the high bit is set so the value is
        # not read as negative (SSH mpint encoding)
        if (ord(x[0]) & 0x80):
            return chr(0) + x
        else:
            return x
    tup2 = map(func, tup1)
    keyparts = [str('ssh-dss')] + tup2
    # each part is prefixed with its length as a 32-bit big-endian int
    keystring = str('').join(
        [struct.pack(">I", len(kp)) + kp for kp in keyparts]
    )
    return str('ssh-dss ') + binascii.b2a_base64(keystring)[:-1]
def decrypt_dsa(privkey, secret):
    """Decrypt *secret* using a DSA private key.

    The AES passphrase is the exported SSH public-key text of the key.
    """
    key = import_dsa_private_key(privkey)
    assert(key.has_private())
    return decrypt_aes(export_dsa_public_key(key.publickey()), secret)
def decrypt_rsa(privkey, secret):
    """Decrypt *secret* with an RSA private key using PKCS#1 OAEP."""
    from Crypto.PublicKey import RSA
    from Crypto.Cipher import PKCS1_OAEP
    key = RSA.importKey(privkey)
    cipher = PKCS1_OAEP.new(key)
    message = cipher.decrypt(secret)
    return message
def decrypt(privkey, secret):
    """Decrypt *secret*, trying RSA first and falling back to DSA.

    The fallback covers private keys that the RSA importer rejects.
    """
    try:
        return decrypt_rsa(privkey, secret)
    except Exception:
        # was a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit
        return decrypt_dsa(privkey, secret)
def decrypt_base64(privkey, secret):
    """Base64-decode *secret*, then decrypt it with *privkey*."""
    import base64
    raw = base64.b64decode(secret)
    return decrypt(privkey, raw)
def to_aes_key(key):
    """Normalize *key* to exactly 32 characters: pad short keys with
    '$', truncate long ones."""
    return key.ljust(32, '$')[:32]
def toHex(s):
    """Return the lowercase hex digits of the ordinals of *s*.

    Each character contributes at least two hex digits (zero-padded).
    """
    # format(..., '02x') replaces the manual hex()/zero-pad dance, and
    # join() replaces reduce(), which is no longer a builtin on Python 3
    return ''.join(format(ord(ch), '02x') for ch in s)
def decrypt_aes(key, secret):
    """AES-CFB decrypt *secret*; its first block is the IV.

    *key* is normalized to 32 bytes via to_aes_key().
    """
    from Crypto.Cipher import AES
    iv = secret[:AES.block_size]
    secret = secret[AES.block_size:]
    cipher = AES.new(to_aes_key(key), AES.MODE_CFB, iv)
    res = cipher.decrypt(secret)
    # print toHex(iv), toHex(secret), toHex(res), res
    return res
def decrypt_aes_base64(key, secret):
    """Base64-decode *secret* and AES-decrypt it with *key*."""
    import base64
    raw = base64.b64decode(secret)
    return decrypt_aes(key, raw)
def parse_isotime(expires):
    """Parse an ISO-8601 UTC timestamp like '2020-01-02T03:04:05.000006Z'."""
    from datetime import datetime
    # the appended 'UTC' literal satisfies the %Z directive of the format
    return datetime.strptime('%sUTC' % expires, '%Y-%m-%dT%H:%M:%S.%fZ%Z')
def get_paging_info(args):
    """Translate parsed CLI arguments into API paging/filter parameters.

    Required attributes on *args*: limit, offset, order_by, order,
    details, search, filter_any; the rest are read with getattr so
    partial namespaces are accepted. List-valued options are flattened
    into indexed keys ('filter.N', 'field.N'). Returns a flat dict ready
    for a query string.
    """
    info = {}
    if args.limit:
        info['limit'] = int(args.limit)
    if args.offset:
        info['offset'] = int(args.offset)
    if args.order_by:
        info['order_by'] = args.order_by
    if args.order:
        info['order'] = args.order
    if args.details:
        info['details'] = True
    else:
        info['details'] = False
    if args.search:
        info['search'] = args.search
    if getattr(args, 'meta', False):
        info['with_meta'] = True
    if getattr(args, 'filter', None) and len(args.filter) > 0:
        idx = 0
        for f in args.filter:
            info['filter.%d' % idx] = f
            idx += 1
        # filter_any only matters when at least one filter is present
        if args.filter_any:
            info['filter_any'] = True
    if getattr(args, 'admin', False):
        info['admin'] = True
    if getattr(args, 'system', False):
        info['system'] = True
    tenant = getattr(args, 'tenant', None)
    if tenant is not None:
        # scoping to an explicit tenant implies the admin view
        info['admin'] = True
        info['tenant'] = tenant
    user = getattr(args, 'user', None)
    if user is not None:
        # scoping to an explicit user implies the admin view
        info['admin'] = True
        info['user'] = user
    if getattr(args, 'field', None) and len(args.field) > 0:
        idx = 0
        for f in args.field:
            info['field.%d' % idx] = f
            idx += 1
    return info
def md5sum(filename):
    """Return the hex MD5 digest of a file, read in blocks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as f:
        while True:
            chunk = f.read(128 * digest.block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def md5sum_m(body):
    """Return the hex MD5 digest of an in-memory byte string."""
    return hashlib.md5(body).hexdigest()
class mmap_open(object):
    """Context manager yielding a read-only memory-map of an open file.

    length == 0 maps the whole file; extra keyword arguments (e.g.
    offset) are forwarded to mmap.mmap.
    """
    def __init__(self, fd, length=0, **kwarg):
        self.fd = fd
        self.length = length
        # kwarg contains optionally offset argument for mmap
        self.kwarg = kwarg
    def __enter__(self):
        self.body = mmap.mmap(self.fd.fileno(), self.length,
                              access=mmap.ACCESS_READ, **self.kwarg)
        return self.body
    def __exit__(self, type, value, traceback):
        # close the map even when the with-body raised
        if self.body is not None:
            self.body.close()
def mkdir_p(path):
    """Create *path* and any missing parents (like `mkdir -p`).

    Raises Exception when an existing path component is not a
    directory. Idempotent when the full path already exists.
    """
    path = os.path.abspath(path)
    offset = 1
    while offset < len(path):
        sep = path.find('/', offset)
        if sep < 0:
            sep = len(path)
        parent = path[:sep]
        if not os.path.exists(parent):
            os.mkdir(parent)
        elif not os.path.isdir(parent):
            raise Exception('%s not a directory' % parent)
        offset = sep + 1
def string_to_boolean(string):
    """Return True for 'true', 'yes' or 'enable' (case-insensitive)."""
    return string.lower() in ('true', 'yes', 'enable')
def random_password(num):
    """Return a random alphanumeric password of *num* characters.

    NOTE(review): random is not cryptographically secure; use the
    'secrets' module for security-sensitive tokens.
    """
    import string
    import random
    # string.ascii_letters replaces string.letters, which was
    # locale-dependent and removed in Python 3
    chars = string.digits + string.ascii_letters
    return ''.join(random.choice(chars) for _ in range(num))
def td_total_seconds(td):
    """Whole-second equivalent of timedelta *td* (microseconds ignored).

    Presumably kept for Python versions lacking
    timedelta.total_seconds() -- verify before removing.
    """
    return td.seconds + td.days * 86400
def ensure_unicode(s):
    """Coerce *s* to a unicode string (Python 2 only).

    Non-strings are first formatted with %s; byte strings are decoded
    as UTF-8.
    """
    if not isinstance(s, basestring):
        s = '%s' % s
    if isinstance(s, unicode):
        return s
    else:
        return s.decode('utf-8')
def ensure_ascii(s):
    """Coerce *s* to a UTF-8 encoded byte string (Python 2 only).

    Despite the name, the encoding used is UTF-8, not 7-bit ASCII.
    """
    if not isinstance(s, basestring):
        s = '%s' % s
    if isinstance(s, str):
        return s
    else:
        return s.encode('utf-8')
def ensure_bool(s):
    """Coerce *s* to a bool.

    bools pass through unchanged; numbers are True when positive;
    anything else is stringified and matched against 'true'/'yes'/'1'.
    """
    if isinstance(s, bool):
        return s
    if isinstance(s, int) or isinstance(s, float):
        return s > 0
    if not isinstance(s, basestring):
        s = '%s' % s
    return s.lower() in ['true', 'yes', '1']
# https://tools.ietf.org/rfc/rfc3986.txt
# Uniform Resource Identifier (URI): Generic Syntax
# RFC 2396 is deprecated
def url_quote(s):
    """Percent-encode *s* per RFC 3986, UTF-8 encoding unicode first.

    Python 2 only (unicode type, urllib.quote).
    """
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    return urllib.quote(s)
def url_unquote(s):
    """Reverse url_quote(): decode percent-escapes in *s*."""
    return urllib.unquote(s)
def url_join(*args):
    """Percent-encode each path segment and join them with '/'."""
    args = map(ensure_ascii, args)  # quote() needs byte strings
    args = map(urllib.quote, args)
    return '/'.join(args)
def file_get_contents(fn):
    """Return the text contents of *fn*, or None on any read error."""
    try:
        with open(fn, 'r') as f:
            return f.read()
    except Exception as e:
        # best-effort read: log the failure so callers can fall back
        logging.error("Error %s while reading %s" % (str(e), fn))
        return None
| StarcoderdataPython |
1778290 | <reponame>zconnect-iot/ibm-iot-emulator<filename>zconnect-mqtt-auth/zconnectmqttauth/connection.py
import logging
from .api import parse_connection
from .auth.mongodb import VMQAuth
logger = logging.getLogger(__name__)
class MQTTConnection:
    """Facade pairing a connection-api object with an auth backend.

    The *api* object answers topic/identity questions derived from the
    connection parameters; the *auth* object answers whether the
    credentials themselves are valid or blacklisted.
    """

    def __init__(self, api, auth):
        self._api = api
        self._auth = auth

    @property
    def api_version(self):
        """Enum identifying which connection-api version is in use."""
        return self._api.version

    @property
    def blacklisted(self):
        """True only when this connection was EXPLICITLY blacklisted.

        Blacklisting can be per project secret (all devices using it),
        per project (all its devices), or per device. This says nothing
        about whether the connection is otherwise authorised.
        """
        return self._auth.blacklisted

    @property
    def authenticated(self):
        """True when the credentials belong to a valid, non-blacklisted
        user whose secret is among the project or device secrets."""
        return self._auth.authenticated

    def subscribe_authorized(self, topic):
        """Return whether subscribing to *topic* is permitted."""
        return self._api.subscribe_authorized(topic)

    def publish_authorized(self, topic):
        """Return whether publishing to *topic* is permitted."""
        return self._api.publish_authorized(topic)

    @property
    def project_id(self):
        """Project id this connection corresponds to."""
        return self._api.project_id

    @property
    def device_id(self):
        """Device id of the connecting client."""
        return self._api.device_id

    @property
    def secret_type(self):
        """'p' for a project-wide secret, 'd' for a device secret."""
        return self._api.secret_type
def get_connection(username, password, client_id, api_type=None, auth_type=None):
    """Build an MQTTConnection for the given credentials.

    By default the api/auth objects are derived from the credentials;
    *api_type*/*auth_type*, when given, are used as pre-built
    replacements (TODO confirm intended semantics of these overrides).
    """
    if api_type is None:
        api = parse_connection(username, password)
    else:
        # previously 'api' was left unbound on this branch, raising
        # UnboundLocalError at the debug line below
        api = api_type
    logger.debug("API = %s", api)
    if auth_type is None:
        auth = VMQAuth(username, password, client_id)
    else:
        # same unbound-variable bug for 'auth'
        auth = auth_type
    logger.debug("Auth method = %s", auth)
    return MQTTConnection(api, auth)
| StarcoderdataPython |
1602215 | <filename>Python/6013.py
#!/usr/bin/env python3
# @Date : 2022/2/20
# @Filename : 6013.py
# @Tag :
# @Autor : <NAME>
# @Difficulty :
from heapq import *
from typing import List, Optional
from collections import defaultdict, deque, Counter
from itertools import product,combinations,permutations,accumulate
from random import choice, randrange,randint
class ListNode:
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
# -------------------------
class Solution:
    """LeetCode 2181: merge nodes in between zeros of a linked list."""

    def mergeNodes(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Collapse each run of nodes between two zero-nodes into a
        single node holding the run's sum.

        The input starts and ends with a zero-valued node, so the
        leading zero is skipped and every later zero closes one group.
        Single pass; the stray debug print of the intermediate sums was
        removed, and the old second pass that spliced new nodes into the
        original list is gone.
        """
        dummy = ListNode()      # anchor for the result list
        tail = dummy
        total = 0
        cur = head.next         # skip the leading zero node
        while cur:
            if cur.val == 0:
                # a zero closes the current group: emit its sum
                tail.next = ListNode(total)
                tail = tail.next
                total = 0
            else:
                total += cur.val
            cur = cur.next
        return dummy.next
# -------------------------
# Ad-hoc driver: build the list 0->1->0->3->0->2->2->0 (a leading dummy
# zero node, then the values in b) and print each merged group sum.
a = Solution()
h = ListNode()
c = h
b = [1,0,3,0,2,2,0]
for i in b:
    c.next = ListNode(i)
    c = c.next
res = a.mergeNodes(h)
while res:
    print(res.val)
    res = res.next
1642948 | # Generated by Django 3.1.7 on 2021-04-18 10:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Replaces Course.Course_Faculty with a plain Course_Category char
    field and introduces the self-referencing CourseCategory model.
    """

    dependencies = [
        ('index', '0007_auto_20210327_1831'),
    ]

    operations = [
        # drop the old faculty field from Course
        migrations.RemoveField(
            model_name='course',
            name='Course_Faculty',
        ),
        # courses now carry a category as a char field (default '0')
        migrations.AddField(
            model_name='course',
            name='Course_Category',
            field=models.CharField(default='0', max_length=32, verbose_name='课程分类'),
        ),
        # category tree; ParentID choices limited to root categories
        migrations.CreateModel(
            name='CourseCategory',
            fields=[
                ('CategoryID', models.AutoField(primary_key=True, serialize=False, verbose_name='分类编号')),
                ('CategoryName', models.CharField(max_length=64, verbose_name='分类名')),
                ('CountNumber', models.IntegerField(default=0, verbose_name='下属分类数')),
                ('ParentID', models.ForeignKey(default='0', limit_choices_to={'ParentID': 0}, on_delete=django.db.models.deletion.CASCADE, to='index.coursecategory')),
            ],
            options={
                'verbose_name': '课程分类',
                'verbose_name_plural': '课程分类',
                'ordering': ['-CategoryID'],
            },
        ),
    ]
4807189 | from p5 import *
def setup():
    """p5 setup hook: 360x360 canvas; draw() runs only once."""
    size(360, 360)
    no_loop()
def draw():
    """p5 draw hook: one circle of diameter 198 centered at (165, 200)."""
    circle((165, 200), 198)
# Hand control to the p5 event loop (calls setup once, then draw).
run()
| StarcoderdataPython |
186882 | """Project exceptions"""
class ProjectImportError(Exception):
    """Raised when importing a project from a repository fails."""
| StarcoderdataPython |
1650247 | '''Algoritmo de Busca Binária'''
def bb(val, data=None):
    """Binary search for *val* in a sorted sequence.

    Args:
        val: value to look for.
        data: sorted sequence to search. Defaults to the module-level
            'vetor' read from stdin, so the original call sites keep
            working; passing it explicitly makes the function reusable.

    Returns:
        The index of *val* in the sequence, or -1 when absent.
    """
    if data is None:
        data = vetor
    lo, hi = 0, len(data) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if data[mid] == val:
            return mid
        if data[mid] < val:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
'''Inicialização'''
# Read n, then the n sorted values, then the value to search for;
# print its index (or -1 when absent).
n=int(input())
vetor=[int(x) for x in input().split()]
x=int(input())
print(bb(x))
3219930 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2020 CISCO. All rights reserved.
# Copyright 2021 <NAME>. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""NETCONF Driver for IOSXR devices."""
from __future__ import unicode_literals
# import stdlib
import re
import copy
import difflib
import logging
# import third party lib
from ncclient import manager
from ncclient.xml_ import to_ele
from ncclient.operations.rpc import RPCError
from ncclient.operations.errors import TimeoutExpiredError
from lxml import etree as ETREE
from lxml.etree import XMLSyntaxError
from netaddr import IPAddress # needed for traceroute, to check IP version
from netaddr.core import AddrFormatError
# import NAPALM base
from napalm.iosxr_netconf import constants as C
from napalm.iosxr.utilities import strip_config_header
from napalm.base.base import NetworkDriver
import napalm.base.helpers
from napalm.base.exceptions import ConnectionException
from napalm.base.exceptions import MergeConfigException
from napalm.base.exceptions import ReplaceConfigException
logger = logging.getLogger(__name__)
class IOSXRNETCONFDriver(NetworkDriver):
"""IOS-XR NETCONF driver class: inherits NetworkDriver from napalm.base."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""
Initialize IOSXR driver.
optional_args:
* config_lock (True/False): lock configuration DB after the
connection is established.
* port (int): custom port
* key_file (string): SSH key file path
"""
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.pending_changes = False
self.replace = False
self.locked = False
if optional_args is None:
optional_args = {}
self.port = optional_args.get("port", 830)
self.lock_on_connect = optional_args.get("config_lock", False)
self.key_file = optional_args.get("key_file", None)
self.config_encoding = optional_args.get("config_encoding", "cli")
if self.config_encoding not in C.CONFIG_ENCODINGS:
raise ValueError(f"config encoding must be one of {C.CONFIG_ENCODINGS}")
self.platform = "iosxr_netconf"
self.device = None
self.module_set_ns = []
def open(self):
"""Open the connection with the device."""
try:
self.device = manager.connect(
host=self.hostname,
port=self.port,
username=self.username,
password=<PASSWORD>,
key_filename=self.key_file,
timeout=self.timeout,
device_params={"name": "iosxr"},
)
if self.lock_on_connect:
self._lock()
except Exception as conn_err:
logger.error(conn_err.args[0])
raise ConnectionException(conn_err.args[0])
# Retrieve module-set namespaces based on yang library model
for capability in self.device.server_capabilities:
if C.NS["ylib"] in capability:
rpc_reply = self.device.get(
filter=(
"subtree",
C.YANG_LIB_RPC_REQ_FILTER.format(module_set=C.MODULE_SET),
)
).xml
# Converts string to tree
rpc_reply_etree = ETREE.fromstring(rpc_reply)
# Retrieves namespaces
module_set_tree = rpc_reply_etree.xpath(
".//ylib:yang-library/ylib:module-set/ylib:module/ylib:namespace",
namespaces=C.NS,
)
self.module_set_ns = [n.text for n in module_set_tree]
break
    def close(self):
        """Close the NETCONF session, releasing the config lock if held."""
        logger.debug("Closed connection with device %s" % (self.hostname))
        self._unlock()
        self.device.close_session()
def _lock(self):
"""Lock the config DB."""
if not self.locked:
self.device.lock()
self.locked = True
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
self.device.unlock()
self.locked = False
def _load_config(self, filename, config):
"""Edit Configuration."""
if filename is None:
configuration = config
else:
with open(filename) as f:
configuration = f.read()
self.pending_changes = True
self._lock()
return configuration
def _filter_config_tree(self, tree):
"""Return filtered config etree based on YANG module set."""
if self.module_set_ns:
def unexpected(n):
return n not in self.module_set_ns
else:
def unexpected(n):
return n.startswith("http://openconfig.net/yang")
for subtree in tree:
if unexpected(subtree.tag[1:].split("}")[0]):
tree.remove(subtree)
return tree
def _unexpected_modules(self, tree):
"""Return list of unexpected modules based on YANG module set."""
modules = []
if self.module_set_ns:
def unexpected(n):
return n not in self.module_set_ns
else:
def unexpected(n):
return n.startswith("http://openconfig.net/yang")
for subtree in tree:
namespace = subtree.tag[1:].split("}")[0]
if unexpected(namespace):
modules.append(namespace)
return modules
def is_alive(self):
"""Return flag with the state of the connection."""
if self.device is None:
return {"is_alive": False}
return {"is_alive": self.device._session.transport.is_active()}
    def load_replace_candidate(self, filename=None, config=None):
        """Open the candidate config and replace.

        CLI config is wrapped in the Cisco cli-cfg YANG envelope; XML
        config is validated against the device's module set first.

        Raises:
            ReplaceConfigException: on unexpected YANG modules or when
                the device rejects the copy-config.
        """
        self.replace = True
        configuration = self._load_config(filename=filename, config=config)
        if self.config_encoding == "cli":
            configuration = (
                '<config><cli xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-cli-cfg">'
                + configuration
                + "</cli></config>"
            )
        elif self.config_encoding == "xml":
            parser = ETREE.XMLParser(remove_blank_text=True)
            # reject config referencing models the device did not advertise
            unexpected_modules = self._unexpected_modules(
                ETREE.XML(configuration, parser=parser)
            )
            if unexpected_modules:
                raise ReplaceConfigException(
                    f'{C.INVALID_MODEL_REFERENCE} ({", ".join(unexpected_modules)})'
                )
            configuration = "<source>" + configuration + "</source>"
        try:
            self.device.copy_config(source=configuration, target="candidate")
        except (RPCError, XMLSyntaxError) as e:
            # reset state so a later load can start clean
            self.pending_changes = False
            self.replace = False
            logger.error(e.args[0])
            raise ReplaceConfigException(e)
    def load_merge_candidate(self, filename=None, config=None):
        """Open the candidate config and merge.

        Same envelope/validation handling as load_replace_candidate, but
        the change is applied with edit-config (rollback-on-error).

        Raises:
            MergeConfigException: on unexpected YANG modules or when the
                device rejects the edit-config.
        """
        self.replace = False
        configuration = self._load_config(filename=filename, config=config)
        if self.config_encoding == "cli":
            configuration = (
                '<config><cli xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-cli-cfg">'
                + configuration
                + "</cli></config>"
            )
        elif self.config_encoding == "xml":
            parser = ETREE.XMLParser(remove_blank_text=True)
            # reject config referencing models the device did not advertise
            unexpected_modules = self._unexpected_modules(
                ETREE.XML(configuration, parser=parser)
            )
            if unexpected_modules:
                raise MergeConfigException(
                    f'{C.INVALID_MODEL_REFERENCE} ({", ".join(unexpected_modules)})'
                )
        try:
            self.device.edit_config(
                config=configuration, error_option="rollback-on-error"
            )
        except (RPCError, XMLSyntaxError) as e:
            self.pending_changes = False
            logger.error(e.args[0])
            raise MergeConfigException(e)
    def compare_config(self):
        """Compare candidate config with running.

        Returns an empty string when no changes are pending. CLI diffs
        come from a device-side diff RPC; XML diffs are computed locally
        with difflib after filtering both configs to the expected
        module set.
        """
        diff = ""
        encoding = self.config_encoding
        # NOTE(review): this is a substring test against the CLI diff
        # RPC string; C.CONFIG_ENCODINGS looks like the intended
        # container -- confirm before changing
        if encoding not in C.CLI_DIFF_RPC_REQ:
            raise NotImplementedError(
                f"config encoding must be one of {C.CONFIG_ENCODINGS}"
            )
        if self.pending_changes:
            parser = ETREE.XMLParser(remove_blank_text=True)
            if encoding == "cli":
                diff = self.device.dispatch(to_ele(C.CLI_DIFF_RPC_REQ)).xml
                diff = ETREE.XML(diff, parser=parser)[0].text.strip()
                diff = strip_config_header(diff)
            elif encoding == "xml":
                run_conf = self.device.get_config("running").xml
                can_conf = self.device.get_config("candidate").xml
                run_conf = ETREE.tostring(
                    self._filter_config_tree(ETREE.XML(run_conf, parser=parser)[0]),
                    pretty_print=True,
                ).decode()
                can_conf = ETREE.tostring(
                    self._filter_config_tree(ETREE.XML(can_conf, parser=parser)[0]),
                    pretty_print=True,
                ).decode()
                for line in difflib.unified_diff(
                    run_conf.splitlines(1), can_conf.splitlines(1)
                ):
                    diff += line
        return diff
    def commit_config(self, message="", revert_in=None):
        """Commit the candidate configuration.

        Neither commit messages nor commit-confirm (revert_in) are
        supported by this driver; passing either raises
        NotImplementedError before anything is committed.
        """
        if revert_in is not None:
            raise NotImplementedError(
                "Commit confirm has not been implemented on this platform."
            )
        if message:
            raise NotImplementedError(
                "Commit message not implemented for this platform"
            )
        self.device.commit()
        self.pending_changes = False
        self._unlock()
    def discard_config(self):
        """Discard pending candidate changes and release the config lock."""
        self.device.discard_changes()
        self.pending_changes = False
        self._unlock()
    def rollback(self):
        """Roll the device back to the previous commit via RPC."""
        self.device.dispatch(to_ele(C.ROLLBACK_RPC_REQ))
def _find_txt(self, xml_tree, path, default=None, namespaces=None):
"""
Extract the text value from a leaf in an XML tree using XPath.
Will return a default value if leaf path not matched.
:param xml_tree:the XML Tree object. <type'lxml.etree._Element'>.
:param path: XPath to be applied in order to extract the desired data.
:param default: Value to be returned in case of a no match.
:param namespaces: namespace dictionary.
:return: a str value or None if leaf path not matched.
"""
value = None
xpath_applied = xml_tree.xpath(path, namespaces=namespaces)
if xpath_applied:
if not len(xpath_applied[0]):
if xpath_applied[0].text is not None:
value = xpath_applied[0].text.strip()
else:
value = ""
else:
value = default
return value
    def get_facts(self):
        """Return facts of the device.

        Collected from one RPC reply: hostname/uptime from the
        system-time subtree, interface names from the interfaces oper
        subtree, and os version/model/serial from the inventory subtree.
        Missing values fall back to the defaults below.
        """
        facts = {
            "vendor": "Cisco",
            "os_version": "",
            "hostname": "",
            "uptime": -1,
            "serial_number": "",
            "fqdn": "",
            "model": "",
            "interface_list": [],
        }
        interface_list = []
        facts_rpc_reply = self.device.dispatch(to_ele(C.FACTS_RPC_REQ)).xml
        # Converts string to etree
        facts_rpc_reply_etree = ETREE.fromstring(facts_rpc_reply)
        # Retrieves hostname
        hostname = napalm.base.helpers.convert(
            str,
            self._find_txt(
                facts_rpc_reply_etree,
                ".//suo:system-time/\
                suo:uptime/suo:host-name",
                default="",
                namespaces=C.NS,
            ),
        )
        # Retrieves uptime
        uptime = napalm.base.helpers.convert(
            int,
            self._find_txt(
                facts_rpc_reply_etree,
                ".//suo:system-time/\
                suo:uptime/suo:uptime",
                default="",
                namespaces=C.NS,
            ),
            -1,
        )
        # Retrieves interfaces name
        interface_tree = facts_rpc_reply_etree.xpath(
            ".//int:interfaces/int:interfaces/int:interface", namespaces=C.NS
        )
        for interface in interface_tree:
            name = self._find_txt(
                interface, "./int:interface-name", default="", namespaces=C.NS
            )
            interface_list.append(name)
        # Retrieves os version, model, serial number
        basic_info_tree = facts_rpc_reply_etree.xpath(
            ".//imo:inventory/imo:entities/imo:entity/imo:attributes/\
            imo:inv-basic-bag",
            namespaces=C.NS,
        )
        if basic_info_tree:
            os_version = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    basic_info_tree[0],
                    "./imo:software-revision",
                    default="",
                    namespaces=C.NS,
                ),
            )
            model = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    basic_info_tree[0], "./imo:model-name", default="", namespaces=C.NS
                ),
            )
            serial = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    basic_info_tree[0],
                    "./imo:serial-number",
                    default="",
                    namespaces=C.NS,
                ),
            )
        else:
            # inventory subtree absent: leave identification fields empty
            os_version = ""
            model = ""
            serial = ""
        facts.update(
            {
                "os_version": os_version,
                "hostname": hostname,
                "model": model,
                "uptime": uptime,
                "serial_number": serial,
                "fqdn": hostname,
                "interface_list": interface_list,
            }
        )
        return facts
    def get_interfaces(self):
        """Return interfaces details.

        Pairs each oper-data interface (interface-xr) with its
        configured counterpart (for the description) and maps the
        oper state/MAC/bandwidth/MTU onto the NAPALM interface schema.
        """
        interfaces = {}
        INTERFACE_DEFAULTS = {
            "is_enabled": False,
            "is_up": False,
            "mac_address": "",
            "description": "",
            "speed": -1,
            "last_flapped": -1.0,  # not available from this RPC
        }
        interfaces_rpc_reply = self.device.get(
            filter=("subtree", C.INT_RPC_REQ_FILTER)
        ).xml
        # Converts string to etree
        interfaces_rpc_reply_etree = ETREE.fromstring(interfaces_rpc_reply)
        # Retrieves interfaces details
        for (interface_tree, description_tree) in zip(
            interfaces_rpc_reply_etree.xpath(
                ".//int:interfaces/int:interface-xr/int:interface", namespaces=C.NS
            ),
            interfaces_rpc_reply_etree.xpath(
                ".//int:interfaces/int:interfaces/int:interface", namespaces=C.NS
            ),
        ):
            interface_name = self._find_txt(
                interface_tree, "./int:interface-name", default="", namespaces=C.NS
            )
            if not interface_name:
                continue
            is_up = (
                self._find_txt(
                    interface_tree, "./int:line-state", default="", namespaces=C.NS
                )
                == "im-state-up"
            )
            enabled = (
                self._find_txt(
                    interface_tree, "./int:state", default="", namespaces=C.NS
                )
                != "im-state-admin-down"
            )
            raw_mac = self._find_txt(
                interface_tree,
                "./int:mac-address/int:address",
                default="",
                namespaces=C.NS,
            )
            mac_address = napalm.base.helpers.convert(
                napalm.base.helpers.mac, raw_mac, raw_mac
            )
            # raw bandwidth scaled by 1e-3 (kbps -> Mbps, presumably --
            # confirm units against the YANG model)
            speed = napalm.base.helpers.convert(
                int,
                napalm.base.helpers.convert(
                    int,
                    self._find_txt(interface_tree, "./int:bandwidth", namespaces=C.NS),
                    0,
                )
                * 1e-3,
            )
            mtu = int(
                self._find_txt(interface_tree, "./int:mtu", default="", namespaces=C.NS)
            )
            description = self._find_txt(
                description_tree, "./int:description", default="", namespaces=C.NS
            )
            interfaces[interface_name] = copy.deepcopy(INTERFACE_DEFAULTS)
            interfaces[interface_name].update(
                {
                    "is_up": is_up,
                    "speed": speed,
                    "mtu": mtu,
                    "is_enabled": enabled,
                    "mac_address": mac_address,
                    "description": description,
                }
            )
        return interfaces
    def get_interfaces_counters(self):
        """Return interfaces counters.

        Loopback interfaces are skipped. Interfaces reporting only
        'basic' statistics get empty-string counter values; full stats
        are converted to int (defaulting to 0 when absent).
        """
        rpc_reply = self.device.get(
            filter=("subtree", C.INT_COUNTERS_RPC_REQ_FILTER)
        ).xml
        # Converts string to tree
        rpc_reply_etree = ETREE.fromstring(rpc_reply)
        interface_counters = {}
        # Retrieves interfaces counters details
        interface_xr_tree = rpc_reply_etree.xpath(
            ".//int:interfaces/int:interface-xr/int:interface", namespaces=C.NS
        )
        for interface in interface_xr_tree:
            interface_name = self._find_txt(
                interface, "./int:interface-name", default="", namespaces=C.NS
            )
            # skip loopback interfaces (LoopbackN)
            if interface_name[:8] == "Loopback" and interface_name[8:].isdigit():
                continue
            interface_stats = {}
            if (
                self._find_txt(
                    interface,
                    "./int:interface-statistics/int:stats-type",
                    default="",
                    namespaces=C.NS,
                )
                == "basic"
            ):
                # 'basic' stats carry no per-counter data
                interface_stats["tx_multicast_packets"] = ""
                interface_stats["tx_discards"] = ""
                interface_stats["tx_octets"] = ""
                interface_stats["tx_errors"] = ""
                interface_stats["rx_octets"] = ""
                interface_stats["tx_unicast_packets"] = ""
                interface_stats["rx_errors"] = ""
                interface_stats["tx_broadcast_packets"] = ""
                interface_stats["rx_multicast_packets"] = ""
                interface_stats["rx_broadcast_packets"] = ""
                interface_stats["rx_discards"] = ""
                interface_stats["rx_unicast_packets"] = ""
            else:
                int_stats_xpath = "./int:interface-statistics/int:full-interface-stats/"
                interface_stats["tx_multicast_packets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:multicast-packets-sent",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["tx_discards"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:output-drops",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["tx_octets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:bytes-sent",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["tx_errors"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:output-errors",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["rx_octets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:bytes-received",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["tx_unicast_packets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:packets-sent",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["rx_errors"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:input-errors",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["tx_broadcast_packets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:broadcast-packets-sent",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["rx_multicast_packets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:multicast-packets-received",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["rx_broadcast_packets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:broadcast-packets-received",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["rx_discards"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:input-drops",
                        "0",
                        namespaces=C.NS,
                    ),
                )
                interface_stats["rx_unicast_packets"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        interface,
                        int_stats_xpath + "int:packets-received",
                        "0",
                        namespaces=C.NS,
                    ),
                )
            interface_counters[interface_name] = interface_stats
        return interface_counters
    def get_bgp_neighbors(self):
        """Return BGP neighbors details.

        Result maps VRF name ('global' for the default VRF) to a dict
        holding that VRF's 'router_id' and a 'peers' dict keyed by
        neighbor IP.
        """
        def get_vrf_neighbors(rpc_reply_etree, xpath):
            """Return BGP neighbors details for a given VRF."""
            neighbors = {}
            for neighbor in rpc_reply_etree.xpath(xpath, namespaces=C.NS):
                this_neighbor = {}
                this_neighbor["local_as"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor, "./bgp:local-as", default="", namespaces=C.NS
                    ),
                )
                this_neighbor["remote_as"] = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor, "./bgp:remote-as", default="", namespaces=C.NS
                    ),
                )
                this_neighbor["remote_id"] = napalm.base.helpers.convert(
                    str,
                    self._find_txt(
                        neighbor, "./bgp:router-id", default="", namespaces=C.NS
                    ),
                )
                # description may be absent entirely; fall back to ""
                try:
                    this_neighbor["description"] = napalm.base.helpers.convert(
                        str,
                        self._find_txt(
                            neighbor, "./bgp:description", default="", namespaces=C.NS
                        ),
                    )
                except AttributeError:
                    logger.debug(
                        "No attribute 'description' for neighbor %s"
                        % (this_neighbor["remote_as"])
                    )
                    this_neighbor["description"] = ""
                this_neighbor["is_enabled"] = not (
                    self._find_txt(
                        neighbor,
                        "./bgp:is-administratively-shut-down",
                        default="",
                        namespaces=C.NS,
                    )
                    == "true"
                )
                # only established sessions report an uptime
                if (
                    str(
                        self._find_txt(
                            neighbor,
                            "./bgp:connection-state",
                            default="",
                            namespaces=C.NS,
                        )
                    )
                    == "bgp-st-estab"
                ):
                    this_neighbor["is_up"] = True
                    this_neighbor["uptime"] = napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor,
                            "./bgp:connection-established-time",
                            default="",
                            namespaces=C.NS,
                        ),
                    )
                else:
                    this_neighbor["is_up"] = False
                    this_neighbor["uptime"] = -1
                this_neighbor["address_family"] = {}
                if (
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-remote-address/\
                        bgp:afi",
                        default="",
                        namespaces=C.NS,
                    )
                    == "ipv4"
                ):
                    this_afi = "ipv4"
                elif (
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-remote-address/bgp:afi",
                        default="",
                        namespaces=C.NS,
                    )
                    == "ipv6"
                ):
                    this_afi = "ipv6"
                else:
                    # unknown AFI: keep the raw value as the key
                    this_afi = self._find_txt(
                        neighbor,
                        "./bgp:connection-remote-address/bgp:afi",
                        default="",
                        namespaces=C.NS,
                    )
                this_neighbor["address_family"][this_afi] = {}
                # af-data may be absent; -1 marks 'unknown' counts
                try:
                    # received = accepted + denied
                    this_neighbor["address_family"][this_afi][
                        "received_prefixes"
                    ] = napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor,
                            "./bgp:af-data/bgp:prefixes-accepted",
                            default="",
                            namespaces=C.NS,
                        ),
                        0,
                    ) + napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor,
                            "./bgp:af-data/bgp:prefixes-denied",
                            default="",
                            namespaces=C.NS,
                        ),
                        0,
                    )
                    this_neighbor["address_family"][this_afi][
                        "accepted_prefixes"
                    ] = napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor,
                            "./bgp:af-data/bgp:prefixes-accepted",
                            default="",
                            namespaces=C.NS,
                        ),
                        0,
                    )
                    this_neighbor["address_family"][this_afi][
                        "sent_prefixes"
                    ] = napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor,
                            "./bgp:af-data/\
                            bgp:prefixes-advertised",
                            default="",
                            namespaces=C.NS,
                        ),
                        0,
                    )
                except AttributeError:
                    this_neighbor["address_family"][this_afi]["received_prefixes"] = -1
                    this_neighbor["address_family"][this_afi]["accepted_prefixes"] = -1
                    this_neighbor["address_family"][this_afi]["sent_prefixes"] = -1
                neighbor_ip = napalm.base.helpers.ip(
                    self._find_txt(
                        neighbor, "./bgp:neighbor-address", default="", namespaces=C.NS
                    )
                )
                neighbors[neighbor_ip] = this_neighbor
            return neighbors

        rpc_reply = self.device.get(filter=("subtree", C.BGP_NEIGHBOR_REQ_FILTER)).xml
        # Converts string to tree
        rpc_reply_etree = ETREE.fromstring(rpc_reply)
        result = {}
        this_vrf = {}
        this_vrf["peers"] = {}
        # get neighbors and router id from default(global) VRF
        default_vrf_xpath = """.//bgp:bgp/bgp:instances/bgp:instance/
                            bgp:instance-active/bgp:default-vrf/"""
        this_vrf["router_id"] = napalm.base.helpers.convert(
            str,
            self._find_txt(
                rpc_reply_etree,
                default_vrf_xpath
                + "bgp:global-process-info/\
                bgp:vrf/bgp:router-id",
                default="",
                namespaces=C.NS,
            ),
        )
        this_vrf["peers"] = get_vrf_neighbors(
            rpc_reply_etree, default_vrf_xpath + "bgp:neighbors/bgp:neighbor"
        )
        result["global"] = this_vrf
        # get neighbors and router id from other VRFs
        vrf_xpath = """.//bgp:bgp/bgp:instances/
                    bgp:instance/bgp:instance-active/bgp:vrfs"""
        for vrf in rpc_reply_etree.xpath(vrf_xpath + "/bgp:vrf", namespaces=C.NS):
            this_vrf = {}
            this_vrf["peers"] = {}
            this_vrf["router_id"] = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    vrf,
                    "./bgp:global-process-info/bgp:vrf/\
                    bgp:router-id",
                    default="",
                    namespaces=C.NS,
                ),
            )
            vrf_name = self._find_txt(
                vrf, "./bgp:vrf-name", default="", namespaces=C.NS
            )
            this_vrf["peers"] = get_vrf_neighbors(
                rpc_reply_etree,
                vrf_xpath
                + "/bgp:vrf[bgp:vrf-name='"
                + vrf_name
                + "']\
                /bgp:neighbors/bgp:neighbor",
            )
            result[vrf_name] = this_vrf
        return result
    def get_environment(self):
        """Return environment details.

        Collects fan, power-supply, temperature, CPU and memory status over
        NETCONF and returns them in the standard napalm environment format.
        Fan/power/temperature come from the platform ENVMON model and are
        skipped entirely for models listed in ``C.PLAT_NO_ENVMON``; CPU and
        memory are collected for every platform.
        """

        def env_ns_prefix():
            """Return prefix for ENVMON model in router capabilities."""
            # The ENVMON namespace differs between XR releases; probe the
            # advertised server capabilities to pick the matching XPath
            # prefix for all lookups below.
            for prefix in C.ENVMON_NAMESPACES:
                for capability in self.device.server_capabilities:
                    if C.ENVMON_NAMESPACES[prefix] in capability:
                        return prefix
            return None

        environment_status = {}
        environment_status["fans"] = {}
        environment_status["temperature"] = {}
        environment_status["power"] = {}
        environment_status["cpu"] = {}
        environment_status["memory"] = 0.0
        router_model = self.get_facts().get("model")
        if router_model not in C.PLAT_NO_ENVMON:
            nsp = env_ns_prefix()
            rpc_reply = self.device.get(
                filter=("subtree", C.ENVMON_RPC_REQ_FILTER[nsp])
            ).xml
            # Converts string to etree
            result_tree = ETREE.fromstring(rpc_reply)
            #
            # FAN
            #
            fans = {}
            fan_location_xpath = ".//{}:environment/{}:oper/{}:fan/\
                {}:location".format(
                nsp, nsp, nsp, nsp
            )
            for fan_location in result_tree.xpath(
                fan_location_xpath, namespaces=C.ENVMON_NAMESPACES
            ):
                # Strip the leading rack prefix (e.g. "0/") from the name.
                fan_name = self._find_txt(
                    fan_location,
                    "./{}:location".format(nsp),
                    default="",
                    namespaces=C.ENVMON_NAMESPACES,
                ).lstrip("0/")
                # Only fan-tray ("FT") locations are reported; status is
                # hard-coded True — no health leaf is read here.
                if "FT" in fan_name:
                    fans[fan_name] = {"status": True}
            environment_status["fans"] = fans
            #
            # POWER
            #
            power = {}
            power_location_xpath = ".//{}:environment/{}:oper/{}:power/\
                {}:location".format(
                nsp, nsp, nsp, nsp
            )
            capacity = 0.0
            for power_location in result_tree.xpath(
                power_location_xpath, namespaces=C.ENVMON_NAMESPACES
            ):
                power_location_name = self._find_txt(
                    power_location,
                    "./{}:location".format(nsp),
                    default="",
                    namespaces=C.ENVMON_NAMESPACES,
                )
                # A purely numeric location carries the chassis-level usable
                # power capacity; it is reused for every supply parsed after
                # it (NOTE(review): assumes the numeric location precedes the
                # PT/PM entries in the reply — confirm against the model).
                if power_location_name.isdigit():
                    capacity = float(
                        self._find_txt(
                            power_location,
                            "./{}:pem_attributes/\
                            {}:usable_power_capacity".format(
                                nsp, nsp
                            ),
                            default="",
                            namespaces=C.ENVMON_NAMESPACES,
                        )
                    )
                    continue
                # Power-tray (PT) / power-module (PM) locations carry the
                # per-supply status, voltage and current.
                if (
                    re.search(r"\d/PT\d", power_location_name) is not None
                    or re.search(r"\d/PM\d", power_location_name) is not None
                ):
                    for pem_attr in power_location.xpath(
                        "./{}:pem_attributes".format(nsp),
                        namespaces=C.ENVMON_NAMESPACES,
                    ):
                        pem = self._find_txt(
                            pem_attr,
                            "./{}:pem".format(nsp),
                            default="",
                            namespaces=C.ENVMON_NAMESPACES,
                        )
                        status = self._find_txt(
                            pem_attr,
                            "./{}:status".format(nsp),
                            default="",
                            namespaces=C.ENVMON_NAMESPACES,
                        )
                        output_voltage = float(
                            self._find_txt(
                                pem_attr,
                                "./{}:output_voltage".format(nsp),
                                default="0.0",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        output_current = float(
                            self._find_txt(
                                pem_attr,
                                "./{}:output_current".format(nsp),
                                default="0.0",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        # Output power is derived as voltage * current.
                        power[pem] = {
                            "status": status == "OK",
                            "output": round(output_voltage * output_current, 2),
                            "capacity": capacity,
                        }
            environment_status["power"] = power
            #
            # TEMPERATURE
            #
            temperature = {}
            temp_location_xpath = ".//{}:environment/{}:oper/{}:temperatures/\
                {}:location".format(
                nsp, nsp, nsp, nsp
            )
            for temp_location in result_tree.xpath(
                temp_location_xpath, namespaces=C.ENVMON_NAMESPACES
            ):
                temp_location_name = self._find_txt(
                    temp_location,
                    "./{}:location".format(nsp),
                    default="",
                    namespaces=C.ENVMON_NAMESPACES,
                )
                for sensor_attributes in temp_location.xpath(
                    "./{}:sensor_attributes".format(nsp), namespaces=C.ENVMON_NAMESPACES
                ):
                    sensor_id = self._find_txt(
                        sensor_attributes,
                        "./{}:sensor_id".format(nsp),
                        default="",
                        namespaces=C.ENVMON_NAMESPACES,
                    )
                    # Only a single representative sensor per location is
                    # used ("Inlet" or "Control Sensor"); the inner loop
                    # stops at the first match.
                    if sensor_id in ["Inlet", "Control Sensor"]:
                        temp_value = float(
                            self._find_txt(
                                sensor_attributes,
                                "./{}:value".format(nsp),
                                default="",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        major_lo = float(
                            self._find_txt(
                                sensor_attributes,
                                "./{}:major_lo".format(nsp),
                                default="",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        major_hi = float(
                            self._find_txt(
                                sensor_attributes,
                                "./{}:major_hi".format(nsp),
                                default="",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        critical_lo = float(
                            self._find_txt(
                                sensor_attributes,
                                "./{}:critical_lo".format(nsp),
                                default="",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        critical_hi = float(
                            self._find_txt(
                                sensor_attributes,
                                "./{}:critical_hi".format(nsp),
                                default="",
                                namespaces=C.ENVMON_NAMESPACES,
                            )
                        )
                        # Alert/critical when the reading falls outside the
                        # major/critical low-high band.
                        is_alert = (temp_value <= major_lo) or (temp_value >= major_hi)
                        is_critical = (temp_value <= critical_lo) or (
                            temp_value >= critical_hi
                        )
                        temperature[temp_location_name] = {
                            "is_alert": is_alert,
                            "temperature": temp_value,
                            "is_critical": is_critical,
                        }
                        break
            environment_status["temperature"] = temperature
        #
        # CPU
        #
        cpu = {}
        rpc_reply = self.device.get(
            filter=("subtree", C.ENV_SYS_MON_RPC_REQ_FILTER)
        ).xml
        # Converts string to etree
        result_tree = ETREE.fromstring(rpc_reply)
        for module in result_tree.xpath(
            ".//sys:system-monitoring/sys:cpu-utilization", namespaces=C.NS
        ):
            this_cpu = {}
            # Five-minute average utilisation for the node.
            this_cpu["%usage"] = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    module, "./sys:total-cpu-five-minute", default="", namespaces=C.NS
                ),
            )
            node_name = self._find_txt(
                module, "./sys:node-name", default="", namespaces=C.NS
            )
            cpu[node_name] = this_cpu
        environment_status["cpu"] = cpu
        #
        # Memory
        #
        rpc_reply = self.device.get(filter=("subtree", C.ENV_MEM_RPC_REQ_FILTER)).xml
        # Converts string to etree
        result_tree = ETREE.fromstring(rpc_reply)
        for node in result_tree.xpath(
            ".//mem:memory-summary/mem:nodes/mem:node", namespaces=C.NS
        ):
            node_name = self._find_txt(
                node, "./mem:node-name", default="", namespaces=C.NS
            )
            # slot is the second '/'-separated field of the node name.
            slot = node_name.split("/")[1]
            if slot in ["RP0", "RSP0"]:
                available_ram = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        node,
                        "./mem:summary/mem:system-ram-memory",
                        default="",
                        namespaces=C.NS,
                    ),
                )
                free_ram = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        node,
                        "./mem:summary/\
                        mem:free-physical-memory",
                        default="",
                        namespaces=C.NS,
                    ),
                )
                if available_ram and free_ram:
                    used_ram = available_ram - free_ram
                    memory = {}
                    memory["available_ram"] = available_ram
                    memory["used_ram"] = used_ram
                    environment_status["memory"] = memory
                break  # we're only looking at one of the RSP's
        return environment_status
def get_lldp_neighbors(self):
"""Return LLDP neighbors details."""
# init result dict
lldp_neighbors = {}
rpc_reply = self.device.get(filter=("subtree", C.LLDP_RPC_REQ_FILTER)).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
lldp_xpath = ".//lldp:lldp/lldp:nodes/lldp:node/lldp:neighbors\
/lldp:details/lldp:detail"
for neighbor in result_tree.xpath(
lldp_xpath + "/lldp:lldp-neighbor", namespaces=C.NS
):
interface_name = self._find_txt(
neighbor, "./lldp:receiving-interface-name", default="", namespaces=C.NS
)
system_name = napalm.base.helpers.convert(
str,
self._find_txt(
neighbor,
"./lldp:detail/lldp:system-name",
default="",
namespaces=C.NS,
),
)
port_id = napalm.base.helpers.convert(
str,
self._find_txt(
neighbor, "./lldp:port-id-detail", default="", namespaces=C.NS
),
)
if interface_name not in lldp_neighbors.keys():
lldp_neighbors[interface_name] = []
lldp_neighbors[interface_name].append(
{"hostname": system_name, "port": port_id}
)
return lldp_neighbors
    def get_lldp_neighbors_detail(self, interface=""):
        """Detailed view of the LLDP neighbors.

        :param interface: NOTE(review): accepted for API compatibility but
            currently ignored — details for all interfaces are returned.
        :return: dict keyed by receiving interface name; each value is a
            list of per-neighbor detail dictionaries in the standard
            napalm ``get_lldp_neighbors_detail`` format.
        """
        lldp_neighbors_detail = {}
        rpc_reply = self.device.get(filter=("subtree", C.LLDP_RPC_REQ_FILTER)).xml
        # Converts string to etree
        result_tree = ETREE.fromstring(rpc_reply)
        lldp_neighbor_xpath = ".//lldp:lldp/lldp:nodes/lldp:node/lldp:neighbors\
            /lldp:details/lldp:detail/lldp:lldp-neighbor"
        for neighbor in result_tree.xpath(lldp_neighbor_xpath, namespaces=C.NS):
            interface_name = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:receiving-interface-name",
                    default="",
                    namespaces=C.NS,
                ),
            )
            parent_interface = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:receiving-parent-interface-name",
                    default="None",
                    namespaces=C.NS,
                ),
            )
            # Normalise the chassis ID to MAC format when possible; fall
            # back to the raw string if conversion fails.
            chassis_id_raw = self._find_txt(
                neighbor, "./lldp:chassis-id", default="", namespaces=C.NS
            )
            chassis_id = napalm.base.helpers.convert(
                napalm.base.helpers.mac, chassis_id_raw, chassis_id_raw
            )
            port_id = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor, "./lldp:port-id-detail", default="", namespaces=C.NS
                ),
            )
            port_descr = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:detail/lldp:port-description",
                    default="",
                    namespaces=C.NS,
                ),
            )
            system_name = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:detail/lldp:system-name",
                    default="",
                    namespaces=C.NS,
                ),
            )
            system_descr = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:detail/lldp:system-description",
                    default="",
                    namespaces=C.NS,
                ),
            )
            system_capabilities = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:detail/lldp:system-capabilities",
                    default="",
                    namespaces=C.NS,
                ),
            )
            enabled_capabilities = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    neighbor,
                    "./lldp:detail/lldp:enabled-capabilities",
                    default="",
                    namespaces=C.NS,
                ),
            )
            # Multiple neighbors can be seen on one interface; collect them
            # into a list per interface.
            if interface_name not in lldp_neighbors_detail.keys():
                lldp_neighbors_detail[interface_name] = []
            lldp_neighbors_detail[interface_name].append(
                {
                    "parent_interface": parent_interface,
                    "remote_chassis_id": chassis_id,
                    "remote_port": port_id,
                    "remote_port_description": port_descr,
                    "remote_system_name": system_name,
                    "remote_system_description": system_descr,
                    "remote_system_capab": napalm.base.helpers.transform_lldp_capab(
                        system_capabilities
                    ),
                    "remote_system_enable_capab": napalm.base.helpers.transform_lldp_capab(
                        enabled_capabilities
                    ),
                }
            )
        return lldp_neighbors_detail
def cli(self, commands):
"""Execute raw CLI commands and returns their output."""
return NotImplementedError
def get_bgp_config(self, group="", neighbor=""):
"""Return BGP configuration."""
bgp_config = {}
# a helper
def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout):
prefix_limit = {}
inet = False
inet6 = False
preifx_type = "inet"
if "ipv4" in af_table.lower():
inet = True
if "ipv6" in af_table.lower():
inet6 = True
preifx_type = "inet6"
if inet or inet6:
prefix_limit = {
preifx_type: {
af_table[5:].lower(): {
"limit": limit,
"teardown": {
"threshold": prefix_percent,
"timeout": prefix_timeout,
},
}
}
}
return prefix_limit
# here begins actual method...
rpc_reply = self.device.get_config(
source="running", filter=("subtree", C.BGP_CFG_RPC_REQ_FILTER)
).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
if not group:
neighbor = ""
bgp_group_neighbors = {}
bgp_neighbor_xpath = ".//bgpc:bgp/bgpc:instance/bgpc:instance-as/\
bgpc:four-byte-as/bgpc:default-vrf/bgpc:bgp-entity/bgpc:neighbors/bgpc:neighbor"
for bgp_neighbor in result_tree.xpath(bgp_neighbor_xpath, namespaces=C.NS):
group_name = self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-group-add-member",
default="",
namespaces=C.NS,
)
peer = napalm.base.helpers.ip(
self._find_txt(
bgp_neighbor, "./bgpc:neighbor-address", default="", namespaces=C.NS
)
)
if neighbor and peer != neighbor:
continue
description = self._find_txt(
bgp_neighbor, "./bgpc:description", default="", namespaces=C.NS
)
peer_as_x = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:remote-as/bgpc:as-xx",
default="",
namespaces=C.NS,
),
0,
)
peer_as_y = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:remote-as/bgpc:as-yy",
default="",
namespaces=C.NS,
),
0,
)
peer_as = peer_as_x * 65536 + peer_as_y
local_as_x = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:local-as/bgpc:as-xx",
default="",
namespaces=C.NS,
),
0,
)
local_as_y = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:local-as/bgpc:as-yy",
default="",
namespaces=C.NS,
),
0,
)
local_as = local_as_x * 65536 + local_as_y
af_table = self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-afs/bgpc:neighbor-af/bgpc:af-name",
default="",
namespaces=C.NS,
)
prefix_limit = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-afs/bgpc:neighbor-af/\
bgpc:maximum-prefixes/bgpc:prefix-limit",
default="",
namespaces=C.NS,
),
0,
)
prefix_percent = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-afs/bgpc:neighbor-af/\
bgpc:maximum-prefixes/bgpc:warning-percentage",
default="",
namespaces=C.NS,
),
0,
)
prefix_timeout = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-afs/bgpc:neighbor-af/\
bgpc:maximum-prefixes/bgpc:restart-time",
default="",
namespaces=C.NS,
),
0,
)
import_policy = self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-afs/bgpc:neighbor-af/bgpc:route-policy-in",
default="",
namespaces=C.NS,
)
export_policy = self._find_txt(
bgp_neighbor,
"./bgpc:neighbor-afs/bgpc:neighbor-af/bgpc:route-policy-out",
default="",
namespaces=C.NS,
)
local_addr_raw = self._find_txt(
bgp_neighbor,
"./bgpc:local-address/bgpc:local-ip-address",
default="",
namespaces=C.NS,
)
local_address = napalm.base.helpers.convert(
napalm.base.helpers.ip, local_addr_raw, local_addr_raw
)
password = self._find_txt(
bgp_neighbor,
"./bgpc:password/bgpc:password",
default="",
namespaces=C.NS,
)
nhs = False
route_reflector = False
if group_name not in bgp_group_neighbors.keys():
bgp_group_neighbors[group_name] = {}
bgp_group_neighbors[group_name][peer] = {
"description": description,
"remote_as": peer_as,
"prefix_limit": build_prefix_limit(
af_table, prefix_limit, prefix_percent, prefix_timeout
),
"export_policy": export_policy,
"import_policy": import_policy,
"local_address": local_address,
"local_as": local_as,
"authentication_key": password,
"nhs": nhs,
"route_reflector_client": route_reflector,
}
if neighbor and peer == neighbor:
break
bgp_neighbor_group_xpath = ".//bgpc:bgp/bgpc:instance/bgpc:instance-as/\
bgpc:four-byte-as/bgpc:default-vrf/bgpc:bgp-entity/\
bgpc:neighbor-groups/bgpc:neighbor-group"
for bgp_group in result_tree.xpath(bgp_neighbor_group_xpath, namespaces=C.NS):
group_name = self._find_txt(
bgp_group, "./bgpc:neighbor-group-name", default="", namespaces=C.NS
)
if group and group != group_name:
continue
bgp_type = "external" # by default external
# must check
description = self._find_txt(
bgp_group, "./bgpc:description", default="", namespaces=C.NS
)
import_policy = self._find_txt(
bgp_group,
"./bgpc:neighbor-group-afs/\
bgpc:neighbor-group-af/bgpc:route-policy-in",
default="",
namespaces=C.NS,
)
export_policy = self._find_txt(
bgp_group,
"./bgpc:neighbor-group-afs/\
bgpc:neighbor-group-af/bgpc:route-policy-out",
default="",
namespaces=C.NS,
)
multipath = (
self._find_txt(
bgp_group,
"./bgpc:neighbor-group-afs/\
bgpc:neighbor-group-af/bgpc:multipath",
default="",
namespaces=C.NS,
)
== "true"
)
peer_as_x = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:remote-as/bgpc:as-xx",
default="",
namespaces=C.NS,
),
0,
)
peer_as_y = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:remote-as/bgpc:as-yy",
default="",
namespaces=C.NS,
),
0,
)
peer_as = peer_as_x * 65536 + peer_as_y
local_as_x = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:local-as/bgpc:as-xx",
default="",
namespaces=C.NS,
),
0,
)
local_as_y = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:local-as/bgpc:as-yy",
default="",
namespaces=C.NS,
),
0,
)
local_as = local_as_x * 65536 + local_as_y
multihop_ttl = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:ebgp-multihop/bgpc:max-hop-count",
default="",
namespaces=C.NS,
),
0,
)
local_addr_raw = self._find_txt(
bgp_group,
"./bgpc:local-address/bgpc:local-ip-address",
default="",
namespaces=C.NS,
)
local_address = napalm.base.helpers.convert(
napalm.base.helpers.ip, local_addr_raw, local_addr_raw
)
af_table = self._find_txt(
bgp_group,
"./bgpc:neighbor-afs/bgpc:neighbor-af/bgpc:af-name",
default="",
namespaces=C.NS,
)
prefix_limit = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:neighbor-group-afs/\
bgpc:neighbor-group-af/bgpc:maximum-prefixes/\
bgpc:prefix-limit",
default="",
namespaces=C.NS,
),
0,
)
prefix_percent = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:neighbor-group-afs/\
bgpc:neighbor-group-af/bgpc:maximum-prefixes/\
bgpc:warning-percentage",
default="",
namespaces=C.NS,
),
0,
)
prefix_timeout = napalm.base.helpers.convert(
int,
self._find_txt(
bgp_group,
"./bgpc:neighbor-group-afs/\
bgpc:neighbor-group-af/bgpc:maximum-prefixes/\
bgpc:restart-time",
default="",
namespaces=C.NS,
),
0,
)
remove_private = True # is it specified in the XML?
bgp_config[group_name] = {
"apply_groups": [], # on IOS-XR will always be empty list!
"description": description,
"local_as": local_as,
"type": str(bgp_type),
"import_policy": import_policy,
"export_policy": export_policy,
"local_address": local_address,
"multipath": multipath,
"multihop_ttl": multihop_ttl,
"remote_as": peer_as,
"remove_private_as": remove_private,
"prefix_limit": build_prefix_limit(
af_table, prefix_limit, prefix_percent, prefix_timeout
),
"neighbors": bgp_group_neighbors.get(group_name, {}),
}
if group and group == group_name:
break
if "" in bgp_group_neighbors.keys():
bgp_config["_"] = {
"apply_groups": [],
"description": "",
"local_as": 0,
"type": "",
"import_policy": "",
"export_policy": "",
"local_address": "",
"multipath": False,
"multihop_ttl": 0,
"remote_as": 0,
"remove_private_as": False,
"prefix_limit": {},
"neighbors": bgp_group_neighbors.get("", {}),
}
return bgp_config
    def get_bgp_neighbors_detail(self, neighbor_address=""):
        """Detailed view of the BGP neighbors operational data.

        :param neighbor_address: NOTE(review): accepted for API
            compatibility but currently ignored — all neighbors are
            always returned.
        :return: dict keyed by VRF name ("global" for the default VRF),
            then by remote AS, each holding a list of per-neighbor detail
            dictionaries in the standard napalm format.
        """

        def get_vrf_neighbors_detail(
            rpc_reply_etree, xpath, vrf_name, vrf_keepalive, vrf_holdtime
        ):
            """Detailed view of the BGP neighbors operational data for a given VRF."""
            # NOTE: references _BGP_STATE_ from the enclosing scope (defined
            # after this function but before it is first called).
            bgp_vrf_neighbors_detail = {}
            bgp_vrf_neighbors_detail[vrf_name] = {}
            for neighbor in rpc_reply_etree.xpath(xpath, namespaces=C.NS):
                up = (
                    self._find_txt(
                        neighbor, "./bgp:connection-state", default="", namespaces=C.NS
                    )
                    == "bgp-st-estab"
                )
                local_as = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor, "./bgp:local-as", default="", namespaces=C.NS
                    ),
                    0,
                )
                remote_as = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor, "./bgp:remote-as", default="", namespaces=C.NS
                    ),
                    0,
                )
                router_id = napalm.base.helpers.ip(
                    self._find_txt(
                        neighbor, "./bgp:router-id", default="", namespaces=C.NS
                    )
                )
                # NOTE(review): this first remote_address value is always
                # overwritten below by the connection-remote-address lookup.
                remote_address = napalm.base.helpers.ip(
                    self._find_txt(
                        neighbor, "./bgp:neighbor-address", default="", namespaces=C.NS
                    )
                )
                local_address_configured = (
                    self._find_txt(
                        neighbor,
                        "./bgp:is-local-address-configured",
                        default="",
                        namespaces=C.NS,
                    )
                    == "true"
                )
                # The local/remote connection addresses live under either an
                # ipv4-address or an ipv6-address leaf; try both.
                local_address = napalm.base.helpers.ip(
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-local-address/\
                        bgp:ipv4-address",
                        default="",
                        namespaces=C.NS,
                    )
                    or self._find_txt(
                        neighbor,
                        "./bgp:connection-local-address/\
                        bgp:ipv6-address",
                        default="",
                        namespaces=C.NS,
                    )
                )
                local_port = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-local-port",
                        default="",
                        namespaces=C.NS,
                    ),
                )
                remote_address = napalm.base.helpers.ip(
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-remote-address/\
                        bgp:ipv4-address",
                        default="",
                        namespaces=C.NS,
                    )
                    or self._find_txt(
                        neighbor,
                        "./bgp:connection-remote-address/\
                        bgp:ipv6-address",
                        default="",
                        namespaces=C.NS,
                    )
                )
                remote_port = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-remote-port",
                        default="",
                        namespaces=C.NS,
                    ),
                )
                multihop = (
                    self._find_txt(
                        neighbor,
                        "\
                        ./bgp:is-external-neighbor-not-directly-connected",
                        default="",
                        namespaces=C.NS,
                    )
                    == "true"
                )
                remove_private_as = (
                    self._find_txt(
                        neighbor,
                        "./bgp:af-data/\
                        bgp:remove-private-as-from-updates",
                        default="",
                        namespaces=C.NS,
                    )
                    == "true"
                )
                multipath = (
                    self._find_txt(
                        neighbor,
                        "./bgp:af-data/\
                        bgp:selective-multipath-eligible",
                        default="",
                        namespaces=C.NS,
                    )
                    == "true"
                )
                import_policy = self._find_txt(
                    neighbor,
                    "./bgp:af-data/bgp:route-policy-in",
                    default="",
                    namespaces=C.NS,
                )
                export_policy = self._find_txt(
                    neighbor,
                    "./bgp:af-data/bgp:route-policy-out",
                    default="",
                    namespaces=C.NS,
                )
                # NOTE: "messges-received" appears to match the (misspelled)
                # leaf name in the XR YANG model — do not "correct" it.
                input_messages = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor, "./bgp:messges-received", default="", namespaces=C.NS
                    ),
                    0,
                )
                output_messages = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor, "./bgp:messages-sent", default="", namespaces=C.NS
                    ),
                    0,
                )
                connection_down_count = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:connection-down-count",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                messages_queued_out = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:messages-queued-out",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                # "bgp-st-estab" -> "Estab" -> "Established" etc.
                connection_state = (
                    self._find_txt(
                        neighbor, "./bgp:connection-state", default="", namespaces=C.NS
                    )
                    .replace("bgp-st-", "")
                    .title()
                )
                if connection_state == "Estab":
                    connection_state = "Established"
                previous_connection_state = napalm.base.helpers.convert(
                    str,
                    _BGP_STATE_.get(
                        self._find_txt(
                            neighbor,
                            "./bgp:previous-connection-state",
                            "0",
                            namespaces=C.NS,
                        )
                    ),
                )
                active_prefix_count = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:af-data/bgp:number-of-bestpaths",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                accepted_prefix_count = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:af-data/bgp:prefixes-accepted",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                suppressed_prefix_count = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:af-data/bgp:prefixes-denied",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                # Received = accepted + denied (no direct leaf for it).
                received_prefix_count = accepted_prefix_count + suppressed_prefix_count
                advertised_prefix_count = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:af-data/\
                        bgp:prefixes-advertised",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                suppress_4byte_as = (
                    self._find_txt(
                        neighbor, "./bgp:suppress4-byte-as", default="", namespaces=C.NS
                    )
                    == "true"
                )
                local_as_prepend = (
                    self._find_txt(
                        neighbor,
                        "./bgp:local-as-no-prepend",
                        default="",
                        namespaces=C.NS,
                    )
                    != "true"
                )
                # Per-neighbor timers fall back to the VRF-level values when
                # the neighbor leaf is absent/zero.
                holdtime = (
                    napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor, "./bgp:hold-time", default="", namespaces=C.NS
                        ),
                        0,
                    )
                    or vrf_holdtime
                )
                configured_holdtime = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:configured-hold-time",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                keepalive = (
                    napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            neighbor,
                            "./bgp:keep-alive-time",
                            default="",
                            namespaces=C.NS,
                        ),
                        0,
                    )
                    or vrf_keepalive
                )
                configured_keepalive = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        neighbor,
                        "./bgp:configured-keepalive",
                        default="",
                        namespaces=C.NS,
                    ),
                    0,
                )
                # One flap = one down/up pair; if currently up, the initial
                # establishment is not counted as a flap.
                flap_count = int(connection_down_count / 2)
                if up:
                    flap_count -= 1
                if remote_as not in bgp_vrf_neighbors_detail[vrf_name].keys():
                    bgp_vrf_neighbors_detail[vrf_name][remote_as] = []
                bgp_vrf_neighbors_detail[vrf_name][remote_as].append(
                    {
                        "up": up,
                        "local_as": local_as,
                        "remote_as": remote_as,
                        "router_id": router_id,
                        "local_address": local_address,
                        "routing_table": vrf_name,
                        "local_address_configured": local_address_configured,
                        "local_port": local_port,
                        "remote_address": remote_address,
                        "remote_port": remote_port,
                        "multihop": multihop,
                        "multipath": multipath,
                        "import_policy": import_policy,
                        "export_policy": export_policy,
                        "input_messages": input_messages,
                        "output_messages": output_messages,
                        "input_updates": 0,
                        "output_updates": 0,
                        "messages_queued_out": messages_queued_out,
                        "connection_state": connection_state,
                        "previous_connection_state": previous_connection_state,
                        "last_event": "",
                        "remove_private_as": remove_private_as,
                        "suppress_4byte_as": suppress_4byte_as,
                        "local_as_prepend": local_as_prepend,
                        "holdtime": holdtime,
                        "configured_holdtime": configured_holdtime,
                        "keepalive": keepalive,
                        "configured_keepalive": configured_keepalive,
                        "active_prefix_count": active_prefix_count,
                        "received_prefix_count": received_prefix_count,
                        "accepted_prefix_count": accepted_prefix_count,
                        "suppressed_prefix_count": suppressed_prefix_count,
                        "advertised_prefix_count": advertised_prefix_count,
                        "flap_count": flap_count,
                    }
                )
            return bgp_vrf_neighbors_detail

        rpc_reply = self.device.get(filter=("subtree", C.BGP_NEIGHBOR_REQ_FILTER)).xml
        # Converts string to tree
        rpc_reply_etree = ETREE.fromstring(rpc_reply)
        # Numeric previous-connection-state codes -> napalm state names.
        _BGP_STATE_ = {
            "0": "Unknown",
            "1": "Idle",
            "2": "Connect",
            "3": "OpenSent",
            "4": "OpenConfirm",
            "5": "Active",
            "6": "Established",
        }
        bgp_neighbors_detail = {}
        # get neighbors from default(global) VRF
        default_vrf_xpath = """.//bgp:bgp/bgp:instances/bgp:instance/
            bgp:instance-active/bgp:default-vrf"""
        vrf_name = "default"
        default_vrf_keepalive = napalm.base.helpers.convert(
            int,
            self._find_txt(
                rpc_reply_etree,
                default_vrf_xpath
                + "/bgp:global-process-info/bgp:vrf/\
                bgp:keep-alive-time",
                default="",
                namespaces=C.NS,
            ),
        )
        default_vrf_holdtime = napalm.base.helpers.convert(
            int,
            self._find_txt(
                rpc_reply_etree,
                default_vrf_xpath
                + "/bgp:global-process-info/bgp:vrf/\
                bgp:hold-time",
                default="",
                namespaces=C.NS,
            ),
        )
        bgp_neighbors_detail["global"] = get_vrf_neighbors_detail(
            rpc_reply_etree,
            default_vrf_xpath + "/bgp:neighbors/bgp:neighbor",
            vrf_name,
            default_vrf_keepalive,
            default_vrf_holdtime,
        )[vrf_name]
        # get neighbors from other VRFs
        vrf_xpath = """.//bgp:bgp/bgp:instances/
            bgp:instance/bgp:instance-active/bgp:vrfs"""
        for vrf in rpc_reply_etree.xpath(vrf_xpath + "/bgp:vrf", namespaces=C.NS):
            vrf_name = self._find_txt(
                vrf, "./bgp:vrf-name", default="", namespaces=C.NS
            )
            vrf_keepalive = napalm.base.helpers.convert(
                int,
                self._find_txt(
                    vrf,
                    "./bgp:global-process-info/bgp:vrf/\
                    bgp:keep-alive-time",
                    default="",
                    namespaces=C.NS,
                ),
            )
            vrf_holdtime = napalm.base.helpers.convert(
                int,
                self._find_txt(
                    vrf,
                    "./bgp:global-process-info/bgp:vrf/\
                    bgp:hold-time",
                    default="",
                    namespaces=C.NS,
                ),
            )
            bgp_neighbors_detail.update(
                get_vrf_neighbors_detail(
                    rpc_reply_etree,
                    vrf_xpath
                    + "/bgp:vrf[bgp:vrf-name='"
                    + vrf_name
                    + "']\
                    /bgp:neighbors/bgp:neighbor",
                    vrf_name,
                    vrf_keepalive,
                    vrf_holdtime,
                )
            )
        return bgp_neighbors_detail
def get_arp_table(self, vrf=""):
"""Return the ARP table."""
if vrf:
msg = "VRF support has not been added for \
this getter on this platform."
raise NotImplementedError(msg)
arp_table = []
rpc_reply = self.device.get(filter=("subtree", C.ARP_RPC_REQ_FILTER)).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
arp_entry_xpath = ".//arp:arp/arp:nodes/arp:node/arp:entries/arp:entry"
for arp_entry in result_tree.xpath(arp_entry_xpath, namespaces=C.NS):
interface = napalm.base.helpers.convert(
str,
self._find_txt(
arp_entry, "./arp:interface-name", default="", namespaces=C.NS
),
)
ip = napalm.base.helpers.convert(
str,
self._find_txt(arp_entry, "./arp:address", default="", namespaces=C.NS),
)
age = napalm.base.helpers.convert(
float,
self._find_txt(arp_entry, "./arp:age", default="0.0", namespaces=C.NS),
)
mac_raw = self._find_txt(
arp_entry, "./arp:hardware-address", default="", namespaces=C.NS
)
arp_table.append(
{
"interface": interface,
"mac": napalm.base.helpers.mac(mac_raw),
"ip": napalm.base.helpers.ip(ip),
"age": age,
}
)
return arp_table
def get_ntp_peers(self):
"""Return the NTP peers configured on the device."""
ntp_peers = {}
rpc_reply = self.device.get_config(
source="running", filter=("subtree", C.NTP_RPC_REQ_FILTER)
).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
for version in ["ipv4", "ipv6"]:
ntp_xpath = ".//ntpc:ntp/ntpc:peer-vrfs/ntpc:peer-vrf/\
ntpc:peer-{version}s".format(
version=version
)
for peer in result_tree.xpath(
ntp_xpath + "/ntpc:peer-{version}".format(version=version),
namespaces=C.NS,
):
peer_type = self._find_txt(
peer,
"./ntpc:peer-type-{version}/\
ntpc:peer-type".format(
version=version
),
default="",
namespaces=C.NS,
)
if peer_type != "peer":
continue
peer_address = self._find_txt(
peer,
"./ntpc:address-{version}".format(version=version),
default="",
namespaces=C.NS,
)
if not peer_address:
continue
ntp_peers[peer_address] = {}
return ntp_peers
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_servers = {}
rpc_reply = self.device.get_config(
source="running", filter=("subtree", C.NTP_RPC_REQ_FILTER)
).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
for version in ["ipv4", "ipv6"]:
ntp_xpath = ".//ntpc:ntp/ntpc:peer-vrfs/ntpc:peer-vrf/\
ntpc:peer-{version}s".format(
version=version
)
for peer in result_tree.xpath(
ntp_xpath + "/ntpc:peer-{version}".format(version=version),
namespaces=C.NS,
):
peer_type = self._find_txt(
peer,
"./ntpc:peer-type-{version}/\
ntpc:peer-type".format(
version=version
),
default="",
namespaces=C.NS,
)
if peer_type != "server":
continue
server_address = self._find_txt(
peer,
"./ntpc:address-{version}".format(version=version),
default="",
namespaces=C.NS,
)
if not server_address:
continue
ntp_servers[server_address] = {}
return ntp_servers
def get_ntp_stats(self):
"""Return NTP stats (associations)."""
ntp_stats = []
rpc_reply = self.device.get(filter=("subtree", C.NTP_STAT_RPC_REQ_FILTER)).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
xpath = ".//ntp:ntp/ntp:nodes/ntp:node/ntp:associations/\
ntp:peer-summary-info/ntp:peer-info-common"
for node in result_tree.xpath(xpath, namespaces=C.NS):
synchronized = (
self._find_txt(node, "./ntp:is-sys-peer", default="", namespaces=C.NS)
== "true"
)
address = self._find_txt(node, "./ntp:address", default="", namespaces=C.NS)
if address == "DLRSC node":
continue
referenceid = self._find_txt(
node, "./ntp:reference-id", default="", namespaces=C.NS
)
hostpoll = napalm.base.helpers.convert(
int, self._find_txt(node, "./ntp:host-poll", "0", namespaces=C.NS)
)
reachability = napalm.base.helpers.convert(
int, self._find_txt(node, "./ntp:reachability", "0", namespaces=C.NS)
)
stratum = napalm.base.helpers.convert(
int, self._find_txt(node, "./ntp:stratum", "0", namespaces=C.NS)
)
delay = napalm.base.helpers.convert(
float, self._find_txt(node, "./ntp:delay", "0.0", namespaces=C.NS)
)
offset = napalm.base.helpers.convert(
float, self._find_txt(node, "./ntp:offset", "0.0", namespaces=C.NS)
)
jitter = napalm.base.helpers.convert(
float, self._find_txt(node, "./ntp:dispersion", "0.0", namespaces=C.NS)
)
ntp_stats.append(
{
"remote": address,
"synchronized": synchronized,
"referenceid": referenceid,
"stratum": stratum,
"type": "",
"when": "",
"hostpoll": hostpoll,
"reachability": reachability,
"delay": delay,
"offset": offset,
"jitter": jitter,
}
)
return ntp_stats
def get_interfaces_ip(self):
"""Return the configured IP addresses."""
interfaces_ip = {}
rpc_reply = self.device.dispatch(to_ele(C.INT_IPV4_IPV6_RPC_REQ)).xml
# Converts string to etree
ipv4_ipv6_tree = ETREE.fromstring(rpc_reply)
# parsing IPv4
int4_xpath = ".//int4:ipv4-network/int4:nodes/int4:node/\
int4:interface-data/int4:vrfs/int4:vrf/int4:details"
for interface in ipv4_ipv6_tree.xpath(
int4_xpath + "/int4:detail", namespaces=C.NS
):
interface_name = napalm.base.helpers.convert(
str,
self._find_txt(
interface, "./int4:interface-name", default="", namespaces=C.NS
),
)
primary_ip = napalm.base.helpers.ip(
self._find_txt(
interface, "./int4:primary-address", default="", namespaces=C.NS
)
)
primary_prefix = napalm.base.helpers.convert(
int,
self._find_txt(
interface, "./int4:prefix-length", default="", namespaces=C.NS
),
)
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = {}
if "ipv4" not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name]["ipv4"] = {}
if primary_ip not in interfaces_ip[interface_name].get("ipv4", {}).keys():
interfaces_ip[interface_name]["ipv4"][primary_ip] = {
"prefix_length": primary_prefix
}
for secondary_address in interface.xpath(
"./int4:secondary-address", namespaces=C.NS
):
secondary_ip = napalm.base.helpers.ip(
self._find_txt(
secondary_address, "./int4:address", default="", namespaces=C.NS
)
)
secondary_prefix = napalm.base.helpers.convert(
int,
self._find_txt(
secondary_address,
"./int4:prefix-length",
default="",
namespaces=C.NS,
),
)
if secondary_ip not in interfaces_ip[interface_name]:
interfaces_ip[interface_name]["ipv4"][secondary_ip] = {
"prefix_length": secondary_prefix
}
# parsing IPv6
int6_xpath = ".//int6:ipv6-network/int6:nodes/int6:node/\
int6:interface-data"
for interface in ipv4_ipv6_tree.xpath(
int6_xpath
+ "/int6:vrfs/int6:vrf/int6:global-details/\
int6:global-detail",
namespaces=C.NS,
):
interface_name = napalm.base.helpers.convert(
str,
self._find_txt(
interface, "./int6:interface-name", default="", namespaces=C.NS
),
)
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = {}
if "ipv6" not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name]["ipv6"] = {}
for address in interface.xpath("./int6:address", namespaces=C.NS):
address_ip = napalm.base.helpers.ip(
self._find_txt(
address, "./int6:address", default="", namespaces=C.NS
)
)
address_prefix = napalm.base.helpers.convert(
int,
self._find_txt(
address, "./int6:prefix-length", default="", namespaces=C.NS
),
)
if (
address_ip
not in interfaces_ip[interface_name].get("ipv6", {}).keys()
):
interfaces_ip[interface_name]["ipv6"][address_ip] = {
"prefix_length": address_prefix
}
return interfaces_ip
def get_mac_address_table(self):
"""Return the MAC address table."""
mac_table = []
rpc_reply = self.device.get(filter=("subtree", C.MAC_TABLE_RPC_REQ_FILTER)).xml
# Converts string to etree
result_tree = ETREE.fromstring(rpc_reply)
mac_xpath = ".//mac:l2vpn-forwarding/mac:nodes/mac:node/mac:l2fibmac-details"
for mac_entry in result_tree.xpath(
mac_xpath + "/mac:l2fibmac-detail", namespaces=C.NS
):
mac_raw = self._find_txt(
mac_entry, "./mac:address", default="", namespaces=C.NS
)
vlan = napalm.base.helpers.convert(
int,
self._find_txt(
mac_entry, "./mac:name", default="", namespaces=C.NS
).replace("vlan", ""),
0,
)
interface = self._find_txt(
mac_entry,
"./mac:segment/mac:ac/\
mac:interface-handle",
default="",
namespaces=C.NS,
)
mac_table.append(
{
"mac": napalm.base.helpers.mac(mac_raw),
"interface": interface,
"vlan": vlan,
"active": True,
"static": False,
"moves": 0,
"last_move": 0.0,
}
)
return mac_table
    def get_route_to(self, destination="", protocol="", longer=False):
        """Return route details to a specific destination.

        Queries the IOS-XR RIB over NETCONF for the requested prefix and
        maps every route path to a napalm route dict, keyed by the
        "prefix/length" string reported by the device.

        Args:
            destination: destination as "network" or "network/prefix_length".
            protocol: optional protocol filter; napalm's "direct" is
                translated to IOS-XR's "connected".
            longer: longer-prefix matching; not supported on this driver.

        Raises:
            TypeError: destination is not a string or not a valid address.
            NotImplementedError: ``longer=True`` was requested.
        """
        routes = {}
        if not isinstance(destination, str):
            raise TypeError("Please specify a valid destination!")
        if longer:
            raise NotImplementedError("Longer prefixes not yet supported for IOS-XR")
        protocol = protocol.lower()
        if protocol == "direct":
            # IOS-XR reports locally connected routes as "connected".
            protocol = "connected"
        dest_split = destination.split("/")
        network = dest_split[0]
        prefix_length = 0
        if len(dest_split) == 2:
            prefix_length = dest_split[1]
        ipv = 4
        try:
            ipv = IPAddress(network).version
        except AddrFormatError:
            logger.error("Wrong destination IP Address format supplied to get_route_to")
            raise TypeError("Wrong destination IP Address!")
        # Select the v4 or v6 RIB request template for the subtree filter.
        if ipv == 6:
            route_info_rpc_command = (C.ROUTE_IPV6_RPC_REQ_FILTER).format(
                network=network, prefix_length=prefix_length
            )
        else:
            route_info_rpc_command = (C.ROUTE_IPV4_RPC_REQ_FILTER).format(
                network=network, prefix_length=prefix_length
            )
        rpc_reply = self.device.get(filter=("subtree", route_info_rpc_command)).xml
        # Converts string to etree
        routes_tree = ETREE.fromstring(rpc_reply)
        # The v6 RIB container is named "ipv6-rib"; v4 is just "rib".
        if ipv == 6:
            route_xpath = ".//rib{}:ipv6-rib".format(ipv)
        else:
            route_xpath = ".//rib{}:rib".format(ipv)
        route_xpath = (
            route_xpath
            + "/rib{ip}:vrfs/rib{ip}:vrf/rib{ip}:afs/\
rib{ip}:af/rib{ip}:safs/rib{ip}:saf/rib{ip}:ip-rib-route-table-names/\
rib{ip}:ip-rib-route-table-name/rib{ip}:routes/rib{ip}:route".format(
                ip=ipv
            )
        )
        for route in routes_tree.xpath(route_xpath, namespaces=C.NS):
            route_protocol = napalm.base.helpers.convert(
                str,
                self._find_txt(
                    route,
                    "./rib{}:protocol-name".format(ipv),
                    default="",
                    namespaces=C.NS,
                ).lower(),
            )
            if protocol and route_protocol != protocol:
                continue  # ignore routes learned via a different protocol
            # only in case the user requested a certain protocol
            route_details = {}
            address = self._find_txt(
                route, "./rib{}:prefix".format(ipv), default="", namespaces=C.NS
            )
            length = self._find_txt(
                route,
                "./rib{}:prefix-length-xr".format(ipv),
                default="",
                namespaces=C.NS,
            )
            priority = napalm.base.helpers.convert(
                int,
                self._find_txt(
                    route, "./rib{}:priority".format(ipv), default="", namespaces=C.NS
                ),
            )
            age = napalm.base.helpers.convert(
                int,
                self._find_txt(
                    route, "./rib{}:route-age".format(ipv), default="", namespaces=C.NS
                ),
            )
            # NOTE(review): this rebinds the *destination* parameter to the
            # "prefix/length" key of the route currently being processed —
            # intentional, but easy to misread.
            destination = napalm.base.helpers.convert(
                str, "{prefix}/{length}".format(prefix=address, length=length)
            )
            if destination not in routes.keys():
                routes[destination] = []
            # Template shared by every path of this route; per-path fields
            # (next_hop, current_active) are overlaid below.
            route_details = {
                "current_active": False,
                "last_active": False,
                "age": age,
                "next_hop": "",
                "protocol": route_protocol,
                "outgoing_interface": "",
                "preference": priority,
                "selected_next_hop": False,
                "inactive_reason": "",
                "routing_table": "default",
                "protocol_attributes": {},
            }
            # The first path listed is treated as the active one.
            first_route = True
            for route_entry in route.xpath(
                ".//rib{ipv}:route-path/rib{ipv}:ipv{ipv}-rib-edm-path".format(ipv=ipv),
                namespaces=C.NS,
            ):
                # get all possible entries
                next_hop = self._find_txt(
                    route_entry,
                    "./rib{ipv}:address".format(ipv=ipv),
                    default="",
                    namespaces=C.NS,
                )
                single_route_details = {}
                single_route_details.update(route_details)
                single_route_details.update(
                    {"current_active": first_route, "next_hop": next_hop}
                )
                routes[destination].append(single_route_details)
                first_route = False
        return routes
def get_snmp_information(self):
"""Return the SNMP configuration."""
snmp_information = {}
rpc_reply = self.device.get_config(
source="running", filter=("subtree", C.SNMP_RPC_REQ_FILTER)
).xml
# Converts string to etree
snmp_result_tree = ETREE.fromstring(rpc_reply)
_PRIVILEGE_MODE_MAP_ = {"read-only": "ro", "read-write": "rw"}
snmp_information = {
"chassis_id": self._find_txt(
snmp_result_tree,
".//snmp:snmp/snmp:system/snmp:chassis-id",
default="",
namespaces=C.NS,
),
"contact": self._find_txt(
snmp_result_tree,
".//snmp:snmp/snmp:system/snmp:contact",
default="",
namespaces=C.NS,
),
"location": self._find_txt(
snmp_result_tree,
".//snmp:snmp/snmp:system/snmp:location",
default="",
namespaces=C.NS,
),
"community": {},
}
for community in snmp_result_tree.xpath(
".//snmp:snmp/snmp:administration/\
snmp:default-communities/snmp:default-community",
namespaces=C.NS,
):
name = self._find_txt(
community, "./snmp:community-name", default="", namespaces=C.NS
)
privilege = self._find_txt(
community, "./snmp:priviledge", default="", namespaces=C.NS
)
acl = self._find_txt(
community, "./snmp:v6-access-list", default="", namespaces=C.NS
) or self._find_txt(
community, "./snmp:v4-access-list", default="", namespaces=C.NS
)
snmp_information["community"][name] = {
"mode": _PRIVILEGE_MODE_MAP_.get(privilege, ""),
"acl": acl,
}
return snmp_information
    def get_probes_config(self):
        """Return the configuration of the IP SLA probes.

        Result shape: ``{probe_name: {test_name: {probe_type, source,
        target, probe_count, test_interval}}}``. Probes with no
        ``operation-type`` subtree in the config are silently skipped.
        """
        sla_config = {}
        # Device operation-type tag -> napalm probe-type label.
        _PROBE_TYPE_XML_TAG_MAP_ = {
            "icmp-echo": "icmp-ping",
            "udp-echo": "udp-ping",
            "icmp-jitter": "icmp-ping-timestamp",
            "udp-jitter": "udp-ping-timestamp",
        }
        rpc_reply = self.device.get_config(
            source="running", filter=("subtree", C.PROBE_CFG_RPC_REQ_FILTER)
        ).xml
        # Converts string to etree
        sla_config_result_tree = ETREE.fromstring(rpc_reply)
        probes_config_xpath = ".//prbc:ipsla/prbc:operation/prbc:definitions/\
prbc:definition"
        for probe in sla_config_result_tree.xpath(probes_config_xpath, namespaces=C.NS):
            probe_name = self._find_txt(
                probe, "./prbc:operation-id", default="", namespaces=C.NS
            )
            operation_type_etree = probe.xpath("./prbc:operation-type", namespaces=C.NS)
            if len(operation_type_etree):
                # The operation type is encoded as the tag name of the single
                # child element; strip the namespace prefix in Clark notation.
                operation_type = (
                    operation_type_etree[0]
                    .getchildren()[0]
                    .tag.replace("{" + C.NS.get("prbc") + "}", "")
                )
                probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(operation_type, "")
                operation_xpath = "./prbc:operation-type/prbc:{op_type}".format(
                    op_type=operation_type
                )
                operation = probe.xpath(operation_xpath, namespaces=C.NS)[0]
                test_name = self._find_txt(
                    operation, "./prbc:tag", default="", namespaces=C.NS
                )
                source = self._find_txt(
                    operation, "./prbc:source-address", default="", namespaces=C.NS
                )
                target = self._find_txt(
                    operation, "./prbc:dest-address", default="", namespaces=C.NS
                )
                test_interval = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        operation, "./prbc:frequency", default="0", namespaces=C.NS
                    ),
                )
                probe_count = napalm.base.helpers.convert(
                    int,
                    self._find_txt(
                        operation,
                        "./prbc:history/prbc:buckets",
                        default="0",
                        namespaces=C.NS,
                    ),
                )
                if probe_name not in sla_config.keys():
                    sla_config[probe_name] = {}
                if test_name not in sla_config[probe_name]:
                    sla_config[probe_name][test_name] = {}
                sla_config[probe_name][test_name] = {
                    "probe_type": probe_type,
                    "source": source,
                    "target": target,
                    "probe_count": probe_count,
                    "test_interval": test_interval,
                }
        return sla_config
    def get_probes_results(self):
        """Return the results of the IP SLA probes.

        Combines operational statistics fetched over NETCONF with the
        running probe configuration (for source / test name / probe count,
        which the operational tree does not carry).

        Result shape: ``{probe_name: {test_name: {...napalm SLA keys...}}}``.
        Current-test delay fields are always 0.0 (no stats for an
        in-progress test).
        """
        sla_results = {}
        # Device op-type value -> napalm probe-type label.
        _PROBE_TYPE_XML_TAG_MAP_ = {
            "icmp-echo": "icmp-ping",
            "udp-echo": "udp-ping",
            "icmp-jitter": "icmp-ping-timestamp",
            "udp-jitter": "udp-ping-timestamp",
        }
        rpc_reply = self.device.get(filter=("subtree", C.PROBE_OPER_RPC_REQ_FILTER)).xml
        # Converts string to etree
        sla_results_tree = ETREE.fromstring(rpc_reply)
        probes_config = (
            self.get_probes_config()
        )  # need to retrieve also the configuration
        # source and tag/test_name not provided
        probe_result_xpath = ".//prb:ipsla/prb:operation-data/\
prb:operations/prb:operation"
        for probe in sla_results_tree.xpath(probe_result_xpath, namespaces=C.NS):
            probe_name = self._find_txt(
                probe, "./prb:operation-id", default="", namespaces=C.NS
            )
            # NOTE(review): probes_config.get(probe_name) returns None (and
            # .keys() raises AttributeError) when an operational probe is
            # absent from the running config — confirm that cannot happen.
            test_name = list(probes_config.get(probe_name).keys())[0]
            # NOTE(review): XPath positional predicates are 1-based, so
            # "prb:bucket[0]" never matches and *target* always falls back
            # to "" — verify whether "prb:bucket[1]" was intended.
            target = self._find_txt(
                probe,
                "./prb:history/prb:path/prb:lifes/prb:life/prb:buckets/\
prb:bucket[0]/prb:samples/prb:sample/prb:target-address/\
prb:ipv4-prefix-target/prb:address",
                default="",
                namespaces=C.NS,
            )
            source = probes_config.get(probe_name).get(test_name, {}).get("source", "")
            probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:latest/prb:target/\
prb:specific-stats/prb:op-type",
                    default="",
                    namespaces=C.NS,
                ),
                "",
            )
            probe_count = (
                probes_config.get(probe_name).get(test_name, {}).get("probe_count", 0)
            )
            # Per-bucket RTT samples from the most recent life.
            response_times = probe.xpath(
                "./prb:history/prb:target/prb:lifes/prb:life[last()]/\
prb:buckets/prb:bucket/prb:response-time",
                namespaces=C.NS,
            )
            response_times = [
                napalm.base.helpers.convert(
                    int,
                    self._find_txt(response_time, ".", default="0", namespaces=C.NS),
                )
                for response_time in response_times
            ]
            rtt = 0.0
            if len(response_times):
                rtt = sum(response_times, 0.0) / len(response_times)
            return_codes = probe.xpath(
                "./prb:history/prb:target/prb:lifes/prb:life[last()]/\
prb:buckets/prb:bucket/prb:return-code",
                namespaces=C.NS,
            )
            return_codes = [
                self._find_txt(return_code, ".", default="", namespaces=C.NS)
                for return_code in return_codes
            ]
            # Loss percentage = share of buckets that did NOT return OK.
            last_test_loss = 0
            if len(return_codes):
                last_test_loss = napalm.base.helpers.convert(
                    int,
                    100
                    * (
                        1
                        - return_codes.count("ipsla-ret-code-ok")
                        / napalm.base.helpers.convert(float, len(return_codes))
                    ),
                )
            # Sum of squared response times, used for the RMS/jitter estimate.
            rms = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:aggregated/prb:hours/prb:hour/\
prb:distributed/prb:target/prb:distribution-intervals/\
prb:distribution-interval/prb:common-stats/\
prb:sum2-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            global_test_updates = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:aggregated/prb:hours/prb:hour/\
prb:distributed/prb:target/prb:distribution-intervals/\
prb:distribution-interval/prb:common-stats/\
prb:update-count",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            # Jitter estimated as mean RTT minus the RMS of the samples.
            jitter = 0.0
            if global_test_updates:
                jitter = rtt - (rms / global_test_updates) ** 0.5
            # jitter = max(rtt - max(response_times), rtt - min(response_times))
            current_test_min_delay = 0.0  # no stats for undergoing test :(
            current_test_max_delay = 0.0
            current_test_avg_delay = 0.0
            last_test_min_delay = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:latest/prb:target/\
prb:common-stats/prb:min-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            last_test_max_delay = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:latest/prb:target/\
prb:common-stats/prb:max-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            last_test_sum_delay = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:latest/prb:target/\
prb:common-stats/prb:sum-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            last_test_updates = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    ".//prb:statistics/prb:latest/prb:target/\
prb:common-stats/prb:update-count",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            last_test_avg_delay = 0.0
            if last_test_updates:
                last_test_avg_delay = last_test_sum_delay / last_test_updates
            global_test_min_delay = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:aggregated/prb:hours/prb:hour/\
prb:distributed/prb:target/prb:distribution-intervals/\
prb:distribution-interval/prb:common-stats/\
prb:min-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            global_test_max_delay = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:aggregated/prb:hours/prb:hour/\
prb:distributed/prb:target/prb:distribution-intervals/\
prb:distribution-interval/prb:common-stats/\
prb:max-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            global_test_sum_delay = napalm.base.helpers.convert(
                float,
                self._find_txt(
                    probe,
                    "./prb:statistics/prb:aggregated/prb:hours/prb:hour/\
prb:distributed/prb:target/prb:distribution-intervals/\
prb:distribution-interval/prb:common-stats/\
prb:sum-response-time",
                    default="0.0",
                    namespaces=C.NS,
                ),
            )
            global_test_avg_delay = 0.0
            if global_test_updates:
                global_test_avg_delay = global_test_sum_delay / global_test_updates
            if probe_name not in sla_results.keys():
                sla_results[probe_name] = {}
            sla_results[probe_name][test_name] = {
                "target": target,
                "source": source,
                "probe_type": probe_type,
                "probe_count": probe_count,
                "rtt": rtt,
                "round_trip_jitter": jitter,
                "last_test_loss": last_test_loss,
                "current_test_min_delay": current_test_min_delay,
                "current_test_max_delay": current_test_max_delay,
                "current_test_avg_delay": current_test_avg_delay,
                "last_test_min_delay": last_test_min_delay,
                "last_test_max_delay": last_test_max_delay,
                "last_test_avg_delay": last_test_avg_delay,
                "global_test_min_delay": global_test_min_delay,
                "global_test_max_delay": global_test_max_delay,
                "global_test_avg_delay": global_test_avg_delay,
            }
        return sla_results
    def traceroute(
        self,
        destination,
        source=C.TRACEROUTE_SOURCE,
        ttl=C.TRACEROUTE_TTL,
        timeout=C.TRACEROUTE_TIMEOUT,
        vrf=C.TRACEROUTE_VRF,
    ):
        """Execute traceroute on the device and return per-hop results.

        On success returns ``{"success": {hop_index: {"probes": {probe_index:
        {"ip_address", "host_name", "rtt"}}}}}``; on failure returns
        ``{"error": <message>}`` instead of raising.
        """
        traceroute_result = {}
        # Determine the IP version; bail out with an error dict on bad input.
        ipv = 4
        try:
            ipv = IPAddress(destination).version
        except AddrFormatError:
            logger.error(
                "Incorrect format of IP Address in traceroute \
with value provided:%s"
                % (str(destination))
            )
            return {"error": "Wrong destination IP Address!"}
        # Build the optional XML fragments that are spliced into the RPC.
        source_tag = ""
        ttl_tag = ""
        timeout_tag = ""
        vrf_tag = ""
        if source:
            source_tag = "<source>{source}</source>".format(source=source)
        if ttl:
            ttl_tag = "<max-ttl>{maxttl}</max-ttl>".format(maxttl=ttl)
        if timeout:
            timeout_tag = "<timeout>{timeout}</timeout>".format(timeout=timeout)
        if vrf:
            vrf_tag = "<vrf-name>{vrf}</vrf-name>".format(vrf=vrf)
        traceroute_rpc_command = C.TRACEROUTE_RPC_REQ.format(
            version=ipv,
            destination=destination,
            vrf_tag=vrf_tag,
            source_tag=source_tag,
            ttl_tag=ttl_tag,
            timeout_tag=timeout_tag,
        )
        # Dispatch the custom RPC; translate transport errors into the
        # error-dict convention rather than propagating exceptions.
        try:
            rpc_reply = self.device.dispatch(to_ele(traceroute_rpc_command)).xml
        except TimeoutExpiredError:
            return {"error": "Timed out while waiting for reply"}
        except RPCError as e:
            if e.message:
                return {"error": e.message}
            else:
                return {"error": "Invalid request ({})".format(e.tag)}
        # Converts string to etree
        traceroute_tree = ETREE.fromstring(rpc_reply)
        hops = traceroute_tree.xpath(
            ".//tr:ipv{}/tr:hops/tr:hop".format(ipv), namespaces=C.NS
        )
        traceroute_result["success"] = {}
        for hop in hops:
            hop_index = napalm.base.helpers.convert(
                int,
                self._find_txt(hop, "./tr:hop-index", default="-1", namespaces=C.NS),
            )
            hop_address = self._find_txt(
                hop, "./tr:hop-address", default="", namespaces=C.NS
            )
            # Hops with no address (unanswered) are skipped entirely.
            if hop_address == "":
                continue
            hop_name = self._find_txt(
                hop, "./tr:hop-hostname", default=hop_address, namespaces=C.NS
            )
            traceroute_result["success"][hop_index] = {"probes": {}}
            for probe in hop.xpath("./tr:probes/tr:probe", namespaces=C.NS):
                # Device probe indices are 0-based; napalm's are 1-based.
                probe_index = (
                    napalm.base.helpers.convert(
                        int,
                        self._find_txt(
                            probe, "./tr:probe-index", default="", namespaces=C.NS
                        ),
                        0,
                    )
                    + 1
                )
                probe_hop_address = str(
                    self._find_txt(
                        probe, "./tr:hop-address", default=hop_address, namespaces=C.NS
                    )
                )
                probe_hop_name = str(
                    self._find_txt(
                        probe, "./tr:hop-hostname", default=hop_name, namespaces=C.NS
                    )
                )
                # Missing delta-time means the probe timed out; report the
                # configured timeout converted to milliseconds.
                rtt = napalm.base.helpers.convert(
                    float,
                    self._find_txt(
                        probe, "./tr:delta-time", default="", namespaces=C.NS
                    ),
                    timeout * 1000.0,
                )  # ms
                traceroute_result["success"][hop_index]["probes"][probe_index] = {
                    "ip_address": probe_hop_address,
                    "host_name": probe_hop_name,
                    "rtt": rtt,
                }
        return traceroute_result
def get_users(self):
"""Return user configuration."""
users = {}
_CISCO_GROUP_TO_CISCO_PRIVILEGE_MAP = {
"root-system": 15,
"operator": 5,
"sysadmin": 1,
"serviceadmin": 1,
"root-lr": 15,
}
_DEFAULT_USER_DETAILS = {"level": 0, "password": "", "sshkeys": []}
rpc_reply = self.device.get_config(
source="running", filter=("subtree", C.USERS_RPC_REQ_FILTER)
).xml
# Converts string to etree
users_xml_reply = ETREE.fromstring(rpc_reply)
for user_entry in users_xml_reply.xpath(
".//aaa:aaa/usr:usernames/\
usr:username",
namespaces=C.NS,
):
username = self._find_txt(
user_entry, "./usr:name", default="", namespaces=C.NS
)
group = self._find_txt(
user_entry,
"./usr:usergroup-under-usernames/\
usr:usergroup-under-username/usr:name",
default="",
namespaces=C.NS,
)
level = _CISCO_GROUP_TO_CISCO_PRIVILEGE_MAP.get(group, 0)
password = self._find_txt(
user_entry, "./usr:password", default="", namespaces=C.NS
)
user_details = _DEFAULT_USER_DETAILS.copy()
user_details.update({"level": level, "password": password})
users[username] = user_details
return users
def get_config(self, retrieve="all", full=False, sanitized=False):
"""Return device configuration."""
encoding = self.config_encoding
# 'full' argument not supported; 'with-default' capability not supported.
if full:
raise NotImplementedError(
"'full' argument has not been implemented on the IOS-XR NETCONF driver"
)
if sanitized:
raise NotImplementedError(
"sanitized argument has not been implemented on the IOS-XR NETCONF driver"
)
# default values
config = {"startup": "", "running": "", "candidate": ""}
if encoding == "cli":
subtree_filter = ("subtree", C.CLI_CONFIG_RPC_REQ_FILTER)
elif encoding == "xml":
subtree_filter = None
else:
raise NotImplementedError(
f"config encoding must be one of {C.CONFIG_ENCODINGS}"
)
if retrieve.lower() in ["running", "all"]:
config["running"] = str(
self.device.get_config(source="running", filter=subtree_filter).xml
)
if retrieve.lower() in ["candidate", "all"]:
config["candidate"] = str(
self.device.get_config(source="candidate", filter=subtree_filter).xml
)
parser = ETREE.XMLParser(remove_blank_text=True)
# Validate XML config strings and remove rpc-reply tag
for datastore in config:
if config[datastore] != "":
if encoding == "cli":
cli_tree = ETREE.XML(config[datastore], parser=parser)[0]
if cli_tree:
config[datastore] = cli_tree[0].text.strip()
else:
config[datastore] = ""
else:
config[datastore] = ETREE.tostring(
self._filter_config_tree(
ETREE.XML(config[datastore], parser=parser)[0]
),
pretty_print=True,
encoding="unicode",
)
if sanitized and encoding == "cli":
return napalm.base.helpers.sanitize_configs(
config, C.CISCO_SANITIZE_FILTERS
)
return config
| StarcoderdataPython |
3290105 | <gh_stars>0
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.models import User
from .models import Profile
class LoginForm(forms.Form):
    """Plain username/password login form."""

    username = forms.CharField(required=True, max_length=30)
    password = forms.CharField(required=True, widget=forms.PasswordInput)
class SignUpForm(UserCreationForm):
    """Registration form: username, real name, email plus the two password
    fields inherited from :class:`UserCreationForm`."""

    username = forms.CharField(max_length=30, required=True,
                               widget=forms.TextInput(attrs={'class': 'form-control'}))
    email = forms.EmailField(max_length=254, required=True,
                             widget=forms.TextInput(attrs={'class': 'form-control'}))
    real_name = forms.CharField(max_length=30, required=True,
                                widget=forms.TextInput(attrs={'class': 'form-control'}))

    class Meta:
        model = User
        # The original fields tuple contained corrupted '<PASSWORD>'
        # placeholders; UserCreationForm declares its password inputs as
        # "password1" and "password2".
        fields = ('username', 'real_name', 'email', 'password1', 'password2',)
class EditProfileForm(UserChangeForm):
    """Profile-editing form with Bootstrap-styled widgets.

    NOTE(review): this subclasses ``UserChangeForm`` (which targets the
    ``User`` model and declares a read-only password hash field) while
    ``Meta.model`` is ``Profile`` — confirm the inherited ``password``
    field behaves as intended with this model.
    """

    real_name = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class': 'form-control'}))
    avatar = forms.FileField()
    # Text date input; the placeholder documents the expected format.
    dob = forms.DateField(widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': '2018-01-30'}))
    country = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class': 'form-control'}))
    city = forms.CharField(max_length=30, widget=forms.TextInput(attrs={'class': 'form-control'}))
    phone = forms.CharField(max_length=20, widget=forms.TextInput(attrs={'class': 'form-control'}))

    class Meta:
        model = Profile
        fields = ('real_name', 'city', 'country', 'phone', 'dob', 'avatar')
| StarcoderdataPython |
3205710 | import collections
import json
import os
from ..common import Manager
from copy import deepcopy
from pymongo.cursor import Cursor
from datacatalog.jsonschemas import JSONSchemaBaseObject
from datacatalog.extensible import ExtensibleAttrDict
from datacatalog.identifiers import typeduuid
from datacatalog.linkedstores.association import Association, AssociationError
from datacatalog.linkedstores.annotations import AnnotationError
from datacatalog.linkedstores.annotations.tag import (
TagAnnotation, TagAnnotationDocument)
from datacatalog.linkedstores.annotations.text import TextAnnotation
from datacatalog import settings
# Bulk-delete result: counts of removed (annotations, associations).
DeletedRecordCounts = collections.namedtuple(
    'DeletedRecordCounts', 'annotations associations')
# Pairs an Annotation document with the Association that links it to a record.
AssociatedAnnotation = collections.namedtuple(
    'AssociatedAnnotation', 'annotation association')
# UUID triple identifying a record, its annotation, and their association.
AnnotationResponse = collections.namedtuple(
    'AnnotationResponse', 'record_uuid annotation_uuid association_uuid')
class AnnotationManagerSchema(JSONSchemaBaseObject):
    """Defines the baseline Annotation Manager event.

    Loads the JSON schema document shipped next to this module and
    initializes the base schema object from its contents.
    """

    # Name of the schema document located alongside this module.
    DEFAULT_DOCUMENT_NAME = 'anno.json'

    def __init__(self, **kwargs):
        schemafile = os.path.join(os.path.dirname(__file__), self.DEFAULT_DOCUMENT_NAME)
        # Use a context manager so the file handle is closed promptly; the
        # original passed an open() result straight to json.load and left
        # closing to the garbage collector.
        with open(schemafile, 'rb') as fh:
            j = json.load(fh)
        # NOTE(review): **kwargs is accepted but not forwarded — the schema
        # document alone defines the object. Confirm callers do not expect
        # keyword overrides.
        super(AnnotationManagerSchema, self).__init__(**j)
        self.update_id()
class AnnotationManager(Manager):
    """Coordinates Annotation documents and the Associations that link
    them to metadata records, on top of the shared Manager stores."""

    # Username representing "any authenticated user"; tags owned by this
    # user are treated as public.
    PUBLIC_USER = settings.TAPIS_ANY_AUTHENTICATED_USERNAME

    def __init__(self, mongodb, agave=None, *args, **kwargs):
        # Thin pass-through to Manager, which wires up the linked stores.
        Manager.__init__(self, mongodb, agave=agave, *args, **kwargs)

    def _new_annotation_association(self,
                                    anno_uuid,
                                    record_uuid,
                                    owner=None,
                                    note='',
                                    token=None,
                                    **kwargs):
        """Private: Creates an Association between a Record and an Annotation.

        Args:
            anno_uuid (str): UUID of the Annotation being linked.
            record_uuid (str/list): one record UUID or a list of them.
            owner (str, optional): TACC.cloud username owning the link(s).
            note (str, optional): free-text note stored on each Association.
            token (str, optional): accepted but not used by this method.

        Returns:
            list: Association objects actually created (failures are
            logged and skipped, so the list may be shorter than the input).
        """
        self.validate_tapis_username(owner, permissive=True)
        # Either a single or list of target UUIDs is allowed. A list of
        # associations is returned in either case. This promotes batch
        # association of an Annotation to multiple UUIDs
        record_uuids = self.listify_uuid(record_uuid)
        associations = list()
        # TODO - Consider parallelizing this
        for record_uuid in record_uuids:
            try:
                self.logger.debug('Associating anno:{} with record: {}'.format(anno_uuid, record_uuid))
                assoc = self.stores['association'].associate(
                    anno_uuid, record_uuid, note=note, owner=owner)
                associations.append(Association(self.sanitize(assoc)))
            except Exception as exc:
                # Best-effort batch: a failed link is logged, not raised.
                self.logger.error('Association not created: {}'.format(exc))
        if len(associations) != len(record_uuids):
            self.logger.warning('Different number of records requested as linked')
        return associations

    def _associations_for_annotation(self,
                                     connects_from,
                                     connects_to=None,
                                     owner=None,
                                     only_visible=True):
        """Private: Finds Associations referencing an Annotation

        Arguments:
            connects_from (str): UUID of the Annotation to query
            connects_to (str/list, optional): Record UUID(s) to filter the response on
            owner (str, optional): restrict results to this owner.
            only_visible (bool, optional): Return items not marked as deleted

        Returns:
            list: Associations connected with the specified Annotation
        """
        self.logger.info(
            'Finding associations for annotation {}'.format(connects_from))
        query = {'connects_from': connects_from}
        if connects_to is not None:
            query['connects_to'] = connects_to
        if only_visible:
            query[self.stores['association'].DELETE_FIELD] = True
        # TODO - revisit public/userspace filtering - do we want to return public & username or just username
        if owner is not None:
            query['owner'] = owner
        orig_assoc = self.stores['association'].query(
            query=query)
        found_associations = list()
        # Only iterate when the store returned a live Mongo cursor.
        if isinstance(orig_assoc, Cursor):
            for a in orig_assoc:
                # # When filter_field is 'uuid', the response is a single str
                # # but is a list of str otherwise, so pass thru listify_uuid
                # # to be safe
                # if isinstance(fa, str):
                #     fa = self.listify_uuid(fa)
                # Uniqueness filter. Do not use list(set()) on strings
                if a not in found_associations:
                    found_associations.append(a)
        self.logger.debug(
            'Found {} associations from {}'.format(
                len(found_associations), connects_from))
        return found_associations

    def tags_list(self,
                  limit=None,
                  skip=None,
                  public=True,
                  private=True,
                  visible=True,
                  **kwargs):
        """Retrieves the list of Tags

        Args:
            limit (int, optional): Maximum number of records to return
            skip (int, optional): Skip this many matching records
            public (bool, optional): Return public tags
            private (bool, optional): Return private (to user) tags
            visible (bool, optional): Return only tags that are not soft-deleted

        Returns:
            tuple: All tags matching the filter criteria

        NOTE(review): the *private* flag is never consulted below — confirm
        whether private/user filtering was meant to be implemented here.
        """
        tags_all = list()
        query = dict()
        if visible:
            # NOTE(review): uses the *association* store's DELETE_FIELD in a
            # query against the tag_annotation store — verify the field name
            # is shared between the two stores.
            query[self.stores['association'].DELETE_FIELD] = True
        if public is True:
            query['owner'] = self.PUBLIC_USER
        for t in self.stores['tag_annotation'].query(
                query, attr_dict=True, projection=None, limit=limit, skip=skip):
            tags_all.append(t)
        return tags_all

    # def new_tag_annotation(self,
    #                        connects_to=None,
    #                        name=None,
    #                        owner=None,
    #                        description='',
    #                        tag_owner=None,
    #                        association_note='',
    #                        token=None, **kwargs):
    #     """Creates a Tag and associates it with metadata Record.
    #     Args:
    #         connects_to (str): UUID5 of the Record to be annotated
    #         name (str): Name of the new Tag
    #         description (str, optional): Plaintext description of the Tag
    #         owner (str, optional): TACC.cloud username owning the Tag and Association
    #         tag_owner (str, optional): TACC.cloud username owning the Tag (if different)
    #     Returns:
    #         tuple: The created or updated (TagAnnotation, Association)
    #     Raises:
    #         AnnotationError: Error prevented creation of the Tag Annotation
    #         AssociationError: Error occurred creating the Association
    #     """
    #     self.validate_tapis_username(owner)
    #     if tag_owner is not None:
    #         self.validate_tapis_username(tag_owner)
    #     else:
    #         tag_owner = owner
    #     connects_from = None
    #     anno = self.stores['tag_annotation'].new_tag(name=name,
    #                                                  description=description,
    #                                                  owner=tag_owner,
    #                                                  token=token,
    #                                                  **kwargs)
    #     connects_from = anno.get('uuid', None)
    #     assoc = None
    #     if connects_from is not None:
    #         assoc = self.stores['association'].associate(
    #             connects_from, connects_to, note=association_note, owner=owner)
    #     return AssociatedAnnotation(anno, assoc)

    def delete_association(self,
                           uuid=None,
                           token=None,
                           force=False,
                           **kwargs):
        """Deletes an Association by its UUID

        Args:
            uuid (str/list): Association UUID (or list) to delete
            token (str, optional): accepted but currently not forwarded.
            force (bool, optional): passed through to the store's delete.

        Returns:
            tuple: (0, Associations deleted)
        """
        del_uuids = self.listify_uuid(uuid)
        count_deleted_uuids = 0
        for duuid in del_uuids:
            try:
                self.logger.info('Deleting Association {}'.format(duuid))
                # NOTE(review): the *token* argument is accepted above but
                # token=None is hard-coded here — confirm this is intentional.
                self.stores['association'].delete_document(
                    duuid, token=None, force=force)
                count_deleted_uuids = count_deleted_uuids + 1
            except Exception as exc:
                # Best-effort batch delete: log and continue.
                self.logger.error(
                    'Failed to delete {}: {}'.format(duuid, exc))
        self.logger.debug(
            'Deleted {} Associations'.format(count_deleted_uuids))
        return DeletedRecordCounts(0, count_deleted_uuids)

    # Create a Text Anno AND bind to record in one shot
    # Create a Text Anno AND bind as child of another in one shot
    # Validate usernames via Agave call
    # Create and retire Tag and Text associations
# Batch purge by target, source, username
| StarcoderdataPython |
189336 | import pytest as pytest
from main import select_tasks
from task_selector.taskexceptions import EmptyFileException, \
JSONMissingFieldsExepction
def test_empty_json_list():
    """An empty JSON task list selects nothing."""
    result = select_tasks("[]")
    assert result == "[]"
# Each case pairs a malformed task-list document with the exception the
# selector is expected to raise for it.
@pytest.mark.parametrize('json_str,exception', [
    # Completely empty input.
    (""
     , EmptyFileException),
    # Task missing the required 'profit' field.
    ("[{'name': 'A', 'resources': ['1', '2', '3']}]"
     , JSONMissingFieldsExepction),
    # Task missing the required 'resources' field.
    ("[{'name': 'A', 'profit': 9.2}]"
     , JSONMissingFieldsExepction),
    # One task in the list is missing its 'name' field.
    ("[{'name': 'A', 'resources': [1], 'profit': 1},"
     " {'resources': ['2','3'], 'profit': 1},"
     " {'name': 'C', 'resources': ['3','4'], 'profit': 2}]"
     , JSONMissingFieldsExepction)
])
def test_missing_json_fields(json_str, exception):
    # Malformed input must surface as the documented typed exception.
    with pytest.raises(exception):
        select_tasks(json_str)
# Each case pairs a well-formed task-list document with the serialized
# selection the selector is expected to produce.
@pytest.mark.parametrize('json_str,result', [
    # Single task: selected as-is.
    ("[{'name': 'A', 'resources': ['1', '2', '3'], 'profit': 9.2}]",
     "[{'name': 'A', 'resources': ['1', '2', '3'], 'profit': 9.2}]"
     ),
    # B and C overlap A's resources; the expected output keeps only A.
    ("[{'name': 'A', 'resources': ['1', '2', '3'], 'profit': 9.2},"
     " {'name': 'B', 'resources': ['2'] , 'profit': 0.4},"
     " {'name': 'C', 'resources': ['3'] , 'profit': 2.9}]",
     "[{'name': 'A', 'resources': ['1', '2', '3'], 'profit': 9.2}]"
     ),
    # A and C are resource-disjoint; the expected output keeps both.
    ("[{'name': 'A', 'resources': ['1', '2'], 'profit': 1},"
     " {'name': 'B', 'resources': ['2', '3'], 'profit': 1},"
     " {'name': 'C', 'resources': ['3', '4'], 'profit': 1}]",
     "[{'name': 'A', 'resources': ['1', '2'], 'profit': 1.0},"
     " {'name': 'C', 'resources': ['3', '4'], 'profit': 1.0}]"),
    # A task with an empty resource list conflicts with nothing.
    ("[{'name': 'A', 'resources': [], 'profit': 1},"
     " {'name': 'B', 'resources': ['2', '3'], 'profit': 1},"
     " {'name': 'C', 'resources': ['3', '4'], 'profit': 2}]",
     "[{'name': 'A', 'resources': [], 'profit': 1.0},"
     " {'name': 'C', 'resources': ['3', '4'], 'profit': 2.0}]")
])
def test_select_tasks(json_str, result):
    # Selector output must exactly match the expected serialized list.
    assert select_tasks(json_str) == result
| StarcoderdataPython |
1675028 | """
Hadoop Blueprint
"""
from calm.dsl.builtins import ref, basic_cred
from calm.dsl.builtins import action
from calm.dsl.builtins import CalmTask
from calm.dsl.builtins import CalmVariable
from calm.dsl.builtins import Service, Package, Substrate
from calm.dsl.builtins import Deployment, Profile, Blueprint
from calm.dsl.builtins import read_provider_spec, read_local_file
CENTOS_PASSWD = read_local_file(".tests/password")
DefaultCred = basic_cred("centos", CENTOS_PASSWD, name="default cred", default=True)
class Hadoop_Master(Service):
    """Hadoop_Master service"""

    @action
    def __create__():
        # Lifecycle hook: run the master configuration script over SSH
        # when the service is created.
        CalmTask.Exec.ssh(name="ConfigureMaster", filename="scripts/ConfigureMaster.sh")

    @action
    def __start__():
        # Lifecycle hook: run the master start script over SSH when the
        # service is started.
        CalmTask.Exec.ssh(
            name="StartMasterServices", filename="scripts/StartMasterServices.sh"
        )
class Hadoop_Slave(Service):
    """Hadoop_Slave service"""

    @action
    def __create__():
        # Lifecycle hook: run the slave configuration script over SSH
        # when the service is created.
        CalmTask.Exec.ssh(name="ConfigureSlave", filename="scripts/ConfigureSlave.sh")

    @action
    def __start__():
        # Lifecycle hook: run the slave start script over SSH when the
        # service is started.
        CalmTask.Exec.ssh(
            name="StartSlaveServices", filename="scripts/StartSlaveServices.sh"
        )
class Hadoop_Master_Package(Package):
    """Hadoop Master package"""

    # Service this package installs onto.
    services = [ref(Hadoop_Master)]

    @action
    def __install__():
        # Run the master install script over SSH during package install.
        CalmTask.Exec.ssh(
            name="PackageInstallTask", filename="scripts/master_PackageInstallTask.sh"
        )
class Hadoop_Slave_Package(Package):
    """Hadoop Slave package"""

    # Service this package installs onto.
    services = [ref(Hadoop_Slave)]

    @action
    def __install__():
        # Run the slave install script over SSH during package install.
        CalmTask.Exec.ssh(
            name="PackageInstallTask", filename="scripts/slave_PackageInstallTask.sh"
        )
class Hadoop_Master_AHV(Substrate):
    """Hadoop Master Substrate"""

    # VM spec loaded from the local AHV spec file; the VM name is templated
    # with the replica-index and deployment-time macros.
    provider_spec = read_provider_spec("ahv_spec.yaml")
    provider_spec.spec["name"] = "Hadoop_Master-@@{calm_array_index}@@-@@{calm_time}@@"

    # The VM is considered ready once SSH on port 22 answers using the
    # default credential.
    readiness_probe = {
        "disabled": False,
        "delay_secs": "0",
        "connection_type": "SSH",
        "connection_port": 22,
        "credential": ref(DefaultCred),
    }
class Hadoop_Slave_AHV(Substrate):
    """Hadoop Slave Substrate"""

    # VM spec loaded from the local AHV spec file; the VM name is templated
    # with the replica-index and deployment-time macros.
    provider_spec = read_provider_spec("ahv_spec.yaml")
    provider_spec.spec["name"] = "Hadoop_Slave-@@{calm_array_index}@@-@@{calm_time}@@"

    # The VM is considered ready once SSH on port 22 answers using the
    # default credential.
    readiness_probe = {
        "disabled": False,
        "delay_secs": "0",
        "connection_type": "SSH",
        "connection_port": 22,
        "credential": ref(DefaultCred),
    }
class Hadoop_Master_Deployment(Deployment):
    """Hadoop Master Deployment"""

    # Single master node: ties the master package to the master substrate.
    packages = [ref(Hadoop_Master_Package)]
    substrate = ref(Hadoop_Master_AHV)
class Hadoop_Slave_Deployment(Deployment):
    """Hadoop Slave Deployment"""

    # Scalable slave pool: starts at 2 replicas, scalable up to 5 via the
    # profile's scale-out/scale-in actions.
    min_replicas = "2"
    max_replicas = "5"

    packages = [ref(Hadoop_Slave_Package)]
    substrate = ref(Hadoop_Slave_AHV)
class Nutanix(Profile):
    """Hadoop Profile"""

    deployments = [Hadoop_Master_Deployment, Hadoop_Slave_Deployment]

    @action
    def ScaleOutSlaves():
        # Runtime-editable number of slave replicas to add (default "1").
        COUNT = CalmVariable.Simple.int("1", is_mandatory=True, runtime=True)  # noqa
        CalmTask.Scaling.scale_out(
            "@@{COUNT}@@", target=ref(Hadoop_Slave_Deployment), name="ScaleOutSlaves"
        )

    @action
    def ScaleInSlaves():
        # Runtime-editable number of slave replicas to remove (default "1").
        COUNT = CalmVariable.Simple.int("1", is_mandatory=True, runtime=True)  # noqa
        CalmTask.Scaling.scale_in(
            "@@{COUNT}@@", target=ref(Hadoop_Slave_Deployment), name="ScaleInSlaves"
        )
class HadoopDslBlueprint(Blueprint):
    """* [Hadoop Master Name Node Dashboard](http://@@{Hadoop_Master.address}@@:50070)
    * [Hadoop Master Data Node Dashboard](http://@@{Hadoop_Master.address}@@:8088)
    """
    # NOTE: the docstring above is rendered as the blueprint description in
    # the Calm UI -- keep the dashboard links intact when editing.
    credentials = [DefaultCred]
    services = [Hadoop_Master, Hadoop_Slave]
    packages = [Hadoop_Master_Package, Hadoop_Slave_Package]
    substrates = [Hadoop_Master_AHV, Hadoop_Slave_AHV]
    profiles = [Nutanix]
def main():
    """Emit the blueprint as pretty-printed JSON on stdout."""
    blueprint_json = HadoopDslBlueprint.json_dumps(pprint=True)
    print(blueprint_json)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4838638 | <filename>patron/tests/unit/api/openstack/test_api_version_request.py
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from patron.api.openstack import api_version_request
from patron import exception
from patron import test
class APIVersionRequestTests(test.NoDBTestCase):
    """Exercise parsing, comparison and range-matching of APIVersionRequest."""

    def test_valid_version_strings(self):
        # (version string, expected major, expected minor)
        valid_cases = [
            ("1.1", 1, 1),
            ("2.10", 2, 10),
            ("5.234", 5, 234),
            ("12.5", 12, 5),
            ("2.0", 2, 0),
            ("2.200", 2, 200),
        ]
        for version, exp_major, exp_minor in valid_cases:
            v = api_version_request.APIVersionRequest(version)
            self.assertEqual(v.ver_major, exp_major)
            self.assertEqual(v.ver_minor, exp_minor)

    def test_null_version(self):
        # A request built with no argument is the "null" version.
        self.assertTrue(api_version_request.APIVersionRequest().is_null())

    def test_invalid_version_strings(self):
        bad_versions = [
            "2", "200", "2.1.4", "172.16.17.32", "5 .3", "5. 3",
            "5.03", "02.1", "2.001", "", " 2.1", "2.1 ",
        ]
        for bad in bad_versions:
            self.assertRaises(exception.InvalidAPIVersionString,
                              api_version_request.APIVersionRequest, bad)

    def test_version_comparisons(self):
        v1 = api_version_request.APIVersionRequest("2.0")
        v2 = api_version_request.APIVersionRequest("2.5")
        v3 = api_version_request.APIVersionRequest("5.23")
        v4 = api_version_request.APIVersionRequest("2.0")
        v_null = api_version_request.APIVersionRequest()
        # Ordering and equality follow the (major, minor) pair.
        self.assertLess(v1, v2)
        self.assertGreater(v3, v2)
        self.assertNotEqual(v1, v2)
        self.assertEqual(v1, v4)
        self.assertNotEqual(v1, v_null)
        self.assertEqual(v_null, v_null)
        # Comparing against a plain string is rejected.
        self.assertRaises(TypeError, v1.__cmp__, "2.1")

    def test_version_matches(self):
        req = api_version_request.APIVersionRequest
        v1, v2, v3 = req("2.0"), req("2.5"), req("2.45")
        v4, v5 = req("3.3"), req("3.23")
        v6, v7, v8 = req("2.0"), req("3.3"), req("4.0")
        v_null = req()
        # (version, range minimum, range maximum) triples that must match;
        # a null bound means the range is open on that side.
        for target, lo, hi in [(v2, v1, v3), (v2, v1, v_null), (v1, v6, v2),
                               (v4, v2, v7), (v4, v_null, v7),
                               (v4, v_null, v8)]:
            self.assertTrue(target.matches(lo, hi))
        for target, lo, hi in [(v1, v2, v3), (v5, v2, v4), (v2, v3, v1)]:
            self.assertFalse(target.matches(lo, hi))
        # A null version cannot be matched against a range.
        self.assertRaises(ValueError, v_null.matches, v1, v3)

    def test_get_string(self):
        v1_string = "3.23"
        v1 = api_version_request.APIVersionRequest(v1_string)
        self.assertEqual(v1_string, v1.get_string())
        # The null version has no string representation.
        self.assertRaises(ValueError,
                          api_version_request.APIVersionRequest().get_string)
| StarcoderdataPython |
3206340 | <reponame>Sunshengjin/RoboWare-Studio<filename>extensions/RichardHe.you-complete-me-1.0.36/ycmd/ycmd/tests/rust/subcommands_test.py
#!/usr/bin/env python
#
# Copyright (C) 2015 ycmd contributors.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from rust_handlers_test import Rust_Handlers_test
from nose.tools import eq_
class Rust_Subcommands_test( Rust_Handlers_test ):
    """GoTo* subcommand tests for the Rust completer."""
    def _GoTo( self, params ):
        # Issue the given GoTo command from the usage site at line 7, col 12
        # of test.rs and expect it to land on the definition at 1:8.
        filepath = self._PathToTestFile( 'test.rs' )
        contents = open( filepath ).read()
        self._WaitUntilServerReady()
        command = params[ 'command' ]
        goto_data = self._BuildRequest( completer_target = 'filetype_default',
                                        command_arguments = [ command ],
                                        line_num = 7,
                                        column_num = 12,
                                        contents = contents,
                                        filetype = 'rust',
                                        filepath = filepath )
        results = self._app.post_json( '/run_completer_command',
                                       goto_data )
        eq_( {
            'line_num': 1, 'column_num': 8, 'filepath': filepath
        }, results.json )
    def GoTo_all_test( self ):
        # nose generator test: run _GoTo once per GoTo command flavour.
        tests = [
            { 'command': 'GoTo' },
            { 'command': 'GoToDefinition' },
            { 'command': 'GoToDeclaration' }
        ]
        for test in tests:
            yield ( self._GoTo, test )
| StarcoderdataPython |
122433 | import pandas as pd
# change this
def filter_func(x):
    """Row predicate for ``DataFrame.apply``: True when the credit is >= 5."""
    minimum_credit = 5
    return x['credit'] >= minimum_credit
# Load the course table, keep only rows passing filter_func, and write the
# result out. Row-wise apply is slow on large frames but simple to customise.
data = pd.read_csv('output/courses.csv')
df = data[data.apply(filter_func, axis=1)]
#data.iloc[0,:].to_csv('output/query.csv', header=False, index=False)
df.to_csv('output/query.csv', index=False)
3377893 | <filename>dvorfs/process_genewise.py
#!/usr/bin/env python3
import sys, argparse, os
import pandas as pd
import numpy as np
from collections import defaultdict
from . import parse_genewise
# NOTE for sequence arithmetic: records use BED-style index
def target_overlap(h1, h2):
    """Overlap length of two hits on the target sequence (BED-style index).

    A negative result is the size of the gap between non-overlapping hits.
    """
    right_edge = min(h1['tend'], h2['tend'])
    left_edge = max(h1['tstart'], h2['tstart'])
    return right_edge - left_edge
def query_overlap(h1, h2):
    """Overlap length of two hits on the query pHMM (BED-style index).

    A negative result is the size of the gap between non-overlapping hits.
    """
    right_edge = min(h1['qend'], h2['qend'])
    left_edge = max(h1['qstart'], h2['qstart'])
    return right_edge - left_edge
def correct_order(h1, h2):
    """True when the two hits appear in the same order on query and target.

    The target order is read left-to-right on the '+' strand and
    right-to-left (by end coordinate) on the '-' strand.
    """
    by_query = sorted([h1, h2], key=lambda h: h['qstart'])
    if h1['tstrand'] == '+':
        by_target = sorted([h1, h2], key=lambda h: h['tstart'])
    elif h1['tstrand'] == '-':
        by_target = sorted([h1, h2], key=lambda h: h['tend'], reverse=True)
    return by_query == by_target
def simplify_record(r):
    """Copy of hit record *r* without the grouping keys (target/strand/query)."""
    grouping_keys = ('target', 'tstrand', 'query')
    return {key: value for key, value in r.items() if key not in grouping_keys}
def combine(r1,r2):
    """Merge downstream hit r2 into r1 in place and return the mutated r1.

    r1 must precede r2 on both query and target (guaranteed by the caller's
    sort in merge_hits).  Overlapping query columns are trimmed from r1's
    tail; gaps are padded with 'x'/'nnn' placeholder codons.
    """
    overlap = query_overlap(r1,r2)
    if overlap > 0:
        # r2 re-covers the last `overlap` query columns of r1: drop r1's tail.
        r1['aaseq'] = r1['aaseq'][:-overlap] + r2['aaseq']
        r1['naseq'] = r1['naseq'][:-overlap] + r2['naseq']
        r1['alpos'] = r1['alpos'][:-overlap] + r2['alpos']
        r1['matches'] = r1['matches'] + r2['matches'] - overlap
    else:
        # Gap of -overlap columns between the hits: pad with placeholders
        # and synthesise the missing canonical alignment positions.
        r1['aaseq'] = r1['aaseq'] + ['x']*-overlap + r2['aaseq']
        r1['naseq'] = r1['naseq'] + ['nnn']*-overlap + r2['naseq']
        r1['alpos'] = (r1['alpos']
                       + list(zip(range(r1['alpos'][-1][0]+1, r2['alpos'][0][0]),[0]*-overlap))
                       + r2['alpos'])
        r1['matches'] = r1['matches'] + r2['matches']
    # Combined hit spans the union of both ranges; scores accumulate.
    r1['qstart'] = min([r1['qstart'],r2['qstart']])
    r1['qend'] = max([r1['qend'],r2['qend']])
    r1['tstart'] = min([r1['tstart'],r2['tstart']])
    r1['tend'] = max([r1['tend'],r2['tend']])
    r1['bits'] = r1['bits'] + r2['bits']
    r1['overlapped'] += r2['overlapped']
    r1['hsps'].append(simplify_record(r2))
    return r1
def merge_hits(df, max_overlap=0, max_distance=0):
    """Collapse genewise HSPs into merged hits per (target, strand, query).

    Two passes per group: first drop every hit that overlaps a higher
    scoring hit on the target, then greedily chain the survivors along the
    target (respecting strand) when their query ranges overlap by at most
    *max_overlap* columns and they are at most *max_distance* bp apart.
    Returns a DataFrame of merged hits with 'hsps'/'no_hsps' bookkeeping.
    """
    merged = []
    for g_tup, g in df.groupby(['target','tstrand','query'], sort=False):
        target, tstrand, query = g_tup
        # remove lower scoring overlapping hits
        stack = g.sort_values('bits', ascending=False).to_dict(orient='records')
        no_overlaps = []
        while len(stack) > 0:
            r1 = stack.pop(0)
            r1['target'], r1['tstrand'], r1['query'] = g_tup
            r1['overlapped'] = 0
            new_stack = []
            for r2 in stack:
                if target_overlap(r1,r2) > 0:
                    # r2 is a worse hit and not useful, remove it
                    r1['overlapped'] += 1
                else:
                    # keep r2
                    new_stack.append(r2)
            stack = new_stack
            no_overlaps.append(r1)
        # sort by position and merge; '-' strand hits chain right-to-left
        if tstrand == '+':
            merge_stack = sorted(no_overlaps, key=lambda x: x['tstart'])
        else:
            merge_stack = sorted(no_overlaps, key=lambda x: x['tend'], reverse=True)
        while len(merge_stack) > 0:
            r1 = merge_stack.pop(0)
            r1['hsps'] = [simplify_record(r1)]
            new_merge_stack = []
            for i, r2 in enumerate(merge_stack):
                if query_overlap(r1,r2) > max_overlap:
                    # don't merge but keep r2
                    new_merge_stack.append(r2)
                elif -1*target_overlap(r1,r2) > max_distance:
                    # don't merge but keep all following r2s
                    new_merge_stack += merge_stack[i:]
                    break
                elif not correct_order(r1,r2):
                    # query pieces in wrong order:
                    # don't merge but keep all following r2s
                    new_merge_stack += merge_stack[i:]
                    break
                else:
                    # no overlaps and not too far between r1 and r2
                    # so combine them and remove r2 from the stack
                    r1 = combine(r1,r2)
            merge_stack = new_merge_stack
            r1['no_hsps'] = len(r1['hsps'])
            merged.append(r1)
    all_merged_hits = pd.DataFrame.from_dict(merged)
    return all_merged_hits
def parse_mask_tsv(f):
    """Parse a hit-mask TSV into ``{query_name: [(start, end), ...]}``.

    *f* is any iterable of lines.  A line with fewer than two coordinate
    fields masks the whole query (0..1000000).
    """
    masks = {}
    for line in f:
        fields = line.strip().split('\t')
        name, coords = fields[0], fields[1:]
        if len(coords) < 2:
            region = (0, 1000000)
        else:
            region = (int(coords[0]), int(coords[1]))
        masks.setdefault(name, []).append(region)
    return masks
def make_ali_array(hit_df):
    """
    Make a numpy array of the codon alignment with final row as codon reference row.
    hit_df must contain the columns: id, alpos, naseq.
    Outputs the array and a list of the hit ids representing the row-wise order of sequences in the
    alignment.
    """
    # One object-array column per alignment position; last row is the
    # reference row ('N--' canonical / 'n--' insert columns).
    col_dict = defaultdict(lambda: np.full((len(hit_df)+1,1), '---', dtype=object))
    # Mark all the canonical cols
    for c in range(hit_df['qstart'].min(), hit_df['qend'].max()+1):
        col_dict[(c,0)][-1,0] = 'N--'
    seq_order = []
    # Fill in the codon sequences in the cols
    for row, hit in enumerate(hit_df.itertuples()):
        seq_order.append(hit.id)
        for pos,s in zip(hit.alpos, hit.naseq):
            col_dict[pos][row,0] = s
            # if its non-canonical col (insert state) label it
            if pos[1] > 0:
                col_dict[pos][-1,0] = 'n--'
    # Columns sorted by (query position, insert index).
    col_items = sorted(list(col_dict.items()), key=lambda i: i[0])
    cols = [ i[1] for i in col_items ]
    # pad each column to mod 3 length
    for c in cols:
        width = max(len(i[0]) for i in c)
        width += -width % 3
        for i in range(len(c)):
            c[i,0] += '-'*(width-len(c[i,0]))
    # remove empty cols (no sequence in any row)
    cols = [ c for c in cols if not np.all(c[:-1] == '---') ]
    ali_arr = np.hstack(cols)
    return ali_arr, seq_order
def process_genewise(infile, fasta, windowed=False,
                     merge=False, merge_distance=1000, merge_overlap=2,
                     filter_type='no-overlap', hit_mask=None, bit_cutoff=15.0,
                     length_cutoff=30, out_cols=None, make_alis=False):
    """Parse, filter and optionally merge genewise hits.

    Parameters
    ----------
    infile : str
        Genewise output file (must have been produced with ``-alb``).
    fasta : str
        The exact fasta file genewise was run against.
    windowed : bool
        If True, target IDs look like ``contig:start-end``; hit coordinates
        are shifted back onto the original contig.
    merge : bool
        Merge compatible HSPs (see merge_hits) before filtering.
    filter_type : str
        'all', 'best-per' (best hit per target) or 'no-overlap' (drop hits
        overlapped on the target by higher-scoring hits on the same strand).
    hit_mask : str or None
        Path to a TSV of query regions whose hits should be discarded.
    bit_cutoff, length_cutoff : float, int
        Minimum bit score / aligned-codon count for a hit to survive.
    out_cols : list or None
        Extra columns to append to the output table.
    make_alis : bool
        Also build per-query codon alignment arrays via make_ali_array.

    Returns
    -------
    (pandas.DataFrame, list or None)
        Final hit table and, if requested, a list of
        (query_name, alignment_array, hit_id_order) tuples.
    """
    # Avoid the mutable-default pitfall for out_cols.
    out_cols = [] if out_cols is None else out_cols

    results = parse_genewise.parse(infile, fasta)
    df = pd.DataFrame.from_records(results, columns=parse_genewise.HSP._fields)
    # NOTE: this filter keeps the original index labels (no reset_index),
    # which matters for the 'no-overlap' branch below.
    df = df[df['bits'] > 0.0]

    if windowed:
        # Target IDs encode the window as "contig:start-end": recover the
        # real contig name and shift coordinates by the window start.
        df['wstart'] = df['target'].apply(lambda x: int(x.split(':')[1].split('-')[0]))
        df['target'] = df['target'].apply(lambda x: x.split(':')[0])
        df['tstart'] = df['tstart'] + df['wstart']
        df['tend'] = df['tend'] + df['wstart']
        df = df.drop('wstart', axis=1)

    # filter / merge hits depending on filter mode
    if merge:
        hits = merge_hits(df, max_overlap=merge_overlap, max_distance=merge_distance)
    else:
        hits = df
        hits['no_hsps'] = 1

    if len(hits) < 1:
        filtered_hits = hits
    elif filter_type == 'all':
        filtered_hits = hits
    elif filter_type == 'best-per':
        filtered_hits = (hits
                         .groupby('target')
                         .apply(lambda x: x.nlargest(1, 'bits'))
                         .sort_values('bits', ascending=False)
                         .reset_index(drop=True)
                         )
    elif filter_type == 'no-overlap':
        # Per (target, strand): repeatedly take the best remaining hit and
        # discard anything it overlaps on the target.
        keep = []
        for g_tup, g in hits.groupby(['target', 'tstrand'], sort=False):
            stack = list(g.sort_values('bits', ascending=False).itertuples())
            while len(stack) > 0:
                r1 = stack.pop(0)
                new_stack = []
                for r2 in stack:
                    if min(r1.tend, r2.tend) - max(r1.tstart, r2.tstart) <= 0:
                        new_stack.append(r2)
                stack = new_stack
                keep.append(r1.Index)
        # BUGFIX: `keep` holds index *labels* from itertuples(); after the
        # bits > 0 filter above the labels are not positional, so selecting
        # with .iloc could pick wrong rows or raise IndexError.  Use .loc.
        filtered_hits = hits.loc[keep]

    # apply hit_mask
    if len(filtered_hits) < 1:
        pass
    elif hit_mask:
        with open(hit_mask) as f:
            hit_mask = parse_mask_tsv(f)

        def mask_hits(r):
            # Keep the hit unless it lies entirely inside a masked region.
            masked = False
            regions = hit_mask.get(r.query, [])
            for start, end in regions:
                if r.qstart >= start and r.qend <= end:
                    masked = True
            return not masked

        mask = filtered_hits.apply(mask_hits, axis=1)
        filtered_hits = filtered_hits[mask]

    # apply quality filters (minimum aligned codons and bit score)
    if len(filtered_hits) < 1:
        final_hits = filtered_hits
    else:
        mask = filtered_hits.apply(lambda r: r.matches >= length_cutoff and r.bits >= bit_cutoff, axis=1)
        final_hits = filtered_hits[mask].reset_index(drop=True)

    # Stable output order and a 1-based hit id.
    final_hits = (final_hits
                  .sort_values(['query', 'bits'], ascending=[True, False])
                  .reset_index(drop=True)
                  .reset_index()
                  )
    final_hits['id'] = final_hits['index'] + 1

    # finalise output format
    cols = ['id', 'bits', 'query', 'qstart', 'qend', 'target',
            'tstrand', 'tstart', 'tend', 'no_hsps', 'matches']
    cols += out_cols
    output_df = final_hits[cols]

    if make_alis:
        alis = []
        for name, hit_df in final_hits.groupby(['query']):
            alis.append((name, *make_ali_array(hit_df)))
    else:
        alis = None

    return output_df, alis
def main():
    """CLI entry point: parse arguments, run process_genewise, write outputs."""
    parser = argparse.ArgumentParser(description=f"process_genewise.py is part of DVORFS")
    parser.add_argument('infile',
        type=argparse.FileType('r'),
        help="""Genewise output file. Genewise must have been run with the `-alb` argument.""")
    parser.add_argument('-s', '--fasta',
        type=argparse.FileType('r'), required=True,
        help="""Exact fasta file that genewise was run with.""")
    parser.add_argument('-w', '--windowed',
        action='store_true',
        help="""Input fasta to genewise is windowed. Parses the seq IDs to get real coordinates.""")
    parser.add_argument('-m', '--merge',
        action='store_true',
        help="""Hits will be merged before filtering.
        (Worse hits from same query are removed at overlaps.)""")
    parser.add_argument('-d', '--merge-distance',
        type=int, default=1000,
        help="""Maximum allowed distance between two hits in the subject sequence (in bp) for them to be merged.""")
    parser.add_argument('-o', '--merge-overlap',
        type=int, default=2,
        help="""Maximum number of positions overlapping in the query pHMM for two hits to be merged.""")
    parser.add_argument('-f', '--filter',
        choices=['all', 'no-overlap', 'best-per'], default='no-overlap',
        help="""
        all: All hits are kept.
        no_overlap: Hits are removed if they are overlapped by a better hit from a
        different query.
        best_per: Only the highest scoring hit per contig is kept.""")
    parser.add_argument('-k','--hit-mask',
        type=argparse.FileType('r'),
        help="""TSV file with 3 columns: 1. name of query, 2. start position of masked region, 3. end position of masked region.""")
    parser.add_argument('-b', '--bit-cutoff',
        type=float, default=15.0,
        help="""Minimum bit score for hits to be kept after merging.""")
    parser.add_argument('-l', '--length-cutoff',
        type=int, default=30,
        help="""Minimum number of codons aligned to the query pHMM for hits to be kept after merging.""")
    parser.add_argument('-a', '--aaseq',
        action='store_true',
        help="""Output predicted AA sequence.""")
    parser.add_argument('-n', '--naseq',
        action='store_true',
        help="""Output nucleotide sequence with comma seperated codons/indels.""")
    parser.add_argument('--full',
        action='store_true')
    parser.add_argument('--out',
        type=argparse.FileType('w'), default=sys.stdout,
        help="""Specify output file. By default, output goes to STDOUT.""")
    parser.add_argument('--aliout',
        help="""Output an alignment fasta of hits for each HMM with any hits into specified dir.""")
    args = parser.parse_args()
    # --full implies both sequence outputs plus merge bookkeeping columns.
    outcols = []
    if args.full:
        args.aaseq = args.naseq = True
    if args.aaseq:
        outcols.append('aaseq')
    if args.naseq:
        outcols.append('naseq')
    if args.full:
        outcols += ['overlapped', 'hsps']
    make_alis = True if args.aliout else False
    hit_mask = args.hit_mask.name if args.hit_mask else None
    df, alis = process_genewise(args.infile.name, args.fasta.name, windowed=args.windowed,
        merge=args.merge, merge_distance=args.merge_distance, merge_overlap=args.merge_overlap,
        filter_type=args.filter, hit_mask=hit_mask, bit_cutoff=args.bit_cutoff, length_cutoff=args.length_cutoff,
        out_cols=outcols, make_alis=make_alis)
    # Flatten codon lists into printable strings for the TSV output.
    if args.aaseq:
        df['aaseq'] = df['aaseq'].apply(lambda l:''.join(l))
    if args.naseq:
        df['naseq'] = df['naseq'].apply(lambda l:','.join(l))
    df.to_csv(args.out, sep='\t', index=False, float_format='%.2f')
    if args.aliout:
        # One alignment fasta per query HMM; first record is the codon
        # reference row produced by make_ali_array.
        os.makedirs(args.aliout, exist_ok=True)
        for name, ali, order in alis:
            with open(os.path.join(args.aliout, f'{name}.ali.fa'), 'w') as f:
                print(">CODONS", file=f)
                print(''.join(ali[-1]), file=f)
                for id, row in zip(order,ali[:-1]):
                    print(f">hitid-{id}", file=f)
                    print(''.join(row), file=f)
61111 | from pkg_resources import resource_string
import json
# Cached contents of the bundled proto.json resource; stays None when the
# resource is missing or unreadable (best-effort load).
proto_info = None
try:
    proto_json = resource_string(__name__, 'proto.json')
    proto_info = json.loads(proto_json)
except Exception:
    # Deliberately swallow any load/parse error: callers must handle None.
    pass
| StarcoderdataPython |
3386776 | <filename>cyberbattle/agents/baseline/notebooks/notebook_dql.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# -*- coding: utf-8 -*-
# %%
"""Deep Q-learning agent (notebook)
This notebooks can be run directly from VSCode, to generate a
traditional Jupyter Notebook to open in your browser
you can run the VSCode command `Export Currenty Python File As Jupyter Notebook`.
"""
# %%
import os
import sys
import logging
import gym
import torch
import cyberbattle.agents.baseline.learner as learner
import cyberbattle.agents.baseline.plotting as p
import cyberbattle.agents.baseline.agent_wrapper as w
import cyberbattle.agents.baseline.agent_dql as dqla
from cyberbattle.agents.baseline.agent_wrapper import Verbosity
import cyberbattle.agents.baseline.agent_randomcredlookup as rca
import importlib
import cyberbattle._env.cyberbattle_env as cyberbattle_env
import cyberbattle._env.cyberbattle_chain as cyberbattle_chain
importlib.reload(learner)
importlib.reload(cyberbattle_env)
importlib.reload(cyberbattle_chain)
logging.basicConfig(stream=sys.stdout, level=logging.ERROR, format="%(levelname)s: %(message)s")
# %%
torch.cuda.is_available()
# %%
# To run once
# import plotly.io as pio
# pio.orca.config.use_xvfb = True
# pio.orca.config.save()
# %%
# Chain environments of increasing size; the attacker reward goal scales
# with chain length.
cyberbattlechain_4 = gym.make('CyberBattleChain-v0', size=4, attacker_goal=cyberbattle_env.AttackerGoal(reward=2180))
cyberbattlechain_10 = gym.make('CyberBattleChain-v0', size=10, attacker_goal=cyberbattle_env.AttackerGoal(reward=4000))
cyberbattlechain_20 = gym.make('CyberBattleChain-v0', size=20, attacker_goal=cyberbattle_env.AttackerGoal(reward=7000))
ep = w.EnvironmentBounds.of_identifiers(
    maximum_total_credentials=22,
    maximum_node_count=22,
    identifiers=cyberbattlechain_10.identifiers
)
# Shared experiment hyper-parameters used by all runs below.
iteration_count = 9000
training_episode_count = 50
eval_episode_count = 10
# %%
# Run Deep Q-learning
# 0.015
# Train a DQL agent with epsilon-greedy exploration on the size-10 chain.
best_dqn_learning_run_10 = learner.epsilon_greedy_search(
    cyberbattle_gym_env=cyberbattlechain_10,
    environment_properties=ep,
    learner=dqla.DeepQLearnerPolicy(
        ep=ep,
        gamma=0.015,
        replay_memory_size=10000,
        target_update=10,
        batch_size=512,
        learning_rate=0.01),  # torch default is 1e-2
    episode_count=training_episode_count,
    iteration_count=iteration_count,
    epsilon=0.90,
    render=False,
    # epsilon_multdecay=0.75,  # 0.999,
    epsilon_exponential_decay=5000,  # 10000
    epsilon_minimum=0.10,
    verbosity=Verbosity.Quiet,
    title="DQL"
)
# %% Plot episode length
p.plot_episodes_length([best_dqn_learning_run_10])
# %%
if not os.path.exists("images"):
    os.mkdir("images")
# %%
# Evaluate the trained policy greedily (epsilon=0: pure exploitation).
dql_exploit_run = learner.epsilon_greedy_search(
    cyberbattlechain_10,
    ep,
    learner=best_dqn_learning_run_10['learner'],
    episode_count=eval_episode_count,
    iteration_count=iteration_count,
    epsilon=0.0,  # 0.35,
    render=False,
    render_last_episode_rewards_to='images/chain10',
    title="Exploiting DQL",
    verbosity=Verbosity.Quiet
)
# %%
# Baseline: a purely random policy for comparison.
random_run = learner.epsilon_greedy_search(
    cyberbattlechain_10,
    ep,
    learner=learner.RandomPolicy(),
    episode_count=eval_episode_count,
    iteration_count=iteration_count,
    epsilon=1.0,  # purely random
    render=False,
    verbosity=Verbosity.Quiet,
    title="Random search"
)
# %%
# Plot averaged cumulative rewards for DQL vs Random vs DQL-Exploit
themodel = dqla.CyberBattleStateActionModel(ep)
p.plot_averaged_cummulative_rewards(
    all_runs=[
        best_dqn_learning_run_10,
        random_run,
        dql_exploit_run
    ],
    title=f'Benchmark -- max_nodes={ep.maximum_node_count}, episodes={eval_episode_count},\n'
          f'State: {[f.name() for f in themodel.state_space.feature_selection]} '
          f'({len(themodel.state_space.feature_selection)}\n'
          f"Action: abstract_action ({themodel.action_space.flat_size()})")
# %%
# plot cumulative rewards for all episodes
p.plot_all_episodes(best_dqn_learning_run_10)
##################################################
# %%
# %%
# Train a second agent on the small (size-4) chain for transfer experiments.
best_dqn_4 = learner.epsilon_greedy_search(
    cyberbattle_gym_env=cyberbattlechain_4,
    environment_properties=ep,
    learner=dqla.DeepQLearnerPolicy(
        ep=ep,
        gamma=0.15,
        replay_memory_size=10000,
        target_update=5,
        batch_size=256,
        learning_rate=0.01),
    episode_count=training_episode_count,
    iteration_count=iteration_count,
    epsilon=0.90,
    render=False,
    epsilon_exponential_decay=5000,
    epsilon_minimum=0.10,
    verbosity=Verbosity.Quiet,
    title="DQL"
)
# %%
# Transfer: policy trained on size-10 evaluated on the size-20 chain,
# benchmarked against a credential-cache-exploiting heuristic.
learner.transfer_learning_evaluation(
    environment_properties=ep,
    trained_learner=best_dqn_learning_run_10,
    eval_env=cyberbattlechain_20,
    eval_epsilon=0.0,  # alternate with exploration to help generalization to bigger network
    eval_episode_count=eval_episode_count,
    iteration_count=iteration_count,
    benchmark_policy=rca.CredentialCacheExploiter(),
    benchmark_training_args={'epsilon': 0.90,
                             'epsilon_exponential_decay': 10000,
                             'epsilon_minimum': 0.10,
                             'title': 'Credential lookups (ϵ-greedy)'}
)
# %%
# Transfer: policy trained on size-4 evaluated on the size-10 chain.
learner.transfer_learning_evaluation(
    environment_properties=ep,
    trained_learner=best_dqn_4,
    eval_env=cyberbattlechain_10,
    eval_epsilon=0.0,  # exploit Q-matrix only
    eval_episode_count=eval_episode_count,
    iteration_count=iteration_count,
    benchmark_policy=rca.CredentialCacheExploiter(),
    benchmark_training_args={'epsilon': 0.90,
                             'epsilon_exponential_decay': 10000,
                             'epsilon_minimum': 0.10,
                             'title': 'Credential lookups (ϵ-greedy)'}
)
# %%
# Transfer: policy trained on size-4 evaluated on the size-20 chain.
learner.transfer_learning_evaluation(
    environment_properties=ep,
    trained_learner=best_dqn_4,
    eval_env=cyberbattlechain_20,
    eval_epsilon=0.0,  # exploit Q-matrix only
    eval_episode_count=eval_episode_count,
    iteration_count=iteration_count,
    benchmark_policy=rca.CredentialCacheExploiter(),
    benchmark_training_args={'epsilon': 0.90,
                             'epsilon_exponential_decay': 10000,
                             'epsilon_minimum': 0.10,
                             'title': 'Credential lookups (ϵ-greedy)'}
)
# %%
| StarcoderdataPython |
1699349 | '''
Code for HackerRank
Interview Preparation Kit
<NAME>
###############
problem
###############
There are a number of people queued up, and each person wears a sticker indicating their initial position in the queue
Initial positions increment by 1 from 1 at the front of the line to n at the back
Any person in the queue can bribe the person directly in front of them to swap positions
If two people swap positions, they still wear the same sticker denoting their original places in line.
One person can bribe at most two others.
the minimum number of bribes that took place to get the queue into its current state!
It must print an integer representing the minimum number of bribes necessary,
or Too chaotic if the line configuration is not possible.
###############
Observations
###############
* initial state is the ascending sorted line
* if the current position is larger than two, so it is chaotic
* how to count the number of bribes?
my first approach is counting the number of bubbles in bubble sort algorithm,
but this solution doesn't work for large list of numbers,
even if I break the counting when I realize that the list is chaotic.
who far is a element from initial position?
if distance > 2, list is too chaotic
else, sum the distances.
we don't care if P has moved
forwards, it is better to count how many times
P has RECEIVED a bribe, by looking at who is
ahead of P. P's original position is the value
of P.
Anyone who bribed P cannot get to higher than
one position in front if P's original position,
so we need to look from one position in front
of P's original position to one in front of P's
current position, and see how many of those
positions in Q contain a number large than P.
In other words we will look from P-1 to i-1,
which in Python is range(P-1,i-1+1), or simply
range(P-1,i). To make sure we don't try an
index less than zero, replace P-1 with
max(P-1,0)
'''
import math
import os
import random
import re
import sys
def minimumBribes(q):
    """Print the minimum number of bribes needed to reach queue state ``q``.

    Each value in ``q`` is a person's original 1-based position. A person
    may move forward at most two places (one bribe per place moved); if
    anyone sits more than two places ahead of their original position the
    state is impossible and 'Too chaotic' is printed instead.
    """
    bribes = 0
    for i, p in enumerate(q):
        # p started at position p (1-based) and now sits at position i+1.
        if p - (i + 1) > 2:
            print('Too chaotic')
            return
        # Count everyone who overtook p: anyone who bribed p can be no
        # further forward than one place in front of p's original spot,
        # so scan from there up to p's current position.
        for j in range(max(p - 2, 0), i):
            if q[j] > p:
                bribes += 1
    print(bribes)
def main():
    # NOTE(review): the number of test cases is hard-coded to 2 here;
    # presumably it should be read from input -- confirm against the
    # original HackerRank harness.
    t = 2
    for i in range(t):
        n = int(input())  # queue length; read but not used by minimumBribes
        c = list(map(int, input().rstrip().split()))
        minimumBribes(c)
main()
| StarcoderdataPython |
3241167 | import mitsuba
import pytest
import enoki as ek
from enoki.dynamic import UInt32
@pytest.fixture(scope="module")
def interaction():
    # Minimal surface interaction at the origin with shading frame aligned
    # to the +z normal, 0.1 units along the ray.
    from mitsuba.core import Frame3f
    from mitsuba.render import SurfaceInteraction3f
    si = SurfaceInteraction3f()
    si.t = 0.1
    si.p = [0, 0, 0]
    si.n = [0, 0, 1]
    si.sh_frame = Frame3f(si.n)
    return si
def test01_create(variant_scalar_rgb):
    from mitsuba.render import BSDFFlags
    from mitsuba.core.xml import load_string
    # With a single nested BSDF the twosided adapter exposes it on both
    # the front and the back side.
    bsdf = load_string("""<bsdf version="2.0.0" type="twosided">
                              <bsdf type="diffuse"/>
                          </bsdf>""")
    assert bsdf is not None
    assert bsdf.component_count() == 2
    assert bsdf.flags(0) == BSDFFlags.DiffuseReflection | BSDFFlags.FrontSide
    assert bsdf.flags(1) == BSDFFlags.DiffuseReflection | BSDFFlags.BackSide
    assert bsdf.flags() == bsdf.flags(0) | bsdf.flags(1)
    # With two nested BSDFs the first serves the front, the second the back.
    bsdf = load_string("""<bsdf version="2.0.0" type="twosided">
                              <bsdf type="roughconductor"/>
                              <bsdf type="diffuse"/>
                          </bsdf>""")
    assert bsdf is not None
    assert bsdf.component_count() == 2
    assert bsdf.flags(0) == BSDFFlags.GlossyReflection | BSDFFlags.FrontSide
    assert bsdf.flags(1) == BSDFFlags.DiffuseReflection | BSDFFlags.BackSide
    assert bsdf.flags() == bsdf.flags(0) | bsdf.flags(1)
def test02_pdf(variant_scalar_rgb, interaction):
    from mitsuba.core.math import InvPi
    from mitsuba.render import BSDFContext
    from mitsuba.core.xml import load_string
    bsdf = load_string("""<bsdf version="2.0.0" type="twosided">
                              <bsdf type="diffuse"/>
                          </bsdf>""")
    interaction.wi = [0, 0, 1]
    ctx = BSDFContext()
    # Expected cosine-weighted pdf at normal incidence: cos(0)/pi = 1/pi.
    p_pdf = bsdf.pdf(ctx, interaction, [0, 0, 1])
    assert ek.allclose(p_pdf, InvPi)
    # Transmission through the surface is impossible: pdf must be zero.
    p_pdf = bsdf.pdf(ctx, interaction, [0, 0, -1])
    assert ek.allclose(p_pdf, 0.0)
def test03_sample_eval_pdf(variant_scalar_rgb, interaction):
    # Consistency check over a grid of incident directions and sample
    # points: sample(), eval() and pdf() must agree with each other.
    from mitsuba.core.math import InvPi
    from mitsuba.core.warp import square_to_uniform_sphere
    from mitsuba.render import BSDFContext
    from mitsuba.core.xml import load_string
    bsdf = load_string("""<bsdf version="2.0.0" type="twosided">
                              <bsdf type="diffuse">
                                  <rgb name="reflectance" value="0.1, 0.1, 0.1"/>
                              </bsdf>
                              <bsdf type="diffuse">
                                  <rgb name="reflectance" value="0.9, 0.9, 0.9"/>
                              </bsdf>
                          </bsdf>""")
    n = 5
    ctx = BSDFContext()
    for u in ek.arange(UInt32, n):
        for v in ek.arange(UInt32, n):
            # Incident direction swept over the whole sphere (both sides).
            interaction.wi = square_to_uniform_sphere([u / float(n-1),
                                                       v / float(n-1)])
            up = ek.dot(interaction.wi, [0, 0, 1]) > 0
            for x in ek.arange(UInt32, n):
                for y in ek.arange(UInt32, n):
                    sample = [x / float(n-1), y / float(n-1)]
                    (bs, s_value) = bsdf.sample(ctx, interaction, 0.5, sample)
                    if ek.any(s_value > 0):
                        # Multiply by square_to_cosine_hemisphere_theta
                        s_value *= bs.wo[2] * InvPi
                        if not up:
                            s_value *= -1
                        e_value = bsdf.eval(ctx, interaction, bs.wo)
                        p_pdf = bsdf.pdf(ctx, interaction, bs.wo)
                        assert ek.allclose(s_value, e_value, atol=1e-2)
                        assert ek.allclose(bs.pdf, p_pdf)
                        assert not ek.any(ek.isnan(e_value) | ek.isnan(s_value))
                    # Otherwise, sampling failed and we can't rely on bs.wo.
| StarcoderdataPython |
55678 | from steem import Steem
from datetime import datetime, date, timedelta
from math import ceil, log, isnan
import requests
API = 'https://api.steemjs.com/'
def tag_filter(tag, limit=10):
    """Return the most recently created posts carrying *tag*.

    Queries the Steem blockchain via ``Steem.get_discussions_by_created``.

    :param tag: tag name to search for
    :param limit: maximum number of posts to return (default 10)
    :return: list of discussion dicts as returned by the steem library
    """
    tag_search = Steem()
    tag_query = {
        "tag": tag,
        "limit": limit,
    }
    # NOTE: dead "yesterday only" filtering code and an ipdb breakpoint
    # were removed here; the function has always returned the raw query
    # result.
    return tag_search.get_discussions_by_created(tag_query)
def get_vp_rp(steemit_name):
    """Fetch an account and return [voting power, display reputation]."""
    url = '{}get_accounts?names[]=%5B%22{}%22%5D'.format(API, steemit_name)
    data = requests.get(url).json()[0]
    vp = data['voting_power']
    _reputation = data['reputation']
    _reputation = int(_reputation)
    rep = str(_reputation)
    # Negative raw reputations are processed via their absolute digits and
    # the sign is re-applied at the end.
    neg = True if rep[0] == '-' else False
    rep = rep[1:] if neg else rep
    srt = rep
    leadingDigits = int(srt[0:4])
    # NOTE(review): this appears to approximate the usual Steem display
    # reputation (roughly log10 of the raw value, rescaled into the ~25+
    # range), with e hard-coded as 2.71828 -- confirm against the reference
    # implementation before changing any of these constants.
    log_n = log(leadingDigits / log(10), 2.71828)
    n = len(srt) - 1
    out = n + (log_n - int(log_n))
    if isnan(out): out = 0
    out = max(out - 9, 0)
    out = (-1 * out) if neg else (1 * out)
    out = out * 9 + 25
    out = int(out)
    # vp is presumably stored as percent*100 by the API; ceil to a whole
    # percentage -- TODO confirm.
    return [ceil(vp / 100), out]
1605384 | # Description: Performs a sed-like substitution on the last message by the
# calling user
# Author: <NAME> <<EMAIL>>
# Website: http://pacopablo.com
# License: BSD
#
# BHJTW: ported to JSONBOT 27-8-2012
__author__ = '<NAME> <<EMAIL>>'
__license__ = "BSD"
__status__ = "seen"
## jsb imports
from jsb.lib.callbacks import callbacks
from jsb.lib.commands import cmnds
from jsb.lib.datadir import datadir
from jsb.utils.pdod import Pdod
from jsb.lib.persistconfig import PersistConfig
from jsb.lib.examples import examples
## basic imports
import os
import time
import re
import logging
## defines
cfg = PersistConfig()
cfg.define('cmd_req', 0)    # when set, substitutions require the explicit command form
cfg.define('channels', [])  # [botname, channel] pairs where sed is enabled
# sed-style substitution: s<delim><pattern><delim><replacement>[<delim>][gi]
sed_expression = r'^s([/|#.:;])(.*?)\1(.*?)\1?([gi]*)$'
sedre = re.compile(sed_expression)
## LastLine class
class LastLine(Pdod):
    """Remembers each user's last message per channel and applies
    sed-style substitutions to it (persisted via Pdod)."""
    def __init__(self):
        # self.data maps channel -> nick -> last message text.
        self.datadir = os.path.join(datadir, 'plugs', 'jsb.plugs.common.sed')
        Pdod.__init__(self, os.path.join(self.datadir, 'sed.data'))
        if not self.data:
            self.data = {}
    def handle_sed(self, bot, ievent):
        """ Perform substitution """
        target = [bot.cfg.name, ievent.channel]
        if target not in cfg.channels: logging.warn("sed is not enabled in %s" % str(target)) ; return
        ievent.untildone = True
        channel = ievent.channel.lower()
        nick = ievent.nick.lower()
        try:
            (delim, broke, fix, flags) = ievent.groups
        except ValueError:
            ievent.missing('<delim><broke><delim><fix><delim>')
            return
        try:
            source = self.data[channel][nick]
            # 'g' flag: replace every occurrence (count=0 means unlimited).
            if 'g' in flags:
                count = 0
            else:
                count = 1
            # 'i' flag: prepend the inline case-insensitive regex modifier.
            if 'i' in flags:
                broke = '(?i)'+broke
            new_text = re.sub(broke, fix, source, count)
            if source != new_text:
                ievent.reply("%s meant: %s" % (nick, new_text))
                return
        except KeyError:
            # No stored message for this nick/channel yet.
            ievent.reply('I wasn\'t listening to you. Try saying something first.')
        except Exception, ex:
            # Bad user-supplied regex: report instead of crashing.
            ievent.reply('Error processing regex: %s' % str(ex))
        ievent.done(silent=True)
    def precb(self, bot, ievent):
        # Pre-callback filter: only watch channels where sed is enabled and
        # skip messages already consumed as commands.
        if ievent.iscommand or ievent.regex: return False
        target = [bot.cfg.name, ievent.channel]
        if target not in cfg.channels: logging.debug("sed is not enabled in %s" % str(target)) ; return
        else: return True
    def privmsgcb(self, bot, ievent):
        # Message callback: either apply an inline s/// substitution (when
        # cmd_req is unset) or record the message as the user's last line.
        channel = ievent.channel.lower()
        nick = ievent.nick.lower()
        regex = sedre.match(ievent.txt)
        if not cfg.get('cmd_req') and regex:
            try:
                (delim, broke, fix, flags) = regex.groups()
            except ValueError:
                return
            try:
                source = self.data[channel][nick]
                if 'g' in flags:
                    count = 0
                else:
                    count = 1
                if 'i' in flags:
                    broke = '(?i)'+broke
                new_text = re.sub(broke, fix, source, count)
                if source != new_text:
                    ievent.reply("%s meant: %s" % (nick, new_text))
                    return
            except KeyError:
                return
            except Exception, ex:
                ievent.reply('Error processing regex: %s' % str(ex))
        self.data.setdefault(channel, {})
        # Only plain messages (not substitution commands) become the new
        # "last line" for this nick.
        if not regex: self.data[channel][nick] = ievent.txt
## defines

# Module-level LastLine tracker; instantiated in init() and used by the
# handle_sed command wrapper and shutdown().
lastline = None
## sed command
def handle_sed(bot, ievent):
    """Command wrapper: delegate the substitution to the module's
    LastLine instance (created in init())."""
    # Reading a module-level name does not require a ``global``
    # declaration; the original one was redundant and has been dropped.
    lastline.handle_sed(bot, ievent)
## init function
def init():
    """Plugin entry point: create the LastLine tracker and register the
    callbacks and the regex-triggered sed command."""
    global lastline
    lastline = LastLine()
    # Watch normal traffic on all transports so we always have a "last line"
    # per user; precb filters out commands and disabled channels.
    callbacks.add('PRIVMSG', lastline.privmsgcb, lastline.precb)
    callbacks.add('CONSOLE', lastline.privmsgcb, lastline.precb)
    callbacks.add('Message', lastline.privmsgcb, lastline.precb)
    # The sed command itself is triggered by the s/old/new/ regex.
    cmnds.add(sed_expression, handle_sed, 'USER', regex=True)
    examples.add('s', 'Perform substitution on last message spoken.', 's/foo/bar/')
    return 1
## sed-enable command
def handle_sedenable(bot, event):
    """Enable the sed plugin in the channel the command is issued in."""
    target = [bot.cfg.name, event.channel]
    # Idiomatic membership test (was ``not target in cfg.channels``).
    if target not in cfg.channels:
        cfg.channels.append(target)
        cfg.save()
        event.reply("sed enabled in %s" % str(target))
    else:
        event.reply("sed is already enabled in %s" % str(target))

cmnds.add("sed-enable", handle_sedenable, "OPER")
examples.add("sed-enable", "enable the sed plugin in the channel (the command is given in)", "sed-enable")
## sed-disable command
def handle_seddisable(bot, event):
    """Disable the sed plugin in the channel the command is issued in."""
    target = [bot.cfg.name, event.channel]
    try:
        cfg.channels.remove(target)
    except ValueError:
        # Channel was never enabled.
        event.reply("sed not enabled in %s" % str(target))
    else:
        cfg.save()
        event.reply("sed disabled in %s" % str(target))

cmnds.add("sed-disable", handle_seddisable, "OPER")
examples.add("sed-disable", "disable the sed plugin in the channel (the command is given in)", "sed-disable")
## sed-list command
def handle_sedlist(bot, event):
    """Reply with the list of [botname, channel] pairs where sed is enabled."""
    event.reply("sed enabled channels: ", cfg.channels)

cmnds.add("sed-list", handle_sedlist, "OPER")
examples.add("sed-list", "list sed enabled channels", "sed-list")
## shutdown function
def shutdown():
    """Persist the tracked last lines (via Pdod.save) when the plugin unloads."""
    if lastline: lastline.save()
#### BHJTW 23-01-2012
| StarcoderdataPython |
4923 | import tensorflow as tf
# Per-class binary-accuracy metrics for a multi-label classifier whose
# output columns are ordered [infiltrates, pneumonia, covid19, normal]
# (see get_metrics below).  Each metric compares one column of
# y_true/y_pred; the column index is fixed by the default ``i``.
@tf.function
def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0):
    """Binary accuracy of output column 0 ('infiltrates')."""
    return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1):
    """Binary accuracy of output column 1 ('pneumonia')."""
    return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Covid19(y_true, y_pred, i=2):
    """Binary accuracy of output column 2 ('covid19')."""
    return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Normal(y_true, y_pred, i=3):
    """Binary accuracy of output column 3 ('normal')."""
    return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    """ModelCheckpoint that skips saving during a warm-up period.

    Extra keyword argument:
        wait_epoch_warmup: number of initial epochs during which no
            checkpoint is written (None/0 disables the warm-up).
    """

    def __init__(self, *args, **kwargs):
        # Pop the custom kwarg BEFORE delegating: ModelCheckpoint.__init__
        # does not accept it.  The original used kwargs.get() and forwarded
        # the unknown kwarg to the base class; CustomEarlyStopping below
        # already pops its custom kwarg for exactly this reason.
        self.wait_epoch_warmup = kwargs.pop("wait_epoch_warmup", None)
        super().__init__(*args, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        """Save only once the warm-up window has elapsed."""
        if self.wait_epoch_warmup:
            if (epoch + 1) >= self.wait_epoch_warmup:
                super().on_epoch_end(epoch, logs)
            else:
                # Keep the base class's save cadence consistent.
                self.epochs_since_last_save += 1
                print(f"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})")
        else:
            super().on_epoch_end(epoch, logs)
class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
    """EarlyStopping that is inert for the first ``minimum_epochs`` epochs."""

    def __init__(self, *args, **kwargs):
        # Extract the custom kwarg in a single pop (the original did a
        # .get() followed by a .pop()); EarlyStopping.__init__ rejects
        # unknown keyword arguments.
        self.minimum_epochs = kwargs.pop("minimum_epochs", 0)
        super().__init__(*args, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        """Delegate to EarlyStopping only once the minimum epoch is reached."""
        if epoch >= self.minimum_epochs:
            super().on_epoch_end(epoch, logs)
def get_losses():
    """Return the training losses: a single binary cross-entropy loss."""
    return [tf.keras.losses.BinaryCrossentropy()]
def get_metrics(single_output_idx, add_normal=False):
    """Build the metric list for training.

    Args:
        single_output_idx: None for multi-label training (per-class
            accuracy functions), otherwise the index of the single class
            being trained (generic binary metrics).
        add_normal: in the multi-label case, also track the 'normal' class.

    Returns:
        List of Keras metrics / metric functions.
    """
    if single_output_idx is None:  # Multi-label
        print("###### Multi-label classification ######")
        metrics = [
            BinaryAccuracy_Infiltrates,
            BinaryAccuracy_Pneumonia,
            BinaryAccuracy_Covid19,
        ]
        if add_normal:
            # Optionally track the fourth ('normal') output column too.
            metrics.append(BinaryAccuracy_Normal)
        return metrics
    print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
    return [
        tf.keras.metrics.BinaryAccuracy(),
        tf.keras.metrics.AUC(),
        tf.keras.metrics.Precision(),
        tf.keras.metrics.Recall(),
    ]
def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
    """Create an ImageNet-pretrained backbone with a sigmoid classification head.

    Args:
        backbone: architecture name: 'resnet50', 'resnet50v2', 'resnet101v2',
            'vgg16', 'efficientnetb0' or 'efficientnetb7'.
        classes: number of sigmoid units in the output layer.
        target_size: (height, width) of the input images (3 channels assumed).
        freeze_base_model: when True, every backbone layer is set non-trainable.
        ignore_model: when truthy, skip building the model and return
            (None, preprocess_input) — useful to obtain only the
            backbone-specific preprocessing function.

    Returns:
        (model, preprocess_input) tuple; ``model`` is None when
        ``ignore_model`` is set.

    Raises:
        ValueError: if ``backbone`` is not one of the supported names.
    """
    istrainable = not freeze_base_model
    # Select backbone: import lazily so only the requested architecture's
    # module is loaded; each comes with its own preprocess_input.
    if backbone == "resnet50":
        from tensorflow.keras.applications.resnet import ResNet50 as TFModel
        from tensorflow.keras.applications.resnet import preprocess_input
    elif backbone == "resnet50v2":
        from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
        from tensorflow.keras.applications.resnet_v2 import preprocess_input
    elif backbone == "resnet101v2":
        from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
        from tensorflow.keras.applications.resnet_v2 import preprocess_input
    elif backbone == "vgg16":
        from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
        from tensorflow.keras.applications.vgg16 import preprocess_input
    elif backbone == "efficientnetb0":
        from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
        from tensorflow.keras.applications.efficientnet import preprocess_input
    elif backbone == "efficientnetb7":
        from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
        from tensorflow.keras.applications.efficientnet import preprocess_input
    else:
        raise ValueError(f"Unknown backbone: {backbone}")
    if ignore_model:
        model = None
    else:
        # Instantiate base model with pre-trained weights
        base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")
        # Freeze base model
        # base_model.trainable = istrainable
        # Set each layer individually rather than the model-level flag.
        for layers in base_model.layers:
            layers.trainable = istrainable
        # Create a new model on top
        inputs = base_model.input
        x = base_model(inputs)
        # Option A
        x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
        # Option B
        # x = tf.keras.layers.Flatten(name='flatten')(x)
        # x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
        # x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
        # Outputs: sigmoid head (independent per-class probabilities)
        outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)
        model = tf.keras.Model(inputs, outputs)
    return model, preprocess_input
def add_tabular_input(model, classes):
    """Extend ``model`` with a 2-feature tabular input branch.

    The image model's sigmoid outputs are concatenated with a dense
    sigmoid projection of the tabular features, then passed through a
    final sigmoid layer of size ``classes``.
    """
    # Two inputs: the original image input plus a 2-feature tabular one.
    image_input = model.input
    tabular_input = tf.keras.layers.Input(shape=(2,), name="input_2b")
    # Two pre-output branches of size ``classes`` each.
    image_branch = model.output
    tabular_branch = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(tabular_input)
    # Fuse both branches and produce the final predictions.
    merged = tf.keras.layers.Concatenate(axis=1)([image_branch, tabular_branch])
    final_output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(merged)
    return tf.keras.Model([image_input, tabular_input], final_output)
def unfreeze_base_model(model, n=None, unfreeze=True):
    """Unfreeze the last ``n`` layers of the backbone (``model.layers[1]``).

    Args:
        model: Keras model whose second layer is the backbone sub-model
            (the structure built by get_model).
        n: int -> unfreeze the last ``n`` layers; float in (0, 1] ->
            unfreeze that fraction of the layers; None -> ``idx`` stays 0.
        unfreeze: unused in the current implementation — TODO confirm
            whether it was meant to gate ``layer.trainable = True``.
    """
    base_model = model.layers[1].layers
    # Select number of layers to unfreeze
    idx = 0
    if n is not None:
        if isinstance(n, int):
            idx = n
            # NOTE(review): the slice below unfreezes the LAST ``idx``
            # layers, so this message ("len - idx") looks inverted — confirm.
            print(f"Unfreezing {len(base_model) - idx} layers")
        elif isinstance(n, float) and 0.0 < n <= 1.0:
            idx = int(len(base_model) * n)
            print(f"Unfreezing {idx} layers")
        else:
            raise ValueError("Invalid number of layers")
    # NOTE(review): when idx == 0 (e.g. n is None) the slice is
    # base_model[-0:], i.e. the WHOLE layer list, so every non-BatchNorm
    # layer gets unfrozen — confirm this is the intended default.
    # We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
    for layer in base_model[-idx:]:
        if not isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = True
| StarcoderdataPython |
3389342 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 21:47:01 2019
@author: zejiran.
"""
import year_analyzer_module as mod
def ejecutar_analizar_anio() -> None:
    """Prompt the user for a year and print the analyzer's verdict."""
    anio = int(input("Ingrese el año que desea analizar: "))
    # Delegate the actual analysis to the year_analyzer module.
    print(mod.analizar_anio(anio))
def iniciar_aplicacion() -> None:
    """Application bootstrap: greet the user and run one year analysis."""
    print("Bienvenido al analizador de años")
    ejecutar_analizar_anio()
# Main program: guard the interactive entry point so importing this
# module does not launch the prompt as a side effect.
if __name__ == "__main__":
    iniciar_aplicacion()
| StarcoderdataPython |
class TweetCounter(object):
    """Measurement that tallies the total number of tweets observed."""

    def __init__(self, **kwargs):
        # Running tally; kwargs are accepted for interface compatibility
        # with the other measurement classes but are ignored.
        self.counter = 0

    def add_tweet(self, tweet):
        """Record one tweet; the payload's content is irrelevant here."""
        self.counter = self.counter + 1

    def get(self):
        """Return the result as [(count, measurement_name)]."""
        return [(self.counter, self.get_name())]

    def get_name(self):
        """Human-readable name of this measurement."""
        return 'TweetCounter'

    def combine(self, new):
        """Fold another TweetCounter's tally into this one."""
        self.counter = self.counter + new.counter
class ReTweetCounter(object):
    """Measurement that counts only retweets (activities whose verb is 'share')."""

    def __init__(self, **kwargs):
        # Running retweet tally; kwargs are accepted but ignored.
        self.counter = 0

    def add_tweet(self, tweet):
        """Increment the tally when the activity's verb is 'share'."""
        verb = tweet['verb']
        if verb == 'share':
            self.counter = self.counter + 1

    def get(self):
        """Return the result as [(count, measurement_name)]."""
        return [(self.counter, self.get_name())]

    def get_name(self):
        """Human-readable name of this measurement."""
        return 'ReTweetCounter'

    def combine(self, new):
        """Fold another ReTweetCounter's tally into this one."""
        self.counter = self.counter + new.counter
measurement_class_list = [TweetCounter, ReTweetCounter]
| StarcoderdataPython |
3386889 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 1 14:44:28 2016
@author: poyu
Data Pre-processing for Feature Representation
"""
import os
import nibabel as nib
import numpy as np
#def load_dataset():
def normalized_3d(inputs):
    """Z-score-normalize the non-zero voxels of a volume.

    The mean and standard deviation are computed over non-zero voxels
    only; zero (background) voxels are left untouched.  Works for any
    array shape (the original required exactly 3-D input).

    Args:
        inputs: numpy array; it is copied, the original is not modified.

    Returns:
        A copy of ``inputs`` with every non-zero voxel replaced by
        ``(value - mean) / std``; an all-zero input is returned unchanged.
    """
    inputs = np.copy(inputs)
    mask = inputs != 0
    nonzero = inputs[mask]
    if nonzero.size == 0:
        # All-background volume: nothing to normalize.  (The original
        # code divided by a zero count here, producing NaN warnings
        # before returning the unchanged zeros anyway.)
        return inputs
    # Same numerics as the original: float32 sum / float32 non-zero count.
    mean_inputs = np.float32(inputs.sum()) / np.float32(nonzero.size)
    std_input = np.std(nonzero)
    # Vectorized boolean-mask update replaces the original O(x*y*z)
    # pure-Python triple loop.
    inputs[mask] = (inputs[mask] - mean_inputs) / std_input
    return inputs
def createDatasetFeatureRepresentation():
    """Crop, normalize and save the T1/FA/MD train/test datasets.

    Walks the current working directory for NIfTI volumes ("Brain" plus
    the modality tag in the filename), crops each modality to its fixed
    bounding box, z-score-normalizes the non-zero voxels via
    ``normalized_3d`` and saves the train/test arrays plus the training
    labels as .npy files under ``./numpy_data``.

    Note: the two ``print`` calls below were Python-2 print statements in
    the original (a SyntaxError on Python 3); they now use the function
    form, which produces the same space-separated output.
    """
    # Crop boundaries per modality: [x_min, x_max, y_min, y_max, z_min, z_max]
    boundary_t1 = [11, 167, 9, 207, 9, 153]
    t1_filepaths = [os.path.join(root,name) for root, dirs, files in os.walk(os.getcwd()) for name in files if "Brain" in name and "MR_T1" in name and name.endswith("nii")]
    t1_filepaths.sort()
    boundary_fa = [6, 84, 5, 104, 8, 80]
    fa_filepaths = [os.path.join(root,name) for root, dirs, files in os.walk(os.getcwd()) for name in files if "Brain" in name and "DT_FA" in name and name.endswith("nii")]
    fa_filepaths.sort()
    boundary_md = [6, 84, 5, 104, 8, 80]
    md_filepaths = [os.path.join(root,name) for root, dirs, files in os.walk(os.getcwd()) for name in files if "Brain" in name and "DT_MD" in name and name.endswith("nii")]
    md_filepaths.sort()
    # =============================Train========================================= #
    # Subject ids are 1-based; subtract 1 to index the sorted file lists.
    train_subject = np.array([5, 11, 17, 22, 27, 3, 10, 16, 20, 26, 4, 7, 8, 13, 25])-1
    train_label = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
    train_t1 = np.zeros((train_subject.shape[0],156,198,144), dtype = np.float32)
    # NOTE: the *_scaled names are ALIASES of the raw arrays (no copy); the
    # raw crop is overwritten in place by its normalized version.  Only the
    # scaled arrays are saved below, so this just avoids a second allocation.
    train_t1_scaled = train_t1
    train_fa = np.zeros((train_subject.shape[0],78,99,72), dtype = np.float32)
    train_fa_scaled = train_fa
    train_md = np.zeros((train_subject.shape[0],78,99,72), dtype = np.float32)
    train_md_scaled = train_md
    count_train = 0
    for sub in train_subject:
        img_t1 = nib.load(t1_filepaths[sub])
        t1_data = img_t1.get_data()
        train_t1[count_train,:] = t1_data[boundary_t1[0]:boundary_t1[1],boundary_t1[2]:boundary_t1[3],boundary_t1[4]:boundary_t1[5]]
        train_t1_scaled[count_train,:] = normalized_3d(train_t1[count_train,:])
        img_fa = nib.load(fa_filepaths[sub])
        fa_data = img_fa.get_data()
        train_fa[count_train,:] = fa_data[boundary_fa[0]:boundary_fa[1],boundary_fa[2]:boundary_fa[3],boundary_fa[4]:boundary_fa[5]]
        train_fa_scaled[count_train,:] = normalized_3d(train_fa[count_train,:])
        img_md = nib.load(md_filepaths[sub])
        md_data = img_md.get_data()
        train_md[count_train,:] = md_data[boundary_md[0]:boundary_md[1],boundary_md[2]:boundary_md[3],boundary_md[4]:boundary_md[5]]
        train_md_scaled[count_train,:] = normalized_3d(train_md[count_train,:])
        count_train = count_train+1
    # ==============================Test========================================= #
    test_subject = np.array([1, 2, 6, 9, 12, 14, 15, 18, 19, 21, 23, 24])-1
    test_t1 = np.zeros((test_subject.shape[0],156,198,144), dtype = np.float32)
    test_t1_scaled = test_t1
    test_fa = np.zeros((test_subject.shape[0],78,99,72), dtype = np.float32)
    test_fa_scaled = test_fa
    test_md = np.zeros((test_subject.shape[0],78,99,72), dtype = np.float32)
    test_md_scaled = test_md
    count_test = 0
    for sub in test_subject:
        img_t1 = nib.load(t1_filepaths[sub])
        t1_data = img_t1.get_data()
        test_t1[count_test,:] = t1_data[boundary_t1[0]:boundary_t1[1],boundary_t1[2]:boundary_t1[3],boundary_t1[4]:boundary_t1[5]]
        test_t1_scaled[count_test,:] = normalized_3d(test_t1[count_test,:])
        img_fa = nib.load(fa_filepaths[sub])
        fa_data = img_fa.get_data()
        test_fa[count_test,:] = fa_data[boundary_fa[0]:boundary_fa[1],boundary_fa[2]:boundary_fa[3],boundary_fa[4]:boundary_fa[5]]
        test_fa_scaled[count_test,:] = normalized_3d(test_fa[count_test,:])
        img_md = nib.load(md_filepaths[sub])
        md_data = img_md.get_data()
        test_md[count_test,:] = md_data[boundary_md[0]:boundary_md[1],boundary_md[2]:boundary_md[3],boundary_md[4]:boundary_md[5]]
        test_md_scaled[count_test,:] = normalized_3d(test_md[count_test,:])
        count_test = count_test+1
    print(train_t1_scaled.shape, train_fa_scaled.shape, train_md_scaled.shape)
    print(test_t1_scaled.shape, test_fa_scaled.shape, test_md_scaled.shape)
    if not os.path.exists(os.path.join(os.getcwd(), 'numpy_data')):
        os.makedirs(os.path.join(os.getcwd(), 'numpy_data'))
    #np.save('X_T1_train.npy', train_t1)
    np.save(os.path.join(os.getcwd(),'numpy_data','X_T1_scaled_train.npy'), train_t1_scaled)
    #np.save('X_FA_train.npy', train_fa)
    np.save(os.path.join(os.getcwd(),'numpy_data','X_FA_scaled_train.npy'), train_fa_scaled)
    #np.save('X_MD_train.npy', train_md)
    np.save(os.path.join(os.getcwd(),'numpy_data','X_MD_scaled_train.npy'), train_md_scaled)
    np.save(os.path.join(os.getcwd(),'numpy_data','y_train.npy'), train_label)
    #np.save('X_T1_test.npy', test_t1)
    np.save(os.path.join(os.getcwd(),'numpy_data','X_T1_scaled_test.npy'), test_t1_scaled)
    #np.save('X_FA_test.npy', test_fa)
    np.save(os.path.join(os.getcwd(),'numpy_data','X_FA_scaled_test.npy'), test_fa_scaled)
    #np.save('X_MD_test.npy', test_md)
    np.save(os.path.join(os.getcwd(),'numpy_data','X_MD_scaled_test.npy'), test_md_scaled)
def main():
    """Entry point: build and persist the preprocessed train/test datasets."""
    createDatasetFeatureRepresentation()
if __name__ == "__main__":
    main()
3289601 | from more_one_memo.web import web
from more_one_memo.web.model import WebConfig
import click
# CLI entry point.  Each option can also be supplied through an
# environment variable prefixed MORE_ONE_MEMO_WEB_ (auto_envvar_prefix).
@click.command(context_settings={'auto_envvar_prefix': 'MORE_ONE_MEMO_WEB'})
@click.option('--mongo-uri', help='MongoDB URI. Database name must be included', type=str, required=True, show_envvar=True)
@click.option('--slack-token', help='Slack token to get channel and user information', type=str, required=True, show_envvar=True)
@click.option('--host', help='Bind host address', type=str, default="127.0.0.1", show_default=True, show_envvar=True)
@click.option('--port', help='Bind port', type=int, default=8080, show_default=True, show_envvar=True)
def main(mongo_uri, slack_token, host, port):
    # Bundle the CLI options into a WebConfig and hand off to the runner.
    web_config = WebConfig(mongo_uri, slack_token, host, port)
    web.run(web_config)
| StarcoderdataPython |
26852 | <reponame>mmiyajima2/django-kantanoidc
import random
import secrets
import string
from logging import getLogger

from django.contrib.auth import get_user_model
from django.contrib.auth import login
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.generic.base import View

from .client import client
from .errors import IllegalStateError
# Module-level logger and the project's active Django user model.
logger = getLogger(__name__)
UserModel = get_user_model()
class Start(View):
    """Kick off the OIDC authorization-code flow.

    Generates per-session ``nonce`` and ``state`` tokens, stores them (and
    the callback ``redirect_uri``) in the session for later verification,
    then redirects the user agent to the provider's authorization URL.
    """
    http_method_names = ['get']

    def get(self, request, *args, **kwargs):
        chars = string.ascii_letters + string.digits
        # nonce/state are security tokens (replay/CSRF protection): use the
        # ``secrets`` CSPRNG instead of the predictable ``random`` PRNG.
        stored_nonce = ''.join(secrets.choice(chars) for _ in range(32))
        stored_state = ''.join(secrets.choice(chars) for _ in range(32))
        request.session['stored_nonce'] = stored_nonce
        request.session['stored_state'] = stored_state
        client.prepare(request)
        redirect_uri = \
            request.build_absolute_uri(reverse('kantanoidc:callback'))
        request.session['redirect_uri'] = redirect_uri
        return HttpResponseRedirect(
            client.build_starturl(redirect_uri, stored_nonce, stored_state)
        )
class Callback(View):
    """Handle the OIDC provider redirect: verify state, exchange the code,
    and log the matching local user in."""
    http_method_names = ['get']
    def get(self, request, *args, **kwargs):
        """Process the provider redirect and log the mapped user in.

        Raises:
            IllegalStateError: when the returned ``state`` does not match
                the value stored in the session (CSRF protection).
            UserModel.DoesNotExist: when no local user matches ``sub``.
        """
        state = request.GET.get('state')
        # CSRF protection: the state value must round-trip unchanged.
        if state != request.session['stored_state']:
            raise IllegalStateError('state <> stored_state')
        code = request.GET.get('code')
        stored_nonce = request.session['stored_nonce']
        redirect_uri = request.session['redirect_uri']
        # Exchange the authorization code for the token's subject ('sub').
        sub = client.get_sub(redirect_uri, code, stored_nonce)
        logger.info('%s coming at CallbackView', sub)
        try:
            # Local accounts are looked up by natural key == OIDC 'sub'.
            user = UserModel.objects.get_by_natural_key(sub)
        except UserModel.DoesNotExist as e:
            logger.error('username=%s, does not exists', sub)
            raise e
        login(request, user)
        nexturl = client.build_nexturl(request)
        return HttpResponseRedirect(nexturl)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.