hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7900996eb1d7fb9f6c9c731ffebd5a50edb0db98 | 374 | py | Python | scale_image.py | JorgeGarciaIrazabal/ml-face-detector | 11321ac13fdb02c17072f134a0a838779b483cfe | [
"Apache-2.0"
] | null | null | null | scale_image.py | JorgeGarciaIrazabal/ml-face-detector | 11321ac13fdb02c17072f134a0a838779b483cfe | [
"Apache-2.0"
] | null | null | null | scale_image.py | JorgeGarciaIrazabal/ml-face-detector | 11321ac13fdb02c17072f134a0a838779b483cfe | [
"Apache-2.0"
] | null | null | null | #%%
import cv2
from pathlib import Path
#%%
root = Path(__file__).resolve().absolute().parent
jorge_path = root / "jorge"
jorge_dst_path = root / "jorge_100"
marissa_path = root / "marissa"
marissa_dst_path = root / "marissa_100"
#%%
for f in jorge_path.iterdir():
old_image = cv2.imread(str(f))
image = cv2.resize(old_image, 100)
print(image) | 22 | 50 | 0.663102 |
import cv2
from pathlib import Path
root = Path(__file__).resolve().absolute().parent
jorge_path = root / "jorge"
jorge_dst_path = root / "jorge_100"
marissa_path = root / "marissa"
marissa_dst_path = root / "marissa_100"
for f in jorge_path.iterdir():
old_image = cv2.imread(str(f))
image = cv2.resize(old_image, 100)
print(image) | true | true |
7900999d8d9a583be7467034c092ce097ab720c0 | 441 | py | Python | data/scripts/templates/object/tangible/deed/faction_perk/hq/shared_hq_s05.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/deed/faction_perk/hq/shared_hq_s05.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/deed/faction_perk/hq/shared_hq_s05.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/faction_perk/hq/shared_hq_s05.iff"
result.attribute_template_id = 2
result.stfName("deed","hq_s05")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 25.941176 | 75 | 0.721088 | true | true | |
790099a3b5a04d1fd21626d7782c716678795487 | 514 | py | Python | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-netCDF4.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 3 | 2018-11-27T06:30:23.000Z | 2021-05-30T15:56:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-netCDF4.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2018-11-15T02:00:31.000Z | 2021-12-06T02:20:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-netCDF4.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2020-11-06T18:46:35.000Z | 2020-11-06T18:46:35.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2015-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# netCDF4 (tested with v.1.1.9) has some hidden imports
hiddenimports = ['netCDF4.utils', 'netcdftime']
| 39.538462 | 78 | 0.529183 |
hiddenimports = ['netCDF4.utils', 'netcdftime']
| true | true |
79009a0e172bb334196352ff179af9fc27fa35c8 | 18,400 | py | Python | illustrip.py | ksburaya/aphantasia | de9d430dee7108abfcb1b19eb2d8d806b8e5d899 | [
"MIT"
] | 1 | 2021-11-17T10:17:47.000Z | 2021-11-17T10:17:47.000Z | illustrip.py | ksburaya/aphantasia | de9d430dee7108abfcb1b19eb2d8d806b8e5d899 | [
"MIT"
] | null | null | null | illustrip.py | ksburaya/aphantasia | de9d430dee7108abfcb1b19eb2d8d806b8e5d899 | [
"MIT"
] | null | null | null | # coding: UTF-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import warnings
warnings.filterwarnings("ignore")
import argparse
import numpy as np
import shutil
import PIL
import time
from imageio import imread, imsave
from googletrans import Translator
import torch
import torchvision
import torch.nn.functional as F
from torchvision import transforms as T
import clip
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from clip_fft import to_valid_rgb, fft_image, resume_fft, pixel_image
from utils import slice_imgs, derivat, sim_func, slerp, basename, file_list, img_list, img_read, pad_up_to, txt_clean, latent_anima, cvshow, checkout, save_cfg, old_torch
import transforms
try: # progress bar for notebooks
get_ipython().__class__.__name__
from progress_bar import ProgressIPy as ProgressBar
except: # normal console
from progress_bar import ProgressBar
clip_models = ['ViT-B/16', 'ViT-B/32', 'RN50', 'RN50x4', 'RN50x16', 'RN101']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')
parser.add_argument('-t', '--in_txt', default=None, help='Text string or file to process (main topic)')
parser.add_argument('-pre', '--in_txt_pre', default=None, help='Prefix for input text')
parser.add_argument('-post', '--in_txt_post', default=None, help='Postfix for input text')
parser.add_argument('-t2', '--in_txt2', default=None, help='Text string or file to process (style)')
parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')
parser.add_argument('-im', '--in_img', default=None, help='input image or directory with images')
parser.add_argument('-w0', '--weight0', default=0.3, type=float, help='weight for subtraction')
parser.add_argument('-w2', '--weight2', default=0.5, type=float, help='weight for style')
parser.add_argument('-wi', '--weight_img', default=0.5, type=float, help='weight for images')
parser.add_argument('-r', '--resume', default=None, help='Resume from saved params or from an image')
parser.add_argument( '--out_dir', default='_out')
parser.add_argument('-tr', '--translate', action='store_true', help='Translate with Google Translate')
parser.add_argument( '--invert', action='store_true', help='Invert criteria')
parser.add_argument('-v', '--verbose', default=True, type=bool)
# training
parser.add_argument( '--gen', default='RGB', help='Generation (optimization) method: FFT or RGB')
parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')
parser.add_argument( '--steps', default=300, type=int, help='Iterations (frames) per scene (text line)')
parser.add_argument( '--samples', default=100, type=int, help='Samples to evaluate per frame')
parser.add_argument('-lr', '--lrate', default=1, type=float, help='Learning rate')
# motion
parser.add_argument('-opt', '--opt_step', default=1, type=int, help='How many optimizing steps per save/transform step')
parser.add_argument('-sm', '--smooth', action='store_true', help='Smoothen interframe jittering for FFT method')
parser.add_argument('-it', '--interpol', default=True, help='Interpolate topics? (or change by cut)')
parser.add_argument( '--fstep', default=100, type=int, help='How many frames before changing motion')
parser.add_argument( '--scale', default=0.012, type=float)
parser.add_argument( '--shift', default=10., type=float, help='in pixels')
parser.add_argument( '--angle', default=0.8, type=float, help='in degrees')
parser.add_argument( '--shear', default=0.4, type=float)
parser.add_argument( '--anima', default=True, help='Animate motion')
# tweaks
parser.add_argument('-a', '--align', default='overscan', choices=['central', 'uniform', 'overscan', 'overmax'], help='Sampling distribution')
parser.add_argument('-tf', '--transform', default='custom', choices=['none', 'custom', 'elastic'], help='use augmenting transforms?')
parser.add_argument( '--contrast', default=1.2, type=float)
parser.add_argument( '--colors', default=2, type=float)
parser.add_argument('-sh', '--sharp', default=None, type=float)
parser.add_argument('-mc', '--macro', default=0.4, type=float, help='Endorse macro forms 0..1 ')
parser.add_argument('-e', '--enforce', default=0, type=float, help='Enforce details (by boosting similarity between two parallel samples)')
parser.add_argument('-x', '--expand', default=0, type=float, help='Boosts diversity (by enforcing difference between prev/next samples)')
parser.add_argument('-n', '--noise', default=2., type=float, help='Add noise to make composition sparse (FFT only)') # 0.04
parser.add_argument( '--sim', default='mix', help='Similarity function (angular/spherical/mixed; None = cossim)')
parser.add_argument( '--rem', default=None, help='Dummy text to add to project name')
a = parser.parse_args()
if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]
if len(a.size)==1: a.size = a.size * 2
a.gen = a.gen.upper()
a.invert = -1. if a.invert is True else 1.
# Overriding some parameters, depending on other settings
if a.gen == 'RGB':
a.smooth = False
a.align = 'overscan'
if a.sharp is None: a.sharp = -1. if a.gen == 'RGB' else 1.
if a.model == 'ViT-B/16': a.sim = 'cossim'
return a
def frame_transform(img, size, angle, shift, scale, shear):
if old_torch(): # 1.7.1
img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)
img = T.functional.center_crop(img, size)
img = pad_up_to(img, size)
else: # 1.8+
img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)
img = T.functional.center_crop(img, size) # on 1.8+ also pads
return img
def main():
a = get_args()
# Load CLIP models
model_clip, _ = clip.load(a.model, jit=old_torch())
try:
a.modsize = model_clip.visual.input_resolution
except:
a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224
if a.verbose is True: print(' using model', a.model)
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if a.model in xmem.keys():
a.samples = int(a.samples * xmem[a.model])
if a.translate:
translator = Translator()
if a.enforce != 0:
a.samples = int(a.samples * 0.5)
if 'elastic' in a.transform:
trform_f = transforms.transforms_elastic
a.samples = int(a.samples * 0.95)
elif 'custom' in a.transform:
trform_f = transforms.transforms_custom
a.samples = int(a.samples * 0.95)
else:
trform_f = transforms.normalize()
def enc_text(txt):
if a.translate:
txt = translator.translate(txt, dest='en').text
emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77])
return emb.detach().clone()
def enc_image(img_file):
img_t = torch.from_numpy(img_read(img_file)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]
in_sliced = slice_imgs([img_t], a.samples, a.modsize, transforms.normalize(), a.align)[0]
emb = model_clip.encode_image(in_sliced)
return emb.detach().clone()
# Encode inputs
count = 0
texts = []
styles = []
images = []
if a.in_txt is not None:
if os.path.isfile(a.in_txt):
with open(a.in_txt, 'r', encoding="utf-8") as f:
texts = f.readlines()
texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']
else:
texts = [a.in_txt]
if a.in_txt_pre is not None:
texts = [' '.join([a.in_txt_pre, tt]).strip() for tt in texts]
if a.in_txt_post is not None:
texts = [' '.join([tt, a.in_txt_post]).strip() for tt in texts]
key_txt_encs = [enc_text(txt) for txt in texts]
count = max(count, len(key_txt_encs))
if a.in_txt2 is not None:
if os.path.isfile(a.in_txt2):
with open(a.in_txt2, 'r', encoding="utf-8") as f:
styles = f.readlines()
styles = [tt.strip() for tt in styles if len(tt.strip()) > 0 and tt[0] != '#']
else:
styles = [a.in_txt2]
key_styl_encs = [enc_text(style) for style in styles]
count = max(count, len(key_styl_encs))
if a.in_img is not None and os.path.exists(a.in_img):
images = file_list(a.in_img) if os.path.isdir(a.in_img) else [a.in_img]
key_img_encs = [enc_image(image) for image in images]
count = max(count, len(key_img_encs))
assert count > 0, "No inputs found!"
if a.in_txt0 is not None:
if a.verbose is True: print(' subtract text:', a.in_txt0)
if a.translate:
a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
# if a.verbose is True: print(' translated to:', a.in_txt0)
anti_txt_encs = [enc_text(txt) for txt in a.in_txt0.split('.')]
if a.verbose is True: print(' samples:', a.samples)
global params_tmp
shape = [1, 3, *a.size]
if a.gen == 'RGB':
params_tmp, _, sz = pixel_image(shape, a.resume)
params_tmp = params_tmp[0].cuda().detach()
else:
params_tmp, sz = resume_fft(a.resume, shape, decay=1.5, sd=1)
if sz is not None: a.size = sz
# [glob]steps = for save/move, opt_steps = for optimization cycle
steps = a.steps
glob_steps = count * steps
opt_steps = steps * a.opt_step
if glob_steps == a.fstep: a.fstep = glob_steps // 2 # otherwise no motion
workname = basename(a.in_txt) if a.in_txt is not None else basename(a.in_img)
workname = txt_clean(workname)
workdir = os.path.join(a.out_dir, workname)
if a.rem is not None: workdir += '-%s' % a.rem
if 'RN' in a.model.upper(): workdir += '-%s' % a.model
if a.noise > 0: workdir += '-n%.2g' % a.noise
if a.macro > 0: workdir += '-m%.2g' % a.macro
if a.smooth is True: workdir += '-sm'
if a.transform != 'custom': workdir += '-tf%s' % a.transform
if a.gen == 'RGB': workdir += '-rgb'
tempdir = os.path.join(workdir, 'ttt')
os.makedirs(tempdir, exist_ok=True)
save_cfg(a, workdir)
if a.in_txt is not None and os.path.isfile(a.in_txt):
shutil.copy(a.in_txt, os.path.join(workdir, os.path.basename(a.in_txt)))
if a.in_txt2 is not None and os.path.isfile(a.in_txt2):
shutil.copy(a.in_txt2, os.path.join(workdir, os.path.basename(a.in_txt2)))
midp = 0.5
if a.anima:
if a.gen == 'RGB': # zoom in
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[-0.3], verbose=False)
m_scale = 1 + (m_scale + 0.3) * a.scale
else:
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[0.6], verbose=False)
m_scale = 1 - (m_scale-0.6) * a.scale
m_shift = latent_anima([2], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp,midp], verbose=False)
m_angle = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shear = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shift = (midp-m_shift) * a.shift * abs(m_scale-1) / a.scale
m_angle = (midp-m_angle) * a.angle * abs(m_scale-1) / a.scale
m_shear = (midp-m_shear) * a.shear * abs(m_scale-1) / a.scale
def get_encs(encs, num):
cnt = len(encs)
if cnt == 0: return []
enc_1 = encs[min(num, cnt-1)]
enc_2 = encs[min(num+1, cnt-1)]
return slerp(enc_1, enc_2, opt_steps)
prev_enc = 0
def process(num):
global params_tmp, opt_state, params, image_f, optimizer
if a.interpol is True: # linear topics interpolation
txt_encs = get_encs(key_txt_encs, num)
styl_encs = get_encs(key_styl_encs, num)
img_encs = get_encs(key_img_encs, num)
else: # change by cut
txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * opt_steps if len(key_txt_encs) > 0 else []
styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * opt_steps if len(key_styl_encs) > 0 else []
img_encs = [key_img_encs[min(num, len(key_img_encs)-1)][0]] * opt_steps if len(key_img_encs) > 0 else []
if a.verbose is True:
if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80])
if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80])
if len(images) > 0: print(' ref image: ', basename(images[min(num, len(images)-1)])[:80])
pbar = ProgressBar(steps)
for ii in range(opt_steps):
glob_step = num * steps + ii // a.opt_step # save/transform
loss = 0
txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None
styl_enc = styl_encs[ii % len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None
img_enc = img_encs[ii % len(img_encs)].unsqueeze(0) if len(img_encs) > 0 else None
# MOTION: transform frame, reload params
if ii % a.opt_step == 0:
scale = m_scale[glob_step] if a.anima else 1 + a.scale
shift = tuple(m_shift[glob_step]) if a.anima else [0, a.shift]
angle = m_angle[glob_step][0] if a.anima else a.angle
shear = m_shear[glob_step][0] if a.anima else a.shear
if a.gen == 'RGB':
img_tmp = frame_transform(params_tmp, a.size, angle, shift, scale, shear)
params, image_f, _ = pixel_image([1, 3, *a.size], resume=img_tmp)
else: # FFT
if old_torch(): # 1.7.1
img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=a.size)
img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
params_tmp = torch.rfft(img_tmp, 2, normalized=True)
else: # 1.8+
if type(params_tmp) is not torch.complex64:
params_tmp = torch.view_as_complex(params_tmp)
img_tmp = torch.fft.irfftn(params_tmp, s=a.size, norm='ortho')
img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
params_tmp = torch.fft.rfftn(img_tmp, s=a.size, dim=[2,3], norm='ortho')
params_tmp = torch.view_as_real(params_tmp)
params, image_f, _ = fft_image([1, 3, *a.size], sd=1, resume=params_tmp)
optimizer = torch.optim.Adam(params, a.lrate)
# optimizer = torch.optim.AdamW(params, a.lrate, weight_decay=0.01, amsgrad=True)
image_f = to_valid_rgb(image_f, colors = a.colors)
del img_tmp
if a.smooth is True and num + ii > 0:
optimizer.load_state_dict(opt_state)
noise = a.noise * (torch.rand(1, 1, a.size[0], a.size[1]//2+1, 1)-0.5).cuda() if a.noise>0 else 0.
img_out = image_f(noise)
img_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
out_enc = model_clip.encode_image(img_sliced)
if a.gen == 'RGB': # empirical hack
loss += 1.66 * abs(img_out.mean((2,3)) - 0.45).sum() # fix brightness
loss += 1.66 * abs(img_out.std((2,3)) - 0.17).sum() # fix contrast
if txt_enc is not None:
loss -= a.invert * sim_func(txt_enc, out_enc, a.sim)
if styl_enc is not None:
loss -= a.weight2 * sim_func(styl_enc, out_enc, a.sim)
if img_enc is not None:
loss -= a.weight_img * sim_func(img_enc, out_enc, a.sim)
if a.in_txt0 is not None: # subtract text
for anti_txt_enc in anti_txt_encs:
loss += 0.3 * sim_func(anti_txt_enc, out_enc, a.sim)
if a.sharp != 0: # scharr|sobel|naive
loss -= a.sharp * derivat(img_out, mode='naive')
if a.enforce != 0:
img_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
out_enc2 = model_clip.encode_image(img_sliced)
loss -= a.enforce * sim_func(out_enc, out_enc2, a.sim)
del out_enc2; torch.cuda.empty_cache()
if a.expand > 0:
global prev_enc
if ii > 0:
loss += a.expand * sim_func(prev_enc, out_enc, a.sim)
prev_enc = out_enc.detach().clone()
del img_out, img_sliced, out_enc; torch.cuda.empty_cache()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ii % a.opt_step == a.opt_step-1:
params_tmp = params[0].detach().clone()
if a.smooth is True:
opt_state = optimizer.state_dict()
if ii % a.opt_step == 0:
with torch.no_grad():
img_t = image_f(contrast=a.contrast)[0].permute(1,2,0)
img = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8)
imsave(os.path.join(tempdir, '%06d.jpg' % glob_step), img, quality=95)
if a.verbose is True: cvshow(img)
del img, img_t
pbar.upd()
params_tmp = params[0].detach().clone()
glob_start = time.time()
try:
for i in range(count):
process(i)
except KeyboardInterrupt:
pass
os.system('ffmpeg -v warning -y -i %s/\%%06d.jpg "%s.mp4"' % (tempdir, os.path.join(workdir, workname)))
if __name__ == '__main__':
main()
| 49.329759 | 170 | 0.598804 |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import warnings
warnings.filterwarnings("ignore")
import argparse
import numpy as np
import shutil
import PIL
import time
from imageio import imread, imsave
from googletrans import Translator
import torch
import torchvision
import torch.nn.functional as F
from torchvision import transforms as T
import clip
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from clip_fft import to_valid_rgb, fft_image, resume_fft, pixel_image
from utils import slice_imgs, derivat, sim_func, slerp, basename, file_list, img_list, img_read, pad_up_to, txt_clean, latent_anima, cvshow, checkout, save_cfg, old_torch
import transforms
try:
get_ipython().__class__.__name__
from progress_bar import ProgressIPy as ProgressBar
except:
from progress_bar import ProgressBar
clip_models = ['ViT-B/16', 'ViT-B/32', 'RN50', 'RN50x4', 'RN50x16', 'RN101']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')
parser.add_argument('-t', '--in_txt', default=None, help='Text string or file to process (main topic)')
parser.add_argument('-pre', '--in_txt_pre', default=None, help='Prefix for input text')
parser.add_argument('-post', '--in_txt_post', default=None, help='Postfix for input text')
parser.add_argument('-t2', '--in_txt2', default=None, help='Text string or file to process (style)')
parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')
parser.add_argument('-im', '--in_img', default=None, help='input image or directory with images')
parser.add_argument('-w0', '--weight0', default=0.3, type=float, help='weight for subtraction')
parser.add_argument('-w2', '--weight2', default=0.5, type=float, help='weight for style')
parser.add_argument('-wi', '--weight_img', default=0.5, type=float, help='weight for images')
parser.add_argument('-r', '--resume', default=None, help='Resume from saved params or from an image')
parser.add_argument( '--out_dir', default='_out')
parser.add_argument('-tr', '--translate', action='store_true', help='Translate with Google Translate')
parser.add_argument( '--invert', action='store_true', help='Invert criteria')
parser.add_argument('-v', '--verbose', default=True, type=bool)
parser.add_argument( '--gen', default='RGB', help='Generation (optimization) method: FFT or RGB')
parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')
parser.add_argument( '--steps', default=300, type=int, help='Iterations (frames) per scene (text line)')
parser.add_argument( '--samples', default=100, type=int, help='Samples to evaluate per frame')
parser.add_argument('-lr', '--lrate', default=1, type=float, help='Learning rate')
parser.add_argument('-opt', '--opt_step', default=1, type=int, help='How many optimizing steps per save/transform step')
parser.add_argument('-sm', '--smooth', action='store_true', help='Smoothen interframe jittering for FFT method')
parser.add_argument('-it', '--interpol', default=True, help='Interpolate topics? (or change by cut)')
parser.add_argument( '--fstep', default=100, type=int, help='How many frames before changing motion')
parser.add_argument( '--scale', default=0.012, type=float)
parser.add_argument( '--shift', default=10., type=float, help='in pixels')
parser.add_argument( '--angle', default=0.8, type=float, help='in degrees')
parser.add_argument( '--shear', default=0.4, type=float)
parser.add_argument( '--anima', default=True, help='Animate motion')
parser.add_argument('-a', '--align', default='overscan', choices=['central', 'uniform', 'overscan', 'overmax'], help='Sampling distribution')
parser.add_argument('-tf', '--transform', default='custom', choices=['none', 'custom', 'elastic'], help='use augmenting transforms?')
parser.add_argument( '--contrast', default=1.2, type=float)
parser.add_argument( '--colors', default=2, type=float)
parser.add_argument('-sh', '--sharp', default=None, type=float)
parser.add_argument('-mc', '--macro', default=0.4, type=float, help='Endorse macro forms 0..1 ')
parser.add_argument('-e', '--enforce', default=0, type=float, help='Enforce details (by boosting similarity between two parallel samples)')
parser.add_argument('-x', '--expand', default=0, type=float, help='Boosts diversity (by enforcing difference between prev/next samples)')
parser.add_argument('-n', '--noise', default=2., type=float, help='Add noise to make composition sparse (FFT only)')
parser.add_argument( '--sim', default='mix', help='Similarity function (angular/spherical/mixed; None = cossim)')
parser.add_argument( '--rem', default=None, help='Dummy text to add to project name')
a = parser.parse_args()
if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]
if len(a.size)==1: a.size = a.size * 2
a.gen = a.gen.upper()
a.invert = -1. if a.invert is True else 1.
if a.gen == 'RGB':
a.smooth = False
a.align = 'overscan'
if a.sharp is None: a.sharp = -1. if a.gen == 'RGB' else 1.
if a.model == 'ViT-B/16': a.sim = 'cossim'
return a
def frame_transform(img, size, angle, shift, scale, shear):
if old_torch():
img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)
img = T.functional.center_crop(img, size)
img = pad_up_to(img, size)
else:
img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)
img = T.functional.center_crop(img, size)
return img
def main():
a = get_args()
model_clip, _ = clip.load(a.model, jit=old_torch())
try:
a.modsize = model_clip.visual.input_resolution
except:
a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224
if a.verbose is True: print(' using model', a.model)
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if a.model in xmem.keys():
a.samples = int(a.samples * xmem[a.model])
if a.translate:
translator = Translator()
if a.enforce != 0:
a.samples = int(a.samples * 0.5)
if 'elastic' in a.transform:
trform_f = transforms.transforms_elastic
a.samples = int(a.samples * 0.95)
elif 'custom' in a.transform:
trform_f = transforms.transforms_custom
a.samples = int(a.samples * 0.95)
else:
trform_f = transforms.normalize()
def enc_text(txt):
if a.translate:
txt = translator.translate(txt, dest='en').text
emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77])
return emb.detach().clone()
def enc_image(img_file):
img_t = torch.from_numpy(img_read(img_file)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]
in_sliced = slice_imgs([img_t], a.samples, a.modsize, transforms.normalize(), a.align)[0]
emb = model_clip.encode_image(in_sliced)
return emb.detach().clone()
count = 0
texts = []
styles = []
images = []
if a.in_txt is not None:
if os.path.isfile(a.in_txt):
with open(a.in_txt, 'r', encoding="utf-8") as f:
texts = f.readlines()
texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']
else:
texts = [a.in_txt]
if a.in_txt_pre is not None:
texts = [' '.join([a.in_txt_pre, tt]).strip() for tt in texts]
if a.in_txt_post is not None:
texts = [' '.join([tt, a.in_txt_post]).strip() for tt in texts]
key_txt_encs = [enc_text(txt) for txt in texts]
count = max(count, len(key_txt_encs))
if a.in_txt2 is not None:
if os.path.isfile(a.in_txt2):
with open(a.in_txt2, 'r', encoding="utf-8") as f:
styles = f.readlines()
styles = [tt.strip() for tt in styles if len(tt.strip()) > 0 and tt[0] != '#']
else:
styles = [a.in_txt2]
key_styl_encs = [enc_text(style) for style in styles]
count = max(count, len(key_styl_encs))
if a.in_img is not None and os.path.exists(a.in_img):
images = file_list(a.in_img) if os.path.isdir(a.in_img) else [a.in_img]
key_img_encs = [enc_image(image) for image in images]
count = max(count, len(key_img_encs))
assert count > 0, "No inputs found!"
if a.in_txt0 is not None:
if a.verbose is True: print(' subtract text:', a.in_txt0)
if a.translate:
a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
anti_txt_encs = [enc_text(txt) for txt in a.in_txt0.split('.')]
if a.verbose is True: print(' samples:', a.samples)
global params_tmp
shape = [1, 3, *a.size]
if a.gen == 'RGB':
params_tmp, _, sz = pixel_image(shape, a.resume)
params_tmp = params_tmp[0].cuda().detach()
else:
params_tmp, sz = resume_fft(a.resume, shape, decay=1.5, sd=1)
if sz is not None: a.size = sz
steps = a.steps
glob_steps = count * steps
opt_steps = steps * a.opt_step
if glob_steps == a.fstep: a.fstep = glob_steps // 2
workname = basename(a.in_txt) if a.in_txt is not None else basename(a.in_img)
workname = txt_clean(workname)
workdir = os.path.join(a.out_dir, workname)
if a.rem is not None: workdir += '-%s' % a.rem
if 'RN' in a.model.upper(): workdir += '-%s' % a.model
if a.noise > 0: workdir += '-n%.2g' % a.noise
if a.macro > 0: workdir += '-m%.2g' % a.macro
if a.smooth is True: workdir += '-sm'
if a.transform != 'custom': workdir += '-tf%s' % a.transform
if a.gen == 'RGB': workdir += '-rgb'
tempdir = os.path.join(workdir, 'ttt')
os.makedirs(tempdir, exist_ok=True)
save_cfg(a, workdir)
if a.in_txt is not None and os.path.isfile(a.in_txt):
shutil.copy(a.in_txt, os.path.join(workdir, os.path.basename(a.in_txt)))
if a.in_txt2 is not None and os.path.isfile(a.in_txt2):
shutil.copy(a.in_txt2, os.path.join(workdir, os.path.basename(a.in_txt2)))
midp = 0.5
if a.anima:
if a.gen == 'RGB':
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[-0.3], verbose=False)
m_scale = 1 + (m_scale + 0.3) * a.scale
else:
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[0.6], verbose=False)
m_scale = 1 - (m_scale-0.6) * a.scale
m_shift = latent_anima([2], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp,midp], verbose=False)
m_angle = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shear = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shift = (midp-m_shift) * a.shift * abs(m_scale-1) / a.scale
m_angle = (midp-m_angle) * a.angle * abs(m_scale-1) / a.scale
m_shear = (midp-m_shear) * a.shear * abs(m_scale-1) / a.scale
def get_encs(encs, num):
cnt = len(encs)
if cnt == 0: return []
enc_1 = encs[min(num, cnt-1)]
enc_2 = encs[min(num+1, cnt-1)]
return slerp(enc_1, enc_2, opt_steps)
prev_enc = 0  # CLIP encoding of the previous step's render (used by --expand)
def process(num):
    """Optimize and save frames for keyframe segment *num*.

    Runs `opt_steps` Adam iterations against the CLIP text/style/image
    targets.  Every `a.opt_step` iterations the current image is warped by
    the motion transform (scale/shift/angle/shear), the image parameters
    and optimizer are rebuilt from the warped image, and a JPEG frame is
    written to `tempdir`.
    """
    global params_tmp, opt_state, params, image_f, optimizer
    if a.interpol is True:
        # slerp between consecutive keyframe encodings across the segment
        txt_encs = get_encs(key_txt_encs, num)
        styl_encs = get_encs(key_styl_encs, num)
        img_encs = get_encs(key_img_encs, num)
    else:
        # hard cut: hold this segment's keyframe encoding for all opt_steps
        txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * opt_steps if len(key_txt_encs) > 0 else []
        styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * opt_steps if len(key_styl_encs) > 0 else []
        img_encs = [key_img_encs[min(num, len(key_img_encs)-1)][0]] * opt_steps if len(key_img_encs) > 0 else []
    if a.verbose is True:
        if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80])
        if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80])
        if len(images) > 0: print(' ref image: ', basename(images[min(num, len(images)-1)])[:80])
    pbar = ProgressBar(steps)
    for ii in range(opt_steps):
        glob_step = num * steps + ii // a.opt_step  # global frame index
        loss = 0
        # cycle through the (possibly interpolated) encodings for this step
        txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None
        styl_enc = styl_encs[ii % len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None
        img_enc = img_encs[ii % len(img_encs)].unsqueeze(0) if len(img_encs) > 0 else None
        # Motion step: warp the current image and rebuild params + optimizer.
        if ii % a.opt_step == 0:
            scale = m_scale[glob_step] if a.anima else 1 + a.scale
            shift = tuple(m_shift[glob_step]) if a.anima else [0, a.shift]
            angle = m_angle[glob_step][0] if a.anima else a.angle
            shear = m_shear[glob_step][0] if a.anima else a.shear
            if a.gen == 'RGB':
                img_tmp = frame_transform(params_tmp, a.size, angle, shift, scale, shear)
                params, image_f, _ = pixel_image([1, 3, *a.size], resume=img_tmp)
            else:
                if old_torch():
                    # pre-1.8 torch: legacy real-FFT API
                    img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=a.size)
                    img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
                    params_tmp = torch.rfft(img_tmp, 2, normalized=True)
                else:
                    # modern torch: complex tensors + torch.fft module
                    if type(params_tmp) is not torch.complex64:
                        params_tmp = torch.view_as_complex(params_tmp)
                    img_tmp = torch.fft.irfftn(params_tmp, s=a.size, norm='ortho')
                    img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
                    params_tmp = torch.fft.rfftn(img_tmp, s=a.size, dim=[2,3], norm='ortho')
                    params_tmp = torch.view_as_real(params_tmp)
                params, image_f, _ = fft_image([1, 3, *a.size], sd=1, resume=params_tmp)
            optimizer = torch.optim.Adam(params, a.lrate)
            image_f = to_valid_rgb(image_f, colors = a.colors)
            del img_tmp
            if a.smooth is True and num + ii > 0:
                # carry optimizer momentum across parameter re-initializations
                optimizer.load_state_dict(opt_state)
        noise = a.noise * (torch.rand(1, 1, a.size[0], a.size[1]//2+1, 1)-0.5).cuda() if a.noise>0 else 0.
        img_out = image_f(noise)
        img_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
        out_enc = model_clip.encode_image(img_sliced)
        if a.gen == 'RGB':
            # keep the raw RGB image's mean/std in a reasonable range
            loss += 1.66 * abs(img_out.mean((2,3)) - 0.45).sum()
            loss += 1.66 * abs(img_out.std((2,3)) - 0.17).sum()
        # similarity terms: pull the render toward text/style/image targets
        if txt_enc is not None:
            loss -= a.invert * sim_func(txt_enc, out_enc, a.sim)
        if styl_enc is not None:
            loss -= a.weight2 * sim_func(styl_enc, out_enc, a.sim)
        if img_enc is not None:
            loss -= a.weight_img * sim_func(img_enc, out_enc, a.sim)
        if a.in_txt0 is not None:
            # push away from the "anti" texts
            for anti_txt_enc in anti_txt_encs:
                loss += 0.3 * sim_func(anti_txt_enc, out_enc, a.sim)
        if a.sharp != 0:
            loss -= a.sharp * derivat(img_out, mode='naive')
        if a.enforce != 0:
            # consistency between two stochastic renderings of the same params
            img_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
            out_enc2 = model_clip.encode_image(img_sliced)
            loss -= a.enforce * sim_func(out_enc, out_enc2, a.sim)
            del out_enc2; torch.cuda.empty_cache()
        if a.expand > 0:
            global prev_enc
            if ii > 0:
                # penalize similarity with the previous step (encourage variety)
                loss += a.expand * sim_func(prev_enc, out_enc, a.sim)
            prev_enc = out_enc.detach().clone()
        del img_out, img_sliced, out_enc; torch.cuda.empty_cache()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # End of a sub-segment: snapshot params (and optimizer state for --smooth).
        if ii % a.opt_step == a.opt_step-1:
            params_tmp = params[0].detach().clone()
            if a.smooth is True:
                opt_state = optimizer.state_dict()
        # Start of a sub-segment: save the freshly-warped frame to disk.
        if ii % a.opt_step == 0:
            with torch.no_grad():
                img_t = image_f(contrast=a.contrast)[0].permute(1,2,0)
                img = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8)
                imsave(os.path.join(tempdir, '%06d.jpg' % glob_step), img, quality=95)
                if a.verbose is True: cvshow(img)
                del img, img_t
        pbar.upd()
    params_tmp = params[0].detach().clone()
glob_start = time.time()
# Process every keyframe segment; Ctrl-C stops early but still assembles video.
try:
    for i in range(count):
        process(i)
except KeyboardInterrupt:
    pass

# Stitch the saved frames in tempdir into an mp4 next to the work directory.
os.system('ffmpeg -v warning -y -i %s/\%%06d.jpg "%s.mp4"' % (tempdir, os.path.join(workdir, workname)))

if __name__ == '__main__':
    main()
| true | true |
79009a415910d76735237b1e2ab606e420652e3d | 296 | py | Python | Python 3 - Curso completo/exercicio049.py | PedroMunizdeMatos/Estudos-e-Projetos | 5949c1f2a80100c1e2db56c7b60f5f0475c0d1dc | [
"MIT"
] | null | null | null | Python 3 - Curso completo/exercicio049.py | PedroMunizdeMatos/Estudos-e-Projetos | 5949c1f2a80100c1e2db56c7b60f5f0475c0d1dc | [
"MIT"
] | null | null | null | Python 3 - Curso completo/exercicio049.py | PedroMunizdeMatos/Estudos-e-Projetos | 5949c1f2a80100c1e2db56c7b60f5f0475c0d1dc | [
"MIT"
] | null | null | null | # Refaça o exercicio009, mostrando a tabuada de um número que um usuário escolher utilizando FOR.
# Multiplication-table exercise: ask the user for a number and print its
# times table (1..10) using a for loop.
print('=-='*3)
print('TABUADA')
print('=-='*3)
m = 0  # current product
n = int(input('Digite o número que deseja saber a tabuada: '))  # number whose table to print
for c in range(1, 11):
    m = n * c
    print('{} x {} = {}.'.format(n, c, m))
print('=-='*3)
print('TABUADA')
print('=-='*3)
m = 0
n = int(input('Digite o número que deseja saber a tabuada: '))
for c in range(1, 11):
m = n * c
print('{} x {} = {}.'.format(n, c, m))
| true | true |
79009c1afff10844c5894a7cdc1561d5a428b4f3 | 466 | py | Python | dorchester/point.py | eyeseast/dorchester | 72b6641ca8837cce01114c620869d055a00d9b66 | [
"Apache-2.0"
] | 3 | 2021-04-09T21:07:46.000Z | 2021-07-26T05:17:23.000Z | dorchester/point.py | eyeseast/dorchester | 72b6641ca8837cce01114c620869d055a00d9b66 | [
"Apache-2.0"
] | 33 | 2021-04-08T17:32:39.000Z | 2022-03-30T15:38:23.000Z | dorchester/point.py | eyeseast/dorchester | 72b6641ca8837cce01114c620869d055a00d9b66 | [
"Apache-2.0"
] | null | null | null | # this is here to avoid a circular import
from collections import namedtuple
class Point(namedtuple("Point", ["x", "y", "group", "fid"])):
@property
def __geo_interface__(self):
return {"type": "Point", "coordinates": (self.x, self.y)}
def as_feature(self):
geometry = self.__geo_interface__
properties = {"group": self.group, "fid": self.fid}
return {"type": "Feature", "properties": properties, "geometry": geometry}
| 33.285714 | 82 | 0.641631 |
from collections import namedtuple
class Point(namedtuple("Point", ["x", "y", "group", "fid"])):
@property
def __geo_interface__(self):
return {"type": "Point", "coordinates": (self.x, self.y)}
def as_feature(self):
geometry = self.__geo_interface__
properties = {"group": self.group, "fid": self.fid}
return {"type": "Feature", "properties": properties, "geometry": geometry}
| true | true |
79009cd0b7ce5709a71cc30bd5fdc5a155fff4f4 | 3,559 | gyp | Python | src/prediction/prediction_test.gyp | dancerj/mozc | a5a4927c1f709d2ff0c681585c746f73a434e4c9 | [
"BSD-3-Clause"
] | null | null | null | src/prediction/prediction_test.gyp | dancerj/mozc | a5a4927c1f709d2ff0c681585c746f73a434e4c9 | [
"BSD-3-Clause"
] | 1 | 2021-06-30T14:59:51.000Z | 2021-06-30T15:31:56.000Z | src/prediction/prediction_test.gyp | dancerj/mozc | a5a4927c1f709d2ff0c681585c746f73a434e4c9 | [
"BSD-3-Clause"
] | 1 | 2022-03-25T09:01:39.000Z | 2022-03-25T09:01:39.000Z | # Copyright 2010-2020, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
  'variables': {
    'relative_dir': 'prediction',
    'gen_out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(relative_dir)',
  },
  'targets': [
    # Unit-test binary covering the prediction subsystem.
    {
      'target_name': 'prediction_test',
      'type': 'executable',
      'sources': [
        'dictionary_predictor_test.cc',
        'user_history_predictor_test.cc',
        'predictor_test.cc',
        'zero_query_dict_test.cc',
      ],
      # Converter/dictionary/protocol deps plus gtest and mock data.
      'dependencies': [
        '../base/base_test.gyp:clock_mock',
        '../composer/composer.gyp:composer',
        '../config/config.gyp:config_handler',
        '../converter/converter_base.gyp:connector',
        '../converter/converter_base.gyp:converter_mock',
        '../converter/converter_base.gyp:immutable_converter',
        '../converter/converter_base.gyp:segmenter',
        '../converter/converter_base.gyp:segments',
        '../data_manager/testing/mock_data_manager.gyp:mock_data_manager',
        '../dictionary/dictionary.gyp:dictionary',
        '../dictionary/dictionary.gyp:dictionary_mock',
        '../dictionary/dictionary.gyp:suffix_dictionary',
        '../dictionary/dictionary_base.gyp:pos_matcher',
        '../dictionary/system/system_dictionary.gyp:system_dictionary',
        '../dictionary/system/system_dictionary.gyp:value_dictionary',
        '../protocol/protocol.gyp:commands_proto',
        '../protocol/protocol.gyp:config_proto',
        '../session/session_base.gyp:request_test_util',
        '../storage/storage.gyp:storage',
        '../testing/testing.gyp:gtest_main',
        '../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
        'prediction.gyp:prediction',
      ],
      'variables': {
        'test_size': 'small',
      },
      # Silence warnings triggered by generated/third-party headers.
      'cflags': [
        '-Wno-unknown-warning-option',
        '-Wno-inconsistent-missing-override',
      ],
    },
    # Test cases meta target: this target is referred from gyp/tests.gyp
    {
      'target_name': 'prediction_all_test',
      'type': 'none',
      'dependencies': [
        'prediction_test',
      ],
    },
  ],
}
| 40.908046 | 74 | 0.688677 |
{
'variables': {
'relative_dir': 'prediction',
'gen_out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(relative_dir)',
},
'targets': [
{
'target_name': 'prediction_test',
'type': 'executable',
'sources': [
'dictionary_predictor_test.cc',
'user_history_predictor_test.cc',
'predictor_test.cc',
'zero_query_dict_test.cc',
],
'dependencies': [
'../base/base_test.gyp:clock_mock',
'../composer/composer.gyp:composer',
'../config/config.gyp:config_handler',
'../converter/converter_base.gyp:connector',
'../converter/converter_base.gyp:converter_mock',
'../converter/converter_base.gyp:immutable_converter',
'../converter/converter_base.gyp:segmenter',
'../converter/converter_base.gyp:segments',
'../data_manager/testing/mock_data_manager.gyp:mock_data_manager',
'../dictionary/dictionary.gyp:dictionary',
'../dictionary/dictionary.gyp:dictionary_mock',
'../dictionary/dictionary.gyp:suffix_dictionary',
'../dictionary/dictionary_base.gyp:pos_matcher',
'../dictionary/system/system_dictionary.gyp:system_dictionary',
'../dictionary/system/system_dictionary.gyp:value_dictionary',
'../protocol/protocol.gyp:commands_proto',
'../protocol/protocol.gyp:config_proto',
'../session/session_base.gyp:request_test_util',
'../storage/storage.gyp:storage',
'../testing/testing.gyp:gtest_main',
'../usage_stats/usage_stats_test.gyp:usage_stats_testing_util',
'prediction.gyp:prediction',
],
'variables': {
'test_size': 'small',
},
'cflags': [
'-Wno-unknown-warning-option',
'-Wno-inconsistent-missing-override',
],
},
{
'target_name': 'prediction_all_test',
'type': 'none',
'dependencies': [
'prediction_test',
],
},
],
}
| true | true |
79009cd3ebc42252a8c8d8816598bfd6c1ce6dd9 | 1,169 | py | Python | airflow/contrib/hooks/gcp_cloud_build_hook.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/contrib/hooks/gcp_cloud_build_hook.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/contrib/hooks/gcp_cloud_build_hook.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_build`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook # noqa
# Warn at import time; stacklevel=2 attributes the warning to the importing
# module rather than to this compatibility shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_build`.",
    DeprecationWarning, stacklevel=2
)
| 40.310345 | 96 | 0.775021 |
import warnings
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_build`.",
DeprecationWarning, stacklevel=2
)
| true | true |
79009d32adde13c522a2b7ab816dceb84c6045fd | 1,979 | py | Python | sandbox/legacy_plot_code/plot_icd_vs_colorgrad_vs_sersic.py | boada/ICD | c1bfedf5f8e5b0e9f77c6d1194bf1e0266d7efd8 | [
"MIT"
] | null | null | null | sandbox/legacy_plot_code/plot_icd_vs_colorgrad_vs_sersic.py | boada/ICD | c1bfedf5f8e5b0e9f77c6d1194bf1e0266d7efd8 | [
"MIT"
] | null | null | null | sandbox/legacy_plot_code/plot_icd_vs_colorgrad_vs_sersic.py | boada/ICD | c1bfedf5f8e5b0e9f77c6d1194bf1e0266d7efd8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# File: plot_icd_vs_colorgrad.py
# Created on: Tue 08 May 2012 11:03:26 AM CDT
# Last Change: Sun 21 Oct 2012 02:43:33 PM CDT
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc
# Scatter ICD[I,H] vs. color gradient, one panel per Sersic-index bin.
galaxies = mk_galaxy_struc()

f1 = pyl.figure(1, figsize=(8, 8))
f1s1 = f1.add_subplot(221)
f1s2 = f1.add_subplot(222)
f1s3 = f1.add_subplot(223)
f1s4 = f1.add_subplot(224)

# Panels cover n<1, 1<n<2, 2<n<3, n>3.
# NOTE(review): galaxies with n exactly 1, 2 or 3 fall into no panel --
# confirm these strict inequalities are intended.
for galaxy in galaxies:
    # Use `is not None` (identity test) instead of `!= None`; only plot
    # well-measured galaxies (S/N >= 30) with both quantities available.
    if galaxy.ston_I >= 30. and galaxy.Color_grad is not None and \
            galaxy.sersic is not None:
        if galaxy.sersic < 1.:
            f1s1.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
                         edgecolor='w')
        if 1. < galaxy.sersic < 2.:
            f1s2.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
                         edgecolor='w')
        if 2. < galaxy.sersic < 3.:
            f1s3.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
                         edgecolor='w')
        if 3. < galaxy.sersic:
            f1s4.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
                         edgecolor='w')

#f1s1.vlines(0.04,-3.,1,lw=2,zorder=0)
#f1s1.hlines(0.0,-0.1,0.25,lw=2,zorder=0)

#pyl.text(0.24, 0.7, "Blue Core, Red Edge", size=15, ha="right", va="top",
#         bbox = dict(boxstyle="round", ec=(1., 0.5, 0.5),
#         fc=(1., 0.8, 0.8)))
#pyl.text(0.24, -2.5, "Red Core, Blue Edge", size=15, ha="right", va="top",
#         bbox = dict(boxstyle="round", ec=(1., 0.5, 0.5),
#         fc=(1., 0.8, 0.8)))

# Finish Plot: identical axis ranges on all panels for direct comparison.
for panel in (f1s1, f1s2, f1s3, f1s4):
    panel.set_xlim(-0.05, 0.25)
    panel.set_ylim(-3., 1)

#pyl.subplots_adjust(left=0.15,bottom=0.15)
f1s1.set_xlabel(r'$\xi[I,H]$')
f1s1.set_ylabel('Color Gradient')

pyl.savefig('icd_vs_color_grad_vs_sersic_IH.eps', bbox='tight')
pyl.show()
| 29.537313 | 79 | 0.624558 |
import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc
galaxies = mk_galaxy_struc()
f1 = pyl.figure(1,figsize=(8,8))
f1s1 = f1.add_subplot(221)
f1s2 = f1.add_subplot(222)
f1s3 = f1.add_subplot(223)
f1s4 = f1.add_subplot(224)
for galaxy in galaxies:
if galaxy.ston_I >= 30. and galaxy.Color_grad != None and galaxy.sersic !=\
None:
if galaxy.sersic < 1.:
col1 =f1s1.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
edgecolor='w')
if 1. < galaxy.sersic < 2.:
col2 =f1s2.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50,c='k',
edgecolor='w')
if 2. < galaxy.sersic < 3.:
col3 =f1s3.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
edgecolor='w')
if 3. < galaxy.sersic:
col4 =f1s4.scatter(galaxy.ICD_IH, galaxy.Color_grad, s=50, c='k',
edgecolor='w')
f1s1.set_xlim(-0.05,0.25)
f1s1.set_ylim(-3.,1)
f1s2.set_xlim(-0.05,0.25)
f1s2.set_ylim(-3.,1)
f1s3.set_xlim(-0.05,0.25)
f1s3.set_ylim(-3.,1)
f1s4.set_xlim(-0.05,0.25)
f1s4.set_ylim(-3.,1)
f1s1.set_xlabel(r'$\xi[I,H]$')
f1s1.set_ylabel('Color Gradient')
pyl.savefig('icd_vs_color_grad_vs_sersic_IH.eps',bbox='tight')
pyl.show()
| true | true |
79009dcadfd004abaca7cf03dd82a7c65de93b90 | 972 | py | Python | chapter12/examples/example02.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | chapter12/examples/example02.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | chapter12/examples/example02.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | #Find,Remove,Find
"""Return a tuple of the indices of the two smallest values in list L.
>>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
>>> find_two_smallest(items)
(6, 7)
>>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
True
"""
from typing import List, Tuple
def find_two_smallest(L:List[float]) -> Tuple[int, int]:
""" (see above) """
# Find the index of the minimum and remove that item
smallest = min(L)
min1 = L.index(smallest)
L.remove(smallest)
# Find the index of the new minimum item in the list
next_smallest = min(L)
min2 = L.index(next_smallest)
# Put smallest back into L
L.insert(min1, smallest)
# Fix min2 in case it was affected by the removal and reinsertion:
if min1 <= min2:
min2 +=1
return (min1, min2)
if __name__ == '__main__':
import doctest
doctest.testmod()
print(find_two_smallest([0, 1, 3, 2, 5, 6, 1]))
| 24.3 | 70 | 0.614198 |
from typing import List, Tuple
def find_two_smallest(L:List[float]) -> Tuple[int, int]:
smallest = min(L)
min1 = L.index(smallest)
L.remove(smallest)
next_smallest = min(L)
min2 = L.index(next_smallest)
L.insert(min1, smallest)
if min1 <= min2:
min2 +=1
return (min1, min2)
if __name__ == '__main__':
import doctest
doctest.testmod()
print(find_two_smallest([0, 1, 3, 2, 5, 6, 1]))
| true | true |
79009e394b41ee662959a7f12813a047e779881f | 1,623 | py | Python | smoothot/tests/test_projection.py | cptq/smooth-ot | a165c0c949730ec0490a0670352e04c39762062c | [
"BSD-2-Clause"
] | null | null | null | smoothot/tests/test_projection.py | cptq/smooth-ot | a165c0c949730ec0490a0670352e04c39762062c | [
"BSD-2-Clause"
] | null | null | null | smoothot/tests/test_projection.py | cptq/smooth-ot | a165c0c949730ec0490a0670352e04c39762062c | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from smoothot.projection import projection_simplex
def _projection_simplex(v, z=1):
"""
Old implementation for test and benchmark purposes.
The arguments v and z should be a vector and a scalar, respectively.
"""
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
def test_projection_simplex():
    """Check projection_simplex against the reference implementation for
    axis=None, axis=0 and axis=1, with both scalar and vector z."""
    rng = np.random.RandomState(0)
    V = rng.rand(100, 10)

    # Axis = None case.
    w = projection_simplex(V[0], z=1, axis=None)
    w2 = _projection_simplex(V[0], z=1)
    assert_array_almost_equal(w, w2)

    # A matrix with axis=None is projected as one flattened vector.
    w = projection_simplex(V, z=1, axis=None)
    w2 = _projection_simplex(V.ravel(), z=1)
    assert_array_almost_equal(w, w2)

    # Axis = 1 case.
    W = projection_simplex(V, axis=1)

    # Check same as with for loop.
    W2 = np.array([_projection_simplex(V[i]) for i in range(V.shape[0])])
    assert_array_almost_equal(W, W2)

    # Check works with vector z.
    W3 = projection_simplex(V, np.ones(V.shape[0]), axis=1)
    assert_array_almost_equal(W, W3)

    # Axis = 0 case.
    W = projection_simplex(V, axis=0)

    # Check same as with for loop.
    W2 = np.array([_projection_simplex(V[:, i]) for i in range(V.shape[1])]).T
    assert_array_almost_equal(W, W2)

    # Check works with vector z.
    W3 = projection_simplex(V, np.ones(V.shape[1]), axis=0)
    assert_array_almost_equal(W, W3)
| 28.473684 | 78 | 0.650647 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from smoothot.projection import projection_simplex
def _projection_simplex(v, z=1):
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
def test_projection_simplex():
rng = np.random.RandomState(0)
V = rng.rand(100, 10)
w = projection_simplex(V[0], z=1, axis=None)
w2 = _projection_simplex(V[0], z=1)
assert_array_almost_equal(w, w2)
w = projection_simplex(V, z=1, axis=None)
w2 = _projection_simplex(V.ravel(), z=1)
assert_array_almost_equal(w, w2)
W = projection_simplex(V, axis=1)
W2 = np.array([_projection_simplex(V[i]) for i in range(V.shape[0])])
assert_array_almost_equal(W, W2)
W3 = projection_simplex(V, np.ones(V.shape[0]), axis=1)
assert_array_almost_equal(W, W3)
W = projection_simplex(V, axis=0)
W2 = np.array([_projection_simplex(V[:, i]) for i in range(V.shape[1])]).T
assert_array_almost_equal(W, W2)
W3 = projection_simplex(V, np.ones(V.shape[1]), axis=0)
assert_array_almost_equal(W, W3)
| true | true |
7900a0013c282cba7d5fba70491bdfc7a24c2c82 | 10,659 | py | Python | src/rdb/tests/rdb_test_runner.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 1 | 2019-11-28T07:26:38.000Z | 2019-11-28T07:26:38.000Z | src/rdb/tests/rdb_test_runner.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 52 | 2019-12-04T05:47:10.000Z | 2020-06-09T03:26:12.000Z | src/rdb/tests/rdb_test_runner.py | vatelzh/daos | 3aca9ae033946ca24179ba0a180c0b8422cd2738 | [
"Apache-2.0"
] | 8 | 2019-12-04T08:26:00.000Z | 2020-06-09T07:40:11.000Z | #!/usr/bin/python
# Copyright (c) 2018-2019 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
# The Government's rights to use, modify, reproduce, release, perform, display,
# or disclose this software are subject to the terms of the Apache License as
# provided in Contract No. 8F-30005.
# Any reproduction of computer software, computer software documentation, or
# portions thereof marked with this legend must also reproduce the markings.
"""
This script runs the rdb tests. From the command line the tests are run with:
server:
orterun -N 1 --report-uri /tmp/urifile -x LD_LIBRARY_PATH
daos_server -o <builddir>/utils/config/examples/daos_server_rdb_tests.yml
start -d ./ -t 1 -m vos,rdb,rsvc,mgmt,rdbt
client:
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt init
--group=daos_server --uuid <uuid>
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt test --update
--group=daos_server
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt test
--group=daos_server
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt fini
--group=daos_server
Where debug_cmds = -x D_LOG_MASK=DEBUG,RPC=ERR,MEM=ERR -x DD_SUBSYS=all
-x DD_MASK=all
This script automates the process.
"""
import subprocess
import os
import sys
import time
import signal
import shlex
import string
build_root = os.path.join(sys.path[0], "../../../")
sys.path.insert(0, os.path.join(build_root, "utils/sl"))
from build_info import BuildInfo
from env_modules import load_mpi
from distutils.spawn import find_executable
# Rendezvous file orterun uses to publish/locate the DAOS server URI.
urifile = "/tmp/urifile"
pid_file = "/tmp/" + str(os.getpid()) + "_output"

# To avoid repetition of parts of the orterun command.
client_prefix = ""
client_suffix = ""

# In case orterun has quit but the daos_server is still running, save the PID.
#daos_server = None
class ServerFailedToStart(Exception):
    """Raised when the orterun/daos_server child process cannot be launched."""
    pass
class ServerTimedOut(Exception):
    """Raised when no daos_server process appears before the startup timeout."""
    pass
def set_logfile(config, logfile):
    """Point the server config's log_file entry at *logfile*.

    Rewrites *config* in place, replacing the default
    " log_file: /tmp/server.log" entry with the requested path.

    Bug fixed: the previous version called the Python-2-only
    string.replace() and discarded its return value, so the config file
    was never actually modified (str is immutable and nothing was
    written back).
    """
    with open(config, "r+") as f:
        content = f.read()
        content = content.replace(" log_file: /tmp/server.log",
                                  " log_file: {}".format(logfile))
        f.seek(0)
        f.write(content)
        f.truncate()
def start_server(binfo, orterun):
    """
    Start the DAOS server with an orterun command as a child process. We use
    subprocess.Popen since it returns control to the calling process and
    provides access to the polling feature.

    Args:
        binfo: BuildInfo object used to locate the install PREFIX.
        orterun: path to the orterun executable.

    Returns:
        The subprocess.Popen handle of the launched child.

    Raises:
        ServerFailedToStart: if the child process cannot be spawned.
    """
    config_file = os.path.join(build_root, "utils", "config", "examples",
                               "daos_server_unittests.yml")
    log_file = os.path.join(binfo.get("PREFIX"),
                            "TESTING",
                            "daos-rdb-test.log")
    set_logfile(config_file, log_file) # set D_LOG_FILE through config file

    print("Starting DAOS server\n")
    # Assemble the orterun command line piece by piece.
    cmd = orterun
    cmd += " -N 1 --report-uri {} ".format(urifile)
    cmd += "-x LD_LIBRARY_PATH "
    cmd += binfo.get("PREFIX") + "/bin/daos_server "
    cmd += "--debug --config {} ".format(config_file)
    cmd += "start -d ./ -t 1 -m vos,rdb,rsvc,mgmt,rdbt -i --recreate-superblocks "

    print("Running command:\n{}".format(cmd))
    sys.stdout.flush()
    try:
        p = subprocess.Popen(shlex.split(cmd))
        return p
    except Exception as e:
        raise ServerFailedToStart("Server failed to start:\n{}".format(e))
def run_client(segment_type):
    """Run one rdbt client stage: init, update, test, or fini.

    Builds the orterun client command for the requested segment from the
    module-level prefix/suffix and executes it, raising if the command
    exits with a non-zero status.  The init stage appends a fresh UUID;
    the update stage maps onto "test --update".
    """
    extra = ""
    if segment_type == "init":
        new_uuid = subprocess.check_output(['uuidgen'])
        extra = " --uuid {}".format(new_uuid)
    elif segment_type == "update":
        segment_type = "test --update"

    cmd = "".join([client_prefix, segment_type, client_suffix, extra])
    print("Running command:\n{}".format(cmd))
    status = os.system(cmd)
    if status:
        raise Exception("command {} failed with return code {}\n".format(
            cmd, status))
    return 0
def pid_info(output_line):
    """Split one line of 'ps -o pid,comm' output into (name, pid).

    Lines look like '  9108 orterun' or '10183 daos_server'; leading
    whitespace is tolerated.  Returns ("", None) when the line cannot
    be parsed.
    """
    fields = output_line.lstrip().split()
    try:
        pid, name = fields[0], fields[1]
    except IndexError:
        print("Unable to retrieve PID info from {}".format(output_line))
        return "", None
    return name, pid
def find_child(parent_pid, child_name):
    """
    Given a PID and a process name, see if this PID has any children with the
    specified name. If it does, return the child PID. If not, return None.
    Searches the process tree depth-first via recursive calls.

    ps -o pid,comm --no-headers --ppid <pid> gives output that looks like this:
    41108 orterun
    41519 ps

    NOTE(review): under Python 3 check_output returns bytes, so each line is
    bytes and startswith() with a str child_name would raise; this script
    appears to target Python 2 -- confirm before porting.
    """
    child_pid = None
    cmd = ['ps', '-o', 'pid,comm', '--no-headers', '--ppid', str(parent_pid)]
    try:
        res = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # parent_pid has no children
        return None
    except Exception as e:
        print("ps command failed with: {}".format(e))
        return None

    # Get rid of the trailing blank line from subprocess.check_output
    res = [s for s in res.splitlines() if s]
    for line in res:
        try:
            current_name, current_pid = pid_info(line)
        except Exception as e:
            print("Unable to extract pid and process name from {}".format(
                line))
            continue
        if current_pid is None:
            return None
        if current_name.startswith(child_name):
            # This is the droid, uh, child we're looking for
            return current_pid
        # Not a match: recurse into this child's own subtree.
        child_pid = find_child(current_pid, child_name)
        if child_pid is not None:
            return child_pid
    return child_pid
def daos_server_pid():
    """Return the PID of the first daos_* descendant process, or None.

    Walks the process tree rooted at the current (parent) process looking
    for a child whose name starts with "daos_" -- this matches both
    daos_server and daos_io_server.
    """
    return find_child(os.getpid(), "daos_")
def cleanup(daos_server):
    """Shut down the DAOS server by SIGKILLing its process.

    Killing daos_server should also take down daos_io_server and orterun,
    though it has been observed on occasion to go zombie until orterun
    itself is killed.

    Args:
        daos_server: PID (int or numeric string) of the daos_server
            process, or None if it was never found.

    Fixes: removed a dead, misleading `cmd` string that was never used,
    and changed the "No such process" membership test to run on str(e) --
    on Python 3, `"..." in e` with an exception object raises TypeError.
    """
    try:
        os.kill(int(daos_server), signal.SIGKILL)
        print("Shut down DAOS server with os.kill({} signal.SIGKILL)".format(
            daos_server))
    except Exception as e:
        if daos_server is None:
            print("No PID was found for the DAOS server")
        elif "No such process" in str(e):
            print("The daos_server process is no longer available"
                  " and could not be killed.")
        else:
            print("Unable to shut down DAOS server: {}".format(e))
if __name__ == "__main__":
    """
    Start a DAOS server and then run the four stages of the client.
    """
    print("Running rdb tests")
    rc = 0
    binfo = BuildInfo(os.path.join(build_root, ".build_vars.json"))
    debug_cmds = "-x D_LOG_MASK=DEBUG,RPC=ERR,MEM=ERR " + \
                 "-x DD_SUBSYS=all -x DD_MASK=all"
    load_mpi('openmpi')
    orterun = find_executable('orterun')
    if orterun is None:
        raise ServerFailedToStart("No orterun installed")

    try:
        # Server operations: launch and wait (up to ~2 minutes) for the
        # daos_server process to appear.
        p = start_server(binfo, orterun)
        counter = 0
        daos_server = daos_server_pid()
        while daos_server is None:
            if counter >= 120:
                raise ServerTimedOut("No DAOS server process detected before "\
                                     "timeout")
            counter += 1
            time.sleep(1)
            daos_server = daos_server_pid()

        # Give daos_io_server some time to get ready.
        time.sleep(10)

        print("DAOS server started")

        # Client operations.
        # Fix: the format() arguments were missing a comma between
        # `orterun` and `urifile`, which made this file a SyntaxError.
        client_prefix = "{} --ompi-server " \
                        "file:{} {} --np 1 rdbt ".format(
                            orterun, urifile, debug_cmds)
        client_suffix = " --group=daos_server"

        # orterun is called for the client four times: init, update, test,
        # and fini
        client_segments = ['init', 'update', 'test', 'fini']

        try:
            for segment in client_segments:
                run_client(segment)
            print("SUCCESS\nrbd tests PASSED")
        except Exception as e:
            print("rbd tests FAILED")
            print("{}".format(e))
            rc = 1

    except ServerFailedToStart as e:
        # Fix: e.message is Python-2 only; format the exception itself.
        print("ServerFailedToStart: {}".format(e))
        print("FAIL")
        rc = 1

    except ServerTimedOut as e:
        print("ServerTimedOut: {}".format(e))
        print("FAIL")
        rc = 1

    finally:
        # Shut down the DAOS server when we are finished.
        try:
            if not p or p.poll() is not None:
                # If the server is dead, something went very wrong
                print("The server is unexpectedly absent.")
                print("FAIL")
                rc = 1
        except NameError:
            rc = 1
        try:
            cleanup(daos_server)
        except NameError:
            # The daos_server was never defined.
            rc = 1
        sys.exit(rc)
| 34.383871 | 82 | 0.638334 |
# or disclose this software are subject to the terms of the Apache License as
# provided in Contract No. 8F-30005.
# Any reproduction of computer software, computer software documentation, or
# portions thereof marked with this legend must also reproduce the markings.
"""
This script runs the rdb tests. From the command line the tests are run with:
server:
orterun -N 1 --report-uri /tmp/urifile -x LD_LIBRARY_PATH
daos_server -o <builddir>/utils/config/examples/daos_server_rdb_tests.yml
start -d ./ -t 1 -m vos,rdb,rsvc,mgmt,rdbt
client:
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt init
--group=daos_server --uuid <uuid>
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt test --update
--group=daos_server
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt test
--group=daos_server
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt fini
--group=daos_server
Where debug_cmds = -x D_LOG_MASK=DEBUG,RPC=ERR,MEM=ERR -x DD_SUBSYS=all
-x DD_MASK=all
This script automates the process.
"""
import subprocess
import os
import sys
import time
import signal
import shlex
import string
build_root = os.path.join(sys.path[0], "../../../")
sys.path.insert(0, os.path.join(build_root, "utils/sl"))
from build_info import BuildInfo
from env_modules import load_mpi
from distutils.spawn import find_executable
urifile = "/tmp/urifile"
pid_file = "/tmp/" + str(os.getpid()) + "_output"
# To avoid repetition of parts of the oretrun command.
client_prefix = ""
client_suffix = ""
# In case orterun has quit but the daos_server is still running, save the PID.
#daos_server = None
class ServerFailedToStart(Exception):
    """Raised when the DAOS server process (or orterun) cannot be launched."""
    pass
class ServerTimedOut(Exception):
    """Raised when no DAOS server process appears before the startup timeout."""
    pass
def set_logfile(config, logfile):
    """Point the server config file at *logfile*.

    Rewrites *config* in place, replacing the default
    `` log_file: /tmp/server.log`` entry with `` log_file: <logfile>``.

    BUG FIX: the previous implementation called ``string.replace(line, ...)``
    inside a read loop and discarded the result, so the config file was never
    actually modified.  We now read the whole file, substitute, and write the
    result back.
    """
    with open(config, "r+") as f:
        content = f.read()
        content = content.replace(" log_file: /tmp/server.log",
                                  " log_file: {}".format(logfile))
        # Rewind and truncate so the new (possibly shorter) content fully
        # replaces the old file body.
        f.seek(0)
        f.write(content)
        f.truncate()
def start_server(binfo, orterun):
    """
    Start the DAOS server with an orterun command as a child process. We use
    subprocess.Popen since it returns control to the calling process and
    provides access to the polling feature.

    binfo -- BuildInfo object; its "PREFIX" entry locates the install tree.
    orterun -- path to the orterun executable.

    Returns the subprocess.Popen handle for the orterun/daos_server process.
    Raises ServerFailedToStart if the process cannot be spawned.
    """
    config_file = os.path.join(build_root, "utils", "config", "examples",
                               "daos_server_unittests.yml")
    log_file = os.path.join(binfo.get("PREFIX"),
                            "TESTING",
                            "daos-rdb-test.log")
    set_logfile(config_file, log_file) # set D_LOG_FILE through config file
    print("Starting DAOS server\n")
    # Build the orterun command line piece by piece; urifile is the
    # module-level rendezvous file shared with the client runs.
    cmd = orterun
    cmd += " -N 1 --report-uri {} ".format(urifile)
    cmd += "-x LD_LIBRARY_PATH "
    cmd += binfo.get("PREFIX") + "/bin/daos_server "
    cmd += "--debug --config {} ".format(config_file)
    cmd += "start -d ./ -t 1 -m vos,rdb,rsvc,mgmt,rdbt -i --recreate-superblocks "
    print("Running command:\n{}".format(cmd))
    # Flush so the command echo appears before any server output.
    sys.stdout.flush()
    try:
        p = subprocess.Popen(shlex.split(cmd))
        return p
    except Exception as e:
        raise ServerFailedToStart("Server failed to start:\n{}".format(e))
def run_client(segment_type):
    """
    There are four client segments to be run, init, update, test, and fini.
    The command line varies slightly for each and in some cases there is a
    tail after the suffix.

    segment_type -- one of 'init', 'update', 'test', 'fini'.

    Uses the module-level client_prefix/client_suffix set in __main__.
    Returns 0 on success; raises Exception when the command exits non-zero.
    """
    tail = ""
    if segment_type == "init":
        # init needs a fresh pool UUID.
        # NOTE(review): under Python 3 check_output returns bytes, which would
        # format as b'...'; this script appears to target Python 2 — confirm.
        uuid = subprocess.check_output(['uuidgen'])
        tail = " --uuid {}".format(uuid)
    elif segment_type == "update":
        # 'update' is really the test segment with an extra flag.
        segment_type = "test --update"
    cmd = client_prefix + segment_type + client_suffix + tail
    print("Running command:\n{}".format(cmd))
    rc = os.system(cmd)
    if rc:
        raise Exception("command {} failed with return code {}\n".format(
            cmd, rc))
    return 0
def pid_info(output_line):
    """Parse one line of ``ps -o pid,comm`` output.

    A line looks like `` 9108 orterun`` or ``10183 daos_server`` (leading
    spaces possible).  Returns a ``(name, pid)`` tuple of strings, or
    ``("", None)`` when the line does not contain both fields.
    """
    fields = output_line.lstrip().split()
    try:
        # ps prints the PID first and the command name second.
        return fields[1], fields[0]
    except IndexError:
        # Narrowed from a bare `except Exception` — only a short/empty line
        # can make the indexing fail here.
        print("Unable to retrieve PID info from {}".format(output_line))
        return "", None
def find_child(parent_pid, child_name):
    """
    Given a PID and a process name, see if this PID has any children with the
    specified name. If is does, return the child PID. If not, return None.
    ps -o pid,comm --no-headers --ppid <pid> gives output that looks like this:
    41108 orterun
    41519 ps

    Searches depth-first: each child is checked by name, then its own
    subtree is searched recursively.
    """
    child_pid = None
    cmd = ['ps', '-o', 'pid,comm', '--no-headers', '--ppid', str(parent_pid)]
    try:
        # NOTE(review): on Python 3 check_output returns bytes, so the
        # startswith(child_name) below would need bytes too — confirm this
        # script runs under Python 2.
        res = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # parent_pid has no children
        return None
    except Exception as e:
        print("ps command failed with: {}".format(e))
        return None
    # Get rid of the trailing blank line from subprocess.check_output
    res = [s for s in res.splitlines() if s]
    for line in res:
        try:
            current_name, current_pid = pid_info(line)
        except Exception as e:
            print("Unable to extract pid and process name from {}".format(
                line))
            continue
        if current_pid is None:
            # NOTE(review): an unparsable line aborts the whole scan instead
            # of being skipped — looks intentional but worth confirming.
            return None
        if current_name.startswith(child_name):
            # This is the droid, uh, child we're looking for
            return current_pid
        # Not a match; search this child's own descendants.
        child_pid = find_child(current_pid, child_name)
        if child_pid is not None:
            return child_pid
    return child_pid
def daos_server_pid():
    """Locate the PID of the running DAOS server.

    Walks the process tree down from the current process looking for the
    first descendant whose command name starts with "daos_" (which matches
    both daos_server and daos_io_server).  Returns None if no such process
    exists yet.
    """
    return find_child(os.getpid(), "daos_")
def cleanup(daos_server):
    """Shut down the DAOS server by killing its process.

    Killing the daos_server PID should take the daos_io_server and orterun
    processes down with it (theoretically); the server has been observed to
    go zombie until orterun itself is killed.

    daos_server -- PID of the server process (int/str), or None if it was
                   never found.
    """
    try:
        os.kill(int(daos_server), signal.SIGKILL)
        print("Shut down DAOS server with os.kill({} signal.SIGKILL)".format(
            daos_server))
    except Exception as e:
        if daos_server is None:
            print("No PID was found for the DAOS server")
        elif "No such process" in str(e):
            # BUG FIX: this was `"No such process" in e`, which raises
            # TypeError on Python 3 (exceptions are not iterable there).
            print("The daos_server process is no longer available"
                  " and could not be killed.")
        else:
            print("Unable to shut down DAOS server: {}".format(e))
if __name__ == "__main__":
    # Start a DAOS server and then run the four stages of the client.
    print("Running rdb tests")
    rc = 0
    binfo = BuildInfo(os.path.join(build_root, ".build_vars.json"))
    debug_cmds = "-x D_LOG_MASK=DEBUG,RPC=ERR,MEM=ERR " + \
                 "-x DD_SUBSYS=all -x DD_MASK=all"
    load_mpi('openmpi')
    orterun = find_executable('orterun')
    if orterun is None:
        raise ServerFailedToStart("No orterun installed")
    try:
        p = start_server(binfo, orterun)
        # Poll for the daos_server process; give it up to ~120 seconds.
        counter = 0
        daos_server = daos_server_pid()
        while daos_server is None:
            if counter >= 120:
                raise ServerTimedOut("No DAOS server process detected before "
                                     "timeout")
            counter += 1
            time.sleep(1)
            daos_server = daos_server_pid()
        # Extra settling time before hitting the server with clients.
        time.sleep(10)
        print("DAOS server started")
        # BUG FIX: the format() arguments were missing a comma
        # ("orterun urifile"), which made this line a SyntaxError.
        client_prefix = "{} --ompi-server " \
                        "file:{} {} --np 1 rdbt ".format(
                            orterun, urifile, debug_cmds)
        client_suffix = " --group=daos_server"
        # orterun is called for the client four times: init, update, test,
        # and fini.
        client_segments = ['init', 'update', 'test', 'fini']
        try:
            for segment in client_segments:
                run_client(segment)
            print("SUCCESS\nrbd tests PASSED")
        except Exception as e:
            print("rbd tests FAILED")
            print("{}".format(e))
            rc = 1
    except ServerFailedToStart as e:
        # BUG FIX: `e.message` does not exist on Python 3; str(e) is portable.
        print("ServerFailedToStart: {}".format(e))
        print("FAIL")
        rc = 1
    except ServerTimedOut as e:
        print("ServerTimedOut: {}".format(e))
        print("FAIL")
        rc = 1
    finally:
        # Shut down the DAOS server when we are finished.
        try:
            if not p or p.poll() is not None:
                # If the server is dead, something went very wrong.
                print("The server is unexpectedly absent.")
                print("FAIL")
                rc = 1
        except NameError:
            # p was never assigned because start_server raised.
            rc = 1
        try:
            cleanup(daos_server)
        except NameError:
            # The daos_server was never defined.
            rc = 1
    sys.exit(rc)
| false | true |
7900a012d73d2c6e3706b7e5adc64f9e5d1aa94e | 3,769 | py | Python | app/services/pool/pool.py | somespecialone/clever-inspect | 8735e0b445c8e7e9b83c627d4a5fbed1428c1891 | [
"MIT"
] | 1 | 2022-03-12T05:44:12.000Z | 2022-03-12T05:44:12.000Z | app/services/pool/pool.py | somespecialone/clever-inspect | 8735e0b445c8e7e9b83c627d4a5fbed1428c1891 | [
"MIT"
] | null | null | null | app/services/pool/pool.py | somespecialone/clever-inspect | 8735e0b445c8e7e9b83c627d4a5fbed1428c1891 | [
"MIT"
] | null | null | null | import logging
import asyncio
from steam.ext.csgo import Client
from steam.ext.csgo.enums import Language
from steam.ext.csgo.backpack import BaseInspectedItem
from steam.protobufs import GCMsgProto, EMsg, MsgProto
from steam.protobufs.client_server import CMsgClientLicenseListLicense
from steam_tradeoffer_manager.base import SteamBot, SteamBotPool
_log = logging.getLogger(__name__)
# https://steamdb.info/app/730/subs/
_CSGO_PACKAGE_IDS = {
17039,
88535,
54029,
161243,
261665,
14,
211096,
133828,
4,
49,
16236,
16237,
17878,
18702,
18703,
18939,
27267,
29197,
29198,
36071,
39221,
39297,
51835,
51836,
53711,
59228,
62690,
88534,
88541,
88623,
88624,
61,
392171,
61986,
329385,
303386,
63290,
15740,
298963,
298962,
298961,
272766,
199420,
154735,
277644,
273865,
266388,
229740,
226979,
16222,
16223,
16018,
16019,
54030,
63289,
197847,
4116,
11470,
11758,
15990,
17905,
27618,
27762,
35043,
54627,
60765,
62486,
62606,
62688,
113904,
124041,
125313,
}
_CSGO_ID = 730
class InspectBot(SteamBot[int, "InspectPool"], Client):
    """A Steam bot that owns a CS:GO license and serves item-inspect requests."""

    # Populated by on_licenses; read through the `licenses` property.
    _licenses: dict[int, CMsgClientLicenseListLicense]

    async def on_ready(self) -> None:
        """Ensure the account owns CS:GO, then register with the pool queue."""
        await super().on_ready()
        await asyncio.sleep(0.1)  # ensure licenses event was emitted
        for package_id in _CSGO_PACKAGE_IDS:
            if package_id in self.licenses:
                break
        else:
            # No CS:GO-granting package found on the account.
            # TODO: errors requesting free license
            _log.info(f"Request free CSGO license for {self}")
            await self.request_free_license([_CSGO_ID])  # request CSGO license
        self.pool.queue.put_nowait(self)

    @property
    def licenses(self) -> dict[int, CMsgClientLicenseListLicense]:
        """Licenses by package id; empty dict until on_licenses has fired."""
        return getattr(self, "_licenses", {})

    async def on_licenses(self, licenses: list[CMsgClientLicenseListLicense]):
        """Cache the license list keyed by package id."""
        self._licenses = {}
        for steam_license in licenses:
            # Mutates the dict just assigned to _licenses (via the property).
            self.licenses[steam_license.package_id] = steam_license

    def timeout(self) -> asyncio.Task:
        """Return this bot to the pool queue after a 1-second cooldown."""
        async def _timeout():
            await asyncio.sleep(1)
            self.pool.queue.put_nowait(self)
        return asyncio.create_task(_timeout())

    def request_free_license(self, app_ids: list[int]):  # pragma: no cover
        """Ask Steam to grant the free licenses for the given app ids."""
        return self.ws.send_proto_and_wait(MsgProto(EMsg.ClientRequestFreeLicense, appids=app_ids))

    async def inspect_item(self, s: int, a: int, d: int, m: int, timeout: int) -> BaseInspectedItem:  # pragma: no cover
        """Send a GC preview-data request for inspect params s/a/d/m and await the reply."""
        await self.ws.send_gc_message(
            GCMsgProto(
                Language.Client2GCEconPreviewDataBlockRequest,
                param_s=s,
                param_a=a,
                param_d=d,
                param_m=m,
            )
        )
        # Match the response to this request by asset id (a).
        return await self.wait_for("inspect_item_info", timeout=timeout, check=lambda item: item.id == a)
class InspectPool(SteamBotPool[int, InspectBot]):
    """Pool of InspectBot workers sharing a single ready-queue."""

    INSPECT_TIMEOUT: int

    def __init__(self) -> None:
        super().__init__()
        # Bots park themselves here once they are ready to serve requests.
        self.queue: asyncio.Queue[InspectBot] = asyncio.Queue()

    async def startup(self) -> None:
        """Start the pool and block until at least one bot is ready."""
        await super().startup()
        # Take the first ready bot off the queue, then put it straight back
        # so it remains available for inspect_item().
        first_ready = await self.queue.get()
        self.queue.put_nowait(first_ready)

    async def inspect_item(self, s: int, a: int, d: int, m: int) -> BaseInspectedItem:
        """Borrow a ready bot, run the inspection, and always release the bot."""
        worker = await self.queue.get()
        try:
            return await worker.inspect_item(s, a, d, m, self.INSPECT_TIMEOUT)
        finally:
            # Re-queue the worker (after its cooldown) even on failure.
            worker.timeout()
| 22.704819 | 120 | 0.613425 | import logging
import asyncio
from steam.ext.csgo import Client
from steam.ext.csgo.enums import Language
from steam.ext.csgo.backpack import BaseInspectedItem
from steam.protobufs import GCMsgProto, EMsg, MsgProto
from steam.protobufs.client_server import CMsgClientLicenseListLicense
from steam_tradeoffer_manager.base import SteamBot, SteamBotPool
_log = logging.getLogger(__name__)
_CSGO_PACKAGE_IDS = {
17039,
88535,
54029,
161243,
261665,
14,
211096,
133828,
4,
49,
16236,
16237,
17878,
18702,
18703,
18939,
27267,
29197,
29198,
36071,
39221,
39297,
51835,
51836,
53711,
59228,
62690,
88534,
88541,
88623,
88624,
61,
392171,
61986,
329385,
303386,
63290,
15740,
298963,
298962,
298961,
272766,
199420,
154735,
277644,
273865,
266388,
229740,
226979,
16222,
16223,
16018,
16019,
54030,
63289,
197847,
4116,
11470,
11758,
15990,
17905,
27618,
27762,
35043,
54627,
60765,
62486,
62606,
62688,
113904,
124041,
125313,
}
_CSGO_ID = 730
class InspectBot(SteamBot[int, "InspectPool"], Client):
_licenses: dict[int, CMsgClientLicenseListLicense]
async def on_ready(self) -> None:
await super().on_ready()
await asyncio.sleep(0.1)
for package_id in _CSGO_PACKAGE_IDS:
if package_id in self.licenses:
break
else:
_log.info(f"Request free CSGO license for {self}")
await self.request_free_license([_CSGO_ID])
self.pool.queue.put_nowait(self)
@property
def licenses(self) -> dict[int, CMsgClientLicenseListLicense]:
return getattr(self, "_licenses", {})
async def on_licenses(self, licenses: list[CMsgClientLicenseListLicense]):
self._licenses = {}
for steam_license in licenses:
self.licenses[steam_license.package_id] = steam_license
def timeout(self) -> asyncio.Task:
async def _timeout():
await asyncio.sleep(1)
self.pool.queue.put_nowait(self)
return asyncio.create_task(_timeout())
def request_free_license(self, app_ids: list[int]):
return self.ws.send_proto_and_wait(MsgProto(EMsg.ClientRequestFreeLicense, appids=app_ids))
async def inspect_item(self, s: int, a: int, d: int, m: int, timeout: int) -> BaseInspectedItem:
await self.ws.send_gc_message(
GCMsgProto(
Language.Client2GCEconPreviewDataBlockRequest,
param_s=s,
param_a=a,
param_d=d,
param_m=m,
)
)
return await self.wait_for("inspect_item_info", timeout=timeout, check=lambda item: item.id == a)
class InspectPool(SteamBotPool[int, InspectBot]):
INSPECT_TIMEOUT: int
def __init__(self) -> None:
super().__init__()
self.queue: asyncio.Queue[InspectBot] = asyncio.Queue()
async def startup(self) -> None:
await super().startup()
bot = await self.queue.get()
self.queue.put_nowait(bot)
async def inspect_item(self, s: int, a: int, d: int, m: int) -> BaseInspectedItem:
bot = await self.queue.get()
try:
item = await bot.inspect_item(s, a, d, m, self.INSPECT_TIMEOUT)
finally:
bot.timeout()
return item
| true | true |
7900a0a926aa64ad95ee205dd355f50cf4263d59 | 470 | py | Python | example/manage.py | peterbe/django-jingo-offline-compressor | 282cb4a0cea3a0f3b4c9c00b8ee4be8ed9cf9ce7 | [
"BSD-3-Clause"
] | 1 | 2015-05-17T08:20:05.000Z | 2015-05-17T08:20:05.000Z | example/manage.py | peterbe/django-jingo-offline-compressor | 282cb4a0cea3a0f3b4c9c00b8ee4be8ed9cf9ce7 | [
"BSD-3-Clause"
] | null | null | null | example/manage.py | peterbe/django-jingo-offline-compressor | 282cb4a0cea3a0f3b4c9c00b8ee4be8ed9cf9ce7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import warnings
if __name__ == "__main__":
    # Make the repository root importable so the in-tree
    # jingo_offline_compressor package wins over any installed copy.
    here = os.path.dirname(__file__)
    there = os.path.join(here, '..')
    there = os.path.abspath(there)
    sys.path.insert(0, there)
    # Python 2 print statement — this manage.py predates Python 3 support.
    print "NOTE Using jingo_offline_compressor from %s" % there
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 26.111111 | 71 | 0.725532 |
import os
import sys
import warnings
if __name__ == "__main__":
here = os.path.dirname(__file__)
there = os.path.join(here, '..')
there = os.path.abspath(there)
sys.path.insert(0, there)
print "NOTE Using jingo_offline_compressor from %s" % there
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| false | true |
7900a0af06525ec857da6ff0a1ff1daab53be7dc | 3,120 | py | Python | dayu_widgets_mvc/item_view_set.py | muyr/dayu_widgets_mvc | 902766359caf6b5f9d0becf5e346569a26d5674d | [
"MIT"
] | 3 | 2019-09-12T07:33:26.000Z | 2022-03-21T07:11:19.000Z | dayu_widgets/item_view_set.py | kanbang/dayu_widgets | 6ff101e6c6f8fcf10e5cb578023a12ccdcef9164 | [
"MIT"
] | null | null | null | dayu_widgets/item_view_set.py | kanbang/dayu_widgets | 6ff101e6c6f8fcf10e5cb578023a12ccdcef9164 | [
"MIT"
] | 1 | 2022-02-16T14:19:54.000Z | 2022-02-16T14:19:54.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2018.5
# Email : muyanru345@163.com
###################################################################
from dayu_widgets.item_model import MSortFilterModel, MTableModel
from dayu_widgets.item_view import MTableView, MTreeView, MBigView, MListView
from dayu_widgets.line_edit import MLineEdit
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.qt import QWidget, QModelIndex, Signal, QVBoxLayout, QApplication, Qt, Slot, QHBoxLayout
class MItemViewSet(QWidget):
    """A composite widget bundling an item view, its models, and a search bar.

    Wires a MTableModel (source) through a MSortFilterModel (proxy) into one
    of the dayu view classes, with an optional search line edit on top.
    """
    # Emitted with the *proxy* index on double click.
    sig_double_clicked = Signal(QModelIndex)
    # Emitted with the *source* index on left-button press.
    sig_left_clicked = Signal(QModelIndex)
    # Available view flavors; pass one as `view_type` to __init__.
    TableViewType = MTableView
    BigViewType = MBigView
    TreeViewType = MTreeView
    ListViewType = MListView

    def __init__(self, view_type=None, parent=None):
        """Build the model/view stack; `view_type` defaults to the table view."""
        super(MItemViewSet, self).__init__(parent)
        self._main_lay = QVBoxLayout()
        self._main_lay.setSpacing(5)
        self._main_lay.setContentsMargins(0, 0, 0, 0)
        # source model -> sort/filter proxy -> view
        self.sort_filter_model = MSortFilterModel()
        self.source_model = MTableModel()
        self.sort_filter_model.setSourceModel(self.source_model)
        view_class = view_type or MItemViewSet.TableViewType
        self.item_view = view_class()
        self.item_view.doubleClicked.connect(self.sig_double_clicked)
        self.item_view.pressed.connect(self.slot_left_clicked)
        self.item_view.setModel(self.sort_filter_model)
        # Search bar is created hidden; enable it via searchable().
        self._search_line_edit = MLineEdit().search().small()
        self._search_attr_button = MToolButton().icon_only().svg('down_fill.svg').small()
        self._search_line_edit.set_prefix_widget(self._search_attr_button)
        self._search_line_edit.textChanged.connect(self.sort_filter_model.set_search_pattern)
        self._search_line_edit.setVisible(False)
        _search_lay = QHBoxLayout()
        _search_lay.setContentsMargins(0, 0, 0, 0)
        _search_lay.addStretch()
        _search_lay.addWidget(self._search_line_edit)
        self._main_lay.addLayout(_search_lay)
        self._main_lay.addWidget(self.item_view)
        self.setLayout(self._main_lay)

    @Slot(QModelIndex)
    def slot_left_clicked(self, start_index):
        """On left-button press, re-emit the mapped source-model index."""
        button = QApplication.mouseButtons()
        if button == Qt.LeftButton:
            real_index = self.sort_filter_model.mapToSource(start_index)
            self.sig_left_clicked.emit(real_index)

    def set_header_list(self, header_list):
        """Propagate the header configuration to both models and the view."""
        self.source_model.set_header_list(header_list)
        self.sort_filter_model.set_header_list(header_list)
        self.sort_filter_model.setSourceModel(self.source_model)
        self.item_view.set_header_list(header_list)

    @Slot()
    def setup_data(self, data_list):
        """Replace the model contents with `data_list` (clears when falsy)."""
        self.source_model.clear()
        if data_list:
            self.source_model.set_data_list(data_list)

    def get_data(self):
        """Return the source model's current data list."""
        return self.source_model.get_data_list()

    def searchable(self):
        """Make the search line edit visible; returns self for chaining."""
        self._search_line_edit.setVisible(True)
        return self
| 39.493671 | 106 | 0.690705 | true | true | |
7900a134932544acc96a287638e2783a6497123c | 808 | py | Python | nlu/components/lemmatizer.py | sumanthratna/nlu | acde6879d776116051d4cbe909268ab8946989b5 | [
"Apache-2.0"
] | 1 | 2020-09-25T22:55:13.000Z | 2020-09-25T22:55:13.000Z | nlu/components/lemmatizer.py | sumanthratna/nlu | acde6879d776116051d4cbe909268ab8946989b5 | [
"Apache-2.0"
] | null | null | null | nlu/components/lemmatizer.py | sumanthratna/nlu | acde6879d776116051d4cbe909268ab8946989b5 | [
"Apache-2.0"
] | null | null | null | from nlu import *
from nlu.pipe_components import SparkNLUComponent
from sparknlp.annotator import *
class Lemmatizer(SparkNLUComponent):
    """NLU wrapper component around the Spark NLP lemmatizer annotator."""

    def __init__(self, component_name='lemma', language='en',
                 component_type='lemmatizer', get_default=False, model=None,
                 sparknlp_reference=''):
        """Build the lemmatizer, wrapping or loading a Spark NLP model.

        component_name -- requested name; normalized to 'lemmatizer' below.
        language -- language code used for pretrained model lookup.
        component_type -- component category passed to the base class.
        get_default -- load the default model instead of a pretrained reference.
        model -- pre-built annotator to wrap directly, bypassing any loading.
        sparknlp_reference -- Spark NLP pretrained model reference name.
        """
        # The name is normalized unconditionally, so the 'lemma' check below
        # is always true; kept for structural parity with sibling components.
        component_name = 'lemmatizer'
        SparkNLUComponent.__init__(self, component_name, component_type)
        if model is not None:
            self.model = model
        elif 'lemma' in component_name:
            # Lazy import — presumably avoids a circular import at module
            # load time; confirm before hoisting to the top of the file.
            from nlu import SparkNLPLemmatizer
            if get_default:
                self.model = SparkNLPLemmatizer.get_default_model()
            else:
                self.model = SparkNLPLemmatizer.get_pretrained_model(
                    sparknlp_reference, language)
| 44.888889 | 145 | 0.709158 | from nlu import *
from nlu.pipe_components import SparkNLUComponent
from sparknlp.annotator import *
class Lemmatizer(SparkNLUComponent):
def __init__(self,component_name='lemma', language='en', component_type='lemmatizer', get_default=False,model = None, sparknlp_reference=''):
component_name = 'lemmatizer'
SparkNLUComponent.__init__(self,component_name,component_type)
if model != None : self.model = model
else :
if 'lemma' in component_name :
from nlu import SparkNLPLemmatizer
if get_default : self.model = SparkNLPLemmatizer.get_default_model()
else : self.model = SparkNLPLemmatizer.get_pretrained_model(sparknlp_reference,language)
| true | true |
7900a3cb2fe883116b5dddf46e9f623d2757d39e | 302 | py | Python | setup.py | ferlzc/youtube-dl-flask | afc01922c70650a05919c071f176c72479e5bf47 | [
"Unlicense"
] | null | null | null | setup.py | ferlzc/youtube-dl-flask | afc01922c70650a05919c071f176c72479e5bf47 | [
"Unlicense"
] | null | null | null | setup.py | ferlzc/youtube-dl-flask | afc01922c70650a05919c071f176c72479e5bf47 | [
"Unlicense"
] | null | null | null | from setuptools import setup
# Minimal packaging metadata for the yt-dl Flask wrapper.
setup(
    name='yt-dl',
    version = "0.1.0",
    author = "Fernando Luiz Cola",
    author_email ="fernando.cola@emc-logic.com",
    license = "MIT",
    # Runtime dependencies; versions intentionally unpinned.
    install_requires=[
        'Flask',
        'youtube-dl',
    ],
)
| 21.571429 | 52 | 0.480132 | from setuptools import setup
setup(
name='yt-dl',
version = "0.1.0",
author = "Fernando Luiz Cola",
author_email ="fernando.cola@emc-logic.com",
license = "MIT",
install_requires=[
'Flask',
'youtube-dl',
],
)
| true | true |
7900a408abb0e85cf06173bbfa8c45244a7a51e4 | 10,433 | py | Python | pubs.py | Ibrahimmohamed33/web | 4cbeba3ab9b83bfa780dcf84dc3bad9b9ac188a0 | [
"MIT"
] | null | null | null | pubs.py | Ibrahimmohamed33/web | 4cbeba3ab9b83bfa780dcf84dc3bad9b9ac188a0 | [
"MIT"
] | null | null | null | pubs.py | Ibrahimmohamed33/web | 4cbeba3ab9b83bfa780dcf84dc3bad9b9ac188a0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# an ugly hack to convert some stuff into other stuff...
# EDIT THESE #####################################################################
names_to_highlight = ['Eren AM',
'Delmont TO',
'Esen ÖC',
'Lee STM',
'Shaiber A',
'Kiefl E',
'Cui S',
'Watson AR',
'Lolans K']
journal_name_fixes = [('The ISME journal', 'ISME J'),
('Proceedings of the National Academy of Sciences of the United States of America', 'Proc Natl Acad Sci U S A'),
('Proceedings of the National Academy of Sciences', 'Proc Natl Acad Sci U S A'),
('Frontiers in Microbiology', 'Front Microbiol')]
keep_pubs_after_year = 2009
##################################################################################
import os
import sys
from datetime import datetime
try:
import anvio.utils as u
from anvio.errors import ConfigError
except:
sys.stderr.write("This program requires anvi'o to be installed :/\n")
sys.exit(-1)
class Publications:
    def __init__(self, pubs_file_path='pubs.txt', pubs_info_file_path='pubs_info.txt'):
        """Takes an EndNote library exported as a TXT file (`pubs_file_path`), and an
           optional TAB-delimited info file path with DOI identifiers
           (`pubs_info_file_path`), and generates some Markdown formatted output.

           Here is an info line from the EndNote:

             Winterberg, K. M., and Reznikoff, W. S. (2007). "Screening transposon mutant libraries using full-genome oligonucleotide microarrays." Methods Enzymol, 421, 110-25.

           Absolute matching to this format is required.

           Expected headers in the TAB-delimited pubs info file are 'doi',
           'highlights', and 'featured_image'.

             - doi: The DOI of the pub matching to a pubs file path entry.
             - highlights: Brief bullet points about the work. Each point must be
                           separated from the rest with a ';' character. HTML tags are OK.
             - featured_image: A URL to an image.

           If things are not working, feel free to write to meren at uchicago.edu
        """
        self.info = {}
        self.pubs_dict = {}
        self.journals_list = []
        self.authors_list = []
        self.recent_authors_list = []
        self.author_links = {}
        self.pubs_file_path = pubs_file_path
        self.pubs_info_file_path = pubs_info_file_path

    def get_author_highlights(self, pub):
        """Return the pub's author list as HTML, highlighting group members.

        Co-first authors get a ☯ marker, co-senior authors a ‡ marker, and
        authors listed in the module-level `names_to_highlight` are wrapped
        in a span for styling.
        """
        authors_str = []
        for author in pub['authors']:
            if author in pub['co_first_authors']:
                author_h = author + '<sup>☯</sup>'
            elif author in pub['co_senior_authors']:
                author_h = author + '<sup>‡</sup>'
            else:
                author_h = author

            if author in names_to_highlight:
                authors_str.append('<span class="pub-member-author">%s</span>' % (author_h))
            else:
                authors_str.append(author_h)

        return ', '.join(authors_str)

    def parse_pubs_txt(self):
        """Read the pubs file (and optional info file) into `self.pubs_dict` keyed by year."""
        if os.path.exists(self.pubs_info_file_path):
            self.info = u.get_TAB_delimited_file_as_dictionary(self.pubs_info_file_path)

        pubs_header = u.get_columns_of_TAB_delim_file(self.pubs_file_path, include_first_column=True)
        headers_expected = ['Authors', 'Title', 'Publication', 'Volume', 'Number', 'Pages', 'Year', 'doi']
        # BUG FIX: this comprehension was inverted — it collected headers
        # *present in the file* but not expected, so genuinely missing
        # mandatory headers were never reported (and harmless extra columns
        # triggered a bogus error). We want expected headers absent from the file.
        missing_headers = [h for h in headers_expected if h not in pubs_header]
        if len(missing_headers):
            raise ConfigError("Sorry, the pubs.txt seems to be missing some of the headers that are mandatory. Each of \
                               the columns in the following list must be present in this file: %s (hint: yours do not have\
                               the following: %s)." % (', '.join(headers_expected), ', '.join(missing_headers)))

        self.pubs_txt = u.get_TAB_delimited_file_as_dictionary(self.pubs_file_path, indexing_field=pubs_header.index('doi'))

        for doi in self.pubs_txt:
            authors = []
            co_first_authors = []
            co_senior_authors = []
            p = self.pubs_txt[doi]

            for author in [_.strip() for _ in p['Authors'].split(';')]:
                if not len(author):
                    continue

                # Authors are expected as "Last, First [Middle]" — a '*'
                # suffix marks co-first, a '+' suffix marks co-senior.
                author_last_name, author_first_name_raw = [_.strip() for _ in author.split(',')]
                # Collapse the given name(s) into initials: "A. Murat" -> "AM".
                author_first_name = ''.join([n[0] for n in author_first_name_raw.split()])
                author_final_name = '%s %s' % (author_last_name, author_first_name)

                if author_first_name_raw.endswith('*'):
                    co_first_authors.append(author_final_name)
                elif author_first_name_raw.endswith('+'):
                    co_senior_authors.append(author_final_name)

                authors.append(author_final_name)

            if p['Number']:
                issue = '%s(%s):%s' % (p['Volume'], p['Number'], p['Pages'])
            else:
                issue = '%s:%s' % (p['Volume'], p['Pages'])

            year = p['Year'].strip()
            pub_entry = {'authors': authors, 'title': p['Title'], 'journal': p['Publication'], 'issue': issue, 'doi': doi, 'year': year, 'co_first_authors': co_first_authors, 'co_senior_authors': co_senior_authors}

            if year not in self.pubs_dict:
                self.pubs_dict[year] = [pub_entry]
            else:
                self.pubs_dict[year].append(pub_entry)

    def get_markdown_text_for_pub(self, pub):
        """Gets a dictionary `pub`, returns a markdown formatted text.

           An example pub:

             {'authors': 'McLellan, S. L., and Eren, A. M.',
              'doi': '10.1016/j.tim.2014.08.002',
              'issue': '22(12), 697-706',
              'title': 'Discovering new indicators of fecal pollution.',
              'journal': 'Trends Microbiol',
              'year': 2014}
        """
        pub_md = []
        A = lambda s: pub_md.append(s)

        A('<div class="pub">')
        A('''<div class='altmetric-embed' data-badge-type='donut' data-doi="%s"></div>''' % pub['doi'])
        A('''<div class="__dimensions_badge_embed__" data-doi="%s" data-hide-zero-citations="true" data-legend="hover-bottom" data-style="small_circle"></div>''' % pub['doi'])
        if pub['doi']:
            A(' <h3><a href="%s" target="_new">%s</a></h3>' % (' https://doi.org/%s' % (pub['doi']), pub['title']))
        else:
            # No DOI: fall back to a Google Scholar title search link.
            A(' <h3><a href="http://scholar.google.com/scholar?hl=en&q=%s" target="_new">%s</a></h3>' % ('http://scholar.google.com/scholar?hl=en&q=%s' % (pub['title'].replace(' ', '+')), pub['title']))
        A(' <span class="pub-authors">%s</span>' % self.get_author_highlights(pub))

        # Footnote legend, depending on which marker kinds the pub has.
        if pub['co_first_authors'] and not pub['co_senior_authors']:
            A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors</span>')
        elif pub['co_first_authors'] and pub['co_senior_authors']:
            A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors; <sup>‡</sup>Co-senior authors</span>')
        elif pub['co_senior_authors'] and not pub['co_first_authors']:
            A(' <span class="pub-co-first-authors"><sup>‡</sup>Co-senior authors</span>')

        # Optional highlights / featured image block from the info file.
        if pub['doi'] in self.info:
            info = self.info[pub['doi']]
            A(' <div class="%s">' % ('pub-info' if info['featured_image'] else 'pub-info-no-image'))

            if info['featured_image']:
                A(' <div class="pub-featured-image">')
                A(' <a href="%s"><img src="%s" style="max-width: 100px; max-height: 80px; width: auto; border: none; height: auto; margin: 0 auto; display: block; transform: translateY(15%%);"/></a>' % (info['featured_image'], info['featured_image']))
                A(' </div>')

            highlights = info['highlights'].split(';') if info['highlights'] else None
            if highlights:
                A(' <div class="%s">' % ('pub-highlights' if info['featured_image'] else 'pub-highlights-no-image'))
                A(' %s' % '<br>'.join(['<span style="display: inline-block; padding-bottom: 5px;">- %s</span>' % h for h in highlights]))
                A(' </div>')

            A(' </div>')

        A(' <span class="pub-journal"><b>%s</b>, %s.</span>' % (pub['journal'], pub['issue']))
        A('</div>\n')

        return '\n'.join(pub_md)

    def store_markdown_output_for_pubs(self, output_file_path):
        """Write the Jekyll publications page for all parsed pubs to `output_file_path`."""
        years = ''.join(['<a href="#%s"><span class="category-item">%s</span></a>' % (y, y) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])

        # BUG FIX: the output file handle was never closed; use a context
        # manager so the page is flushed to disk deterministically.
        with open(output_file_path, 'w') as output_file:
            W = lambda s: output_file.write(s + '\n')

            W('---')
            W('layout: publications')
            W('modified: %s' % datetime.today().strftime('%Y-%m-%d'))
            W('comments: false')
            W('---\n')

            W('''<script type='text/javascript' src='https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js'></script>\n''')
            W('''<script async src="https://badge.dimensions.ai/badge.js" charset="utf-8"></script>\n''')

            W('<div class="category-box">\n%s\n</div>\n' % years)

            W('{:.notice}\n')
            W("This page shows publications that are most reflective of our interests. For a complete list, please see <a href='https://scholar.google.com/citations?user=GtLLuxoAAAAJ&view_op=list_works&sortby=pubdate' target='_blank'>Meren's Google Scholar page</a>.\n")

            for year in sorted(list(self.pubs_dict.keys()), reverse=True):
                W('<a name="%s"> </a>' % year)
                W('<h1>%s</h1>\n' % year)

                for pub in self.pubs_dict[year]:
                    W(self.get_markdown_text_for_pub(pub))
                    W('')
if __name__ == '__main__':
    pubs = Publications()
    try:
        # Parse pubs.txt / pubs_info.txt and render the Jekyll page.
        pubs.parse_pubs_txt()
        pubs.store_markdown_output_for_pubs('publications/index.md')
    except ConfigError as e:
        # Input file problems are reported, then we exit non-zero.
        print(e)
        sys.exit(-1)
| 44.776824 | 266 | 0.557749 |
sup>Co-senior authors</span>')
if pub['doi'] in self.info:
info = self.info[pub['doi']]
A(' <div class="%s">' % ('pub-info' if info['featured_image'] else 'pub-info-no-image'))
if info['featured_image']:
A(' <div class="pub-featured-image">')
A(' <a href="%s"><img src="%s" style="max-width: 100px; max-height: 80px; width: auto; border: none; height: auto; margin: 0 auto; display: block; transform: translateY(15%%);"/></a>' % (info['featured_image'], info['featured_image']))
A(' </div>')
highlights = info['highlights'].split(';') if info['highlights'] else None
if highlights:
A(' <div class="%s">' % ('pub-highlights' if info['featured_image'] else 'pub-highlights-no-image'))
A(' %s' % '<br>'.join(['<span style="display: inline-block; padding-bottom: 5px;">- %s</span>' % h for h in highlights]))
A(' </div>')
A(' </div>')
A(' <span class="pub-journal"><b>%s</b>, %s.</span>' % (pub['journal'], pub['issue']))
A('</div>\n')
return '\n'.join(pub_md)
def store_markdown_output_for_pubs(self, output_file_path):
# years = ''.join(['<a href="#%s"><span class="category-item">%s <small>(%d)</small></span></a>' % (y, y, len(self.pubs_dict[y])) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
years = ''.join(['<a href="#%s"><span class="category-item">%s</span></a>' % (y, y) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
output_file = open(output_file_path, 'w')
W = lambda s: output_file.write(s + '\n')
W('---')
W('layout: publications')
W('modified: %s' % datetime.today().strftime('%Y-%m-%d'))
W('comments: false')
W('---\n')
W('''<script type='text/javascript' src='https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js'></script>\n''')
W('''<script async src="https://badge.dimensions.ai/badge.js" charset="utf-8"></script>\n''')
W('<div class="category-box">\n%s\n</div>\n' % years)
W('{:.notice}\n')
W("This page shows publications that are most reflective of our interests. For a complete list, please see <a href='https://scholar.google.com/citations?user=GtLLuxoAAAAJ&view_op=list_works&sortby=pubdate' target='_blank'>Meren's Google Scholar page</a>.\n")
for year in sorted(list(self.pubs_dict.keys()), reverse=True):
W('<a name="%s"> </a>' % year)
W('<h1>%s</h1>\n' % year)
for pub in self.pubs_dict[year]:
W(self.get_markdown_text_for_pub(pub))
W('')
if __name__ == '__main__':
pubs = Publications()
try:
pubs.parse_pubs_txt()
pubs.store_markdown_output_for_pubs('publications/index.md')
except ConfigError as e:
print(e)
sys.exit(-1)
| true | true |
7900a4680b4f1b5c6b30355fbfadf81023efdc8c | 319 | py | Python | rewrite_multi_pis_ansilbe_hosts.py | yujmo/python | ff4802cd0ab00ba91f9ca466e52a39ce5da44791 | [
"Apache-2.0"
] | null | null | null | rewrite_multi_pis_ansilbe_hosts.py | yujmo/python | ff4802cd0ab00ba91f9ca466e52a39ce5da44791 | [
"Apache-2.0"
] | null | null | null | rewrite_multi_pis_ansilbe_hosts.py | yujmo/python | ff4802cd0ab00ba91f9ca466e52a39ce5da44791 | [
"Apache-2.0"
] | null | null | null | with open('/home/pi/kown_hosts') as kown_f,open('/home/pi/cache_hosts') as cache_f:
kown_hosts = kown_f.readlines()
cache_hosts = set(cache_f.readlines())
kown_hosts = [host.split() for host in kown_hosts]
with open('/etc/ansible/hosts','w') as wf:
wf.writelines([x.split()[1]+"\n" for x in cache_hosts])
| 35.444444 | 83 | 0.689655 | with open('/home/pi/kown_hosts') as kown_f,open('/home/pi/cache_hosts') as cache_f:
kown_hosts = kown_f.readlines()
cache_hosts = set(cache_f.readlines())
kown_hosts = [host.split() for host in kown_hosts]
with open('/etc/ansible/hosts','w') as wf:
wf.writelines([x.split()[1]+"\n" for x in cache_hosts])
| true | true |
7900a4f625b92fbaf650baeb2d4607ac690945d2 | 7,545 | py | Python | pcs_images_packages_read.py | moonman81/pc-toolbox | c88c6291118eab0f01add173243d3d1eacc39496 | [
"MIT"
] | null | null | null | pcs_images_packages_read.py | moonman81/pc-toolbox | c88c6291118eab0f01add173243d3d1eacc39496 | [
"MIT"
] | null | null | null | pcs_images_packages_read.py | moonman81/pc-toolbox | c88c6291118eab0f01add173243d3d1eacc39496 | [
"MIT"
] | null | null | null | """ Get a list of Packages in CI, Deployed, or all Images """
from __future__ import print_function
from pc_lib import pc_api, pc_utility
# --Configuration-- #

# Command-line interface. pc_utility.get_arg_parser() supplies the shared
# base parser (credentials, endpoint, etc. — defined in pc_lib); the options
# below are specific to this report.
parser = pc_utility.get_arg_parser()
parser.add_argument(
    '--mode',
    type=str,
    choices=['ci', 'deployed', 'all'],
    default='all',
    help='(Optional) - Report on CI, Deployed, or all Images.')
parser.add_argument(
    '--package_type',
    type=str,
    choices=['binary', 'gem', 'go', 'jar', 'nodejs', 'nuget', 'package', 'python', 'windows', 'all'],
    default='all',
    help='(Optional) - Report on one or all Package Types.')
parser.add_argument(
    '--image_id',
    type=str,
    help='(Optional) - ID of the Image (sha256:...).')
parser.add_argument(
    '--package_id',
    type=str,
    help='(Optional) - ID of the Package (name:version).')
args = parser.parse_args()
# Decompose the optional --package_id filter into its name and version parts.
# print_all_packages controls whether every package is echoed (no filter) or
# output is restricted to the searched package.
search_package_name = None
search_package_version = None
if not args.package_id:
    # No filter supplied: list every package.
    print_all_packages = True
else:
    print_all_packages = False
    if ':' in args.package_id:
        # "name:version" form — exactly one colon expected; more than one
        # raises ValueError, matching argparse's documented id format.
        search_package_name, search_package_version = args.package_id.split(':')
    else:
        # Bare name: match any version.
        search_package_name = args.package_id
# --Helpers-- #
def optional_print(txt='', mode=True):
    """Print *txt* only when *mode* is truthy.

    Used to mute the per-package dump when a specific package filter is
    active (mode=print_all_packages throughout this script).
    """
    if not mode:
        return
    print(txt)
# --Initialize-- #

# Load saved settings (merged with CLI args) and configure the Prisma Cloud
# API client, then verify the Compute API is reachable/licensed.
settings = pc_utility.get_settings(args)
pc_api.configure(settings)
pc_api.validate_api_compute()

# --Main-- #

# NOTE(review): get_deployed_images / get_ci_images are set but never read in
# the visible code — candidates for removal; confirm against the full file.
get_deployed_images = True
get_ci_images = True
# Accumulators for images that contain the searched package, filled by the
# Deployed and CI sections below and reported at the end of the script.
deployed_images_with_package = []
ci_images_with_package = []
"""
"instances": [{
"image": "k8s.gcr.io/etcd:3.4.3-0",
"host": "demo",
"registry": "k8s.gcr.io"
"repo": "etcd",
"tag": "3.4.3-0",
}],
"packages": [{
"pkgsType": "package",
"pkgs": [{
"version": "2.27-2",
"name": "grep",
"cveCount": 12,
"license": "GPL-3+",
"layerTime": 1557275612
}],
"pkgsType": [
"binary",
"gem",
"go",
"jar",
"nodejs",
"nuget",
"package",
"python",
"windows",
]
"""
# Smoke-test Compute API access before doing any real work.
print('Testing Compute API Access ...', end='')
# NOTE(review): the returned value is never used — the call serves only to
# fail fast if the API is unreachable; confirm before removing the binding.
intelligence = pc_api.statuses_intelligence()
print(' done.')
print()

# Announce the active package filter, if any.
if search_package_name:
    print('Searching for Package: (%s) Version: (%s)' % (search_package_name, search_package_version))
    print()
# Monitor > Vulnerabilities/Compliance > Images > Deployed
#
# Collect deployed images keyed by image id, optionally echo their packages,
# and record which images contain the searched package.
deployed_images = {}
if args.mode in ['deployed', 'all']:
    print('Getting Deployed Images ...')
    images = pc_api.images_list_read(args.image_id)
    for image in images:
        image_id = image['_id']
        # Guard against an empty 'instances' array instead of assuming
        # element [0] exists (the CI section already guards this case, and
        # the original carried a TODO here about verifying the array length).
        if image['instances']:
            image_ii = '%s %s' % (image['instances'][0]['image'], image['instances'][0]['host'])
        else:
            image_ii = None
        deployed_images[image_id] = {
            'id': image['_id'],
            'instance': image_ii,
            'instances': image['instances'],
            'packages': image['packages']}
    optional_print(mode=print_all_packages)
    for image in deployed_images:
        optional_print('Deployed Image', mode=print_all_packages)
        optional_print('ID: %s' % image, mode=print_all_packages)
        optional_print('Instance: %s' % deployed_images[image]['instance'], mode=print_all_packages)
        optional_print(mode=print_all_packages)
        if not deployed_images[image]['packages']:
            continue
        for package_type in deployed_images[image]['packages']:
            for package in package_type['pkgs']:
                optional_print('\tType: %s' % package_type['pkgsType'], mode=print_all_packages)
                optional_print('\tName: %s' % package['name'], mode=print_all_packages)
                optional_print('\tVers: %s' % package['version'], mode=print_all_packages)
                optional_print('\tCVEs: %s' % package['cveCount'], mode=print_all_packages)
                optional_print(mode=print_all_packages)
                # Record a hit when the package (and version, if given) matches.
                if args.package_type in [package_type['pkgsType'], 'all']:
                    if search_package_name and (search_package_name == package['name']):
                        if search_package_version:
                            if search_package_version == package['version']:
                                deployed_images_with_package.append(deployed_images[image]['instance'])
                        else:
                            deployed_images_with_package.append(deployed_images[image]['instance'])
    print('Done.')
    print()
# Monitor > Vulnerabilities/Compliance > Images > CI
#
# Same shape as the Deployed section, but sourced from CI scan results.
ci_images = {}
if args.mode in ['ci', 'all']:
    print('Getting CI Images ...')
    images = pc_api.scans_list_read(args.image_id)
    for image in images:
        image_id = image['entityInfo']['id']
        if image['entityInfo']['instances']:
            image_ii = '%s %s' % (image['entityInfo']['instances'][0]['image'], image['entityInfo']['instances'][0]['host'])
        else:
            image_ii = None
        ci_images[image_id] = {
            'id': image['entityInfo']['id'],
            'instance': image_ii,
            'instances': image['entityInfo']['instances'],
            'packages': image['entityInfo']['packages']}
    optional_print(mode=print_all_packages)
    for image in ci_images:
        optional_print('CI Image', mode=print_all_packages)
        optional_print('ID: %s' % image, mode=print_all_packages)
        optional_print('Instance: %s' % ci_images[image]['instance'], mode=print_all_packages)
        optional_print(mode=print_all_packages)
        if not ci_images[image]['packages']:
            continue
        for package_type in ci_images[image]['packages']:
            for package in package_type['pkgs']:
                optional_print('\tType: %s' % package_type['pkgsType'], mode=print_all_packages)
                optional_print('\tName: %s' % package['name'], mode=print_all_packages)
                optional_print('\tVers: %s' % package['version'], mode=print_all_packages)
                optional_print('\tCVEs: %s' % package['cveCount'], mode=print_all_packages)
                optional_print(mode=print_all_packages)
                if args.package_type in [package_type['pkgsType'], 'all']:
                    if search_package_name and (search_package_name == package['name']):
                        if search_package_version:
                            if search_package_version == package['version']:
                                # BUGFIX: was deployed_images[image]['instance'],
                                # a copy-paste from the Deployed section — it
                                # raised KeyError with --mode ci (deployed_images
                                # empty) and recorded the wrong instance otherwise.
                                ci_images_with_package.append(ci_images[image]['instance'])
                        else:
                            # BUGFIX: same copy-paste error as above.
                            ci_images_with_package.append(ci_images[image]['instance'])
    print('Done.')
    print()
# Final report: only meaningful when a --package_id filter was supplied.
if args.package_id:
    if args.mode in ['deployed', 'all']:
        print()
        if deployed_images_with_package:
            print('Package: (%s) Version: (%s) found in these Deployed Images:' % (search_package_name, search_package_version))
            print()
            for image in deployed_images_with_package:
                print('\t%s' % image)
        else:
            print('Package: (%s) Version: (%s) not found in any Deployed Images' % (search_package_name, search_package_version))
    if args.mode in ['ci', 'all']:
        print()
        if ci_images_with_package:
            print('Package: (%s) Version: (%s) found in these CI Images:' % (search_package_name, search_package_version))
            print()
            for image in ci_images_with_package:
                print('\t%s' % image)
        else:
            print('Package: (%s) Version: (%s) not found in any CI Images' % (search_package_name, search_package_version))
| 37.351485 | 129 | 0.605302 |
from __future__ import print_function
from pc_lib import pc_api, pc_utility
parser = pc_utility.get_arg_parser()
parser.add_argument(
'--mode',
type=str,
choices=['ci', 'deployed', 'all'],
default='all',
help='(Optional) - Report on CI, Deployed, or all Images.')
parser.add_argument(
'--package_type',
type=str,
choices=['binary', 'gem', 'go', 'jar', 'nodejs', 'nuget', 'package', 'python', 'windows', 'all'],
default='all',
help='(Optional) - Report on one or all Package Types.')
parser.add_argument(
'--image_id',
type=str,
help='(Optional) - ID of the Image (sha256:...).')
parser.add_argument(
'--package_id',
type=str,
help='(Optional) - ID of the Package (name:version).')
args = parser.parse_args()
search_package_name = None
search_package_version = None
if args.package_id:
print_all_packages = False
if ':' in args.package_id:
[search_package_name, search_package_version] = args.package_id.split(':')
else:
search_package_name = args.package_id
else:
print_all_packages = True
def optional_print(txt='', mode=True):
if mode:
print(txt)
settings = pc_utility.get_settings(args)
pc_api.configure(settings)
pc_api.validate_api_compute()
get_deployed_images = True
get_ci_images = True
deployed_images_with_package = []
ci_images_with_package = []
print('Testing Compute API Access ...', end='')
intelligence = pc_api.statuses_intelligence()
print(' done.')
print()
if search_package_name:
print('Searching for Package: (%s) Version: (%s)' % (search_package_name, search_package_version))
print()
deployed_images = {}
if args.mode in ['deployed', 'all']:
print('Getting Deployed Images ...')
images = pc_api.images_list_read(args.image_id)
for image in images:
image_id = image['_id']
image_ii = '%s %s' % (image['instances'][0]['image'], image['instances'][0]['host'])
deployed_images[image_id] = {
'id': image['_id'],
'instance': image_ii,
'instances': image['instances'],
'packages': image['packages']}
optional_print(mode=print_all_packages)
for image in deployed_images:
optional_print('Deployed Image', mode=print_all_packages)
optional_print('ID: %s' % image, mode=print_all_packages)
optional_print('Instance: %s' % deployed_images[image]['instance'], mode=print_all_packages)
optional_print(mode=print_all_packages)
if not deployed_images[image]['packages']:
continue
for package_type in deployed_images[image]['packages']:
for package in package_type['pkgs']:
optional_print('\tType: %s' % package_type['pkgsType'], mode=print_all_packages)
optional_print('\tName: %s' % package['name'], mode=print_all_packages)
optional_print('\tVers: %s' % package['version'], mode=print_all_packages)
optional_print('\tCVEs: %s' % package['cveCount'], mode=print_all_packages)
optional_print(mode=print_all_packages)
if args.package_type in [package_type['pkgsType'], 'all']:
if search_package_name and (search_package_name == package['name']):
if search_package_version:
if search_package_version == package['version']:
deployed_images_with_package.append(deployed_images[image]['instance'])
else:
deployed_images_with_package.append(deployed_images[image]['instance'])
print('Done.')
print()
ci_images = {}
if args.mode in ['ci', 'all']:
print('Getting CI Images ...')
images = pc_api.scans_list_read(args.image_id)
for image in images:
image_id = image['entityInfo']['id']
if image['entityInfo']['instances']:
image_ii = '%s %s' % (image['entityInfo']['instances'][0]['image'], image['entityInfo']['instances'][0]['host'])
else:
image_ii = None
ci_images[image_id] = {
'id': image['entityInfo']['id'],
'instance': image_ii,
'instances': image['entityInfo']['instances'],
'packages': image['entityInfo']['packages']}
optional_print(mode=print_all_packages)
for image in ci_images:
optional_print('CI Image', mode=print_all_packages)
optional_print('ID: %s' % image, mode=print_all_packages)
optional_print('Instance: %s' % ci_images[image]['instance'], mode=print_all_packages)
optional_print(mode=print_all_packages)
if not ci_images[image]['packages']:
continue
for package_type in ci_images[image]['packages']:
for package in package_type['pkgs']:
optional_print('\tType: %s' % package_type['pkgsType'], mode=print_all_packages)
optional_print('\tName: %s' % package['name'], mode=print_all_packages)
optional_print('\tVers: %s' % package['version'], mode=print_all_packages)
optional_print('\tCVEs: %s' % package['cveCount'], mode=print_all_packages)
optional_print(mode=print_all_packages)
if args.package_type in [package_type['pkgsType'], 'all']:
if search_package_name and (search_package_name == package['name']):
if search_package_version:
if search_package_version == package['version']:
ci_images_with_package.append(deployed_images[image]['instance'])
else:
ci_images_with_package.append(deployed_images[image]['instance'])
print('Done.')
print()
if args.package_id:
if args.mode in ['deployed', 'all']:
print()
if deployed_images_with_package:
print('Package: (%s) Version: (%s) found in these Deployed Images:' % (search_package_name, search_package_version))
print()
for image in deployed_images_with_package:
print('\t%s' % image)
else:
print('Package: (%s) Version: (%s) not found in any Deployed Images' % (search_package_name, search_package_version))
if args.mode in ['ci', 'all']:
print()
if ci_images_with_package:
print('Package: (%s) Version: (%s) found in these CI Images:' % (search_package_name, search_package_version))
print()
for image in ci_images_with_package:
print('\t%s' % image)
else:
print('Package: (%s) Version: (%s) not found in any CI Images' % (search_package_name, search_package_version))
| true | true |
7900a610a7fe99f11beeab17e51704daf0e039b4 | 11,974 | py | Python | simba/make/simbaerrno.py | ghsecuritylab/N17 | 2291615396e97923ffd655d1087222f6fb1f86bd | [
"MIT"
] | 325 | 2015-11-12T15:21:39.000Z | 2022-01-11T09:39:36.000Z | simba/make/simbaerrno.py | ghsecuritylab/N17 | 2291615396e97923ffd655d1087222f6fb1f86bd | [
"MIT"
] | 216 | 2016-01-02T10:57:11.000Z | 2021-08-25T05:36:51.000Z | simba/make/simbaerrno.py | ghsecuritylab/N17 | 2291615396e97923ffd655d1087222f6fb1f86bd | [
"MIT"
] | 101 | 2015-12-28T16:21:27.000Z | 2022-03-29T11:59:01.000Z | errno_map = {
"1": {
"comment": "Operation not permitted",
"name": "EPERM"
},
"2": {
"comment": "No such file or directory",
"name": "ENOENT"
},
"3": {
"comment": "No such process",
"name": "ESRCH"
},
"4": {
"comment": "Interrupted system call",
"name": "EINTR"
},
"5": {
"comment": "I/O error",
"name": "EIO"
},
"6": {
"comment": "No such device or address",
"name": "ENXIO"
},
"7": {
"comment": "Argument list too long",
"name": "E2BIG"
},
"8": {
"comment": "Exec format error",
"name": "ENOEXEC"
},
"9": {
"comment": "Bad file number",
"name": "EBADF"
},
"10": {
"comment": "No child processes",
"name": "ECHILD"
},
"11": {
"comment": "Try again",
"name": "EAGAIN"
},
"12": {
"comment": "Out of memory",
"name": "ENOMEM"
},
"13": {
"comment": "Permission denied",
"name": "EACCES"
},
"14": {
"comment": "Bad address",
"name": "EFAULT"
},
"15": {
"comment": "Block device required",
"name": "ENOTBLK"
},
"16": {
"comment": "Device or resource busy",
"name": "EBUSY"
},
"17": {
"comment": "File exists",
"name": "EEXIST"
},
"18": {
"comment": "Cross-device link",
"name": "EXDEV"
},
"19": {
"comment": "No such device",
"name": "ENODEV"
},
"20": {
"comment": "Not a directory",
"name": "ENOTDIR"
},
"21": {
"comment": "Is a directory",
"name": "EISDIR"
},
"22": {
"comment": "Invalid argument",
"name": "EINVAL"
},
"23": {
"comment": "File table overflow",
"name": "ENFILE"
},
"24": {
"comment": "Too many open files",
"name": "EMFILE"
},
"25": {
"comment": "Not a typewriter",
"name": "ENOTTY"
},
"26": {
"comment": "Text file busy",
"name": "ETXTBSY"
},
"27": {
"comment": "File too large",
"name": "EFBIG"
},
"28": {
"comment": "No space left on device",
"name": "ENOSPC"
},
"29": {
"comment": "Illegal seek",
"name": "ESPIPE"
},
"30": {
"comment": "Read-only file system",
"name": "EROFS"
},
"31": {
"comment": "Too many links",
"name": "EMLINK"
},
"32": {
"comment": "Broken pipe",
"name": "EPIPE"
},
"33": {
"comment": "Math argument out of domain of func",
"name": "EDOM"
},
"34": {
"comment": "Math result not representable",
"name": "ERANGE"
},
"35": {
"comment": "Resource deadlock would occur",
"name": "EDEADLK"
},
"36": {
"comment": "File name too long",
"name": "ENAMETOOLONG"
},
"37": {
"comment": "No record locks available",
"name": "ENOLCK"
},
"38": {
"comment": "Function not implemented",
"name": "ENOSYS"
},
"39": {
"comment": "Directory not empty",
"name": "ENOTEMPTY"
},
"40": {
"comment": "Too many symbolic links encountered",
"name": "ELOOP"
},
"42": {
"comment": "No message of desired type",
"name": "ENOMSG"
},
"43": {
"comment": "Identifier removed",
"name": "EIDRM"
},
"44": {
"comment": "Channel number out of range",
"name": "ECHRNG"
},
"45": {
"comment": "Level 2 not synchronized",
"name": "EL2NSYNC"
},
"46": {
"comment": "Level 3 halted",
"name": "EL3HLT"
},
"47": {
"comment": "Level 3 reset",
"name": "EL3RST"
},
"48": {
"comment": "Link number out of range",
"name": "ELNRNG"
},
"49": {
"comment": "Protocol driver not attached",
"name": "EUNATCH"
},
"50": {
"comment": "No CSI structure available",
"name": "ENOCSI"
},
"51": {
"comment": "Level 2 halted",
"name": "EL2HLT"
},
"52": {
"comment": "Invalid exchange",
"name": "EBADE"
},
"53": {
"comment": "Invalid request descriptor",
"name": "EBADR"
},
"54": {
"comment": "Exchange full",
"name": "EXFULL"
},
"55": {
"comment": "No anode",
"name": "ENOANO"
},
"56": {
"comment": "Invalid request code",
"name": "EBADRQC"
},
"57": {
"comment": "Invalid slot",
"name": "EBADSLT"
},
"59": {
"comment": "Bad font file format",
"name": "EBFONT"
},
"60": {
"comment": "Device not a stream",
"name": "ENOSTR"
},
"61": {
"comment": "No data available",
"name": "ENODATA"
},
"62": {
"comment": "Timer expired",
"name": "ETIME"
},
"63": {
"comment": "Out of streams resources",
"name": "ENOSR"
},
"64": {
"comment": "Machine is not on the network",
"name": "ENONET"
},
"65": {
"comment": "Package not installed",
"name": "ENOPKG"
},
"66": {
"comment": "Object is remote",
"name": "EREMOTE"
},
"67": {
"comment": "Link has been severed",
"name": "ENOLINK"
},
"68": {
"comment": "Advertise error",
"name": "EADV"
},
"69": {
"comment": "Srmount error",
"name": "ESRMNT"
},
"70": {
"comment": "Communication error on send",
"name": "ECOMM"
},
"71": {
"comment": "Protocol error",
"name": "EPROTO"
},
"72": {
"comment": "Multihop attempted",
"name": "EMULTIHOP"
},
"73": {
"comment": "RFS specific error",
"name": "EDOTDOT"
},
"74": {
"comment": "Not a data message",
"name": "EBADMSG"
},
"75": {
"comment": "Value too large for defined data type",
"name": "EOVERFLOW"
},
"76": {
"comment": "Name not unique on network",
"name": "ENOTUNIQ"
},
"77": {
"comment": "File descriptor in bad state",
"name": "EBADFD"
},
"78": {
"comment": "Remote address changed",
"name": "EREMCHG"
},
"79": {
"comment": "Can not access a needed shared library",
"name": "ELIBACC"
},
"80": {
"comment": "Accessing a corrupted shared library",
"name": "ELIBBAD"
},
"81": {
"comment": ".lib section in a.out corrupted",
"name": "ELIBSCN"
},
"82": {
"comment": "Attempting to link in too many shared libraries",
"name": "ELIBMAX"
},
"83": {
"comment": "Cannot exec a shared library directly",
"name": "ELIBEXEC"
},
"84": {
"comment": "Illegal byte sequence",
"name": "EILSEQ"
},
"85": {
"comment": "Interrupted system call should be restarted",
"name": "ERESTART"
},
"86": {
"comment": "Streams pipe error",
"name": "ESTRPIPE"
},
"87": {
"comment": "Too many users",
"name": "EUSERS"
},
"88": {
"comment": "Socket operation on non-socket",
"name": "ENOTSOCK"
},
"89": {
"comment": "Destination address required",
"name": "EDESTADDRREQ"
},
"90": {
"comment": "Message too long",
"name": "EMSGSIZE"
},
"91": {
"comment": "Protocol wrong type for socket",
"name": "EPROTOTYPE"
},
"92": {
"comment": "Protocol not available",
"name": "ENOPROTOOPT"
},
"93": {
"comment": "Protocol not supported",
"name": "EPROTONOSUPBOARD"
},
"94": {
"comment": "Socket type not supported",
"name": "ESOCKTNOSUPBOARD"
},
"95": {
"comment": "Operation not supported on transport endpoint",
"name": "EOPNOTSUPP"
},
"96": {
"comment": "Protocol family not supported",
"name": "EPFNOSUPBOARD"
},
"97": {
"comment": "Address family not supported by protocol",
"name": "EAFNOSUPBOARD"
},
"98": {
"comment": "Address already in use",
"name": "EADDRINUSE"
},
"99": {
"comment": "Cannot assign requested address",
"name": "EADDRNOTAVAIL"
},
"100": {
"comment": "Network is down",
"name": "ENETDOWN"
},
"101": {
"comment": "Network is unreachable",
"name": "ENETUNREACH"
},
"102": {
"comment": "Network dropped connection because of reset",
"name": "ENETRESET"
},
"103": {
"comment": "Software caused connection abort",
"name": "ECONNABORTED"
},
"104": {
"comment": "Connection reset by peer",
"name": "ECONNRESET"
},
"105": {
"comment": "No buffer space available",
"name": "ENOBUFS"
},
"106": {
"comment": "Transport endpoint is already connected",
"name": "EISCONN"
},
"107": {
"comment": "Transport endpoint is not connected",
"name": "ENOTCONN"
},
"108": {
"comment": "Cannot send after transport endpoint shutdown",
"name": "ESHUTDOWN"
},
"109": {
"comment": "Too many references: cannot splice",
"name": "ETOOMANYREFS"
},
"110": {
"comment": "Connection timed out",
"name": "ETIMEDOUT"
},
"111": {
"comment": "Connection refused",
"name": "ECONNREFUSED"
},
"112": {
"comment": "Host is down",
"name": "EHOSTDOWN"
},
"113": {
"comment": "No route to host",
"name": "EHOSTUNREACH"
},
"114": {
"comment": "Operation already in progress",
"name": "EALREADY"
},
"115": {
"comment": "Operation now in progress",
"name": "EINPROGRESS"
},
"116": {
"comment": "Stale NFS file handle",
"name": "ESTALE"
},
"117": {
"comment": "Structure needs cleaning",
"name": "EUCLEAN"
},
"118": {
"comment": "Not a XENIX named type file",
"name": "ENOTNAM"
},
"119": {
"comment": "No XENIX sems available",
"name": "ENAVAIL"
},
"120": {
"comment": "Is a named type file",
"name": "EISNAM"
},
"121": {
"comment": "Remote I/O error",
"name": "EREMOTEIO"
},
"122": {
"comment": "Quota exceeded",
"name": "EDQUOT"
},
"123": {
"comment": "No medium found",
"name": "ENOMEDIUM"
},
"124": {
"comment": "Wrong medium type",
"name": "EMEDIUMTYPE"
},
"125": {
"comment": "Operation Canceled",
"name": "ECANCELED"
},
"126": {
"comment": "Required key not available",
"name": "ENOKEY"
},
"127": {
"comment": "Key has expired",
"name": "EKEYEXPIRED"
},
"128": {
"comment": "Key has been revoked",
"name": "EKEYREVOKED"
},
"129": {
"comment": "Key was rejected by service",
"name": "EKEYREJECTED"
},
"1000": {
"comment": "Stack corrupt.",
"name": "ESTACK"
},
"1001": {
"comment": "Watchdog timeout.",
"name": "EWATCHDOGTIMEOUT"
}
} | 23.11583 | 70 | 0.431769 | errno_map = {
"1": {
"comment": "Operation not permitted",
"name": "EPERM"
},
"2": {
"comment": "No such file or directory",
"name": "ENOENT"
},
"3": {
"comment": "No such process",
"name": "ESRCH"
},
"4": {
"comment": "Interrupted system call",
"name": "EINTR"
},
"5": {
"comment": "I/O error",
"name": "EIO"
},
"6": {
"comment": "No such device or address",
"name": "ENXIO"
},
"7": {
"comment": "Argument list too long",
"name": "E2BIG"
},
"8": {
"comment": "Exec format error",
"name": "ENOEXEC"
},
"9": {
"comment": "Bad file number",
"name": "EBADF"
},
"10": {
"comment": "No child processes",
"name": "ECHILD"
},
"11": {
"comment": "Try again",
"name": "EAGAIN"
},
"12": {
"comment": "Out of memory",
"name": "ENOMEM"
},
"13": {
"comment": "Permission denied",
"name": "EACCES"
},
"14": {
"comment": "Bad address",
"name": "EFAULT"
},
"15": {
"comment": "Block device required",
"name": "ENOTBLK"
},
"16": {
"comment": "Device or resource busy",
"name": "EBUSY"
},
"17": {
"comment": "File exists",
"name": "EEXIST"
},
"18": {
"comment": "Cross-device link",
"name": "EXDEV"
},
"19": {
"comment": "No such device",
"name": "ENODEV"
},
"20": {
"comment": "Not a directory",
"name": "ENOTDIR"
},
"21": {
"comment": "Is a directory",
"name": "EISDIR"
},
"22": {
"comment": "Invalid argument",
"name": "EINVAL"
},
"23": {
"comment": "File table overflow",
"name": "ENFILE"
},
"24": {
"comment": "Too many open files",
"name": "EMFILE"
},
"25": {
"comment": "Not a typewriter",
"name": "ENOTTY"
},
"26": {
"comment": "Text file busy",
"name": "ETXTBSY"
},
"27": {
"comment": "File too large",
"name": "EFBIG"
},
"28": {
"comment": "No space left on device",
"name": "ENOSPC"
},
"29": {
"comment": "Illegal seek",
"name": "ESPIPE"
},
"30": {
"comment": "Read-only file system",
"name": "EROFS"
},
"31": {
"comment": "Too many links",
"name": "EMLINK"
},
"32": {
"comment": "Broken pipe",
"name": "EPIPE"
},
"33": {
"comment": "Math argument out of domain of func",
"name": "EDOM"
},
"34": {
"comment": "Math result not representable",
"name": "ERANGE"
},
"35": {
"comment": "Resource deadlock would occur",
"name": "EDEADLK"
},
"36": {
"comment": "File name too long",
"name": "ENAMETOOLONG"
},
"37": {
"comment": "No record locks available",
"name": "ENOLCK"
},
"38": {
"comment": "Function not implemented",
"name": "ENOSYS"
},
"39": {
"comment": "Directory not empty",
"name": "ENOTEMPTY"
},
"40": {
"comment": "Too many symbolic links encountered",
"name": "ELOOP"
},
"42": {
"comment": "No message of desired type",
"name": "ENOMSG"
},
"43": {
"comment": "Identifier removed",
"name": "EIDRM"
},
"44": {
"comment": "Channel number out of range",
"name": "ECHRNG"
},
"45": {
"comment": "Level 2 not synchronized",
"name": "EL2NSYNC"
},
"46": {
"comment": "Level 3 halted",
"name": "EL3HLT"
},
"47": {
"comment": "Level 3 reset",
"name": "EL3RST"
},
"48": {
"comment": "Link number out of range",
"name": "ELNRNG"
},
"49": {
"comment": "Protocol driver not attached",
"name": "EUNATCH"
},
"50": {
"comment": "No CSI structure available",
"name": "ENOCSI"
},
"51": {
"comment": "Level 2 halted",
"name": "EL2HLT"
},
"52": {
"comment": "Invalid exchange",
"name": "EBADE"
},
"53": {
"comment": "Invalid request descriptor",
"name": "EBADR"
},
"54": {
"comment": "Exchange full",
"name": "EXFULL"
},
"55": {
"comment": "No anode",
"name": "ENOANO"
},
"56": {
"comment": "Invalid request code",
"name": "EBADRQC"
},
"57": {
"comment": "Invalid slot",
"name": "EBADSLT"
},
"59": {
"comment": "Bad font file format",
"name": "EBFONT"
},
"60": {
"comment": "Device not a stream",
"name": "ENOSTR"
},
"61": {
"comment": "No data available",
"name": "ENODATA"
},
"62": {
"comment": "Timer expired",
"name": "ETIME"
},
"63": {
"comment": "Out of streams resources",
"name": "ENOSR"
},
"64": {
"comment": "Machine is not on the network",
"name": "ENONET"
},
"65": {
"comment": "Package not installed",
"name": "ENOPKG"
},
"66": {
"comment": "Object is remote",
"name": "EREMOTE"
},
"67": {
"comment": "Link has been severed",
"name": "ENOLINK"
},
"68": {
"comment": "Advertise error",
"name": "EADV"
},
"69": {
"comment": "Srmount error",
"name": "ESRMNT"
},
"70": {
"comment": "Communication error on send",
"name": "ECOMM"
},
"71": {
"comment": "Protocol error",
"name": "EPROTO"
},
"72": {
"comment": "Multihop attempted",
"name": "EMULTIHOP"
},
"73": {
"comment": "RFS specific error",
"name": "EDOTDOT"
},
"74": {
"comment": "Not a data message",
"name": "EBADMSG"
},
"75": {
"comment": "Value too large for defined data type",
"name": "EOVERFLOW"
},
"76": {
"comment": "Name not unique on network",
"name": "ENOTUNIQ"
},
"77": {
"comment": "File descriptor in bad state",
"name": "EBADFD"
},
"78": {
"comment": "Remote address changed",
"name": "EREMCHG"
},
"79": {
"comment": "Can not access a needed shared library",
"name": "ELIBACC"
},
"80": {
"comment": "Accessing a corrupted shared library",
"name": "ELIBBAD"
},
"81": {
"comment": ".lib section in a.out corrupted",
"name": "ELIBSCN"
},
"82": {
"comment": "Attempting to link in too many shared libraries",
"name": "ELIBMAX"
},
"83": {
"comment": "Cannot exec a shared library directly",
"name": "ELIBEXEC"
},
"84": {
"comment": "Illegal byte sequence",
"name": "EILSEQ"
},
"85": {
"comment": "Interrupted system call should be restarted",
"name": "ERESTART"
},
"86": {
"comment": "Streams pipe error",
"name": "ESTRPIPE"
},
"87": {
"comment": "Too many users",
"name": "EUSERS"
},
"88": {
"comment": "Socket operation on non-socket",
"name": "ENOTSOCK"
},
"89": {
"comment": "Destination address required",
"name": "EDESTADDRREQ"
},
"90": {
"comment": "Message too long",
"name": "EMSGSIZE"
},
"91": {
"comment": "Protocol wrong type for socket",
"name": "EPROTOTYPE"
},
"92": {
"comment": "Protocol not available",
"name": "ENOPROTOOPT"
},
"93": {
"comment": "Protocol not supported",
"name": "EPROTONOSUPBOARD"
},
"94": {
"comment": "Socket type not supported",
"name": "ESOCKTNOSUPBOARD"
},
"95": {
"comment": "Operation not supported on transport endpoint",
"name": "EOPNOTSUPP"
},
"96": {
"comment": "Protocol family not supported",
"name": "EPFNOSUPBOARD"
},
"97": {
"comment": "Address family not supported by protocol",
"name": "EAFNOSUPBOARD"
},
"98": {
"comment": "Address already in use",
"name": "EADDRINUSE"
},
"99": {
"comment": "Cannot assign requested address",
"name": "EADDRNOTAVAIL"
},
"100": {
"comment": "Network is down",
"name": "ENETDOWN"
},
"101": {
"comment": "Network is unreachable",
"name": "ENETUNREACH"
},
"102": {
"comment": "Network dropped connection because of reset",
"name": "ENETRESET"
},
"103": {
"comment": "Software caused connection abort",
"name": "ECONNABORTED"
},
"104": {
"comment": "Connection reset by peer",
"name": "ECONNRESET"
},
"105": {
"comment": "No buffer space available",
"name": "ENOBUFS"
},
"106": {
"comment": "Transport endpoint is already connected",
"name": "EISCONN"
},
"107": {
"comment": "Transport endpoint is not connected",
"name": "ENOTCONN"
},
"108": {
"comment": "Cannot send after transport endpoint shutdown",
"name": "ESHUTDOWN"
},
"109": {
"comment": "Too many references: cannot splice",
"name": "ETOOMANYREFS"
},
"110": {
"comment": "Connection timed out",
"name": "ETIMEDOUT"
},
"111": {
"comment": "Connection refused",
"name": "ECONNREFUSED"
},
"112": {
"comment": "Host is down",
"name": "EHOSTDOWN"
},
"113": {
"comment": "No route to host",
"name": "EHOSTUNREACH"
},
"114": {
"comment": "Operation already in progress",
"name": "EALREADY"
},
"115": {
"comment": "Operation now in progress",
"name": "EINPROGRESS"
},
"116": {
"comment": "Stale NFS file handle",
"name": "ESTALE"
},
"117": {
"comment": "Structure needs cleaning",
"name": "EUCLEAN"
},
"118": {
"comment": "Not a XENIX named type file",
"name": "ENOTNAM"
},
"119": {
"comment": "No XENIX sems available",
"name": "ENAVAIL"
},
"120": {
"comment": "Is a named type file",
"name": "EISNAM"
},
"121": {
"comment": "Remote I/O error",
"name": "EREMOTEIO"
},
"122": {
"comment": "Quota exceeded",
"name": "EDQUOT"
},
"123": {
"comment": "No medium found",
"name": "ENOMEDIUM"
},
"124": {
"comment": "Wrong medium type",
"name": "EMEDIUMTYPE"
},
"125": {
"comment": "Operation Canceled",
"name": "ECANCELED"
},
"126": {
"comment": "Required key not available",
"name": "ENOKEY"
},
"127": {
"comment": "Key has expired",
"name": "EKEYEXPIRED"
},
"128": {
"comment": "Key has been revoked",
"name": "EKEYREVOKED"
},
"129": {
"comment": "Key was rejected by service",
"name": "EKEYREJECTED"
},
"1000": {
"comment": "Stack corrupt.",
"name": "ESTACK"
},
"1001": {
"comment": "Watchdog timeout.",
"name": "EWATCHDOGTIMEOUT"
}
} | true | true |
7900a7398b9d2533f4dca9741bca6b386cc6c3b5 | 6,338 | py | Python | pysnmp-with-texts/SNMP-MPD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/SNMP-MPD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/SNMP-MPD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module SNMP-MPD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SNMP-MPD-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:08:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
IpAddress, TimeTicks, ObjectIdentity, snmpModules, ModuleIdentity, Integer32, Counter64, Counter32, Unsigned32, iso, Bits, NotificationType, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "TimeTicks", "ObjectIdentity", "snmpModules", "ModuleIdentity", "Integer32", "Counter64", "Counter32", "Unsigned32", "iso", "Bits", "NotificationType", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for SNMP-MPD-MIB, rooted at OID 1.3.6.1.6.3.11.
snmpMPDMIB = ModuleIdentity((1, 3, 6, 1, 6, 3, 11))
snmpMPDMIB.setRevisions(('2002-10-14 00:00', '1999-05-04 16:36', '1997-09-30 00:00',))
# Revision descriptions are only supported by newer pysnmp builders (> 4.4.0).
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: snmpMPDMIB.setRevisionsDescriptions(('Updated addresses, published as RFC 3412.', 'Updated addresses, published as RFC 2572.', 'Original version, published as RFC 2272.',))
# Human-readable texts are attached only when the builder was asked to load them.
if mibBuilder.loadTexts: snmpMPDMIB.setLastUpdated('200210140000Z')
if mibBuilder.loadTexts: snmpMPDMIB.setOrganization('SNMPv3 Working Group')
if mibBuilder.loadTexts: snmpMPDMIB.setContactInfo('WG-EMail: snmpv3@lists.tislabs.com Subscribe: snmpv3-request@lists.tislabs.com Co-Chair: Russ Mundy Network Associates Laboratories postal: 15204 Omega Drive, Suite 300 Rockville, MD 20850-4601 USA EMail: mundy@tislabs.com phone: +1 301-947-7107 Co-Chair & Co-editor: David Harrington Enterasys Networks postal: 35 Industrial Way P. O. Box 5005 Rochester NH 03866-5005 USA EMail: dbh@enterasys.com phone: +1 603-337-2614 Co-editor: Jeffrey Case SNMP Research, Inc. postal: 3001 Kimberlin Heights Road Knoxville, TN 37920-9716 USA EMail: case@snmp.com phone: +1 423-573-1434 Co-editor: Randy Presuhn BMC Software, Inc. postal: 2141 North First Street San Jose, CA 95131 USA EMail: randy_presuhn@bmc.com phone: +1 408-546-1006 Co-editor: Bert Wijnen Lucent Technologies postal: Schagen 33 3461 GL Linschoten Netherlands EMail: bwijnen@lucent.com phone: +31 348-680-485 ')
if mibBuilder.loadTexts: snmpMPDMIB.setDescription('The MIB for Message Processing and Dispatching Copyright (C) The Internet Society (2002). This version of this MIB module is part of RFC 3412; see the RFC itself for full legal notices. ')
# Subtree anchors under snmpMPDMIB (1.3.6.1.6.3.11).
snmpMPDAdmin = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 1))
snmpMPDMIBObjects = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 2))
snmpMPDMIBConformance = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3))
# Statistics branch holding the three read-only MPD error counters below.
snmpMPDStats = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 2, 1))
# Counter of packets dropped for an unknown/unsupported securityModel.
snmpUnknownSecurityModels = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownSecurityModels.setStatus('current')
if mibBuilder.loadTexts: snmpUnknownSecurityModels.setDescription('The total number of packets received by the SNMP engine which were dropped because they referenced a securityModel that was not known to or supported by the SNMP engine. ')
# Counter of packets dropped for invalid/inconsistent message components.
snmpInvalidMsgs = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInvalidMsgs.setStatus('current')
if mibBuilder.loadTexts: snmpInvalidMsgs.setDescription('The total number of packets received by the SNMP engine which were dropped because there were invalid or inconsistent components in the SNMP message. ')
# Counter of packets whose PDU had no registered application to handle it.
snmpUnknownPDUHandlers = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownPDUHandlers.setStatus('current')
if mibBuilder.loadTexts: snmpUnknownPDUHandlers.setDescription('The total number of packets received by the SNMP engine which were dropped because the PDU contained in the packet could not be passed to an application responsible for handling the pduType, e.g. no SNMP application had registered for the proper combination of the contextEngineID and the pduType. ')
# Conformance subtrees.
snmpMPDMIBCompliances = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3, 1))
snmpMPDMIBGroups = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3, 2))
# Compliance statement: an implementation must provide snmpMPDGroup.
snmpMPDCompliance = ModuleCompliance((1, 3, 6, 1, 6, 3, 11, 3, 1, 1)).setObjects(("SNMP-MPD-MIB", "snmpMPDGroup"))
# setStatus on compliance/group objects only exists on newer builders (> 4.4.0).
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    snmpMPDCompliance = snmpMPDCompliance.setStatus('current')
if mibBuilder.loadTexts: snmpMPDCompliance.setDescription('The compliance statement for SNMP entities which implement the SNMP-MPD-MIB. ')
# Object group bundling the three MPD statistics counters defined above.
snmpMPDGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 11, 3, 2, 1)).setObjects(("SNMP-MPD-MIB", "snmpUnknownSecurityModels"), ("SNMP-MPD-MIB", "snmpInvalidMsgs"), ("SNMP-MPD-MIB", "snmpUnknownPDUHandlers"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    snmpMPDGroup = snmpMPDGroup.setStatus('current')
if mibBuilder.loadTexts: snmpMPDGroup.setDescription('A collection of objects providing for remote monitoring of the SNMP Message Processing and Dispatching process. ')
# Register every symbol with the builder so other MIB modules can import them.
mibBuilder.exportSymbols("SNMP-MPD-MIB", snmpMPDMIBGroups=snmpMPDMIBGroups, snmpMPDMIB=snmpMPDMIB, snmpMPDCompliance=snmpMPDCompliance, snmpMPDStats=snmpMPDStats, snmpUnknownPDUHandlers=snmpUnknownPDUHandlers, snmpMPDMIBCompliances=snmpMPDMIBCompliances, snmpMPDGroup=snmpMPDGroup, PYSNMP_MODULE_ID=snmpMPDMIB, snmpMPDMIBObjects=snmpMPDMIBObjects, snmpMPDAdmin=snmpMPDAdmin, snmpMPDMIBConformance=snmpMPDMIBConformance, snmpUnknownSecurityModels=snmpUnknownSecurityModels, snmpInvalidMsgs=snmpInvalidMsgs)
| 132.041667 | 921 | 0.778321 |
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
IpAddress, TimeTicks, ObjectIdentity, snmpModules, ModuleIdentity, Integer32, Counter64, Counter32, Unsigned32, iso, Bits, NotificationType, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "TimeTicks", "ObjectIdentity", "snmpModules", "ModuleIdentity", "Integer32", "Counter64", "Counter32", "Unsigned32", "iso", "Bits", "NotificationType", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
snmpMPDMIB = ModuleIdentity((1, 3, 6, 1, 6, 3, 11))
snmpMPDMIB.setRevisions(('2002-10-14 00:00', '1999-05-04 16:36', '1997-09-30 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: snmpMPDMIB.setRevisionsDescriptions(('Updated addresses, published as RFC 3412.', 'Updated addresses, published as RFC 2572.', 'Original version, published as RFC 2272.',))
if mibBuilder.loadTexts: snmpMPDMIB.setLastUpdated('200210140000Z')
if mibBuilder.loadTexts: snmpMPDMIB.setOrganization('SNMPv3 Working Group')
if mibBuilder.loadTexts: snmpMPDMIB.setContactInfo('WG-EMail: snmpv3@lists.tislabs.com Subscribe: snmpv3-request@lists.tislabs.com Co-Chair: Russ Mundy Network Associates Laboratories postal: 15204 Omega Drive, Suite 300 Rockville, MD 20850-4601 USA EMail: mundy@tislabs.com phone: +1 301-947-7107 Co-Chair & Co-editor: David Harrington Enterasys Networks postal: 35 Industrial Way P. O. Box 5005 Rochester NH 03866-5005 USA EMail: dbh@enterasys.com phone: +1 603-337-2614 Co-editor: Jeffrey Case SNMP Research, Inc. postal: 3001 Kimberlin Heights Road Knoxville, TN 37920-9716 USA EMail: case@snmp.com phone: +1 423-573-1434 Co-editor: Randy Presuhn BMC Software, Inc. postal: 2141 North First Street San Jose, CA 95131 USA EMail: randy_presuhn@bmc.com phone: +1 408-546-1006 Co-editor: Bert Wijnen Lucent Technologies postal: Schagen 33 3461 GL Linschoten Netherlands EMail: bwijnen@lucent.com phone: +31 348-680-485 ')
if mibBuilder.loadTexts: snmpMPDMIB.setDescription('The MIB for Message Processing and Dispatching Copyright (C) The Internet Society (2002). This version of this MIB module is part of RFC 3412; see the RFC itself for full legal notices. ')
snmpMPDAdmin = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 1))
snmpMPDMIBObjects = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 2))
snmpMPDMIBConformance = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3))
snmpMPDStats = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 2, 1))
snmpUnknownSecurityModels = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownSecurityModels.setStatus('current')
if mibBuilder.loadTexts: snmpUnknownSecurityModels.setDescription('The total number of packets received by the SNMP engine which were dropped because they referenced a securityModel that was not known to or supported by the SNMP engine. ')
snmpInvalidMsgs = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpInvalidMsgs.setStatus('current')
if mibBuilder.loadTexts: snmpInvalidMsgs.setDescription('The total number of packets received by the SNMP engine which were dropped because there were invalid or inconsistent components in the SNMP message. ')
snmpUnknownPDUHandlers = MibScalar((1, 3, 6, 1, 6, 3, 11, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpUnknownPDUHandlers.setStatus('current')
if mibBuilder.loadTexts: snmpUnknownPDUHandlers.setDescription('The total number of packets received by the SNMP engine which were dropped because the PDU contained in the packet could not be passed to an application responsible for handling the pduType, e.g. no SNMP application had registered for the proper combination of the contextEngineID and the pduType. ')
snmpMPDMIBCompliances = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3, 1))
snmpMPDMIBGroups = MibIdentifier((1, 3, 6, 1, 6, 3, 11, 3, 2))
snmpMPDCompliance = ModuleCompliance((1, 3, 6, 1, 6, 3, 11, 3, 1, 1)).setObjects(("SNMP-MPD-MIB", "snmpMPDGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
snmpMPDCompliance = snmpMPDCompliance.setStatus('current')
if mibBuilder.loadTexts: snmpMPDCompliance.setDescription('The compliance statement for SNMP entities which implement the SNMP-MPD-MIB. ')
snmpMPDGroup = ObjectGroup((1, 3, 6, 1, 6, 3, 11, 3, 2, 1)).setObjects(("SNMP-MPD-MIB", "snmpUnknownSecurityModels"), ("SNMP-MPD-MIB", "snmpInvalidMsgs"), ("SNMP-MPD-MIB", "snmpUnknownPDUHandlers"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
snmpMPDGroup = snmpMPDGroup.setStatus('current')
if mibBuilder.loadTexts: snmpMPDGroup.setDescription('A collection of objects providing for remote monitoring of the SNMP Message Processing and Dispatching process. ')
mibBuilder.exportSymbols("SNMP-MPD-MIB", snmpMPDMIBGroups=snmpMPDMIBGroups, snmpMPDMIB=snmpMPDMIB, snmpMPDCompliance=snmpMPDCompliance, snmpMPDStats=snmpMPDStats, snmpUnknownPDUHandlers=snmpUnknownPDUHandlers, snmpMPDMIBCompliances=snmpMPDMIBCompliances, snmpMPDGroup=snmpMPDGroup, PYSNMP_MODULE_ID=snmpMPDMIB, snmpMPDMIBObjects=snmpMPDMIBObjects, snmpMPDAdmin=snmpMPDAdmin, snmpMPDMIBConformance=snmpMPDMIBConformance, snmpUnknownSecurityModels=snmpUnknownSecurityModels, snmpInvalidMsgs=snmpInvalidMsgs)
| true | true |
7900a8c0407ae7c15d6cfc7568abb10ddbe5149f | 8,602 | py | Python | py3k-sympy/sympy/logic/tests/test_boolalg.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | null | null | null | py3k-sympy/sympy/logic/tests/test_boolalg.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | null | null | null | py3k-sympy/sympy/logic/tests/test_boolalg.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | null | null | null | from sympy.logic.boolalg import to_cnf, eliminate_implications, distribute_and_over_or, \
compile_rule, conjuncts, disjuncts, to_int_repr, fuzzy_not, Boolean, is_cnf
from sympy import symbols, And, Or, Xor, Not, Nand, Nor, Implies, Equivalent, ITE
from sympy.utilities.pytest import raises, XFAIL
def test_overloading():
"""Test that |, & are overloaded as expected"""
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert A & B == And(A, B)
assert A | B == Or(A, B)
assert (A & B) | C == Or(And(A, B), C)
assert A >> B == Implies(A, B)
assert A << B == Implies(B, A)
assert ~A == Not(A)
assert A ^ B == Xor(A, B)
def test_And():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert And() == True
assert And(A) == A
assert And(True) == True
assert And(False) == False
assert And(True, True ) == True
assert And(True, False) == False
assert And(False, False) == False
assert And(True, A) == A
assert And(False, A) == False
assert And(True, True, True) == True
assert And(True, True , A) == A
assert And(True, False, A) == False
def test_Or():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Or() == False
assert Or(A) == A
assert Or(True) == True
assert Or(False) == False
assert Or(True, True ) == True
assert Or(True, False) == True
assert Or(False, False) == False
assert Or(True, A) == True
assert Or(False, A) == A
assert Or(True, False, False) == True
assert Or(True, False, A) == True
assert Or(False, False, A) == A
def test_Xor():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Xor() == False
assert Xor(A) == A
assert Xor(True) == True
assert Xor(False) == False
assert Xor(True, True ) == False
assert Xor(True, False) == True
assert Xor(False, False) == False
assert Xor(True, A) == ~A
assert Xor(False, A) == A
assert Xor(True, False, False) == True
assert Xor(True, False, A) == ~A
assert Xor(False, False, A) == A
def test_Not():
    """Not on explicit bools; with several arguments it returns a list of negations."""
    assert Not(True) == False
    assert Not(False) == True
    # Multi-argument form maps Not over the arguments and yields a list.
    assert Not(True, True ) == [False, False]
    assert Not(True, False) == [False, True ]
    assert Not(False,False) == [True, True ]
def test_Nand():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Nand() == False
assert Nand(A) == ~A
assert Nand(True) == False
assert Nand(False) == True
assert Nand(True, True ) == False
assert Nand(True, False) == True
assert Nand(False, False) == True
assert Nand(True, A) == ~A
assert Nand(False, A) == True
assert Nand(True, True, True) == False
assert Nand(True, True , A) == ~A
assert Nand(True, False, A) == True
def test_Nor():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Nor() == True
assert Nor(A) == ~A
assert Nor(True) == False
assert Nor(False) == True
assert Nor(True, True ) == False
assert Nor(True, False) == False
assert Nor(False, False) == True
assert Nor(True, A) == False
assert Nor(False, A) == ~A
assert Nor(True, True, True) == False
assert Nor(True, True , A) == False
assert Nor(True, False, A) == False
def test_Implies():
    """Implies truth table, arity check, and the >>/<< operator sugar."""
    A, B, C = list(map(Boolean, symbols('A,B,C')))
    # Implies is strictly binary; a third argument must raise ValueError.
    # NOTE(review): the string form of raises() is evaluated by sympy's legacy
    # test helper — confirm before modernizing to a lambda/callable form.
    raises(ValueError, "Implies(A,B,C)")
    assert Implies(True, True) == True
    assert Implies(True, False) == False
    assert Implies(False, True) == True
    assert Implies(False, False) == True
    # A >> B is Implies(A, B); B << A is the same implication spelled backwards.
    assert A >> B == B << A
def test_Equivalent():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A)
assert Equivalent() == True
assert Equivalent(A, A) == Equivalent(A) == True
assert Equivalent(True, True) == Equivalent(False, False) == True
assert Equivalent(True, False) == Equivalent(False, True) == False
assert Equivalent(A, True) == A
assert Equivalent(A, False) == Not(A)
assert Equivalent(A, B, True) == A & B
assert Equivalent(A, B, False) == ~A & ~B
def test_bool_symbol():
"""Test that mixing symbols with boolean values
works as expected"""
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert And(A, True) == A
assert And(A, True, True) == A
assert And(A, False) == False
assert And(A, True, False) == False
assert Or(A, True) == True
assert Or(A, False) == A
def test_subs():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert (A & B).subs(A, True) == B
assert (A & B).subs(A, False) == False
assert (A & B).subs(B, True) == A
assert (A & B).subs(B, False) == False
assert (A & B).subs({A: True, B:True}) == True
assert (A | B).subs(A, True) == True
assert (A | B).subs(A, False) == B
assert (A | B).subs(B, True) == True
assert (A | B).subs(B, False) == A
assert (A | B).subs({A: True, B:True}) == True
"""
we test for axioms of boolean algebra
see http://en.wikipedia.org/wiki/Boolean_algebra_(structure)
"""
def test_commutative():
    """Test for commutativity of And and Or"""
    A, B = list(map(Boolean, symbols('A,B')))
    assert A & B == B & A
    assert A | B == B | A
def test_and_associativity():
"""Test for associativity of And"""
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert (A & B) & C == A & (B & C)
def test_or_assicativity():
    """Test for associativity of Or."""
    # NOTE(review): the function name has a typo ("assicativity" ->
    # "associativity"); kept as-is so external references keep working.
    A, B, C = list(map(Boolean, symbols('A,B,C')))
    assert ((A | B) | C) == (A | (B | C))
def test_double_negation():
    """Involution axiom: ~~a == a for a Boolean."""
    a = Boolean()
    assert ~(~a) == a
def test_De_Morgan():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert ~(A & B) == (~A) | (~B)
assert ~(A | B) == (~A) & (~B)
assert ~(A | B | C) == ~A & ~B & ~C
# test methods
def test_eliminate_implications():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B
assert eliminate_implications(A >> (C >>Not(B))) == Or(Or(Not(B), Not(C)), Not(A))
def test_conjuncts():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert conjuncts(A & B & C) == set([A, B, C])
assert conjuncts((A | B) & C) == set([A | B, C])
assert conjuncts(A) == set([A])
assert conjuncts(True) == set([True])
assert conjuncts(False) == set([False])
def test_disjuncts():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert disjuncts(A | B | C) == set([A, B, C])
assert disjuncts((A | B) & C) == set([(A | B) & C])
assert disjuncts(A) == set([A])
assert disjuncts(True) == set([True])
assert disjuncts(False) == set([False])
def test_distribute():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C))
def test_to_cnf():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert to_cnf(~(B | C)) == And(Not(B), Not(C))
assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C))
assert to_cnf(A >> B) == (~A) | B
assert to_cnf(A >> (B & C)) == (~A | B) & (~A | C)
assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A)))
assert to_cnf(Equivalent(A, B & C)) == (~A | B) & (~A | C) & (~B | ~C | A)
assert to_cnf(Equivalent(A, B | C)) == \
And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A)))
def test_compile_rule():
from sympy import sympify
assert compile_rule("A & B") == sympify("A & B")
def test_to_int_repr():
x, y, z = list(map(Boolean, symbols('x,y,z')))
def sorted_recursive(arg):
try:
return sorted(sorted_recursive(x) for x in arg)
except TypeError: #arg is not a sequence
return arg
assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \
sorted_recursive([[1, 2], [1, 3]])
assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \
sorted_recursive([[1, 2], [3, -1]])
def test_is_cnf():
x, y, z = symbols('x,y,z')
assert is_cnf(x | y | z) == True
assert is_cnf(x & y & z) == True
assert is_cnf((x | y) & z) == True
assert is_cnf((x & y) | z) == False
def test_ITE():
    """ITE(cond, a, b): evaluates to a when cond is true, else to b."""
    A, B, C = list(map(Boolean, symbols('A,B,C')))
    assert ITE(True, False, True) == False
    assert ITE(True, True, False) == True
    assert ITE(False, True, False) == False
    assert ITE(False, False, True) == True
    # The rebindings below deliberately shadow the Boolean symbols with plain
    # bools, so ITE collapses to one of its symbolic branches.
    A = True
    assert ITE(A, B, C) == B
    A = False
    assert ITE(A, B, C) == C
    B = True
    assert ITE(And(A, B), B, C) == C
    assert ITE(Or(A, False), And(B, True), False) == False
| 32.097015 | 89 | 0.562892 | from sympy.logic.boolalg import to_cnf, eliminate_implications, distribute_and_over_or, \
compile_rule, conjuncts, disjuncts, to_int_repr, fuzzy_not, Boolean, is_cnf
from sympy import symbols, And, Or, Xor, Not, Nand, Nor, Implies, Equivalent, ITE
from sympy.utilities.pytest import raises, XFAIL
def test_overloading():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert A & B == And(A, B)
assert A | B == Or(A, B)
assert (A & B) | C == Or(And(A, B), C)
assert A >> B == Implies(A, B)
assert A << B == Implies(B, A)
assert ~A == Not(A)
assert A ^ B == Xor(A, B)
def test_And():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert And() == True
assert And(A) == A
assert And(True) == True
assert And(False) == False
assert And(True, True ) == True
assert And(True, False) == False
assert And(False, False) == False
assert And(True, A) == A
assert And(False, A) == False
assert And(True, True, True) == True
assert And(True, True , A) == A
assert And(True, False, A) == False
def test_Or():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Or() == False
assert Or(A) == A
assert Or(True) == True
assert Or(False) == False
assert Or(True, True ) == True
assert Or(True, False) == True
assert Or(False, False) == False
assert Or(True, A) == True
assert Or(False, A) == A
assert Or(True, False, False) == True
assert Or(True, False, A) == True
assert Or(False, False, A) == A
def test_Xor():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Xor() == False
assert Xor(A) == A
assert Xor(True) == True
assert Xor(False) == False
assert Xor(True, True ) == False
assert Xor(True, False) == True
assert Xor(False, False) == False
assert Xor(True, A) == ~A
assert Xor(False, A) == A
assert Xor(True, False, False) == True
assert Xor(True, False, A) == ~A
assert Xor(False, False, A) == A
def test_Not():
assert Not(True) == False
assert Not(False) == True
assert Not(True, True ) == [False, False]
assert Not(True, False) == [False, True ]
assert Not(False,False) == [True, True ]
def test_Nand():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Nand() == False
assert Nand(A) == ~A
assert Nand(True) == False
assert Nand(False) == True
assert Nand(True, True ) == False
assert Nand(True, False) == True
assert Nand(False, False) == True
assert Nand(True, A) == ~A
assert Nand(False, A) == True
assert Nand(True, True, True) == False
assert Nand(True, True , A) == ~A
assert Nand(True, False, A) == True
def test_Nor():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Nor() == True
assert Nor(A) == ~A
assert Nor(True) == False
assert Nor(False) == True
assert Nor(True, True ) == False
assert Nor(True, False) == False
assert Nor(False, False) == True
assert Nor(True, A) == False
assert Nor(False, A) == ~A
assert Nor(True, True, True) == False
assert Nor(True, True , A) == False
assert Nor(True, False, A) == False
def test_Implies():
A, B, C = list(map(Boolean, symbols('A,B,C')))
raises(ValueError, "Implies(A,B,C)")
assert Implies(True, True) == True
assert Implies(True, False) == False
assert Implies(False, True) == True
assert Implies(False, False) == True
assert A >> B == B << A
def test_Equivalent():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A)
assert Equivalent() == True
assert Equivalent(A, A) == Equivalent(A) == True
assert Equivalent(True, True) == Equivalent(False, False) == True
assert Equivalent(True, False) == Equivalent(False, True) == False
assert Equivalent(A, True) == A
assert Equivalent(A, False) == Not(A)
assert Equivalent(A, B, True) == A & B
assert Equivalent(A, B, False) == ~A & ~B
def test_bool_symbol():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert And(A, True) == A
assert And(A, True, True) == A
assert And(A, False) == False
assert And(A, True, False) == False
assert Or(A, True) == True
assert Or(A, False) == A
def test_subs():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert (A & B).subs(A, True) == B
assert (A & B).subs(A, False) == False
assert (A & B).subs(B, True) == A
assert (A & B).subs(B, False) == False
assert (A & B).subs({A: True, B:True}) == True
assert (A | B).subs(A, True) == True
assert (A | B).subs(A, False) == B
assert (A | B).subs(B, True) == True
assert (A | B).subs(B, False) == A
assert (A | B).subs({A: True, B:True}) == True
def test_commutative():
A, B = list(map(Boolean, symbols('A,B')))
assert A & B == B & A
assert A | B == B | A
def test_and_associativity():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert (A & B) & C == A & (B & C)
def test_or_assicativity():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert ((A | B) | C) == (A | (B | C))
def test_double_negation():
a = Boolean()
assert ~(~a) == a
def test_De_Morgan():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert ~(A & B) == (~A) | (~B)
assert ~(A | B) == (~A) & (~B)
assert ~(A | B | C) == ~A & ~B & ~C
def test_eliminate_implications():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B
assert eliminate_implications(A >> (C >>Not(B))) == Or(Or(Not(B), Not(C)), Not(A))
def test_conjuncts():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert conjuncts(A & B & C) == set([A, B, C])
assert conjuncts((A | B) & C) == set([A | B, C])
assert conjuncts(A) == set([A])
assert conjuncts(True) == set([True])
assert conjuncts(False) == set([False])
def test_disjuncts():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert disjuncts(A | B | C) == set([A, B, C])
assert disjuncts((A | B) & C) == set([(A | B) & C])
assert disjuncts(A) == set([A])
assert disjuncts(True) == set([True])
assert disjuncts(False) == set([False])
def test_distribute():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C))
def test_to_cnf():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert to_cnf(~(B | C)) == And(Not(B), Not(C))
assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C))
assert to_cnf(A >> B) == (~A) | B
assert to_cnf(A >> (B & C)) == (~A | B) & (~A | C)
assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A)))
assert to_cnf(Equivalent(A, B & C)) == (~A | B) & (~A | C) & (~B | ~C | A)
assert to_cnf(Equivalent(A, B | C)) == \
And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A)))
def test_compile_rule():
from sympy import sympify
assert compile_rule("A & B") == sympify("A & B")
def test_to_int_repr():
x, y, z = list(map(Boolean, symbols('x,y,z')))
def sorted_recursive(arg):
try:
return sorted(sorted_recursive(x) for x in arg)
except TypeError:
return arg
assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \
sorted_recursive([[1, 2], [1, 3]])
assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \
sorted_recursive([[1, 2], [3, -1]])
def test_is_cnf():
x, y, z = symbols('x,y,z')
assert is_cnf(x | y | z) == True
assert is_cnf(x & y & z) == True
assert is_cnf((x | y) & z) == True
assert is_cnf((x & y) | z) == False
def test_ITE():
A, B, C = list(map(Boolean, symbols('A,B,C')))
assert ITE(True, False, True) == False
assert ITE(True, True, False) == True
assert ITE(False, True, False) == False
assert ITE(False, False, True) == True
A = True
assert ITE(A, B, C) == B
A = False
assert ITE(A, B, C) == C
B = True
assert ITE(And(A, B), B, C) == C
assert ITE(Or(A, False), And(B, True), False) == False
| true | true |
7900abe356ffbddc2bca77051f275782bc7c99b1 | 317 | py | Python | Curso-em-video-Python3-mundo3/ex108/moeda.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo3/ex108/moeda.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo3/ex108/moeda.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | def metade(x=0):
res = x / 2
return res
def dobro(x=0):
    """Return twice the value of *x* (defaults to 0)."""
    return 2 * x
def aumentar(x=0, y=0):
    """Return *x* increased by *y* percent (e.g. aumentar(100, 50) == 150.0)."""
    return x * (1 + y / 100)
def reduzir(x=0, y=0):
    """Return *x* decreased by *y* percent (e.g. reduzir(100, 50) == 50.0)."""
    return x * (1 - y / 100)
def moeda(x=0, m='R$'):
    """Format *x* as a currency string: symbol *m*, two decimals, comma separator."""
    formatted = f'{m}{x:.2f}'
    return formatted.replace('.', ',')
| 13.208333 | 41 | 0.473186 | def metade(x=0):
res = x / 2
return res
def dobro(x=0):
res = 2 * x
return res
def aumentar(x=0, y=0):
res = x * (1 + y / 100)
return res
def reduzir(x=0, y=0):
res = x * (1 - y / 100)
return res
def moeda(x=0, m='R$'):
res = f'{m}{x:.2f}'.replace('.', ',')
return res
| true | true |
7900abe9e8f5740b6c211d610c43b0b312b958a2 | 3,294 | py | Python | ucsmsdk/mometa/bios/BiosVfExecuteDisableBit.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/bios/BiosVfExecuteDisableBit.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/bios/BiosVfExecuteDisableBit.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for BiosVfExecuteDisableBit ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVfExecuteDisableBitConsts:
    """Legal string values for BiosVfExecuteDisableBit properties."""
    # Allowed values of the read-only supported_by_default property.
    SUPPORTED_BY_DEFAULT_NO = "no"
    SUPPORTED_BY_DEFAULT_YES = "yes"
    # Allowed values of the writable vp_execute_disable_bit property.
    VP_EXECUTE_DISABLE_BIT_DISABLED = "disabled"
    VP_EXECUTE_DISABLE_BIT_ENABLED = "enabled"
    VP_EXECUTE_DISABLE_BIT_PLATFORM_DEFAULT = "platform-default"
    VP_EXECUTE_DISABLE_BIT_PLATFORM_RECOMMENDED = "platform-recommended"
class BiosVfExecuteDisableBit(ManagedObject):
    """Managed object for the BIOS "Execute-Disable-Bit" token.

    Per mo_meta below its parents are biosSettings and biosVProfile, and the
    writable knob is vp_execute_disable_bit (see the Consts class for values).
    Structure suggests this module is auto-generated from SDK metadata.
    """
    # Convenience handle to the allowed-value constants.
    consts = BiosVfExecuteDisableBitConsts()
    # No naming properties: the RN is the fixed string "Execute-Disable-Bit".
    naming_props = set([])
    # Class-level metadata: MO name, XML tag, RN, min version, category,
    # access mask, allowed privileges, parents, children, supported verbs.
    mo_meta = MoMeta("BiosVfExecuteDisableBit", "biosVfExecuteDisableBit", "Execute-Disable-Bit", VersionMeta.Version111j, "InputOutput", 0x3f, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], [u'biosSettings', u'biosVProfile'], [], ["Get", "Set"])
    # Per-property metadata: type, min version, access, mask, size limits,
    # validation regex and the set of legal values.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "prop_acl": MoPropertyMeta("prop_acl", "propAcl", "ulong", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "supported_by_default": MoPropertyMeta("supported_by_default", "supportedByDefault", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []),
        "vp_execute_disable_bit": MoPropertyMeta("vp_execute_disable_bit", "vpExecuteDisableBit", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["disabled", "enabled", "platform-default", "platform-recommended"], []),
    }
    # Maps XML attribute names to the Python attribute names above.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "propAcl": "prop_acl",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "supportedByDefault": "supported_by_default",
        "vpExecuteDisableBit": "vp_execute_disable_bit",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under *parent_mo_or_dn*; extra kwargs set properties."""
        self._dirty_mask = 0
        # Initialize every declared property before delegating to the base
        # class, which applies kwargs and registers the MO under its parent.
        self.child_action = None
        self.prop_acl = None
        self.sacl = None
        self.status = None
        self.supported_by_default = None
        self.vp_execute_disable_bit = None
        ManagedObject.__init__(self, "BiosVfExecuteDisableBit", parent_mo_or_dn, **kwargs)
| 57.789474 | 287 | 0.687007 |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVfExecuteDisableBitConsts:
SUPPORTED_BY_DEFAULT_NO = "no"
SUPPORTED_BY_DEFAULT_YES = "yes"
VP_EXECUTE_DISABLE_BIT_DISABLED = "disabled"
VP_EXECUTE_DISABLE_BIT_ENABLED = "enabled"
VP_EXECUTE_DISABLE_BIT_PLATFORM_DEFAULT = "platform-default"
VP_EXECUTE_DISABLE_BIT_PLATFORM_RECOMMENDED = "platform-recommended"
class BiosVfExecuteDisableBit(ManagedObject):
consts = BiosVfExecuteDisableBitConsts()
naming_props = set([])
mo_meta = MoMeta("BiosVfExecuteDisableBit", "biosVfExecuteDisableBit", "Execute-Disable-Bit", VersionMeta.Version111j, "InputOutput", 0x3f, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], [u'biosSettings', u'biosVProfile'], [], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"prop_acl": MoPropertyMeta("prop_acl", "propAcl", "ulong", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"supported_by_default": MoPropertyMeta("supported_by_default", "supportedByDefault", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []),
"vp_execute_disable_bit": MoPropertyMeta("vp_execute_disable_bit", "vpExecuteDisableBit", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["disabled", "enabled", "platform-default", "platform-recommended"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"propAcl": "prop_acl",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"supportedByDefault": "supported_by_default",
"vpExecuteDisableBit": "vp_execute_disable_bit",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.prop_acl = None
self.sacl = None
self.status = None
self.supported_by_default = None
self.vp_execute_disable_bit = None
ManagedObject.__init__(self, "BiosVfExecuteDisableBit", parent_mo_or_dn, **kwargs)
| true | true |
7900ace9ad04d258678c59834b1a699500f361bd | 881 | py | Python | Python/SearchInsertPosition.py | TonnyL/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 205 | 2017-11-16T08:38:46.000Z | 2022-03-06T05:50:03.000Z | Python/SearchInsertPosition.py | santosh241/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 3 | 2018-04-10T10:17:52.000Z | 2020-12-11T08:00:09.000Z | Python/SearchInsertPosition.py | santosh241/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 28 | 2018-04-10T06:42:42.000Z | 2021-09-14T14:15:39.000Z | # -*- coding: UTF-8 -*-
# Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
#
# You may assume no duplicates in the array.
#
# Here are few examples.
# [1,3,5,6], 5 → 2
# [1,3,5,6], 2 → 1
# [1,3,5,6], 7 → 4
# [1,3,5,6], 0 → 0
#
# Python, Python 3 all accepted.
class SearchInsertPosition(object):
    def searchInsert(self, nums, target):
        """
        Return the index of target in the sorted list nums, or the index
        at which it would be inserted to keep nums sorted.

        Assumes nums is sorted ascending with no duplicates, per the
        problem statement.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        # Preserve the original contract: missing/empty input yields 0.
        if not nums:
            return 0
        # Binary search (O(log n)) replaces the original linear scan.
        # bisect_left returns the leftmost insertion point, which is the
        # index of target when present and the insert position otherwise.
        from bisect import bisect_left
        return bisect_left(nums, target)
| 26.69697 | 156 | 0.523269 |
class SearchInsertPosition(object):
def searchInsert(self, nums, target):
if nums is None or len(nums) == 0:
return 0
for i in range(0, len(nums)):
if nums[i] == target:
return i
elif nums[i] < target:
if (i + 1 < len(nums) and nums[i + 1] > target) or i + 1 == len(nums):
return i + 1
return 0
| true | true |
7900ad817cfd6b053661207065b1a9129af1ae48 | 401 | py | Python | kweetservice/kweetservice/wsgi.py | teunw/JEA6-Kweeter | 9da250bc4717e5c17297e8d2bc9ee0e39b6d53e6 | [
"MIT"
] | null | null | null | kweetservice/kweetservice/wsgi.py | teunw/JEA6-Kweeter | 9da250bc4717e5c17297e8d2bc9ee0e39b6d53e6 | [
"MIT"
] | 18 | 2018-02-18T20:17:33.000Z | 2018-02-28T19:51:33.000Z | kweetservice/kweetservice/wsgi.py | teunw/JEA6-Kweeter | 9da250bc4717e5c17297e8d2bc9ee0e39b6d53e6 | [
"MIT"
] | 1 | 2018-02-26T14:28:44.000Z | 2018-02-26T14:28:44.000Z | """
WSGI config for kweetservice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kweetservice.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kweetservice.settings")
application = get_wsgi_application()
| true | true |
7900ae0b72de776f42176f7055e913242b485667 | 1,619 | py | Python | test/unit/test_router.py | RedHatOfficial/receptor | 0eb9f0e3bd3b25bce948f7a2f43562f181a630a1 | [
"Apache-2.0"
] | 6 | 2020-07-12T05:56:21.000Z | 2022-03-09T11:43:53.000Z | test/unit/test_router.py | RedHatOfficial/receptor | 0eb9f0e3bd3b25bce948f7a2f43562f181a630a1 | [
"Apache-2.0"
] | 7 | 2020-07-06T15:51:06.000Z | 2021-08-18T18:55:26.000Z | test/unit/test_router.py | RedHatOfficial/receptor | 0eb9f0e3bd3b25bce948f7a2f43562f181a630a1 | [
"Apache-2.0"
] | 3 | 2020-06-25T21:03:42.000Z | 2021-08-09T01:27:48.000Z | import pytest
from receptor.router import MeshRouter
# Each fixture is a 3-tuple consumed by the parametrized tests below:
#   edges:              (node, node, cost) triples describing the mesh
#   expected_next_hops: (start, destination, expected first hop) triples
#   expected_neighbors: (node, expected neighbor set) pairs
test_networks = [
    (
        [
            ("a", "b", 1),
            ("a", "d", 1),
            ("a", "f", 1),
            ("b", "d", 1),
            ("b", "c", 1),
            ("c", "e", 1),
            ("c", "h", 1),
            ("c", "j", 1),
            ("e", "f", 1),
            ("e", "g", 1),
            ("e", "h", 1),
            ("f", "g", 1),
            ("g", "h", 1),
            ("h", "j", 1),
            ("h", "k", 1),
            ("j", "k", 1),
            ("j", "m", 1),
            ("l", "m", 1),
        ],
        [("a", "f", "f"), ("a", "m", "b"), ("h", "d", "c")],
        [("a", {"b", "d", "f"}), ("f", {"a", "e", "g"}), ("j", {"c", "h", "k", "m"})],
    ),
    (
        [("a", "b", 1), ("b", "c", 1), ("c", "d", 1), ("d", "e", 1), ("e", "f", 1)],
        [("a", "f", "b"), ("c", "a", "b"), ("f", "c", "e")],
        [("a", {"b"}), ("f", {"e"}), ("c", {"b", "d"})],
    ),
]
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_next_hop(edges, expected_next_hops, expected_neighbors):
    """Every (source, destination, hop) expectation holds on a fresh router."""
    for source, destination, expected_hop in expected_next_hops:
        router = MeshRouter(node_id=source)
        router.add_or_update_edges(edges)
        assert router.next_hop(destination) == expected_hop
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_neighbors(edges, expected_next_hops, expected_neighbors):
    """A router built from the first edge's node reports each expected neighbor set."""
    router = MeshRouter(node_id=edges[0][0])
    router.add_or_update_edges(edges)
    for node, expected_set in expected_neighbors:
        assert router.get_neighbors(node) == expected_set
| 31.745098 | 88 | 0.413218 | import pytest
from receptor.router import MeshRouter
test_networks = [
(
[
("a", "b", 1),
("a", "d", 1),
("a", "f", 1),
("b", "d", 1),
("b", "c", 1),
("c", "e", 1),
("c", "h", 1),
("c", "j", 1),
("e", "f", 1),
("e", "g", 1),
("e", "h", 1),
("f", "g", 1),
("g", "h", 1),
("h", "j", 1),
("h", "k", 1),
("j", "k", 1),
("j", "m", 1),
("l", "m", 1),
],
[("a", "f", "f"), ("a", "m", "b"), ("h", "d", "c")],
[("a", {"b", "d", "f"}), ("f", {"a", "e", "g"}), ("j", {"c", "h", "k", "m"})],
),
(
[("a", "b", 1), ("b", "c", 1), ("c", "d", 1), ("d", "e", 1), ("e", "f", 1)],
[("a", "f", "b"), ("c", "a", "b"), ("f", "c", "e")],
[("a", {"b"}), ("f", {"e"}), ("c", {"b", "d"})],
),
]
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_next_hop(edges, expected_next_hops, expected_neighbors):
for node_id, remote, enh in expected_next_hops:
r = MeshRouter(node_id=node_id)
r.add_or_update_edges(edges)
assert r.next_hop(remote) == enh
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_neighbors(edges, expected_next_hops, expected_neighbors):
r = MeshRouter(node_id=edges[0][0])
r.add_or_update_edges(edges)
for node_id, neighbors in expected_neighbors:
assert r.get_neighbors(node_id) == neighbors
| true | true |
7900ae3e4053dc3a0b8e6c2dd8401574c3f4af7a | 5,212 | py | Python | chefboost/training/Preprocess.py | anapaulamendes/chefboost | 4628154f054cb6c79ab3f69a642d597c1265b202 | [
"MIT"
] | 322 | 2019-03-06T15:01:32.000Z | 2022-03-30T12:26:30.000Z | chefboost/training/Preprocess.py | anapaulamendes/chefboost | 4628154f054cb6c79ab3f69a642d597c1265b202 | [
"MIT"
] | 21 | 2019-09-03T17:55:56.000Z | 2022-03-23T06:29:42.000Z | chefboost/training/Preprocess.py | anapaulamendes/chefboost | 4628154f054cb6c79ab3f69a642d597c1265b202 | [
"MIT"
] | 86 | 2019-05-02T19:55:54.000Z | 2022-03-23T03:33:06.000Z | import numpy as np
import math
from chefboost.training import Training
#from training import Training
def processContinuousFeatures(algorithm, df, column_name, entropy, config):
	"""Discretize a continuous column into a binary "<=t" / ">t" category.

	algorithm: one of 'ID3', 'C4.5', 'CART', 'CHAID', 'Regression'; selects
		the split metric used to choose the winning threshold t.
	df: training frame; must contain column_name and a 'Decision' column.
	column_name: continuous column to discretize (replaced in place).
	entropy: entropy of the current df (used by the ID3/C4.5 gain).
	config: passed through to Training.calculateEntropy.
	Returns df with column_name replaced by the two string categories.
	"""
	#if True:
	# Candidate thresholds: every unique value when the column has few of
	# them, otherwise min/max/mean plus mean +/- 1..3 std-devs in range.
	if df[column_name].nunique() <= 20:
		unique_values = sorted(df[column_name].unique())
	else:
		unique_values = []
		df_mean = df[column_name].mean()
		df_std = df[column_name].std(ddof=0)
		df_min = df[column_name].min()
		df_max = df[column_name].max()
		unique_values.append(df[column_name].min())
		unique_values.append(df[column_name].max())
		unique_values.append(df[column_name].mean())
		scales = list(range(-3,+4, 1))
		for scale in scales:
			if df_mean + scale * df_std > df_min and df_mean + scale * df_std < df_max:
				unique_values.append(df_mean + scale * df_std)
		unique_values.sort()
	#print(column_name,"->",unique_values)
	# One accumulator per metric; only the branch for `algorithm` fills one.
	subset_gainratios = []; subset_gains = []; subset_ginis = []; subset_red_stdevs = []; subset_chi_squares = []
	# Single candidate: nothing to compare, split on it directly.
	if len(unique_values) == 1:
		winner_threshold = unique_values[0]
		df[column_name] = np.where(df[column_name] <= winner_threshold, "<="+str(winner_threshold), ">"+str(winner_threshold))
		return df
	# Score every candidate threshold (last value excluded: "> max" would
	# produce an empty right-hand subset).
	for i in range(0, len(unique_values)-1):
		threshold = unique_values[i]
		subset1 = df[df[column_name] <= threshold]
		subset2 = df[df[column_name] > threshold]
		subset1_rows = subset1.shape[0]; subset2_rows = subset2.shape[0]
		total_instances = df.shape[0] #subset1_rows+subset2_rows
		subset1_probability = subset1_rows / total_instances
		subset2_probability = subset2_rows / total_instances
		if algorithm == 'ID3' or algorithm == 'C4.5':
			threshold_gain = entropy - subset1_probability*Training.calculateEntropy(subset1, config) - subset2_probability*Training.calculateEntropy(subset2, config)
			subset_gains.append(threshold_gain)
		if algorithm == 'C4.5': #C4.5 also need gain in the block above. That's why, instead of else if we used direct if condition here
			threshold_splitinfo = -subset1_probability * math.log(subset1_probability, 2)-subset2_probability*math.log(subset2_probability, 2)
			gainratio = threshold_gain / threshold_splitinfo
			subset_gainratios.append(gainratio)
		elif algorithm == 'CART':
			# Weighted Gini impurity of the two candidate partitions.
			decision_for_subset1 = subset1['Decision'].value_counts().tolist()
			decision_for_subset2 = subset2['Decision'].value_counts().tolist()
			gini_subset1 = 1; gini_subset2 = 1
			for j in range(0, len(decision_for_subset1)):
				gini_subset1 = gini_subset1 - math.pow((decision_for_subset1[j]/subset1_rows),2)
			for j in range(0, len(decision_for_subset2)):
				gini_subset2 = gini_subset2 - math.pow((decision_for_subset2[j]/subset2_rows),2)
			gini = (subset1_rows/total_instances)*gini_subset1 + (subset2_rows/total_instances) * gini_subset2
			subset_ginis.append(gini)
		elif algorithm == "CHAID":
			#subset1 = high, subset2 = normal
			unique_decisions = df['Decision'].unique() #Yes, No
			num_of_decisions = len(unique_decisions) #2
			# Expected counts assume decisions are uniformly distributed.
			subset1_expected = subset1.shape[0] / num_of_decisions
			subset2_expected = subset2.shape[0] / num_of_decisions
			chi_square = 0
			for d in unique_decisions: #Yes, No
				#decision = Yes
				subset1_d = subset1[subset1["Decision"] == d] #high, yes
				subset2_d = subset2[subset2["Decision"] == d] #normal, yes
				subset1_d_chi_square = math.sqrt(((subset1_d.shape[0] - subset1_expected) * (subset1_d.shape[0] - subset1_expected))/subset1_expected)
				subset2_d_chi_square = math.sqrt(((subset2_d.shape[0] - subset2_expected) * (subset2_d.shape[0] - subset2_expected))/subset2_expected)
				chi_square = chi_square + subset1_d_chi_square + subset2_d_chi_square
			subset_chi_squares.append(chi_square)
			#----------------------------------
		elif algorithm == 'Regression':
			# Standard-deviation reduction of the 'Decision' target.
			superset_stdev = df['Decision'].std(ddof=0)
			subset1_stdev = subset1['Decision'].std(ddof=0)
			subset2_stdev = subset2['Decision'].std(ddof=0)
			threshold_weighted_stdev = (subset1_rows/total_instances)*subset1_stdev + (subset2_rows/total_instances)*subset2_stdev
			threshold_reducted_stdev = superset_stdev - threshold_weighted_stdev
			subset_red_stdevs.append(threshold_reducted_stdev)
	#----------------------------------
	# Pick the winning threshold: maximize gain/gain-ratio/chi-square/
	# stdev-reduction, or minimize Gini impurity for CART.
	if algorithm == "C4.5":
		winner_one = subset_gainratios.index(max(subset_gainratios))
	elif algorithm == "ID3": #actually, ID3 does not support for continuous features but we can still do it
		winner_one = subset_gains.index(max(subset_gains))
	elif algorithm == "CART":
		winner_one = subset_ginis.index(min(subset_ginis))
	elif algorithm == "CHAID":
		winner_one = subset_chi_squares.index(max(subset_chi_squares))
	elif algorithm == "Regression":
		winner_one = subset_red_stdevs.index(max(subset_red_stdevs))
	winner_threshold = unique_values[winner_one]
	#print(column_name,": ", winner_threshold," in ", unique_values)
	#print("theshold is ",winner_threshold," for ",column_name)
	# Replace the continuous column with the winning binary category.
	df[column_name] = np.where(df[column_name] <= winner_threshold, "<="+str(winner_threshold), ">"+str(winner_threshold))
	return df
| 39.18797 | 158 | 0.700691 | import numpy as np
import math
from chefboost.training import Training
def processContinuousFeatures(algorithm, df, column_name, entropy, config):
if df[column_name].nunique() <= 20:
unique_values = sorted(df[column_name].unique())
else:
unique_values = []
df_mean = df[column_name].mean()
df_std = df[column_name].std(ddof=0)
df_min = df[column_name].min()
df_max = df[column_name].max()
unique_values.append(df[column_name].min())
unique_values.append(df[column_name].max())
unique_values.append(df[column_name].mean())
scales = list(range(-3,+4, 1))
for scale in scales:
if df_mean + scale * df_std > df_min and df_mean + scale * df_std < df_max:
unique_values.append(df_mean + scale * df_std)
unique_values.sort()
subset_gainratios = []; subset_gains = []; subset_ginis = []; subset_red_stdevs = []; subset_chi_squares = []
if len(unique_values) == 1:
winner_threshold = unique_values[0]
df[column_name] = np.where(df[column_name] <= winner_threshold, "<="+str(winner_threshold), ">"+str(winner_threshold))
return df
for i in range(0, len(unique_values)-1):
threshold = unique_values[i]
subset1 = df[df[column_name] <= threshold]
subset2 = df[df[column_name] > threshold]
subset1_rows = subset1.shape[0]; subset2_rows = subset2.shape[0]
total_instances = df.shape[0]
subset1_probability = subset1_rows / total_instances
subset2_probability = subset2_rows / total_instances
if algorithm == 'ID3' or algorithm == 'C4.5':
threshold_gain = entropy - subset1_probability*Training.calculateEntropy(subset1, config) - subset2_probability*Training.calculateEntropy(subset2, config)
subset_gains.append(threshold_gain)
if algorithm == 'C4.5':
threshold_splitinfo = -subset1_probability * math.log(subset1_probability, 2)-subset2_probability*math.log(subset2_probability, 2)
gainratio = threshold_gain / threshold_splitinfo
subset_gainratios.append(gainratio)
elif algorithm == 'CART':
decision_for_subset1 = subset1['Decision'].value_counts().tolist()
decision_for_subset2 = subset2['Decision'].value_counts().tolist()
gini_subset1 = 1; gini_subset2 = 1
for j in range(0, len(decision_for_subset1)):
gini_subset1 = gini_subset1 - math.pow((decision_for_subset1[j]/subset1_rows),2)
for j in range(0, len(decision_for_subset2)):
gini_subset2 = gini_subset2 - math.pow((decision_for_subset2[j]/subset2_rows),2)
gini = (subset1_rows/total_instances)*gini_subset1 + (subset2_rows/total_instances) * gini_subset2
subset_ginis.append(gini)
elif algorithm == "CHAID":
#subset1 = high, subset2 = normal
unique_decisions = df['Decision'].unique() #Yes, No
num_of_decisions = len(unique_decisions) #2
subset1_expected = subset1.shape[0] / num_of_decisions
subset2_expected = subset2.shape[0] / num_of_decisions
chi_square = 0
for d in unique_decisions: #Yes, No
#decision = Yes
subset1_d = subset1[subset1["Decision"] == d] #high, yes
subset2_d = subset2[subset2["Decision"] == d] #normal, yes
subset1_d_chi_square = math.sqrt(((subset1_d.shape[0] - subset1_expected) * (subset1_d.shape[0] - subset1_expected))/subset1_expected)
subset2_d_chi_square = math.sqrt(((subset2_d.shape[0] - subset2_expected) * (subset2_d.shape[0] - subset2_expected))/subset2_expected)
chi_square = chi_square + subset1_d_chi_square + subset2_d_chi_square
subset_chi_squares.append(chi_square)
#----------------------------------
elif algorithm == 'Regression':
superset_stdev = df['Decision'].std(ddof=0)
subset1_stdev = subset1['Decision'].std(ddof=0)
subset2_stdev = subset2['Decision'].std(ddof=0)
threshold_weighted_stdev = (subset1_rows/total_instances)*subset1_stdev + (subset2_rows/total_instances)*subset2_stdev
threshold_reducted_stdev = superset_stdev - threshold_weighted_stdev
subset_red_stdevs.append(threshold_reducted_stdev)
#----------------------------------
if algorithm == "C4.5":
winner_one = subset_gainratios.index(max(subset_gainratios))
elif algorithm == "ID3": #actually, ID3 does not support for continuous features but we can still do it
winner_one = subset_gains.index(max(subset_gains))
elif algorithm == "CART":
winner_one = subset_ginis.index(min(subset_ginis))
elif algorithm == "CHAID":
winner_one = subset_chi_squares.index(max(subset_chi_squares))
elif algorithm == "Regression":
winner_one = subset_red_stdevs.index(max(subset_red_stdevs))
winner_threshold = unique_values[winner_one]
#print(column_name,": ", winner_threshold," in ", unique_values)
#print("theshold is ",winner_threshold," for ",column_name)
df[column_name] = np.where(df[column_name] <= winner_threshold, "<="+str(winner_threshold), ">"+str(winner_threshold))
return df
| true | true |
7900ae9cfe9061026d17775927a526a9e50184bb | 1,144 | py | Python | release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasEditingMode.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasEditingMode.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/System/Windows/Controls/__init___parts/InkCanvasEditingMode.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z | class InkCanvasEditingMode(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the editing mode for the System.Windows.Controls.InkCanvas
enum InkCanvasEditingMode,values: EraseByPoint (5),EraseByStroke (6),GestureOnly (2),Ink (1),InkAndGesture (3),None (0),Select (4)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
EraseByPoint=None
EraseByStroke=None
GestureOnly=None
Ink=None
InkAndGesture=None
None=None
Select=None
value__=None
| 27.902439 | 215 | 0.682692 | class InkCanvasEditingMode(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the editing mode for the System.Windows.Controls.InkCanvas
enum InkCanvasEditingMode,values: EraseByPoint (5),EraseByStroke (6),GestureOnly (2),Ink (1),InkAndGesture (3),None (0),Select (4)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
EraseByPoint=None
EraseByStroke=None
GestureOnly=None
Ink=None
InkAndGesture=None
None=None
Select=None
value__=None
| false | true |
7900b278925c5fb0d6dc23776c83c90adc49ccaf | 3,613 | py | Python | jd_fanli.py | w123113/loon | 0efa307483f1da818b44a36d7ec797ad417a5b53 | [
"Apache-2.0"
] | null | null | null | jd_fanli.py | w123113/loon | 0efa307483f1da818b44a36d7ec797ad417a5b53 | [
"Apache-2.0"
] | null | null | null | jd_fanli.py | w123113/loon | 0efa307483f1da818b44a36d7ec797ad417a5b53 | [
"Apache-2.0"
] | 2 | 2021-11-06T00:45:46.000Z | 2022-01-18T07:56:47.000Z | """
const $ = new Env("京东饭粒");
京东饭粒任务
活动入口:https://u.jd.com/ytWx4w0
每天60豆小毛,爱要不要
cron:
46 9 * * * jd_fanli.py
"""
import os
import time
import re
import requests
import random
proxies = {"http": None, "https": None}
def randomstr(num):
    """Return a random string of `num` characters drawn from [a-zA-Z0-9]."""
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    # random.choices + join replaces the original quadratic `+=` loop and
    # drops the local variable that shadowed the function's own name.
    return "".join(random.choices(alphabet, k=num))
def randomstr1():
    """Return a fake device id: 16 random digits, '-', 16 random digits."""
    digits = "0123456789"
    # Build each half with choices/join instead of 16 quadratic `+=` steps,
    # and avoid shadowing the sibling randomstr() name with a local.
    left = "".join(random.choices(digits, k=16))
    right = "".join(random.choices(digits, k=16))
    return left + "-" + right
def getheader(ck):
    """Build the HTTP headers for ifanli.m.jd.com requests.

    ck: the JD cookie string; passed through unchanged as the Cookie header.
    The User-Agent embeds a fresh random device id from randomstr1().
    """
    return {
        "Host": "ifanli.m.jd.com",
        "Connection": "keep-alive",
        "Accept": "application/json, text/plain, */*",
        "Cache-Control": "no-cache",
        "User-Agent": "jdapp;android;10.2.2;11;%s;model/Mi 10;osVer/30;appBuild/91077;partner/xiaomi001;eufv/1;jdSupportDarkMode/0;Mozilla/5.0 (Linux; Android 11; Mi 10 Build/RKQ1.200826.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.120 MQQBrowser/6.2 TBS/045715 Mobile Safari/537.36" % randomstr1(),
        "Sec-Fetch-Mode": "cors",
        "X-Requested-With": "com.jingdong.app.mall",
        "Sec-Fetch-Site": "same-origin",
        "Referer": "https://ifanli.m.jd.com/rebate/earnBean.html?paltform=null",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Cookie": ck,
        "Content-Type": "application/json;charset=UTF-8"
    }
def getTaskList(ck):
    """Fetch the rebate task list; returns the response's "content" field."""
    response = requests.get(
        "https://ifanli.m.jd.com/rebateapi/task/getTaskList",
        headers=getheader(ck),
        proxies=proxies,
    )
    return response.json()["content"]
def getTaskFinishCount(ck):
    """Fetch rebate task progress; prints and returns the "content" payload.

    The returned dict contains at least "finishCount" and "maxTaskCount".
    """
    url = "https://ifanli.m.jd.com/rebateapi/task/getTaskFinishCount"
    headers = getheader(ck)
    r = requests.get(url, headers=headers, proxies=proxies)
    # Parse the body once instead of the original's three r.json() calls.
    content = r.json()["content"]
    print('已完成任务次数:', content["finishCount"], '总任务次数:', content["maxTaskCount"])
    return content
def saveTaskRecord(ck, taskId):
    """Start task `taskId`; returns the (uid, tt) pair issued by the server."""
    payload = '{"taskId":%s,"taskType":4}' % taskId
    response = requests.post(
        "https://ifanli.m.jd.com/rebateapi/task/saveTaskRecord",
        headers=getheader(ck),
        data=payload,
        proxies=proxies,
    )
    content = response.json()["content"]
    return content["uid"], content["tt"]
def saveTaskRecord1(ck, taskId, uid, tt):
    """Finish task `taskId` using the uid/tt previously issued by saveTaskRecord."""
    payload = '{"taskId":%s,"taskType":4,"uid":"%s","tt":%s}' % (taskId, uid, tt)
    response = requests.post(
        "https://ifanli.m.jd.com/rebateapi/task/saveTaskRecord",
        headers=getheader(ck),
        data=payload,
        proxies=proxies,
    )
    print(response.json()["content"]["msg"])
if __name__ == '__main__':
    # One cookie per account, '&'-separated, in the JD_COOKIE env variable.
    cks = os.environ["JD_COOKIE"].split("&")
    for ck in cks:
        # Guard the pt_pin extraction: the original re.findall(...)[0]
        # raised IndexError on a malformed cookie and killed the whole run.
        pins = re.findall(r"pt_pin=(.*?);", ck)
        if not pins:
            print("无法解析pt_pin, 跳过该cookie")
            continue
        ptpin = pins[0]
        print("--------开始京东账号", ptpin, "--------")
        try:
            count = getTaskFinishCount(ck)
            remaining = count["maxTaskCount"] - count["finishCount"]
            # range() of a non-positive number is empty, so the original
            # finishCount < maxTaskCount guard is preserved implicitly.
            for _ in range(remaining):
                tasks = getTaskList(ck)
                for task in tasks:
                    if task["taskType"] == 4:
                        uid, tt = saveTaskRecord(ck, task["taskId"])
                        # The server rejects completions that arrive too fast.
                        time.sleep(10)
                        saveTaskRecord1(ck, task["taskId"], uid, tt)
        except Exception as e:
            # Bare `except:` replaced: keep Ctrl-C working and report the cause.
            print("发生异常错误", e)
| 33.453704 | 331 | 0.599779 |
import os
import time
import re
import requests
import random
proxies = {"http": None, "https": None}
def randomstr(num):
randomstr = ""
for i in range(num):
randomstr = randomstr + random.choice("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
return randomstr
def randomstr1():
randomstr = ""
for i in range(16):
randomstr = randomstr + random.choice("0123456789")
randomstr += "-"
for i in range(16):
randomstr = randomstr + random.choice("0123456789")
return randomstr
def getheader(ck):
return {
"Host": "ifanli.m.jd.com",
"Connection": "keep-alive",
"Accept": "application/json, text/plain, */*",
"Cache-Control": "no-cache",
"User-Agent": "jdapp;android;10.2.2;11;%s;model/Mi 10;osVer/30;appBuild/91077;partner/xiaomi001;eufv/1;jdSupportDarkMode/0;Mozilla/5.0 (Linux; Android 11; Mi 10 Build/RKQ1.200826.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/77.0.3865.120 MQQBrowser/6.2 TBS/045715 Mobile Safari/537.36" % randomstr1(),
"Sec-Fetch-Mode": "cors",
"X-Requested-With": "com.jingdong.app.mall",
"Sec-Fetch-Site": "same-origin",
"Referer": "https://ifanli.m.jd.com/rebate/earnBean.html?paltform=null",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Cookie": ck,
"Content-Type": "application/json;charset=UTF-8"
}
def getTaskList(ck):
url = "https://ifanli.m.jd.com/rebateapi/task/getTaskList"
headers = getheader(ck)
r = requests.get(url, headers=headers, proxies=proxies)
return r.json()["content"]
def getTaskFinishCount(ck):
url = "https://ifanli.m.jd.com/rebateapi/task/getTaskFinishCount"
headers = getheader(ck)
r = requests.get(url, headers=headers, proxies=proxies)
print('已完成任务次数:', r.json()["content"]["finishCount"], '总任务次数:', r.json()["content"]["maxTaskCount"])
return r.json()["content"]
def saveTaskRecord(ck, taskId):
url = "https://ifanli.m.jd.com/rebateapi/task/saveTaskRecord"
headers = getheader(ck)
data = '{"taskId":%s,"taskType":4}' % taskId
r = requests.post(url, headers=headers, data=data, proxies=proxies)
return r.json()["content"]["uid"], r.json()["content"]["tt"]
def saveTaskRecord1(ck, taskId, uid, tt):
url = "https://ifanli.m.jd.com/rebateapi/task/saveTaskRecord"
headers = getheader(ck)
data = '{"taskId":%s,"taskType":4,"uid":"%s","tt":%s}' % (taskId, uid, tt)
r = requests.post(url, headers=headers, data=data, proxies=proxies)
print(r.json()["content"]["msg"])
if __name__ == '__main__':
cks = os.environ["JD_COOKIE"].split("&")
for ck in cks:
ptpin = re.findall(r"pt_pin=(.*?);", ck)[0]
print("--------开始京东账号", ptpin, "--------")
try:
count = getTaskFinishCount(ck)
if count["finishCount"] < count["maxTaskCount"]:
for times in range(count["maxTaskCount"] - count["finishCount"]):
tasks = getTaskList(ck)
for i in tasks:
if i["taskType"] == 4:
uid, tt = saveTaskRecord(ck, i["taskId"])
time.sleep(10)
saveTaskRecord1(ck, i["taskId"], uid, tt)
except:
print("发生异常错误")
| true | true |
7900b395f9f262044bbea20a2bff4d6f3c340218 | 3,229 | py | Python | source/interprocedural_analyses/taint/test/integration/via_type_of.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | 1 | 2022-02-10T10:51:32.000Z | 2022-02-10T10:51:32.000Z | source/interprocedural_analyses/taint/test/integration/via_type_of.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | source/interprocedural_analyses/taint/test/integration/via_type_of.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import enum
from builtins import _test_sink, _test_source
from typing import Annotated, Any, Dict, List
class Test1_C:
    # Typed attributes used by the via-type-of taint fixtures below.
    x: int = 0
    y: str = "y"
    z: Annotated[str, "test1"] = "z"
def test1_alarm1():
# always-via-type:int
c = Test1_C(_test_source())
_test_sink(c.x)
def test1_alarm2():
# always-via-type:str
c = Test1_C(_test_source())
_test_sink(c.y)
def test1_alarm3():
# always-via-type:typing.Annotated[str]
c = Test1_C(_test_source())
_test_sink(c.z)
def test1_alarm4(foo):
# via-type:int, via-type:str, via-type:typing.Annotated[str]
c = Test1_C(_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
class Test2_C:
    # Generic-container and annotated attribute types for via-type-of tests.
    x: Dict[str, int] = {}
    y: List[str] = []
    z: Annotated[float, "test2"] = 0.0
def test2_alarm1():
# always-via-type:Dict[str, int]
c = Test2_C(_test_source())
_test_sink(c.x)
def test2_alarm2():
# always-via-type:List[str]
c = Test2_C(_test_source())
_test_sink(c.y)
def test2_alarm3():
# always-via-type:float
c = Test2_C(_test_source())
_test_sink(c.z)
def test2_alarm4(foo):
# via-type:Dict[str, int], via-type:List[str], via-type:float
c = Test2_C(_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
class Test3_Foo:
    # Opaque user-defined type referenced by Test3_C.y.
    ...
class Test3_C:
    # Nested-generic and user-defined attribute types for via-type-of tests.
    x: Dict[str, List[int]] = {}
    y: Test3_Foo = Test3_Foo()
    z: Annotated[List[List[str]], "test3"] = []
def test3_alarm1(c: Test3_C):
# always-via-type:Dict[str, List[int]]
_test_sink(c.x)
def test3_alarm2(c: Test3_C):
# always-via-type:Test3_Foo
_test_sink(c.y)
def test3_alarm3(c: Test3_C):
# always-via-type:typing.Annotated[List[List[str]]
_test_sink(c.z)
def test3_alarm4(c: Test3_C, foo):
# via-type:Dict[str, List[int]],
# via-type:Test3_Foo,
# via-type:typing.Annotated[List[List[str]]
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
class Test4_C:
    # Weakly typed attributes: no annotation (x), Any (y), and object (z).
    x = ...
    y: Any = 0
    z: object = []
def test4_alarm1(c: Test4_C):
# always-via-type:unknown
c.x = _test_source()
def test4_alarm2(c: Test4_C):
# always-via-type:Any
c.y = _test_source()
def test4_alarm3(c: Test4_C):
# always-via-type:object
c.z = _test_source()
def return_via_parameter_type(parameter):
    """Modeled helper; the runtime body just returns a constant."""
    constant_result = 0
    return constant_result
def test_strings():
return return_via_parameter_type("A")
def test_numerals():
return return_via_parameter_type(1)
def test_lists():
return return_via_parameter_type(["a", "b"])
def meta(parameter):
return return_via_parameter_type(parameter)
def test_via_type_of_does_not_propagate():
return meta("Name")
def tito(parameter, other):
    """Modeled taint-in-taint-out helper; intentionally a runtime no-op."""
    return None
def test_tito():
a = tito(_test_source(), [1, 2])
return a
def sink_via_type_of(x, y):
    """Modeled sink; intentionally a runtime no-op."""
    return None
def test_sink(element):
return sink_via_type_of(element, 1)
def test_backwards_tito(parameter):
return tito(parameter, "by_backwards")
| 17.741758 | 65 | 0.635491 |
import enum
from builtins import _test_sink, _test_source
from typing import Annotated, Any, Dict, List
class Test1_C:
x: int = 0
y: str = "y"
z: Annotated[str, "test1"] = "z"
def test1_alarm1():
c = Test1_C(_test_source())
_test_sink(c.x)
def test1_alarm2():
c = Test1_C(_test_source())
_test_sink(c.y)
def test1_alarm3():
c = Test1_C(_test_source())
_test_sink(c.z)
def test1_alarm4(foo):
c = Test1_C(_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
class Test2_C:
x: Dict[str, int] = {}
y: List[str] = []
z: Annotated[float, "test2"] = 0.0
def test2_alarm1():
c = Test2_C(_test_source())
_test_sink(c.x)
def test2_alarm2():
c = Test2_C(_test_source())
_test_sink(c.y)
def test2_alarm3():
c = Test2_C(_test_source())
_test_sink(c.z)
def test2_alarm4(foo):
c = Test2_C(_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
class Test3_Foo:
...
class Test3_C:
x: Dict[str, List[int]] = {}
y: Test3_Foo = Test3_Foo()
z: Annotated[List[List[str]], "test3"] = []
def test3_alarm1(c: Test3_C):
_test_sink(c.x)
def test3_alarm2(c: Test3_C):
_test_sink(c.y)
def test3_alarm3(c: Test3_C):
_test_sink(c.z)
def test3_alarm4(c: Test3_C, foo):
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
class Test4_C:
x = ...
y: Any = 0
z: object = []
def test4_alarm1(c: Test4_C):
c.x = _test_source()
def test4_alarm2(c: Test4_C):
c.y = _test_source()
def test4_alarm3(c: Test4_C):
c.z = _test_source()
def return_via_parameter_type(parameter):
return 0
def test_strings():
return return_via_parameter_type("A")
def test_numerals():
return return_via_parameter_type(1)
def test_lists():
return return_via_parameter_type(["a", "b"])
def meta(parameter):
return return_via_parameter_type(parameter)
def test_via_type_of_does_not_propagate():
return meta("Name")
def tito(parameter, other):
pass
def test_tito():
a = tito(_test_source(), [1, 2])
return a
def sink_via_type_of(x, y):
pass
def test_sink(element):
return sink_via_type_of(element, 1)
def test_backwards_tito(parameter):
return tito(parameter, "by_backwards")
| true | true |
7900b43bbe6367311223bee3a8ad519eced17a32 | 4,602 | py | Python | simcse/train_unsup.py | Macielyoung/sentence_representation_matching | aa33147eb870a805f69dbc54c2177b11a94cf814 | [
"Apache-2.0"
] | 22 | 2022-01-24T10:08:39.000Z | 2022-03-31T10:47:05.000Z | simcse/train_unsup.py | Macielyoung/sentence_representation_matching | aa33147eb870a805f69dbc54c2177b11a94cf814 | [
"Apache-2.0"
] | 3 | 2022-03-06T11:52:25.000Z | 2022-03-15T06:32:17.000Z | simcse/train_unsup.py | Macielyoung/sentence_representation_matching | aa33147eb870a805f69dbc54c2177b11a94cf814 | [
"Apache-2.0"
] | 5 | 2022-02-28T09:13:04.000Z | 2022-03-22T12:50:09.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import argparse
import logging
import os
from config import Params
from datasets import load_dataset
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
import numpy as np
from SimCSE import SimCSE
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument("train_file", type=str, help="train text file")
# parser.add_argument("--pretrained", type=str, default="hfl/chinese-bert-wwm-ext", help="huggingface pretrained model")
# parser.add_argument("--model_out", type=str, default="./finder_model", help="model output path")
parser.add_argument("--num_proc", type=int, default=1, help="dataset process thread num")
parser.add_argument("--max_length", type=int, default=64, help="sentence max length")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--epochs", type=int, default=101, help="epochs")
parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
parser.add_argument("--tao", type=float, default=0.05, help="temperature")
parser.add_argument("--device", type=str, default="cuda", help="device")
parser.add_argument("--display_interval", type=int, default=500, help="display interval")
parser.add_argument("--save_interval", type=int, default=10, help="save interval")
parser.add_argument("--pool_type", type=str, default="pooler", help="pool_type")
parser.add_argument("--dropout_rate", type=float, default=0.3, help="dropout_rate")
args = parser.parse_args()
return args
def read_data(args):
with open(Params.dialogues_file, 'r') as f:
sentences = f.readlines()
dl = DataLoader(sentences,
batch_size=args.batch_size)
return dl
def duplicate_batch(batch, tokenzier, args):
'''
句子进行重复
'''
new_batch = []
for sentence in batch:
new_batch.append(sentence)
new_batch.append(sentence)
batch_encoding = tokenzier(new_batch,
padding=True,
truncation=True,
max_length=args.max_length,
return_tensors='pt')
return batch_encoding
def compute_loss(y_pred, tao=0.05, device="cuda"):
idxs = torch.arange(0, y_pred.shape[0], device=device)
y_true = idxs + 1 - idxs % 2 * 2
similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12
similarities = similarities / tao
loss = F.cross_entropy(similarities, y_true)
return torch.mean(loss)
def train(args):
tokenizer = AutoTokenizer.from_pretrained(Params.pretrained_model_path)
dl = read_data(args)
model = SimCSE(Params.pretrained_model_path, args.pool_type, args.dropout_rate).to(args.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
model.train()
batch_idx = 0
min_loss = 10000000
for epoch_idx in range(args.epochs):
epoch_losses = []
for data in tqdm(dl):
batch_idx += 1
new_batch_data = duplicate_batch(data, tokenizer, args)
pred = model(input_ids=new_batch_data["input_ids"].to(args.device),
attention_mask=new_batch_data["attention_mask"].to(args.device),
token_type_ids=new_batch_data["token_type_ids"].to(args.device))
loss = compute_loss(pred, args.tao, args.device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = loss.item()
epoch_losses.append(loss)
if batch_idx % args.display_interval == 0:
logging.info(f"epoch: {epoch_idx}, batch_idx: {batch_idx}, loss: {loss:>10f}")
avg_epoch_loss = np.mean(epoch_losses)
if avg_epoch_loss < min_loss:
min_loss = avg_epoch_loss
torch.save({
'epoch': epoch_idx,
'model_state_dict': model.state_dict(),
'loss': avg_epoch_loss
}, Params.simcse_model_path)
def main():
args = parse_args()
train(args)
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| 38.35 | 124 | 0.651673 |
import argparse
import logging
import os
from config import Params
from datasets import load_dataset
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
import numpy as np
from SimCSE import SimCSE
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--num_proc", type=int, default=1, help="dataset process thread num")
parser.add_argument("--max_length", type=int, default=64, help="sentence max length")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--epochs", type=int, default=101, help="epochs")
parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
parser.add_argument("--tao", type=float, default=0.05, help="temperature")
parser.add_argument("--device", type=str, default="cuda", help="device")
parser.add_argument("--display_interval", type=int, default=500, help="display interval")
parser.add_argument("--save_interval", type=int, default=10, help="save interval")
parser.add_argument("--pool_type", type=str, default="pooler", help="pool_type")
parser.add_argument("--dropout_rate", type=float, default=0.3, help="dropout_rate")
args = parser.parse_args()
return args
def read_data(args):
with open(Params.dialogues_file, 'r') as f:
sentences = f.readlines()
dl = DataLoader(sentences,
batch_size=args.batch_size)
return dl
def duplicate_batch(batch, tokenzier, args):
new_batch = []
for sentence in batch:
new_batch.append(sentence)
new_batch.append(sentence)
batch_encoding = tokenzier(new_batch,
padding=True,
truncation=True,
max_length=args.max_length,
return_tensors='pt')
return batch_encoding
def compute_loss(y_pred, tao=0.05, device="cuda"):
idxs = torch.arange(0, y_pred.shape[0], device=device)
y_true = idxs + 1 - idxs % 2 * 2
similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12
similarities = similarities / tao
loss = F.cross_entropy(similarities, y_true)
return torch.mean(loss)
def train(args):
tokenizer = AutoTokenizer.from_pretrained(Params.pretrained_model_path)
dl = read_data(args)
model = SimCSE(Params.pretrained_model_path, args.pool_type, args.dropout_rate).to(args.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
model.train()
batch_idx = 0
min_loss = 10000000
for epoch_idx in range(args.epochs):
epoch_losses = []
for data in tqdm(dl):
batch_idx += 1
new_batch_data = duplicate_batch(data, tokenizer, args)
pred = model(input_ids=new_batch_data["input_ids"].to(args.device),
attention_mask=new_batch_data["attention_mask"].to(args.device),
token_type_ids=new_batch_data["token_type_ids"].to(args.device))
loss = compute_loss(pred, args.tao, args.device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = loss.item()
epoch_losses.append(loss)
if batch_idx % args.display_interval == 0:
logging.info(f"epoch: {epoch_idx}, batch_idx: {batch_idx}, loss: {loss:>10f}")
avg_epoch_loss = np.mean(epoch_losses)
if avg_epoch_loss < min_loss:
min_loss = avg_epoch_loss
torch.save({
'epoch': epoch_idx,
'model_state_dict': model.state_dict(),
'loss': avg_epoch_loss
}, Params.simcse_model_path)
def main():
args = parse_args()
train(args)
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| true | true |
7900b4fc822a05a014be892f51a80210696f3a13 | 865 | py | Python | josephus.py | rhthomas/Python-Interview-Problems-for-Practice | cb713c13f6d70851dbde6337944a77940dfabff2 | [
"MIT"
] | null | null | null | josephus.py | rhthomas/Python-Interview-Problems-for-Practice | cb713c13f6d70851dbde6337944a77940dfabff2 | [
"MIT"
] | null | null | null | josephus.py | rhthomas/Python-Interview-Problems-for-Practice | cb713c13f6d70851dbde6337944a77940dfabff2 | [
"MIT"
] | 1 | 2020-08-21T04:08:42.000Z | 2020-08-21T04:08:42.000Z | # Problem: N soldiers are standing in a circle and
# first person has sword and he kills the 2nd person
# and gives the sword to the third person and so on
# till 99th person kills the 100th person gives the
# sword back to the first person, this goes on till
# only one person survives. Print the survivor.
def josephus(people, step=2):
if step<=1:
print("Enter step value, greater than 1")
else:
step -= 1 # translated to zero-based indexing
kill = step # kill will hold the index of current person to die
while(len(people) > 1):
print(people.pop(kill)) # pop method removes the element from the list
kill = (kill + step) % len(people)
print(people[0], "is safe")
num = int(input("Enter the number of soldiers: "))
soldiers = [i for i in range(1, num+1)] # generates a list of 1..num
josephus(soldiers)
| 37.608696 | 76 | 0.678613 |
def josephus(people, step=2):
if step<=1:
print("Enter step value, greater than 1")
else:
step -= 1
kill = step
while(len(people) > 1):
print(people.pop(kill))
kill = (kill + step) % len(people)
print(people[0], "is safe")
num = int(input("Enter the number of soldiers: "))
soldiers = [i for i in range(1, num+1)]
josephus(soldiers)
| true | true |
7900b61193b6ea07e230a1500d503e1cdaf6fd68 | 2,269 | py | Python | airflow/executors/celery_executor.py | fengzhongzhu1621/xAirflow | 4ecd136eb662d44a4f8d7b9262eca5f2d9f91ec0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2018-08-31T05:27:36.000Z | 2019-04-10T13:09:18.000Z | airflow/executors/celery_executor.py | fengzhongzhu1621/xAirflow | 4ecd136eb662d44a4f8d7b9262eca5f2d9f91ec0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/executors/celery_executor.py | fengzhongzhu1621/xAirflow | 4ecd136eb662d44a4f8d7b9262eca5f2d9f91ec0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import os
from celery import Celery
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow import configuration
from xTool.utils.log.logging_mixin import LoggingMixin
from xTool.utils.module_loading import import_string
from xTool.executors.celery_executor import CeleryExecutor
'''
To start the celery worker, run the command:
airflow worker
'''
# 获得配置文件的路径,并导入celery默认配置
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
# 创建一个celery客户端
celery_app_name = configuration.conf.get('celery', 'CELERY_APP_NAME')
app = Celery(
celery_app_name,
config_source=celery_configuration)
@app.task
def execute_command(command):
"""airflow worker 执行shell命令 ."""
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command)
env = os.environ.copy()
try:
# celery worker 收到消息后,执行消息中的shell命令
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
| 34.907692 | 76 | 0.75584 |
import subprocess
import os
from celery import Celery
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow import configuration
from xTool.utils.log.logging_mixin import LoggingMixin
from xTool.utils.module_loading import import_string
from xTool.executors.celery_executor import CeleryExecutor
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
celery_app_name = configuration.conf.get('celery', 'CELERY_APP_NAME')
app = Celery(
celery_app_name,
config_source=celery_configuration)
@app.task
def execute_command(command):
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command)
env = os.environ.copy()
try:
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
| true | true |
7900b737bfa055d4ccba8b5ba8bf4355f56555bc | 2,455 | py | Python | topy/data/H8T_K.py | TarcisioLOliveira/topy | 060da675e6494fee63fa5547befcb1f8ecc39fdc | [
"MIT"
] | 1 | 2021-01-25T00:13:34.000Z | 2021-01-25T00:13:34.000Z | topy/data/H8T_K.py | TarcisioLOliveira/topy | 060da675e6494fee63fa5547befcb1f8ecc39fdc | [
"MIT"
] | null | null | null | topy/data/H8T_K.py | TarcisioLOliveira/topy | 060da675e6494fee63fa5547befcb1f8ecc39fdc | [
"MIT"
] | null | null | null | """
# =============================================================================
# Creates the stiffness matrix as requested, using the material properties
# provided in the TPD file (for v2020 files).
#
# Author: William Hunter, Tarcísio L. de Oliveira
# Copyright (C) 2008, 2015, William Hunter.
# Copyright (C) 2020, 2021, Tarcísio L. de Oliveira
# =============================================================================
"""
from __future__ import division
import os
from sympy import symbols, Matrix, diff, integrate, zeros
from numpy import abs, array
from ..utils import get_logger
logger = get_logger(__name__)
def create_K(_L, _E, _nu, _k, _t):
# Initialize variables
_a, _b, _c = _L, _L, _L # element dimensions (half-lengths)
_G = _E / (2 * (1 + _nu)) # modulus of rigidity
_g = _E / ((1 + _nu) * (1 - 2 * _nu))
# SymPy symbols:
x, y, z = symbols('x y z')
N1, N2, N3, N4 = symbols('N1 N2 N3 N4')
N5, N6, N7, N8 = symbols('N5 N6 N7 N8')
xlist = [x, x, x, x, x, x, x, x]
ylist = [y, y, y, y, y, y, y, y]
zlist = [z, z, z, z, z, z, z, z]
# Shape functions:
N1 = (_a - x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
N2 = (_a + x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
N3 = (_a + x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
N4 = (_a - x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
N5 = (_a - x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
N6 = (_a + x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
N7 = (_a + x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
N8 = (_a - x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
# Create strain-displacement matrix B:
B0 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], xlist))
B1 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], ylist))
B2 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], zlist))
B = Matrix([B0, B1, B2])
# Create conductivity matrix:
C = Matrix([[_k, 0, 0],
[0, _k, 0],
[0, 0, _k]])
dK = B.T * C * B
# Integration:
logger.info('SymPy is integrating: K for H8T...')
K = dK.integrate((x, -_a, _a),(y, -_b, _b),(z, -_c, _c))
# Convert SymPy Matrix to NumPy array:
K = array(K, dtype='double')
C = array(C, dtype='double')
# Set small (<< 0) values equal to zero:
K[abs(K) < 1e-6] = 0
# Return result:
logger.info('Created stiffness matrix.')
return K, B, C
# EOF H8T_K.py
| 32.733333 | 79 | 0.479837 | """
# =============================================================================
# Creates the stiffness matrix as requested, using the material properties
# provided in the TPD file (for v2020 files).
#
# Author: William Hunter, Tarcísio L. de Oliveira
# Copyright (C) 2008, 2015, William Hunter.
# Copyright (C) 2020, 2021, Tarcísio L. de Oliveira
# =============================================================================
"""
from __future__ import division
import os
from sympy import symbols, Matrix, diff, integrate, zeros
from numpy import abs, array
from ..utils import get_logger
logger = get_logger(__name__)
def create_K(_L, _E, _nu, _k, _t):
_a, _b, _c = _L, _L, _L
_G = _E / (2 * (1 + _nu))
_g = _E / ((1 + _nu) * (1 - 2 * _nu))
x, y, z = symbols('x y z')
N1, N2, N3, N4 = symbols('N1 N2 N3 N4')
N5, N6, N7, N8 = symbols('N5 N6 N7 N8')
xlist = [x, x, x, x, x, x, x, x]
ylist = [y, y, y, y, y, y, y, y]
zlist = [z, z, z, z, z, z, z, z]
N1 = (_a - x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
N2 = (_a + x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
N3 = (_a + x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
N4 = (_a - x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
N5 = (_a - x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
N6 = (_a + x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
N7 = (_a + x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
N8 = (_a - x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
B0 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], xlist))
B1 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], ylist))
B2 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], zlist))
B = Matrix([B0, B1, B2])
C = Matrix([[_k, 0, 0],
[0, _k, 0],
[0, 0, _k]])
dK = B.T * C * B
logger.info('SymPy is integrating: K for H8T...')
K = dK.integrate((x, -_a, _a),(y, -_b, _b),(z, -_c, _c))
K = array(K, dtype='double')
C = array(C, dtype='double')
K[abs(K) < 1e-6] = 0
logger.info('Created stiffness matrix.')
return K, B, C
| false | true |
7900b77822db44af6327d63ca1ffd3f9069f8a81 | 30,988 | py | Python | tempest/api/compute/base.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/base.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/base.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest.common import compute
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
tempest.test.BaseTestCase):
"""Base test case class for all Compute API tests."""
force_tenant_isolation = False
# Set this to True in subclasses to create a default network. See
# https://bugs.launchpad.net/tempest/+bug/1844568
create_default_network = False
# TODO(andreaf) We should care also for the alt_manager here
# but only once client lazy load in the manager is done
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
api_version_utils.check_skip_with_microversion(
cls.min_microversion, cls.max_microversion,
CONF.compute.min_microversion, CONF.compute.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.volume_min_microversion, cls.volume_max_microversion,
CONF.volume.min_microversion, CONF.volume.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.placement_min_microversion, cls.placement_max_microversion,
CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
# Setting network=True, subnet=True creates a default network
cls.set_network_resources(
network=cls.create_default_network,
subnet=cls.create_default_network)
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(BaseV2ComputeTest, cls).setup_clients()
cls.servers_client = cls.os_primary.servers_client
cls.server_groups_client = cls.os_primary.server_groups_client
cls.flavors_client = cls.os_primary.flavors_client
cls.compute_images_client = cls.os_primary.compute_images_client
cls.extensions_client = cls.os_primary.extensions_client
cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client
cls.floating_ips_client = cls.os_primary.compute_floating_ips_client
cls.keypairs_client = cls.os_primary.keypairs_client
cls.security_group_rules_client = (
cls.os_primary.compute_security_group_rules_client)
cls.security_groups_client =\
cls.os_primary.compute_security_groups_client
cls.quotas_client = cls.os_primary.quotas_client
cls.compute_networks_client = cls.os_primary.compute_networks_client
cls.limits_client = cls.os_primary.limits_client
cls.volumes_extensions_client =\
cls.os_primary.volumes_extensions_client
cls.snapshots_extensions_client =\
cls.os_primary.snapshots_extensions_client
cls.interfaces_client = cls.os_primary.interfaces_client
cls.fixed_ips_client = cls.os_primary.fixed_ips_client
cls.availability_zone_client = cls.os_primary.availability_zone_client
cls.agents_client = cls.os_primary.agents_client
cls.aggregates_client = cls.os_primary.aggregates_client
cls.services_client = cls.os_primary.services_client
cls.instance_usages_audit_log_client = (
cls.os_primary.instance_usages_audit_log_client)
cls.hypervisor_client = cls.os_primary.hypervisor_client
cls.certificates_client = cls.os_primary.certificates_client
cls.migrations_client = cls.os_primary.migrations_client
cls.security_group_default_rules_client = (
cls.os_primary.security_group_default_rules_client)
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
cls.attachments_client = cls.os_primary.attachments_client_latest
cls.snapshots_client = cls.os_primary.snapshots_client_latest
if CONF.service_available.glance:
if CONF.image_feature_enabled.api_v1:
cls.images_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.images_client = cls.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
cls._check_depends_on_nova_network()
@classmethod
def _check_depends_on_nova_network(cls):
# Since nova-network APIs were removed from Nova in the Rocky release,
# determine, based on the max version from the version document, if
# the compute API is >Queens and if so, skip tests that rely on
# nova-network.
if not getattr(cls, 'depends_on_nova_network', False):
return
versions = cls.versions_client.list_versions()['versions']
# Find the v2.1 version which will tell us our max version for the
# compute API we're testing against.
for version in versions:
if version['id'] == 'v2.1':
max_version = api_version_request.APIVersionRequest(
version['version'])
break
else:
LOG.warning(
'Unable to determine max v2.1 compute API version: %s',
versions)
return
# The max compute API version in Queens is 2.60 so we cap
# at that version.
queens = api_version_request.APIVersionRequest('2.60')
if max_version > queens:
raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
super(BaseV2ComputeTest, cls).resource_setup()
cls.request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
cls.volume_request_microversion = (
api_version_utils.select_request_microversion(
cls.volume_min_microversion,
CONF.volume.min_microversion))
cls.placement_request_microversion = (
api_version_utils.select_request_microversion(
cls.placement_min_microversion,
CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
cls.image_ref_alt = CONF.compute.image_ref_alt
cls.flavor_ref = CONF.compute.flavor_ref
cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
cls.ssh_user = CONF.validation.image_ssh_user
cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_user = CONF.validation.image_ssh_user
cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
@classmethod
def is_requested_microversion_compatible(cls, max_version):
"""Check the compatibility of selected request microversion
This method will check if selected request microversion
(cls.request_microversion) for test is compatible with respect
to 'max_version'. Compatible means if selected request microversion
is in the range(<=) of 'max_version'.
:param max_version: maximum microversion to compare for compatibility.
Example: '2.30'
:returns: True if selected request microversion is compatible with
'max_version'. False in other case.
"""
try:
req_version_obj = api_version_request.APIVersionRequest(
cls.request_microversion)
# NOTE(gmann): This is case where this method is used before calling
# resource_setup(), where cls.request_microversion is set. There may
# not be any such case but still we can handle this case.
except AttributeError:
request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
req_version_obj = api_version_request.APIVersionRequest(
request_microversion)
max_version_obj = api_version_request.APIVersionRequest(max_version)
return req_version_obj <= max_version_obj
@classmethod
def server_check_teardown(cls):
"""Checks is the shared server clean enough for subsequent test.
Method will delete the server when it's dirty.
The setUp method is responsible for creating a new server.
Exceptions raised in tearDown class are fails the test case,
This method supposed to use only by tearDown methods, when
the shared server_id is stored in the server_id of the class.
"""
if getattr(cls, 'server_id', None) is not None:
try:
waiters.wait_for_server_status(cls.servers_client,
cls.server_id, 'ACTIVE')
except Exception as exc:
LOG.exception(exc)
cls.servers_client.delete_server(cls.server_id)
waiters.wait_for_server_termination(cls.servers_client,
cls.server_id)
cls.server_id = None
raise
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
validation_resources=None, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
:param validatable: Whether the server will be pingable or sshable.
:param volume_backed: Whether the instance is volume backed or not.
:param validation_resources: Dictionary of validation resources as
returned by `get_class_validation_resources`.
:param clients: Client manager, defaults to os_primary.
:param kwargs: Extra arguments are passed down to the
`compute.create_test_server` call.
"""
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
request_version = api_version_request.APIVersionRequest(
cls.request_microversion)
v2_37_version = api_version_request.APIVersionRequest('2.37')
tenant_network = cls.get_tenant_network()
# NOTE(snikitin): since microversion v2.37 'networks' field is required
if (request_version >= v2_37_version and 'networks' not in kwargs and
not tenant_network):
kwargs['networks'] = 'none'
if clients is None:
clients = cls.os_primary
body, servers = compute.create_test_server(
clients,
validatable,
validation_resources=validation_resources,
tenant_network=tenant_network,
volume_backed=volume_backed,
**kwargs)
# For each server schedule wait and delete, so we first delete all
# and then wait for all
for server in servers:
cls.addClassResourceCleanup(waiters.wait_for_server_termination,
clients.servers_client, server['id'])
for server in servers:
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, server['id'])
return body
@classmethod
def create_security_group(cls, name=None, description=None):
if name is None:
name = data_utils.rand_name(cls.__name__ + "-securitygroup")
if description is None:
description = data_utils.rand_name('description')
body = cls.security_groups_client.create_security_group(
name=name, description=description)['security_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.security_groups_client.delete_security_group,
body['id'])
return body
@classmethod
def create_test_server_group(cls, name="", policy=None):
if not name:
name = data_utils.rand_name(cls.__name__ + "-Server-Group")
if policy is None:
policy = ['affinity']
body = cls.server_groups_client.create_server_group(
name=name, policies=policy)['server_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.server_groups_client.delete_server_group,
body['id'])
return body
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
while True:
try:
condition()
except Exception:
pass
else:
return
if int(time.time()) - start_time >= self.build_timeout:
condition()
return
time.sleep(self.build_interval)
@classmethod
def prepare_instance_network(cls):
if (CONF.validation.auth_method != 'disabled' and
CONF.validation.connect_method == 'floating'):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
    @classmethod
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server.

        If compute microversion >= 2.36, the returned image response will
        be from the image service API rather than the compute image proxy API.

        :param server_id: UUID of the server to snapshot.
        :param kwargs: extra arguments for the create_image call; ``name``,
            ``wait_until`` and ``wait_for_server`` are consumed here and are
            not forwarded to the API.
        :return: the created image dict; refreshed via show_image when
            ``wait_until`` is given.
        """
        name = kwargs.pop('name',
                          data_utils.rand_name(cls.__name__ + "-image"))
        wait_until = kwargs.pop('wait_until', None)
        wait_for_server = kwargs.pop('wait_for_server', True)
        image = cls.compute_images_client.create_image(server_id, name=name,
                                                       **kwargs)
        # Microversion 2.45 changed the create_image response, so the new
        # image id comes either from the response body or is parsed out of
        # the 'Location' response header.
        if api_version_utils.compare_version_header_to_response(
                "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
            image_id = image['image_id']
        else:
            image_id = data_utils.parse_image_id(image.response['location'])
        # The compute image proxy APIs were deprecated in 2.35 so
        # use the images client directly if the API microversion being
        # used is >=2.36.
        if not cls.is_requested_microversion_compatible('2.35'):
            client = cls.images_client
        else:
            client = cls.compute_images_client
        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                    client.delete_image, image_id)
        if wait_until is not None:
            try:
                wait_until = wait_until.upper()
                # The image service reports lower-case statuses ('active')
                # while the compute proxy reports upper-case ('ACTIVE').
                if not cls.is_requested_microversion_compatible('2.35'):
                    wait_until = wait_until.lower()
                waiters.wait_for_image_status(client, image_id, wait_until)
            except lib_exc.NotFound:
                if wait_until.upper() == 'ACTIVE':
                    # If the image is not found after create_image returned
                    # that means the snapshot failed in nova-compute and nova
                    # deleted the image. There should be a compute fault
                    # recorded with the server in that case, so get the server
                    # and dump some details.
                    server = (
                        cls.servers_client.show_server(server_id)['server'])
                    if 'fault' in server:
                        raise exceptions.SnapshotNotFoundException(
                            server['fault'], image_id=image_id)
                    else:
                        raise exceptions.SnapshotNotFoundException(
                            image_id=image_id)
                else:
                    raise
            image = client.show_image(image_id)
            # Compute image client returns response wrapped in 'image' element
            # which is not the case with Glance image client.
            if 'image' in image:
                image = image['image']
            if wait_until.upper() == 'ACTIVE':
                if wait_for_server:
                    waiters.wait_for_server_status(cls.servers_client,
                                                   server_id, 'ACTIVE')
        return image
@classmethod
def recreate_server(cls, server_id, validatable=False, **kwargs):
"""Destroy an existing class level server and creates a new one
Some test classes use a test server that can be used by multiple
tests. This is done to optimise runtime and test load.
If something goes wrong with the test server, it can be rebuilt
using this helper.
This helper can also be used for the initial provisioning if no
server_id is specified.
:param server_id: UUID of the server to be rebuilt. If None is
specified, a new server is provisioned.
:param validatable: whether to the server needs to be
validatable. When True, validation resources are acquired via
the `get_class_validation_resources` helper.
:param kwargs: extra paramaters are passed through to the
`create_test_server` call.
:return: the UUID of the created server.
"""
if server_id:
cls.delete_server(server_id)
cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
validation_resources=cls.get_class_validation_resources(
cls.os_primary),
wait_until='ACTIVE',
adminPass=cls.password,
**kwargs)
return server['id']
@classmethod
def delete_server(cls, server_id):
"""Deletes an existing server and waits for it to be gone."""
try:
cls.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(cls.servers_client,
server_id)
except Exception:
LOG.exception('Failed to delete server %s', server_id)
def resize_server(self, server_id, new_flavor_id, **kwargs):
"""resize and confirm_resize an server, waits for it to be ACTIVE."""
self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
waiters.wait_for_server_status(self.servers_client, server_id,
'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(
self.servers_client, server_id, 'ACTIVE')
server = self.servers_client.show_server(server_id)['server']
self.assert_flavor_equal(new_flavor_id, server['flavor'])
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
try:
cls.volumes_client.delete_volume(volume_id)
# TODO(mriedem): We should move the wait_for_resource_deletion
# into the delete_volume method as a convenience to the caller.
cls.volumes_client.wait_for_resource_deletion(volume_id)
except lib_exc.NotFound:
LOG.warning("Unable to delete volume '%s' since it was not found. "
"Maybe it was already deleted?", volume_id)
@classmethod
def get_server_ip(cls, server, validation_resources=None):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
:param server: The server dict as returned by the API
:param validation_resources: The dict of validation resources
provisioned for the server.
"""
if CONF.validation.connect_method == 'floating':
if validation_resources:
return validation_resources['floating_ip']['ip']
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
if address['version'] == CONF.validation.ip_version_for_ssh:
return address['addr']
raise exceptions.ServerUnreachable(server_id=server['id'])
else:
raise lib_exc.InvalidConfiguration()
    def setUp(self):
        """Pin all service clients to the selected API microversions."""
        super(BaseV2ComputeTest, self).setUp()
        # The fixture applies the microversions chosen in resource_setup()
        # to compute, volume and placement requests for this test's duration.
        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
            compute_microversion=self.request_microversion,
            volume_microversion=self.volume_request_microversion,
            placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
"""Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
:param kwargs: other parameters to create volume.
:returns: The available volume.
"""
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
if 'display_name' not in kwargs:
vol_name = data_utils.rand_name(cls.__name__ + '-volume')
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], 'available')
return volume
def _detach_volume(self, server, volume):
"""Helper method to detach a volume.
Ignores 404 responses if the volume or server do not exist, or the
volume is already detached from the server.
"""
try:
volume = self.volumes_client.show_volume(volume['id'])['volume']
# Check the status. You can only detach an in-use volume, otherwise
# the compute API will return a 400 response.
if volume['status'] == 'in-use':
self.servers_client.detach_volume(server['id'], volume['id'])
except lib_exc.NotFound:
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
pass
    def attach_volume(self, server, volume, device=None, tag=None):
        """Attaches volume to server and waits for 'in-use' volume status.

        The volume will be detached when the test tears down.

        :param server: The server to which the volume will be attached.
        :param volume: The volume to attach.
        :param device: Optional mountpoint for the attached volume. Note that
            this is not guaranteed for all hypervisors and is not recommended.
        :param tag: Optional device role tag to apply to the volume.
        :return: the 'volumeAttachment' dict returned by the attach call.
        """
        attach_kwargs = dict(volumeId=volume['id'])
        if device:
            attach_kwargs['device'] = device
        if tag:
            attach_kwargs['tag'] = tag
        attachment = self.servers_client.attach_volume(
            server['id'], **attach_kwargs)['volumeAttachment']
        # On teardown detach the volume and for multiattach volumes wait for
        # the attachment to be removed. For non-multiattach volumes wait for
        # the state of the volume to change to available. This is so we don't
        # error out when trying to delete the volume during teardown.
        if volume['multiattach']:
            att = waiters.wait_for_volume_attachment_create(
                self.volumes_client, volume['id'], server['id'])
            self.addCleanup(waiters.wait_for_volume_attachment_remove,
                            self.volumes_client, volume['id'],
                            att['attachment_id'])
        else:
            self.addCleanup(waiters.wait_for_volume_resource_status,
                            self.volumes_client, volume['id'], 'available')
            waiters.wait_for_volume_resource_status(self.volumes_client,
                                                    volume['id'], 'in-use')
        # Cleanups run LIFO, so this detach executes before the waits
        # registered above.
        # Ignore 404s on detach in case the server is deleted or the volume
        # is already detached.
        self.addCleanup(self._detach_volume, server, volume)
        return attachment
def create_volume_snapshot(self, volume_id, name=None, description=None,
metadata=None, force=False):
name = name or data_utils.rand_name(
self.__class__.__name__ + '-snapshot')
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume_id,
force=force,
display_name=name,
description=description,
metadata=metadata)['snapshot']
self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
snapshot['id'])
self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
waiters.wait_for_volume_resource_status(self.snapshots_client,
snapshot['id'], 'available')
snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
return snapshot
def assert_flavor_equal(self, flavor_id, server_flavor):
"""Check whether server_flavor equals to flavor.
:param flavor_id: flavor id
:param server_flavor: flavor info returned by show_server.
"""
# Nova API > 2.46 no longer includes flavor.id, and schema check
# will cover whether 'id' should be in flavor
if server_flavor.get('id'):
msg = ('server flavor is not same as flavor!')
self.assertEqual(flavor_id, server_flavor['id'], msg)
else:
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
self.assertEqual(flavor['name'], server_flavor['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
msg = ('attribute %s in server flavor is not same as '
'flavor!' % key)
self.assertEqual(flavor[key], server_flavor[key], msg)
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
    """Base test case class for Compute Admin API tests."""

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        """Expose admin-scoped service clients on the test class."""
        super(BaseV2ComputeAdminTest, cls).setup_clients()
        admin = cls.os_admin
        cls.availability_zone_admin_client = admin.availability_zone_client
        cls.admin_flavors_client = admin.flavors_client
        cls.admin_servers_client = admin.servers_client
        cls.image_client = admin.image_client_v2
        cls.admin_assisted_volume_snapshots_client = (
            admin.assisted_volume_snapshots_client)

    def create_flavor(self, ram, vcpus, disk, name=None,
                      is_public='True', **kwargs):
        """Create a flavor via the admin API and delete it on teardown."""
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-flavor")
        flavor_id = kwargs.pop('id', data_utils.rand_int_id(start=1000))
        client = self.admin_flavors_client
        flavor = client.create_flavor(
            ram=ram, vcpus=vcpus, disk=disk, name=name,
            id=flavor_id, is_public=is_public, **kwargs)['flavor']
        # Cleanups run LIFO: delete first, then wait for the deletion.
        self.addCleanup(client.wait_for_resource_deletion, flavor['id'])
        self.addCleanup(client.delete_flavor, flavor['id'])
        return flavor

    @classmethod
    def get_host_for_server(cls, server_id):
        """Return the compute host name the given server runs on."""
        server = cls.admin_servers_client.show_server(server_id)['server']
        return server['OS-EXT-SRV-ATTR:host']

    def get_host_other_than(self, server_id):
        """Return an up, enabled compute host other than the server's own.

        When [compute]compute_volume_common_az is set, only hosts in that
        zone are considered. Returns None when no other host qualifies.
        """
        source_host = self.get_host_for_server(server_id)
        common_az = CONF.compute.compute_volume_common_az
        services = self.os_admin.services_client.list_services(
            binary='nova-compute')['services']
        candidates = []
        for service in services:
            if service['state'] != 'up' or service['status'] != 'enabled':
                continue
            if common_az and service['zone'] != common_az:
                continue
            candidates.append(service['host'])
        return next(
            (host for host in candidates if host != source_host), None)
| 45.637703 | 79 | 0.639635 |
import time
from oslo_log import log as logging
from tempest.common import compute
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
tempest.test.BaseTestCase):
force_tenant_isolation = False
create_default_network = False
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
api_version_utils.check_skip_with_microversion(
cls.min_microversion, cls.max_microversion,
CONF.compute.min_microversion, CONF.compute.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.volume_min_microversion, cls.volume_max_microversion,
CONF.volume.min_microversion, CONF.volume.max_microversion)
api_version_utils.check_skip_with_microversion(
cls.placement_min_microversion, cls.placement_max_microversion,
CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
cls.set_network_resources(
network=cls.create_default_network,
subnet=cls.create_default_network)
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(BaseV2ComputeTest, cls).setup_clients()
cls.servers_client = cls.os_primary.servers_client
cls.server_groups_client = cls.os_primary.server_groups_client
cls.flavors_client = cls.os_primary.flavors_client
cls.compute_images_client = cls.os_primary.compute_images_client
cls.extensions_client = cls.os_primary.extensions_client
cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client
cls.floating_ips_client = cls.os_primary.compute_floating_ips_client
cls.keypairs_client = cls.os_primary.keypairs_client
cls.security_group_rules_client = (
cls.os_primary.compute_security_group_rules_client)
cls.security_groups_client =\
cls.os_primary.compute_security_groups_client
cls.quotas_client = cls.os_primary.quotas_client
cls.compute_networks_client = cls.os_primary.compute_networks_client
cls.limits_client = cls.os_primary.limits_client
cls.volumes_extensions_client =\
cls.os_primary.volumes_extensions_client
cls.snapshots_extensions_client =\
cls.os_primary.snapshots_extensions_client
cls.interfaces_client = cls.os_primary.interfaces_client
cls.fixed_ips_client = cls.os_primary.fixed_ips_client
cls.availability_zone_client = cls.os_primary.availability_zone_client
cls.agents_client = cls.os_primary.agents_client
cls.aggregates_client = cls.os_primary.aggregates_client
cls.services_client = cls.os_primary.services_client
cls.instance_usages_audit_log_client = (
cls.os_primary.instance_usages_audit_log_client)
cls.hypervisor_client = cls.os_primary.hypervisor_client
cls.certificates_client = cls.os_primary.certificates_client
cls.migrations_client = cls.os_primary.migrations_client
cls.security_group_default_rules_client = (
cls.os_primary.security_group_default_rules_client)
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
cls.attachments_client = cls.os_primary.attachments_client_latest
cls.snapshots_client = cls.os_primary.snapshots_client_latest
if CONF.service_available.glance:
if CONF.image_feature_enabled.api_v1:
cls.images_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.images_client = cls.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
cls._check_depends_on_nova_network()
@classmethod
def _check_depends_on_nova_network(cls):
if not getattr(cls, 'depends_on_nova_network', False):
return
versions = cls.versions_client.list_versions()['versions']
for version in versions:
if version['id'] == 'v2.1':
max_version = api_version_request.APIVersionRequest(
version['version'])
break
else:
LOG.warning(
'Unable to determine max v2.1 compute API version: %s',
versions)
return
# The max compute API version in Queens is 2.60 so we cap
# at that version.
queens = api_version_request.APIVersionRequest('2.60')
if max_version > queens:
raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
super(BaseV2ComputeTest, cls).resource_setup()
cls.request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
cls.volume_request_microversion = (
api_version_utils.select_request_microversion(
cls.volume_min_microversion,
CONF.volume.min_microversion))
cls.placement_request_microversion = (
api_version_utils.select_request_microversion(
cls.placement_min_microversion,
CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
cls.image_ref_alt = CONF.compute.image_ref_alt
cls.flavor_ref = CONF.compute.flavor_ref
cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
cls.ssh_user = CONF.validation.image_ssh_user
cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_user = CONF.validation.image_ssh_user
cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
@classmethod
def is_requested_microversion_compatible(cls, max_version):
try:
req_version_obj = api_version_request.APIVersionRequest(
cls.request_microversion)
# NOTE(gmann): This is case where this method is used before calling
# resource_setup(), where cls.request_microversion is set. There may
# not be any such case but still we can handle this case.
except AttributeError:
request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
req_version_obj = api_version_request.APIVersionRequest(
request_microversion)
max_version_obj = api_version_request.APIVersionRequest(max_version)
return req_version_obj <= max_version_obj
@classmethod
def server_check_teardown(cls):
if getattr(cls, 'server_id', None) is not None:
try:
waiters.wait_for_server_status(cls.servers_client,
cls.server_id, 'ACTIVE')
except Exception as exc:
LOG.exception(exc)
cls.servers_client.delete_server(cls.server_id)
waiters.wait_for_server_termination(cls.servers_client,
cls.server_id)
cls.server_id = None
raise
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
validation_resources=None, clients=None, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
request_version = api_version_request.APIVersionRequest(
cls.request_microversion)
v2_37_version = api_version_request.APIVersionRequest('2.37')
tenant_network = cls.get_tenant_network()
# NOTE(snikitin): since microversion v2.37 'networks' field is required
if (request_version >= v2_37_version and 'networks' not in kwargs and
not tenant_network):
kwargs['networks'] = 'none'
if clients is None:
clients = cls.os_primary
body, servers = compute.create_test_server(
clients,
validatable,
validation_resources=validation_resources,
tenant_network=tenant_network,
volume_backed=volume_backed,
**kwargs)
# For each server schedule wait and delete, so we first delete all
# and then wait for all
for server in servers:
cls.addClassResourceCleanup(waiters.wait_for_server_termination,
clients.servers_client, server['id'])
for server in servers:
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, server['id'])
return body
@classmethod
def create_security_group(cls, name=None, description=None):
if name is None:
name = data_utils.rand_name(cls.__name__ + "-securitygroup")
if description is None:
description = data_utils.rand_name('description')
body = cls.security_groups_client.create_security_group(
name=name, description=description)['security_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.security_groups_client.delete_security_group,
body['id'])
return body
@classmethod
def create_test_server_group(cls, name="", policy=None):
if not name:
name = data_utils.rand_name(cls.__name__ + "-Server-Group")
if policy is None:
policy = ['affinity']
body = cls.server_groups_client.create_server_group(
name=name, policies=policy)['server_group']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.server_groups_client.delete_server_group,
body['id'])
return body
def wait_for(self, condition):
start_time = int(time.time())
while True:
try:
condition()
except Exception:
pass
else:
return
if int(time.time()) - start_time >= self.build_timeout:
condition()
return
time.sleep(self.build_interval)
@classmethod
def prepare_instance_network(cls):
if (CONF.validation.auth_method != 'disabled' and
CONF.validation.connect_method == 'floating'):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
name = kwargs.pop('name',
data_utils.rand_name(cls.__name__ + "-image"))
wait_until = kwargs.pop('wait_until', None)
wait_for_server = kwargs.pop('wait_for_server', True)
image = cls.compute_images_client.create_image(server_id, name=name,
**kwargs)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", image.response, "lt"):
image_id = image['image_id']
else:
image_id = data_utils.parse_image_id(image.response['location'])
# The compute image proxy APIs were deprecated in 2.35 so
# use the images client directly if the API microversion being
# used is >=2.36.
if not cls.is_requested_microversion_compatible('2.35'):
client = cls.images_client
else:
client = cls.compute_images_client
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_image, image_id)
if wait_until is not None:
try:
wait_until = wait_until.upper()
if not cls.is_requested_microversion_compatible('2.35'):
wait_until = wait_until.lower()
waiters.wait_for_image_status(client, image_id, wait_until)
except lib_exc.NotFound:
if wait_until.upper() == 'ACTIVE':
# If the image is not found after create_image returned
# that means the snapshot failed in nova-compute and nova
# deleted the image. There should be a compute fault
# recorded with the server in that case, so get the server
# and dump some details.
server = (
cls.servers_client.show_server(server_id)['server'])
if 'fault' in server:
raise exceptions.SnapshotNotFoundException(
server['fault'], image_id=image_id)
else:
raise exceptions.SnapshotNotFoundException(
image_id=image_id)
else:
raise
image = client.show_image(image_id)
# Compute image client returns response wrapped in 'image' element
# which is not the case with Glance image client.
if 'image' in image:
image = image['image']
if wait_until.upper() == 'ACTIVE':
if wait_for_server:
waiters.wait_for_server_status(cls.servers_client,
server_id, 'ACTIVE')
return image
@classmethod
def recreate_server(cls, server_id, validatable=False, **kwargs):
if server_id:
cls.delete_server(server_id)
cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
validation_resources=cls.get_class_validation_resources(
cls.os_primary),
wait_until='ACTIVE',
adminPass=cls.password,
**kwargs)
return server['id']
@classmethod
def delete_server(cls, server_id):
try:
cls.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(cls.servers_client,
server_id)
except Exception:
LOG.exception('Failed to delete server %s', server_id)
def resize_server(self, server_id, new_flavor_id, **kwargs):
self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
waiters.wait_for_server_status(self.servers_client, server_id,
'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(
self.servers_client, server_id, 'ACTIVE')
server = self.servers_client.show_server(server_id)['server']
self.assert_flavor_equal(new_flavor_id, server['flavor'])
@classmethod
def delete_volume(cls, volume_id):
try:
cls.volumes_client.delete_volume(volume_id)
# TODO(mriedem): We should move the wait_for_resource_deletion
# into the delete_volume method as a convenience to the caller.
cls.volumes_client.wait_for_resource_deletion(volume_id)
except lib_exc.NotFound:
LOG.warning("Unable to delete volume '%s' since it was not found. "
"Maybe it was already deleted?", volume_id)
@classmethod
def get_server_ip(cls, server, validation_resources=None):
if CONF.validation.connect_method == 'floating':
if validation_resources:
return validation_resources['floating_ip']['ip']
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
if address['version'] == CONF.validation.ip_version_for_ssh:
return address['addr']
raise exceptions.ServerUnreachable(server_id=server['id'])
else:
raise lib_exc.InvalidConfiguration()
def setUp(self):
super(BaseV2ComputeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
compute_microversion=self.request_microversion,
volume_microversion=self.volume_request_microversion,
placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
if 'display_name' not in kwargs:
vol_name = data_utils.rand_name(cls.__name__ + '-volume')
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], 'available')
return volume
def _detach_volume(self, server, volume):
try:
volume = self.volumes_client.show_volume(volume['id'])['volume']
# Check the status. You can only detach an in-use volume, otherwise
# the compute API will return a 400 response.
if volume['status'] == 'in-use':
self.servers_client.detach_volume(server['id'], volume['id'])
except lib_exc.NotFound:
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
pass
def attach_volume(self, server, volume, device=None, tag=None):
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
if tag:
attach_kwargs['tag'] = tag
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and for multiattach volumes wait for
# the attachment to be removed. For non-multiattach volumes wait for
# the state of the volume to change to available. This is so we don't
if volume['multiattach']:
att = waiters.wait_for_volume_attachment_create(
self.volumes_client, volume['id'], server['id'])
self.addCleanup(waiters.wait_for_volume_attachment_remove,
self.volumes_client, volume['id'],
att['attachment_id'])
else:
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client, volume['id'], 'available')
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
self.addCleanup(self._detach_volume, server, volume)
return attachment
def create_volume_snapshot(self, volume_id, name=None, description=None,
metadata=None, force=False):
name = name or data_utils.rand_name(
self.__class__.__name__ + '-snapshot')
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume_id,
force=force,
display_name=name,
description=description,
metadata=metadata)['snapshot']
self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
snapshot['id'])
self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
waiters.wait_for_volume_resource_status(self.snapshots_client,
snapshot['id'], 'available')
snapshot = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
return snapshot
def assert_flavor_equal(self, flavor_id, server_flavor):
if server_flavor.get('id'):
msg = ('server flavor is not same as flavor!')
self.assertEqual(flavor_id, server_flavor['id'], msg)
else:
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
self.assertEqual(flavor['name'], server_flavor['original_name'],
"original_name in server flavor is not same as "
"flavor name!")
for key in ['ram', 'vcpus', 'disk']:
msg = ('attribute %s in server flavor is not same as '
'flavor!' % key)
self.assertEqual(flavor[key], server_flavor[key], msg)
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseV2ComputeAdminTest, cls).setup_clients()
cls.availability_zone_admin_client = (
cls.os_admin.availability_zone_client)
cls.admin_flavors_client = cls.os_admin.flavors_client
cls.admin_servers_client = cls.os_admin.servers_client
cls.image_client = cls.os_admin.image_client_v2
cls.admin_assisted_volume_snapshots_client = \
cls.os_admin.assisted_volume_snapshots_client
def create_flavor(self, ram, vcpus, disk, name=None,
is_public='True', **kwargs):
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-flavor")
id = kwargs.pop('id', data_utils.rand_int_id(start=1000))
client = self.admin_flavors_client
flavor = client.create_flavor(
ram=ram, vcpus=vcpus, disk=disk, name=name,
id=id, is_public=is_public, **kwargs)['flavor']
self.addCleanup(client.wait_for_resource_deletion, flavor['id'])
self.addCleanup(client.delete_flavor, flavor['id'])
return flavor
@classmethod
def get_host_for_server(cls, server_id):
server_details = cls.admin_servers_client.show_server(server_id)
return server_details['server']['OS-EXT-SRV-ATTR:host']
def get_host_other_than(self, server_id):
source_host = self.get_host_for_server(server_id)
svcs = self.os_admin.services_client.list_services(
binary='nova-compute')['services']
hosts = []
for svc in svcs:
if svc['state'] == 'up' and svc['status'] == 'enabled':
if CONF.compute.compute_volume_common_az:
if svc['zone'] == CONF.compute.compute_volume_common_az:
hosts.append(svc['host'])
else:
hosts.append(svc['host'])
for target_host in hosts:
if source_host != target_host:
return target_host
| true | true |
7900ba4754c926164dd2748f4d59fd2fbccbf00f | 8,660 | py | Python | resolwe_bio/processes/import_data/basespace.py | plojyon/resolwe-bio | 45d001a78fcc387b5e3239a34c9da7f40d789022 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/processes/import_data/basespace.py | plojyon/resolwe-bio | 45d001a78fcc387b5e3239a34c9da7f40d789022 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/processes/import_data/basespace.py | plojyon/resolwe-bio | 45d001a78fcc387b5e3239a34c9da7f40d789022 | [
"Apache-2.0"
] | null | null | null | """Import a file from Illumina BaseSpace."""
import atexit
import gzip
import os
import time
import traceback
from pathlib import Path
from requests import RequestException, Session
from resolwe.process import (
BooleanField,
FileField,
GroupField,
IntegerField,
Persistence,
Process,
SecretField,
StringField,
)
class BaseSpaceDownloadError(Exception):
    """Raised when downloading a file from BaseSpace fails."""
def download_file_repeatedly(
    tries, session, file_id, file_name, expected_file_size, request_headers, error
):
    """Try the BaseSpace download several times before giving up.

    Each attempt downloads the file and verifies its integrity. On a
    ``BaseSpaceDownloadError`` the attempt is retried after a short pause;
    once all attempts are exhausted, ``error`` is reported.
    """
    for attempt in range(1, tries + 1):
        try:
            download_file(
                session=session,
                file_id=file_id,
                file_name=file_name,
                request_headers=request_headers,
                error=error,
            )
            raise_for_file_corruption(
                file_name=file_name, expected_file_size=expected_file_size, error=error
            )
        except BaseSpaceDownloadError:
            if attempt == tries:
                error("Could not download file from BaseSpace.")
            else:
                # Brief back-off before the next attempt.
                time.sleep(3)
        else:
            break
def download_file(session, file_id, file_name, request_headers, error):
    """Stream the BaseSpace file contents to ``file_name`` on disk."""
    response = make_get_request(
        session=session,
        url=get_api_file_content_url(file_id=file_id),
        headers=request_headers,
        error=error,
        stream=True,
    )
    chunk_size = 1024 * 1024 * 10  # write in 10 MiB chunks
    try:
        with open(file_name, "wb") as handle:
            for chunk in response.iter_content(chunk_size=chunk_size):
                handle.write(chunk)
    except FileNotFoundError:
        error(f"Could not save file to {file_name}, due to directory not being found")
    except PermissionError:
        error(f"Could not save file to {file_name}, due to insufficient permissions")
    except RequestException:
        error(f"Could not save file to {file_name}, due to a network error")
def get_file_properties(session, file_id, request_headers, error):
    """Return the file's name and size in bytes, as reported by the API."""
    response = make_get_request(
        session=session,
        url=get_api_file_url(file_id=file_id),
        headers=request_headers,
        error=error,
    )
    payload = response.json()["Response"]
    return payload["Name"], payload["Size"]
def make_get_request(session, url, headers, error, stream=False):
    """Issue a GET request, reporting HTTP failures through ``error``."""
    response = session.get(url=url, headers=headers, stream=stream, timeout=60)
    status = response.status_code
    if status == 401:
        error(f"Authentication failed on URL {url}")
    elif status == 404:
        error(f"BaseSpace file {url} not found")
    elif status != 200:
        error(f"Failed to retrieve content from {url}")
    return response
def get_api_file_url(file_id):
    """Build the BaseSpace API URL for a file resource."""
    return f"https://api.basespace.illumina.com/v1pre3/files/{file_id}"


def get_api_file_content_url(file_id):
    """Build the BaseSpace API URL for a file's raw contents."""
    file_url = get_api_file_url(file_id=file_id)
    return f"{file_url}/content"
def output(output_option, value):
    """Write ``value`` to standard output according to ``output_option``."""
    if output_option == "full":
        print(value)
        return
    if output_option == "filename" and value.startswith("filename="):
        # Echo only the file name, with its "filename=" tag stripped.
        prefix = "filename="
        print(value[len(prefix):])
def get_token_from_secret_file(secret_file_path, error):
    """Return the access token stored on the first line of the secret file.

    Reports through ``error`` (and returns ``None``) when the file is
    missing or unreadable.
    """
    try:
        with open(secret_file_path, "r") as handle:
            return handle.readline()
    except FileNotFoundError:
        error("Secret file not found")
    except PermissionError:
        error("No permissions to read secret file")
def on_exit(session):
    """Exit hook: release the HTTP session's resources."""
    session.close()
def raise_for_file_corruption(file_name, expected_file_size, error):
    """Verify the downloaded file's size and, for gzip files, its integrity."""
    # Compare the on-disk size with the size reported by the API.
    actual_file_size = os.path.getsize(file_name)
    if actual_file_size != expected_file_size:
        error(
            f"File's ({file_name}) expected size ({expected_file_size}) "
            f"does not match its actual size ({actual_file_size})"
        )
    # For gzip archives, decompress the whole stream to surface corruption.
    if file_name.split(".")[-1] == "gz":
        chunk_size = 1024 * 1024 * 10
        try:
            with gzip.open(file_name, "rb") as handle:
                while handle.read(chunk_size):
                    pass
        except OSError:
            error(f"File {file_name} did not pass gzip integrity check")
class BaseSpaceImport(Process):
    """Import a file from Illumina BaseSpace."""

    slug = "basespace-file-import"
    name = "BaseSpace file"
    process_type = "data:file"
    version = "1.4.0"
    category = "Import"
    data_name = 'BaseSpace ({{ file_id|default("?") }})'
    persistence = Persistence.TEMP

    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        # Network access is needed for the BaseSpace API; secrets for the token.
        "resources": {
            "cores": 1,
            "memory": 1024,
            "network": True,
            "secrets": True,
        },
    }

    class Input:
        """Input fields to process BaseSpaceImport."""

        file_id = StringField(label="BaseSpace file ID")
        access_token_secret = SecretField(
            label="BaseSpace access token",
            description="BaseSpace access token secret handle needed to download the file.",
        )
        show_advanced = BooleanField(
            label="Show advanced options",
            default=False,
        )

        class Advanced:
            """Advanced options."""

            output = StringField(
                label="Output",
                allow_custom_choice=False,
                choices=[("full", "Full"), ("filename", "Filename")],
                default="filename",
                description="Sets what is printed to standard output. "
                "Argument 'Full' outputs everything, "
                "argument 'Filename' outputs only file names of downloaded files.",
            )
            tries = IntegerField(
                label="Tries",
                description="Number of tries to download a file before giving up.",
                range=[1, 10],
                default=3,
            )
            verbose = BooleanField(
                label="Verbose",
                default=False,
                description="Print detailed exception information to standard output "
                "when error occurs. Output argument had no effect on this argument.",
            )

        advanced = GroupField(
            Advanced, label="Advanced options", hidden="!show_advanced"
        )

    class Output:
        """Output fields to process BaseSpaceImport."""

        file = FileField(label="File with reads")

    def run(self, inputs, outputs):
        """Run import."""
        secret_path = Path("/secrets") / inputs.access_token_secret["handle"]
        session = Session()
        # Close the HTTP session even if the process terminates abnormally.
        atexit.register(on_exit, session)

        try:
            file_id = inputs.file_id
            access_token = get_token_from_secret_file(
                secret_file_path=secret_path, error=self.error
            )
            headers = {"x-access-token": access_token}
            file_name, file_size = get_file_properties(
                session=session,
                file_id=file_id,
                request_headers=headers,
                error=self.error,
            )
            download_file_repeatedly(
                tries=inputs.advanced.tries,
                session=session,
                file_id=file_id,
                file_name=file_name,
                expected_file_size=file_size,
                request_headers=headers,
                error=self.error,
            )
            output(inputs.advanced.output, f"filename={file_name}")
        except Exception as error:
            if inputs.advanced.verbose:
                traceback.print_exc()
                self.error(
                    "Unexpected error occurred while trying to download files from BaseSpace. "
                    "Check standard output for more details."
                )
            else:
                print(str(error))
                self.error(
                    "Unexpected error occurred while trying to download files from BaseSpace. "
                    "Set Verbose to True to see the traceback."
                )

        outputs.file = file_name
| 31.376812 | 95 | 0.588337 |
import atexit
import gzip
import os
import time
import traceback
from pathlib import Path
from requests import RequestException, Session
from resolwe.process import (
BooleanField,
FileField,
GroupField,
IntegerField,
Persistence,
Process,
SecretField,
StringField,
)
class BaseSpaceDownloadError(Exception):
    """BaseSpace download error."""

    pass
def download_file_repeatedly(
    tries, session, file_id, file_name, expected_file_size, request_headers, error
):
    """Attempt to download BaseSpace file numerous times in case of errors."""
    for i in range(tries):
        try:
            download_file(
                session=session,
                file_id=file_id,
                file_name=file_name,
                request_headers=request_headers,
                error=error,
            )
            raise_for_file_corruption(
                file_name=file_name, expected_file_size=expected_file_size, error=error
            )
            break
        except BaseSpaceDownloadError:
            # Report after the last attempt; otherwise pause briefly and retry.
            if i + 1 == tries:
                error("Could not download file from BaseSpace.")
            else:
                time.sleep(3)
def download_file(session, file_id, file_name, request_headers, error):
    """Download BaseSpace file."""
    response = make_get_request(
        session=session,
        url=get_api_file_content_url(file_id=file_id),
        headers=request_headers,
        error=error,
        stream=True,
    )
    try:
        with open(file_name, "wb") as f:
            # Stream the payload to disk in 10 MiB chunks.
            chunk_size = 1024 * 1024 * 10
            for chunk in response.iter_content(chunk_size=chunk_size):
                f.write(chunk)
    except FileNotFoundError:
        error(f"Could not save file to {file_name}, due to directory not being found")
    except PermissionError:
        error(f"Could not save file to {file_name}, due to insufficient permissions")
    except RequestException:
        error(f"Could not save file to {file_name}, due to a network error")
def get_file_properties(session, file_id, request_headers, error):
    """Get file name and size (in bytes)."""
    response = make_get_request(
        session=session,
        url=get_api_file_url(file_id=file_id),
        headers=request_headers,
        error=error,
    )
    info = response.json()["Response"]
    return info["Name"], info["Size"]
def make_get_request(session, url, headers, error, stream=False):
    """Make a get request, reporting HTTP failures through ``error``."""
    response = session.get(url=url, headers=headers, stream=stream, timeout=60)
    if response.status_code == 401:
        error(f"Authentication failed on URL {url}")
    elif response.status_code == 404:
        error(f"BaseSpace file {url} not found")
    elif response.status_code != 200:
        error(f"Failed to retrieve content from {url}")
    return response
def get_api_file_url(file_id):
    """Get BaseSpace API file URL."""
    api_url = "https://api.basespace.illumina.com/v1pre3"
    return f"{api_url}/files/{file_id}"
def get_api_file_content_url(file_id):
    """Get BaseSpace API file contents URL."""
    return f"{get_api_file_url(file_id=file_id)}/content"
def output(output_option, value):
    """Print to standard output."""
    if output_option == "full":
        print(value)
    elif output_option == "filename":
        # Echo only values tagged as file names, with the prefix stripped.
        if value.startswith("filename="):
            print(value[len("filename=") :])
def get_token_from_secret_file(secret_file_path, error):
    """Read secret file to obtain access token."""
    try:
        with open(secret_file_path, "r") as f:
            return f.readline()
    except FileNotFoundError:
        error("Secret file not found")
    except PermissionError:
        error("No permissions to read secret file")
def on_exit(session):
    """Clean up function called on exit."""
    session.close()
def raise_for_file_corruption(file_name, expected_file_size, error):
    """Raise an error if file does not pass integrity check."""
    # Check file size against the size reported by the BaseSpace API.
    actual_file_size = os.path.getsize(file_name)
    if expected_file_size != actual_file_size:
        error(
            f"File's ({file_name}) expected size ({expected_file_size}) "
            f"does not match its actual size ({actual_file_size})"
        )
    # Check gzip integrity.
    if file_name.split(".")[-1] == "gz":
        try:
            with gzip.open(file_name, "rb") as f:
                chunk_size = 1024 * 1024 * 10
                while bool(f.read(chunk_size)):
                    pass
        except OSError:
            error(f"File {file_name} did not pass gzip integrity check")
class BaseSpaceImport(Process):
    """Import a file from Illumina BaseSpace."""

    slug = "basespace-file-import"
    name = "BaseSpace file"
    process_type = "data:file"
    version = "1.4.0"
    category = "Import"
    data_name = 'BaseSpace ({{ file_id|default("?") }})'
    persistence = Persistence.TEMP

    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}
        },
        # Network access is needed for the BaseSpace API; secrets for the token.
        "resources": {
            "cores": 1,
            "memory": 1024,
            "network": True,
            "secrets": True,
        },
    }

    class Input:
        """Input fields to process BaseSpaceImport."""

        file_id = StringField(label="BaseSpace file ID")
        access_token_secret = SecretField(
            label="BaseSpace access token",
            description="BaseSpace access token secret handle needed to download the file.",
        )
        show_advanced = BooleanField(
            label="Show advanced options",
            default=False,
        )

        class Advanced:
            """Advanced options."""

            output = StringField(
                label="Output",
                allow_custom_choice=False,
                choices=[("full", "Full"), ("filename", "Filename")],
                default="filename",
                description="Sets what is printed to standard output. "
                "Argument 'Full' outputs everything, "
                "argument 'Filename' outputs only file names of downloaded files.",
            )
            tries = IntegerField(
                label="Tries",
                description="Number of tries to download a file before giving up.",
                range=[1, 10],
                default=3,
            )
            verbose = BooleanField(
                label="Verbose",
                default=False,
                description="Print detailed exception information to standard output "
                "when error occurs. Output argument had no effect on this argument.",
            )

        advanced = GroupField(
            Advanced, label="Advanced options", hidden="!show_advanced"
        )

    class Output:
        """Output fields to process BaseSpaceImport."""

        file = FileField(label="File with reads")

    def run(self, inputs, outputs):
        """Run import."""
        secret_path = Path("/secrets") / inputs.access_token_secret["handle"]
        session = Session()
        # Close the HTTP session even if the process terminates abnormally.
        atexit.register(on_exit, session)

        try:
            file_id = inputs.file_id
            access_token = get_token_from_secret_file(
                secret_file_path=secret_path, error=self.error
            )
            headers = {"x-access-token": access_token}
            file_name, file_size = get_file_properties(
                session=session,
                file_id=file_id,
                request_headers=headers,
                error=self.error,
            )
            download_file_repeatedly(
                tries=inputs.advanced.tries,
                session=session,
                file_id=file_id,
                file_name=file_name,
                expected_file_size=file_size,
                request_headers=headers,
                error=self.error,
            )
            output(inputs.advanced.output, f"filename={file_name}")
        except Exception as error:
            if inputs.advanced.verbose:
                traceback.print_exc()
                self.error(
                    "Unexpected error occurred while trying to download files from BaseSpace. "
                    "Check standard output for more details."
                )
            else:
                print(str(error))
                self.error(
                    "Unexpected error occurred while trying to download files from BaseSpace. "
                    "Set Verbose to True to see the traceback."
                )

        outputs.file = file_name
| true | true |
7900bab0a051ba38431bfff59c78d0990bb8525f | 541 | py | Python | registration/migrations/0014_eventresult_timestamp.py | arpanpathak/college-fest-management | 186ffe78deed7ae4904e809412d84883e669b6bf | [
"MIT"
] | 1 | 2022-01-02T05:40:59.000Z | 2022-01-02T05:40:59.000Z | registration/migrations/0014_eventresult_timestamp.py | arpanpathak/college-fest-management | 186ffe78deed7ae4904e809412d84883e669b6bf | [
"MIT"
] | null | null | null | registration/migrations/0014_eventresult_timestamp.py | arpanpathak/college-fest-management | 186ffe78deed7ae4904e809412d84883e669b6bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-04 21:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add a non-editable creation timestamp field to ``EventResult``."""

    dependencies = [
        ('registration', '0013_eventresult_scoresubmittedby'),
    ]

    operations = [
        migrations.AddField(
            model_name='eventresult',
            name='timeStamp',
            # Callable default evaluated at save time; editable=False keeps
            # the field out of model forms.
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
        ),
    ]
| 24.590909 | 90 | 0.661738 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add a non-editable creation timestamp field to ``EventResult``."""

    dependencies = [
        ('registration', '0013_eventresult_scoresubmittedby'),
    ]

    operations = [
        migrations.AddField(
            model_name='eventresult',
            name='timeStamp',
            # Callable default evaluated at save time; editable=False keeps
            # the field out of model forms.
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
        ),
    ]
| true | true |
7900bb2b2ee9fba61fcbb0da0c5eec57fc3b5c33 | 24,465 | py | Python | qa/rpc-tests/fundrawtransaction.py | alik918/esmacoin | 9966b5a1b76a8fbeb98ca86e084fe3d9e00d88b1 | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction.py | alik918/esmacoin | 9966b5a1b76a8fbeb98ca86e084fe3d9e00d88b1 | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction.py | alik918/esmacoin | 9966b5a1b76a8fbeb98ca86e084fe3d9e00d88b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Functional test of the fundrawtransaction RPC: funding raw transactions
# with suitable inputs, change outputs and fees.
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enought inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 ESMA to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
    # Execute the fundrawtransaction regression test when run as a script.
    RawTransactionsTest().main()
| 40.438017 | 214 | 0.556019 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
ansaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
| false | true |
7900bb9023551be02622f799fbbf23980df6b488 | 3,956 | py | Python | src/wad.blog/wad/blog/portlets/categories.py | potzenheimer/buildout.wad | 0ebf9518b5707d65d93655e3ff38c54eb0d21335 | [
"MIT"
] | null | null | null | src/wad.blog/wad/blog/portlets/categories.py | potzenheimer/buildout.wad | 0ebf9518b5707d65d93655e3ff38c54eb0d21335 | [
"MIT"
] | null | null | null | src/wad.blog/wad/blog/portlets/categories.py | potzenheimer/buildout.wad | 0ebf9518b5707d65d93655e3ff38c54eb0d21335 | [
"MIT"
] | null | null | null | import urllib2
from zope.interface import implements
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from Products.CMFCore.utils import getToolByName
from zope import schema
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from wad.blog.utils import find_portlet_assignment_context
from wad.blog.blogentry import IBlogEntry
from wad.blog import MessageFactory as _
class IBlogCategoriesPortlet(IPortletDataProvider):
    """Schema for the blog-categories portlet.
    It inherits from IPortletDataProvider because for this portlet, the
    data that is being rendered and the portlet assignment itself are the
    same.
    """
    # Name of the view that category links point to (see Renderer.archive_url).
    archive_view = schema.TextLine(
        title=_(u"Archive view"),
        description=_(u"The name of the archive view"),
        default=u'blog-view',
        required=True
    )
class Assignment(base.Assignment):
    """Portlet assignment.
    This is what is actually managed through the portlets UI and associated
    with columns.
    """
    implements(IBlogCategoriesPortlet)
    def __init__(self, archive_view=u'blog-view'):
        # Persist the configured archive-view name on the assignment
        # (matches the IBlogCategoriesPortlet schema field).
        self.archive_view = archive_view
    @property
    def title(self):
        """This property is used to give the title of the portlet in the
        "manage portlets" screen.
        """
        return _("Categories")
class Renderer(base.Renderer):
    """Portlet renderer.
    This is registered in configure.zcml. The referenced page template is
    rendered, and the implicit variable 'view' will refer to an instance
    of this class. Other methods can be added and referenced in the template.
    """
    render = ViewPageTemplateFile('categories.pt')
    def keywords(self):
        # All Subject keywords known to the catalog, decoded to unicode
        # (Python 2: catalog keyword indexes hold UTF-8 byte strings).
        catalog = getToolByName(self.context, 'portal_catalog')
        keywords = catalog.uniqueValuesFor('Subject')
        keywords = [unicode(k, 'utf-8') for k in keywords]
        return keywords
    def archive_url(self, subject):
        # Get the path of where the portlet is created. That's the blog.
        assignment_context = find_portlet_assignment_context(self.data,
                                                             self.context)
        if assignment_context is None:
            assignment_context = self.context
        self.folder_url = assignment_context.absolute_url()
        # URL-encode the category for use as a query-string value.
        sub = urllib2.quote(subject.encode('utf-8'))
        url = '%s/%s?category=%s' % (self.folder_url,
                                     self.data.archive_view,
                                     sub)
        return url
    def blog_url(self):
        # Same assignment-context lookup as archive_url, without the query.
        assignment_context = find_portlet_assignment_context(self.data,
                                                             self.context)
        if assignment_context is None:
            assignment_context = self.context
        return assignment_context.absolute_url()
    def count_entries(self, subject):
        # Number of blog entries tagged with the given category.
        catalog = getToolByName(self.context, 'portal_catalog')
        brains = catalog(object_provides=IBlogEntry.__identifier__,
                         Subject=subject.encode('utf-8'))
        return len(brains)
    def count_all_entries(self):
        # Total number of blog entries, regardless of category.
        catalog = getToolByName(self.context, 'portal_catalog')
        brains = catalog(object_provides=IBlogEntry.__identifier__)
        return len(brains)
class AddForm(base.AddForm):
    """Portlet add form.
    This is registered in configure.zcml. The form_fields variable tells
    zope.formlib which fields to display. The create() method actually
    constructs the assignment that is being added.
    """
    form_fields = form.Fields(IBlogCategoriesPortlet)
    def create(self, data):
        # 'data' maps schema field names to the validated form values.
        return Assignment(**data)
class EditForm(base.EditForm):
    """Portlet edit form.
    This is registered with configure.zcml. The form_fields variable tells
    zope.formlib which fields to display.
    """
    # Editing reuses the same schema as the add form.
    form_fields = form.Fields(IBlogCategoriesPortlet)
| 32.694215 | 77 | 0.671638 | import urllib2
from zope.interface import implements
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from Products.CMFCore.utils import getToolByName
from zope import schema
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from wad.blog.utils import find_portlet_assignment_context
from wad.blog.blogentry import IBlogEntry
from wad.blog import MessageFactory as _
class IBlogCategoriesPortlet(IPortletDataProvider):
archive_view = schema.TextLine(
title=_(u"Archive view"),
description=_(u"The name of the archive view"),
default=u'blog-view',
required=True
)
class Assignment(base.Assignment):
implements(IBlogCategoriesPortlet)
def __init__(self, archive_view=u'blog-view'):
self.archive_view = archive_view
@property
def title(self):
return _("Categories")
class Renderer(base.Renderer):
render = ViewPageTemplateFile('categories.pt')
def keywords(self):
catalog = getToolByName(self.context, 'portal_catalog')
keywords = catalog.uniqueValuesFor('Subject')
keywords = [unicode(k, 'utf-8') for k in keywords]
return keywords
def archive_url(self, subject):
assignment_context = find_portlet_assignment_context(self.data,
self.context)
if assignment_context is None:
assignment_context = self.context
self.folder_url = assignment_context.absolute_url()
sub = urllib2.quote(subject.encode('utf-8'))
url = '%s/%s?category=%s' % (self.folder_url,
self.data.archive_view,
sub)
return url
def blog_url(self):
assignment_context = find_portlet_assignment_context(self.data,
self.context)
if assignment_context is None:
assignment_context = self.context
return assignment_context.absolute_url()
def count_entries(self, subject):
catalog = getToolByName(self.context, 'portal_catalog')
brains = catalog(object_provides=IBlogEntry.__identifier__,
Subject=subject.encode('utf-8'))
return len(brains)
def count_all_entries(self):
catalog = getToolByName(self.context, 'portal_catalog')
brains = catalog(object_provides=IBlogEntry.__identifier__)
return len(brains)
class AddForm(base.AddForm):
form_fields = form.Fields(IBlogCategoriesPortlet)
def create(self, data):
return Assignment(**data)
class EditForm(base.EditForm):
form_fields = form.Fields(IBlogCategoriesPortlet)
| true | true |
7900bc108c778c3c4e52e8399f72b060d1309c3d | 1,376 | py | Python | pytorch/unet_3d/unet_model.py | mistermoutan/ModelsGenesis | 98af7075b93311fe655e9692773eb1ce015b8bd0 | [
"MIT"
] | null | null | null | pytorch/unet_3d/unet_model.py | mistermoutan/ModelsGenesis | 98af7075b93311fe655e9692773eb1ce015b8bd0 | [
"MIT"
] | null | null | null | pytorch/unet_3d/unet_model.py | mistermoutan/ModelsGenesis | 98af7075b93311fe655e9692773eb1ce015b8bd0 | [
"MIT"
] | null | null | null | """ Full assembly of the parts to form the complete network """
import torch.nn.functional as F
from .unet_parts import *
from .channels import C
class UNet3D(nn.Module):
    """3D U-Net assembled from the building blocks in ``unet_parts``.

    Channel widths per resolution level come from the ``C`` list imported
    from ``.channels``.

    :param n_channels: number of channels of the input volume.
    :param n_classes: number of output channels / classes.
    :param bilinear: if True, decoder upsampling uses interpolation instead
        of transposed convolutions; the bottleneck/decoder widths are halved
        (``factor``) to compensate.
    :param apply_sigmoid_to_output: if True, the final conv is built with
        ``sigmoid=True`` so probabilities (not logits) are returned.
    """
    def __init__(self, n_channels, n_classes, bilinear=True, apply_sigmoid_to_output=False):
        super(UNet3D, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # Encoder: stem double-conv followed by four downsampling stages.
        self.inc = DoubleConv3D(n_channels, C[0])
        self.down1 = Down(C[0], C[1])
        self.down2 = Down(C[1], C[2])
        self.down3 = Down(C[2], C[3])
        # With bilinear upsampling the channel count is halved before each
        # concatenation with the corresponding skip connection.
        factor = 2 if bilinear else 1
        self.down4 = Down(C[3], C[4] // factor)  # switch do Double CONV if stick do 8x spatial down
        # Decoder: four upsampling stages fed by the encoder skip connections.
        self.up1 = Up(C[4], C[3] // factor, bilinear)
        self.up2 = Up(C[3], C[2] // factor, bilinear)
        self.up3 = Up(C[2], C[1] // factor, bilinear)
        self.up4 = Up(C[1], C[0], bilinear)
        self.outc = OutConv(C[0], n_classes) if apply_sigmoid_to_output is False else OutConv(C[0], n_classes, sigmoid=True)
    def forward(self, x):
        # Encoder path; intermediate activations are kept for the skips.
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # Decoder path, each stage merging the matching encoder activation.
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        return logits
| 34.4 | 124 | 0.582849 |
import torch.nn.functional as F
from .unet_parts import *
from .channels import C
class UNet3D(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True, apply_sigmoid_to_output=False):
super(UNet3D, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv3D(n_channels, C[0])
self.down1 = Down(C[0], C[1])
self.down2 = Down(C[1], C[2])
self.down3 = Down(C[2], C[3])
factor = 2 if bilinear else 1
self.down4 = Down(C[3], C[4] // factor)
self.up1 = Up(C[4], C[3] // factor, bilinear)
self.up2 = Up(C[3], C[2] // factor, bilinear)
self.up3 = Up(C[2], C[1] // factor, bilinear)
self.up4 = Up(C[1], C[0], bilinear)
self.outc = OutConv(C[0], n_classes) if apply_sigmoid_to_output is False else OutConv(C[0], n_classes, sigmoid=True)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
| true | true |
7900bc42ab9f4255009df4547d5f39d3d7822f52 | 5,411 | py | Python | sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/internal/set_joint_state_base.py | sweetie-bot-project/sweetie_bot_flexbe_behaviors | d8511564bb9d6125838b4373263fb68a8b858d70 | [
"BSD-3-Clause"
] | null | null | null | sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/internal/set_joint_state_base.py | sweetie-bot-project/sweetie_bot_flexbe_behaviors | d8511564bb9d6125838b4373263fb68a8b858d70 | [
"BSD-3-Clause"
] | null | null | null | sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/internal/set_joint_state_base.py | sweetie-bot-project/sweetie_bot_flexbe_behaviors | d8511564bb9d6125838b4373263fb68a8b858d70 | [
"BSD-3-Clause"
] | 1 | 2019-12-23T05:06:26.000Z | 2019-12-23T05:06:26.000Z | #!/usr/bin/env python
from itertools import izip
import xmlrpclib
import rospy
from rospy.rostime import Time, Duration
from flexbe_core import EventState as Dummy
from flexbe_core import Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyActionClient
from sensor_msgs.msg import JointState
from sweetie_bot_control_msgs.msg import SetOperationalAction, SetOperationalGoal, SetOperationalResult
# This is helper class so trick FlexBe App to ignore it.
# Dummy is actually EventState but FlexBe App is not able to recognize it.
class SetJointStateBase(Dummy):
'''
Base class for states which move robot to named pose using FollowJointState controller.
Pose is loaded from binary parameter from Parameter Server as JointState message.
Then state activate FollowJointState controller and publish pose.
Movement is considered finished when position error is less then given tolerance.
-- controller string FollowJointState controller namespace.
-- tolerance float Position tolerance (rad).
-- timeout float Movement timeout (s).
-- joint_topic string Topic where actual pose published.
<= done Finished.
<= failed Failed to activate FollowJointState controller.
<= timeout Timeout reached.
'''
def __init__(self, controller = 'motion/controller/joint_state_head', tolerance = 0.17, timeout = 10.0,
joint_topic = "joint_states", outcomes = ['done', 'failed', 'timeout']):
super(SetJointStateBase, self).__init__(outcomes = outcomes)
# Store topic parameter for later use.
self._controller = controller
self._joint_topic = joint_topic
self._tolerance = tolerance
self._timeout = Duration.from_sec(timeout)
# create proxies
self._action_client = ProxyActionClient({self._controller: SetOperationalAction})
self._pose_publisher = ProxyPublisher({ self._controller + '/in_joints_ref': JointState })
self._pose_subscriber = ProxySubscriberCached({ self._joint_topic: JointState })
# timestamp
self._timestamp = None
# error in enter hook
self._error = False
def load_joint_state_msg(self, pose_ns, pose_param):
# derive parameter full name
if pose_ns:
pose_param = pose_ns + '/' + pose_param
# Load JointState message from Parameter Server
try:
goal_raw = rospy.get_param(pose_param)
except KeyError as e:
raise KeyError, "SetJointStateBase: Unable to get '" + pose_param + "' parameter."
if not isinstance(goal_raw, xmlrpclib.Binary):
raise TypeError, "SetJointStateBase: ROS parameter '" + pose_param + "' is not a binary data."
# deserialize
self._target_joint_state = JointState()
self._target_joint_state.deserialize(goal_raw.data)
# create joint index to simplify tolerance check
self._joint_target_pose = { name: position for name, position in izip(self._target_joint_state.name, self._target_joint_state.position) }
def on_enter(self, userdata):
self._error = False
# activate controller
actiavtion_request = SetOperationalGoal()
actiavtion_request.operational = True
actiavtion_request.resources = self._target_joint_state.name
try:
self._action_client.send_goal(self._controller, actiavtion_request)
except Exception as e:
Logger.logwarn('SetJointStateBase: Failed to send the SetOperational command:\n%s' % str(e))
self._error = True
return
# set start timestamp
self._timestamp = Time.now()
def execute(self, userdata):
# error in start hook
if self._error:
return 'failed'
# check if controller is active
if not self._action_client.is_active(self._controller):
Logger.loginfo('SetJointStateBase: controller was deactivated by external cause.')
return 'failed';
# check if time elasped
if Time.now() - self._timestamp > self._timeout:
Logger.loginfo('SetJointStateBase: controller was deactivated by external cause.')
return 'timeout'
# publish goal pose
self._pose_publisher.publish(self._controller+'/in_joints_ref', self._target_joint_state)
# check tolerance
joints_msg = self._pose_subscriber.get_last_msg(self._joint_topic)
on_position = True
for name, pos in izip(joints_msg.name, joints_msg.position):
target_pos = self._joint_target_pose.get(name)
if (target_pos != None):
if abs(target_pos - pos) > self._tolerance:
on_position = False
break
if on_position:
Logger.loginfo('SetJointStateBase: on position')
return 'done'
def on_exit(self, userdata):
if self._action_client.is_active(self._controller):
try:
self._action_client.cancel(self._controller)
except Exception as e:
Logger.logwarn('SetJointStateBase: failed to deactivate `' + self._controller + '` controller:\n%s' % str(e))
| 42.273438 | 145 | 0.656441 |
from itertools import izip
import xmlrpclib
import rospy
from rospy.rostime import Time, Duration
from flexbe_core import EventState as Dummy
from flexbe_core import Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyActionClient
from sensor_msgs.msg import JointState
from sweetie_bot_control_msgs.msg import SetOperationalAction, SetOperationalGoal, SetOperationalResult
class SetJointStateBase(Dummy):
'''
Base class for states which move robot to named pose using FollowJointState controller.
Pose is loaded from binary parameter from Parameter Server as JointState message.
Then state activate FollowJointState controller and publish pose.
Movement is considered finished when position error is less then given tolerance.
-- controller string FollowJointState controller namespace.
-- tolerance float Position tolerance (rad).
-- timeout float Movement timeout (s).
-- joint_topic string Topic where actual pose published.
<= done Finished.
<= failed Failed to activate FollowJointState controller.
<= timeout Timeout reached.
'''
def __init__(self, controller = 'motion/controller/joint_state_head', tolerance = 0.17, timeout = 10.0,
joint_topic = "joint_states", outcomes = ['done', 'failed', 'timeout']):
super(SetJointStateBase, self).__init__(outcomes = outcomes)
self._controller = controller
self._joint_topic = joint_topic
self._tolerance = tolerance
self._timeout = Duration.from_sec(timeout)
self._action_client = ProxyActionClient({self._controller: SetOperationalAction})
self._pose_publisher = ProxyPublisher({ self._controller + '/in_joints_ref': JointState })
self._pose_subscriber = ProxySubscriberCached({ self._joint_topic: JointState })
self._timestamp = None
self._error = False
def load_joint_state_msg(self, pose_ns, pose_param):
if pose_ns:
pose_param = pose_ns + '/' + pose_param
try:
goal_raw = rospy.get_param(pose_param)
except KeyError as e:
raise KeyError, "SetJointStateBase: Unable to get '" + pose_param + "' parameter."
if not isinstance(goal_raw, xmlrpclib.Binary):
raise TypeError, "SetJointStateBase: ROS parameter '" + pose_param + "' is not a binary data."
self._target_joint_state = JointState()
self._target_joint_state.deserialize(goal_raw.data)
self._joint_target_pose = { name: position for name, position in izip(self._target_joint_state.name, self._target_joint_state.position) }
def on_enter(self, userdata):
self._error = False
actiavtion_request = SetOperationalGoal()
actiavtion_request.operational = True
actiavtion_request.resources = self._target_joint_state.name
try:
self._action_client.send_goal(self._controller, actiavtion_request)
except Exception as e:
Logger.logwarn('SetJointStateBase: Failed to send the SetOperational command:\n%s' % str(e))
self._error = True
return
self._timestamp = Time.now()
def execute(self, userdata):
if self._error:
return 'failed'
if not self._action_client.is_active(self._controller):
Logger.loginfo('SetJointStateBase: controller was deactivated by external cause.')
return 'failed';
if Time.now() - self._timestamp > self._timeout:
Logger.loginfo('SetJointStateBase: controller was deactivated by external cause.')
return 'timeout'
self._pose_publisher.publish(self._controller+'/in_joints_ref', self._target_joint_state)
joints_msg = self._pose_subscriber.get_last_msg(self._joint_topic)
on_position = True
for name, pos in izip(joints_msg.name, joints_msg.position):
target_pos = self._joint_target_pose.get(name)
if (target_pos != None):
if abs(target_pos - pos) > self._tolerance:
on_position = False
break
if on_position:
Logger.loginfo('SetJointStateBase: on position')
return 'done'
def on_exit(self, userdata):
if self._action_client.is_active(self._controller):
try:
self._action_client.cancel(self._controller)
except Exception as e:
Logger.logwarn('SetJointStateBase: failed to deactivate `' + self._controller + '` controller:\n%s' % str(e))
| false | true |
7900bca39ae064b8c0d6d9f0022b2d8517f1fcf6 | 1,793 | py | Python | crawling_scraping/bin/rst2odt_prepstyles.py | litteletips/crawling_scraping-scrapy_tool | 6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0 | [
"MIT"
] | null | null | null | crawling_scraping/bin/rst2odt_prepstyles.py | litteletips/crawling_scraping-scrapy_tool | 6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0 | [
"MIT"
] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z | crawling_scraping/bin/rst2odt_prepstyles.py | litteletips/crawling_scraping | 6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0 | [
"MIT"
] | null | null | null | #!/Users/yaroten/Library/Mobile Documents/com~apple~CloudDocs/git/crawling_scraping/crawling_scraping/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
    """Strip page-size properties from styles.xml inside the given .odt file.

    The archive is rewritten in place: a temporary zip is built with the
    patched styles.xml and then moved over the original file.
    """
    zin = zipfile.ZipFile(filename)
    styles = zin.read("styles.xml")
    root = etree.fromstring(styles)
    for el in root.xpath("//style:page-layout-properties",
                         namespaces=NAMESPACES):
        # Iterate over a snapshot of the attribute names: deleting from
        # el.attrib while iterating it directly mutates the mapping during
        # iteration and can skip entries or error out.
        for attr in list(el.attrib):
            if attr.startswith("{%s}" % NAMESPACES["fo"]):
                del el.attrib[attr]
    tempname = mkstemp()
    zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
                           zipfile.ZIP_DEFLATED)
    # Copy every archive member, substituting the patched styles.xml.
    for item in zin.infolist():
        if item.filename == "styles.xml":
            zout.writestr(item, etree.tostring(root))
        else:
            zout.writestr(item, zin.read(item.filename))
    zout.close()
    zin.close()
    shutil.move(tempname[1], filename)
def main():
    """Command-line entry point: patch the single STYLE_FILE.odt argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        # Python 2 print-to-stderr syntax; show the module docstring as help.
        print >> sys.stderr, __doc__
        print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
        sys.exit(1)
    filename = args[0]
    prepstyle(filename)
if __name__ == '__main__':
    # Patch the .odt file named on the command line.
    main()
# vim:tw=78:sw=4:sts=4:et:
| 26.367647 | 113 | 0.650307 |
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
| true | true |
7900bd9606d025b5b0dd85fe04ca40f0cfa02f0d | 1,046 | py | Python | cocasync/errors.py | cree-py/cocasync | 4705009077e270b6dd45b7be67d15bfdf3387e5a | [
"MIT"
] | 2 | 2018-02-01T03:15:07.000Z | 2018-02-03T23:35:17.000Z | cocasync/errors.py | cree-py/cocasync | 4705009077e270b6dd45b7be67d15bfdf3387e5a | [
"MIT"
] | null | null | null | cocasync/errors.py | cree-py/cocasync | 4705009077e270b6dd45b7be67d15bfdf3387e5a | [
"MIT"
class Error(Exception):
    '''Base Error.

    Generalized to accept an optional message: the previous zero-argument
    signature made every subclass call of ``super().__init__(self.error)``
    raise TypeError at construction time.
    '''
    def __init__(self, error='Fatal error occured.'):
        # Keep the message on .error for callers and pass it to Exception
        # so str(exc) shows it.
        self.error = error
        super().__init__(self.error)
class ArgError(Error):
    '''Argument Error.'''
    def __init__(self, error='Incorrect argument passed.'):
        self.error = error
        # Call Exception directly: Error.__init__ historically accepted no
        # message argument, so forwarding through super() raised TypeError.
        Exception.__init__(self, self.error)
class MissingArg(ArgError):
    '''Argument is missing.'''
    def __init__(self, arg):
        self.error = f'{arg} is a required argument that is missing.'
        # Bypass the parent initializers: neither ArgError nor Error accepts
        # a message argument, so super().__init__(self.error) raised TypeError.
        Exception.__init__(self, self.error)
class InvalidArg(ArgError):
    '''Argument is invalid.'''
    def __init__(self, arg):
        self.error = f'{arg} is invalid.'
        # Bypass the parent initializers: neither ArgError nor Error accepts
        # a message argument, so super().__init__(self.error) raised TypeError.
        Exception.__init__(self, self.error)
class HTTPError(Error):
    '''Error occured in HTTP.'''
    def __init__(self, code):
        self.error = f'An error occured. Status: {code}'
        # Error.__init__ takes no message argument; call Exception directly
        # instead of super().__init__(self.error), which raised TypeError.
        Exception.__init__(self, self.error)
class Timeout(HTTPError):
    '''Connection timed out.'''
    def __init__(self):
        self.error = 'The connection timed out.'
        # Previously the message was forwarded into HTTPError.__init__,
        # which formatted it as a status code ('An error occured. Status:
        # The connection timed out.'); set the message directly instead.
        Exception.__init__(self, self.error)
class MissingData(Error):
'''Missing data.'''
def __init__(self, data):
self.error = f'Value of {data} is missing.' | 22.255319 | 63 | 0.685468 | class Error(Exception):
def __init__(self):
self.error = 'Fatal error occured.'
super().__init__(self.error)
class ArgError(Error):
def __init__(self):
self.error = 'Incorrect argument passed.'
super().__init__(self.error)
class MissingArg(ArgError):
def __init__(self, arg):
self.error = f'{arg} is a required argument that is missing.'
super().__init__(self.error)
class InvalidArg(ArgError):
def __init__(self, arg):
self.error = f'{arg} is invalid.'
super().__init__(self.error)
class HTTPError(Error):
def __init__(self, code):
self.error = f'An error occured. Status: {code}'
super().__init__(self.error)
class Timeout(HTTPError):
def __init__(self):
self.error = 'The connection timed out.'
super().__init__(self.error)
class MissingData(Error):
def __init__(self, data):
self.error = f'Value of {data} is missing.' | true | true |
7900bdb85d1491014b72fb3cfe40f55a5812474a | 1,283 | py | Python | tests/test_remove_emphasises.py | PoWWoP/wiki-dump-reader | a7c195f132753a1f411ba2615410910fbf8c6888 | [
"MIT"
] | 18 | 2019-03-05T13:09:07.000Z | 2022-01-27T20:45:11.000Z | tests/test_remove_emphasises.py | PoWWoP/wiki-dump-reader | a7c195f132753a1f411ba2615410910fbf8c6888 | [
"MIT"
] | 2 | 2019-03-21T17:59:38.000Z | 2019-09-20T22:16:11.000Z | tests/test_remove_emphasises.py | PoWWoP/wiki-dump-reader | a7c195f132753a1f411ba2615410910fbf8c6888 | [
"MIT"
] | 5 | 2019-10-06T13:47:33.000Z | 2022-02-25T15:11:04.000Z | import unittest
from wiki_dump_reader import Cleaner
class TestRemoveEmphasis(unittest.TestCase):
    """Tests for Cleaner._remove_emphasises: stripping wiki ''italic'' and
    '''bold''' markers while preserving [[links]] and {{templates}}."""
    def setUp(self):
        # Fresh Cleaner instance for each test.
        self.cleaner = Cleaner()
    def test_remove_emphasis_bold(self):
        # '''...''' (bold) markers are removed; link markup is kept intact.
        text = "'''游戏工具编程'''是指采用各种开发工具进行开发修改[[电脑]]、[[电视]][[游戏]]的过程。主要的开发工具有以下几大类"
        expected = '游戏工具编程是指采用各种开发工具进行开发修改[[电脑]]、[[电视]][[游戏]]的过程。主要的开发工具有以下几大类'
        actual = self.cleaner._remove_emphasises(text)
        self.assertEqual(expected, actual)
    def test_remove_emphasis_italic(self):
        # Both ''...'' (italic, here wrapping a {{lang}} template) and
        # '''...''' (bold) markers are removed; the template text survives.
        text = "'''臺灣藍鵲'''([[學名]]:''{{lang|la|Urocissa caerulea}}''),又稱'''臺灣暗藍鵲'''、'''紅嘴山鵲'''、" \
               "'''長尾山娘'''([[臺灣閩南語羅馬字拼音方案|閩南語]]:{{Unicode|Tn̂g-bué Suann-niû}})或'''長尾陣仔''',為臺" \
               "灣特有種鳥類。臺灣從[[臺灣清治時期|清領時期]]開始就有文獻紀載臺灣藍鵲的資料。1862年,鳥畫家[[约翰·古尔德]]根據英" \
               "國博物學家[[郇和]]寄來的臺灣鳥類標本發表了一篇文章,命名並詳述16種新發現的台灣品種,其中就包含臺灣藍鵲。"
        expected = '臺灣藍鵲([[學名]]:{{lang|la|Urocissa caerulea}}),又稱臺灣暗藍鵲、紅嘴山鵲、長尾山娘([[臺灣閩南語羅馬' \
                   '字拼音方案|閩南語]]:{{Unicode|Tn̂g-bué Suann-niû}})或長尾陣仔,為臺灣特有種鳥類。臺灣從[[臺灣清治時期|清' \
                   '領時期]]開始就有文獻紀載臺灣藍鵲的資料。1862年,鳥畫家[[约翰·古尔德]]根據英國博物學家[[郇和]]寄來的臺灣鳥類' \
                   '標本發表了一篇文章,命名並詳述16種新發現的台灣品種,其中就包含臺灣藍鵲。'
        actual = self.cleaner._remove_emphasises(text)
        self.assertEqual(expected, actual)
| 47.518519 | 97 | 0.62198 | import unittest
from wiki_dump_reader import Cleaner
class TestRemoveEmphasis(unittest.TestCase):
def setUp(self):
self.cleaner = Cleaner()
def test_remove_emphasis_bold(self):
text = "'''游戏工具编程'''是指采用各种开发工具进行开发修改[[电脑]]、[[电视]][[游戏]]的过程。主要的开发工具有以下几大类"
expected = '游戏工具编程是指采用各种开发工具进行开发修改[[电脑]]、[[电视]][[游戏]]的过程。主要的开发工具有以下几大类'
actual = self.cleaner._remove_emphasises(text)
self.assertEqual(expected, actual)
def test_remove_emphasis_italic(self):
text = "'''臺灣藍鵲'''([[學名]]:''{{lang|la|Urocissa caerulea}}''),又稱'''臺灣暗藍鵲'''、'''紅嘴山鵲'''、" \
"'''長尾山娘'''([[臺灣閩南語羅馬字拼音方案|閩南語]]:{{Unicode|Tn̂g-bué Suann-niû}})或'''長尾陣仔''',為臺" \
"灣特有種鳥類。臺灣從[[臺灣清治時期|清領時期]]開始就有文獻紀載臺灣藍鵲的資料。1862年,鳥畫家[[约翰·古尔德]]根據英" \
"國博物學家[[郇和]]寄來的臺灣鳥類標本發表了一篇文章,命名並詳述16種新發現的台灣品種,其中就包含臺灣藍鵲。"
expected = '臺灣藍鵲([[學名]]:{{lang|la|Urocissa caerulea}}),又稱臺灣暗藍鵲、紅嘴山鵲、長尾山娘([[臺灣閩南語羅馬' \
'字拼音方案|閩南語]]:{{Unicode|Tn̂g-bué Suann-niû}})或長尾陣仔,為臺灣特有種鳥類。臺灣從[[臺灣清治時期|清' \
'領時期]]開始就有文獻紀載臺灣藍鵲的資料。1862年,鳥畫家[[约翰·古尔德]]根據英國博物學家[[郇和]]寄來的臺灣鳥類' \
'標本發表了一篇文章,命名並詳述16種新發現的台灣品種,其中就包含臺灣藍鵲。'
actual = self.cleaner._remove_emphasises(text)
self.assertEqual(expected, actual)
| true | true |
7900bdd57c5f7c38660175436a08e2b93e5ced2e | 137,949 | py | Python | tools/python/dex.py | gdawg/redex | 857c8dc08c93d2d768bff768dad3d1ff56750690 | [
"MIT"
] | null | null | null | tools/python/dex.py | gdawg/redex | 857c8dc08c93d2d768bff768dad3d1ff56750690 | [
"MIT"
] | null | null | null | tools/python/dex.py | gdawg/redex | 857c8dc08c93d2d768bff768dad3d1ff56750690 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import copy
import dict_utils
import file_extract
from file_extract import AutoParser
import numbers
import operator
import optparse
import os
import re
import six
import string
import sys
import StringIO
def get_uleb128_byte_size(value):
    """Return the number of bytes needed to encode *value* as ULEB128.

    Each encoded byte carries 7 payload bits, so any value below 0x80
    fits in a single byte.
    """
    count = 1
    remaining = value >> 7
    while remaining > 0:
        count += 1
        remaining >>= 7
    return count
def get_uleb128p1_byte_size(value):
    """Return the encoded size of *value* in ULEB128p1 format.

    ULEB128p1 stores value + 1 as plain ULEB128 (so -1 encodes as 0)."""
    return get_uleb128_byte_size(value + 1)
# ----------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------
MAGIC = "dex\n"
ENDIAN_CONSTANT = 0x12345678
REVERSE_ENDIAN_CONSTANT = 0x78563412
NO_INDEX = 0xffffffff
INT4_MIN = -8
INT4_MAX = 7
INT8_MIN = -128
INT8_MAX = 127
INT16_MIN = -32768
INT16_MAX = 32767
INT24_MIN = -8388608
INT24_MAX = 8388607
INT32_MIN = -2147483648
INT32_MAX = 2147483647
UINT4_MAX = 15
UINT8_MAX = 255
UINT16_MAX = 65535
UINT32_MAX = 4294967295
# ----------------------------------------------------------------------
# access_flags definitions
# ----------------------------------------------------------------------
ACC_PUBLIC = 0x1
ACC_PRIVATE = 0x2
ACC_PROTECTED = 0x4
ACC_STATIC = 0x8
ACC_FINAL = 0x10
ACC_SYNCHRONIZED = 0x20
ACC_VOLATILE = 0x40
ACC_BRIDGE = 0x40
ACC_TRANSIENT = 0x80
ACC_VARARGS = 0x80
ACC_NATIVE = 0x100
ACC_INTERFACE = 0x200
ACC_ABSTRACT = 0x400
ACC_STRICT = 0x800
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
ACC_CONSTRUCTOR = 0x10000
ACC_DECLARED_SYNCHRONIZED = 0x20000
# ----------------------------------------------------------------------
# Value formats
# ----------------------------------------------------------------------
VALUE_BYTE = 0x00
VALUE_SHORT = 0x02
VALUE_CHAR = 0x03
VALUE_INT = 0x04
VALUE_LONG = 0x06
VALUE_FLOAT = 0x10
VALUE_DOUBLE = 0x11
VALUE_METHOD_TYPE = 0x15
VALUE_METHOD_HANDLE = 0x16
VALUE_STRING = 0x17
VALUE_TYPE = 0x18
VALUE_FIELD = 0x19
VALUE_METHOD = 0x1a
VALUE_ENUM = 0x1b
VALUE_ARRAY = 0x1c
VALUE_ANNOTATION = 0x1d
VALUE_NULL = 0x1e
VALUE_BOOLEAN = 0x1f
class ValueFormat(dict_utils.Enum):
    """Enum wrapper naming the VALUE_* type codes used by encoded_value."""
    enum = {
        'VALUE_BYTE': VALUE_BYTE,
        'VALUE_SHORT': VALUE_SHORT,
        'VALUE_CHAR': VALUE_CHAR,
        'VALUE_INT': VALUE_INT,
        'VALUE_LONG': VALUE_LONG,
        'VALUE_FLOAT': VALUE_FLOAT,
        'VALUE_DOUBLE': VALUE_DOUBLE,
        'VALUE_METHOD_TYPE': VALUE_METHOD_TYPE,
        'VALUE_METHOD_HANDLE': VALUE_METHOD_HANDLE,
        'VALUE_STRING': VALUE_STRING,
        'VALUE_TYPE': VALUE_TYPE,
        'VALUE_FIELD': VALUE_FIELD,
        'VALUE_METHOD': VALUE_METHOD,
        'VALUE_ENUM': VALUE_ENUM,
        'VALUE_ARRAY': VALUE_ARRAY,
        'VALUE_ANNOTATION': VALUE_ANNOTATION,
        'VALUE_NULL': VALUE_NULL,
        'VALUE_BOOLEAN': VALUE_BOOLEAN,
    }

    def __init__(self, data):
        # Decode the value format from the next uint16 in the stream.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# ----------------------------------------------------------------------
# Type Codes
# ----------------------------------------------------------------------
TYPE_HEADER_ITEM = 0x0000 # size = 0x70
TYPE_STRING_ID_ITEM = 0x0001 # size = 0x04
TYPE_TYPE_ID_ITEM = 0x0002 # size = 0x04
TYPE_PROTO_ID_ITEM = 0x0003 # size = 0x0c
TYPE_FIELD_ID_ITEM = 0x0004 # size = 0x08
TYPE_METHOD_ID_ITEM = 0x0005 # size = 0x08
TYPE_CLASS_DEF_ITEM = 0x0006 # size = 0x20
TYPE_CALL_SITE_ID_ITEM = 0x0007 # size = 0x04
TYPE_METHOD_HANDLE_ITEM = 0x0008 # size = 0x08
TYPE_MAP_LIST = 0x1000 # size = 4 + (item.size * 12)
TYPE_TYPE_LIST = 0x1001 # size = 4 + (item.size * 2)
TYPE_ANNOTATION_SET_REF_LIST = 0x1002 # size = 4 + (item.size * 4)
TYPE_ANNOTATION_SET_ITEM = 0x1003 # size = 4 + (item.size * 4)
TYPE_CLASS_DATA_ITEM = 0x2000
TYPE_CODE_ITEM = 0x2001
TYPE_STRING_DATA_ITEM = 0x2002
TYPE_DEBUG_INFO_ITEM = 0x2003
TYPE_ANNOTATION_ITEM = 0x2004
TYPE_ENCODED_ARRAY_ITEM = 0x2005
TYPE_ANNOTATIONS_DIRECTORY_ITEM = 0x2006
class TypeCode(dict_utils.Enum):
    """Enum wrapper naming the TYPE_* codes used in map_list items."""
    enum = {
        'TYPE_HEADER_ITEM': TYPE_HEADER_ITEM,
        'TYPE_STRING_ID_ITEM': TYPE_STRING_ID_ITEM,
        'TYPE_TYPE_ID_ITEM': TYPE_TYPE_ID_ITEM,
        'TYPE_PROTO_ID_ITEM': TYPE_PROTO_ID_ITEM,
        'TYPE_FIELD_ID_ITEM': TYPE_FIELD_ID_ITEM,
        'TYPE_METHOD_ID_ITEM': TYPE_METHOD_ID_ITEM,
        'TYPE_CLASS_DEF_ITEM': TYPE_CLASS_DEF_ITEM,
        'TYPE_CALL_SITE_ID_ITEM': TYPE_CALL_SITE_ID_ITEM,
        'TYPE_METHOD_HANDLE_ITEM': TYPE_METHOD_HANDLE_ITEM,
        'TYPE_MAP_LIST': TYPE_MAP_LIST,
        'TYPE_TYPE_LIST': TYPE_TYPE_LIST,
        'TYPE_ANNOTATION_SET_REF_LIST': TYPE_ANNOTATION_SET_REF_LIST,
        'TYPE_ANNOTATION_SET_ITEM': TYPE_ANNOTATION_SET_ITEM,
        'TYPE_CLASS_DATA_ITEM': TYPE_CLASS_DATA_ITEM,
        'TYPE_CODE_ITEM': TYPE_CODE_ITEM,
        'TYPE_STRING_DATA_ITEM': TYPE_STRING_DATA_ITEM,
        'TYPE_DEBUG_INFO_ITEM': TYPE_DEBUG_INFO_ITEM,
        'TYPE_ANNOTATION_ITEM': TYPE_ANNOTATION_ITEM,
        'TYPE_ENCODED_ARRAY_ITEM': TYPE_ENCODED_ARRAY_ITEM,
        'TYPE_ANNOTATIONS_DIRECTORY_ITEM': TYPE_ANNOTATIONS_DIRECTORY_ITEM,
    }

    def __init__(self, data):
        # Decode the section type code from the next uint16 in the stream.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)

    def dump(self, prefix=None, f=sys.stdout, print_name=True,
             parent_path=None):
        # Only the enum's string name is printed; the other arguments are
        # accepted for dump-interface compatibility but unused here.
        f.write(str(self))
# ----------------------------------------------------------------------
# Method Handle Type Codes
# ----------------------------------------------------------------------
METHOD_HANDLE_TYPE_STATIC_PUT = 0x00
METHOD_HANDLE_TYPE_STATIC_GET = 0x01
METHOD_HANDLE_TYPE_INSTANCE_PUT = 0x02
METHOD_HANDLE_TYPE_INSTANCE_GET = 0x03
METHOD_HANDLE_TYPE_INVOKE_STATIC = 0x04
METHOD_HANDLE_TYPE_INVOKE_INSTANCE = 0x05
class MethodHandleTypeCode(dict_utils.Enum):
    """Enum wrapper naming the METHOD_HANDLE_TYPE_* codes."""
    enum = {
        'METHOD_HANDLE_TYPE_STATIC_PUT': METHOD_HANDLE_TYPE_STATIC_PUT,
        'METHOD_HANDLE_TYPE_STATIC_GET': METHOD_HANDLE_TYPE_STATIC_GET,
        'METHOD_HANDLE_TYPE_INSTANCE_PUT': METHOD_HANDLE_TYPE_INSTANCE_PUT,
        'METHOD_HANDLE_TYPE_INSTANCE_GET': METHOD_HANDLE_TYPE_INSTANCE_GET,
        'METHOD_HANDLE_TYPE_INVOKE_STATIC': METHOD_HANDLE_TYPE_INVOKE_STATIC,
        'METHOD_HANDLE_TYPE_INVOKE_INSTANCE':
            METHOD_HANDLE_TYPE_INVOKE_INSTANCE,
    }

    def __init__(self, data):
        # Decode the handle type from the next uint16 in the stream.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# Characters that can be emitted as-is when escaping strings for display.
PRINTABLE = string.ascii_letters + string.digits + string.punctuation + ' '


def escape(c):
    """Return a printable representation of the character *c*.

    Printable ASCII characters are returned unchanged; anything else is
    rendered as a ``\\xNN``, ``\\uNNNN`` or ``\\UNNNNNNNN`` escape
    depending on the magnitude of the code point.
    """
    if c in PRINTABLE:
        return c
    c = ord(c)
    if c <= 0xff:
        return '\\x' + '%02.2x' % (c)
    elif c <= 0xffff:
        # Fix: compare against the integer 0xffff. The original compared
        # an int against the string '\uffff', which raises TypeError on
        # Python 3 (and only "worked" on Python 2 by type ordering).
        return '\\u' + '%04.4x' % (c)
    else:
        return '\\U' + '%08.8x' % (c)
def print_string(s, f):
    """Write *s* to *f* wrapped in double quotes, escaping each character
    through escape()."""
    body = ''.join(escape(ch) for ch in s)
    f.write('"' + body + '"')
def print_version(version, f):
    """Write a three-element version sequence to *f* as "X.Y.Z".

    Sequences of any other length produce no output."""
    if len(version) != 3:
        return
    f.write('.'.join('%u' % (v) for v in version))
def print_hex_bytes(data, f):
    """Write each byte of *data* to *f* as two lowercase hex digits."""
    f.write(''.join('%2.2x' % (b) for b in data))
def print_endian(value, f):
    """Write an endian_tag to *f* in hex, labeling the two well-known
    constants when they match."""
    f.write("%#8.8x" % (value))
    if value == ENDIAN_CONSTANT:
        f.write(" (ENDIAN_CONSTANT)")
        return
    if value == REVERSE_ENDIAN_CONSTANT:
        f.write(" (REVERSE_ENDIAN_CONSTANT)")
def is_zero(value):
    """Field validator: return None when *value* is zero, otherwise an
    error string describing the mismatch."""
    if value == 0:
        return None
    # Fix: the message previously read "bit is"; the validator describes
    # the whole value, not a bit.
    return 'value should be zero, but is %s' % (str(value))
def is_dex_magic(magic):
    """Field validator: return None when *magic* matches the DEX magic
    string, otherwise an error string."""
    if magic != MAGIC:
        return 'value should be %s but is %s' % (MAGIC, magic)
    return None
def hex_escape(s):
    """Return *s* with every character passed through escape()."""
    return ''.join(map(escape, s))
# ----------------------------------------------------------------------
# encoded_field
# ----------------------------------------------------------------------
class encoded_field(AutoParser):
    """One field entry within a class_data_item.

    field_idx is stored delta-encoded against the previous entry;
    fixup_indexes() converts the deltas to absolute indexes.
    """
    items = [
        {'type': 'uleb', 'name': 'field_idx', 'format': '%u'},
        {'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    @classmethod
    def fixup_indexes(cls, items):
        # Accumulate deltas so each entry holds its absolute field index.
        for i in range(1, len(items)):
            items[i].field_idx += items[i - 1].field_idx

    @classmethod
    def get_table_header(self):
        # Column header used when dumping these entries as a table.
        return 'FIELD FLAGS\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_method
# ----------------------------------------------------------------------
class encoded_method(AutoParser):
    """One method entry within a class_data_item.

    method_idx is stored delta-encoded against the previous entry;
    fixup_indexes() converts the deltas to absolute indexes. code_off is
    the file offset of the method's code_item (0 when there is no code).
    """
    items = [
        {'type': 'uleb', 'name': 'method_idx', 'format': '%u'},
        {'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
        {'type': 'uleb', 'name': 'code_off', 'format': '0x%8.8x'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    @classmethod
    def fixup_indexes(cls, items):
        # Accumulate deltas so each entry holds its absolute method index.
        for i in range(1, len(items)):
            items[i].method_idx += items[i - 1].method_idx

    @classmethod
    def get_table_header(self):
        # Column header used when dumping these entries as a table.
        return 'METHOD FLAGS\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# class_data_item
# ----------------------------------------------------------------------
class class_data_item(AutoParser):
    """DEX class_data_item: a class's static/instance fields and
    direct/virtual methods."""
    items = [
        {'type': 'uleb', 'name': 'static_fields_size'},
        {'type': 'uleb', 'name': 'instance_fields_size'},
        {'type': 'uleb', 'name': 'direct_methods_size'},
        {'type': 'uleb', 'name': 'virtual_methods_size'},
        {'class': encoded_field, 'name': 'static_fields',
         'attr_count': 'static_fields_size', 'flat': True},
        {'class': encoded_field, 'name': 'instance_fields',
         'attr_count': 'instance_fields_size', 'flat': True},
        {'class': encoded_method, 'name': 'direct_methods',
         'attr_count': 'direct_methods_size', 'flat': True},
        {'class': encoded_method, 'name': 'virtual_methods',
         'attr_count': 'virtual_methods_size', 'flat': True},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        # The parsed indexes are delta-encoded; convert them to absolute
        # indexes right after parsing.
        encoded_field.fixup_indexes(self.static_fields)
        encoded_field.fixup_indexes(self.instance_fields)
        encoded_method.fixup_indexes(self.direct_methods)
        encoded_method.fixup_indexes(self.virtual_methods)

    @classmethod
    def create_empty(cls):
        # Four zero bytes decode as four ULEB128 zeros, i.e. all lists empty.
        data = file_extract.FileExtract(StringIO.StringIO('\0\0\0\0'), '=')
        return class_data_item(data)
# ----------------------------------------------------------------------
# class_def_item
# ----------------------------------------------------------------------
class class_def_item(AutoParser):
    """DEX class_def_item: one class definition with lazily-parsed
    class_data (an empty default is used when class_data_off is 0)."""
    items = [
        {'type': 'u32', 'name': 'class_idx', 'align': 4},
        {'type': 'u32', 'name': 'access_flags'},
        {'type': 'u32', 'name': 'superclass_idx'},
        {'type': 'u32', 'name': 'interfaces_off'},
        {'type': 'u32', 'name': 'source_file_idx'},
        {'type': 'u32', 'name': 'annotations_off'},
        {'type': 'u32', 'name': 'class_data_off'},
        {'type': 'u32', 'name': 'static_values_off'},
        {'class': class_data_item, 'name': 'class_data',
         'attr_offset': 'class_data_off',
         'condition': lambda item, data: item.class_data_off != 0,
         'dump': False,
         'default': class_data_item.create_empty()},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)

    @classmethod
    def get_table_header(self):
        # Column header used when dumping these entries as a table.
        return ('CLASS ACCESS SUPERCLASS INTERFACES SOURCE'
                ' ANNOTATION CLASS_DATA STATIC_VALUES\n')

    def get_dump_flat(self):
        return True

    def find_encoded_method_by_code_off(self, code_off):
        # Search direct then virtual methods for one whose code_item is at
        # the given file offset; returns None when no method matches.
        for encoded_method in self.class_data.direct_methods:
            if encoded_method.code_off == code_off:
                return encoded_method
        for encoded_method in self.class_data.virtual_methods:
            if encoded_method.code_off == code_off:
                return encoded_method
        return None
# ----------------------------------------------------------------------
# try_item
# ----------------------------------------------------------------------
class try_item(AutoParser):
    """DEX try_item: an instruction range covered by exception handlers."""
    items = [
        {'type': 'u32', 'name': 'start_addr'},
        {'type': 'u16', 'name': 'insn_count'},
        {'type': 'u16', 'name': 'handler_off'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_type_addr_pair
# ----------------------------------------------------------------------
class encoded_type_addr_pair(AutoParser):
    """DEX encoded_type_addr_pair: a caught exception type and the address
    of its handler."""
    items = [
        {'type': 'uleb', 'name': 'type_idx', 'format': '%#8.8x'},
        {'type': 'uleb', 'name': 'addr', 'format': '%#8.8x'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_catch_handler
# ----------------------------------------------------------------------
class encoded_catch_handler(AutoParser):
    """DEX encoded_catch_handler: typed handlers plus an optional catch-all.

    The number of typed handlers is abs(size); a non-positive size means a
    catch_all_addr follows the handler list.
    """
    items = [
        {'type': 'sleb', 'name': 'size'},
        {'class': encoded_type_addr_pair, 'name': 'handlers',
         'attr_count': 'size', 'attr_count_fixup': abs},
        {'type': 'uleb', 'name': 'catch_all_addr', 'default': 0,
         'condition': lambda item, data: item.size <= 0},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# encoded_catch_handler_list
# ----------------------------------------------------------------------
class encoded_catch_handler_list(AutoParser):
    """DEX encoded_catch_handler_list: a counted list of catch handlers."""
    items = [
        {'type': 'uleb', 'name': 'size'},
        {'class': encoded_catch_handler, 'name': 'list', 'attr_count': 'size'}
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
def print_instructions(insns, prefix, flat, f):
    """Decode the code units in *insns* and print one instruction per line.

    *prefix* is written before each instruction when non-empty; *flat* is
    accepted for dump-callback compatibility but unused here.

    NOTE(review): each instruction is printed via dex_inst.dump(), which is
    not passed *f* — confirm whether instruction output should honor *f*
    instead of the dump() default.
    """
    f.write('\n')
    code_units = CodeUnits(insns)
    dex_inst = DexInstruction()
    while code_units.index_is_valid():
        dex_inst.decode(code_units)
        if prefix:
            f.write(prefix)
        f.write(' ')
        dex_inst.dump()
DBG_END_SEQUENCE = 0x00
DBG_ADVANCE_PC = 0x01
DBG_ADVANCE_LINE = 0x02
DBG_START_LOCAL = 0x03
DBG_START_LOCAL_EXTENDED = 0x04
DBG_END_LOCAL = 0x05
DBG_RESTART_LOCAL = 0x06
DBG_SET_PROLOGUE_END = 0x07
DBG_SET_EPILOGUE_BEGIN = 0x08
DBG_SET_FILE = 0x09
DBG_FIRST_SPECIAL = 0x0a
DBG_LINE_BASE = -4
DBG_LINE_RANGE = 15
class DBG(dict_utils.Enum):
    """Enum wrapper naming the debug_info state-machine opcodes."""
    enum = {
        'DBG_END_SEQUENCE': DBG_END_SEQUENCE,
        'DBG_ADVANCE_PC': DBG_ADVANCE_PC,
        'DBG_ADVANCE_LINE': DBG_ADVANCE_LINE,
        'DBG_START_LOCAL': DBG_START_LOCAL,
        'DBG_START_LOCAL_EXTENDED': DBG_START_LOCAL_EXTENDED,
        'DBG_END_LOCAL': DBG_END_LOCAL,
        'DBG_RESTART_LOCAL': DBG_RESTART_LOCAL,
        'DBG_SET_PROLOGUE_END': DBG_SET_PROLOGUE_END,
        'DBG_SET_EPILOGUE_BEGIN': DBG_SET_EPILOGUE_BEGIN,
        'DBG_SET_FILE': DBG_SET_FILE
    }

    def __init__(self, data):
        # The opcode is a single byte in the stream. Special opcodes
        # (>= DBG_FIRST_SPECIAL) have no name in the enum dict.
        dict_utils.Enum.__init__(self, data.get_uint8(), self.enum)

    def dump(self, prefix=None, f=sys.stdout, print_name=True,
             parent_path=None):
        # Only the enum's string name is printed; the other arguments are
        # accepted for dump-interface compatibility but unused here.
        f.write(str(self))
class debug_info_op(AutoParser):
    """One opcode (plus operands) of a debug_info_item's state machine.

    Special opcodes (>= DBG_FIRST_SPECIAL) encode both a line and an
    address advance; the decoded offsets are attached as attributes.
    """
    items = [
        {'class': DBG, 'name': 'op'},
        {'switch': 'op', 'cases': {
            DBG_ADVANCE_PC: [
                {'type': 'uleb', 'name': 'addr_offset'}
            ],
            DBG_ADVANCE_LINE: [
                {'type': 'sleb', 'name': 'line_offset'},
            ],
            DBG_START_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'},
                {'type': 'ulebp1', 'name': 'name_idx'},
                {'type': 'ulebp1', 'name': 'type_idx'},
            ],
            DBG_START_LOCAL_EXTENDED: [
                {'type': 'uleb', 'name': 'register_num'},
                {'type': 'ulebp1', 'name': 'name_idx'},
                {'type': 'ulebp1', 'name': 'type_idx'},
                {'type': 'ulebp1', 'name': 'sig_idx'},
            ],
            DBG_END_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'}
            ],
            DBG_RESTART_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'}
            ],
            DBG_SET_FILE: [
                {'type': 'ulebp1', 'name': 'name_idx'}
            ],
            'default': []
        }
        }
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        if self.op >= DBG_FIRST_SPECIAL:
            # Decode a special opcode into its line and address advances.
            adjusted_opcode = int(self.op) - DBG_FIRST_SPECIAL
            line_offset = DBG_LINE_BASE + (adjusted_opcode % DBG_LINE_RANGE)
            # Fix: this file does "from __future__ import division", so '/'
            # is true division and produced a float; the address offset must
            # be an integer, so use floor division.
            addr_offset = (adjusted_opcode // DBG_LINE_RANGE)
            setattr(self, 'line_offset', line_offset)
            setattr(self, 'addr_offset', addr_offset)
        # Record how many bytes this opcode consumed.
        setattr(self, 'byte_size', data.tell() - self.get_offset())

    def get_dump_flat(self):
        return True

    def get_byte_size(self):
        """Return the encoded size in bytes of this opcode."""
        return self.byte_size

    def dump_opcode(self, f=sys.stdout):
        """Write a human-readable rendering of this opcode to *f*."""
        f.write(str(self.op))
        if self.op == DBG_ADVANCE_PC:
            f.write('(%u)' % self.addr_offset)
        elif self.op == DBG_ADVANCE_LINE:
            f.write('(%u)' % self.line_offset)
        elif self.op == DBG_START_LOCAL:
            f.write('(register_num=%u, name_idx=' % self.register_num)
            if self.name_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.name_idx))
            f.write(', type_idx=')
            if self.type_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.type_idx))
        elif self.op == DBG_START_LOCAL_EXTENDED:
            f.write('(register_num=%u, name_idx=' % self.register_num)
            if self.name_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.name_idx))
            f.write(', type_idx=')
            if self.type_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.type_idx))
            f.write(', sig_idx=')
            # Fix: this printed type_idx where the signature index was
            # labeled; use sig_idx, which this opcode actually parses.
            if self.sig_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.sig_idx))
        elif self.op == DBG_END_LOCAL or self.op == DBG_RESTART_LOCAL:
            f.write('(register_num=%u)' % self.register_num)
        elif self.op == DBG_SET_FILE:
            f.write('(name_idx=%u)' % self.name_idx)
        elif self.op >= DBG_FIRST_SPECIAL:
            f.write(' (addr_offset=%u, line_offset=%i)' %
                    (self.addr_offset, self.line_offset))
class debug_info_item(AutoParser):
    """DEX debug_info_item: header plus a lazily-decoded opcode stream that
    expands into a line table."""
    items = [
        {'type': 'uleb', 'name': 'line_start'},
        {'type': 'uleb', 'name': 'parameters_size'},
        {'type': 'ulebp1', 'name': 'parameter_names',
         'attr_count': 'parameters_size'},
    ]

    class row(object):
        """One state-machine row: address/line plus prologue/epilogue flags."""

        def __init__(self):
            self.address = 0
            self.line = 1
            self.source_file = -1
            self.prologue_end = False
            self.epilogue_begin = False

        def dump(self, f=sys.stdout):
            f.write('0x%4.4x %5u %5u ' %
                    (self.address, self.line, self.source_file))
            if self.prologue_end or self.epilogue_begin:
                if self.prologue_end:
                    f.write('P ')
                else:
                    f.write(' ')
                if self.epilogue_begin:
                    f.write('E')
            f.write('\n')

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        self.data = data
        self.ops = None  # Lazily-decoded opcode list.
        self.line_table = None  # Lazily-built list of row objects.
        # The opcode stream starts right after the parsed header.
        self.debug_info_offset = data.tell()

    def check_encoding(self, dex_method, f=sys.stdout):
        """Report debug-info bytes that a release build would not need.

        Returns the total number of bytes that could be removed.
        """
        bytes_saved = 0
        ops = self.get_ops()
        if len(ops) == 1:
            op = ops[0]
            if op.op == DBG_END_SEQUENCE:
                bytes_saved += (get_uleb128_byte_size(self.line_start) +
                                get_uleb128p1_byte_size(self.parameters_size))
                for parameter_name in self.parameter_names:
                    bytes_saved += get_uleb128p1_byte_size(parameter_name)
                bytes_saved += 1
                f.write('warning: %s debug info contains only a single ' % (
                    dex_method.get_qualified_name()))
                f.write('%s, all debug info can be removed ' % (op.op))
                f.write('(%u bytes)\n' % (bytes_saved))
                return bytes_saved
        # Dex files built for release don't need any the following
        # debug info ops
        for op in ops:
            size = op.get_byte_size()
            if op.op == DBG_SET_PROLOGUE_END:
                f.write('warning: %s %s can be removed (%u byte)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_SET_EPILOGUE_BEGIN:
                f.write('warning: %s %s can be removed (%u byte)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_START_LOCAL:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_START_LOCAL_EXTENDED:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_END_LOCAL:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_RESTART_LOCAL:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
        return bytes_saved

    def get_line_table(self):
        """Run the debug-info state machine and return the list of rows.

        A row is appended for each special opcode (which advances both the
        line and the address), mirroring the DEX debug-info semantics.
        """
        if self.line_table is None:
            # Fix: line_table was never initialized to a list before the
            # append() below, which raised AttributeError on first use.
            self.line_table = []
            ops = self.get_ops()
            row = debug_info_item.row()
            # Fix: get_ops() returns debug_info_op objects, not tuples, so
            # iterate them directly and compare against op.op (the original
            # indexed into the op and compared the object to DBG_* ints).
            for op in ops:
                if op.op == DBG_END_SEQUENCE:
                    break
                if op.op == DBG_ADVANCE_PC:
                    row.address += op.addr_offset
                elif op.op == DBG_ADVANCE_LINE:
                    row.line += op.line_offset
                elif op.op == DBG_START_LOCAL:
                    pass
                elif op.op == DBG_START_LOCAL_EXTENDED:
                    pass
                elif op.op == DBG_END_LOCAL:
                    pass
                elif op.op == DBG_RESTART_LOCAL:
                    pass
                elif op.op == DBG_SET_PROLOGUE_END:
                    row.prologue_end = True
                elif op.op == DBG_SET_EPILOGUE_BEGIN:
                    row.epilogue_begin = True
                elif op.op == DBG_SET_FILE:
                    row.source_file = op.name_idx
                else:
                    # Special opcode: advance and emit a row.
                    row.line += op.line_offset
                    row.address += op.addr_offset
                    self.line_table.append(copy.copy(row))
                    row.prologue_end = False
                    row.epilogue_begin = False
        return self.line_table

    def get_ops(self):
        """Lazily decode and cache the opcode stream (terminated by
        DBG_END_SEQUENCE, which is included in the returned list)."""
        if self.ops is None:
            data = self.data
            data.push_offset_and_seek(self.debug_info_offset)
            self.ops = list()
            while True:
                op = debug_info_op(data)
                self.ops.append(op)
                if op.op == DBG_END_SEQUENCE:
                    break
            data.pop_offset_and_seek()
        return self.ops

    def dump_debug_info(self, f=sys.stdout, prefix=None):
        """Write each opcode of the debug-info stream to *f*, one per line."""
        ops = self.get_ops()
        for op in ops:
            if prefix:
                f.write(prefix)
            f.write(' ')
            op.dump_opcode(f=f)
            f.write('\n')
# ----------------------------------------------------------------------
# code_item
# ----------------------------------------------------------------------
class code_item(AutoParser):
    """DEX code_item: register/arg counts, instructions, tries and
    exception handlers for one method."""
    items = [
        {'type': 'u16', 'name': 'registers_size', 'align': 4},
        {'type': 'u16', 'name': 'ins_size'},
        {'type': 'u16', 'name': 'outs_size'},
        {'type': 'u16', 'name': 'tries_size'},
        {'type': 'u32', 'name': 'debug_info_off'},
        {'type': 'u32', 'name': 'insns_size', 'format': '%u'},
        {'type': 'u16', 'name': 'insns',
         'attr_count': 'insns_size', 'dump_list': print_instructions},
        # Padding halfword, read only when there are tries and an odd
        # number of code units.
        {'type': 'u16', 'condition': lambda item,
         data: item.tries_size != 0 and item.insns_size & 1},
        {'class': try_item, 'name': 'tries', 'attr_count': 'tries_size',
         'condition': lambda item, data: item.tries_size != 0,
         'default': None},
        {'class': encoded_catch_handler_list, 'name': 'handlers',
         'condition': lambda item, data: item.tries_size != 0,
         'default': None}
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        self.debug_info = None  # Lazily-parsed debug_info_item.
        self.data = data
        # Convert insns from a list to a tuple to avoid mutation and also
        # to allow self.insns to be hashed.
        self.insns = tuple(self.insns)

    def get_debug_info(self):
        # Lazily parse and cache the associated debug_info_item, if any.
        if self.debug_info is None and self.debug_info_off > 0:
            data = self.data
            data.push_offset_and_seek(self.debug_info_off)
            self.debug_info = debug_info_item(data)
            data.pop_offset_and_seek()
        return self.debug_info
class encoded_value:
    """Decodes one DEX encoded_value.

    The first byte packs the value type (low 5 bits) and a value argument
    (high 3 bits); for most types the argument is (size in bytes - 1).
    self.value remains None for types this parser does not decode
    (VALUE_ANNOTATION) and for types that raise (floats, arrays).
    """
    def __init__(self, data):
        arg_type = data.get_uint8()
        value_arg = arg_type >> 5
        value_type = arg_type & 0x1f
        self.value_type = ValueFormat(value_type)
        self.value = None
        size = value_arg + 1
        if value_type == VALUE_BYTE:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_BYTE value_arg != 0 (%u)' % (value_arg))
            self.value = data.get_sint8()
        elif value_type == VALUE_SHORT:
            self.value = data.get_sint_size(size)
        elif value_type == VALUE_CHAR:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_INT:
            self.value = data.get_sint_size(size)
        elif value_type == VALUE_LONG:
            self.value = data.get_sint_size(size)
        elif value_type == VALUE_FLOAT:
            raise ValueError('VALUE_FLOAT not supported yet')
        elif value_type == VALUE_DOUBLE:
            raise ValueError('VALUE_DOUBLE not supported yet')
        elif value_type == VALUE_METHOD_TYPE:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_METHOD_HANDLE:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_STRING:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_TYPE:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_FIELD:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_METHOD:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_ENUM:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_ARRAY:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ARRAY value_arg != 0 (%u)' % (value_arg))
            raise ValueError('VALUE_ARRAY not supported yet')
            # encoded_array: an array of values, in the format specified by
            # "encoded_array format". The size of the value is implicit in
            # the encoding.
        elif value_type == VALUE_ANNOTATION:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ANNOTATION value_arg != 0 (%u)' % (value_arg))
            # encoded_annotation: a sub-annotation, in the format specified by
            # "encoded_annotation format" below. The size of the value is
            # implicit in the encoding.
        elif value_type == VALUE_NULL:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ARRAY value_arg != 0 (%u)' % (value_arg))
            self.value = 0
        elif value_type == VALUE_BOOLEAN:
            # The boolean is stored in the value_arg bits; size == 0 would
            # mean no payload byte at all.
            if size == 0:
                self.value = False
            else:
                self.value = data.get_uint8() != 0
# ----------------------------------------------------------------------
# encoded_array
# ----------------------------------------------------------------------
class encoded_array(AutoParser):
    """DEX encoded_array: a counted list of encoded_value elements."""
    items = [
        {'type': 'uleb', 'name': 'size'},
        {'class': encoded_value, 'name': 'values', 'attr_count': 'size'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
class encoded_array_item(AutoParser):
    """DEX encoded_array_item: wraps a single encoded_array value."""
    items = [
        {'class': encoded_array, 'name': 'value'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# field_id_item
# ----------------------------------------------------------------------
class field_id_item(AutoParser):
    """DEX field_id_item: a field reference (class, type, name indexes)."""
    items = [
        {'type': 'u16', 'name': 'class_idx', 'align': 4},
        {'type': 'u16', 'name': 'type_idx'},
        {'type': 'u32', 'name': 'name_idx'},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)

    @classmethod
    def get_table_header(self):
        # Column header used when dumping these entries as a table.
        return 'CLASS TYPE NAME\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# header_item
# ----------------------------------------------------------------------
class header_item(AutoParser):
    """DEX file header: magic/version, checksums, and the sizes and file
    offsets of every top-level section."""
    items = [
        {'type': 'cstr[4]', 'name': 'magic', 'validate': is_dex_magic},
        {'type': 'u8[3]', 'name': 'version', 'dump': print_version},
        {'type': 'u8', 'validate': is_zero},  # NULL terminator of the magic.
        {'type': 'u32', 'name': 'checksum'},
        {'type': 'u8[20]', 'name': 'signature', 'dump': print_hex_bytes},
        {'type': 'u32', 'name': 'file_size'},
        {'type': 'u32', 'name': 'header_size'},
        # Fix: the original dict literal repeated the 'type' key ('u32'
        # twice); duplicate keys silently overwrite each other.
        {'type': 'u32', 'name': 'endian_tag', 'dump': print_endian},
        {'type': 'u32', 'name': 'link_size'},
        {'type': 'u32', 'name': 'link_off'},
        {'type': 'u32', 'name': 'map_off'},
        {'type': 'u32', 'name': 'string_ids_size'},
        {'type': 'u32', 'name': 'string_ids_off'},
        {'type': 'u32', 'name': 'type_ids_size'},
        {'type': 'u32', 'name': 'type_ids_off'},
        {'type': 'u32', 'name': 'proto_ids_size'},
        {'type': 'u32', 'name': 'proto_ids_off'},
        {'type': 'u32', 'name': 'field_ids_size'},
        {'type': 'u32', 'name': 'field_ids_off'},
        {'type': 'u32', 'name': 'method_ids_size'},
        {'type': 'u32', 'name': 'method_ids_off'},
        {'type': 'u32', 'name': 'class_defs_size'},
        {'type': 'u32', 'name': 'class_defs_off'},
        {'type': 'u32', 'name': 'data_size'},
        {'type': 'u32', 'name': 'data_off'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_header(self):
        return 'DEX header:'
# ----------------------------------------------------------------------
# map_item
# ----------------------------------------------------------------------
class map_item(AutoParser):
    """DEX map_item: one entry of the map_list (section type, count,
    file offset)."""
    items = [
        {'class': TypeCode, 'name': 'type',
         'dump_width': TypeCode.max_width()},
        {'type': 'u16'},
        {'type': 'u32', 'name': 'size'},
        {'type': 'u32', 'name': 'offset'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_list_header_lines(self):
        # Column header used when dumping a list of these items.
        return [' TYPE SIZE OFFSET\n']

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# map_list
# ----------------------------------------------------------------------
class map_list(AutoParser):
    """DEX map_list: the counted list of map_item section descriptors."""
    items = [
        {'type': 'u32', 'name': 'size', 'align': 4, 'dump': False},
        {'class': map_item, 'name': 'list', 'attr_count': 'size',
         'flat': True},
    ]

    def get_dump_header(self):
        return 'map_list:'

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_handle_item
# ----------------------------------------------------------------------
class method_handle_item(AutoParser):
    """DEX method_handle_item: a handle type plus the field or method it
    refers to (padding halfwords are parsed unnamed)."""
    items = [
        {'class': MethodHandleTypeCode, 'name': 'method_handle_type',
         'align': 4},
        {'type': 'u16'},
        {'type': 'u16', 'name': 'field_or_method_id'},
        {'type': 'u16'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_id_item
# ----------------------------------------------------------------------
class method_id_item(AutoParser):
    """DEX method_id_item: a method reference (class, prototype, name)."""
    items = [
        {'type': 'u16', 'name': 'class_idx', 'align': 4},
        {'type': 'u16', 'name': 'proto_idx'},
        {'type': 'u32', 'name': 'name_idx'},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)

    @classmethod
    def get_table_header(self):
        # Column header used when dumping these entries as a table.
        return 'CLASS PROTO NAME\n'

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# proto_id_item
# ----------------------------------------------------------------------
class proto_id_item(AutoParser):
    """DEX proto_id_item: a method prototype (shorty, return type, and a
    lazily-parsed parameter type_list)."""
    items = [
        {'type': 'u32', 'name': 'shorty_idx', 'align': 4},
        {'type': 'u32', 'name': 'return_type_idx'},
        {'type': 'u32', 'name': 'parameters_off'},
    ]

    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)
        self.parameters = None  # Lazily-parsed type_list of parameters.

    def get_dump_flat(self):
        return True

    @classmethod
    def get_table_header(self):
        # Column header used when dumping these entries as a table.
        return 'SHORTY_IDX RETURN PARAMETERS\n'

    def get_parameters(self):
        # Lazily parse and cache the parameter type_list, if any.
        if self.parameters_off != 0 and self.parameters is None:
            # Get the data from our dex.File object
            data = self.context.data
            data.push_offset_and_seek(self.parameters_off)
            self.parameters = type_list(data)
            data.pop_offset_and_seek()
        return self.parameters
# ----------------------------------------------------------------------
# string_data_item
# ----------------------------------------------------------------------
class string_data_item(AutoParser):
    """DEX string_data_item: a UTF-16 code-unit count followed by the
    string bytes."""
    items = [
        {'type': 'uleb', 'name': 'utf16_size', 'format': '%3u'},
        {'type': 'cstr', 'name': 'data', 'dump': print_string},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# type_list
# ----------------------------------------------------------------------
class type_list(AutoParser):
    """DEX type_list: a counted list of type_ids indexes (u16 each)."""
    items = [
        {'type': 'u32', 'name': 'size', 'align': 4},
        {'type': 'u16', 'name': 'list', 'attr_count': 'size'},
    ]

    def get_dump_header(self):
        return 'type_list:'

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
class Progard:
    '''Parses a proguard map file and does name lookups.

    Map files contain class lines ("original.Class -> obfuscated.Class:")
    followed by indented member lines
    ("[startline:endline:]original member -> obfuscated"). Lookups are
    keyed by the obfuscated (new) names.
    '''

    def __init__(self, path):
        self.path = path
        self.classes_dict = {}
        class_dict = None
        # Member lines: leading whitespace, optional "start:end:" prefix,
        # then "old -> new".
        # Fix: use a raw string so '\s' is a regex escape rather than an
        # invalid (Python 3.12+: illegal) string escape sequence.
        regex = re.compile(r'\s+([0-9]+:[0-9]+:)?(.*) -> (.*)$')
        with open(path, 'r') as f:
            for line in f:
                line = line.rstrip('\n')
                if line:
                    if line[0].isspace():
                        # Member line belonging to the current class.
                        match = regex.match(line)
                        if match:
                            old = match.group(2)
                            new = match.group(3)
                            class_dict[new] = old
                    else:
                        # Class line: starts a new per-class member dict.
                        (old, new) = line.split(' -> ')
                        class_dict = {}
                        self.classes_dict[new] = (old, class_dict)

    def lookup_class(self, new_class):
        '''Translate a new class name to the old class name.'''
        if new_class in self.classes_dict:
            (old_class, class_dict) = self.classes_dict[new_class]
            if old_class is not None:
                return old_class
        return None

    def lookup_method(self, new_class, new_method):
        '''Translate a new class name and a new method into the old class
        name and the old method name.'''
        if new_class in self.classes_dict:
            (old_class, class_dict) = self.classes_dict[new_class]
            if new_method in class_dict:
                return class_dict[new_method]
        return None
class DexMethod:
'''Encapsulates a method within a DEX file.'''
def __init__(self, dex_class, encoded_method, is_virtual):
    self.dex_class = dex_class  # The class this method belongs to.
    self.encoded_method = encoded_method  # Underlying encoded_method item.
    self.method_id = None  # Lazily-resolved method_id_item.
    self.is_virtual = is_virtual  # True for virtual, False for direct.
    self.code_item = None  # Lazily-located code_item.
    self.insns = None  # Lazily-decoded instruction list.
    self.name_in_file = None  # Cached raw name as stored in the DEX file.
    self.name = None  # Cached (possibly demangled) display name.
def get_qualified_name(self):
class_name = self.get_class().get_name()
method_name = self.get_name()
if class_name[-1] != ';':
return class_name + ':' + method_name
else:
return class_name + method_name
def get_method_id(self):
    '''Get the method_id_item for this method.'''
    # Resolved lazily through the owning dex.File and cached.
    if self.method_id is None:
        self.method_id = self.get_dex().get_method_id(self.encoded_method)
    return self.method_id
def get_method_index(self):
'''Get the method index into the method_ids array in the DEX file.'''
return self.encoded_method.method_idx
def get_code_offset(self):
'''Get the code offset for this method.'''
return self.encoded_method.code_off
def get_code_item_index(self):
    '''Get the index into the code_items array in the dex file for the
    code for this method, or -1 if there is no code for this method.'''
    code_item = self.get_code_item()
    if code_item:
        # Translate the code_item's file offset back to its array index.
        return self.get_dex().get_code_item_index_from_code_off(
            code_item.get_offset())
    return -1
def get_dex(self):
return self.dex_class.get_dex()
def get_name_in_file(self):
    '''Returns the name of the method as it is known in the current DEX
    file (no proguard remapping)'''
    # Looked up once from the string table and cached.
    if self.name_in_file is None:
        self.name_in_file = self.get_dex().get_string(
            self.get_method_id().name_idx)
    return self.name_in_file
def get_name(self):
if self.name is None:
cls_mangled = self.get_class().get_mangled_name()
name_in_file = self.get_name_in_file()
if cls_mangled and name_in_file:
self.name = self.get_dex().demangle_class_method_name(
cls_mangled, name_in_file)
if self.name is None:
self.name = name_in_file
return self.name
def get_class(self):
return self.dex_class
def get_code_item(self):
if self.code_item is None:
if self.encoded_method.code_off != 0:
self.code_item = self.get_dex().find_code_item(
self.encoded_method.code_off)
return self.code_item
def get_code_byte_size(self):
code_item = self.get_code_item()
if code_item:
return len(code_item.insns) * 2
return 0
def get_instructions(self):
if self.insns is None:
self.insns = []
code_item = self.get_code_item()
if code_item:
code_units = CodeUnits(code_item.insns)
while code_units.index_is_valid():
insn = DexInstruction()
insn.decode(code_units)
self.insns.append(insn)
return self.insns
def dump(self, dump_code=True, dump_debug_info=True, f=sys.stdout):
if self.is_virtual:
method_type = 'virtual'
else:
method_type = 'direct'
dex = self.get_dex()
f.write('method: (%s) %s%s\n' %
(method_type, self.get_class().get_name(), self.get_name()))
code_item_idx = dex.get_code_item_index_from_code_off(
self.encoded_method.code_off)
self.encoded_method.dump(f=f, prefix=' encoded_method.', flat=False)
method_id = dex.get_method_id(self.encoded_method.method_idx)
if method_id:
method_id.dump(f=f, prefix=' method_id.', flat=False)
proto_id = dex.get_proto_id(method_id.proto_idx)
if proto_id:
proto_id.dump(f=f, prefix=' proto_id.', flat=False)
f.write('\n')
if dump_code:
if code_item_idx >= 0:
code_item = dex.get_code_items()[code_item_idx]
f.write(' code_item[%u] @ %#8.8x:\n' % (code_item_idx,
code_item.get_offset()))
code_item.dump(f=f, prefix=' ')
if dump_debug_info:
self.dump_debug_info(f=f, prefix=' ')
def dump_code(self, f=sys.stdout):
insns = self.get_instructions()
for insn in insns:
insn.dump(f=f)
def get_debug_info(self):
code_item = self.get_code_item()
if code_item:
return code_item.get_debug_info()
return None
def dump_debug_info(self, f=sys.stdout, prefix=None):
debug_info = self.get_debug_info()
if prefix:
f.write(prefix)
if debug_info:
f.write('debug info @ %#8.8x:\n' % (debug_info.get_offset()))
debug_info.dump_debug_info(f=f, prefix=prefix)
f.write('\n')
else:
f.write('no debug info\n')
def check_debug_info_encoding(self):
debug_info = self.get_debug_info()
if debug_info:
return debug_info.check_encoding(self)
class DexClass:
    '''Encapsulates a class within a DEX file.

    Wraps a class_def_item and lazily materializes the class's methods and
    (de)mangled names.'''

    def __init__(self, dex, class_def):
        self.dex = dex
        self.class_def = class_def
        self.methods = None           # lazily built by get_methods()
        self.num_direct_methods = 0   # filled in by get_methods()
        self.mangled = None           # cached mangled type name
        self.demangled = None         # cached demangled type name

    def dump(self, f=sys.stdout):
        '''Dump the class_def_item and class_data_item for this class.'''
        f.write('\nclass: %s\n' % (self.get_name()))
        owner = self.get_dex()
        def_offset = self.class_def.get_offset()
        def_index = owner.get_class_def_index_from_offset(def_offset)
        f.write(' class_def[%u] @ %#8.8x:\n' % (def_index, def_offset))
        self.class_def.dump(f=f, flat=False, prefix=' ')
        f.write(' class_data_item @ %#8.8x:\n' % (
            self.class_def.class_data.get_offset()))
        self.class_def.class_data.dump(f=f, flat=False, prefix=' ')
        f.write('\n')

    def get_type_index(self):
        '''Get type ID index (class_idx) for this class.'''
        return self.class_def.class_idx

    def is_abstract(self):
        return bool(self.class_def.access_flags & ACC_ABSTRACT)

    def get_mangled_name(self):
        '''Return (and cache) the mangled type descriptor for this class.'''
        if self.mangled is None:
            self.mangled = self.get_dex().get_typename(
                self.class_def.class_idx)
        return self.mangled

    def get_name(self):
        '''Return the demangled class name when a proguard mapping is
        available, otherwise the mangled descriptor.'''
        if self.demangled is None:
            mangled = self.get_mangled_name()
            if mangled:
                self.demangled = self.get_dex().demangle_class_name(mangled)
                if self.demangled is None:
                    self.demangled = mangled
        return self.demangled

    def get_dex(self):
        return self.dex

    def get_methods(self):
        '''Build (once) and return the DexMethod list: direct methods first,
        then virtual methods.'''
        if self.methods is None:
            class_data = self.class_def.class_data
            self.num_direct_methods = len(class_data.direct_methods)
            self.methods = [DexMethod(self, em, False)
                            for em in class_data.direct_methods]
            self.methods.extend(DexMethod(self, em, True)
                                for em in class_data.virtual_methods)
        return self.methods
def demangle_classname(mangled):
    '''Turn a JNI-style descriptor such as "Lcom/foo/Bar;" into a dotted
    name with a trailing colon ("com.foo.Bar:").  Values that do not look
    mangled (including None / empty strings) are returned unchanged.'''
    looks_mangled = (mangled and len(mangled) > 2 and
                     mangled.startswith('L') and mangled.endswith(';'))
    if looks_mangled:
        return mangled[1:-1].replace('/', '.') + ':'
    return mangled
def mangle_classname(demangled):
    '''Turn a dotted class name such as "com.foo.Bar" into a JNI-style
    descriptor ("Lcom/foo/Bar;").  Values already in descriptor form
    (and None / short strings) are returned unchanged.'''
    already_mangled = (demangled and demangled.startswith('L') and
                       demangled.endswith(';'))
    if demangled and len(demangled) > 2 and not already_mangled:
        return 'L' + demangled.replace('.', '/') + ';'
    return demangled
class File:
    '''Represents a DEX (Dalvik Executable) file.

    Parses the header eagerly; every other section (string_ids, type_ids,
    proto_ids, field_ids, method_ids, class_defs, code_items, ...) is read
    lazily on first access and cached.  An optional proguard mapping file
    enables demangling of obfuscated class/method names.'''
    def __init__(self, path, proguard_path):
        self.path = path
        self.proguard = None
        if proguard_path and os.path.exists(proguard_path):
            self.proguard = Progard(proguard_path)
        # NOTE(review): the file handle from open() is never explicitly
        # closed; presumably FileExtract owns it for the File's lifetime.
        self.data = file_extract.FileExtract(open(self.path), '=', 4)
        self.header = header_item(self.data)
        # All of the following caches are populated lazily by their getters.
        self.map_list = None
        self.string_ids = None
        self.type_ids = None
        self.proto_ids = None
        self.field_ids = None
        self.method_ids = None
        self.class_defs = None
        self.classes = None
        self.call_site_ids = None
        self.method_handle_items = None
        self.code_items = None
        # Maps a code_item file offset -> index into self.code_items.
        self.code_off_to_code_item_idx = {}
        self.strings = None
        self.call_sites = None
        self.dex_classes = {}
    def demangle_class_name(self, cls_mangled):
        '''Given a mangled type name as it would appear in a DEX file like
        "LX/JxK;", return the demangled version if we have a proguard file,
        otherwise return the original class typename'''
        if self.proguard:
            cls_demangled = demangle_classname(cls_mangled)
            if cls_demangled:
                return self.proguard.lookup_class(cls_demangled)
        return None
    def demangle_class_method_name(self, cls_mangled, method_name):
        # Like demangle_class_name() but resolves a method name through the
        # proguard mapping; returns None without a proguard file.
        if self.proguard:
            cls_demangled = demangle_classname(cls_mangled)
            if cls_demangled:
                return self.proguard.lookup_method(cls_demangled, method_name)
        return None
    def get_map_list(self):
        # Lazily parse the map_list section pointed at by header.map_off.
        if self.map_list is None:
            self.data.push_offset_and_seek(self.header.map_off)
            self.map_list = map_list(self.data)
            self.data.pop_offset_and_seek()
        return self.map_list
    def get_map_tuple(self, type_code):
        '''Return (size, offset) of the map_list entry whose type matches
        type_code, or (0, 0) when the section is absent.'''
        map_list = self.get_map_list()
        for item in map_list.list:
            if item.type.get_enum_value() == type_code:
                return (item.size, item.offset)
        return (0, 0)
    def find_class(self, class_ref):
        '''Find a DexClass by class_idx (int) or by class name (string,
        mangled or not); returns None when not found.'''
        class_idx = class_ref
        if isinstance(class_ref, six.string_types):
            # Make sure the string is in 'L' <classname-with-slashes> ';'
            class_mangled = mangle_classname(class_ref)
            class_str_idx = self.find_string_idx(class_mangled)
            if class_str_idx >= 0:
                class_idx = self.find_type_idx(class_str_idx)
        if isinstance(class_idx, numbers.Integral):
            classes = self.get_classes()
            for cls in classes:
                if cls.class_def.class_idx == class_idx:
                    return cls
        return None
    def find_string_idx(self, match_s):
        # Linear search of the string table; returns -1 when not found.
        strings = self.get_strings()
        for (i, s) in enumerate(strings):
            if match_s == s.data:
                return i
        return -1
    def get_string(self, index):
        # Return the string payload for a string index, or None if out of
        # range.
        strings = self.get_strings()
        if index < len(strings):
            return strings[index].data
        return None
    def get_typename(self, type_id):
        # type_ids entries are indices into the string table.
        types = self.get_type_ids()
        if type_id < len(types):
            return self.get_string(types[type_id])
        return None
    def get_string_ids(self):
        # Each string_id is a uint32 file offset to a string_data_item.
        if self.string_ids is None:
            self.string_ids = list()
            self.data.push_offset_and_seek(self.header.string_ids_off)
            for i in range(self.header.string_ids_size):
                self.string_ids.append(self.data.get_uint32())
            self.data.pop_offset_and_seek()
        return self.string_ids
    def get_type_ids(self):
        # Each type_id is a uint32 index into the string table.
        if self.type_ids is None:
            self.type_ids = list()
            self.data.push_offset_and_seek(self.header.type_ids_off)
            for i in range(self.header.type_ids_size):
                self.type_ids.append(self.data.get_uint32())
            self.data.pop_offset_and_seek()
        return self.type_ids
    def get_proto_ids(self):
        # Lazily parse all proto_id_items.
        if self.proto_ids is None:
            self.proto_ids = list()
            self.data.push_offset_and_seek(self.header.proto_ids_off)
            for i in range(self.header.proto_ids_size):
                self.proto_ids.append(proto_id_item(self.data, self))
            self.data.pop_offset_and_seek()
        return self.proto_ids
    def get_proto_id(self, proto_idx):
        # Bounds-checked proto_id lookup; None when out of range.
        proto_ids = self.get_proto_ids()
        if proto_idx >= 0 and proto_idx < len(proto_ids):
            return proto_ids[proto_idx]
        return None
    def get_proto_shorty(self, proto_idx):
        # NOTE: no bounds check here -- raises AttributeError on a bad
        # proto_idx since get_proto_id() returns None.
        id = self.get_proto_id(proto_idx)
        return self.get_string(id.shorty_idx)
    def get_field_ids(self):
        # Lazily parse all field_id_items.
        if self.field_ids is None:
            self.field_ids = list()
            self.data.push_offset_and_seek(self.header.field_ids_off)
            for i in range(self.header.field_ids_size):
                self.field_ids.append(field_id_item(self.data, self))
            self.data.pop_offset_and_seek()
        return self.field_ids
    def get_method_ids(self):
        # Lazily parse all method_id_items.
        if self.method_ids is None:
            self.method_ids = list()
            self.data.push_offset_and_seek(self.header.method_ids_off)
            for i in range(self.header.method_ids_size):
                self.method_ids.append(method_id_item(self.data, self))
            self.data.pop_offset_and_seek()
        return self.method_ids
    def find_method_ids(self, method_name, class_ref=None):
        '''Return all method_id_items named method_name, optionally
        restricted to the class identified by class_ref.'''
        dex_class = None
        if class_ref is not None:
            dex_class = self.find_class(class_ref)
        matches = list()  # Return a list of matching methods
        method_ids = self.get_method_ids()
        if not method_ids:
            return matches
        name_idx = self.find_string_idx(method_name)
        # NOTE(review): '<= 0' also rejects the valid string index 0;
        # '< 0' looks intended -- confirm before relying on index 0.
        if name_idx <= 0:
            return matches
        for method_id in method_ids:
            if method_id.name_idx == name_idx:
                if dex_class:
                    if method_id.class_idx != dex_class.class_def.class_idx:
                        continue
                matches.append(method_id)
        return matches
    def find_method_id_by_code_offset(self, code_off):
        # Ask each class_def for an encoded_method whose code starts at
        # code_off.
        class_defs = self.get_class_defs()
        for class_def in class_defs:
            method_id = class_def.find_encoded_method_by_code_off(code_off)
            if method_id:
                return method_id
        return None
    def get_method_id(self, method_ref):
        '''method_ref can be one of:
        - a encoded_method object
        - integer method index'''
        method_ids = self.get_method_ids()
        if method_ids:
            if isinstance(method_ref, encoded_method):
                if method_ref.method_idx < len(method_ids):
                    return method_ids[method_ref.method_idx]
            elif isinstance(method_ref, numbers.Integral):
                if method_ref < len(method_ids):
                    return method_ids[method_ref]
            else:
                raise ValueError('invalid method_ref type %s' %
                                 (type(method_ref)))
        return None
    # def get_call_site(self, idx):
    #     call_site_ids = self.get_call_site_ids()
    #     if idx >= len(call_site_ids):
    #         return None
    #     if self.call_sites[idx] is None:
    #         self.data.push_offset_and_seek(call_site_ids[idx])
    #         self.call_sites[idx] = call_site_item(self.data)
    #         self.data.pop_offset_and_seek()
    #     return self.call_sites[idx]
    def get_call_site_ids(self):
        # Lazily read call-site offsets; self.call_sites is a parallel list
        # of None placeholders (see the commented-out get_call_site above).
        if self.call_site_ids is None:
            self.call_site_ids = list()
            self.call_sites = list()
            (size, offset) = self.get_map_tuple(TYPE_CALL_SITE_ID_ITEM)
            self.data.push_offset_and_seek(offset)
            for i in range(size):
                self.call_site_ids.append(self.data.get_uint32())
                self.call_sites.append(None)
            self.data.pop_offset_and_seek()
        return self.call_site_ids
    def get_method_handle_items(self):
        # Lazily parse method_handle_items via the map_list entry.
        if self.method_handle_items is None:
            self.method_handle_items = list()
            (size, offset) = self.get_map_tuple(TYPE_METHOD_HANDLE_ITEM)
            self.data.push_offset_and_seek(offset)
            for i in range(size):
                self.method_handle_items.append(method_handle_item(self.data))
            self.data.pop_offset_and_seek()
        return self.method_handle_items
    def get_code_items(self):
        # Lazily parse code_items and build the offset -> index map used by
        # get_code_item_index_from_code_off().
        if self.code_items is None:
            self.code_items = list()
            (size, offset) = self.get_map_tuple(TYPE_CODE_ITEM)
            self.data.push_offset_and_seek(offset)
            for i in range(size):
                # code_items are 4-byte aligned in the file.
                self.data.align_to(4)
                item = code_item(self.data)
                self.code_items.append(item)
                self.code_off_to_code_item_idx[item.get_offset()] = i
            self.data.pop_offset_and_seek()
        return self.code_items
    def report_code_duplication(self):
        '''Group methods by identical bytecode (code_item.insns) and print
        every group that has more than one member.'''
        code_to_code_items = {}
        code_items = self.get_code_items()
        if code_items:
            for code_item in code_items:
                key = code_item.insns
                if key in code_to_code_items:
                    code_to_code_items[key].append(code_item)
                else:
                    code_to_code_items[key] = [code_item]
            for key in code_to_code_items:
                code_items = code_to_code_items[key]
                if len(code_items) > 1:
                    print('-' * 72)
                    print('The following methods have the same code:')
                    for code_item in code_items:
                        method = self.find_method_from_code_off(
                            code_item.get_offset())
                        if method.is_virtual:
                            print('virtual', end=' ')
                        else:
                            print('direct', end=' ')
                        print(method.get_qualified_name())
                    # Dump the code once for all methods
                    method.dump_code()
    def get_class_def_index_from_offset(self, class_def_offset):
        # Linear search; returns -1 when no class_def starts at that offset.
        class_defs = self.get_class_defs()
        for (i, class_def) in enumerate(class_defs):
            if class_def.get_offset() == class_def_offset:
                return i
        return -1
    def get_code_item_index_from_code_off(self, code_off):
        # Make sure the code items are created
        self.get_code_items()
        if code_off in self.code_off_to_code_item_idx:
            return self.code_off_to_code_item_idx[code_off]
        return -1
    def find_code_item(self, code_off):
        # Unlike most lookups here, this raises instead of returning None.
        code_item_idx = self.get_code_item_index_from_code_off(code_off)
        if code_item_idx >= 0:
            return self.get_code_items()[code_item_idx]
        else:
            raise ValueError('invalid code item offset %#8.8x' % code_off)
    def find_method_from_code_off(self, code_off):
        # Brute-force scan over every method of every class.
        if code_off == 0:
            return None
        for cls in self.get_classes():
            for method in cls.get_methods():
                if method.get_code_offset() == code_off:
                    return method
        return None
    def get_class_defs(self):
        # Lazily parse all class_def_items.
        if self.class_defs is None:
            self.class_defs = list()
            self.data.push_offset_and_seek(self.header.class_defs_off)
            for i in range(self.header.class_defs_size):
                class_def = class_def_item(self.data, self)
                self.class_defs.append(class_def)
            self.data.pop_offset_and_seek()
        return self.class_defs
    def get_classes(self):
        # Wrap each class_def_item in a DexClass.
        if self.classes is None:
            self.classes = list()
            class_defs = self.get_class_defs()
            for class_def in class_defs:
                dex_class = DexClass(self, class_def)
                self.classes.append(dex_class)
            # NOTE(review): this pop has no matching push in this method --
            # confirm file_extract tolerates an unbalanced pop (it may be a
            # leftover from an earlier revision).
            self.data.pop_offset_and_seek()
        return self.classes
    def get_strings(self):
        # Parse each string_data_item at the offsets from get_string_ids().
        if self.strings is None:
            self.strings = list()
            for string_id_item in self.get_string_ids():
                self.data.push_offset_and_seek(string_id_item)
                self.strings.append(string_data_item(self.data))
                self.data.pop_offset_and_seek()
        return self.strings
    def dump_header(self, options, f=sys.stdout):
        self.header.dump(f=f)
    def dump_map_list(self, options, f=sys.stdout):
        self.get_map_list().dump(f=f)
        f.write('\n')
    def dump_string_ids(self, options, f=sys.stdout):
        # Print each string index, its file offset, and its contents.
        string_ids = self.get_string_ids()
        if string_ids:
            f.write('string_ids:\n')
            for (i, item) in enumerate(self.get_strings()):
                f.write('[%3u] %#8.8x ( ' % (i, string_ids[i]))
                item.dump(f=f)
                f.write(')\n')
    def dump_type_ids(self, options, f=sys.stdout):
        type_ids = self.get_type_ids()
        if type_ids:
            f.write('\ntype_ids:\n DESCRIPTOR_IDX\n')
            for (i, item) in enumerate(type_ids):
                f.write('[%3u] %#8.8x ("%s")\n' %
                        (i, item, self.get_string(item)))
    def find_type_idx(self, class_str_idx):
        # Binary search works because type_ids is sorted by string index
        # (required by the DEX format).
        types = self.get_type_ids()
        i = bisect.bisect_left(types, class_str_idx)
        if i != len(types) and types[i] == class_str_idx:
            return i
        return -1
    def find_class_def_by_type_index(self, class_idx):
        # Linear search; None when no class_def has that type index.
        class_defs = self.get_class_defs()
        for class_def in class_defs:
            if class_def.class_idx == class_idx:
                return class_def
        return None
    def dump_proto_ids(self, options, f=sys.stdout):
        # Print each prototype with its shorty, return type and parameters.
        proto_ids = self.get_proto_ids()
        if proto_ids:
            f.write('\nproto_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(proto_id_item.get_table_header())
            for (i, item) in enumerate(proto_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                shorty = self.get_string(item.shorty_idx)
                ret = self.get_string(item.return_type_idx)
                f.write(' ("%s", "%s"' % (shorty, ret))
                parameters = item.get_parameters()
                if parameters:
                    f.write(', (')
                    for (i, type_id) in enumerate(parameters.list):
                        if i > 0:
                            f.write(', ')
                        f.write(self.get_string(type_id))
                    f.write(')')
                else:
                    f.write(', ()')
                f.write(')\n')
    def dump_field_ids(self, options, f=sys.stdout):
        field_ids = self.get_field_ids()
        if field_ids:
            f.write('\nfield_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(field_id_item.get_table_header())
            for (i, item) in enumerate(field_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s", "%s", "%s")\n' % (
                    self.get_typename(item.class_idx),
                    self.get_typename(item.type_idx),
                    self.get_string(item.name_idx)))
    def dump_method_ids(self, options, f=sys.stdout):
        method_ids = self.get_method_ids()
        if method_ids:
            f.write('\nmethod_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(method_id_item.get_table_header())
            for (i, item) in enumerate(method_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s", "%s", "%s")\n' % (
                    self.get_typename(item.class_idx),
                    self.get_proto_shorty(item.proto_idx),
                    self.get_string(item.name_idx)))
    def dump_class_defs(self, options, f=sys.stdout):
        class_defs = self.get_class_defs()
        if class_defs:
            f.write('\nclass_defs:\n')
            f.write(' ' * (5 + 1))
            f.write(class_def_item.get_table_header())
            for (i, item) in enumerate(class_defs):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s")' % (self.get_typename(item.class_idx)))
                f.write('\n')
    def dump_call_site_ids(self, options, f=sys.stdout):
        call_site_ids = self.get_call_site_ids()
        if call_site_ids:
            f.write('\ncall_site_ids:\n')
            f.write(' ' * (5 + 1))
            for (i, item) in enumerate(call_site_ids):
                f.write('[%3u] %#8.8x\n' % (i, item))
    def dump_method_handle_items(self, options, f=sys.stdout):
        method_handle_items = self.get_method_handle_items()
        if method_handle_items:
            f.write('\nmethod_handle_items:\n')
            f.write(' ' * (5 + 1))
            for (i, item) in enumerate(method_handle_items):
                f.write('[%3u] ' % (i))
                item.dump(f=f)
                f.write('\n')
    def dump_code(self, options, f=sys.stdout):
        # Dump every non-abstract class and (per options) its methods.
        classes = self.get_classes()
        if classes:
            for cls in classes:
                if cls.is_abstract():
                    continue
                cls.dump(f=f)
                methods = cls.get_methods()
                dc = options.dump_code or options.dump_all
                ddi = options.debug or options.dump_all
                for method in methods:
                    if options.dump_code or options.dump_all:
                        method.dump(f=f, dump_code=dc, dump_debug_info=ddi)
                f.write('\n')
    def dump_code_items(self, options, f=sys.stdout):
        code_items = self.get_code_items()
        if code_items:
            for (i, code_item) in enumerate(code_items):
                f.write('code_item[%u]:\n' % (i))
                code_item.dump(f=f)
    def dump(self, options, f=sys.stdout):
        # Full dump: every section in file order.
        self.dump_header(options, f)
        f.write('\n')
        self.dump_map_list(options, f)
        self.dump_string_ids(options, f)
        self.dump_type_ids(options, f)
        self.dump_proto_ids(options, f)
        self.dump_field_ids(options, f)
        self.dump_method_ids(options, f)
        self.dump_class_defs(options, f)
        self.dump_call_site_ids(options, f)
        self.dump_method_handle_items(options, f)
        self.dump_code(options, f)
        self.dump_code_items(options, f)
def sign_extending(value, bit_width):
    '''Interpret value as a two's-complement integer of bit_width bits and
    return the corresponding signed Python int.'''
    sign_bit = 1 << (bit_width - 1)
    if value & sign_bit:
        # Sign bit set: subtract 2**bit_width to recover the negative value.
        return value - (sign_bit << 1)
    return value
def get_signed_hex_offset_as_str(signed_offset, width):
    '''Format a signed offset as an explicitly-signed, zero-padded hex
    string of the given byte width (2, 4 or 8 hex digits), e.g. "+001a"
    or "-0004".  Raises ValueError for any other width.'''
    sign = '-' if signed_offset < 0 else '+'
    magnitude = abs(signed_offset)
    if width == 2:
        return sign + '%2.2x' % (magnitude & 0xff)
    if width == 4:
        return sign + '%4.4x' % (magnitude & 0xffff)
    if width == 8:
        return sign + '%8.8x' % (magnitude & 0xffffffff)
    raise ValueError("only sizes of 2 4 or 8 are supported")
class Opcode(object):
    '''Base class for decoded Dalvik opcodes.

    Subclasses supply the class attributes "ops" (op byte -> mnemonic),
    "num_code_units" and "max_regs", and usually fill in self.regs.'''

    def __init__(self, inst):
        self.inst = inst

    def check_encoding(self, f=sys.stdout):
        '''Verify that this instruction can't be encoded more efficiently'''
        return 0  # Return zero to indicate we can't save any bytes

    def new_encoding(self, f=sys.stdout):
        '''Look for bytes we can save by making new opcodes that are encoded
        as unsigned, or other optimizations'''
        return 0  # Return zero to indicate we can't save any bytes

    def get_op(self):
        return self.inst.get_op()

    def get_name(self):
        # Look up the mnemonic for this instruction's op byte.
        return self.ops[self.get_op()]

    def get_num_code_units(self):
        return self.num_code_units

    def regs_are_sequential(self):
        # True when self.regs is empty, a singleton, or strictly consecutive.
        return all(self.regs[i - 1] + 1 == self.regs[i]
                   for i in range(1, len(self.regs)))
class Opcode00(Opcode):
    '''Opcode 0x00: either a plain "nop" or, depending on the AA byte
    ("nature"), one of the pseudo-instruction payloads:
      1 = packed-switch-payload, 2 = sparse-switch-payload,
      3 = fill-array-data-payload.'''
    ops = {0x00: 'nop'}
    num_code_units = 1
    max_regs = 0
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # AA byte selects which payload form this is.
        self.nature = inst.get_AA()
        if self.nature == 0:
            pass  # NOP
        elif self.nature == 1:
            # packed-switch: consecutive keys starting at first_key.
            self.size = code_units.get_code_unit()
            self.first_key = code_units.get_int()
            self.targets = list()
            for i in range(self.size):
                self.targets.append(code_units.get_int())
        elif self.nature == 2:
            # sparse-switch: explicit key list followed by target list.
            self.size = code_units.get_code_unit()
            self.keys = list()
            self.targets = list()
            for i in range(self.size):
                self.keys.append(code_units.get_int())
            for i in range(self.size):
                self.targets.append(code_units.get_int())
        elif self.nature == 3:
            # fill-array-data: raw element bytes, re-encoded into a buffer.
            # NOTE: StringIO.StringIO is the Python 2 module spelling.
            self.element_width = code_units.get_code_unit()
            self.size = code_units.get_uint()
            num_code_units = int((self.size * self.element_width + 1) / 2)
            encoder = file_extract.FileEncode(StringIO.StringIO(), 'little', 4)
            for i in range(num_code_units):
                encoder.put_uint16(code_units.get_code_unit())
            encoder.seek(0)
            self.data = encoder.file.getvalue()
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))

    def get_name(self):
        # Name depends on the payload nature, not just the op byte.
        if self.nature == 0:
            return self.ops[0]
        elif self.nature == 1:
            return 'packed-switch-payload'
        elif self.nature == 2:
            return 'sparse-switch-payload'
        elif self.nature == 3:
            return 'fill-array-data-payload'
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))

    def get_num_code_units(self):
        # Payload sizes are computed per nature (in 16-bit code units).
        if self.nature == 0:
            return 1
        elif self.nature == 1:
            op_count = 1
            size_count = 1
            first_key_count = 2
            keys_count = self.size * 2
            return op_count + size_count + first_key_count + keys_count
        elif self.nature == 2:
            op_count = 1
            size_count = 1
            keys_and_targets_count = self.size * 4
            return op_count + size_count + keys_and_targets_count
        elif self.nature == 3:
            op_count = 1
            element_width_count = 2
            # NOTE(review): len(self.data) counts bytes while the other
            # terms count 16-bit code units -- confirm this mixed unit is
            # intended.
            return op_count + element_width_count + len(self.data)
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))

    def dump(self, f=sys.stdout):
        # Pretty-print the payload table appropriate for this nature.
        if self.nature == 0:
            f.write('%s' % (self.get_name()))
        elif self.nature == 1:
            f.write('packed-switch-payload\n')
            f.write('INDEX KEY TARGET\n===== --------- ---------\n')
            for (i, target) in enumerate(self.targets):
                f.write('[%3u] %+8.8x %+8.8x\n' %
                        (i, self.first_key + i, target))
        elif self.nature == 2:
            f.write('sparse-switch-payload\n')
            f.write('INDEX KEY TARGET\n===== --------- ---------\n')
            for (i, key) in enumerate(self.keys):
                f.write('[%3u] %+8.8x %+8.8x\n' % (i, key, self.targets[i]))
        elif self.nature == 3:
            f.write('fill-array-data-payload (elem_width = %u, size = %u)\n' %
                    (self.element_width, self.size))
            file_extract.dump_memory(0, self.data, self.element_width, f)

    def emulate(self, emulator):
        # nop / payloads have no effect on emulated state.
        pass
class Opcode01(Opcode):
    '''"move" vA, vB: two register operands taken from the instruction's
    A and B fields.'''
    ops = {0x01: 'move'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode02(Opcode):
    '''"move/from16" vAA, vBBBB: destination from the AA byte, source from
    the second code unit.'''
    ops = {0x02: 'move/from16'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]

    def check_encoding(self, f=sys.stdout):
        # A plain "move" suffices when both registers fit in 4 bits.
        (dst, src) = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move/from16" can be encoded as a "move"'
                    ' more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode03(Opcode):
    '''"move/16" vAAAA, vBBBB: both registers come from full extra code
    units.'''
    ops = {0x03: 'move/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]

    def check_encoding(self, f=sys.stdout):
        # Smaller encodings exist when the registers fit narrower fields.
        (dst, src) = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move/16" can be encoded as a "move"'
                    ' more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 4
        if dst <= UINT8_MAX:
            f.write('warning: "move/16" can be encoded as a "move/from16"'
                    ' more efficiently as its first register is <= %u\n' %
                    (UINT8_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode04(Opcode):
    '''"move-wide" vA, vB: register-pair move, operands from the A and B
    fields.'''
    ops = {0x04: 'move-wide'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode05(Opcode):
    '''"move-wide/from16" vAA, vBBBB.'''
    ops = {0x05: 'move-wide/from16'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]

    def check_encoding(self, f=sys.stdout):
        # "move-wide" suffices when both registers fit in 4 bits.
        (dst, src) = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move-wide/from16" can be encoded as a '
                    '"move-wide" more efficiently as its registers are '
                    'both <= %u\n' % (UINT4_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode06(Opcode):
    '''"move-wide/16" vAAAA, vBBBB.'''
    ops = {0x06: 'move-wide/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]

    def check_encoding(self, f=sys.stdout):
        # Smaller encodings exist when the registers fit narrower fields.
        (dst, src) = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move-wide/16" can be encoded as a "move-wide" '
                    'more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 4
        if dst <= UINT8_MAX:
            f.write('warning: "move-wide/16" can be encoded as a '
                    '"move-wide/from16" more efficiently as its first '
                    'register is <= %u\n' % (UINT8_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode07(Opcode):
    '''"move-object" vA, vB: object-reference move, operands from the A and
    B fields.'''
    ops = {0x07: 'move-object'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode08(Opcode):
    '''"move-object/from16" vAA, vBBBB.  (The mnemonic string carries a
    trailing space exactly as in the original table.)'''
    ops = {0x08: 'move-object/from16 '}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]

    def check_encoding(self, f=sys.stdout):
        # "move-object" suffices when both registers fit in 4 bits.
        (dst, src) = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move-object/from16" can be encoded as a '
                    '"move-object" more efficiently as its registers are '
                    'both <= %u\n' % (UINT4_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode09(Opcode):
    '''"move-object/16" vAAAA, vBBBB.'''
    ops = {0x09: 'move-object/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]

    def check_encoding(self, f=sys.stdout):
        # Smaller encodings exist when the registers fit narrower fields.
        (dst, src) = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move-object/16" can be encoded as a '
                    '"move-object" more efficiently as its registers '
                    'are both <= %u\n' % (UINT4_MAX))
            return 4
        if dst <= UINT8_MAX:
            f.write('warning: "move-object/16" can be encoded as a '
                    '"move-object/from16" more efficiently as its first '
                    'register is <= %u\n' % (UINT8_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        (dst, src) = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0A_0D(Opcode):
    '''Single-register result/exception moves (0x0a-0x0d): the one operand
    comes from the AA byte.'''
    ops = {
        0x0a: 'move-result',
        0x0b: 'move-result-wide',
        0x0c: 'move-result-object',
        0x0d: 'move-exception'
    }
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()

    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0E(Opcode):
    '''"return-void": no operands.'''
    ops = {0x0e: 'return-void'}
    num_code_units = 1
    max_regs = 0
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)

    def dump(self, f=sys.stdout):
        f.write('%s' % (self.get_name()))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0F(Opcode):
    '''"return" vAA: single register operand from the AA byte.'''
    ops = {0x0f: 'return'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()

    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode10(Opcode):
    '''"return-wide" vAA: single register operand from the AA byte.'''
    ops = {0x10: 'return-wide'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()

    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode11(Opcode):
    '''"return-object" vAA: single register operand from the AA byte.'''
    ops = {0x11: 'return-object'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()

    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode12(Opcode):
    '''"const/4" vA, #imm: immediate is the sign-extended top nibble of the
    first code unit.'''
    ops = {0x12: 'const/4'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'n'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_A()
        self.imm = sign_extending(inst[0] >> 12, 4)

    def dump(self, f=sys.stdout):
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))

    def emulate(self, emulator):
        # Store the immediate into the destination register.
        emulator.write_register(self.reg, self.imm)
class Opcode13(Opcode):
    '''"const/16" vAA, #imm: 16-bit signed immediate from the second code
    unit.'''
    ops = {0x13: 'const/16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 's'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16)

    def check_encoding(self, f=sys.stdout):
        # "const/4" suffices when both register and immediate fit 4 bits.
        fits_const4 = (self.reg <= UINT4_MAX and
                       INT4_MIN <= self.imm <= INT4_MAX)
        if fits_const4:
            f.write('warning: "const/16" can be encoded as a "const/4" more '
                    'efficiently as its register is <= %u and ' % (UINT4_MAX))
            f.write('(%i <= %i <= %i)\n' % (INT4_MIN, self.imm, INT4_MAX))
            return 2
        return 0

    def new_encoding(self, f=sys.stdout):
        # Hypothetical unsigned "const/u4" would cover small positives just
        # above the const/4 range.
        if (self.reg <= UINT4_MAX and INT4_MAX < self.imm <=
                (INT4_MAX + UINT4_MAX)):
            f.write('"const/16" could be encoded as a new "const/u4" stores '
                    'a 4 bit unsigned offset from +8 for a constant range '
                    'of [8-24):\n')
            return 2
        return 0

    def dump(self, f=sys.stdout):
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))

    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode14(Opcode):
    '''"const" vAA, #imm: 32-bit immediate from the second and third code
    units.'''
    ops = {0x14: 'const'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'i'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_uint32(1)

    def check_encoding(self, f=sys.stdout):
        # "const/16" suffices when the immediate fits 16 signed bits.
        fits_const16 = (self.reg <= UINT8_MAX and
                        INT16_MIN <= self.imm <= INT16_MAX)
        if fits_const16:
            f.write('warning: "const" can be encoded as a "const/16" more '
                    'efficiently as its register is < %u ' % (UINT8_MAX))
            f.write('and (%i <= %i <= %i)\n' % (INT16_MIN, self.imm,
                                                INT16_MAX))
            return 2
        return 0

    def new_encoding(self, f=sys.stdout):
        # Hypothetical unsigned "const/u16" would cover positives just above
        # the const/16 range.
        if INT16_MAX < self.imm <= (INT16_MAX + UINT16_MAX):
            f.write('"const" could be encoded as a new "const/u16" stores a '
                    '16 bit unsigned offset from 32768 instead of a 16 bit '
                    'signed value\n')
            return 2
        return 0

    def dump(self, f=sys.stdout):
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))

    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode15(Opcode):
    """Decoder for 'const/high16': load imm16 << 16 into register vAA."""
    ops = {0x15: 'const/high16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'h'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        # The 16-bit literal supplies the high half of the 32-bit value.
        self.imm = inst[1] << 16

    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)

    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode16(Opcode):
    """Decoder for 'const-wide/16': load a sign-extended 16-bit literal
    into a wide (64-bit) register pair."""
    ops = {0x16: 'const-wide/16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 's'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16)

    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)

    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode17(Opcode):
    """Decoder for 'const-wide/32': load a sign-extended 32-bit literal
    into a wide (64-bit) register pair."""
    ops = {0x17: 'const-wide/32'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'i'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_sint32(1)

    def check_encoding(self, f=sys.stdout):
        # Return the bytes a smaller existing encoding would save.
        if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
            f.write('warning: "const-wide/32" can be encoded as a ')
            # Fixed: the format string has three %i conversions but was
            # handed four arguments (a stray UINT8_MAX), which raised
            # "not all arguments converted during string formatting".
            f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
            return 2
        return 0

    def new_encoding(self, f=sys.stdout):
        # Return the bytes a hypothetical 'const-wide/u16' would save.
        if self.imm > INT16_MAX and self.imm <= (INT16_MAX + UINT16_MAX):
            f.write('"const-wide/32" could be encoded as a new ')
            f.write('"const-wide/u16" stores a 16 bit unsigned offset from ')
            f.write('32768 instead of a 16 bit signed value\n')
            return 2
        return 0

    def dump(self, f=sys.stdout):
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))

    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode18(Opcode):
    """Decoder for 'const-wide/64': load a full 64-bit literal into a wide
    register pair."""
    ops = {0x18: 'const-wide/64'}
    num_code_units = 5
    max_regs = 1
    extra_data = 'l'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_uint64(1)
    def check_encoding(self, f=sys.stdout):
        # Return the bytes saved by the smallest existing wide encoding
        # that can hold this literal.
        if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
            f.write('warning: "const-wide/64" can be encoded as a ')
            f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
            return 6
        if INT32_MIN <= self.imm and self.imm <= INT32_MAX:
            f.write('warning: "const-wide/64" can be encoded as a ')
            f.write('"const-wide/32" more efficiently as (%i <= %i <= %i)\n' %
                    (INT32_MIN, self.imm, INT32_MAX))
            return 4
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode19(Opcode):
    """Decoder for 'const-wide/high16': load imm16 << 48 into a wide
    register pair."""
    ops = {0x19: 'const-wide/high16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'h'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        # The literal supplies the high 16 bits of the 64-bit value.
        self.imm = sign_extending(inst[1], 16) << 48

    def dump(self, f=sys.stdout):
        text = '%s v%u, #int %i // #%#x' % (self.get_name(), self.reg,
                                            self.imm, self.imm)
        f.write(text)

    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode1A(Opcode):
    """Decoder for 'const-string': load a string reference through a
    16-bit string-pool index."""
    ops = {0x1a: 'const-string'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.string_idx = inst[1]

    def dump(self, f=sys.stdout):
        text = '%s v%u, string@%4.4x' % (self.get_name(), self.reg,
                                         self.string_idx)
        f.write(text)

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1B(Opcode):
    """Decoder for 'const-string/jumbo': load a string reference through a
    32-bit string-pool index."""
    ops = {0x1b: 'const-string/jumbo'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.string_idx = inst.get_uint32(1)

    def dump(self, f=sys.stdout):
        f.write('%s v%u, string@%8.8x' %
                (self.get_name(), self.reg, self.string_idx))

    def check_encoding(self, f=sys.stdout):
        # Return the bytes saved by using the 16-bit 'const-string' form.
        # Fixed: this previously tested self.signed_offset, an attribute
        # this class never sets (__init__ stores self.string_idx), so it
        # always raised AttributeError.
        if self.string_idx <= UINT16_MAX:
            f.write('warning: "const-string/jumbo" can be encoded as a ')
            f.write('"const-string" more efficiently as its offset is ')
            f.write('<= UINT16_MAX\n')
            return 2
        return 0

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1C(Opcode):
    """Decoder for 'const-class': load a class reference through a type
    index."""
    ops = {0x1c: 'const-class'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]

    def dump(self, f=sys.stdout):
        text = '%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type)
        f.write(text)

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1D(Opcode):
    """Decoder for 'monitor-enter': acquire the monitor of the object in vAA."""
    ops = {0x1d: 'monitor-enter'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1E(Opcode):
    """Decoder for 'monitor-exit': release the monitor of the object in vAA."""
    ops = {0x1e: 'monitor-exit'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1F(Opcode):
    """Decoder for 'check-cast': throw if vAA cannot be cast to the type."""
    ops = {0x1f: 'check-cast'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode20(Opcode):
    """Decoder for 'instance-of': vA = (vB is an instance of the type)."""
    ops = {0x20: 'instance-of'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs[0] is the destination (nibble A), regs[1] the object (nibble B).
        self.regs = [inst.get_A(), inst.get_B()]
        self.type = inst[1]

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u, type@%4.4x' %
                (self.get_name(), dst, src, self.type))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode21(Opcode):
    """Decoder for 'array-length': vA = length of the array in vB."""
    ops = {0x21: 'array-length'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode22(Opcode):
    """Decoder for 'new-instance': allocate an object of the given type."""
    ops = {0x22: 'new-instance'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode23(Opcode):
    """Decoder for 'new-array': vA = new array of given type, size vB."""
    ops = {0x23: 'new-array'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, type@%4.4x' %
                (self.get_name(), self.regs[0], self.regs[1], self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode24(Opcode):
    """Decoder for 'filled-new-array' (format 35c): build an array from up
    to five individually named registers."""
    ops = {0x24: 'filled-new-array'}
    num_code_units = 3
    max_regs = 5
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Register count is the top nibble of the first code unit.
        arg_count = inst[0] >> 12
        self.type = inst[1]
        self.regs = list()
        # Four register nibbles come from code unit 2; a possible fifth
        # comes from the G nibble of code unit 0, shifted into bits 16-19.
        regs = inst[2] | ((inst[0] << 8) & 0xf0000)
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} type@%4.4x" % (self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode25(Opcode):
    """Decoder for 'filled-new-array/range' (format 3rc): build an array
    from a contiguous run of registers."""
    ops = {0x25: 'filled-new-array/range '}
    num_code_units = 3
    max_regs = 'r'
    extra_data = 'c'
    format = '3rc'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        count = inst.get_AA()
        self.type = inst[1]
        base = inst[2]
        # Registers are the contiguous range [base, base + count).
        self.regs = [base + i for i in range(count)]

    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        f.write(', '.join("v%u" % (reg) for reg in self.regs))
        f.write("} type@%4.4x" % (self.type))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode26(Opcode):
    """Decoder for 'fill-array-data': fill the array in vAA from an inline
    data table located at a signed code-unit offset."""
    ops = {0x26: 'fill-array-data'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.signed_offset = inst.get_sint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %8.8x // %s' % (self.get_name(), self.reg,
                self.inst.code_unit_idx + self.signed_offset,
                get_signed_hex_offset_as_str(self.signed_offset, 8)))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode27(Opcode):
    """Decoder for 'throw': throw the exception object in vAA."""
    ops = {0x27: 'throw'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode28(Opcode):
    """Decoder for 'goto': unconditional branch with an 8-bit signed offset."""
    ops = {0x28: 'goto'}
    num_code_units = 1
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = inst.get_signed_AA()
    def check_encoding(self, f=sys.stdout):
        # A zero branch offset is an invalid encoding per the ISA; report
        # it but there are no bytes to save.
        if self.signed_offset == 0:
            f.write('error: "goto" has a zero offset (invalid encoding)\n')
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode29(Opcode):
    """Decoder for 'goto/16': unconditional branch with a 16-bit signed
    offset."""
    ops = {0x29: 'goto/16'}
    num_code_units = 2
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def check_encoding(self, f=sys.stdout):
        # Zero offset is invalid; an 8-bit-representable offset could use
        # the shorter 'goto' form and save one code unit (2 bytes).
        if self.signed_offset == 0:
            f.write(
                'error: "goto/16" has a zero offset (invalid encoding)\n')
        elif INT8_MIN <= self.signed_offset and self.signed_offset <= INT8_MAX:
            f.write('warning: "goto/16" can be encoded as a "goto" more ')
            f.write('efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2A(Opcode):
    """Decoder for 'goto/32': unconditional branch with a 32-bit signed
    offset."""
    ops = {0x2A: 'goto/32'}
    num_code_units = 3
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = inst.get_sint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def check_encoding(self, f=sys.stdout):
        # goto/32 is the only goto form that may legally encode offset 0.
        if self.signed_offset == 0:
            return 0
        if INT8_MIN <= self.signed_offset and self.signed_offset <= INT8_MAX:
            f.write('warning: "goto/32" can be encoded as a "goto" more ')
            f.write('efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
            return 2
        if INT16_MIN <= self.signed_offset and self.signed_offset <= INT16_MAX:
            f.write('warning: "goto/32" can be encoded as a "goto/16" more ')
            f.write('efficiently since (INT16_MIN <= offset <= INT16_MAX)\n')
            return 4
        return 0
    def new_encoding(self, f=sys.stdout):
        # Bytes a hypothetical 24-bit 'goto/16' variant would save.
        if INT16_MIN <= self.signed_offset and self.signed_offset <= INT16_MAX:
            return 0
        if INT24_MIN <= self.signed_offset and self.signed_offset <= INT24_MAX:
            f.write('"goto/32" could be encoded as a new "goto/16" where ')
            f.write('that opcode uses the extra 8 bits in the first code ')
            f.write('unit to provide a 24 bit branch range\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2B(Opcode):
    """Decoder for 'packed-switch': jump through a packed jump table at a
    code-unit offset."""
    ops = {0x2b: 'packed-switch'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        # NOTE(review): read here as unsigned 32 bits; the table offset is
        # presumably signed in the ISA — confirm before relying on it.
        self.branch = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
                self.inst.get_code_unit_index() + self.branch, self.branch))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2C(Opcode):
    """Decoder for 'sparse-switch': jump through a sparse jump table at a
    code-unit offset."""
    ops = {0x2c: 'sparse-switch'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        # NOTE(review): read as unsigned, same caveat as packed-switch.
        self.branch = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
                self.inst.get_code_unit_index() + self.branch, self.branch))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2D_31(Opcode):
    """Decoder for the three-register compare instructions
    (cmpl/cmpg-float/double, cmp-long): vAA = compare(vBB, vCC)."""
    ops = {
        0x2d: 'cmpl-float (lt bias)',
        0x2e: 'cmpg-float (gt bias)',
        0x2f: 'cmpl-double (lt bias)',
        0x30: 'cmpg-double (gt bias)',
        0x31: 'cmp-long',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs = [vAA destination, vBB first source, vCC second source].
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.regs.append(inst.get_uint8_hi(1))
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, v%u" %
                (self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode32_37(Opcode):
    """Decoder for the two-register conditional branches (if-eq .. if-le):
    branch by a 16-bit signed offset when vA <op> vB holds."""
    ops = {
        0x32: 'if-eq',
        0x33: 'if-ne',
        0x34: 'if-lt',
        0x35: 'if-ge',
        0x36: 'if-gt',
        0x37: 'if-le',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, %4.4x // %i' % (self.get_name(), self.regs[0],
                self.regs[1], self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode38_3D(Opcode):
    """Decoder for the compare-with-zero conditional branches
    (if-eqz .. if-lez): branch when vAA <op> 0 holds."""
    ops = {
        0x38: 'if-eqz',
        0x39: 'if-nez',
        0x3a: 'if-ltz',
        0x3b: 'if-gez',
        0x3c: 'if-gtz',
        0x3d: 'if-lez',
    }
    num_code_units = 2
    max_regs = 1
    # NOTE(review): other branch opcodes in this file use extra_data 't';
    # 'c' here looks inconsistent — confirm against the format tables.
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %4.4x // %s' % (self.get_name(), self.reg,
                self.signed_offset + self.inst.code_unit_idx,
                get_signed_hex_offset_as_str(self.signed_offset, 4)))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode44_51(Opcode):
    """Decoder for the array element accessors (aget*/aput*):
    vAA <-> vBB[vCC]."""
    ops = {
        0x44: 'aget',
        0x45: 'aget-wide',
        0x46: 'aget-object',
        0x47: 'aget-boolean',
        0x48: 'aget-byte',
        0x49: 'aget-char',
        0x4a: 'aget-short',
        0x4b: 'aput',
        0x4c: 'aput-wide',
        0x4d: 'aput-object',
        0x4e: 'aput-boolean',
        0x4f: 'aput-byte',
        0x50: 'aput-char',
        0x51: 'aput-short',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs = [vAA value, vBB array, vCC index].
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.regs.append(inst.get_uint8_hi(1))
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, v%u" %
                (self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode52_5f(Opcode):
    """Decoder for the instance-field accessors (iget*/iput*):
    vA <-> vB.field."""
    ops = {
        0x52: 'iget',
        0x53: 'iget-wide',
        0x54: 'iget-object',
        0x55: 'iget-boolean',
        0x56: 'iget-byte',
        0x57: 'iget-char',
        0x58: 'iget-short',
        0x59: 'iput',
        0x5a: 'iput-wide',
        0x5b: 'iput-object',
        0x5c: 'iput-boolean',
        0x5d: 'iput-byte',
        0x5e: 'iput-char',
        0x5f: 'iput-short',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs = [vA value, vB object]; field is a field-pool index.
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.field = inst[1]
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, field@%4.4x" %
                (self.get_name(), self.regs[0], self.regs[1], self.field))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode60_6d(Opcode):
    """Decoder for the static-field accessors (sget*/sput*):
    vAA <-> static field."""
    ops = {
        0x60: 'sget',
        0x61: 'sget-wide',
        0x62: 'sget-object',
        0x63: 'sget-boolean',
        0x64: 'sget-byte',
        0x65: 'sget-char',
        0x66: 'sget-short',
        0x67: 'sput',
        0x68: 'sput-wide',
        0x69: 'sput-object',
        0x6a: 'sput-boolean',
        0x6b: 'sput-byte',
        0x6c: 'sput-char',
        0x6d: 'sput-short',
    }
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.field = inst.get_uint16(1)
    def dump(self, f=sys.stdout):
        f.write("%s v%u, field@%4.4x" %
                (self.get_name(), self.reg, self.field))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
# Module-wide tallies of invoke instructions that could / could not use the
# proposed "/min-range" encoding; updated by Opcode6E_72.new_encoding().
can_use_new_encoding = 0
cant_use_new_encoding = 0
class Opcode6E_72(Opcode):
    """Decoder for the five-register invoke instructions (format 35c)."""
    ops = {
        0x6e: 'invoke-virtual',
        0x6f: 'invoke-super',
        0x70: 'invoke-direct',
        0x71: 'invoke-static',
        0x72: 'invoke-interface',
    }
    num_code_units = 3
    max_regs = 5
    extra_data = 'c'
    format = '35c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Argument count is the top nibble of the first code unit.
        arg_count = inst[0] >> 12
        self.method_idx = inst[1]
        self.regs = list()
        # Four register nibbles come from code unit 2; a possible fifth
        # comes from the G nibble of code unit 0, shifted into bits 16-19.
        regs = inst[2] | ((inst[0] << 8) & 0xf0000)
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} method@%4.4x" % (self.method_idx))
    def new_encoding(self, f=sys.stdout):
        # Bytes a hypothetical "/min-range" form would save; also updates
        # the module-level can/cant_use_new_encoding statistics.
        if (self.regs_are_sequential() and
                (len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
                len(self.regs) <= UINT4_MAX):
            global can_use_new_encoding
            can_use_new_encoding += 1
            name = self.get_name()
            f.write('"%s" can be encoded as "%s/min-range" ' % (name, name))
            f.write('where the first register is contained in the first ')
            f.write('opcode\n')
            return 2
        global cant_use_new_encoding
        cant_use_new_encoding += 1
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode74_78(Opcode):
    """Decoder for the register-range invoke instructions (format 3rc)."""
    ops = {
        0x74: 'invoke-virtual/range',
        0x75: 'invoke-super/range',
        0x76: 'invoke-direct/range',
        0x77: 'invoke-static/range',
        0x78: 'invoke-interface/range',
    }
    num_code_units = 3
    max_regs = 'r'
    extra_data = 'c'
    format = '3rc'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        arg_count = inst.get_AA()
        self.method_idx = inst[1]
        # Arguments are the contiguous range [first_reg, first_reg+arg_count).
        first_reg = inst[2]
        self.regs = list()
        for i in range(arg_count):
            self.regs.append(first_reg + i)
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} method@%4.4x" % (self.method_idx))
    def new_encoding(self, f=sys.stdout):
        # Bytes a hypothetical "/min-range" form would save.
        if (self.regs_are_sequential() and
                (len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
                len(self.regs) <= UINT4_MAX):
            name = self.get_name()
            f.write('"%s" can be encoded as a "%s/min-range" ' % (name, name))
            f.write('where the first register is contained in the first ')
            f.write('opcode\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode7B_8F(Opcode):
    """Decoder for the two-register unary operations (negation, bitwise
    not, and the numeric conversions): vA = op(vB)."""
    ops = {
        0x7b: 'neg-int',
        0x7c: 'not-int',
        0x7d: 'neg-long',
        0x7e: 'not-long',
        0x7f: 'neg-float',
        0x80: 'neg-double',
        0x81: 'int-to-long',
        0x82: 'int-to-float',
        0x83: 'int-to-double',
        0x84: 'long-to-int',
        0x85: 'long-to-float',
        0x86: 'long-to-double',
        0x87: 'float-to-int',
        0x88: 'float-to-long',
        0x89: 'float-to-double',
        0x8a: 'double-to-int',
        0x8b: 'double-to-long',
        0x8c: 'double-to-float',
        0x8d: 'int-to-byte',
        0x8e: 'int-to-char',
        0x8f: 'int-to-short',
    }
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode90_AF(Opcode):
    """Decoder for the three-register binary operations (format 23x):
    vAA = vBB op vCC."""
    ops = {
        0x90: 'add-int',
        0x91: 'sub-int',
        0x92: 'mul-int',
        0x93: 'div-int',
        0x94: 'rem-int',
        0x95: 'and-int',
        0x96: 'or-int',
        0x97: 'xor-int',
        0x98: 'shl-int',
        0x99: 'shr-int',
        0x9a: 'ushr-int',
        0x9b: 'add-long',
        0x9c: 'sub-long',
        0x9d: 'mul-long',
        0x9e: 'div-long',
        0x9f: 'rem-long',
        0xa0: 'and-long',
        0xa1: 'or-long',
        0xa2: 'xor-long',
        0xa3: 'shl-long',
        0xa4: 'shr-long',
        0xa5: 'ushr-long',
        0xa6: 'add-float',
        0xa7: 'sub-float',
        0xa8: 'mul-float',
        0xa9: 'div-float',
        0xaa: 'rem-float',
        0xab: 'add-double',
        0xac: 'sub-double',
        0xad: 'mul-double',
        0xae: 'div-double',
        0xaf: 'rem-double',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs = [vAA destination, vBB first source, vCC second source].
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.regs.append(inst.get_uint8_hi(1))
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, v%u" %
                (self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
    def opIsCommutative(self):
        '''Return True if the operation is commutative'''
        op = self.get_op()
        return (op == 0x90 or  # add-int
                op == 0x92 or  # mul-int
                op == 0x95 or  # and-int
                op == 0x96 or  # or-int
                op == 0x97 or  # xor-int
                op == 0x9b or  # add-long
                op == 0x9d or  # mul-long
                op == 0xa0 or  # and-long
                op == 0xa1 or  # or-long
                op == 0xa2 or  # xor-long
                op == 0xa6 or  # add-float
                op == 0xa8 or  # mul-float
                op == 0xab or  # add-double
                op == 0xad)    # mul-double
    def check_encoding(self, f=sys.stdout):
        # Report when the shorter /2addr form (dest == first source, both
        # registers nibble-sized) would encode this instruction.
        vAA = self.regs[0]
        vBB = self.regs[1]
        vCC = self.regs[2]
        if vAA == vBB and vAA <= UINT4_MAX and vCC <= UINT4_MAX:
            name = self.get_name()
            f.write('warning: "%s" can be encoded more efficiently ' % (name))
            f.write('as "%s/2addr v%u, v%u"\n' % (name, vAA, vCC))
            return 2
        if (vAA == vCC and vAA <= UINT4_MAX and vBB <= UINT4_MAX and
                self.opIsCommutative()):
            name = self.get_name()
            f.write('warning: "%s" is commutative and can be ' % (name))
            f.write('encoded more efficiently as "%s/2addr v%u, v%u"\n' %
                    (name, vAA, vBB))
            return 2
        return 0  # Return zero to indicate we can't save any bytes
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class OpcodeB0_CF(Opcode):
    """Decoder for the two-register in-place binary operations (format
    12x): vA = vA op vB."""
    ops = {
        0xb0: 'add-int/2addr',
        0xb1: 'sub-int/2addr',
        0xb2: 'mul-int/2addr',
        0xb3: 'div-int/2addr',
        0xb4: 'rem-int/2addr',
        0xb5: 'and-int/2addr',
        0xb6: 'or-int/2addr',
        0xb7: 'xor-int/2addr',
        0xb8: 'shl-int/2addr',
        0xb9: 'shr-int/2addr',
        0xba: 'ushr-int/2addr',
        0xbb: 'add-long/2addr',
        0xbc: 'sub-long/2addr',
        0xbd: 'mul-long/2addr',
        0xbe: 'div-long/2addr',
        0xbf: 'rem-long/2addr',
        0xc0: 'and-long/2addr',
        0xc1: 'or-long/2addr',
        0xc2: 'xor-long/2addr',
        0xc3: 'shl-long/2addr',
        0xc4: 'shr-long/2addr',
        0xc5: 'ushr-long/2addr',
        0xc6: 'add-float/2addr',
        0xc7: 'sub-float/2addr',
        0xc8: 'mul-float/2addr',
        0xc9: 'div-float/2addr',
        0xca: 'rem-float/2addr',
        0xcb: 'add-double/2addr',
        0xcc: 'sub-double/2addr',
        0xcd: 'mul-double/2addr',
        0xce: 'div-double/2addr',
        0xcf: 'rem-double/2addr ',
    }
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class OpcodeD0_D7(Opcode):
    """Decoder for the binary operations with a 16-bit literal
    (add-int/lit16 .. xor-int/lit16): vA = vB op #imm16."""
    ops = {
        0xd0: 'add-int/lit16',
        0xd1: 'rsub-int/lit16',
        0xd2: 'mul-int/lit16',
        0xd3: 'div-int/lit16',
        0xd4: 'rem-int/lit16',
        0xd5: 'and-int/lit16',
        0xd6: 'or-int/lit16',
        0xd7: 'xor-int/lit16',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 's'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs = [vA destination, vB source]; imm is sign extended.
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.imm = sign_extending(inst[1], 16)

    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
                self.regs[0], self.regs[1], self.imm, self.imm))

    def emulate(self, emulator):
        # Fixed: the previous code wrote self.imm to self.reg, an attribute
        # this class never sets (__init__ fills self.regs), so it always
        # raised AttributeError. A correct emulation would need to read
        # self.regs[1] and apply the per-opcode arithmetic, which is not
        # implemented, so report it explicitly like the other opcodes.
        raise ValueError('emulate not supported')
class OpcodeD8_E2(Opcode):
    """Decoder for the binary operations with an 8-bit literal
    (add-int/lit8 .. ushr-int/lit8): vAA = vBB op #imm8."""
    ops = {
        0xd8: 'add-int/lit8',
        0xd9: 'rsub-int/lit8',
        0xda: 'mul-int/lit8',
        0xdb: 'div-int/lit8',
        0xdc: 'rem-int/lit8',
        0xdd: 'and-int/lit8',
        0xde: 'or-int/lit8',
        0xdf: 'xor-int/lit8',
        0xe0: 'shl-int/lit8',
        0xe1: 'shr-int/lit8',
        0xe2: 'ushr-int/lit8',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 'b'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # regs = [vAA destination, vBB source]; imm is the sign-extended
        # high byte of the second code unit.
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.imm = sign_extending(inst.get_uint8_hi(1), 8)

    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
                self.regs[0], self.regs[1], self.imm, self.imm))

    def emulate(self, emulator):
        # Fixed: the previous code wrote self.imm to self.reg, an attribute
        # this class never sets (__init__ fills self.regs), so it always
        # raised AttributeError. Correct emulation would need the source
        # register and the per-opcode arithmetic, which is not implemented.
        raise ValueError('emulate not supported')
class OpcodeFA(Opcode):
    """Decoder stub for 'invoke-polymorphic' (format 45cc).
    NOTE: __init__ raises immediately, so everything after the raise is
    currently dead code awaiting a real-world sample to debug against;
    dump() also references self.type, which is never assigned."""
    ops = {0xfa: 'invoke-polymorphic'}
    num_code_units = 4
    max_regs = 5
    extra_data = 'cc'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        raise ValueError('debug this when we find one of these')
        arg_count = inst[0] >> 12
        self.method_ref_idx = inst[1]
        self.method_hdl_ref = inst[2]
        self.regs = list()
        regs = inst[3] | ((inst[0] << 8) & 0xf0000)
        self.proto = inst[4]
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} type@%4.4x" % (self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class CodeUnits(Opcode):
    """A consuming cursor over an array of 16-bit code units."""

    def __init__(self, code_units):
        self.code_units = code_units
        self.idx = 0

    def index_is_valid(self):
        return self.idx < len(self.code_units)

    def get_index(self):
        return self.idx

    def peek_code_unit(self, idx):
        return self.code_units[idx]

    def get_int(self):
        return sign_extending(self.get_uint(), 32)

    def get_uint(self):
        # Little-endian code-unit order: the low 16 bits come first.
        lo = self.get_code_unit()
        hi = self.get_code_unit()
        return lo | (hi << 16)

    def get_code_unit(self):
        unit = self.code_units[self.idx]
        self.idx += 1
        return unit
def swap16(u):
    """Byte-swap a 16-bit value (exchange its low and high bytes)."""
    lo = (u >> 8) & 0x00ff
    hi = (u << 8) & 0xff00
    return hi | lo
class DexInstruction(object):
opcode_defs = list()
@classmethod
def initialize(cls):
opcode_classes = [
Opcode00,
Opcode01,
Opcode02,
Opcode03,
Opcode04,
Opcode05,
Opcode06,
Opcode07,
Opcode08,
Opcode09,
Opcode0A_0D,
Opcode0E,
Opcode0F,
Opcode10,
Opcode11,
Opcode12,
Opcode13,
Opcode14,
Opcode15,
Opcode16,
Opcode17,
Opcode18,
Opcode19,
Opcode1A,
Opcode1B,
Opcode1C,
Opcode1D,
Opcode1E,
Opcode1F,
Opcode20,
Opcode21,
Opcode22,
Opcode23,
Opcode24,
Opcode25,
Opcode26,
Opcode27,
Opcode28,
Opcode29,
Opcode2A,
Opcode2B,
Opcode2C,
Opcode2D_31,
Opcode32_37,
Opcode38_3D,
Opcode44_51,
Opcode52_5f,
Opcode60_6d,
Opcode6E_72,
Opcode74_78,
Opcode7B_8F,
Opcode90_AF,
OpcodeB0_CF,
OpcodeD0_D7,
OpcodeD8_E2,
OpcodeFA,
]
for i in range(256):
cls.opcode_defs.append(None)
for opcode_class in opcode_classes:
for op in opcode_class.ops:
if cls.opcode_defs[op] is None:
cls.opcode_defs[op] = opcode_class
else:
raise ValueError("registering the same opcode twice: "
"%#2.2x in %s" % (op, str(opcode_class)))
def dump(self, f=sys.stdout, suffix='\n'):
f.write('%4.4x:' % (self.code_unit_idx))
for code_unit in self.code_units:
f.write(' %4.4x' % (swap16(code_unit)))
num_code_units = len(self.code_units)
if num_code_units < 5:
pad = 5 - num_code_units
for i in range(pad):
f.write(' ')
f.write(' ')
self.instruction.dump(f=f)
if suffix:
f.write(suffix)
def __init__(self):
self.code_unit_idx = -1
self.code_units = None
def check_encoding(self, f=sys.stdout):
bytes_saved = self.instruction.check_encoding(f)
if bytes_saved:
self.dump(f)
return bytes_saved
def new_encoding(self, f=sys.stdout):
bytes_saved = self.instruction.new_encoding(f)
if bytes_saved:
self.dump(f)
return bytes_saved
def get_code_unit_index(self):
return self.code_unit_idx
def decode(self, code_units):
self.code_unit_idx = code_units.get_index()
self.code_units = list()
self.code_units.append(code_units.get_code_unit())
op = self.get_op()
opcode_class = self.opcode_defs[op]
if opcode_class is None:
raise ValueError("unsupported opcode %#4.4x" % (swap16(self[0])))
for i in range(1, opcode_class.num_code_units):
self.code_units.append(code_units.get_code_unit())
self.instruction = opcode_class(self, code_units)
def get_name(self):
return self.instruction.get_name()
def get_num_code_units(self):
return self.instruction.get_num_code_units()
def get_op(self):
'''Return the 1 byte op field that tells us what instruction this is'''
return self.code_units[0] & 0xff
def get_A(self):
'''Get the 4 bit value of A'''
return (self.code_units[0] >> 8) & 0xf
def get_B(self):
'''Get the 4 bit value of B'''
return (self.code_units[0] >> 12) & 0xf
def get_AA(self):
'''Get the 8 bit value of AA from the byte next to the Op'''
return self.get_uint8_hi(0)
def get_signed_AA(self):
return sign_extending(self.get_AA(), 8)
def get_uint8_lo(self, idx):
return self.code_units[idx] & 0xff
def get_sint8_lo(self, idx):
return sign_extending(self.get_uint8_lo(), 8)
def get_uint8_hi(self, idx):
return (self.code_units[idx] >> 8) & 0xff
def get_sint8_hi(self, idx):
return sign_extending(self.get_uint8_hi(), 8)
def get_uint16(self, idx):
return self.code_units[idx]
def get_sint16(self, idx):
return sign_extending(self.get_uint16(), 16)
def get_uint32(self, idx):
return self.code_units[idx + 1] << 16 | self.code_units[idx]
def get_sint32(self, idx):
return sign_extending(self.get_uint32(idx), 32)
def get_uint64(self, idx):
return (self.code_units[idx + 3] << 48 |
self.code_units[idx + 2] << 32 |
self.code_units[idx + 1] << 16 |
self.code_units[idx])
def get_sint64(self, idx):
return sign_extending(self.get_uint64(idx), 64)
def __len__(self):
'''Overload the length operator to give out the number of code units'''
return len(self.code_units)
def __getitem__(self, key):
'''Overload the [] operator to give out code units'''
return self.code_units[key]
def emulate(self, emulator):
self.instruction.emulate(emulator)
DexInstruction.initialize()
def get_percentage(part, total):
    """Return *part* expressed as a percentage of *total*."""
    return 100.0 * float(part) / float(total)
def print_code_stats(size, total_size, file_size):
    # Report (as an "error:" line) how many code bytes could be saved by
    # more efficient opcode encodings, as a percentage of both the code
    # section size and the whole file size.
    code_savings = get_percentage(size, total_size)
    file_savings = get_percentage(size, file_size)
    print('error: %u of %u code bytes (%u file bytes) ' % (size, total_size,
          file_size), end='')
    print('could be saved by encoding opcodes more efficiently ', end='')
    print('(%2.2f%% code savings, %2.2f%% file savings).\n' % (code_savings,
          file_savings))
def print_debug_stats(size, file_size):
    """Report how many debug-info bytes could be saved by a more efficient
    encoding, as a percentage of the whole file size."""
    file_savings = get_percentage(size, file_size)
    message = ('error: %u debug info bytes of %u file ' % (size, file_size) +
               'bytes could be saved by encoding debug info more ' +
               'efficiently (%2.2f%% file savings).\n' % (file_savings))
    print(message, end='\n')
def print_encoding_stats(size, total_size, file_size):
    """Report how many code bytes could be saved by more efficient opcode
    encodings, as a percentage of code size and of file size."""
    code_savings = get_percentage(size, total_size)
    file_savings = get_percentage(size, file_size)
    # Fixed: the message previously read "... could be saved could be
    # saved by ..." because the phrase appeared in both print calls.
    print('%u of %u code bytes ' % (size, total_size), end='')
    print('could be saved by encoding opcodes more efficiently ', end='')
    print('(%2.2f%% code savings, %2.2f%% file savings).\n' % (code_savings,
                                                               file_savings))
class DexEmulator(object):
    """Minimal register-file model used when emulating DEX instructions.

    Registers are kept in a dict keyed by register number; ``pc`` tracks
    the current program counter.
    """

    def __init__(self):
        self.registers = dict()
        self.pc = 0

    def read_register(self, reg):
        """Return the value of ``reg``; raise if it was never written."""
        try:
            return self.registers[reg]
        except KeyError:
            raise ValueError("reading register with no value")

    def write_register(self, reg, value):
        """Store ``value`` into register ``reg``."""
        self.registers[reg] = value

    def emulate(self, uint16_array):
        """Emulate a code-unit array (not implemented yet)."""
        pass
def main():
    """Command-line entry point: parse options, then dump/analyze each DEX file.

    Accumulates per-file and cross-file statistics (inefficient encodings,
    opcode byte sizes, duplicated strings) and prints the requested reports.
    """
    usage = 'Usage: dex.py [options] [dex file(s)]'
    parser = optparse.OptionParser(
        usage=usage,
        description='A script that parses DEX files.')
    parser.add_option('-v', '--verbose',
                      action='store_true',
                      dest='verbose',
                      help='display verbose debug info',
                      default=False)
    parser.add_option('-C', '--color',
                      action='store_true',
                      dest='color',
                      help='Enable colorized output',
                      default=False)
    parser.add_option('-a', '--all',
                      action='store_true',
                      dest='dump_all',
                      help='Dump all DEX sections.',
                      default=False)
    parser.add_option('-H', '--header',
                      action='store_true',
                      dest='dump_header',
                      help='Dump the DEX file header.',
                      default=False)
    parser.add_option('--map-list',
                      action='store_true',
                      dest='dump_map_list',
                      help='Dump the DEX map list info.',
                      default=False)
    parser.add_option('-s', '--strings',
                      action='store_true',
                      dest='dump_strings',
                      help='Dump the DEX strings.',
                      default=False)
    parser.add_option('-t', '--types',
                      action='store_true',
                      dest='dump_types',
                      help='Dump the DEX types.',
                      default=False)
    parser.add_option('-p', '--protos',
                      action='store_true',
                      dest='dump_protos',
                      help='Dump the DEX protos.',
                      default=False)
    parser.add_option('-f', '--fields',
                      action='store_true',
                      dest='dump_fields',
                      help='Dump the DEX fields.',
                      default=False)
    parser.add_option('-m', '--methods',
                      action='store_true',
                      dest='dump_methods',
                      help='Dump the DEX methods.',
                      default=False)
    parser.add_option('--method-handles',
                      action='store_true',
                      dest='dump_method_handles',
                      help='Dump the DEX method handles.',
                      default=False)
    parser.add_option('--classes',
                      action='store_true',
                      dest='dump_classes',
                      help='Dump the DEX classes.',
                      default=False)
    parser.add_option('--class',
                      dest='class_filter',
                      help='Find a class by name. ' +
                           'Accepts `Lpath/to/Class;` or `path.to.Class`',
                      default=None)
    parser.add_option('--method',
                      dest='method_filter',
                      help='Find a method by name. Must be used with --class',
                      default=None)
    parser.add_option('--call-sites',
                      action='store_true',
                      dest='dump_call_sites',
                      help='Dump the DEX call sites.',
                      default=False)
    parser.add_option('--code',
                      action='store_true',
                      dest='dump_code',
                      help='Dump the DEX code in all class methods.',
                      default=False)
    parser.add_option('--code-items',
                      action='store_true',
                      dest='dump_code_items',
                      help='Dump the DEX code items.',
                      default=False)
    parser.add_option('--code-duplication',
                      action='store_true',
                      dest='code_duplication',
                      help=('Dump any methods in the DEX file that have the '
                            'same instructions.'),
                      default=False)
    parser.add_option('--debug',
                      action='store_true',
                      dest='debug',
                      help='Dump the DEX debug info.',
                      default=False)
    parser.add_option('-d', '--disassemble',
                      action='store_true',
                      dest='dump_disassembly',
                      help='Dump the DEX code items instructions.',
                      default=False)
    parser.add_option('--stats',
                      action='store_true',
                      dest='dump_stats',
                      help='Dump the DEX opcode statistics.',
                      default=False)
    parser.add_option('--check-encoding',
                      action='store_true',
                      dest='check_encoding',
                      help='Verify opcodes are efficiently encoded.',
                      default=False)
    parser.add_option('--new-encoding',
                      action='store_true',
                      dest='new_encoding',
                      help='Report byte savings from potential new encodings.',
                      default=False)
    parser.add_option('--proguard',
                      dest='proguard',
                      help='Specify a progard file to use for demangling.',
                      default=None)
    (options, files) = parser.parse_args()
    # Cross-file accumulators for the end-of-run summary.
    total_code_bytes_inefficiently_encoded = 0
    total_debug_info_bytes_inefficiently_encoded = 0
    total_new_code_bytes_inefficiently_encoded = 0
    total_opcode_byte_size = 0
    total_file_size = 0
    op_name_to_size = {}
    string_counts = {}
    i = 0
    if len(files) == 0:
        print('No input files. {}'.format(usage))
        return
    # Process each DEX file independently, accumulating totals as we go.
    for (i, path) in enumerate(files):
        if os.path.splitext(path)[1] == '.apk':
            print('error: dex.py operates on dex files, please unpack your apk')
            return
        print('Dex file: %s' % (path))
        file_size = os.path.getsize(path)
        total_file_size += file_size
        dex = File(path, options.proguard)
        if options.class_filter:
            dex_class = dex.find_class(options.class_filter)
            if dex_class:
                if options.method_filter is None:
                    dex_class.dump()
                for method in dex_class.get_methods():
                    method_name = method.get_name()
                    if options.method_filter:
                        if options.method_filter != method_name:
                            continue
                    method.dump()
            else:
                print('error: class definition not found for "%s"' % (
                    options.class_filter))
        # Section dumps requested via individual flags or --all.
        if options.dump_header or options.dump_all:
            dex.dump_header(options)
            print('')
        if options.dump_map_list or options.dump_all:
            dex.dump_map_list(options)
        if options.dump_strings or options.dump_all:
            dex.dump_string_ids(options)
        if options.dump_types or options.dump_all:
            dex.dump_type_ids(options)
        if options.dump_protos or options.dump_all:
            dex.dump_proto_ids(options)
        if options.dump_fields or options.dump_all:
            dex.dump_field_ids(options)
        if options.dump_methods or options.dump_all:
            dex.dump_method_ids(options)
        if options.dump_classes or options.dump_all:
            dex.dump_class_defs(options)
        if options.dump_call_sites or options.dump_all:
            dex.dump_call_site_ids(options)
        if options.dump_method_handles or options.dump_all:
            dex.dump_method_handle_items(options)
        if options.dump_code or options.debug or options.dump_all:
            dex.dump_code(options)
        if options.dump_code_items:
            dex.dump_code_items(options)
        # Per-method walk for disassembly, statistics and encoding checks.
        if (options.dump_disassembly or options.dump_stats or
                options.check_encoding or options.new_encoding):
            if options.dump_stats:
                for string_item in dex.get_strings():
                    if string_item.data not in string_counts:
                        string_counts[string_item.data] = 0
                    string_counts[string_item.data] += 1
            code_bytes_inefficiently_encoded = 0
            debug_info_bytes_inefficiently_encoded = 0
            new_code_bytes_inefficiently_encoded = 0
            file_opcodes_byte_size = 0
            classes = dex.get_classes()
            used_code_item_indexes = list()
            for cls in classes:
                methods = cls.get_methods()
                for method in methods:
                    if options.dump_disassembly or options.debug:
                        method.dump(
                            f=sys.stdout, dump_code=options.dump_disassembly,
                            dump_debug_info=options.debug)
                    opcodes_bytes_size = method.get_code_byte_size()
                    file_opcodes_byte_size += opcodes_bytes_size
                    total_opcode_byte_size += opcodes_bytes_size
                    if (options.dump_stats or options.check_encoding or
                            options.new_encoding):
                        for dex_inst in method.get_instructions():
                            if options.dump_stats:
                                op_name = dex_inst.get_name()
                                size = dex_inst.get_num_code_units() * 2
                                if op_name not in op_name_to_size:
                                    op_name_to_size[op_name] = 0
                                op_name_to_size[op_name] += size
                            if options.check_encoding:
                                code_bytes_inefficiently_encoded += (
                                    dex_inst.check_encoding())
                            if options.new_encoding:
                                new_code_bytes_inefficiently_encoded += (
                                    dex_inst.new_encoding())
                        if options.check_encoding:
                            code_item_idx = method.get_code_item_index()
                            if code_item_idx >= 0:
                                used_code_item_indexes.append(code_item_idx)
                            debug_info = method.get_debug_info()
                            if debug_info:
                                debug_info_bytes_inefficiently_encoded += (
                                    method.check_debug_info_encoding())
            if options.check_encoding:
                efficiently_encoded = True
                if code_bytes_inefficiently_encoded > 0:
                    efficiently_encoded = False
                    total_code_bytes_inefficiently_encoded += (
                        code_bytes_inefficiently_encoded)
                    print_code_stats(code_bytes_inefficiently_encoded,
                                     file_opcodes_byte_size, file_size)
                if debug_info_bytes_inefficiently_encoded > 0:
                    efficiently_encoded = False
                    total_debug_info_bytes_inefficiently_encoded += (
                        debug_info_bytes_inefficiently_encoded)
                    print_debug_stats(debug_info_bytes_inefficiently_encoded,
                                      file_size)
                # Verify that all code items are used.
                used_code_item_indexes.sort()
                prev_ci_idx = 0
                for ci_idx in used_code_item_indexes:
                    if ci_idx != prev_ci_idx:
                        efficiently_encoded = False
                        for idx in range(prev_ci_idx + 1, ci_idx):
                            print('code_item[%u] is not used and its '
                                  'code_item can be removed' % (idx))
                    prev_ci_idx = ci_idx
                if efficiently_encoded:
                    print('file is efficiently encoded.')
            if options.new_encoding:
                if new_code_bytes_inefficiently_encoded > 0:
                    total_new_code_bytes_inefficiently_encoded += (
                        new_code_bytes_inefficiently_encoded)
                    print_encoding_stats(new_code_bytes_inefficiently_encoded,
                                         file_opcodes_byte_size, file_size)
                else:
                    print('file is efficiently encoded.')
        if options.code_duplication:
            dex.report_code_duplication()
    # Aggregate reports that only make sense after all files are parsed.
    if options.dump_stats:
        duped_strings_byte_size = 0
        for s in string_counts:
            count = string_counts[s]
            if count > 1:
                s_len = len(s)
                duped_strings_byte_size += (count - 1) * \
                    s_len + get_uleb128_byte_size(s_len)
        if duped_strings_byte_size > 0:
            print('%u bytes in duplicated strings across dex files.' % (
                duped_strings_byte_size))
        print('BYTESIZE %AGE OPCODE')
        print('======== ===== =================================')
        sorted_x = sorted(op_name_to_size.items(),
                          key=operator.itemgetter(1))
        for (op_name, byte_size) in sorted_x:
            percentage = get_percentage(byte_size, total_opcode_byte_size)
            print('%-8u %5.2f %s' % (byte_size, percentage, op_name))
        print('-------- ----- ---------------------------------')
        print('%-8u 100.0' % (total_opcode_byte_size))
    # i is the last enumerate() index, so i > 0 means multiple input files.
    if i > 0:
        if options.check_encoding:
            if total_code_bytes_inefficiently_encoded > 0:
                print_code_stats(total_code_bytes_inefficiently_encoded,
                                 total_opcode_byte_size, total_file_size)
            if total_debug_info_bytes_inefficiently_encoded > 0:
                # NOTE(review): this assignment appears to have no effect at
                # this scope (nothing reads efficiently_encoded afterwards).
                efficiently_encoded = False
                print_debug_stats(total_debug_info_bytes_inefficiently_encoded,
                                  total_file_size)
        if options.new_encoding:
            # NOTE(review): can_use_new_encoding / cant_use_new_encoding are
            # not defined in this function; presumably module-level globals
            # maintained by DexInstruction.new_encoding() — confirm.
            invoke_kind_percentage = get_percentage(
                can_use_new_encoding,
                can_use_new_encoding + cant_use_new_encoding)
            print('%u invoke-kind opcodes could use new encoding' % (
                can_use_new_encoding), end='')
            print('%u could not (%2.2f%%)' % (cant_use_new_encoding,
                                              invoke_kind_percentage))
            if total_new_code_bytes_inefficiently_encoded > 0:
                print_encoding_stats(
                    total_new_code_bytes_inefficiently_encoded,
                    total_opcode_byte_size, total_file_size)
# Script entry point: only run main() when executed directly, not on import.
if __name__ == '__main__':
    main()
| 33.531599 | 80 | 0.546804 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bisect
import copy
import dict_utils
import file_extract
from file_extract import AutoParser
import numbers
import operator
import optparse
import os
import re
import six
import string
import sys
import StringIO
def get_uleb128_byte_size(value):
    """Return how many bytes a ULEB128 encoding of ``value`` occupies.

    ULEB128 stores 7 payload bits per byte, so the size is the number of
    7-bit groups needed to represent the value (at least one).
    """
    num_bytes = 1
    remaining = value >> 7
    while remaining:
        num_bytes += 1
        remaining >>= 7
    return num_bytes
def get_uleb128p1_byte_size(value):
    """Return the encoded size of ``value`` in ULEB128p1 (value plus one)."""
    return get_uleb128_byte_size(value + 1)
# DEX file magic and special marker values.
MAGIC = "dex\n"
ENDIAN_CONSTANT = 0x12345678
REVERSE_ENDIAN_CONSTANT = 0x78563412
NO_INDEX = 0xffffffff
# Signed/unsigned integer range limits used when checking operand encodings.
INT4_MIN = -8
INT4_MAX = 7
INT8_MIN = -128
INT8_MAX = 127
INT16_MIN = -32768
INT16_MAX = 32767
INT24_MIN = -8388608
INT24_MAX = 8388607
INT32_MIN = -2147483648
INT32_MAX = 2147483647
UINT4_MAX = 15
UINT8_MAX = 255
UINT16_MAX = 65535
UINT32_MAX = 4294967295
# access_flags bit values for classes, fields and methods.
ACC_PUBLIC = 0x1
ACC_PRIVATE = 0x2
ACC_PROTECTED = 0x4
ACC_STATIC = 0x8
ACC_FINAL = 0x10
ACC_SYNCHRONIZED = 0x20
ACC_VOLATILE = 0x40
ACC_BRIDGE = 0x40
ACC_TRANSIENT = 0x80
ACC_VARARGS = 0x80
ACC_NATIVE = 0x100
ACC_INTERFACE = 0x200
ACC_ABSTRACT = 0x400
ACC_STRICT = 0x800
ACC_SYNTHETIC = 0x1000
ACC_ANNOTATION = 0x2000
ACC_ENUM = 0x4000
ACC_CONSTRUCTOR = 0x10000
ACC_DECLARED_SYNCHRONIZED = 0x20000
# encoded_value type tags (the value_type field of an encoded_value).
VALUE_BYTE = 0x00
VALUE_SHORT = 0x02
VALUE_CHAR = 0x03
VALUE_INT = 0x04
VALUE_LONG = 0x06
VALUE_FLOAT = 0x10
VALUE_DOUBLE = 0x11
VALUE_METHOD_TYPE = 0x15
VALUE_METHOD_HANDLE = 0x16
VALUE_STRING = 0x17
VALUE_TYPE = 0x18
VALUE_FIELD = 0x19
VALUE_METHOD = 0x1a
VALUE_ENUM = 0x1b
VALUE_ARRAY = 0x1c
VALUE_ANNOTATION = 0x1d
VALUE_NULL = 0x1e
VALUE_BOOLEAN = 0x1f
class ValueFormat(dict_utils.Enum):
    """Enum wrapper mapping encoded_value type-tag names to VALUE_* values."""
    enum = {
        'VALUE_BYTE': VALUE_BYTE,
        'VALUE_SHORT': VALUE_SHORT,
        'VALUE_CHAR': VALUE_CHAR,
        'VALUE_INT': VALUE_INT,
        'VALUE_LONG': VALUE_LONG,
        'VALUE_FLOAT': VALUE_FLOAT,
        'VALUE_DOUBLE': VALUE_DOUBLE,
        'VALUE_METHOD_TYPE': VALUE_METHOD_TYPE,
        'VALUE_METHOD_HANDLE': VALUE_METHOD_HANDLE,
        'VALUE_STRING': VALUE_STRING,
        'VALUE_TYPE': VALUE_TYPE,
        'VALUE_FIELD': VALUE_FIELD,
        'VALUE_METHOD': VALUE_METHOD,
        'VALUE_ENUM': VALUE_ENUM,
        'VALUE_ARRAY': VALUE_ARRAY,
        'VALUE_ANNOTATION': VALUE_ANNOTATION,
        'VALUE_NULL': VALUE_NULL,
        'VALUE_BOOLEAN': VALUE_BOOLEAN,
    }
    def __init__(self, data):
        # Reads the tag as a uint16 from the extractor-like `data` object.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# map_list item type codes (the `type` field of each map_item).
TYPE_HEADER_ITEM = 0x0000
TYPE_STRING_ID_ITEM = 0x0001
TYPE_TYPE_ID_ITEM = 0x0002
TYPE_PROTO_ID_ITEM = 0x0003
TYPE_FIELD_ID_ITEM = 0x0004
TYPE_METHOD_ID_ITEM = 0x0005
TYPE_CLASS_DEF_ITEM = 0x0006
TYPE_CALL_SITE_ID_ITEM = 0x0007
TYPE_METHOD_HANDLE_ITEM = 0x0008
TYPE_MAP_LIST = 0x1000
TYPE_TYPE_LIST = 0x1001
TYPE_ANNOTATION_SET_REF_LIST = 0x1002
TYPE_ANNOTATION_SET_ITEM = 0x1003
TYPE_CLASS_DATA_ITEM = 0x2000
TYPE_CODE_ITEM = 0x2001
TYPE_STRING_DATA_ITEM = 0x2002
TYPE_DEBUG_INFO_ITEM = 0x2003
TYPE_ANNOTATION_ITEM = 0x2004
TYPE_ENCODED_ARRAY_ITEM = 0x2005
TYPE_ANNOTATIONS_DIRECTORY_ITEM = 0x2006
class TypeCode(dict_utils.Enum):
    """Enum wrapper mapping map_list section names to TYPE_* type codes."""
    enum = {
        'TYPE_HEADER_ITEM': TYPE_HEADER_ITEM,
        'TYPE_STRING_ID_ITEM': TYPE_STRING_ID_ITEM,
        'TYPE_TYPE_ID_ITEM': TYPE_TYPE_ID_ITEM,
        'TYPE_PROTO_ID_ITEM': TYPE_PROTO_ID_ITEM,
        'TYPE_FIELD_ID_ITEM': TYPE_FIELD_ID_ITEM,
        'TYPE_METHOD_ID_ITEM': TYPE_METHOD_ID_ITEM,
        'TYPE_CLASS_DEF_ITEM': TYPE_CLASS_DEF_ITEM,
        'TYPE_CALL_SITE_ID_ITEM': TYPE_CALL_SITE_ID_ITEM,
        'TYPE_METHOD_HANDLE_ITEM': TYPE_METHOD_HANDLE_ITEM,
        'TYPE_MAP_LIST': TYPE_MAP_LIST,
        'TYPE_TYPE_LIST': TYPE_TYPE_LIST,
        'TYPE_ANNOTATION_SET_REF_LIST': TYPE_ANNOTATION_SET_REF_LIST,
        'TYPE_ANNOTATION_SET_ITEM': TYPE_ANNOTATION_SET_ITEM,
        'TYPE_CLASS_DATA_ITEM': TYPE_CLASS_DATA_ITEM,
        'TYPE_CODE_ITEM': TYPE_CODE_ITEM,
        'TYPE_STRING_DATA_ITEM': TYPE_STRING_DATA_ITEM,
        'TYPE_DEBUG_INFO_ITEM': TYPE_DEBUG_INFO_ITEM,
        'TYPE_ANNOTATION_ITEM': TYPE_ANNOTATION_ITEM,
        'TYPE_ENCODED_ARRAY_ITEM': TYPE_ENCODED_ARRAY_ITEM,
        'TYPE_ANNOTATIONS_DIRECTORY_ITEM': TYPE_ANNOTATIONS_DIRECTORY_ITEM,
    }
    def __init__(self, data):
        # Reads the type code as a uint16 from the extractor.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
    def dump(self, prefix=None, f=sys.stdout, print_name=True,
             parent_path=None):
        # Write the enum's string form; extra args match other dump() APIs.
        f.write(str(self))
# method_handle_item type codes.
METHOD_HANDLE_TYPE_STATIC_PUT = 0x00
METHOD_HANDLE_TYPE_STATIC_GET = 0x01
METHOD_HANDLE_TYPE_INSTANCE_PUT = 0x02
METHOD_HANDLE_TYPE_INSTANCE_GET = 0x03
METHOD_HANDLE_TYPE_INVOKE_STATIC = 0x04
METHOD_HANDLE_TYPE_INVOKE_INSTANCE = 0x05
class MethodHandleTypeCode(dict_utils.Enum):
    """Enum wrapper mapping method-handle type names to their codes."""
    enum = {
        'METHOD_HANDLE_TYPE_STATIC_PUT': METHOD_HANDLE_TYPE_STATIC_PUT,
        'METHOD_HANDLE_TYPE_STATIC_GET': METHOD_HANDLE_TYPE_STATIC_GET,
        'METHOD_HANDLE_TYPE_INSTANCE_PUT': METHOD_HANDLE_TYPE_INSTANCE_PUT,
        'METHOD_HANDLE_TYPE_INSTANCE_GET': METHOD_HANDLE_TYPE_INSTANCE_GET,
        'METHOD_HANDLE_TYPE_INVOKE_STATIC': METHOD_HANDLE_TYPE_INVOKE_STATIC,
        'METHOD_HANDLE_TYPE_INVOKE_INSTANCE':
            METHOD_HANDLE_TYPE_INVOKE_INSTANCE,
    }
    def __init__(self, data):
        # Reads the handle type as a uint16 from the extractor.
        dict_utils.Enum.__init__(self, data.get_uint16(), self.enum)
# Characters that are emitted as-is by escape().
PRINTABLE = string.ascii_letters + string.digits + string.punctuation + ' '


def escape(c):
    """Return printable characters unchanged, otherwise a \\xNN, \\uNNNN or
    \\UNNNNNNNN escape depending on the code point's magnitude.

    Bug fix: the original compared the integer code point against the
    *string* '\\uffff' (``c <= '\\uffff'``), which raises TypeError on
    Python 3; compare against the integer 0xffff instead.
    """
    if c in PRINTABLE:
        return c
    c = ord(c)
    if c <= 0xff:
        return '\\x' + '%02.2x' % (c)
    elif c <= 0xffff:
        return '\\u' + '%04.4x' % (c)
    else:
        return '\\U' + '%08.8x' % (c)
def print_string(s, f):
    """Write ``s`` to ``f`` as a double-quoted string with every character
    passed through escape()."""
    escaped = ''.join(escape(ch) for ch in s)
    f.write('"%s"' % (escaped))
def print_version(version, f):
    """Write a dotted x.y.z version string to ``f``.

    Writes nothing unless ``version`` has exactly three components.
    """
    if len(version) != 3:
        return
    f.write("%u.%u.%u" % (version[0], version[1], version[2]))
def print_hex_bytes(data, f):
    """Write each byte of ``data`` to ``f`` as two lowercase hex digits,
    with no separators."""
    f.write(''.join("%2.2x" % (b) for b in data))
def print_endian(value, f):
    """Write ``value`` as hex, annotating the two known endian markers."""
    f.write("%#8.8x" % (value))
    annotations = {
        ENDIAN_CONSTANT: " (ENDIAN_CONSTANT)",
        REVERSE_ENDIAN_CONSTANT: " (REVERSE_ENDIAN_CONSTANT)",
    }
    suffix = annotations.get(value)
    if suffix is not None:
        f.write(suffix)
def is_zero(value):
    """Validator: return None when ``value`` is zero, else an error string.

    Bug fix: the failure message read "bit is"; corrected to "but is" to
    match the phrasing used by is_dex_magic().
    """
    if value == 0:
        return None
    return 'value should be zero, but is %s' % (str(value))
def is_dex_magic(magic):
    """Validator: return None when ``magic`` matches the DEX magic bytes,
    else an error string describing the mismatch."""
    if magic != MAGIC:
        return 'value should be %s but is %s' % (MAGIC, magic)
    return None
def hex_escape(s):
    """Return ``s`` with every character passed through escape()."""
    pieces = [escape(ch) for ch in s]
    return ''.join(pieces)
class encoded_field(AutoParser):
    """One encoded_field entry of a DEX class_data_item."""
    items = [
        {'type': 'uleb', 'name': 'field_idx', 'format': '%u'},
        {'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    @classmethod
    def fixup_indexes(cls, items):
        # Stored field_idx values are deltas from the previous entry;
        # accumulate them into absolute indexes.
        for i in range(1, len(items)):
            items[i].field_idx += items[i - 1].field_idx
    @classmethod
    def get_table_header(self):
        # NOTE(review): classmethod first arg is conventionally `cls`.
        return 'FIELD FLAGS\n'
    def get_dump_flat(self):
        return True
class encoded_method(AutoParser):
    """One encoded_method entry of a DEX class_data_item."""
    items = [
        {'type': 'uleb', 'name': 'method_idx', 'format': '%u'},
        {'type': 'uleb', 'name': 'access_flags', 'format': '0x%8.8x'},
        {'type': 'uleb', 'name': 'code_off', 'format': '0x%8.8x'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    @classmethod
    def fixup_indexes(cls, items):
        # Stored method_idx values are deltas from the previous entry;
        # accumulate them into absolute indexes.
        for i in range(1, len(items)):
            items[i].method_idx += items[i - 1].method_idx
    @classmethod
    def get_table_header(self):
        # NOTE(review): classmethod first arg is conventionally `cls`.
        return 'METHOD FLAGS\n'
    def get_dump_flat(self):
        return True
class class_data_item(AutoParser):
    """Parsed class_data_item: member counts plus the four member lists."""
    items = [
        {'type': 'uleb', 'name': 'static_fields_size'},
        {'type': 'uleb', 'name': 'instance_fields_size'},
        {'type': 'uleb', 'name': 'direct_methods_size'},
        {'type': 'uleb', 'name': 'virtual_methods_size'},
        {'class': encoded_field, 'name': 'static_fields',
         'attr_count': 'static_fields_size', 'flat': True},
        {'class': encoded_field, 'name': 'instance_fields',
         'attr_count': 'instance_fields_size', 'flat': True},
        {'class': encoded_method, 'name': 'direct_methods',
         'attr_count': 'direct_methods_size', 'flat': True},
        {'class': encoded_method, 'name': 'virtual_methods',
         'attr_count': 'virtual_methods_size', 'flat': True},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        # Convert each list's delta-encoded indexes to absolute values.
        encoded_field.fixup_indexes(self.static_fields)
        encoded_field.fixup_indexes(self.instance_fields)
        encoded_method.fixup_indexes(self.direct_methods)
        encoded_method.fixup_indexes(self.virtual_methods)
    @classmethod
    def create_empty(cls):
        # Four zero ULEB128 counts == a class_data_item with no members.
        # NOTE(review): StringIO.StringIO is Python 2 only — confirm the
        # supported interpreter version for this file.
        data = file_extract.FileExtract(StringIO.StringIO('\0\0\0\0'), '=')
        return class_data_item(data)
class class_def_item(AutoParser):
    """Parsed class_def_item from the class_defs section."""
    items = [
        {'type': 'u32', 'name': 'class_idx', 'align': 4},
        {'type': 'u32', 'name': 'access_flags'},
        {'type': 'u32', 'name': 'superclass_idx'},
        {'type': 'u32', 'name': 'interfaces_off'},
        {'type': 'u32', 'name': 'source_file_idx'},
        {'type': 'u32', 'name': 'annotations_off'},
        {'type': 'u32', 'name': 'class_data_off'},
        {'type': 'u32', 'name': 'static_values_off'},
        # class_data is only parsed when class_data_off is non-zero;
        # otherwise an empty placeholder is used.
        {'class': class_data_item, 'name': 'class_data',
         'attr_offset': 'class_data_off',
         'condition': lambda item, data: item.class_data_off != 0,
         'dump': False,
         'default': class_data_item.create_empty()},
    ]
    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)
    @classmethod
    def get_table_header(self):
        # NOTE(review): classmethod first arg is conventionally `cls`.
        return ('CLASS ACCESS SUPERCLASS INTERFACES SOURCE'
                ' ANNOTATION CLASS_DATA STATIC_VALUES\n')
    def get_dump_flat(self):
        return True
    def find_encoded_method_by_code_off(self, code_off):
        """Return the direct or virtual method whose code_off matches,
        or None."""
        for encoded_method in self.class_data.direct_methods:
            if encoded_method.code_off == code_off:
                return encoded_method
        for encoded_method in self.class_data.virtual_methods:
            if encoded_method.code_off == code_off:
                return encoded_method
        return None
class try_item(AutoParser):
    """One try_item of a code_item: covered instruction range and handler."""
    items = [
        {'type': 'u32', 'name': 'start_addr'},
        {'type': 'u16', 'name': 'insn_count'},
        {'type': 'u16', 'name': 'handler_off'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    def get_dump_flat(self):
        return True
class encoded_type_addr_pair(AutoParser):
    """One (exception type index, handler address) pair of a catch handler."""
    items = [
        {'type': 'uleb', 'name': 'type_idx', 'format': '%#8.8x'},
        {'type': 'uleb', 'name': 'addr', 'format': '%#8.8x'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    def get_dump_flat(self):
        return True
class encoded_catch_handler(AutoParser):
    """One encoded_catch_handler: typed handlers plus optional catch-all.

    A non-positive `size` signals that a catch_all_addr follows; the
    number of typed handlers is abs(size).
    """
    items = [
        {'type': 'sleb', 'name': 'size'},
        {'class': encoded_type_addr_pair, 'name': 'handlers',
         'attr_count': 'size', 'attr_count_fixup': abs},
        {'type': 'uleb', 'name': 'catch_all_addr', 'default': 0,
         'condition': lambda item, data: item.size <= 0},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    def get_dump_flat(self):
        return True
class encoded_catch_handler_list(AutoParser):
    """A counted list of encoded_catch_handler entries."""
    items = [
        {'type': 'uleb', 'name': 'size'},
        {'class': encoded_catch_handler, 'name': 'list', 'attr_count': 'size'}
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    def get_dump_flat(self):
        return True
def print_instructions(insns, prefix, flat, f):
    """Disassemble the 16-bit code units in ``insns``, writing each decoded
    instruction to ``f``, optionally preceded by ``prefix``."""
    f.write('\n')
    units = CodeUnits(insns)
    inst = DexInstruction()
    while units.index_is_valid():
        inst.decode(units)
        if prefix:
            f.write(prefix)
        f.write(' ')
        inst.dump()
# Opcodes of the debug_info_item bytecode state machine.
DBG_END_SEQUENCE = 0x00
DBG_ADVANCE_PC = 0x01
DBG_ADVANCE_LINE = 0x02
DBG_START_LOCAL = 0x03
DBG_START_LOCAL_EXTENDED = 0x04
DBG_END_LOCAL = 0x05
DBG_RESTART_LOCAL = 0x06
DBG_SET_PROLOGUE_END = 0x07
DBG_SET_EPILOGUE_BEGIN = 0x08
DBG_SET_FILE = 0x09
# Opcodes >= DBG_FIRST_SPECIAL advance both line and address using the
# base/range constants below.
DBG_FIRST_SPECIAL = 0x0a
DBG_LINE_BASE = -4
DBG_LINE_RANGE = 15
class DBG(dict_utils.Enum):
    """Enum wrapper mapping debug-info opcode names to DBG_* values."""
    enum = {
        'DBG_END_SEQUENCE': DBG_END_SEQUENCE,
        'DBG_ADVANCE_PC': DBG_ADVANCE_PC,
        'DBG_ADVANCE_LINE': DBG_ADVANCE_LINE,
        'DBG_START_LOCAL': DBG_START_LOCAL,
        'DBG_START_LOCAL_EXTENDED': DBG_START_LOCAL_EXTENDED,
        'DBG_END_LOCAL': DBG_END_LOCAL,
        'DBG_RESTART_LOCAL': DBG_RESTART_LOCAL,
        'DBG_SET_PROLOGUE_END': DBG_SET_PROLOGUE_END,
        'DBG_SET_EPILOGUE_BEGIN': DBG_SET_EPILOGUE_BEGIN,
        'DBG_SET_FILE': DBG_SET_FILE
    }
    def __init__(self, data):
        # Debug opcodes are a single byte.
        dict_utils.Enum.__init__(self, data.get_uint8(), self.enum)
    def dump(self, prefix=None, f=sys.stdout, print_name=True,
             parent_path=None):
        # Write the enum's string form; extra args match other dump() APIs.
        f.write(str(self))
class debug_info_op(AutoParser):
    """One opcode of the DEX debug-info state machine.

    Standard opcodes carry explicit operands (parsed via the switch table
    below); special opcodes (>= DBG_FIRST_SPECIAL) are decoded in
    __init__ into synthetic line_offset/addr_offset attributes.

    Bug fixes:
    - addr_offset for special opcodes now uses floor division: with
      ``from __future__ import division`` at the top of this file, plain
      ``/`` would produce a float address offset.
    - dump_opcode() now prints sig_idx for DBG_START_LOCAL_EXTENDED; it
      previously re-printed type_idx (copy/paste error).
    """
    items = [
        {'class': DBG, 'name': 'op'},
        {'switch': 'op', 'cases': {
            DBG_ADVANCE_PC: [
                {'type': 'uleb', 'name': 'addr_offset'}
            ],
            DBG_ADVANCE_LINE: [
                {'type': 'sleb', 'name': 'line_offset'},
            ],
            DBG_START_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'},
                {'type': 'ulebp1', 'name': 'name_idx'},
                {'type': 'ulebp1', 'name': 'type_idx'},
            ],
            DBG_START_LOCAL_EXTENDED: [
                {'type': 'uleb', 'name': 'register_num'},
                {'type': 'ulebp1', 'name': 'name_idx'},
                {'type': 'ulebp1', 'name': 'type_idx'},
                {'type': 'ulebp1', 'name': 'sig_idx'},
            ],
            DBG_END_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'}
            ],
            DBG_RESTART_LOCAL: [
                {'type': 'uleb', 'name': 'register_num'}
            ],
            DBG_SET_FILE: [
                {'type': 'ulebp1', 'name': 'name_idx'}
            ],
            'default': []
        }
        }
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        if self.op >= DBG_FIRST_SPECIAL:
            # Decode the packed special opcode into its two deltas.
            adjusted_opcode = int(self.op) - DBG_FIRST_SPECIAL
            line_offset = DBG_LINE_BASE + (adjusted_opcode % DBG_LINE_RANGE)
            # Floor division keeps the address offset an integer.
            addr_offset = (adjusted_opcode // DBG_LINE_RANGE)
            setattr(self, 'line_offset', line_offset)
            setattr(self, 'addr_offset', addr_offset)
        setattr(self, 'byte_size', data.tell() - self.get_offset())

    def get_dump_flat(self):
        return True

    def get_byte_size(self):
        """Return the encoded size of this opcode in bytes."""
        return self.byte_size

    def dump_opcode(self, f=sys.stdout):
        """Write a human-readable rendering of this opcode to ``f``."""
        f.write(str(self.op))
        if self.op == DBG_ADVANCE_PC:
            f.write('(%u)' % self.addr_offset)
        elif self.op == DBG_ADVANCE_LINE:
            f.write('(%u)' % self.line_offset)
        elif self.op == DBG_START_LOCAL:
            f.write('(register_num=%u, name_idx=' % self.register_num)
            if self.name_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.name_idx))
            f.write(', type_idx=')
            if self.type_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.type_idx))
        elif self.op == DBG_START_LOCAL_EXTENDED:
            f.write('(register_num=%u, name_idx=' % self.register_num)
            if self.name_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.name_idx))
            f.write(', type_idx=')
            if self.type_idx < 0:
                f.write('NO_INDEX')
            else:
                f.write('%u' % (self.type_idx))
            f.write(', sig_idx=')
            # Fixed: previously tested and printed type_idx here.
            if self.sig_idx < 0:
                f.write('NO_INDEX)')
            else:
                f.write('%u)' % (self.sig_idx))
        elif self.op == DBG_END_LOCAL or self.op == DBG_RESTART_LOCAL:
            f.write('(register_num=%u)' % self.register_num)
        elif self.op == DBG_SET_FILE:
            f.write('(name_idx=%u)' % self.name_idx)
        elif self.op >= DBG_FIRST_SPECIAL:
            f.write(' (addr_offset=%u, line_offset=%i)' %
                    (self.addr_offset, self.line_offset))
class debug_info_item(AutoParser):
    """Parsed debug_info_item: header fields plus lazily-decoded opcode
    stream and line table.

    Bug fixes in get_line_table():
    - self.line_table was never initialized to a list before rows were
      appended, so the first call raised AttributeError on None.
    - get_ops() returns debug_info_op objects, but the loop indexed each
      element as a tuple (op_args[0]); the opcode is now read via op.op.
    """
    items = [
        {'type': 'uleb', 'name': 'line_start'},
        {'type': 'uleb', 'name': 'parameters_size'},
        {'type': 'ulebp1', 'name': 'parameter_names',
         'attr_count': 'parameters_size'},
    ]

    class row(object):
        """One decoded line-table row (address -> line mapping)."""
        def __init__(self):
            self.address = 0
            self.line = 1
            self.source_file = -1
            self.prologue_end = False
            self.epilogue_begin = False

        def dump(self, f=sys.stdout):
            f.write('0x%4.4x %5u %5u ' %
                    (self.address, self.line, self.source_file))
            if self.prologue_end or self.epilogue_begin:
                if self.prologue_end:
                    f.write('P ')
                else:
                    f.write('  ')
                if self.epilogue_begin:
                    f.write('E')
            f.write('\n')

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        self.data = data
        self.ops = None          # decoded opcode list, filled by get_ops()
        self.line_table = None   # decoded rows, filled by get_line_table()
        self.debug_info_offset = data.tell()

    def check_encoding(self, dex_method, f=sys.stdout):
        """Report debug-info bytes that could be removed; return the count."""
        bytes_saved = 0
        ops = self.get_ops()
        # A lone DBG_END_SEQUENCE means the whole item is dead weight.
        if len(ops) == 1:
            op = ops[0]
            if op.op == DBG_END_SEQUENCE:
                bytes_saved += (get_uleb128_byte_size(self.line_start) +
                                get_uleb128p1_byte_size(self.parameters_size))
                for parameter_name in self.parameter_names:
                    bytes_saved += get_uleb128p1_byte_size(parameter_name)
                bytes_saved += 1
                f.write('warning: %s debug info contains only a single ' % (
                    dex_method.get_qualified_name()))
                f.write('%s, all debug info can be removed ' % (op.op))
                f.write('(%u bytes)\n' % (bytes_saved))
                return bytes_saved
        # Otherwise flag individual removable opcodes.
        for op in ops:
            size = op.get_byte_size()
            if op.op == DBG_SET_PROLOGUE_END:
                f.write('warning: %s %s can be removed (%u byte)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_SET_EPILOGUE_BEGIN:
                f.write('warning: %s %s can be removed (%u byte)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_START_LOCAL:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_START_LOCAL_EXTENDED:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_END_LOCAL:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
            elif op.op == DBG_RESTART_LOCAL:
                f.write('warning: %s %s can be removed (%u bytes)\n' % (
                    dex_method.get_qualified_name(), op.op, size))
                bytes_saved += size
        return bytes_saved

    def get_line_table(self):
        """Run the debug state machine and return the decoded row list."""
        if self.line_table is None:
            # Must exist before the loop appends rows (was a None crash).
            self.line_table = list()
            ops = self.get_ops()
            row = debug_info_item.row()
            for op in ops:
                if op.op == DBG_END_SEQUENCE:
                    break
                if op.op == DBG_ADVANCE_PC:
                    row.address += op.addr_offset
                elif op.op == DBG_ADVANCE_LINE:
                    row.line += op.line_offset
                elif op.op == DBG_START_LOCAL:
                    pass
                elif op.op == DBG_START_LOCAL_EXTENDED:
                    pass
                elif op.op == DBG_END_LOCAL:
                    pass
                elif op.op == DBG_RESTART_LOCAL:
                    pass
                elif op.op == DBG_SET_PROLOGUE_END:
                    row.prologue_end = True
                elif op.op == DBG_SET_EPILOGUE_BEGIN:
                    row.epilogue_begin = True
                elif op.op == DBG_SET_FILE:
                    row.source_file = op.name_idx
                else:
                    # Special opcode: bump line and address, emit a row.
                    row.line += op.line_offset
                    row.address += op.addr_offset
                    self.line_table.append(copy.copy(row))
                    row.prologue_end = False
                    row.epilogue_begin = False
        return self.line_table

    def get_ops(self):
        """Decode (once) and return the raw opcode stream."""
        if self.ops is None:
            data = self.data
            data.push_offset_and_seek(self.debug_info_offset)
            self.ops = list()
            while True:
                op = debug_info_op(data)
                self.ops.append(op)
                if op.op == DBG_END_SEQUENCE:
                    break
            data.pop_offset_and_seek()
        return self.ops

    def dump_debug_info(self, f=sys.stdout, prefix=None):
        """Write each opcode's rendering to ``f``, one per line."""
        ops = self.get_ops()
        for op in ops:
            if prefix:
                f.write(prefix)
            f.write('    ')
            op.dump_opcode(f=f)
            f.write('\n')
# ----------------------------------------------------------------------
# code_item
# ----------------------------------------------------------------------
class code_item(AutoParser):
    """Parsed code_item: register counts, instructions, tries and handlers."""
    items = [
        {'type': 'u16', 'name': 'registers_size', 'align': 4},
        {'type': 'u16', 'name': 'ins_size'},
        {'type': 'u16', 'name': 'outs_size'},
        {'type': 'u16', 'name': 'tries_size'},
        {'type': 'u32', 'name': 'debug_info_off'},
        {'type': 'u32', 'name': 'insns_size', 'format': '%u'},
        {'type': 'u16', 'name': 'insns',
         'attr_count': 'insns_size', 'dump_list': print_instructions},
        # Padding code unit: present only when tries follow and the
        # instruction count is odd (4-byte alignment of try_items).
        {'type': 'u16', 'condition': lambda item,
         data: item.tries_size != 0 and item.insns_size & 1},
        {'class': try_item, 'name': 'tries', 'attr_count': 'tries_size',
         'condition': lambda item, data: item.tries_size != 0,
         'default': None},
        {'class': encoded_catch_handler_list, 'name': 'handlers',
         'condition': lambda item, data: item.tries_size != 0,
         'default': None}
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
        self.debug_info = None
        self.data = data
        # Convert insns from a list to a tuple to avoid mutattion and also to
        # allow self.insns to be hashed.
        self.insns = tuple(self.insns)
    def get_debug_info(self):
        # Lazily parse the debug_info_item pointed to by debug_info_off.
        if self.debug_info is None and self.debug_info_off > 0:
            data = self.data
            data.push_offset_and_seek(self.debug_info_off)
            self.debug_info = debug_info_item(data)
            data.pop_offset_and_seek()
        return self.debug_info
class encoded_value:
    """Parse one DEX encoded_value.

    The first byte packs the type tag in its low 5 bits and a size/flag
    argument (value_arg) in its high 3 bits; the payload interpretation
    depends on the tag.
    """
    def __init__(self, data):
        arg_type = data.get_uint8()
        value_arg = arg_type >> 5    # high 3 bits: size - 1, or a flag
        value_type = arg_type & 0x1f  # low 5 bits: VALUE_* tag
        self.value_type = ValueFormat(value_type)
        self.value = None
        size = value_arg + 1
        if value_type == VALUE_BYTE:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_BYTE value_arg != 0 (%u)' % (value_arg))
            self.value = data.get_sint8()
        elif value_type == VALUE_SHORT:
            self.value = data.get_sint_size(size)
        elif value_type == VALUE_CHAR:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_INT:
            self.value = data.get_sint_size(size)
        elif value_type == VALUE_LONG:
            self.value = data.get_sint_size(size)
        elif value_type == VALUE_FLOAT:
            raise ValueError('VALUE_FLOAT not supported yet')
        elif value_type == VALUE_DOUBLE:
            raise ValueError('VALUE_DOUBLE not supported yet')
        elif value_type == VALUE_METHOD_TYPE:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_METHOD_HANDLE:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_STRING:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_TYPE:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_FIELD:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_METHOD:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_ENUM:
            self.value = data.get_uint_size(size)
        elif value_type == VALUE_ARRAY:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ARRAY value_arg != 0 (%u)' % (value_arg))
            raise ValueError('VALUE_ARRAY not supported yet')
            # encoded_array: an array of values, in the format specified by
            # "encoded_array format". The size of the value is implicit in
            # the encoding.
        elif value_type == VALUE_ANNOTATION:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ANNOTATION value_arg != 0 (%u)' % (value_arg))
            # encoded_annotation: a sub-annotation, in the format specified by
            # "encoded_annotation format" below. The size of the value is
            # implicit in the encoding.
            # NOTE(review): VALUE_ANNOTATION leaves self.value as None —
            # the payload is not consumed here; confirm this is intended.
        elif value_type == VALUE_NULL:
            if value_arg != 0:
                raise ValueError(
                    'VALUE_ARRAY value_arg != 0 (%u)' % (value_arg))
            self.value = 0
        elif value_type == VALUE_BOOLEAN:
            # For booleans, value_arg itself carries the value (size == 1
            # means read a byte; size == 0 would mean False).
            if size == 0:
                self.value = False
            else:
                self.value = data.get_uint8() != 0
# ----------------------------------------------------------------------
# encoded_array
# ----------------------------------------------------------------------
class encoded_array(AutoParser):
    """A counted sequence of encoded_value entries."""
    items = [
        {'type': 'uleb', 'name': 'size'},
        {'class': encoded_value, 'name': 'values', 'attr_count': 'size'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
class encoded_array_item(AutoParser):
    """Top-level wrapper around a single encoded_array value."""
    items = [
        {'class': encoded_array, 'name': 'value'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# field_id_item
# ----------------------------------------------------------------------
class field_id_item(AutoParser):
    """One field_id_item: class, type and name indexes for a field."""
    items = [
        {'type': 'u16', 'name': 'class_idx', 'align': 4},
        {'type': 'u16', 'name': 'type_idx'},
        {'type': 'u32', 'name': 'name_idx'},
    ]
    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)
    @classmethod
    def get_table_header(self):
        # NOTE(review): classmethod first arg is conventionally `cls`.
        return 'CLASS  TYPE   NAME\n'
    def get_dump_flat(self):
        return True
# ----------------------------------------------------------------------
# header_item
# ----------------------------------------------------------------------
class header_item(AutoParser):
    '''DEX file header: magic/version, checksums, and the size/offset of
    every top-level table in the file.'''
    items = [
        {'type': 'cstr[4]', 'name': 'magic', 'validate': is_dex_magic},
        {'type': 'u8[3]', 'name': 'version', 'dump': print_version},
        {'type': 'u8', 'validate': is_zero},  # NULL byte
        {'type': 'u32', 'name': 'checksum'},
        {'type': 'u8[20]', 'name': 'signature', 'dump': print_hex_bytes},
        {'type': 'u32', 'name': 'file_size'},
        {'type': 'u32', 'name': 'header_size'},
        # Fix: the original dict literal spelled the 'type' key twice
        # ({'type': 'u32', ..., 'type': 'u32'}); the redundant duplicate
        # key has been removed (same value, so behavior is unchanged).
        {'type': 'u32', 'name': 'endian_tag', 'dump': print_endian},
        {'type': 'u32', 'name': 'link_size'},
        {'type': 'u32', 'name': 'link_off'},
        {'type': 'u32', 'name': 'map_off'},
        {'type': 'u32', 'name': 'string_ids_size'},
        {'type': 'u32', 'name': 'string_ids_off'},
        {'type': 'u32', 'name': 'type_ids_size'},
        {'type': 'u32', 'name': 'type_ids_off'},
        {'type': 'u32', 'name': 'proto_ids_size'},
        {'type': 'u32', 'name': 'proto_ids_off'},
        {'type': 'u32', 'name': 'field_ids_size'},
        {'type': 'u32', 'name': 'field_ids_off'},
        {'type': 'u32', 'name': 'method_ids_size'},
        {'type': 'u32', 'name': 'method_ids_off'},
        {'type': 'u32', 'name': 'class_defs_size'},
        {'type': 'u32', 'name': 'class_defs_off'},
        {'type': 'u32', 'name': 'data_size'},
        {'type': 'u32', 'name': 'data_off'},
    ]

    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)

    def get_dump_header(self):
        '''Header line printed above the dumped fields.'''
        return 'DEX header:'
# ----------------------------------------------------------------------
# map_item
# ----------------------------------------------------------------------
class map_item(AutoParser):
    '''One entry of the DEX map_list: an item type code plus how many
    items of that type exist and where they start in the file.'''
    items = [
        {'class': TypeCode, 'name': 'type',
         'dump_width': TypeCode.max_width()},
        {'type': 'u16'},  # unnamed field: parsed and discarded (presumably padding -- confirm)
        {'type': 'u32', 'name': 'size'},
        {'type': 'u32', 'name': 'offset'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    def get_list_header_lines(self):
        '''Header line(s) printed above a list of map items.'''
        return [' TYPE SIZE OFFSET\n']
    def get_dump_flat(self):
        # One line per item when dumped in a table.
        return True
# ----------------------------------------------------------------------
# map_list
# ----------------------------------------------------------------------
class map_list(AutoParser):
    '''DEX "map_list": a u32 count followed by that many map_item entries
    describing every section of the file.'''
    items = [
        {'type': 'u32', 'name': 'size', 'align': 4, 'dump': False},
        {'class': map_item, 'name': 'list', 'attr_count': 'size',
         'flat': True},
    ]
    def get_dump_header(self):
        '''Header line printed above the dumped entries.'''
        return 'map_list:'
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_handle_item
# ----------------------------------------------------------------------
class method_handle_item(AutoParser):
    '''DEX "method_handle_item": a method-handle type code and the field
    or method id it refers to (the unnamed u16 fields are parsed and
    discarded -- presumably padding, per the DEX format).'''
    items = [
        {'class': MethodHandleTypeCode, 'name': 'method_handle_type',
         'align': 4},
        {'type': 'u16'},
        {'type': 'u16', 'name': 'field_or_method_id'},
        {'type': 'u16'},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
# ----------------------------------------------------------------------
# method_id_item
# ----------------------------------------------------------------------
class method_id_item(AutoParser):
    '''DEX "method_id_item": indices identifying a method's defining class
    (type_ids), its prototype (proto_ids) and its name (string_ids).'''
    items = [
        {'type': 'u16', 'name': 'class_idx', 'align': 4},
        {'type': 'u16', 'name': 'proto_idx'},
        {'type': 'u32', 'name': 'name_idx'},
    ]
    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)
    @classmethod
    def get_table_header(self):
        '''Column header used when dumping method_ids as a flat table.'''
        return 'CLASS PROTO NAME\n'
    def get_dump_flat(self):
        # One line per item when dumped in a table.
        return True
# ----------------------------------------------------------------------
# proto_id_item
# ----------------------------------------------------------------------
class proto_id_item(AutoParser):
    '''DEX "proto_id_item": shorty/return-type indices plus the file
    offset of an optional type_list of parameter types.'''
    items = [
        {'type': 'u32', 'name': 'shorty_idx', 'align': 4},
        {'type': 'u32', 'name': 'return_type_idx'},
        {'type': 'u32', 'name': 'parameters_off'},
    ]
    def __init__(self, data, context):
        AutoParser.__init__(self, self.items, data, context)
        self.parameters = None  # Lazily parsed type_list, see get_parameters().
    def get_dump_flat(self):
        # One line per item when dumped in a table.
        return True
    @classmethod
    def get_table_header(self):
        '''Column header used when dumping proto_ids as a flat table.'''
        return 'SHORTY_IDX RETURN PARAMETERS\n'
    def get_parameters(self):
        '''Lazily parse and cache the parameter type_list; returns None
        when this prototype has no parameters (parameters_off == 0).'''
        if self.parameters_off != 0 and self.parameters is None:
            # Get the data from our dex.File object
            data = self.context.data
            data.push_offset_and_seek(self.parameters_off)
            self.parameters = type_list(data)
            data.pop_offset_and_seek()
        return self.parameters
# ----------------------------------------------------------------------
# string_data_item
# ----------------------------------------------------------------------
class string_data_item(AutoParser):
    '''DEX "string_data_item": the string's UTF-16 code-unit length
    followed by the encoded string bytes.'''
    items = [
        {'type': 'uleb', 'name': 'utf16_size', 'format': '%3u'},
        {'type': 'cstr', 'name': 'data', 'dump': print_string},
    ]
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
    def get_dump_flat(self):
        # One line per item when dumped in a table.
        return True
# ----------------------------------------------------------------------
# type_list
# ----------------------------------------------------------------------
class type_list(AutoParser):
    '''DEX "type_list": a u32 count followed by that many u16 type
    indices (into the type_ids table).'''
    items = [
        {'type': 'u32', 'name': 'size', 'align': 4},
        {'type': 'u16', 'name': 'list', 'attr_count': 'size'},
    ]
    def get_dump_header(self):
        '''Header line printed above the dumped entries.'''
        return 'type_list:'
    def __init__(self, data):
        AutoParser.__init__(self, self.items, data)
class Progard:
    '''Parses a proguard mapping file and answers reverse (obfuscated ->
    original) lookups for class and method names.

    NOTE: the class name keeps the original project's spelling
    ("Progard") so existing callers are unaffected.'''

    def __init__(self, path):
        '''Parse the proguard map at `path`.

        The map alternates class lines ("old.Class -> new.Class:") with
        indented member lines ("    [from:to:]old -> new").'''
        self.path = path
        self.classes_dict = {}
        class_dict = None
        # Fix: use a raw string -- '\s' in a plain literal is an invalid
        # escape sequence (a SyntaxWarning in modern Python).
        regex = re.compile(r'\s+([0-9]+:[0-9]+:)?(.*) -> (.*)$')
        with open(path, 'r') as f:
            for line in f:
                line = line.rstrip('\n')
                if not line:
                    continue
                if line[0].isspace():
                    # Member line: map the new (obfuscated) member name
                    # back to the original one within the current class.
                    match = regex.match(line)
                    if match:
                        old = match.group(2)
                        new = match.group(3)
                        class_dict[new] = old
                else:
                    # Class line: start a fresh member dict for this class.
                    (old, new) = line.split(' -> ')
                    class_dict = {}
                    self.classes_dict[new] = (old, class_dict)

    def lookup_class(self, new_class):
        '''Return the original class name for `new_class`, or None.'''
        if new_class in self.classes_dict:
            (old_class, class_dict) = self.classes_dict[new_class]
            if old_class is not None:
                return old_class
        return None

    def lookup_method(self, new_class, new_method):
        '''Return the original name of `new_method` in `new_class`, or
        None when either the class or the method is unknown.'''
        if new_class in self.classes_dict:
            (old_class, class_dict) = self.classes_dict[new_class]
            if new_method in class_dict:
                return class_dict[new_method]
        return None
class DexMethod:
    '''Encapsulates one method (direct or virtual) of a DexClass, lazily
    resolving its method_id, code item and decoded instructions.'''

    def __init__(self, dex_class, encoded_method, is_virtual):
        self.dex_class = dex_class
        self.encoded_method = encoded_method
        self.method_id = None       # Cached method_id_item.
        self.is_virtual = is_virtual
        self.code_item = None       # Cached code item.
        self.insns = None           # Cached decoded instruction list.
        self.name_in_file = None    # Raw (possibly obfuscated) name.
        self.name = None            # Demangled name.

    def get_qualified_name(self):
        '''Return "<class>:<method>", omitting the ':' separator when the
        class name already ends with ';'.'''
        class_name = self.get_class().get_name()
        method_name = self.get_name()
        if class_name[-1] != ';':
            return class_name + ':' + method_name
        else:
            return class_name + method_name

    def get_method_id(self):
        '''Lazily fetch and cache the method_id_item for this method.'''
        if self.method_id is None:
            self.method_id = self.get_dex().get_method_id(self.encoded_method)
        return self.method_id

    def get_method_index(self):
        return self.encoded_method.method_idx

    def get_code_offset(self):
        return self.encoded_method.code_off

    def get_code_item_index(self):
        '''Return this method's index in the code item table, or -1.'''
        code_item = self.get_code_item()
        if code_item:
            return self.get_dex().get_code_item_index_from_code_off(
                code_item.get_offset())
        return -1

    def get_dex(self):
        return self.dex_class.get_dex()

    def get_name_in_file(self):
        '''Return the method name as stored in the DEX file (possibly an
        obfuscated proguard name).'''
        if self.name_in_file is None:
            self.name_in_file = self.get_dex().get_string(
                self.get_method_id().name_idx)
        return self.name_in_file

    def get_name(self):
        '''Return the demangled method name, falling back to the raw
        in-file name when no proguard mapping resolves it.'''
        if self.name is None:
            cls_mangled = self.get_class().get_mangled_name()
            name_in_file = self.get_name_in_file()
            if cls_mangled and name_in_file:
                self.name = self.get_dex().demangle_class_method_name(
                    cls_mangled, name_in_file)
                if self.name is None:
                    self.name = name_in_file
        return self.name

    def get_class(self):
        return self.dex_class

    def get_code_item(self):
        '''Lazily resolve the code item (None when code_off is 0).'''
        if self.code_item is None:
            if self.encoded_method.code_off != 0:
                self.code_item = self.get_dex().find_code_item(
                    self.encoded_method.code_off)
        return self.code_item

    def get_code_byte_size(self):
        '''Bytecode size in bytes (insns holds 16-bit code units).'''
        code_item = self.get_code_item()
        if code_item:
            return len(code_item.insns) * 2
        return 0

    def get_instructions(self):
        '''Lazily decode and cache this method's instruction list.'''
        if self.insns is None:
            self.insns = []
            code_item = self.get_code_item()
            if code_item:
                code_units = CodeUnits(code_item.insns)
                while code_units.index_is_valid():
                    insn = DexInstruction()
                    insn.decode(code_units)
                    self.insns.append(insn)
        return self.insns

    def dump(self, dump_code=True, dump_debug_info=True, f=sys.stdout):
        '''Dump this method's ids and, optionally, code and debug info.'''
        if self.is_virtual:
            method_type = 'virtual'
        else:
            method_type = 'direct'
        dex = self.get_dex()
        f.write('method: (%s) %s%s\n' %
                (method_type, self.get_class().get_name(), self.get_name()))
        code_item_idx = dex.get_code_item_index_from_code_off(
            self.encoded_method.code_off)
        self.encoded_method.dump(f=f, prefix='    encoded_method.', flat=False)
        method_id = dex.get_method_id(self.encoded_method.method_idx)
        if method_id:
            method_id.dump(f=f, prefix='    method_id.', flat=False)
            proto_id = dex.get_proto_id(method_id.proto_idx)
            if proto_id:
                proto_id.dump(f=f, prefix='    proto_id.', flat=False)
        f.write('\n')
        if dump_code:
            if code_item_idx >= 0:
                code_item = dex.get_code_items()[code_item_idx]
                # Fix: reconstructed format string -- the original source
                # line was truncated at the '%' of '%#8.8x'.
                f.write('    code_item[%u] @ %#8.8x:\n' % (
                        code_item_idx, code_item.get_offset()))
                code_item.dump(f=f, prefix='        ')
            if dump_debug_info:
                self.dump_debug_info(f=f, prefix='    ')

    def dump_code(self, f=sys.stdout):
        '''Dump each decoded instruction of this method.'''
        insns = self.get_instructions()
        for insn in insns:
            insn.dump(f=f)

    def get_debug_info(self):
        '''Return the code item's debug info, or None without code.'''
        code_item = self.get_code_item()
        if code_item:
            return code_item.get_debug_info()
        return None

    def dump_debug_info(self, f=sys.stdout, prefix=None):
        '''Dump the debug info for this method, or note its absence.'''
        debug_info = self.get_debug_info()
        if prefix:
            f.write(prefix)
        if debug_info:
            # Fix: reconstructed format string -- the original source line
            # was truncated at the '%' of '%#8.8x'.
            f.write('debug info @ %#8.8x:\n' % (debug_info.get_offset()))
            debug_info.dump_debug_info(f=f, prefix=prefix)
            f.write('\n')
        else:
            f.write('no debug info\n')

    def check_debug_info_encoding(self):
        '''Forward the debug-info encoding check, if debug info exists.'''
        debug_info = self.get_debug_info()
        if debug_info:
            return debug_info.check_encoding(self)
class DexClass:
    '''Encapsulates a class defined in a DEX file; lazily builds its
    DexMethod list and resolves its mangled/demangled names.'''

    def __init__(self, dex, class_def):
        self.dex = dex
        self.class_def = class_def
        self.methods = None            # Cached DexMethod list.
        self.num_direct_methods = 0    # Set when methods are built.
        self.mangled = None            # Cached mangled type name.
        self.demangled = None          # Cached demangled name.

    def dump(self, f=sys.stdout):
        '''Dump the class_def and class_data for this class.'''
        f.write('\nclass: %s\n' % (self.get_name()))
        dex = self.get_dex()
        class_def_offset = self.class_def.get_offset()
        class_def_idx = dex.get_class_def_index_from_offset(class_def_offset)
        # Fix: reconstructed format string -- the original source line was
        # truncated at the '%' of '%#8.8x'.
        f.write('  class_def[%u] @ %#8.8x:\n' % (
                class_def_idx, class_def_offset))
        self.class_def.dump(f=f, flat=False, prefix='    ')
        # Fix: reconstructed format string (same truncation as above).
        f.write('  class_data_item @ %#8.8x:\n' % (
                self.class_def.class_data.get_offset()))
        self.class_def.class_data.dump(f=f, flat=False, prefix='    ')
        f.write('\n')

    def get_type_index(self):
        '''Return this class's index into the type_ids table.'''
        return self.class_def.class_idx

    def is_abstract(self):
        return (self.class_def.access_flags & ACC_ABSTRACT) != 0

    def get_mangled_name(self):
        '''Lazily fetch the mangled ("Lcom/foo/Bar;") type name.'''
        if self.mangled is None:
            dex = self.get_dex()
            self.mangled = dex.get_typename(self.class_def.class_idx)
        return self.mangled

    def get_name(self):
        '''Return the demangled class name, falling back to the mangled
        descriptor when no proguard mapping resolves it.'''
        if self.demangled is None:
            mangled = self.get_mangled_name()
            if mangled:
                self.demangled = self.get_dex().demangle_class_name(mangled)
                if self.demangled is None:
                    self.demangled = mangled
        return self.demangled

    def get_dex(self):
        return self.dex

    def get_methods(self):
        '''Lazily build the method list: direct methods first, then
        virtual methods (num_direct_methods records the split point).'''
        if self.methods is None:
            self.methods = []
            self.num_direct_methods = len(
                self.class_def.class_data.direct_methods)
            for encoded_method in self.class_def.class_data.direct_methods:
                self.methods.append(DexMethod(self, encoded_method, False))
            for encoded_method in self.class_def.class_data.virtual_methods:
                self.methods.append(DexMethod(self, encoded_method, True))
        return self.methods
def demangle_classname(mangled):
    '''Turn a JNI-style descriptor ("Lcom/foo/Bar;") into a dotted class
    name with a trailing colon ("com.foo.Bar:"). Anything that does not
    look like a mangled descriptor is returned unchanged.'''
    looks_mangled = (mangled and len(mangled) > 2 and
                     mangled[0] == 'L' and mangled[-1] == ';')
    if looks_mangled:
        return '%s:' % (mangled[1:-1].replace('/', '.'))
    # Already demangled
    return mangled
def mangle_classname(demangled):
    '''Turn a dotted class name ("com.foo.Bar") into a JNI-style
    descriptor ("Lcom/foo/Bar;"). Names already in descriptor form pass
    through untouched.'''
    needs_mangling = (demangled and len(demangled) > 2 and
                      (demangled[0] != 'L' or demangled[-1] != ';'))
    if needs_mangling:
        return 'L%s;' % (demangled.replace('.', '/'))
    # Already mangled (the original comment said "demangled" here, but
    # this branch is taken for names already in descriptor form).
    return demangled
class File:
    '''A DEX file: parses the header eagerly and every other table
    (strings, types, protos, fields, methods, classes, code items)
    lazily on first access.'''

    def __init__(self, path, proguard_path):
        self.path = path
        self.proguard = None
        if proguard_path and os.path.exists(proguard_path):
            self.proguard = Progard(proguard_path)
        # NOTE(review): the file is opened in text mode and never closed;
        # confirm file_extract does not require binary ('rb') mode.
        self.data = file_extract.FileExtract(open(self.path), '=', 4)
        self.header = header_item(self.data)
        # Lazily populated caches -- each get_*() fills its own on demand.
        self.map_list = None
        self.string_ids = None
        self.type_ids = None
        self.proto_ids = None
        self.field_ids = None
        self.method_ids = None
        self.class_defs = None
        self.classes = None
        self.call_site_ids = None
        self.method_handle_items = None
        self.code_items = None
        self.code_off_to_code_item_idx = {}
        self.strings = None
        self.call_sites = None
        self.dex_classes = {}

    def demangle_class_name(self, cls_mangled):
        '''Map a mangled class name to its original proguard name, or
        None when no proguard map (or no entry) exists.'''
        if self.proguard:
            cls_demangled = demangle_classname(cls_mangled)
            if cls_demangled:
                return self.proguard.lookup_class(cls_demangled)
        return None

    def demangle_class_method_name(self, cls_mangled, method_name):
        '''Map a method of a mangled class back to its original proguard
        name, or None.'''
        if self.proguard:
            cls_demangled = demangle_classname(cls_mangled)
            if cls_demangled:
                return self.proguard.lookup_method(cls_demangled, method_name)
        return None

    def get_map_list(self):
        '''Lazily parse and cache the map_list from header.map_off.'''
        if self.map_list is None:
            self.data.push_offset_and_seek(self.header.map_off)
            self.map_list = map_list(self.data)
            self.data.pop_offset_and_seek()
        return self.map_list

    def get_map_tuple(self, type_code):
        '''Return (size, offset) for a map entry, or (0, 0) if absent.'''
        map_list = self.get_map_list()
        for item in map_list.list:
            if item.type.get_enum_value() == type_code:
                return (item.size, item.offset)
        return (0, 0)

    def find_class(self, class_ref):
        '''Find a DexClass by type index or by (possibly dotted) name.'''
        class_idx = class_ref
        if isinstance(class_ref, six.string_types):
            # Make sure the string is in 'L' <classname-with-slashes> ';'
            class_mangled = mangle_classname(class_ref)
            class_str_idx = self.find_string_idx(class_mangled)
            if class_str_idx >= 0:
                class_idx = self.find_type_idx(class_str_idx)
        if isinstance(class_idx, numbers.Integral):
            classes = self.get_classes()
            for cls in classes:
                if cls.class_def.class_idx == class_idx:
                    return cls
        return None

    def find_string_idx(self, match_s):
        '''Return the index of an exact string match, or -1.'''
        strings = self.get_strings()
        for (i, s) in enumerate(strings):
            if match_s == s.data:
                return i
        return -1

    def get_string(self, index):
        '''Return the string at `index`, or None if out of range.'''
        strings = self.get_strings()
        if index < len(strings):
            return strings[index].data
        return None

    def get_typename(self, type_id):
        '''Return the type descriptor string for `type_id`, or None.'''
        types = self.get_type_ids()
        if type_id < len(types):
            return self.get_string(types[type_id])
        return None

    def get_string_ids(self):
        '''Lazily read and cache the string_ids offset table.'''
        if self.string_ids is None:
            self.string_ids = list()
            self.data.push_offset_and_seek(self.header.string_ids_off)
            for i in range(self.header.string_ids_size):
                self.string_ids.append(self.data.get_uint32())
            self.data.pop_offset_and_seek()
        return self.string_ids

    def get_type_ids(self):
        '''Lazily read and cache the type_ids table.'''
        if self.type_ids is None:
            self.type_ids = list()
            self.data.push_offset_and_seek(self.header.type_ids_off)
            for i in range(self.header.type_ids_size):
                self.type_ids.append(self.data.get_uint32())
            self.data.pop_offset_and_seek()
        return self.type_ids

    def get_proto_ids(self):
        '''Lazily parse and cache the proto_ids table.'''
        if self.proto_ids is None:
            self.proto_ids = list()
            self.data.push_offset_and_seek(self.header.proto_ids_off)
            for i in range(self.header.proto_ids_size):
                self.proto_ids.append(proto_id_item(self.data, self))
            self.data.pop_offset_and_seek()
        return self.proto_ids

    def get_proto_id(self, proto_idx):
        '''Return the proto_id_item at `proto_idx`, or None.'''
        proto_ids = self.get_proto_ids()
        if proto_idx >= 0 and proto_idx < len(proto_ids):
            return proto_ids[proto_idx]
        return None

    def get_proto_shorty(self, proto_idx):
        '''Return the shorty descriptor string for a prototype.'''
        proto_id = self.get_proto_id(proto_idx)
        return self.get_string(proto_id.shorty_idx)

    def get_field_ids(self):
        '''Lazily parse and cache the field_ids table.'''
        if self.field_ids is None:
            self.field_ids = list()
            self.data.push_offset_and_seek(self.header.field_ids_off)
            for i in range(self.header.field_ids_size):
                self.field_ids.append(field_id_item(self.data, self))
            self.data.pop_offset_and_seek()
        return self.field_ids

    def get_method_ids(self):
        '''Lazily parse and cache the method_ids table.'''
        if self.method_ids is None:
            self.method_ids = list()
            self.data.push_offset_and_seek(self.header.method_ids_off)
            for i in range(self.header.method_ids_size):
                self.method_ids.append(method_id_item(self.data, self))
            self.data.pop_offset_and_seek()
        return self.method_ids

    def find_method_ids(self, method_name, class_ref=None):
        '''Return all method_id_items named `method_name`, optionally
        restricted to one class.'''
        dex_class = None
        if class_ref is not None:
            dex_class = self.find_class(class_ref)
        matches = list()  # Return a list of matching methods
        method_ids = self.get_method_ids()
        if not method_ids:
            return matches
        name_idx = self.find_string_idx(method_name)
        # Fix: find_string_idx() returns -1 when not found and 0 is a
        # valid string index; the original tested "<= 0" and silently
        # missed methods whose name happened to be string 0.
        if name_idx < 0:
            return matches
        for method_id in method_ids:
            if method_id.name_idx == name_idx:
                if dex_class:
                    if method_id.class_idx != dex_class.class_def.class_idx:
                        continue
                matches.append(method_id)
        return matches

    def find_method_id_by_code_offset(self, code_off):
        '''Return the encoded method whose code starts at `code_off`.'''
        class_defs = self.get_class_defs()
        for class_def in class_defs:
            method_id = class_def.find_encoded_method_by_code_off(code_off)
            if method_id:
                return method_id
        return None

    def get_method_id(self, method_ref):
        '''Return the method_id_item for an encoded_method or an integer
        index; raises ValueError for any other reference type.'''
        method_ids = self.get_method_ids()
        if method_ids:
            if isinstance(method_ref, encoded_method):
                if method_ref.method_idx < len(method_ids):
                    return method_ids[method_ref.method_idx]
            elif isinstance(method_ref, numbers.Integral):
                if method_ref < len(method_ids):
                    return method_ids[method_ref]
            else:
                raise ValueError('invalid method_ref type %s' %
                                 (type(method_ref)))
        return None

    # def get_call_site(self, idx):
    #     call_site_ids = self.get_call_site_ids()
    #     if idx >= len(call_site_ids):
    #         return None
    #     if self.call_sites[idx] is None:
    #         self.data.push_offset_and_seek(call_site_ids[idx])
    #         self.call_sites[idx] = call_site_item(self.data)
    #         self.data.pop_offset_and_seek()
    #     return self.call_sites[idx]

    def get_call_site_ids(self):
        '''Lazily read the call-site offsets (call_sites entries stay
        None until individually parsed).'''
        if self.call_site_ids is None:
            self.call_site_ids = list()
            self.call_sites = list()
            (size, offset) = self.get_map_tuple(TYPE_CALL_SITE_ID_ITEM)
            self.data.push_offset_and_seek(offset)
            for i in range(size):
                self.call_site_ids.append(self.data.get_uint32())
                self.call_sites.append(None)
            self.data.pop_offset_and_seek()
        return self.call_site_ids

    def get_method_handle_items(self):
        '''Lazily parse and cache the method-handle items.'''
        if self.method_handle_items is None:
            self.method_handle_items = list()
            (size, offset) = self.get_map_tuple(TYPE_METHOD_HANDLE_ITEM)
            self.data.push_offset_and_seek(offset)
            for i in range(size):
                self.method_handle_items.append(method_handle_item(self.data))
            self.data.pop_offset_and_seek()
        return self.method_handle_items

    def get_code_items(self):
        '''Lazily parse all code items and index them by file offset.'''
        if self.code_items is None:
            self.code_items = list()
            (size, offset) = self.get_map_tuple(TYPE_CODE_ITEM)
            self.data.push_offset_and_seek(offset)
            for i in range(size):
                self.data.align_to(4)
                item = code_item(self.data)
                self.code_items.append(item)
                self.code_off_to_code_item_idx[item.get_offset()] = i
            self.data.pop_offset_and_seek()
        return self.code_items

    def report_code_duplication(self):
        '''Group code items by identical bytecode and print every group
        of methods that share the same code.'''
        code_to_code_items = {}
        code_items = self.get_code_items()
        if code_items:
            for code_item in code_items:
                key = code_item.insns
                if key in code_to_code_items:
                    code_to_code_items[key].append(code_item)
                else:
                    code_to_code_items[key] = [code_item]
            for key in code_to_code_items:
                code_items = code_to_code_items[key]
                if len(code_items) > 1:
                    print('-' * 72)
                    print('The following methods have the same code:')
                    for code_item in code_items:
                        method = self.find_method_from_code_off(
                            code_item.get_offset())
                        if method.is_virtual:
                            print('virtual', end=' ')
                        else:
                            print('direct', end=' ')
                        print(method.get_qualified_name())
                    # Dump the code once for all methods
                    method.dump_code()

    def get_class_def_index_from_offset(self, class_def_offset):
        '''Return the class_def index at `class_def_offset`, or -1.'''
        class_defs = self.get_class_defs()
        for (i, class_def) in enumerate(class_defs):
            if class_def.get_offset() == class_def_offset:
                return i
        return -1

    def get_code_item_index_from_code_off(self, code_off):
        '''Return the code item index at `code_off`, or -1.'''
        # Make sure the code items are created
        self.get_code_items()
        if code_off in self.code_off_to_code_item_idx:
            return self.code_off_to_code_item_idx[code_off]
        return -1

    def find_code_item(self, code_off):
        '''Return the code item at `code_off`; raises ValueError when no
        code item starts at that offset.'''
        code_item_idx = self.get_code_item_index_from_code_off(code_off)
        if code_item_idx >= 0:
            return self.get_code_items()[code_item_idx]
        else:
            # Fix: reconstructed format string -- the original source line
            # was truncated at the '%' of '%#8.8x'.
            raise ValueError('invalid code item offset %#8.8x' % (code_off))

    def find_method_from_code_off(self, code_off):
        '''Return the DexMethod whose code starts at `code_off`, or None
        (0 means "no code").'''
        if code_off == 0:
            return None
        for cls in self.get_classes():
            for method in cls.get_methods():
                if method.get_code_offset() == code_off:
                    return method
        return None

    def get_class_defs(self):
        '''Lazily parse and cache the class_defs table.'''
        if self.class_defs is None:
            self.class_defs = list()
            self.data.push_offset_and_seek(self.header.class_defs_off)
            for i in range(self.header.class_defs_size):
                class_def = class_def_item(self.data, self)
                self.class_defs.append(class_def)
            self.data.pop_offset_and_seek()
        return self.class_defs

    def get_classes(self):
        '''Lazily wrap every class_def in a DexClass and cache the list.'''
        if self.classes is None:
            self.classes = list()
            class_defs = self.get_class_defs()
            for class_def in class_defs:
                dex_class = DexClass(self, class_def)
                self.classes.append(dex_class)
            # Fix: the original called self.data.pop_offset_and_seek()
            # here without a matching push, which would pop an offset
            # saved by an outer caller; no seek is needed here at all.
        return self.classes

    def get_strings(self):
        '''Lazily parse and cache every string_data_item.'''
        if self.strings is None:
            self.strings = list()
            for string_id_item in self.get_string_ids():
                self.data.push_offset_and_seek(string_id_item)
                self.strings.append(string_data_item(self.data))
            self.data.pop_offset_and_seek()
        return self.strings

    def dump_header(self, options, f=sys.stdout):
        '''Dump the DEX header.'''
        self.header.dump(f=f)

    def dump_map_list(self, options, f=sys.stdout):
        '''Dump the map_list.'''
        self.get_map_list().dump(f=f)
        f.write('\n')

    def dump_string_ids(self, options, f=sys.stdout):
        '''Dump every string with its file offset.'''
        string_ids = self.get_string_ids()
        if string_ids:
            f.write('string_ids:\n')
            for (i, item) in enumerate(self.get_strings()):
                # Fix: reconstructed format string -- the original source
                # line was truncated at the '%' of '%#8.8x'.
                f.write('[%3u] %#8.8x ( ' % (i, string_ids[i]))
                item.dump(f=f)
                f.write(')\n')

    def dump_type_ids(self, options, f=sys.stdout):
        '''Dump every type id with its resolved descriptor string.'''
        type_ids = self.get_type_ids()
        if type_ids:
            f.write('\ntype_ids:\n      DESCRIPTOR_IDX\n')
            for (i, item) in enumerate(type_ids):
                # Fix: reconstructed format string (same truncation).
                f.write('[%3u] %#8.8x ("%s")\n' %
                        (i, item, self.get_string(item)))

    def find_type_idx(self, class_str_idx):
        '''Binary-search type_ids for `class_str_idx`; returns -1 when
        absent (type_ids are sorted by string index per the DEX spec).'''
        types = self.get_type_ids()
        i = bisect.bisect_left(types, class_str_idx)
        if i != len(types) and types[i] == class_str_idx:
            return i
        return -1

    def find_class_def_by_type_index(self, class_idx):
        '''Return the class_def whose class_idx matches, or None.'''
        class_defs = self.get_class_defs()
        for class_def in class_defs:
            if class_def.class_idx == class_idx:
                return class_def
        return None

    def dump_proto_ids(self, options, f=sys.stdout):
        '''Dump every prototype with its shorty, return type and
        parameter types.'''
        proto_ids = self.get_proto_ids()
        if proto_ids:
            f.write('\nproto_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(proto_id_item.get_table_header())
            for (i, item) in enumerate(proto_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                shorty = self.get_string(item.shorty_idx)
                ret = self.get_string(item.return_type_idx)
                f.write(' ("%s", "%s"' % (shorty, ret))
                parameters = item.get_parameters()
                if parameters:
                    f.write(', (')
                    # Renamed loop variable: the original reused `i` and
                    # shadowed the enumerate index above.
                    for (j, type_id) in enumerate(parameters.list):
                        if j > 0:
                            f.write(', ')
                        f.write(self.get_string(type_id))
                    f.write(')')
                else:
                    f.write(', ()')
                f.write(')\n')

    def dump_field_ids(self, options, f=sys.stdout):
        '''Dump every field id with its resolved class/type/name.'''
        field_ids = self.get_field_ids()
        if field_ids:
            f.write('\nfield_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(field_id_item.get_table_header())
            for (i, item) in enumerate(field_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s", "%s", "%s")\n' % (
                    self.get_typename(item.class_idx),
                    self.get_typename(item.type_idx),
                    self.get_string(item.name_idx)))

    def dump_method_ids(self, options, f=sys.stdout):
        '''Dump every method id with its resolved class/shorty/name.'''
        method_ids = self.get_method_ids()
        if method_ids:
            f.write('\nmethod_ids:\n')
            f.write(' ' * (5 + 1))
            f.write(method_id_item.get_table_header())
            for (i, item) in enumerate(method_ids):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s", "%s", "%s")\n' % (
                    self.get_typename(item.class_idx),
                    self.get_proto_shorty(item.proto_idx),
                    self.get_string(item.name_idx)))

    def dump_class_defs(self, options, f=sys.stdout):
        '''Dump every class_def with its resolved class name.'''
        class_defs = self.get_class_defs()
        if class_defs:
            f.write('\nclass_defs:\n')
            f.write(' ' * (5 + 1))
            f.write(class_def_item.get_table_header())
            for (i, item) in enumerate(class_defs):
                f.write('[%3u] ' % (i))
                item.dump(f=f, print_name=False)
                f.write(' ("%s")' % (self.get_typename(item.class_idx)))
                f.write('\n')

    def dump_call_site_ids(self, options, f=sys.stdout):
        '''Dump every call-site offset.'''
        call_site_ids = self.get_call_site_ids()
        if call_site_ids:
            f.write('\ncall_site_ids:\n')
            f.write(' ' * (5 + 1))
            for (i, item) in enumerate(call_site_ids):
                # Fix: reconstructed format string -- the original source
                # line was truncated at the '%' of '%#8.8x'.
                f.write('[%3u] %#8.8x\n' % (i, item))

    def dump_method_handle_items(self, options, f=sys.stdout):
        '''Dump every method-handle item.'''
        method_handle_items = self.get_method_handle_items()
        if method_handle_items:
            f.write('\nmethod_handle_items:\n')
            f.write(' ' * (5 + 1))
            for (i, item) in enumerate(method_handle_items):
                f.write('[%3u] ' % (i))
                item.dump(f=f)
                f.write('\n')

    def dump_code(self, options, f=sys.stdout):
        '''Dump the methods (with code/debug info per options) of every
        non-abstract class.'''
        classes = self.get_classes()
        if classes:
            for cls in classes:
                if cls.is_abstract():
                    continue
                cls.dump(f=f)
                methods = cls.get_methods()
                dc = options.dump_code or options.dump_all
                ddi = options.debug or options.dump_all
                for method in methods:
                    # The original re-tested the same options here; `dc`
                    # already holds that result.
                    if dc:
                        method.dump(f=f, dump_code=dc, dump_debug_info=ddi)
                f.write('\n')

    def dump_code_items(self, options, f=sys.stdout):
        '''Dump every code item by index.'''
        code_items = self.get_code_items()
        if code_items:
            for (i, code_item) in enumerate(code_items):
                f.write('code_item[%u]:\n' % (i))
                code_item.dump(f=f)

    def dump(self, options, f=sys.stdout):
        '''Dump every table of the DEX file in file order.'''
        self.dump_header(options, f)
        f.write('\n')
        self.dump_map_list(options, f)
        self.dump_string_ids(options, f)
        self.dump_type_ids(options, f)
        self.dump_proto_ids(options, f)
        self.dump_field_ids(options, f)
        self.dump_method_ids(options, f)
        self.dump_class_defs(options, f)
        self.dump_call_site_ids(options, f)
        self.dump_method_handle_items(options, f)
        self.dump_code(options, f)
        self.dump_code_items(options, f)
def sign_extending(value, bit_width):
    '''Interpret the low `bit_width` bits of `value` as a two's-complement
    signed integer and return it as a (possibly negative) Python int.'''
    sign_bit = 1 << (bit_width - 1)
    if value & sign_bit:
        # Sign bit set: subtract the full range to get the negative value.
        return value - (sign_bit << 1)
    return value
def get_signed_hex_offset_as_str(signed_offset, width):
    '''Format `signed_offset` as an explicit-sign, zero-padded hex string
    of `width` nibbles (2, 4 or 8); raises ValueError otherwise.'''
    negative = signed_offset < 0
    magnitude = -signed_offset if negative else signed_offset
    if width == 2:
        digits = '%2.2x' % (magnitude & 0xff)
    elif width == 4:
        digits = '%4.4x' % (magnitude & 0xffff)
    elif width == 8:
        digits = '%8.8x' % (magnitude & 0xffffffff)
    else:
        raise ValueError("only sizes of 2 4 or 8 are supported")
    return ('-' if negative else '+') + digits
class Opcode(object):
    '''Base class for decoded DEX instructions.

    Subclasses supply the class attributes `ops` (op byte -> mnemonic),
    `num_code_units` and `max_regs`, and usually populate `self.regs`.'''

    def __init__(self, inst):
        self.inst = inst

    def check_encoding(self, f=sys.stdout):
        '''Return how many bytes a tighter encoding would save.'''
        return 0  # Return zero to indicate we can't save any bytes

    def new_encoding(self, f=sys.stdout):
        return 0

    def get_op(self):
        return self.inst.get_op()

    def get_name(self):
        '''Return the mnemonic for this instruction's op byte.'''
        op = self.get_op()
        return self.ops[op]

    def get_num_code_units(self):
        return self.num_code_units

    def regs_are_sequential(self):
        '''Return True when self.regs holds consecutive register numbers.

        Fix: the original loop never advanced prev_reg, so every register
        was compared against regs[0] + 1 and e.g. [1, 2, 3] was wrongly
        reported as non-sequential.'''
        if len(self.regs) <= 1:
            return True
        prev_reg = self.regs[0]
        for i in range(1, len(self.regs)):
            curr_reg = self.regs[i]
            if prev_reg + 1 != curr_reg:
                return False
            prev_reg = curr_reg
        return True
class Opcode00(Opcode):
    '''Op 0x00: "nop" plus the three pseudo-instruction payloads that
    share it. The AA byte ("nature") selects the variant: 0 = plain nop,
    1 = packed-switch payload, 2 = sparse-switch payload,
    3 = fill-array-data payload.'''
    ops = {0x00: 'nop'}
    num_code_units = 1
    max_regs = 0
    extra_data = 'x'
    def __init__(self, inst, code_units):
        '''Decode the payload (if any) by consuming code units from
        `code_units`; raises ValueError for an unknown nature.'''
        Opcode.__init__(self, inst)
        self.nature = inst.get_AA()
        if self.nature == 0:
            pass  # NOP
        elif self.nature == 1:
            # packed-switch: a size, the first key, then `size` targets.
            self.size = code_units.get_code_unit()
            self.first_key = code_units.get_int()
            self.targets = list()
            for i in range(self.size):
                self.targets.append(code_units.get_int())
        elif self.nature == 2:
            # sparse-switch: `size` keys followed by `size` targets.
            self.size = code_units.get_code_unit()
            self.keys = list()
            self.targets = list()
            for i in range(self.size):
                self.keys.append(code_units.get_int())
            for i in range(self.size):
                self.targets.append(code_units.get_int())
        elif self.nature == 3:
            # fill-array-data: raw element bytes, rounded up to a whole
            # number of 16-bit code units.
            self.element_width = code_units.get_code_unit()
            self.size = code_units.get_uint()
            num_code_units = int((self.size * self.element_width + 1) / 2)
            # NOTE(review): StringIO.StringIO is the Python 2 module name;
            # under Python 3 this would need io.StringIO/io.BytesIO --
            # confirm which interpreter this file targets.
            encoder = file_extract.FileEncode(StringIO.StringIO(), 'little', 4)
            for i in range(num_code_units):
                encoder.put_uint16(code_units.get_code_unit())
            encoder.seek(0)
            self.data = encoder.file.getvalue()
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))
    def get_name(self):
        '''Return the mnemonic for the active nature variant.'''
        if self.nature == 0:
            return self.ops[0]
        elif self.nature == 1:
            return 'packed-switch-payload'
        elif self.nature == 2:
            return 'sparse-switch-payload'
        elif self.nature == 3:
            return 'fill-array-data-payload'
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))
    def get_num_code_units(self):
        '''Return the total size of this (pseudo-)instruction.'''
        if self.nature == 0:
            return 1
        elif self.nature == 1:
            op_count = 1
            size_count = 1
            first_key_count = 2
            keys_count = self.size * 2
            return op_count + size_count + first_key_count + keys_count
        elif self.nature == 2:
            op_count = 1
            size_count = 1
            keys_and_targets_count = self.size * 4
            return op_count + size_count + keys_and_targets_count
        elif self.nature == 3:
            op_count = 1
            element_width_count = 2
            # NOTE(review): len(self.data) counts bytes, while the other
            # branches count 16-bit code units -- confirm this mixed unit
            # is intentional.
            return op_count + element_width_count + len(self.data)
        else:
            raise ValueError("add support for NOP nature %u" % (self.nature))
    def dump(self, f=sys.stdout):
        '''Pretty-print the payload tables (or the bare mnemonic).'''
        if self.nature == 0:
            f.write('%s' % (self.get_name()))
        elif self.nature == 1:
            f.write('packed-switch-payload\n')
            f.write('INDEX KEY TARGET\n===== --------- ---------\n')
            for (i, target) in enumerate(self.targets):
                f.write('[%3u] %+8.8x %+8.8x\n' %
                        (i, self.first_key + i, target))
        elif self.nature == 2:
            f.write('sparse-switch-payload\n')
            f.write('INDEX KEY TARGET\n===== --------- ---------\n')
            for (i, key) in enumerate(self.keys):
                f.write('[%3u] %+8.8x %+8.8x\n' % (i, key, self.targets[i]))
        elif self.nature == 3:
            f.write('fill-array-data-payload (elem_width = %u, size = %u)\n' %
                    (self.element_width, self.size))
            file_extract.dump_memory(0, self.data, self.element_width, f)
    def emulate(self, emulator):
        # nop: nothing to emulate.
        pass
class Opcode01(Opcode):
    '''Op 0x01 "move": copy one register to another, both encoded as
    4-bit register numbers in a single code unit.'''
    ops = {0x01: 'move'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode02(Opcode):
    '''Op 0x02 "move/from16": 8-bit destination register, 16-bit source
    register, two code units.'''
    ops = {0x02: 'move/from16'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]

    def check_encoding(self, f=sys.stdout):
        # A plain "move" (one code unit) suffices when both registers
        # fit in 4 bits -- report the two saved bytes.
        dst, src = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move/from16" can be encoded as a "move"')
            f.write(' more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode03(Opcode):
    '''Op 0x03 "move/16": both registers are full 16-bit numbers taken
    from the second and third code units.'''
    ops = {0x03: 'move/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]

    def check_encoding(self, f=sys.stdout):
        # Prefer "move" (saves 4 bytes) when both registers fit in
        # 4 bits, else "move/from16" (saves 2) when dst fits in 8 bits.
        dst, src = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move/16" can be encoded as a "move"')
            f.write(' more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 4
        if dst <= UINT8_MAX:
            f.write('warning: "move/16" can be encoded as a "move/from16"')
            f.write(' more efficiently as its first register is <= %u\n' %
                    (UINT8_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode04(Opcode):
    '''Op 0x04 "move-wide": copy a register pair, both operands encoded
    as 4-bit register numbers in a single code unit.'''
    ops = {0x04: 'move-wide'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_A(), inst.get_B()]

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode05(Opcode):
    '''Op 0x05 "move-wide/from16": 8-bit destination pair, 16-bit source
    pair, two code units.'''
    ops = {0x05: 'move-wide/from16'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst.get_AA(), inst[1]]

    def check_encoding(self, f=sys.stdout):
        # A one-unit "move-wide" suffices when both registers fit in
        # 4 bits -- report the two saved bytes.
        dst, src = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move-wide/from16" can be encoded as a ')
            f.write('"move-wide" more efficiently as its registers are ')
            f.write('both <= %u\n' % (UINT4_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode06(Opcode):
    '''Op 0x06 "move-wide/16": both register pairs are full 16-bit
    numbers taken from the second and third code units.'''
    ops = {0x06: 'move-wide/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'

    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = [inst[1], inst[2]]

    def check_encoding(self, f=sys.stdout):
        # Prefer "move-wide" (saves 4 bytes) when both registers fit in
        # 4 bits, else "move-wide/from16" (saves 2) when dst fits in 8.
        dst, src = self.regs
        if dst <= UINT4_MAX and src <= UINT4_MAX:
            f.write('warning: "move-wide/16" can be encoded as a "move-wide" ')
            f.write('more efficiently as its registers are both <= %u\n' %
                    (UINT4_MAX))
            return 4
        if dst <= UINT8_MAX:
            f.write('warning: "move-wide/16" can be encoded as a ')
            f.write('"move-wide/from16" more efficiently as its first ')
            f.write('register is <= %u\n' % (UINT8_MAX))
            return 2
        return 0

    def dump(self, f=sys.stdout):
        dst, src = self.regs
        f.write('%s v%u, v%u' % (self.get_name(), dst, src))

    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode07(Opcode):
    """Decoder for 'move-object' (0x07): two 4-bit register operands."""
    ops = {0x07: 'move-object'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode08(Opcode):
    """Decoder for 'move-object/from16' (0x08): 8-bit dest, 16-bit source."""
    # NOTE(review): the op name below carries a trailing space — looks
    # unintentional but is preserved since get_name() output may be compared.
    ops = {0x08: 'move-object/from16 '}
    num_code_units = 2
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst[1])
    def check_encoding(self, f=sys.stdout):
        if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
            f.write('warning: "move-object/from16" can be encoded as a ')
            f.write('"move-object" more efficiently as its registers are ')
            f.write('both <= %u\n' % (UINT4_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode09(Opcode):
    """Decoder for 'move-object/16' (0x09): two 16-bit register operands."""
    ops = {0x09: 'move-object/16'}
    num_code_units = 3
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst[1])
        self.regs.append(inst[2])
    def check_encoding(self, f=sys.stdout):
        # Suggest the 4-bit or 8-bit destination variants when they fit.
        if self.regs[0] <= UINT4_MAX and self.regs[1] <= UINT4_MAX:
            f.write('warning: "move-object/16" can be encoded as a ')
            f.write('"move-object" more efficiently as its registers ')
            f.write('are both <= %u\n' % (UINT4_MAX))
            return 4
        if self.regs[0] <= UINT8_MAX:
            f.write('warning: "move-object/16" can be encoded as a ')
            f.write('"move-object/from16" more efficiently as its first ')
            f.write('register is <= %u\n' % (UINT8_MAX))
            return 2
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0A_0D(Opcode):
    """Decoder for the move-result family (0x0a-0x0d): single vAA operand."""
    ops = {
        0x0a: 'move-result',
        0x0b: 'move-result-wide',
        0x0c: 'move-result-object',
        0x0d: 'move-exception'
    }
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0E(Opcode):
    """Decoder for 'return-void' (0x0e): no operands."""
    ops = {0x0e: 'return-void'}
    num_code_units = 1
    max_regs = 0
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
    def dump(self, f=sys.stdout):
        f.write('%s' % (self.get_name()))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode0F(Opcode):
    """Decoder for 'return' (0x0f): single vAA operand."""
    ops = {0x0f: 'return'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode10(Opcode):
    """Decoder for 'return-wide' (0x10): single vAA operand."""
    ops = {0x10: 'return-wide'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode11(Opcode):
    """Decoder for 'return-object' (0x11): single vAA operand."""
    ops = {0x11: 'return-object'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode12(Opcode):
    """Decoder for 'const/4' (0x12): 4-bit sign-extended immediate into vA."""
    ops = {0x12: 'const/4'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'n'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_A()
        # The immediate lives in the top nibble of the first code unit.
        self.imm = sign_extending(inst[0] >> 12, 4)
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        # Restored the "#int <dec> // #<hex>" form used by the other
        # immediate opcodes (cf. OpcodeD0_D7.dump).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode13(Opcode):
    """Decoder for 'const/16' (0x13): 16-bit signed immediate into vAA."""
    ops = {0x13: 'const/16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 's'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16)
    def check_encoding(self, f=sys.stdout):
        # Returns the savings when "const/4" would fit, else 0.
        if (self.reg <= UINT4_MAX and INT4_MIN <= self.imm and
                self.imm <= INT4_MAX):
            f.write('warning: "const/16" can be encoded as a "const/4" more ')
            f.write('efficiently as its register is <= %u and ' % (UINT4_MAX))
            f.write('(%i <= %i <= %i)\n' % (INT4_MIN, self.imm, INT4_MAX))
            return 2
        return 0
    def new_encoding(self, f=sys.stdout):
        # Hypothetical tighter encoding for small positive constants.
        if (self.reg <= UINT4_MAX and self.imm > INT4_MAX and
                self.imm <= (INT4_MAX + UINT4_MAX)):
            f.write('"const/16" could be encoded as a new "const/u4" stores ')
            f.write('a 4 bit unsigned offset from +8 for a constant range ')
            f.write('of [8-24):\n')
            return 2
        return 0
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode14(Opcode):
    """Decoder for 'const' (0x14): 32-bit immediate into vAA."""
    ops = {0x14: 'const'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'i'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_uint32(1)
    def check_encoding(self, f=sys.stdout):
        # Returns the savings when "const/16" would fit, else 0.
        if (self.reg <= UINT8_MAX and INT16_MIN <= self.imm and
                self.imm <= INT16_MAX):
            f.write('warning: "const" can be encoded as a "const/16" more ')
            f.write('efficiently as its register is < %u ' % (UINT8_MAX))
            f.write('and (%i <= %i <= %i)\n' % (INT16_MIN, self.imm,
                                                INT16_MAX))
            return 2
        return 0
    def new_encoding(self, f=sys.stdout):
        # Hypothetical unsigned-offset variant for values just above INT16_MAX.
        if self.imm > INT16_MAX and self.imm <= (INT16_MAX + UINT16_MAX):
            f.write('"const" could be encoded as a new "const/u16" stores a ')
            f.write('16 bit unsigned offset from 32768 instead of a 16 bit ')
            f.write('signed value\n')
            return 2
        return 0
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode15(Opcode):
    """Decoder for 'const/high16' (0x15): 16-bit immediate shifted into the
    high half of vAA."""
    ops = {0x15: 'const/high16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'h'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst[1] << 16
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode16(Opcode):
    """Decoder for 'const-wide/16' (0x16): sign-extended 16-bit immediate."""
    ops = {0x16: 'const-wide/16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 's'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode17(Opcode):
    """Decoder for 'const-wide/32' (0x17): 32-bit signed immediate."""
    ops = {0x17: 'const-wide/32'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'i'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_sint32(1)
    def check_encoding(self, f=sys.stdout):
        if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
            f.write('warning: "const-wide/32" can be encoded as a ')
            # Fixed: the argument tuple had a stray leading UINT8_MAX for a
            # format string with only three conversions (TypeError at runtime).
            f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
            return 2
        return 0
    def new_encoding(self, f=sys.stdout):
        # Hypothetical unsigned-offset variant for values just above INT16_MAX.
        if self.imm > INT16_MAX and self.imm <= (INT16_MAX + UINT16_MAX):
            f.write('"const-wide/32" could be encoded as a new ')
            f.write('"const-wide/u16" stores a 16 bit unsigned offset from ')
            f.write('32768 instead of a 16 bit signed value\n')
            return 2
        return 0
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode18(Opcode):
    """Decoder for 'const-wide' (0x18): full 64-bit immediate into vAA."""
    ops = {0x18: 'const-wide/64'}
    num_code_units = 5
    max_regs = 1
    extra_data = 'l'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = inst.get_uint64(1)
    def check_encoding(self, f=sys.stdout):
        # Prefer the 16-bit form, then the 32-bit form, when the value fits.
        if INT16_MIN <= self.imm and self.imm <= INT16_MAX:
            f.write('warning: "const-wide/64" can be encoded as a ')
            f.write('"const-wide/16" more efficiently as (%i <= %i <= %i)\n' %
                    (INT16_MIN, self.imm, INT16_MAX))
            return 6
        if INT32_MIN <= self.imm and self.imm <= INT32_MAX:
            f.write('warning: "const-wide/64" can be encoded as a ')
            f.write('"const-wide/32" more efficiently as (%i <= %i <= %i)\n' %
                    (INT32_MIN, self.imm, INT32_MAX))
            return 4
        return 0
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode19(Opcode):
    """Decoder for 'const-wide/high16' (0x19): 16-bit immediate shifted into
    the high half of a 64-bit value."""
    ops = {0x19: 'const-wide/high16'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'h'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.imm = sign_extending(inst[1], 16) << 48
    def dump(self, f=sys.stdout):
        # Fixed: the format string was truncated (unterminated literal).
        f.write('%s v%u, #int %i // #%#x' %
                (self.get_name(), self.reg, self.imm, self.imm))
    def emulate(self, emulator):
        emulator.write_register(self.reg, self.imm)
class Opcode1A(Opcode):
    """Decoder for 'const-string' (0x1a): 16-bit string pool index."""
    ops = {0x1a: 'const-string'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.string_idx = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, string@%4.4x' %
                (self.get_name(), self.reg, self.string_idx))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1B(Opcode):
    """Decoder for 'const-string/jumbo' (0x1b): 32-bit string pool index."""
    ops = {0x1b: 'const-string/jumbo'}
    num_code_units = 3
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.string_idx = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, string@%8.8x' %
                (self.get_name(), self.reg, self.string_idx))
    def check_encoding(self, f=sys.stdout):
        # Fixed: this compared nonexistent self.signed_offset (AttributeError);
        # the attribute set by __init__ is self.string_idx.
        if self.string_idx <= UINT16_MAX:
            f.write('warning: "const-string/jumbo" can be encoded as a ')
            f.write('"const-string" more efficiently as its offset is ')
            f.write('<= UINT16_MAX\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1C(Opcode):
    """Decoder for 'const-class' (0x1c): 16-bit type index."""
    ops = {0x1c: 'const-class'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1D(Opcode):
    """Decoder for 'monitor-enter' (0x1d): single vAA operand."""
    ops = {0x1d: 'monitor-enter'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1E(Opcode):
    """Decoder for 'monitor-exit' (0x1e): single vAA operand."""
    ops = {0x1e: 'monitor-exit'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode1F(Opcode):
    """Decoder for 'check-cast' (0x1f): vAA register plus 16-bit type index."""
    ops = {0x1f: 'check-cast'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode20(Opcode):
    """Decoder for 'instance-of' (0x20): two 4-bit registers + type index."""
    ops = {0x20: 'instance-of'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, type@%4.4x' %
                (self.get_name(), self.regs[0], self.regs[1], self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode21(Opcode):
    """Decoder for 'array-length' (0x21): two 4-bit register operands."""
    ops = {0x21: 'array-length'}
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode22(Opcode):
    """Decoder for 'new-instance' (0x22): vAA register plus type index."""
    ops = {0x22: 'new-instance'}
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, type@%4.4x' % (self.get_name(), self.reg, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode23(Opcode):
    """Decoder for 'new-array' (0x23): two 4-bit registers + type index."""
    ops = {0x23: 'new-array'}
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.type = inst[1]
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, type@%4.4x' %
                (self.get_name(), self.regs[0], self.regs[1], self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode24(Opcode):
    """Decoder for 'filled-new-array' (0x24): up to five 4-bit register
    operands plus a 16-bit type index."""
    ops = {0x24: 'filled-new-array'}
    num_code_units = 3
    max_regs = 5
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.type = inst[1]
        # The operand count sits in the top nibble of the first code unit;
        # the register nibbles are packed into inst[2], with one extra high
        # nibble carried in inst[0].
        count = inst[0] >> 12
        packed = inst[2] | ((inst[0] << 8) & 0xf0000)
        self.regs = [(packed >> (4 * i)) & 0xf for i in range(count)]
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        f.write(', '.join("v%u" % (r) for r in self.regs))
        f.write("} type@%4.4x" % (self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode25(Opcode):
    """Decoder for 'filled-new-array/range' (0x25, format 3rc): a run of
    consecutive registers starting at inst[2]."""
    # NOTE(review): the op name below carries a trailing space — preserved.
    ops = {0x25: 'filled-new-array/range '}
    num_code_units = 3
    max_regs = 'r'
    extra_data = 'c'
    format = '3rc'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        count = inst.get_AA()
        self.type = inst[1]
        base = inst[2]
        self.regs = [base + i for i in range(count)]
    def dump(self, f=sys.stdout):
        reg_list = ', '.join("v%u" % (r) for r in self.regs)
        f.write("%s {%s} type@%4.4x" % (self.get_name(), reg_list, self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode26(Opcode):
    """Decoder for 'fill-array-data' (0x26): vAA plus a signed 32-bit
    branch offset to the data payload."""
    ops = {0x26: 'fill-array-data'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.signed_offset = inst.get_sint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %8.8x // %s' % (self.get_name(), self.reg,
                self.inst.code_unit_idx + self.signed_offset,
                get_signed_hex_offset_as_str(self.signed_offset, 8)))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode27(Opcode):
    """Decoder for 'throw' (0x27): single vAA operand."""
    ops = {0x27: 'throw'}
    num_code_units = 1
    max_regs = 1
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
    def dump(self, f=sys.stdout):
        f.write('%s v%u' % (self.get_name(), self.reg))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode28(Opcode):
    """Decoder for 'goto' (0x28): signed 8-bit branch offset."""
    ops = {0x28: 'goto'}
    num_code_units = 1
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = inst.get_signed_AA()
    def check_encoding(self, f=sys.stdout):
        # A zero offset is an invalid "goto"; no smaller encoding exists,
        # so always return 0 savings.
        if self.signed_offset == 0:
            f.write('error: "goto" has a zero offset (invalid encoding)\n')
        return 0
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode29(Opcode):
    """Decoder for 'goto/16' (0x29): signed 16-bit branch offset."""
    ops = {0x29: 'goto/16'}
    num_code_units = 2
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def check_encoding(self, f=sys.stdout):
        # Zero offset is invalid; an 8-bit offset fits the shorter "goto".
        if self.signed_offset == 0:
            f.write(
                'error: "goto/16" has a zero offset (invalid encoding)\n')
        elif INT8_MIN <= self.signed_offset and self.signed_offset <= INT8_MAX:
            f.write('warning: "goto/16" can be encoded as a "goto" more ')
            f.write('efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2A(Opcode):
    """Decoder for 'goto/32' (0x2a): signed 32-bit branch offset."""
    ops = {0x2A: 'goto/32'}
    num_code_units = 3
    max_regs = 0
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.signed_offset = inst.get_sint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s %4.4x // %+i' % (self.get_name(),
                self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def check_encoding(self, f=sys.stdout):
        # Note: only goto/32 may legally have a zero offset, hence the
        # early return instead of an error.
        if self.signed_offset == 0:
            return 0
        if INT8_MIN <= self.signed_offset and self.signed_offset <= INT8_MAX:
            f.write('warning: "goto/32" can be encoded as a "goto" more ')
            f.write('efficiently since (INT8_MIN <= offset <= INT8_MAX)\n')
            return 2
        if INT16_MIN <= self.signed_offset and self.signed_offset <= INT16_MAX:
            f.write('warning: "goto/32" can be encoded as a "goto/16" more ')
            f.write('efficiently since (INT16_MIN <= offset <= INT16_MAX)\n')
            return 4
        return 0
    def new_encoding(self, f=sys.stdout):
        # Hypothetical 24-bit branch form; only reported when the offset
        # doesn't already fit an existing shorter encoding.
        if INT16_MIN <= self.signed_offset and self.signed_offset <= INT16_MAX:
            return 0
        if INT24_MIN <= self.signed_offset and self.signed_offset <= INT24_MAX:
            f.write('"goto/32" could be encoded as a new "goto/16" where ')
            f.write('that opcode uses the extra 8 bits in the first code ')
            f.write('unit to provide a 24 bit branch range\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2B(Opcode):
    """Decoder for 'packed-switch' (0x2b): vAA plus 32-bit table offset."""
    ops = {0x2b: 'packed-switch'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.branch = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
                self.inst.get_code_unit_index() + self.branch, self.branch))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2C(Opcode):
    """Decoder for 'sparse-switch' (0x2c): vAA plus 32-bit table offset."""
    ops = {0x2c: 'sparse-switch'}
    num_code_units = 3
    max_regs = 1
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.branch = inst.get_uint32(1)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %8.8x // +%8.8x' % (self.get_name(), self.reg,
                self.inst.get_code_unit_index() + self.branch, self.branch))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode2D_31(Opcode):
    """Decoder for the compare opcodes 0x2d-0x31: three 8-bit registers."""
    ops = {
        0x2d: 'cmpl-float (lt bias)',
        0x2e: 'cmpg-float (gt bias)',
        0x2f: 'cmpl-double (lt bias)',
        0x30: 'cmpg-double (gt bias)',
        0x31: 'cmp-long',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.regs.append(inst.get_uint8_hi(1))
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, v%u" %
                (self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode32_37(Opcode):
    """Decoder for the two-register conditional branches 0x32-0x37."""
    ops = {
        0x32: 'if-eq',
        0x33: 'if-ne',
        0x34: 'if-lt',
        0x35: 'if-ge',
        0x36: 'if-gt',
        0x37: 'if-le',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 't'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, %4.4x // %i' % (self.get_name(), self.regs[0],
                self.regs[1], self.inst.code_unit_idx + self.signed_offset,
                self.signed_offset))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode38_3D(Opcode):
    """Decoder for the compare-to-zero branches 0x38-0x3d (vAA + offset)."""
    ops = {
        0x38: 'if-eqz',
        0x39: 'if-nez',
        0x3a: 'if-ltz',
        0x3b: 'if-gez',
        0x3c: 'if-gtz',
        0x3d: 'if-lez',
    }
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.signed_offset = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, %4.4x // %s' % (self.get_name(), self.reg,
                self.signed_offset + self.inst.code_unit_idx,
                get_signed_hex_offset_as_str(self.signed_offset, 4)))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode44_51(Opcode):
    """Decoder for the array get/put opcodes 0x44-0x51: three 8-bit regs."""
    ops = {
        0x44: 'aget',
        0x45: 'aget-wide',
        0x46: 'aget-object',
        0x47: 'aget-boolean',
        0x48: 'aget-byte',
        0x49: 'aget-char',
        0x4a: 'aget-short',
        0x4b: 'aput',
        0x4c: 'aput-wide',
        0x4d: 'aput-object',
        0x4e: 'aput-boolean',
        0x4f: 'aput-byte',
        0x50: 'aput-char',
        0x51: 'aput-short',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.regs.append(inst.get_uint8_hi(1))
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, v%u" %
                (self.get_name(), self.regs[0], self.regs[1], self.regs[2]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode52_5f(Opcode):
    """Decoder for instance field get/put opcodes 0x52-0x5f: two 4-bit
    registers plus a 16-bit field index."""
    ops = {
        0x52: 'iget',
        0x53: 'iget-wide',
        0x54: 'iget-object',
        0x55: 'iget-boolean',
        0x56: 'iget-byte',
        0x57: 'iget-char',
        0x58: 'iget-short',
        0x59: 'iput',
        0x5a: 'iput-wide',
        0x5b: 'iput-object',
        0x5c: 'iput-boolean',
        0x5d: 'iput-byte',
        0x5e: 'iput-char',
        0x5f: 'iput-short',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.field = inst[1]
    def dump(self, f=sys.stdout):
        f.write("%s v%u, v%u, field@%4.4x" %
                (self.get_name(), self.regs[0], self.regs[1], self.field))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode60_6d(Opcode):
    """Decoder for static field get/put opcodes 0x60-0x6d: vAA plus a
    16-bit field index."""
    ops = {
        0x60: 'sget',
        0x61: 'sget-wide',
        0x62: 'sget-object',
        0x63: 'sget-boolean',
        0x64: 'sget-byte',
        0x65: 'sget-char',
        0x66: 'sget-short',
        0x67: 'sput',
        0x68: 'sput-wide',
        0x69: 'sput-object',
        0x6a: 'sput-boolean',
        0x6b: 'sput-byte',
        0x6c: 'sput-char',
        0x6d: 'sput-short',
    }
    num_code_units = 2
    max_regs = 1
    extra_data = 'c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.reg = inst.get_AA()
        self.field = inst.get_uint16(1)
    def dump(self, f=sys.stdout):
        f.write("%s v%u, field@%4.4x" %
                (self.get_name(), self.reg, self.field))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
# Module-wide tallies of invoke instructions that could / could not use the
# proposed "/min-range" encoding (updated by Opcode6E_72.new_encoding).
can_use_new_encoding = 0
cant_use_new_encoding = 0
class Opcode6E_72(Opcode):
    """Decoder for the invoke opcodes 0x6e-0x72 (format 35c): up to five
    4-bit argument registers plus a 16-bit method index."""
    ops = {
        0x6e: 'invoke-virtual',
        0x6f: 'invoke-super',
        0x70: 'invoke-direct',
        0x71: 'invoke-static',
        0x72: 'invoke-interface',
    }
    num_code_units = 3
    max_regs = 5
    extra_data = 'c'
    format = '35c'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # Argument count is the top nibble of the first code unit; register
        # nibbles are packed into inst[2] plus one high nibble from inst[0].
        arg_count = inst[0] >> 12
        self.method_idx = inst[1]
        self.regs = list()
        regs = inst[2] | ((inst[0] << 8) & 0xf0000)
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} method@%4.4x" % (self.method_idx))
    def new_encoding(self, f=sys.stdout):
        # Report whether a hypothetical "/min-range" form would apply, and
        # tally the outcome in the module-level counters.
        if (self.regs_are_sequential() and
                (len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
                len(self.regs) <= UINT4_MAX):
            global can_use_new_encoding
            can_use_new_encoding += 1
            name = self.get_name()
            f.write('"%s" can be encoded as "%s/min-range" ' % (name, name))
            f.write('where the first register is contained in the first ')
            f.write('opcode\n')
            return 2
        global cant_use_new_encoding
        cant_use_new_encoding += 1
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode74_78(Opcode):
    """Decoder for the invoke/range opcodes 0x74-0x78 (format 3rc): a run
    of consecutive argument registers plus a 16-bit method index."""
    ops = {
        0x74: 'invoke-virtual/range',
        0x75: 'invoke-super/range',
        0x76: 'invoke-direct/range',
        0x77: 'invoke-static/range',
        0x78: 'invoke-interface/range',
    }
    num_code_units = 3
    max_regs = 'r'
    extra_data = 'c'
    format = '3rc'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        arg_count = inst.get_AA()
        self.method_idx = inst[1]
        first_reg = inst[2]
        self.regs = list()
        for i in range(arg_count):
            self.regs.append(first_reg + i)
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} method@%4.4x" % (self.method_idx))
    def new_encoding(self, f=sys.stdout):
        # Report whether a hypothetical "/min-range" form would apply.
        if (self.regs_are_sequential() and
                (len(self.regs) == 0 or self.regs[0] <= UINT4_MAX) and
                len(self.regs) <= UINT4_MAX):
            name = self.get_name()
            f.write('"%s" can be encoded as a "%s/min-range" ' % (name, name))
            f.write('where the first register is contained in the first ')
            f.write('opcode\n')
            return 2
        return 0
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode7B_8F(Opcode):
    """Decoder for the unary-op/conversion opcodes 0x7b-0x8f: two 4-bit
    register operands."""
    ops = {
        0x7b: 'neg-int',
        0x7c: 'not-int',
        0x7d: 'neg-long',
        0x7e: 'not-long',
        0x7f: 'neg-float',
        0x80: 'neg-double',
        0x81: 'int-to-long',
        0x82: 'int-to-float',
        0x83: 'int-to-double',
        0x84: 'long-to-int',
        0x85: 'long-to-float',
        0x86: 'long-to-double',
        0x87: 'float-to-int',
        0x88: 'float-to-long',
        0x89: 'float-to-double',
        0x8a: 'double-to-int',
        0x8b: 'double-to-long',
        0x8c: 'double-to-float',
        0x8d: 'int-to-byte',
        0x8e: 'int-to-char',
        0x8f: 'int-to-short',
    }
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class Opcode90_AF(Opcode):
    """Decoder for the three-register binary arithmetic opcodes 0x90-0xaf:
    destination vAA plus two 8-bit source registers."""
    ops = {
        0x90: 'add-int',
        0x91: 'sub-int',
        0x92: 'mul-int',
        0x93: 'div-int',
        0x94: 'rem-int',
        0x95: 'and-int',
        0x96: 'or-int',
        0x97: 'xor-int',
        0x98: 'shl-int',
        0x99: 'shr-int',
        0x9a: 'ushr-int',
        0x9b: 'add-long',
        0x9c: 'sub-long',
        0x9d: 'mul-long',
        0x9e: 'div-long',
        0x9f: 'rem-long',
        0xa0: 'and-long',
        0xa1: 'or-long',
        0xa2: 'xor-long',
        0xa3: 'shl-long',
        0xa4: 'shr-long',
        0xa5: 'ushr-long',
        0xa6: 'add-float',
        0xa7: 'sub-float',
        0xa8: 'mul-float',
        0xa9: 'div-float',
        0xaa: 'rem-float',
        0xab: 'add-double',
        0xac: 'sub-double',
        0xad: 'mul-double',
        0xae: 'div-double',
        0xaf: 'rem-double',
    }
    num_code_units = 2
    max_regs = 3
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        # [dest, src1, src2]: vAA then the two bytes of the second code unit.
        self.regs = [inst.get_AA(), inst.get_uint8_lo(1),
                     inst.get_uint8_hi(1)]
    def dump(self, f=sys.stdout):
        dst, lhs, rhs = self.regs
        f.write("%s v%u, v%u, v%u" % (self.get_name(), dst, lhs, rhs))
    def opIsCommutative(self):
        """Return True if the operands of this opcode may be swapped."""
        return self.get_op() in (
            0x90,  # add-int
            0x92,  # mul-int
            0x95,  # and-int
            0x96,  # or-int
            0x97,  # xor-int
            0x9b,  # add-long
            0x9d,  # mul-long
            0xa0,  # and-long
            0xa1,  # or-long
            0xa2,  # xor-long
            0xa6,  # add-float
            0xa8,  # mul-float
            0xab,  # add-double
            0xad,  # mul-double
        )
    def check_encoding(self, f=sys.stdout):
        """Warn when a /2addr encoding would fit; return the bytes saved."""
        dst, lhs, rhs = self.regs
        name = self.get_name()
        if dst == lhs and dst <= UINT4_MAX and rhs <= UINT4_MAX:
            f.write('warning: "%s" can be encoded more efficiently ' % (name))
            f.write('as "%s/2addr v%u, v%u"\n' % (name, dst, rhs))
            return 2
        if (dst == rhs and dst <= UINT4_MAX and lhs <= UINT4_MAX and
                self.opIsCommutative()):
            f.write('warning: "%s" is commutative and can be ' % (name))
            f.write('encoded more efficiently as "%s/2addr v%u, v%u"\n' %
                    (name, dst, lhs))
            return 2
        return 0  # Return zero to indicate we can't save any bytes
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class OpcodeB0_CF(Opcode):
    """Decoder for the two-address binary arithmetic opcodes 0xb0-0xcf:
    two 4-bit register operands."""
    ops = {
        0xb0: 'add-int/2addr',
        0xb1: 'sub-int/2addr',
        0xb2: 'mul-int/2addr',
        0xb3: 'div-int/2addr',
        0xb4: 'rem-int/2addr',
        0xb5: 'and-int/2addr',
        0xb6: 'or-int/2addr',
        0xb7: 'xor-int/2addr',
        0xb8: 'shl-int/2addr',
        0xb9: 'shr-int/2addr',
        0xba: 'ushr-int/2addr',
        0xbb: 'add-long/2addr',
        0xbc: 'sub-long/2addr',
        0xbd: 'mul-long/2addr',
        0xbe: 'div-long/2addr',
        0xbf: 'rem-long/2addr',
        0xc0: 'and-long/2addr',
        0xc1: 'or-long/2addr',
        0xc2: 'xor-long/2addr',
        0xc3: 'shl-long/2addr',
        0xc4: 'shr-long/2addr',
        0xc5: 'ushr-long/2addr',
        0xc6: 'add-float/2addr',
        0xc7: 'sub-float/2addr',
        0xc8: 'mul-float/2addr',
        0xc9: 'div-float/2addr',
        0xca: 'rem-float/2addr',
        0xcb: 'add-double/2addr',
        0xcc: 'sub-double/2addr',
        0xcd: 'mul-double/2addr',
        0xce: 'div-double/2addr',
        0xcf: 'rem-double/2addr ',
    }
    num_code_units = 1
    max_regs = 2
    extra_data = 'x'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u' % (self.get_name(), self.regs[0], self.regs[1]))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class OpcodeD0_D7(Opcode):
    """Decoder for the lit16 binary arithmetic opcodes 0xd0-0xd7: two 4-bit
    registers plus a sign-extended 16-bit immediate."""
    ops = {
        0xd0: 'add-int/lit16',
        0xd1: 'rsub-int/lit16',
        0xd2: 'mul-int/lit16',
        0xd3: 'div-int/lit16',
        0xd4: 'rem-int/lit16',
        0xd5: 'and-int/lit16',
        0xd6: 'or-int/lit16',
        0xd7: 'xor-int/lit16',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 's'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_A())
        self.regs.append(inst.get_B())
        self.imm = sign_extending(inst[1], 16)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
                self.regs[0], self.regs[1], self.imm, self.imm))
    def emulate(self, emulator):
        # Fixed: this referenced nonexistent self.reg (AttributeError); the
        # destination register is regs[0].  NOTE(review): only the immediate
        # is recorded, not the result of the arithmetic — confirm whether
        # that fidelity is intended for this emulator.
        emulator.write_register(self.regs[0], self.imm)
class OpcodeD8_E2(Opcode):
    """Decoder for the lit8 binary arithmetic opcodes 0xd8-0xe2: vAA, an
    8-bit source register and a sign-extended 8-bit immediate."""
    ops = {
        0xd8: 'add-int/lit8',
        0xd9: 'rsub-int/lit8',
        0xda: 'mul-int/lit8',
        0xdb: 'div-int/lit8',
        0xdc: 'rem-int/lit8',
        0xdd: 'and-int/lit8',
        0xde: 'or-int/lit8',
        0xdf: 'xor-int/lit8',
        0xe0: 'shl-int/lit8',
        0xe1: 'shr-int/lit8',
        0xe2: 'ushr-int/lit8',
    }
    num_code_units = 2
    max_regs = 2
    extra_data = 'b'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        self.regs = list()
        self.regs.append(inst.get_AA())
        self.regs.append(inst.get_uint8_lo(1))
        self.imm = sign_extending(inst.get_uint8_hi(1), 8)
    def dump(self, f=sys.stdout):
        f.write('%s v%u, v%u, #int %i // #%#x' % (self.get_name(),
                self.regs[0], self.regs[1], self.imm, self.imm))
    def emulate(self, emulator):
        # Fixed: this referenced nonexistent self.reg (AttributeError); the
        # destination register is regs[0].  NOTE(review): only the immediate
        # is recorded, not the result of the arithmetic — confirm whether
        # that fidelity is intended for this emulator.
        emulator.write_register(self.regs[0], self.imm)
class OpcodeFA(Opcode):
    """Decoder for 'invoke-polymorphic' (0xfa).

    NOTE(review): __init__ unconditionally raises, so decoding (and the
    dump method, which reads self.type — an attribute never assigned here,
    presumably meant to be self.proto) is deliberately unreachable until a
    real instance of this opcode is encountered and debugged.
    """
    ops = {0xfa: 'invoke-polymorphic'}
    num_code_units = 4
    max_regs = 5
    extra_data = 'cc'
    def __init__(self, inst, code_units):
        Opcode.__init__(self, inst)
        raise ValueError('debug this when we find one of these')
        arg_count = inst[0] >> 12
        self.method_ref_idx = inst[1]
        self.method_hdl_ref = inst[2]
        self.regs = list()
        regs = inst[3] | ((inst[0] << 8) & 0xf0000)
        self.proto = inst[4]
        for i in range(arg_count):
            self.regs.append(regs & 0xf)
            regs >>= 4
    def dump(self, f=sys.stdout):
        f.write("%s {" % (self.get_name()))
        first = True
        for reg in self.regs:
            if not first:
                f.write(', ')
            f.write("v%u" % (reg))
            first = False
        f.write("} type@%4.4x" % (self.type))
    def emulate(self, emulator):
        raise ValueError('emulate not supported')
class CodeUnits(Opcode):
    """Forward-only cursor over an array of 16-bit code units."""

    def __init__(self, code_units):
        self.code_units = code_units
        self.idx = 0  # cursor position of the next unit to consume

    def index_is_valid(self):
        # True while the cursor has not run off the end of the array.
        return self.idx < len(self.code_units)

    def get_index(self):
        return self.idx

    def peek_code_unit(self, idx):
        # Random access that does not move the cursor.
        return self.code_units[idx]

    def get_int(self):
        # Signed 32-bit view of the next two code units.
        return sign_extending(self.get_uint(), 32)

    def get_uint(self):
        # Little-endian order: the low half is stored first.
        low = self.get_code_unit()
        high = self.get_code_unit()
        return low | (high << 16)

    def get_code_unit(self):
        # Consume and return the unit under the cursor.
        unit = self.code_units[self.idx]
        self.idx += 1
        return unit
def swap16(u):
    """Swap the two bytes of a 16-bit value (endianness flip)."""
    return ((u & 0x00ff) << 8) | ((u & 0xff00) >> 8)
class DexInstruction(object):
    """One decoded DEX instruction: its raw code units plus an Opcode object.

    ``decode()`` consumes units from a CodeUnits cursor, picks the Opcode
    subclass from ``opcode_defs`` and instantiates it; most other methods
    are bit-field accessors over ``self.code_units``.
    """

    # Maps opcode byte (0..255) -> Opcode subclass; filled by initialize().
    opcode_defs = list()

    @classmethod
    def initialize(cls):
        """Build the 256-entry opcode dispatch table (call once at import)."""
        opcode_classes = [
            Opcode00,
            Opcode01,
            Opcode02,
            Opcode03,
            Opcode04,
            Opcode05,
            Opcode06,
            Opcode07,
            Opcode08,
            Opcode09,
            Opcode0A_0D,
            Opcode0E,
            Opcode0F,
            Opcode10,
            Opcode11,
            Opcode12,
            Opcode13,
            Opcode14,
            Opcode15,
            Opcode16,
            Opcode17,
            Opcode18,
            Opcode19,
            Opcode1A,
            Opcode1B,
            Opcode1C,
            Opcode1D,
            Opcode1E,
            Opcode1F,
            Opcode20,
            Opcode21,
            Opcode22,
            Opcode23,
            Opcode24,
            Opcode25,
            Opcode26,
            Opcode27,
            Opcode28,
            Opcode29,
            Opcode2A,
            Opcode2B,
            Opcode2C,
            Opcode2D_31,
            Opcode32_37,
            Opcode38_3D,
            Opcode44_51,
            Opcode52_5f,
            Opcode60_6d,
            Opcode6E_72,
            Opcode74_78,
            Opcode7B_8F,
            Opcode90_AF,
            OpcodeB0_CF,
            OpcodeD0_D7,
            OpcodeD8_E2,
            OpcodeFA,
        ]
        for i in range(256):
            cls.opcode_defs.append(None)
        for opcode_class in opcode_classes:
            for op in opcode_class.ops:
                if cls.opcode_defs[op] is None:
                    cls.opcode_defs[op] = opcode_class
                else:
                    raise ValueError("registering the same opcode twice: "
                                     "%#2.2x in %s" % (op, str(opcode_class)))

    def dump(self, f=sys.stdout, suffix='\n'):
        """Write '<addr>: <raw code units> <disassembly>' to *f*."""
        f.write('%4.4x:' % (self.code_unit_idx))
        for code_unit in self.code_units:
            f.write(' %4.4x' % (swap16(code_unit)))
        num_code_units = len(self.code_units)
        # Pad so the disassembly column lines up for short instructions.
        if num_code_units < 5:
            pad = 5 - num_code_units
            for i in range(pad):
                f.write(' ')
        f.write(' ')
        self.instruction.dump(f=f)
        if suffix:
            f.write(suffix)

    def __init__(self):
        self.code_unit_idx = -1   # offset of this instruction, in code units
        self.code_units = None    # raw 16-bit units, filled in by decode()

    def check_encoding(self, f=sys.stdout):
        """Return bytes wasted by the current encoding; dump if non-zero."""
        bytes_saved = self.instruction.check_encoding(f)
        if bytes_saved:
            self.dump(f)
        return bytes_saved

    def new_encoding(self, f=sys.stdout):
        """Return bytes a proposed new encoding would save; dump if non-zero."""
        bytes_saved = self.instruction.new_encoding(f)
        if bytes_saved:
            self.dump(f)
        return bytes_saved

    def get_code_unit_index(self):
        return self.code_unit_idx

    def decode(self, code_units):
        """Consume units from *code_units* and build ``self.instruction``."""
        self.code_unit_idx = code_units.get_index()
        self.code_units = list()
        self.code_units.append(code_units.get_code_unit())
        op = self.get_op()
        opcode_class = self.opcode_defs[op]
        if opcode_class is None:
            raise ValueError("unsupported opcode %#4.4x" % (swap16(self[0])))
        for i in range(1, opcode_class.num_code_units):
            self.code_units.append(code_units.get_code_unit())
        self.instruction = opcode_class(self, code_units)

    def get_name(self):
        return self.instruction.get_name()

    def get_num_code_units(self):
        return self.instruction.get_num_code_units()

    def get_op(self):
        """Opcode byte: low 8 bits of the first code unit."""
        return self.code_units[0] & 0xff

    def get_A(self):
        """4-bit A field (bits 8..11 of the first code unit)."""
        return (self.code_units[0] >> 8) & 0xf

    def get_B(self):
        """4-bit B field (bits 12..15 of the first code unit)."""
        return (self.code_units[0] >> 12) & 0xf

    def get_AA(self):
        """8-bit AA field (high byte of the first code unit)."""
        return self.get_uint8_hi(0)

    def get_signed_AA(self):
        return sign_extending(self.get_AA(), 8)

    def get_uint8_lo(self, idx):
        return self.code_units[idx] & 0xff

    def get_sint8_lo(self, idx):
        # BUG FIX: *idx* was dropped, so get_uint8_lo() raised TypeError.
        return sign_extending(self.get_uint8_lo(idx), 8)

    def get_uint8_hi(self, idx):
        return (self.code_units[idx] >> 8) & 0xff

    def get_sint8_hi(self, idx):
        # BUG FIX: forward *idx* to get_uint8_hi (was called with no args).
        return sign_extending(self.get_uint8_hi(idx), 8)

    def get_uint16(self, idx):
        return self.code_units[idx]

    def get_sint16(self, idx):
        # BUG FIX: forward *idx* to get_uint16 (was called with no args).
        return sign_extending(self.get_uint16(idx), 16)

    def get_uint32(self, idx):
        return self.code_units[idx + 1] << 16 | self.code_units[idx]

    def get_sint32(self, idx):
        return sign_extending(self.get_uint32(idx), 32)

    def get_uint64(self, idx):
        return (self.code_units[idx + 3] << 48 |
                self.code_units[idx + 2] << 32 |
                self.code_units[idx + 1] << 16 |
                self.code_units[idx])

    def get_sint64(self, idx):
        return sign_extending(self.get_uint64(idx), 64)

    def __len__(self):
        """Number of code units in this instruction."""
        return len(self.code_units)

    def __getitem__(self, key):
        return self.code_units[key]

    def emulate(self, emulator):
        self.instruction.emulate(emulator)
# Populate DexInstruction.opcode_defs once at import time.
DexInstruction.initialize()
def get_percentage(part, total):
    """Return *part* expressed as a percentage of *total*."""
    return 100.0 * float(part) / float(total)
def print_code_stats(size, total_size, file_size):
    """Report how many code bytes better opcode encoding would save."""
    code_savings = get_percentage(size, total_size)
    file_savings = get_percentage(size, file_size)
    message = ('error: %u of %u code bytes (%u file bytes) '
               % (size, total_size, file_size))
    message += 'could be saved by encoding opcodes more efficiently '
    message += ('(%2.2f%% code savings, %2.2f%% file savings).\n'
                % (code_savings, file_savings))
    print(message)
def print_debug_stats(size, file_size):
    """Report how many debug-info bytes better encoding would save."""
    file_savings = get_percentage(size, file_size)
    message = 'error: %u debug info bytes of %u file ' % (size, file_size)
    message += 'bytes could be saved by encoding debug info more '
    message += 'efficiently (%2.2f%% file savings).\n' % (file_savings)
    print(message)
def print_encoding_stats(size, total_size, file_size):
    """Report savings available from the proposed new opcode encodings."""
    code_savings = get_percentage(size, total_size)
    file_savings = get_percentage(size, file_size)
    # BUG FIX: the message used to read "... could be saved could be saved
    # by encoding ..." because both print calls carried the same phrase.
    print('%u of %u code bytes ' % (size, total_size), end='')
    print('could be saved by encoding opcodes more efficiently ', end='')
    print('(%2.2f%% code savings, %2.2f%% file savings).\n' % (code_savings,
                                                              file_savings))
class DexEmulator(object):
    """Minimal register file used by the opcode ``emulate()`` hooks."""

    def __init__(self):
        self.registers = {}  # register number -> last value written
        self.pc = 0          # program counter (not advanced yet)

    def read_register(self, reg):
        """Return the value of *reg*; ValueError if never written."""
        try:
            return self.registers[reg]
        except KeyError:
            raise ValueError("reading register with no value")

    def write_register(self, reg, value):
        self.registers[reg] = value

    def emulate(self, uint16_array):
        # Instruction stream execution is not implemented yet.
        pass
def main():
    """Command-line driver: parse options, then process each DEX file."""
    # --- command-line interface -------------------------------------------
    usage = 'Usage: dex.py [options] [dex file(s)]'
    parser = optparse.OptionParser(
        usage=usage,
        description='A script that parses DEX files.')
    parser.add_option('-v', '--verbose',
                      action='store_true',
                      dest='verbose',
                      help='display verbose debug info',
                      default=False)
    parser.add_option('-C', '--color',
                      action='store_true',
                      dest='color',
                      help='Enable colorized output',
                      default=False)
    parser.add_option('-a', '--all',
                      action='store_true',
                      dest='dump_all',
                      help='Dump all DEX sections.',
                      default=False)
    parser.add_option('-H', '--header',
                      action='store_true',
                      dest='dump_header',
                      help='Dump the DEX file header.',
                      default=False)
    parser.add_option('--map-list',
                      action='store_true',
                      dest='dump_map_list',
                      help='Dump the DEX map list info.',
                      default=False)
    parser.add_option('-s', '--strings',
                      action='store_true',
                      dest='dump_strings',
                      help='Dump the DEX strings.',
                      default=False)
    parser.add_option('-t', '--types',
                      action='store_true',
                      dest='dump_types',
                      help='Dump the DEX types.',
                      default=False)
    parser.add_option('-p', '--protos',
                      action='store_true',
                      dest='dump_protos',
                      help='Dump the DEX protos.',
                      default=False)
    parser.add_option('-f', '--fields',
                      action='store_true',
                      dest='dump_fields',
                      help='Dump the DEX fields.',
                      default=False)
    parser.add_option('-m', '--methods',
                      action='store_true',
                      dest='dump_methods',
                      help='Dump the DEX methods.',
                      default=False)
    parser.add_option('--method-handles',
                      action='store_true',
                      dest='dump_method_handles',
                      help='Dump the DEX method handles.',
                      default=False)
    parser.add_option('--classes',
                      action='store_true',
                      dest='dump_classes',
                      help='Dump the DEX classes.',
                      default=False)
    parser.add_option('--class',
                      dest='class_filter',
                      help='Find a class by name. ' +
                      'Accepts `Lpath/to/Class;` or `path.to.Class`',
                      default=None)
    parser.add_option('--method',
                      dest='method_filter',
                      help='Find a method by name. Must be used with --class',
                      default=None)
    parser.add_option('--call-sites',
                      action='store_true',
                      dest='dump_call_sites',
                      help='Dump the DEX call sites.',
                      default=False)
    parser.add_option('--code',
                      action='store_true',
                      dest='dump_code',
                      help='Dump the DEX code in all class methods.',
                      default=False)
    parser.add_option('--code-items',
                      action='store_true',
                      dest='dump_code_items',
                      help='Dump the DEX code items.',
                      default=False)
    parser.add_option('--code-duplication',
                      action='store_true',
                      dest='code_duplication',
                      help=('Dump any methods in the DEX file that have the '
                            'same instructions.'),
                      default=False)
    parser.add_option('--debug',
                      action='store_true',
                      dest='debug',
                      help='Dump the DEX debug info.',
                      default=False)
    parser.add_option('-d', '--disassemble',
                      action='store_true',
                      dest='dump_disassembly',
                      help='Dump the DEX code items instructions.',
                      default=False)
    parser.add_option('--stats',
                      action='store_true',
                      dest='dump_stats',
                      help='Dump the DEX opcode statistics.',
                      default=False)
    parser.add_option('--check-encoding',
                      action='store_true',
                      dest='check_encoding',
                      help='Verify opcodes are efficiently encoded.',
                      default=False)
    parser.add_option('--new-encoding',
                      action='store_true',
                      dest='new_encoding',
                      help='Report byte savings from potential new encodings.',
                      default=False)
    parser.add_option('--proguard',
                      dest='proguard',
                      help='Specify a progard file to use for demangling.',
                      default=None)
    (options, files) = parser.parse_args()
    # --- accumulators aggregated across all input files -------------------
    total_code_bytes_inefficiently_encoded = 0
    total_debug_info_bytes_inefficiently_encoded = 0
    total_new_code_bytes_inefficiently_encoded = 0
    total_opcode_byte_size = 0
    total_file_size = 0
    op_name_to_size = {}
    string_counts = {}
    i = 0
    if len(files) == 0:
        print('No input files. {}'.format(usage))
        return
    for (i, path) in enumerate(files):
        if os.path.splitext(path)[1] == '.apk':
            print('error: dex.py operates on dex files, please unpack your apk')
            return
        print('Dex file: %s' % (path))
        file_size = os.path.getsize(path)
        total_file_size += file_size
        dex = File(path, options.proguard)
        # --class / --method filtering: dump only the requested definitions.
        if options.class_filter:
            dex_class = dex.find_class(options.class_filter)
            if dex_class:
                if options.method_filter is None:
                    dex_class.dump()
                for method in dex_class.get_methods():
                    method_name = method.get_name()
                    if options.method_filter:
                        if options.method_filter != method_name:
                            continue
                    method.dump()
            else:
                print('error: class definition not found for "%s"' % (
                    options.class_filter))
        # Section dumps requested via individual flags (or --all).
        if options.dump_header or options.dump_all:
            dex.dump_header(options)
            print('')
        if options.dump_map_list or options.dump_all:
            dex.dump_map_list(options)
        if options.dump_strings or options.dump_all:
            dex.dump_string_ids(options)
        if options.dump_types or options.dump_all:
            dex.dump_type_ids(options)
        if options.dump_protos or options.dump_all:
            dex.dump_proto_ids(options)
        if options.dump_fields or options.dump_all:
            dex.dump_field_ids(options)
        if options.dump_methods or options.dump_all:
            dex.dump_method_ids(options)
        if options.dump_classes or options.dump_all:
            dex.dump_class_defs(options)
        if options.dump_call_sites or options.dump_all:
            dex.dump_call_site_ids(options)
        if options.dump_method_handles or options.dump_all:
            dex.dump_method_handle_items(options)
        if options.dump_code or options.debug or options.dump_all:
            dex.dump_code(options)
        if options.dump_code_items:
            dex.dump_code_items(options)
        # Per-instruction work: disassembly, statistics and encoding checks
        # all share one walk over every method of every class.
        if (options.dump_disassembly or options.dump_stats or
                options.check_encoding or options.new_encoding):
            if options.dump_stats:
                for string_item in dex.get_strings():
                    if string_item.data not in string_counts:
                        string_counts[string_item.data] = 0
                    string_counts[string_item.data] += 1
            code_bytes_inefficiently_encoded = 0
            debug_info_bytes_inefficiently_encoded = 0
            new_code_bytes_inefficiently_encoded = 0
            file_opcodes_byte_size = 0
            classes = dex.get_classes()
            used_code_item_indexes = list()
            for cls in classes:
                methods = cls.get_methods()
                for method in methods:
                    if options.dump_disassembly or options.debug:
                        method.dump(
                            f=sys.stdout, dump_code=options.dump_disassembly,
                            dump_debug_info=options.debug)
                    opcodes_bytes_size = method.get_code_byte_size()
                    file_opcodes_byte_size += opcodes_bytes_size
                    total_opcode_byte_size += opcodes_bytes_size
                    if (options.dump_stats or options.check_encoding or
                            options.new_encoding):
                        for dex_inst in method.get_instructions():
                            if options.dump_stats:
                                op_name = dex_inst.get_name()
                                size = dex_inst.get_num_code_units() * 2
                                if op_name not in op_name_to_size:
                                    op_name_to_size[op_name] = 0
                                op_name_to_size[op_name] += size
                            if options.check_encoding:
                                code_bytes_inefficiently_encoded += (
                                    dex_inst.check_encoding())
                            if options.new_encoding:
                                new_code_bytes_inefficiently_encoded += (
                                    dex_inst.new_encoding())
                    if options.check_encoding:
                        code_item_idx = method.get_code_item_index()
                        if code_item_idx >= 0:
                            used_code_item_indexes.append(code_item_idx)
                        debug_info = method.get_debug_info()
                        if debug_info:
                            debug_info_bytes_inefficiently_encoded += (
                                method.check_debug_info_encoding())
            # Per-file encoding report.
            if options.check_encoding:
                efficiently_encoded = True
                if code_bytes_inefficiently_encoded > 0:
                    efficiently_encoded = False
                    total_code_bytes_inefficiently_encoded += (
                        code_bytes_inefficiently_encoded)
                    print_code_stats(code_bytes_inefficiently_encoded,
                                     file_opcodes_byte_size, file_size)
                if debug_info_bytes_inefficiently_encoded > 0:
                    efficiently_encoded = False
                    total_debug_info_bytes_inefficiently_encoded += (
                        debug_info_bytes_inefficiently_encoded)
                    print_debug_stats(debug_info_bytes_inefficiently_encoded,
                                      file_size)
                # Flag gaps in the sequence of used code_item indexes.
                used_code_item_indexes.sort()
                prev_ci_idx = 0
                for ci_idx in used_code_item_indexes:
                    if ci_idx != prev_ci_idx:
                        efficiently_encoded = False
                        for idx in range(prev_ci_idx + 1, ci_idx):
                            print('code_item[%u] is not used and its '
                                  'code_item can be removed' % (idx))
                        prev_ci_idx = ci_idx
                if efficiently_encoded:
                    print('file is efficiently encoded.')
            if options.new_encoding:
                if new_code_bytes_inefficiently_encoded > 0:
                    total_new_code_bytes_inefficiently_encoded += (
                        new_code_bytes_inefficiently_encoded)
                    print_encoding_stats(new_code_bytes_inefficiently_encoded,
                                         file_opcodes_byte_size, file_size)
                else:
                    print('file is efficiently encoded.')
        if options.code_duplication:
            dex.report_code_duplication()
    # --- whole-run reporting (after all files) ----------------------------
    if options.dump_stats:
        duped_strings_byte_size = 0
        for s in string_counts:
            count = string_counts[s]
            if count > 1:
                s_len = len(s)
                duped_strings_byte_size += (count - 1) * \
                    s_len + get_uleb128_byte_size(s_len)
        if duped_strings_byte_size > 0:
            print('%u bytes in duplicated strings across dex files.' % (
                duped_strings_byte_size))
        print('BYTESIZE %AGE OPCODE')
        print('======== ===== =================================')
        sorted_x = sorted(op_name_to_size.items(),
                          key=operator.itemgetter(1))
        for (op_name, byte_size) in sorted_x:
            percentage = get_percentage(byte_size, total_opcode_byte_size)
            print('%-8u %5.2f %s' % (byte_size, percentage, op_name))
        print('-------- ----- ---------------------------------')
        print('%-8u 100.0' % (total_opcode_byte_size))
    # NOTE(review): `i` is the index of the last file processed, so these
    # aggregate totals only print when more than one file was given.
    if i > 0:
        if options.check_encoding:
            if total_code_bytes_inefficiently_encoded > 0:
                print_code_stats(total_code_bytes_inefficiently_encoded,
                                 total_opcode_byte_size, total_file_size)
            if total_debug_info_bytes_inefficiently_encoded > 0:
                # NOTE(review): efficiently_encoded is assigned here but
                # never read again -- looks like dead code; verify.
                efficiently_encoded = False
                print_debug_stats(total_debug_info_bytes_inefficiently_encoded,
                                  total_file_size)
        if options.new_encoding:
            # NOTE(review): can_use_new_encoding / cant_use_new_encoding are
            # not defined anywhere in this function, so this branch raises
            # NameError when reached -- presumably module-level counters
            # maintained elsewhere. Confirm before relying on it.
            invoke_kind_percentage = get_percentage(
                can_use_new_encoding,
                can_use_new_encoding + cant_use_new_encoding)
            print('%u invoke-kind opcodes could use new encoding' % (
                can_use_new_encoding), end='')
            print('%u could not (%2.2f%%)' % (cant_use_new_encoding,
                                              invoke_kind_percentage))
            if total_new_code_bytes_inefficiently_encoded > 0:
                print_encoding_stats(
                    total_new_code_bytes_inefficiently_encoded,
                    total_opcode_byte_size, total_file_size)
if __name__ == '__main__':
    # Command-line entry point.
    main()
| true | true |
7900be472ce54029e928c5b6b03f12aca07184c7 | 2,515 | py | Python | nipype/interfaces/slicer/filtering/extractskeleton.py | oliver-contier/nipype | 07af08f98a69d3d95b4384facb09f8b8cef5fda2 | [
"Apache-2.0"
] | 1 | 2019-03-25T14:11:18.000Z | 2019-03-25T14:11:18.000Z | venv/Lib/site-packages/nipype/interfaces/slicer/filtering/extractskeleton.py | mysnyldz/Tez-Analizi | 47e149bbd6a9e865e9242e50fb7ca1a18adfc640 | [
"MIT"
] | 1 | 2017-01-05T01:24:33.000Z | 2017-01-05T01:24:33.000Z | venv/Lib/site-packages/nipype/interfaces/slicer/filtering/extractskeleton.py | mysnyldz/Tez-Analizi | 47e149bbd6a9e865e9242e50fb7ca1a18adfc640 | [
"MIT"
] | 1 | 2020-01-17T17:30:26.000Z | 2020-01-17T17:30:26.000Z | # -*- coding: utf-8 -*-
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
SEMLikeCommandLine,
TraitedSpec,
File,
Directory,
traits,
isdefined,
InputMultiPath,
OutputMultiPath,
)
import os
class ExtractSkeletonInputSpec(CommandLineInputSpec):
InputImageFileName = File(position=-2, desc="Input image", exists=True, argstr="%s")
OutputImageFileName = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Skeleton of the input image",
argstr="%s",
)
type = traits.Enum(
"1D", "2D", desc="Type of skeleton to create", argstr="--type %s"
)
dontPrune = traits.Bool(
desc="Return the full skeleton, not just the maximal skeleton",
argstr="--dontPrune ",
)
numPoints = traits.Int(
desc="Number of points used to represent the skeleton", argstr="--numPoints %d"
)
pointsFile = traits.Str(
desc="Name of the file to store the coordinates of the central (1D) skeleton points",
argstr="--pointsFile %s",
)
class ExtractSkeletonOutputSpec(TraitedSpec):
OutputImageFileName = File(
position=-1, desc="Skeleton of the input image", exists=True
)
class ExtractSkeleton(SEMLikeCommandLine):
"""title: Extract Skeleton
category: Filtering
description: Extract the skeleton of a binary object. The skeleton can be limited to being a 1D curve or allowed to be a full 2D manifold. The branches of the skeleton can be pruned so that only the maximal center skeleton is returned.
version: 0.1.0.$Revision: 2104 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExtractSkeleton
contributor: Pierre Seroul (UNC), Martin Styner (UNC), Guido Gerig (UNC), Stephen Aylward (Kitware)
acknowledgements: The original implementation of this method was provided by ETH Zurich, Image Analysis Laboratory of Profs Olaf Kuebler, Gabor Szekely and Guido Gerig. Martin Styner at UNC, Chapel Hill made enhancements. Wrapping for Slicer was provided by Pierre Seroul and Stephen Aylward at Kitware, Inc.
"""
input_spec = ExtractSkeletonInputSpec
output_spec = ExtractSkeletonOutputSpec
_cmd = "ExtractSkeleton "
_outputs_filenames = {"OutputImageFileName": "OutputImageFileName.nii"}
| 33.986486 | 310 | 0.702584 |
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
SEMLikeCommandLine,
TraitedSpec,
File,
Directory,
traits,
isdefined,
InputMultiPath,
OutputMultiPath,
)
import os
class ExtractSkeletonInputSpec(CommandLineInputSpec):
InputImageFileName = File(position=-2, desc="Input image", exists=True, argstr="%s")
OutputImageFileName = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Skeleton of the input image",
argstr="%s",
)
type = traits.Enum(
"1D", "2D", desc="Type of skeleton to create", argstr="--type %s"
)
dontPrune = traits.Bool(
desc="Return the full skeleton, not just the maximal skeleton",
argstr="--dontPrune ",
)
numPoints = traits.Int(
desc="Number of points used to represent the skeleton", argstr="--numPoints %d"
)
pointsFile = traits.Str(
desc="Name of the file to store the coordinates of the central (1D) skeleton points",
argstr="--pointsFile %s",
)
class ExtractSkeletonOutputSpec(TraitedSpec):
OutputImageFileName = File(
position=-1, desc="Skeleton of the input image", exists=True
)
class ExtractSkeleton(SEMLikeCommandLine):
input_spec = ExtractSkeletonInputSpec
output_spec = ExtractSkeletonOutputSpec
_cmd = "ExtractSkeleton "
_outputs_filenames = {"OutputImageFileName": "OutputImageFileName.nii"}
| true | true |
7900bf81fcf2a7857264845fa2fc05dd3f8ad6d3 | 615 | py | Python | tests/test_terms.py | SimonGreenhill/pycldf | 3730a399828d4d985ce814da4b1e2008d4733889 | [
"Apache-2.0"
] | null | null | null | tests/test_terms.py | SimonGreenhill/pycldf | 3730a399828d4d985ce814da4b1e2008d4733889 | [
"Apache-2.0"
] | null | null | null | tests/test_terms.py | SimonGreenhill/pycldf | 3730a399828d4d985ce814da4b1e2008d4733889 | [
"Apache-2.0"
] | null | null | null | import pytest
def test_terms():
from pycldf.terms import TERMS
assert 'alignment' in TERMS.properties
with pytest.raises(ValueError):
TERMS.is_cldf_uri('http://cldf.clld.org/404')
assert not TERMS.is_cldf_uri('http://example.org')
assert TERMS.is_cldf_uri('http://cldf.clld.org/v1.0/terms.rdf#source')
assert len(TERMS.properties) + len(TERMS.classes) == len(TERMS)
assert len(TERMS.modules) + len(TERMS.components) == len(TERMS.classes)
assert 'LanguageTable' in TERMS.components
assert 'LanguageTable' not in TERMS.modules
assert 'Wordlist' in TERMS.modules
| 29.285714 | 75 | 0.710569 | import pytest
def test_terms():
from pycldf.terms import TERMS
assert 'alignment' in TERMS.properties
with pytest.raises(ValueError):
TERMS.is_cldf_uri('http://cldf.clld.org/404')
assert not TERMS.is_cldf_uri('http://example.org')
assert TERMS.is_cldf_uri('http://cldf.clld.org/v1.0/terms.rdf#source')
assert len(TERMS.properties) + len(TERMS.classes) == len(TERMS)
assert len(TERMS.modules) + len(TERMS.components) == len(TERMS.classes)
assert 'LanguageTable' in TERMS.components
assert 'LanguageTable' not in TERMS.modules
assert 'Wordlist' in TERMS.modules
| true | true |
7900c02aaef03de97f13c50a8da2a3a249b26c37 | 2,722 | py | Python | Economic Growth & GDP per capita.py | ph7klw76/Data_science_project | 5b99c49d44e6858269c4220135ea4c2e0f0bcdef | [
"MIT"
] | null | null | null | Economic Growth & GDP per capita.py | ph7klw76/Data_science_project | 5b99c49d44e6858269c4220135ea4c2e0f0bcdef | [
"MIT"
] | null | null | null | Economic Growth & GDP per capita.py | ph7klw76/Data_science_project | 5b99c49d44e6858269c4220135ea4c2e0f0bcdef | [
"MIT"
] | null | null | null | import pandas as pd
data=pd.read_csv("C:/Users/user/Documents/API_NY.GDP.PCAP.CD_DS2_en_csv_v2_1068945.csv") #your raw data obtained from world bank
import pandas as pd
import matplotlib.pyplot as plt
fulldataonly=data.dropna()
listofcountry=fulldataonly['Country Name']
listofcountry=list(listofcountry)
def findcountryrow(country):
for i in range(len(data['Country Name'])):
if data['Country Name'][i]==country:
return i
else:
print("error, country not found") # find which row is the country
listyear=list(range(1960,2018))
x=[]
y=[]
mydata=[]
#for country in range(len(listofcountry)):
# for year in listyear:
# y0=data.loc[findcountryrow(listofcountry[country]),str(year)]
# y1=data.loc[findcountryrow(listofcountry[country]),str(year+1)]
# delta=(y1-y0)/y0
# x.append(y0)
# y.append(delta)
# mydata.append([y0,delta])
fulllistofcountry=data['Country Name']
fulllistofcountry=list(fulllistofcountry)
for country in range(len(fulllistofcountry)):
for year in listyear:
if (pd.notnull(data.loc[country,str(year)]))&(pd.notnull(data.loc[country,str(year+1)])):
y0=data.loc[country,str(year)]
y1=data.loc[country,str(year+1)]
delta=((y1-y0)/y0)*100
x.append(y0)
y.append(delta)
mydata.append([y0,delta])
mydata.sort(key=lambda x: x[0])
count=0
GDP, myGDP=[],[]
Growth, myGrowth=[],[]
mysd=[]
naverage=500
averagedatax,averagedatay=[],[]
import statistics as s
for i in range(len(mydata)):
if count<naverage:
GDP.append(mydata[i][0])
Growth.append(mydata[i][1])
count+=1
if count==naverage:
myGDP=s.mean(GDP)
myGrowth=s.mean(Growth)
mysd.append(s.stdev(Growth))
averagedatax.append(myGDP)
averagedatay.append(myGrowth)
count=0
GDP=[]
Growth=[]
if i==len(mydata)-1:
myGDP=s.mean(GDP)
myGrowth=s.mean(Growth)
mysd.append(s.stdev(Growth))
averagedatax.append(myGDP)
averagedatay.append(myGrowth)
plt.xscale('log')
plt.xlim(100,200000)
plt.xlabel(' GDP per capita in US dollar',size=15)
plt.ylabel('GDP growth rate %',size=15)
plt.title('Dependence of Economic Growth Rate with GDP per capita',size=15)
plt.scatter(averagedatax,averagedatay)
# histogram=mydata[0:1800]
# per=[]
# for gdp, percentage in histogram:
# per.append(percentage)
# plt.xlim(-50,60)
# plt.xlabel('GDP per capita Growth %',size=15)
# plt.ylabel('Density Function',size=15)
# plt.title('Economic Growth for different countries for 1960-2018', size=15)
# plt.hist(x=per, bins='auto', density=True)
| 29.268817 | 129 | 0.649522 | import pandas as pd
data=pd.read_csv("C:/Users/user/Documents/API_NY.GDP.PCAP.CD_DS2_en_csv_v2_1068945.csv")
import pandas as pd
import matplotlib.pyplot as plt
fulldataonly=data.dropna()
listofcountry=fulldataonly['Country Name']
listofcountry=list(listofcountry)
def findcountryrow(country):
for i in range(len(data['Country Name'])):
if data['Country Name'][i]==country:
return i
else:
print("error, country not found")
listyear=list(range(1960,2018))
x=[]
y=[]
mydata=[]
fulllistofcountry=data['Country Name']
fulllistofcountry=list(fulllistofcountry)
for country in range(len(fulllistofcountry)):
for year in listyear:
if (pd.notnull(data.loc[country,str(year)]))&(pd.notnull(data.loc[country,str(year+1)])):
y0=data.loc[country,str(year)]
y1=data.loc[country,str(year+1)]
delta=((y1-y0)/y0)*100
x.append(y0)
y.append(delta)
mydata.append([y0,delta])
mydata.sort(key=lambda x: x[0])
count=0
GDP, myGDP=[],[]
Growth, myGrowth=[],[]
mysd=[]
naverage=500
averagedatax,averagedatay=[],[]
import statistics as s
for i in range(len(mydata)):
if count<naverage:
GDP.append(mydata[i][0])
Growth.append(mydata[i][1])
count+=1
if count==naverage:
myGDP=s.mean(GDP)
myGrowth=s.mean(Growth)
mysd.append(s.stdev(Growth))
averagedatax.append(myGDP)
averagedatay.append(myGrowth)
count=0
GDP=[]
Growth=[]
if i==len(mydata)-1:
myGDP=s.mean(GDP)
myGrowth=s.mean(Growth)
mysd.append(s.stdev(Growth))
averagedatax.append(myGDP)
averagedatay.append(myGrowth)
plt.xscale('log')
plt.xlim(100,200000)
plt.xlabel(' GDP per capita in US dollar',size=15)
plt.ylabel('GDP growth rate %',size=15)
plt.title('Dependence of Economic Growth Rate with GDP per capita',size=15)
plt.scatter(averagedatax,averagedatay)
| true | true |
7900c0445d4afec44b0d6b71c36456ee9de99eaf | 7,440 | py | Python | VIT/Train.py | HzcIrving/DLRL_PlayGround | 0db9a4bdb87130d1d26aea1591ef74cbe6aaa43b | [
"MIT"
] | 27 | 2022-01-27T09:22:59.000Z | 2022-02-22T03:22:52.000Z | VIT/Train.py | HzcIrving/DLRL-PlayGround | 0db9a4bdb87130d1d26aea1591ef74cbe6aaa43b | [
"MIT"
] | null | null | null | VIT/Train.py | HzcIrving/DLRL-PlayGround | 0db9a4bdb87130d1d26aea1591ef74cbe6aaa43b | [
"MIT"
] | null | null | null | #! /usr/bin/enc python
# -*- coding: utf-8 -*-
# author: Irving He
# email: 1910646@tongji.edu.cn
import logging
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import datetime
from datetime import timedelta
import torch
import torch.distributed as dist
from Data_utils import get_loader
from Data_utils import CONFIGS
from Model import VITransModel
from Utils import WarmupCosineSchedule,WarmupLinearSchedule
from Utils import set_seed, AverageMeter, simple_accuracy, model_save
from tensorboardX import SummaryWriter
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
"""Config"""
class VITConfig:
log_dir = "./TB_log/"
dataset = "cifar10" # "cifar100"
model_type = "ViT-B_16"
pretrained_dir = "./Pretrained/imagenet21k_ViT-B_16.npz" # 预训练模型存放位置
save_dir = "./Model/"
record_algo = "Pretrained_VIT_Cifar10_ViTB16_"
test_cycles = datetime.datetime.now().strftime('%Y%m%d_%H%M')
decay_type = "cosine" # "cosine", "linear" 决定了学习率Scheduler类型
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
TB_log = True
img_size = 224
train_batch_size = 64 #512
eval_batch_size = 32 #64
eval_every = 100 # Run prediction on validation set every so many steps.
learning_rate = 3e-2 # SGD起始学习率
weight_decay = 0 #
num_steps = 10000 # Total number of training epochs to perform.
warmup_steps = 500 # 开始的Warmup Step数
max_grad_norm = 1.0
local_rank = -1 # local_rank for distributed training on gpus
seed = 42
gradient_accumulation_steps = 1 # Number of updates steps to accumulate before performing a backward/update pass.
"""Model Valid Process"""
def valid(args,model,writer,test_loader,global_step):
"""
:param args: 参数Config
:param model: 需验证模型
:param writer: TB写入
:param test_loader: 测试数据集
:param global_step: 全局step
:return:
"""
# Validation
eval_losses = AverageMeter()
model.eval()
all_preds, all_label = [],[]
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
loss_fct = torch.nn.CrossEntropyLoss()
global_eval_step = 0
for step, batch in enumerate(epoch_iterator):
global_eval_step += 1
batch = tuple(t.to(args.device) for t in batch)
x,y = batch
with torch.no_grad():
logits = model(x)[0]
eval_loss = loss_fct(logits,y)
eval_losses.update(eval_loss.item()) #滑动平均
preds = torch.argmax(logits,dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
# append在后面
all_preds[0] = np.append(all_preds[0], preds.detach().cpu().numpy(), axis=0)
all_label[0] = np.append(all_label[0], y.detach().cpu().numpy(), axis=0)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
writer.add_scalar("Train/loss", scalar_value=eval_losses.val, global_step=global_eval_step)
all_preds, all_label = all_preds[0], all_label[0]
# all_preds: numpy.array; all_label: numpy.array;
accuracy = simple_accuracy(all_preds,all_label)
writer.add_scalar("test/accuracy",scalar_value=accuracy,global_step=global_step)
return accuracy
"""Model Training Process"""
def train(args=VITConfig()):
"""
:param args:
- log_dir
"""
# 模型准备
pretrained_model_config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 100
model = VITransModel(pretrained_model_config, args.img_size, zero_head=True, num_classes=num_classes)
model.load_from(np.load(args.pretrained_dir))
model.to(device=args.device)
num_params = count_parameters(model)
if args.TB_log:
os.makedirs(args.log_dir, exist_ok=True)
writer = SummaryWriter(logdir=args.log_dir + args.record_algo + args.test_cycles)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# 1. DATA准备
train_loader, test_loader = get_loader(args)
# 2. 准备优化器以及Scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr = args.learning_rate, # init lr
momentum=0.9,
weight_decay=args.weight_decay)
t_total = args.num_steps # Total time steps
if args.decay_type == "cosine":
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
else:
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
# 3. Training
model.zero_grad()
set_seed(args.seed)
losses = AverageMeter()
global_step = 0
best_acc = 0
while True:
model.train()
# 一个数据迭代器
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x,y = batch # XData, YLabel
loss = model.forward(x,y)
loss.backward()
if (step+1)%args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
torch.nn.utils.clip_grad_norm(model.parameters(),1.0)
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Print Training Info
epoch_iterator.set_description(
"Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val)
)
writer.add_scalar("Train/loss",scalar_value=losses.val, global_step=global_step)
writer.add_scalar("Train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
# Valid ...
if global_step % args.eval_every == 0:
accuracy = valid(args, model, writer, test_loader, global_step)
if best_acc < accuracy:
best_acc = accuracy
model_save(args.record_algo+args.test_cycles,model)
model.train()
if global_step % t_total == 0:
break
losses.reset()
if global_step % t_total == 0:
break
writer.close()
print("==="*30)
print("Best Accuracy: \t%f" % best_acc)
print("End Training!")
print("==="*30)
if __name__ == "__main__":
train()
# all_preds = []
# all_labels = []
#
# all_pred = torch.tensor([1,0,1,1,0,1])
# all_label = torch.tensor([1,1,1,1,1,1])
#
# all_preds.append(all_pred)
# all_labels.append(all_label)
# print(all_preds)
# all_preds[0] = np.append(all_preds[0],all_label,axis=0)
# all_labels[0] = np.append(all_labels[0],all_pred,axis=0)
# print(type(all_preds[0]))
# print(type(all_labels[0]))
# acc = simple_accuracy(all_preds[0],all_labels[0])
# print(acc) | 33.665158 | 117 | 0.620699 |
import logging
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import datetime
from datetime import timedelta
import torch
import torch.distributed as dist
from Data_utils import get_loader
from Data_utils import CONFIGS
from Model import VITransModel
from Utils import WarmupCosineSchedule,WarmupLinearSchedule
from Utils import set_seed, AverageMeter, simple_accuracy, model_save
from tensorboardX import SummaryWriter
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
class VITConfig:
log_dir = "./TB_log/"
dataset = "cifar10"
model_type = "ViT-B_16"
pretrained_dir = "./Pretrained/imagenet21k_ViT-B_16.npz"
save_dir = "./Model/"
record_algo = "Pretrained_VIT_Cifar10_ViTB16_"
test_cycles = datetime.datetime.now().strftime('%Y%m%d_%H%M')
decay_type = "cosine"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
TB_log = True
img_size = 224
train_batch_size = 64
eval_batch_size = 32
eval_every = 100
learning_rate = 3e-2
weight_decay = 0
num_steps = 10000
warmup_steps = 500
max_grad_norm = 1.0
local_rank = -1
seed = 42
gradient_accumulation_steps = 1
def valid(args,model,writer,test_loader,global_step):
eval_losses = AverageMeter()
model.eval()
all_preds, all_label = [],[]
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
loss_fct = torch.nn.CrossEntropyLoss()
global_eval_step = 0
for step, batch in enumerate(epoch_iterator):
global_eval_step += 1
batch = tuple(t.to(args.device) for t in batch)
x,y = batch
with torch.no_grad():
logits = model(x)[0]
eval_loss = loss_fct(logits,y)
eval_losses.update(eval_loss.item())
preds = torch.argmax(logits,dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(all_preds[0], preds.detach().cpu().numpy(), axis=0)
all_label[0] = np.append(all_label[0], y.detach().cpu().numpy(), axis=0)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
writer.add_scalar("Train/loss", scalar_value=eval_losses.val, global_step=global_eval_step)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds,all_label)
writer.add_scalar("test/accuracy",scalar_value=accuracy,global_step=global_step)
return accuracy
def train(args=VITConfig()):
pretrained_model_config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 100
model = VITransModel(pretrained_model_config, args.img_size, zero_head=True, num_classes=num_classes)
model.load_from(np.load(args.pretrained_dir))
model.to(device=args.device)
num_params = count_parameters(model)
if args.TB_log:
os.makedirs(args.log_dir, exist_ok=True)
writer = SummaryWriter(logdir=args.log_dir + args.record_algo + args.test_cycles)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
train_loader, test_loader = get_loader(args)
optimizer = torch.optim.SGD(model.parameters(),
lr = args.learning_rate,
momentum=0.9,
weight_decay=args.weight_decay)
t_total = args.num_steps
if args.decay_type == "cosine":
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
else:
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
model.zero_grad()
set_seed(args.seed)
losses = AverageMeter()
global_step = 0
best_acc = 0
while True:
model.train()
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
x,y = batch
loss = model.forward(x,y)
loss.backward()
if (step+1)%args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
torch.nn.utils.clip_grad_norm(model.parameters(),1.0)
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
epoch_iterator.set_description(
"Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val)
)
writer.add_scalar("Train/loss",scalar_value=losses.val, global_step=global_step)
writer.add_scalar("Train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
if global_step % args.eval_every == 0:
accuracy = valid(args, model, writer, test_loader, global_step)
if best_acc < accuracy:
best_acc = accuracy
model_save(args.record_algo+args.test_cycles,model)
model.train()
if global_step % t_total == 0:
break
losses.reset()
if global_step % t_total == 0:
break
writer.close()
print("==="*30)
print("Best Accuracy: \t%f" % best_acc)
print("End Training!")
print("==="*30)
if __name__ == "__main__":
train()
| true | true |
7900c0c46baf4b0bbaeaee4eba13785e8a541fb5 | 1,872 | py | Python | tool/conanfile.py | Bjoe/tinyrefl | b7296be55e75024289fe11e2d696d4227fc09f0b | [
"MIT"
] | 241 | 2018-05-10T14:27:04.000Z | 2022-03-26T10:38:04.000Z | tool/conanfile.py | Bjoe/tinyrefl | b7296be55e75024289fe11e2d696d4227fc09f0b | [
"MIT"
] | 1 | 2019-08-03T17:40:28.000Z | 2019-08-20T13:08:54.000Z | tool/conanfile.py | Bjoe/tinyrefl | b7296be55e75024289fe11e2d696d4227fc09f0b | [
"MIT"
] | 15 | 2018-05-10T17:34:24.000Z | 2022-01-20T23:02:44.000Z | from conans import ConanFile, CMake
import os
class TinyreflTool(ConanFile):
name = 'tinyrefl-tool'
version = '0.4.1'
url = 'https://github.com/Manu343726/tinyrefl'
description = ' A work in progress minimal C++ static reflection API and codegen tool'
scm = {
'type': 'git',
'url': 'https://github.com/Manu343726/tinyrefl',
'revision': 'auto',
'subfolder': 'tinyrefl'
}
generators = 'cmake'
build_requires = ('jsonformoderncpp/3.5.0@vthiery/stable',
'fmt/5.2.1@bincrafters/stable',
'ctti/0.0.2@Manu343726/testing',
'cppast/master@Manu343726/testing',
'llvm_support/6.0.1@Manu343726/testing')
requires = 'clang_executables/6.0.1@Manu343726/testing'
default_options = 'fmt:header_only=True'
settings = 'os', 'compiler', 'build_type', 'arch'
def build(self):
cmake = CMake(self)
cmake.configure(
source_folder='tinyrefl',
defs = {
'TINYREFL_BUILD_TESTS': False,
'TINYREFL_BUILD_EXAMPLES': False
}
)
cmake.build(target='tinyrefl-tool')
def package(self):
self.copy('tinyrefl-tool*',
src='bin',
dst='bin')
self.copy('utils.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'cmake'),
dst='cmake',
keep_path=False)
self.copy('driver.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'tool'),
dst='cmake',
keep_path=False)
self.copy('tinyrefl_tool-config.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'cmake'),
dst='cmake',
keep_path=False)
self.copy('tinyrefl_tool-version.cmake',
dst='cmake',
keep_path=False)
| 31.728814 | 90 | 0.563568 | from conans import ConanFile, CMake
import os
class TinyreflTool(ConanFile):
name = 'tinyrefl-tool'
version = '0.4.1'
url = 'https://github.com/Manu343726/tinyrefl'
description = ' A work in progress minimal C++ static reflection API and codegen tool'
scm = {
'type': 'git',
'url': 'https://github.com/Manu343726/tinyrefl',
'revision': 'auto',
'subfolder': 'tinyrefl'
}
generators = 'cmake'
build_requires = ('jsonformoderncpp/3.5.0@vthiery/stable',
'fmt/5.2.1@bincrafters/stable',
'ctti/0.0.2@Manu343726/testing',
'cppast/master@Manu343726/testing',
'llvm_support/6.0.1@Manu343726/testing')
requires = 'clang_executables/6.0.1@Manu343726/testing'
default_options = 'fmt:header_only=True'
settings = 'os', 'compiler', 'build_type', 'arch'
def build(self):
cmake = CMake(self)
cmake.configure(
source_folder='tinyrefl',
defs = {
'TINYREFL_BUILD_TESTS': False,
'TINYREFL_BUILD_EXAMPLES': False
}
)
cmake.build(target='tinyrefl-tool')
def package(self):
self.copy('tinyrefl-tool*',
src='bin',
dst='bin')
self.copy('utils.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'cmake'),
dst='cmake',
keep_path=False)
self.copy('driver.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'tool'),
dst='cmake',
keep_path=False)
self.copy('tinyrefl_tool-config.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'cmake'),
dst='cmake',
keep_path=False)
self.copy('tinyrefl_tool-version.cmake',
dst='cmake',
keep_path=False)
| true | true |
7900c1de0146d8e223f0e1fbdb220f0b4221f81f | 669 | py | Python | use.py | esoterik0/scrabble-comp | 3ec06e0c5135f0f1abff58d0a1e7997b5e6f41bd | [
"MIT"
] | null | null | null | use.py | esoterik0/scrabble-comp | 3ec06e0c5135f0f1abff58d0a1e7997b5e6f41bd | [
"MIT"
] | null | null | null | use.py | esoterik0/scrabble-comp | 3ec06e0c5135f0f1abff58d0a1e7997b5e6f41bd | [
"MIT"
] | null | null | null | import twl
wolf = twl.Wolf()
def split(wolf, end=7):
return [wolf.len(n)() for n in range(2, end+1)]
def spell(ltrs, wild=0):
return split(wolf.wild(ltrs, wild), len(ltrs)+wild)
def _munge(func, fix, ltrs, wild=0):
return split(func(fix).wild(fix+ltrs, wild), len(fix+ltrs)+wild)
def starts(fix, ltrs, wild=0):
return _munge(wolf.starts, fix, ltrs, wild)
def ends(fix, ltrs, wild=0):
return _munge(wolf.ends, fix, ltrs, wild)
def contains(fix, ltrs, wild=0):
return _munge(wolf.contains, fix, ltrs, wild)
if __name__ == "__main__":
# print(wolf.len(2).words)
# print(wolf.wild('aa')())
print(contains('a', 'ciodtji'))
| 19.676471 | 68 | 0.638266 | import twl
wolf = twl.Wolf()
def split(wolf, end=7):
return [wolf.len(n)() for n in range(2, end+1)]
def spell(ltrs, wild=0):
return split(wolf.wild(ltrs, wild), len(ltrs)+wild)
def _munge(func, fix, ltrs, wild=0):
return split(func(fix).wild(fix+ltrs, wild), len(fix+ltrs)+wild)
def starts(fix, ltrs, wild=0):
return _munge(wolf.starts, fix, ltrs, wild)
def ends(fix, ltrs, wild=0):
return _munge(wolf.ends, fix, ltrs, wild)
def contains(fix, ltrs, wild=0):
return _munge(wolf.contains, fix, ltrs, wild)
if __name__ == "__main__":
print(contains('a', 'ciodtji'))
| true | true |
7900c23e71e10e049df576a8faa1b1a99c90b927 | 854 | py | Python | api/app.py | ThorntonMatthewD/Bot-Detector-Core-Files | cf74e90010701b5ddbc5cd12b04ba27eeac21491 | [
"BSD-2-Clause"
] | null | null | null | api/app.py | ThorntonMatthewD/Bot-Detector-Core-Files | cf74e90010701b5ddbc5cd12b04ba27eeac21491 | [
"BSD-2-Clause"
] | null | null | null | api/app.py | ThorntonMatthewD/Bot-Detector-Core-Files | cf74e90010701b5ddbc5cd12b04ba27eeac21491 | [
"BSD-2-Clause"
] | null | null | null |
from concurrent.futures.process import ProcessPoolExecutor
import api.Config
import api.middleware
from api.Config import app
from api.routers import (feedback, hiscore, label, legacy, legacy_debug,
player, prediction, report, scraper)
app.include_router(hiscore.router)
app.include_router(player.router)
app.include_router(prediction.router)
app.include_router(feedback.router)
app.include_router(report.router)
app.include_router(legacy.router)
app.include_router(scraper.router)
app.include_router(label.router)
app.include_router(legacy_debug.router)
@app.get("/")
async def root():
return {"message": "Hello World"}
# @app.on_event("startup")
# async def startup_event():
# app.state.executor = ProcessPoolExecutor()
# @app.on_event("shutdown")
# async def on_shutdown():
# app.state.executor.shutdown()
| 25.878788 | 72 | 0.757611 |
from concurrent.futures.process import ProcessPoolExecutor
import api.Config
import api.middleware
from api.Config import app
from api.routers import (feedback, hiscore, label, legacy, legacy_debug,
player, prediction, report, scraper)
app.include_router(hiscore.router)
app.include_router(player.router)
app.include_router(prediction.router)
app.include_router(feedback.router)
app.include_router(report.router)
app.include_router(legacy.router)
app.include_router(scraper.router)
app.include_router(label.router)
app.include_router(legacy_debug.router)
@app.get("/")
async def root():
return {"message": "Hello World"}
| true | true |
7900c24f12f07c54fb8d03bc0a4d4d5182d2fcce | 609 | py | Python | hog_cpp/fhog/get_hog.py | ElnuraMusaoglu/KernelizedCorrelationFilter | 78eab4297218b107cf7688e7e7c76d79b5609893 | [
"MIT"
] | 1 | 2021-07-21T08:40:48.000Z | 2021-07-21T08:40:48.000Z | hog_cpp/fhog/get_hog.py | ElnuraMusaoglu/SingleObjectTracking | 282a6312be23f6c4bce3b38c19045a1d1a3bce3b | [
"MIT"
] | null | null | null | hog_cpp/fhog/get_hog.py | ElnuraMusaoglu/SingleObjectTracking | 282a6312be23f6c4bce3b38c19045a1d1a3bce3b | [
"MIT"
] | null | null | null | from hog_cpp.fhog import fhog
import numpy as np
'''
https://github.com/lawpdas/fhog-python
'''
def get_hog(img):
M = np.zeros(img.shape[:2], dtype='float32')
O = np.zeros(img.shape[:2], dtype='float32')
H = np.zeros([img.shape[0] // 4, img.shape[1] // 4, 32], dtype='float32') # python3
fhog.gradientMag(img.astype(np.float32), M, O)
fhog.gradientHist(M, O, H)
H = H[:, :, :31]
return H
'''
if __name__ == "__main__":
img_path = 'D:/DATASET/OTB100/Basketball/img/0001.jpg'
img = cv2.imread(img_path)
sub = img[0:40, 0:40]
H = get_hog(sub)
print(H)
''' | 21 | 88 | 0.599343 | from hog_cpp.fhog import fhog
import numpy as np
def get_hog(img):
M = np.zeros(img.shape[:2], dtype='float32')
O = np.zeros(img.shape[:2], dtype='float32')
H = np.zeros([img.shape[0] // 4, img.shape[1] // 4, 32], dtype='float32')
fhog.gradientMag(img.astype(np.float32), M, O)
fhog.gradientHist(M, O, H)
H = H[:, :, :31]
return H
| true | true |
7900c2d0466c33908fcedf4c30abaf275484dfac | 1,240 | py | Python | test/test_dashboard.py | httpsgithu/python-client | f85a530367cdabe458a11919ad35609b9bc0606b | [
"Apache-2.0"
] | 11 | 2016-05-30T17:16:45.000Z | 2021-06-11T19:32:59.000Z | test/test_dashboard.py | httpsgithu/python-client | f85a530367cdabe458a11919ad35609b9bc0606b | [
"Apache-2.0"
] | 25 | 2016-05-02T23:05:19.000Z | 2020-11-18T22:43:20.000Z | test/test_dashboard.py | httpsgithu/python-client | f85a530367cdabe458a11919ad35609b9bc0606b | [
"Apache-2.0"
] | 30 | 2016-04-29T17:17:11.000Z | 2022-02-11T04:58:37.000Z | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.dashboard import Dashboard # noqa: E501
from wavefront_api_client.rest import ApiException
class TestDashboard(unittest.TestCase):
"""Dashboard unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDashboard(self):
"""Test Dashboard"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.dashboard.Dashboard() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 30.243902 | 409 | 0.726613 |
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.dashboard import Dashboard
from wavefront_api_client.rest import ApiException
class TestDashboard(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testDashboard(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
7900c31976870a6b511804ec56f46af19c8af321 | 5,343 | py | Python | configs/ddod/swin.py | VietDunghacker/VarifocalNet | f57917afb3c29ceba1d3c4f824d10b9cc53aaa40 | [
"Apache-2.0"
] | null | null | null | configs/ddod/swin.py | VietDunghacker/VarifocalNet | f57917afb3c29ceba1d3c4f824d10b9cc53aaa40 | [
"Apache-2.0"
] | null | null | null | configs/ddod/swin.py | VietDunghacker/VarifocalNet | f57917afb3c29ceba1d3c4f824d10b9cc53aaa40 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'../_base_/swa.py'
]
# model settings
model = dict(
type='ATSS',
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
backbone=dict(
type='SwinTransformer',
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
use_checkpoint=True,
),
neck=dict(
type='PAFPNX',
in_channels=[128, 256, 512, 1024],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
relu_before_extra_convs=True,
pafpn_conv_cfg=dict(type='DCNv2'),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
bbox_head=dict(
type='DDODHead',
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),
loss_bbox=dict(type='CIoULoss', loss_weight=2.0),
loss_iou=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSCostAssigner', topk=9),
reg_assigner=dict(type='ATSSCostAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
dataset_type = 'CocoDataset'
data_root = '/content/data/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5),
dict(type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='ImageCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomCrop',
crop_type='relative_range',
crop_size=(0.9, 0.9),
allow_negative_crop = False),
dict(
type='Resize',
img_scale=[(720, 720), (960, 960)],
multiscale_mode='range',
keep_ratio=True),
dict(
type='CutOut',
n_holes=(5, 10),
cutout_shape=[(4, 4), (4, 8), (8, 4), (8, 8),
(16, 8), (8, 16), (16, 16), (16, 32), (32, 16), (32, 32),
(32, 48), (48, 32), (48, 48)]),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(800, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=12,
workers_per_gpu=4,
train=dict(type = dataset_type,
ann_file = data_root + '/annotations/instances_train2017.json',
img_prefix = 'train_images/',
pipeline=train_pipeline),
val=dict(type = dataset_type,
ann_file = data_root + '/annotations/instances_val2017.json',
img_prefix = 'val_images/',
pipeline=test_pipeline,
samples_per_gpu = 24),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas = (0.9, 0.999), weight_decay=0.05)
optimizer_config = dict(grad_clip = None)
log_config = dict(interval = 10)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr_ratio = 0.2,
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
)
runner = dict(type='IterBasedRunner', max_iters=3000, max_epochs = None)
checkpoint_config = dict(interval = 100)
evaluation = dict(interval = 100, metric = 'bbox')
fp16 = dict(loss_scale=512.)
# runtime
load_from = '/gdrive/My Drive/checkpoints/bvr_atss_x101_dcn_fpn_2x_coco.pth'
resume_from = None
workflow = [('train', 1)]
| 26.984848 | 121 | 0.685944 | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'../_base_/swa.py'
]
model = dict(
type='ATSS',
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
backbone=dict(
type='SwinTransformer',
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
use_checkpoint=True,
),
neck=dict(
type='PAFPNX',
in_channels=[128, 256, 512, 1024],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
relu_before_extra_convs=True,
pafpn_conv_cfg=dict(type='DCNv2'),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
bbox_head=dict(
type='DDODHead',
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),
loss_bbox=dict(type='CIoULoss', loss_weight=2.0),
loss_iou=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(type='ATSSCostAssigner', topk=9),
reg_assigner=dict(type='ATSSCostAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
dataset_type = 'CocoDataset'
data_root = '/content/data/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5),
dict(type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='ImageCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomCrop',
crop_type='relative_range',
crop_size=(0.9, 0.9),
allow_negative_crop = False),
dict(
type='Resize',
img_scale=[(720, 720), (960, 960)],
multiscale_mode='range',
keep_ratio=True),
dict(
type='CutOut',
n_holes=(5, 10),
cutout_shape=[(4, 4), (4, 8), (8, 4), (8, 8),
(16, 8), (8, 16), (16, 16), (16, 32), (32, 16), (32, 32),
(32, 48), (48, 32), (48, 48)]),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(800, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=12,
workers_per_gpu=4,
train=dict(type = dataset_type,
ann_file = data_root + '/annotations/instances_train2017.json',
img_prefix = 'train_images/',
pipeline=train_pipeline),
val=dict(type = dataset_type,
ann_file = data_root + '/annotations/instances_val2017.json',
img_prefix = 'val_images/',
pipeline=test_pipeline,
samples_per_gpu = 24),
test=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas = (0.9, 0.999), weight_decay=0.05)
optimizer_config = dict(grad_clip = None)
log_config = dict(interval = 10)
lr_config = dict(
policy='CosineAnnealing',
min_lr_ratio = 0.2,
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
)
runner = dict(type='IterBasedRunner', max_iters=3000, max_epochs = None)
checkpoint_config = dict(interval = 100)
evaluation = dict(interval = 100, metric = 'bbox')
fp16 = dict(loss_scale=512.)
load_from = '/gdrive/My Drive/checkpoints/bvr_atss_x101_dcn_fpn_2x_coco.pth'
resume_from = None
workflow = [('train', 1)]
| true | true |
7900c3c0e778bfd876a804489fa7878ff9fbd507 | 219 | py | Python | 13)Abstract classes.py | SriCharan220800/RomanReigns | 0ec11c65fa0cfa6264f162c5e3f2ba5e45986fbb | [
"MIT"
] | null | null | null | 13)Abstract classes.py | SriCharan220800/RomanReigns | 0ec11c65fa0cfa6264f162c5e3f2ba5e45986fbb | [
"MIT"
] | null | null | null | 13)Abstract classes.py | SriCharan220800/RomanReigns | 0ec11c65fa0cfa6264f162c5e3f2ba5e45986fbb | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
def __init__(self,title,author):
self.title=title
self.author=author
@abstractmethod
def display(): pass
| 27.375 | 40 | 0.680365 | from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
def __init__(self,title,author):
self.title=title
self.author=author
@abstractmethod
def display(): pass
| true | true |
7900c408cec8c2c2c9cb20810011990a9d0f2f78 | 2,854 | py | Python | bitshares/account.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 102 | 2018-04-08T23:05:00.000Z | 2022-03-31T10:10:03.000Z | bitshares/account.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 246 | 2018-04-03T12:35:49.000Z | 2022-02-28T10:44:28.000Z | bitshares/account.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 128 | 2018-04-14T01:39:12.000Z | 2022-03-25T08:56:51.000Z | # -*- coding: utf-8 -*-
from .amount import Amount
from .instance import BlockchainInstance
from graphenecommon.account import (
Account as GrapheneAccount,
AccountUpdate as GrapheneAccountUpdate,
)
from bitsharesbase import operations
@BlockchainInstance.inject
class Account(GrapheneAccount):
"""
This class allows to easily access Account data.
:param str account_name: Name of the account
:param bitshares.bitshares.BitShares blockchain_instance: BitShares
instance
:param bool full: Obtain all account data including orders, positions, etc.
:param bool lazy: Use lazy loading
:param bool full: Obtain all account data including orders, positions,
etc.
:returns: Account data
:rtype: dictionary
:raises bitshares.exceptions.AccountDoesNotExistsException: if account
does not exist
Instances of this class are dictionaries that come with additional
methods (see below) that allow dealing with an account and it's
corresponding functions.
.. code-block:: python
from bitshares.account import Account
account = Account("init0")
print(account)
.. note:: This class comes with its own caching function to reduce the
load on the API server. Instances of this class can be
refreshed with ``Account.refresh()``.
"""
def define_classes(self):
self.type_id = 2
self.amount_class = Amount
self.operations = operations
@property
def call_positions(self):
"""Alias for :func:bitshares.account.Account.callpositions."""
return self.callpositions()
@property
def callpositions(self):
"""List call positions (collateralized positions :doc:`mpa`)"""
self.ensure_full()
from .dex import Dex
dex = Dex(blockchain_instance=self.blockchain)
return dex.list_debt_positions(self)
@property
def openorders(self):
"""Returns open Orders."""
from .price import Order
self.ensure_full()
return [
Order(o, blockchain_instance=self.blockchain) for o in self["limit_orders"]
]
@BlockchainInstance.inject
class AccountUpdate(GrapheneAccountUpdate):
"""
This purpose of this class is to keep track of account updates as they are pushed
through by :class:`bitshares.notify.Notify`.
Instances of this class are dictionaries and take the following
form:
.. code-block: js
{'id': '2.6.29',
'lifetime_fees_paid': '44261516129',
'most_recent_op': '2.9.0',
'owner': '1.2.29',
'pending_fees': 0,
'pending_vested_fees': 16310,
'total_core_in_orders': '6788845277634',
'total_ops': 0}
"""
def define_classes(self):
self.account_class = Account
| 29.729167 | 87 | 0.662929 |
from .amount import Amount
from .instance import BlockchainInstance
from graphenecommon.account import (
Account as GrapheneAccount,
AccountUpdate as GrapheneAccountUpdate,
)
from bitsharesbase import operations
@BlockchainInstance.inject
class Account(GrapheneAccount):
def define_classes(self):
self.type_id = 2
self.amount_class = Amount
self.operations = operations
@property
def call_positions(self):
return self.callpositions()
@property
def callpositions(self):
self.ensure_full()
from .dex import Dex
dex = Dex(blockchain_instance=self.blockchain)
return dex.list_debt_positions(self)
@property
def openorders(self):
from .price import Order
self.ensure_full()
return [
Order(o, blockchain_instance=self.blockchain) for o in self["limit_orders"]
]
@BlockchainInstance.inject
class AccountUpdate(GrapheneAccountUpdate):
def define_classes(self):
self.account_class = Account
| true | true |
7900c487556224959505e315e98519b2ba0eb018 | 22,124 | py | Python | ludwig/models/modules/recurrent_modules.py | rajputakhil/ludwig | dd1a37ea1018db6624f05d72c34ae8b0f7068e6c | [
"Apache-2.0"
] | null | null | null | ludwig/models/modules/recurrent_modules.py | rajputakhil/ludwig | dd1a37ea1018db6624f05d72c34ae8b0f7068e6c | [
"Apache-2.0"
] | null | null | null | ludwig/models/modules/recurrent_modules.py | rajputakhil/ludwig | dd1a37ea1018db6624f05d72c34ae8b0f7068e6c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import collections
import logging
import tensorflow as tf
from tensorflow.contrib.rnn import MultiRNNCell, LSTMStateTuple
from tensorflow.python.framework import dtypes, tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
from ludwig.models.modules.fully_connected_modules import fc_layer
from ludwig.models.modules.initializer_modules import get_initializer
from ludwig.models.modules.reduction_modules import reduce_sequence
from ludwig.utils.tf_utils import sequence_length_3D, sequence_length_2D
def get_cell_fun(cell_type):
    """Map a cell-type name to the corresponding TF RNN cell constructor.

    Unknown names (and 'rnn') fall back to ``BasicRNNCell``.  Attribute
    access happens only on the selected branch, so unused ``tf.contrib``
    sub-modules are never touched.
    """
    if cell_type == 'lstm':
        # allows for optional peephole connections and cell clipping
        return tf.nn.rnn_cell.LSTMCell
    if cell_type == 'lstm_block':
        # Faster version of basic LSTM
        return tf.contrib.rnn.LSTMBlockCell
    if cell_type == 'lstm_ln':
        return tf.contrib.rnn.LayerNormBasicLSTMCell
    if cell_type == 'lstm_cudnn':
        return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell
    if cell_type == 'gru':
        return tf.nn.rnn_cell.GRUCell
    if cell_type == 'gru_block':
        # Faster version of GRU (25% faster in my tests)
        return tf.contrib.rnn.GRUBlockCell
    if cell_type == 'gru_cudnn':
        return tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell
    # 'rnn' and anything unrecognized
    return tf.nn.rnn_cell.BasicRNNCell
class Projection(tf.layers.Layer):
    """Dense projection with externally supplied (possibly tied) weights.

    Multiplies the last dimension of ``inputs`` by ``projection_weights``
    (shape [in_size, out_size]) and optionally adds ``projection_biases``
    (shape [out_size]).  Used to tie the decoder output projection to the
    target embeddings.

    Fix: ``compute_output_shape`` previously read the output size from
    ``projection_biases`` and crashed when biases were ``None``, even
    though ``call()`` explicitly supports bias-less projection.  It now
    derives the size from the weights.
    """
    def __init__(self, projection_weights, projection_biases, name=None,
                 **kwargs):
        super(Projection, self).__init__(name=name, **kwargs)
        self.projection_weights = projection_weights
        self.projection_biases = projection_biases  # may be None (no bias)
    def call(self, inputs, **kwargs):
        inputs_shape = inputs.shape.as_list()
        weights_shape = self.projection_weights.shape.as_list()
        assert inputs_shape[-1] == weights_shape[0]
        # Collapse all leading dimensions so one matmul handles both
        # [batch, in] and [batch, time, in] inputs.
        inputs = tf.reshape(inputs, [-1, inputs_shape[-1]])
        outputs = tf.matmul(inputs, self.projection_weights)
        if self.projection_biases is not None:
            outputs = tf.nn.bias_add(outputs, self.projection_biases)
        # Restore the original leading dimensions with the projected size.
        # Copy the list so the static input shape list is not mutated.
        outputs_shape = list(inputs_shape)
        outputs_shape[0] = -1  # batch_size
        outputs_shape[-1] = weights_shape[1]
        outputs = tf.reshape(outputs, outputs_shape)
        return outputs
    def compute_output_shape(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        # Derive the output size from the weights so this also works when
        # projection_biases is None (call() supports bias-less projection).
        output_shape[-1] = self.projection_weights.shape.as_list()[1]
        return tensor_shape.TensorShape(output_shape)
class BasicDecoderOutput(
    collections.namedtuple('BasicDecoderOutput',
                           ('rnn_output', 'sample_id', 'projection_input'))):
    """Per-step decoder output.

    Extends the standard (rnn_output, sample_id) pair with
    ``projection_input`` — the raw cell output before the projection
    layer — needed to compute sampled-softmax cross-entropy loss.
    """
    pass
class BasicDecoder(tf.contrib.seq2seq.BasicDecoder):
    """``tf.contrib.seq2seq.BasicDecoder`` that additionally exposes the
    pre-projection cell output (``projection_input``) at every step, as
    required for sampled softmax."""
    def _projection_input_size(self):
        # Size of the raw cell output (before the output projection layer).
        return self._cell.output_size
    @property
    def output_size(self):
        return BasicDecoderOutput(
            rnn_output=self._rnn_output_size(),
            sample_id=self._helper.sample_ids_shape,
            projection_input=self._projection_input_size())
    @property
    def output_dtype(self):
        # All float outputs share the dtype of the initial state; sample ids
        # keep the helper's dtype.
        dtype = nest.flatten(self._initial_state)[0].dtype
        return BasicDecoderOutput(
            nest.map_structure(lambda _: dtype, self._rnn_output_size()),
            self._helper.sample_ids_dtype,
            nest.map_structure(lambda _: dtype, self._projection_input_size()))
    def step(self, time, inputs, state, name=None):
        """One decode step; mirrors the base class but also captures the
        cell output before projection."""
        with ops.name_scope(name, 'BasicDecoderStep', (time, inputs, state)):
            cell_outputs, cell_state = self._cell(inputs, state)
            projection_inputs = cell_outputs # get projection_inputs to compute sampled_softmax_cross_entropy_loss
            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)
            sample_ids = self._helper.sample(
                time=time, outputs=cell_outputs, state=cell_state)
            (finished, next_inputs, next_state) = self._helper.next_inputs(
                time=time,
                outputs=cell_outputs,
                state=cell_state,
                sample_ids=sample_ids)
        outputs = BasicDecoderOutput(cell_outputs, sample_ids,
                                     projection_inputs)
        return (outputs, next_state, next_inputs, finished)
class TimeseriesTrainingHelper(tf.contrib.seq2seq.TrainingHelper):
    """TrainingHelper for real-valued (timeseries) targets: there is no
    vocabulary to sample token ids from, so ``sample`` emits dummy zeros."""
    def sample(self, time, outputs, name=None, **unused_kwargs):
        with ops.name_scope(name, 'TrainingHelperSample', [time, outputs]):
            # One int32 zero per sequence position; never consumed as a token.
            return tf.zeros(tf.shape(outputs)[:-1], dtype=dtypes.int32)
class RecurrentStack:
    """Stack of (optionally bidirectional) RNN layers followed by a
    sequence reduction (e.g. 'last', 'sum') and optional dropout.

    TF1 graph-building code: variables are created under the 'rnn_stack'
    variable scope with AUTO_REUSE, so repeated calls share weights.
    """
    def __init__(
            self,
            state_size=256,
            cell_type='rnn',
            num_layers=1,
            bidirectional=False,
            dropout=False,
            regularize=True,
            reduce_output='last',
            **kwargs
    ):
        self.state_size = state_size
        self.cell_type = cell_type
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.regularize = regularize
        self.reduce_output = reduce_output
    def __call__(
            self,
            input_sequence,
            regularizer,
            dropout_rate,
            is_training=True
    ):
        """Build the stack on `input_sequence` (assumed [batch, seq,
        features] — sequence_length_3D implies rank 3).

        Returns (reduced_output, output_feature_size).
        """
        if not self.regularize:
            regularizer = None
        # Calculate the length of input_sequence and the batch size
        sequence_length = sequence_length_3D(input_sequence)
        # RNN cell
        cell_fn = get_cell_fun(self.cell_type)
        # initial state
        # init_state = tf.get_variable(
        #   'init_state',
        #   [1, state_size],
        #   initializer=tf.constant_initializer(0.0),
        # )
        # init_state = tf.tile(init_state, [batch_size, 1])
        # main RNN operation
        with tf.variable_scope('rnn_stack', reuse=tf.AUTO_REUSE,
                               regularizer=regularizer) as vs:
            if self.bidirectional:
                # forward direction cell
                fw_cell = lambda state_size: cell_fn(state_size)
                bw_cell = lambda state_size: cell_fn(state_size)
                fw_cells = [fw_cell(self.state_size) for _ in
                            range(self.num_layers)]
                bw_cells = [bw_cell(self.state_size) for _ in
                            range(self.num_layers)]
                rnn_outputs, final_state_fw, final_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                    cells_fw=fw_cells,
                    cells_bw=bw_cells,
                    dtype=tf.float32,
                    sequence_length=sequence_length,
                    inputs=input_sequence
                )
            else:
                cell = lambda state_size: cell_fn(state_size)
                cells = MultiRNNCell(
                    [cell(self.state_size) for _ in range(self.num_layers)],
                    state_is_tuple=True)
                rnn_outputs, final_state = tf.nn.dynamic_rnn(
                    cells,
                    input_sequence,
                    sequence_length=sequence_length,
                    dtype=tf.float32)
                # initial_state=init_state)
            # Log every variable created inside this scope.
            for v in tf.global_variables():
                if v.name.startswith(vs.name):
                    logging.debug(' {}: {}'.format(v.name, v))
            logging.debug(' rnn_outputs: {0}'.format(rnn_outputs))
            rnn_output = reduce_sequence(rnn_outputs, self.reduce_output)
            logging.debug(' reduced_rnn_output: {0}'.format(rnn_output))
            # dropout
            if self.dropout and dropout_rate is not None:
                rnn_output = tf.layers.dropout(
                    rnn_output,
                    rate=dropout_rate,
                    training=is_training
                )
                logging.debug(' dropout_rnn: {0}'.format(rnn_output))
        return rnn_output, rnn_output.shape.as_list()[-1]
def recurrent_decoder(encoder_outputs, targets, max_sequence_length, vocab_size,
                      cell_type='rnn', state_size=256, embedding_size=50,
                      num_layers=1,
                      attention_mechanism=None, beam_width=1, projection=True,
                      tied_target_embeddings=True, embeddings=None,
                      initializer=None, regularizer=None,
                      is_timeseries=False):
    """Build a seq2seq recurrent decoder graph (TF1, tf.contrib.seq2seq).

    Decodes from ``encoder_outputs`` towards ``targets``, optionally with
    attention ('bahdanau'/'luong'), beam search, tied input/output
    embeddings, or real-valued (timeseries) outputs.

    Conventions: GO symbol id is ``vocab_size``; EOS/padding symbol is 0,
    which is why the "+1" sequence length below does not inflate the real
    length.

    Returns the tuple (predictions_sequence, predictions_sequence_scores,
    predictions_sequence_length_with_eos, targets_sequence_length_with_eos,
    eval_logits, train_logits, class_weights, class_biases).
    """
    with tf.variable_scope('rnn_decoder', reuse=tf.AUTO_REUSE,
                           regularizer=regularizer):
        # ================ Setup ================
        # Beam search draws discrete tokens, which is meaningless for
        # real-valued timeseries decoding.
        if beam_width > 1 and is_timeseries:
            raise ValueError('Invalid beam_width: {}'.format(beam_width))
        GO_SYMBOL = vocab_size
        END_SYMBOL = 0
        batch_size = tf.shape(encoder_outputs)[0]
        # ================ Projection ================
        # Project the encoder outputs to the size of the decoder state
        encoder_outputs_size = encoder_outputs.shape[-1]
        if projection and encoder_outputs_size != state_size:
            with tf.variable_scope('projection'):
                encoder_output_rank = len(encoder_outputs.shape)
                if encoder_output_rank > 2:
                    # [batch, seq, feat] -> flatten, project, restore seq dim.
                    sequence_length = tf.shape(encoder_outputs)[1]
                    encoder_outputs = tf.reshape(encoder_outputs,
                                                 [-1, encoder_outputs_size])
                    encoder_outputs = fc_layer(encoder_outputs,
                                               encoder_outputs.shape[-1],
                                               state_size,
                                               activation=None,
                                               initializer=initializer)
                    encoder_outputs = tf.reshape(encoder_outputs,
                                                 [-1, sequence_length,
                                                  state_size])
                else:
                    encoder_outputs = fc_layer(encoder_outputs,
                                               encoder_outputs.shape[-1],
                                               state_size,
                                               activation=None,
                                               initializer=initializer)
        # ================ Targets sequence ================
        # Calculate the length of inputs and the batch size
        with tf.variable_scope('sequence'):
            targets_sequence_length = sequence_length_2D(targets)
            start_tokens = tf.tile([GO_SYMBOL], [batch_size])
            end_tokens = tf.tile([END_SYMBOL], [batch_size])
            if is_timeseries:
                start_tokens = tf.cast(start_tokens, tf.float32)
                end_tokens = tf.cast(end_tokens, tf.float32)
            targets_with_go = tf.concat([
                tf.expand_dims(start_tokens, 1),
                targets,
                tf.expand_dims(end_tokens, 1)], 1)
            logging.debug(' targets_with_go: {0}'.format(targets_with_go))
            targets_sequence_length_with_eos = targets_sequence_length + 1 # the EOS symbol is 0 so it's not increasing the real length of the sequence
        # ================ Embeddings ================
        if is_timeseries:
            # Scalar timeseries values act as 1-d "embeddings".
            targets_embedded = tf.expand_dims(targets_with_go, -1)
            targets_embeddings = None
        else:
            with tf.variable_scope('embedding'):
                # Tying forces embedding_size == state_size (one of the two
                # is overridden depending on whether embeddings were given).
                if embeddings is not None:
                    embedding_size = embeddings.shape.as_list()[-1]
                    if tied_target_embeddings:
                        state_size = embedding_size
                elif tied_target_embeddings:
                    embedding_size = state_size
                if embeddings is not None:
                    # Pretrained embeddings lack a GO row; append one.
                    embedding_go = tf.get_variable('embedding_GO',
                                                   initializer=tf.random_uniform(
                                                       [1, embedding_size],
                                                       -1.0, 1.0))
                    targets_embeddings = tf.concat([embeddings, embedding_go],
                                                   axis=0)
                else:
                    initializer_obj = get_initializer(initializer)
                    targets_embeddings = tf.get_variable(
                        'embeddings',
                        initializer=initializer_obj(
                            [vocab_size + 1, embedding_size]),
                        regularizer=regularizer
                    )
                logging.debug(
                    ' targets_embeddings: {0}'.format(targets_embeddings))
                targets_embedded = tf.nn.embedding_lookup(targets_embeddings,
                                                          targets_with_go,
                                                          name='decoder_input_embeddings')
        logging.debug(' targets_embedded: {0}'.format(targets_embedded))
        # ================ Class prediction ================
        if tied_target_embeddings:
            class_weights = tf.transpose(targets_embeddings)
        else:
            initializer_obj = get_initializer(initializer)
            class_weights = tf.get_variable(
                'class_weights',
                initializer=initializer_obj([state_size, vocab_size + 1]),
                regularizer=regularizer
            )
        logging.debug(' class_weights: {0}'.format(class_weights))
        class_biases = tf.get_variable('class_biases', [vocab_size + 1])
        logging.debug(' class_biases: {0}'.format(class_biases))
        projection_layer = Projection(class_weights, class_biases)
        # ================ RNN ================
        # NOTE(review): the encoder output is used directly as initial
        # decoder state — presumably a reduced [batch, state_size] tensor
        # unless attention replaces it below; confirm against callers.
        initial_state = encoder_outputs
        with tf.variable_scope('rnn_cells') as vs:
            # Cell
            cell_fun = get_cell_fun(cell_type)
            if num_layers == 1:
                cell = cell_fun(state_size)
                if cell_type.startswith('lstm'):
                    initial_state = LSTMStateTuple(c=initial_state,
                                                   h=initial_state)
            elif num_layers > 1:
                cell = MultiRNNCell(
                    [cell_fun(state_size) for _ in range(num_layers)],
                    state_is_tuple=True)
                if cell_type.startswith('lstm'):
                    initial_state = LSTMStateTuple(c=initial_state,
                                                   h=initial_state)
                initial_state = tuple([initial_state] * num_layers)
            else:
                # NOTE(review): typos 'decoser'/'recurrenct' in the message —
                # fix in a dedicated change (message text is behavior).
                raise ValueError('num_layers in recurrent decoser: {}. '
                                 'Number of layers in a recurrenct decoder cannot be <= 0'.format(
                    num_layers))
            # Attention
            if attention_mechanism is not None:
                if attention_mechanism == 'bahdanau':
                    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                        num_units=state_size, memory=encoder_outputs,
                        memory_sequence_length=sequence_length_3D(
                            encoder_outputs))
                elif attention_mechanism == 'luong':
                    attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                        num_units=state_size, memory=encoder_outputs,
                        memory_sequence_length=sequence_length_3D(
                            encoder_outputs))
                else:
                    raise ValueError(
                        'Attention mechanism {} not supported'.format(
                            attention_mechanism))
                cell = tf.contrib.seq2seq.AttentionWrapper(
                    cell, attention_mechanism, attention_layer_size=state_size)
                # Attention wrapper supplies its own zero initial state.
                initial_state = cell.zero_state(dtype=tf.float32,
                                                batch_size=batch_size)
            for v in tf.global_variables():
                if v.name.startswith(vs.name):
                    logging.debug(' {}: {}'.format(v.name, v))
        # ================ Decoding ================
        def decode(initial_state, cell, helper, beam_width=1,
                   projection_layer=None):
            # Build and run one decoder (beam-search or greedy/teacher-forced).
            # The decoder itself
            if beam_width > 1:
                # Tile inputs for beam search decoder
                beam_initial_state = tf.contrib.seq2seq.tile_batch(
                    initial_state, beam_width)
                decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                    cell=cell,
                    embedding=targets_embeddings,
                    start_tokens=start_tokens,
                    end_token=END_SYMBOL,
                    initial_state=beam_initial_state,
                    beam_width=beam_width,
                    output_layer=projection_layer)
            else:
                decoder = BasicDecoder(
                    cell=cell, helper=helper,
                    initial_state=initial_state,
                    output_layer=projection_layer)
            # The decoding operation
            outputs = tf.contrib.seq2seq.dynamic_decode(
                decoder=decoder,
                output_time_major=False,
                impute_finished=False if beam_width > 1 else True,
                maximum_iterations=max_sequence_length
            )
            return outputs
        # ================ Decoding helpers ================
        if is_timeseries:
            train_helper = TimeseriesTrainingHelper(
                inputs=targets_embedded,
                sequence_length=targets_sequence_length_with_eos)
            final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
                initial_state,
                cell,
                train_helper,
                projection_layer=projection_layer)
            eval_logits = final_outputs_pred.rnn_output
            train_logits = final_outputs_pred.projection_input
            predictions_sequence = tf.reshape(eval_logits, [batch_size, -1])
            predictions_sequence_length_with_eos = final_sequence_lengths_pred
        else:
            # Teacher-forced pass for training logits.
            train_helper = tf.contrib.seq2seq.TrainingHelper(
                inputs=targets_embedded,
                sequence_length=targets_sequence_length_with_eos)
            final_outputs_train, final_state_train, final_sequence_lengths_train, = decode(
                initial_state,
                cell,
                train_helper,
                projection_layer=projection_layer)
            eval_logits = final_outputs_train.rnn_output
            train_logits = final_outputs_train.projection_input
            # train_predictions = final_outputs_train.sample_id
            # Free-running (greedy or beam) pass for predictions.
            pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                embedding=targets_embeddings,
                start_tokens=start_tokens,
                end_token=END_SYMBOL)
            final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
                initial_state,
                cell,
                pred_helper,
                beam_width,
                projection_layer=projection_layer)
            if beam_width > 1:
                # Keep only the top beam (index 0).
                predictions_sequence = final_outputs_pred.beam_search_decoder_output.predicted_ids[
                                       :, :, 0]
                # final_outputs_pred..predicted_ids[:,:,0] would work too, but it contains -1s for padding
                predictions_sequence_scores = final_outputs_pred.beam_search_decoder_output.scores[
                                              :, :, 0]
                predictions_sequence_length_with_eos = final_sequence_lengths_pred[
                                                       :, 0]
            else:
                predictions_sequence = final_outputs_pred.sample_id
                predictions_sequence_scores = final_outputs_pred.rnn_output
                predictions_sequence_length_with_eos = final_sequence_lengths_pred
        logging.debug(' train_logits: {0}'.format(train_logits))
        logging.debug(' eval_logits: {0}'.format(eval_logits))
        logging.debug(' predictions_sequence: {0}'.format(predictions_sequence))
        logging.debug(' predictions_sequence_scores: {0}'.format(
            predictions_sequence_scores))
        return predictions_sequence, predictions_sequence_scores, predictions_sequence_length_with_eos, \
               targets_sequence_length_with_eos, eval_logits, train_logits, class_weights, class_biases
| 45.805383 | 153 | 0.557991 |
import collections
import logging
import tensorflow as tf
from tensorflow.contrib.rnn import MultiRNNCell, LSTMStateTuple
from tensorflow.python.framework import dtypes, tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.util import nest
from ludwig.models.modules.fully_connected_modules import fc_layer
from ludwig.models.modules.initializer_modules import get_initializer
from ludwig.models.modules.reduction_modules import reduce_sequence
from ludwig.utils.tf_utils import sequence_length_3D, sequence_length_2D
def get_cell_fun(cell_type):
    """Map a cell-type name to the corresponding TF RNN cell constructor.

    Unknown names (and 'rnn') fall back to ``BasicRNNCell``.  Attribute
    access happens only on the selected branch, so unused ``tf.contrib``
    sub-modules are never touched.
    """
    if cell_type == 'lstm':
        return tf.nn.rnn_cell.LSTMCell
    if cell_type == 'lstm_block':
        return tf.contrib.rnn.LSTMBlockCell
    if cell_type == 'lstm_ln':
        return tf.contrib.rnn.LayerNormBasicLSTMCell
    if cell_type == 'lstm_cudnn':
        return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell
    if cell_type == 'gru':
        return tf.nn.rnn_cell.GRUCell
    if cell_type == 'gru_block':
        return tf.contrib.rnn.GRUBlockCell
    if cell_type == 'gru_cudnn':
        return tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell
    # 'rnn' and anything unrecognized
    return tf.nn.rnn_cell.BasicRNNCell
class Projection(tf.layers.Layer):
    """Dense projection with externally supplied (possibly tied) weights.

    Multiplies the last dimension of ``inputs`` by ``projection_weights``
    (shape [in_size, out_size]) and optionally adds ``projection_biases``
    (shape [out_size]).

    Fix: ``compute_output_shape`` previously read the output size from
    ``projection_biases`` and crashed when biases were ``None``, even
    though ``call()`` explicitly supports bias-less projection.  It now
    derives the size from the weights.
    """
    def __init__(self, projection_weights, projection_biases, name=None,
                 **kwargs):
        super(Projection, self).__init__(name=name, **kwargs)
        self.projection_weights = projection_weights
        self.projection_biases = projection_biases  # may be None (no bias)
    def call(self, inputs, **kwargs):
        inputs_shape = inputs.shape.as_list()
        weights_shape = self.projection_weights.shape.as_list()
        assert inputs_shape[-1] == weights_shape[0]
        # Collapse all leading dimensions so one matmul handles both
        # [batch, in] and [batch, time, in] inputs.
        inputs = tf.reshape(inputs, [-1, inputs_shape[-1]])
        outputs = tf.matmul(inputs, self.projection_weights)
        if self.projection_biases is not None:
            outputs = tf.nn.bias_add(outputs, self.projection_biases)
        # Restore the original leading dimensions with the projected size.
        # Copy the list so the static input shape list is not mutated.
        outputs_shape = list(inputs_shape)
        outputs_shape[0] = -1  # batch_size
        outputs_shape[-1] = weights_shape[1]
        outputs = tf.reshape(outputs, outputs_shape)
        return outputs
    def compute_output_shape(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        # Derive the output size from the weights so this also works when
        # projection_biases is None (call() supports bias-less projection).
        output_shape[-1] = self.projection_weights.shape.as_list()[1]
        return tensor_shape.TensorShape(output_shape)
class BasicDecoderOutput(
    collections.namedtuple('BasicDecoderOutput',
                           ('rnn_output', 'sample_id', 'projection_input'))):
    """Per-step decoder output; adds ``projection_input`` (raw cell output
    before the projection layer) for sampled-softmax loss computation."""
    pass
class BasicDecoder(tf.contrib.seq2seq.BasicDecoder):
    """``tf.contrib.seq2seq.BasicDecoder`` that additionally exposes the
    pre-projection cell output (``projection_input``) at every step, as
    required for sampled softmax."""
    def _projection_input_size(self):
        # Size of the raw cell output (before the output projection layer).
        return self._cell.output_size
    @property
    def output_size(self):
        return BasicDecoderOutput(
            rnn_output=self._rnn_output_size(),
            sample_id=self._helper.sample_ids_shape,
            projection_input=self._projection_input_size())
    @property
    def output_dtype(self):
        # Float outputs share the initial state's dtype; sample ids keep
        # the helper's dtype.
        dtype = nest.flatten(self._initial_state)[0].dtype
        return BasicDecoderOutput(
            nest.map_structure(lambda _: dtype, self._rnn_output_size()),
            self._helper.sample_ids_dtype,
            nest.map_structure(lambda _: dtype, self._projection_input_size()))
    def step(self, time, inputs, state, name=None):
        """One decode step; mirrors the base class but also captures the
        cell output before projection."""
        with ops.name_scope(name, 'BasicDecoderStep', (time, inputs, state)):
            cell_outputs, cell_state = self._cell(inputs, state)
            projection_inputs = cell_outputs  # raw cell output, pre-projection
            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)
            sample_ids = self._helper.sample(
                time=time, outputs=cell_outputs, state=cell_state)
            (finished, next_inputs, next_state) = self._helper.next_inputs(
                time=time,
                outputs=cell_outputs,
                state=cell_state,
                sample_ids=sample_ids)
            outputs = BasicDecoderOutput(cell_outputs, sample_ids,
                                         projection_inputs)
            return (outputs, next_state, next_inputs, finished)
class TimeseriesTrainingHelper(tf.contrib.seq2seq.TrainingHelper):
    """TrainingHelper for real-valued (timeseries) targets: there is no
    vocabulary to sample token ids from, so ``sample`` emits dummy zeros."""
    def sample(self, time, outputs, name=None, **unused_kwargs):
        with ops.name_scope(name, 'TrainingHelperSample', [time, outputs]):
            # One int32 zero per sequence position; never consumed as a token.
            return tf.zeros(tf.shape(outputs)[:-1], dtype=dtypes.int32)
class RecurrentStack:
    """Stack of (optionally bidirectional) RNN layers followed by a
    sequence reduction (e.g. 'last', 'sum') and optional dropout.

    TF1 graph-building code: variables are created under the 'rnn_stack'
    variable scope with AUTO_REUSE, so repeated calls share weights.
    """
    def __init__(
            self,
            state_size=256,
            cell_type='rnn',
            num_layers=1,
            bidirectional=False,
            dropout=False,
            regularize=True,
            reduce_output='last',
            **kwargs
    ):
        self.state_size = state_size
        self.cell_type = cell_type
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.regularize = regularize
        self.reduce_output = reduce_output
    def __call__(
            self,
            input_sequence,
            regularizer,
            dropout_rate,
            is_training=True
    ):
        """Build the stack on `input_sequence` (assumed [batch, seq,
        features] — sequence_length_3D implies rank 3).

        Returns (reduced_output, output_feature_size).
        """
        if not self.regularize:
            regularizer = None
        sequence_length = sequence_length_3D(input_sequence)
        cell_fn = get_cell_fun(self.cell_type)
        with tf.variable_scope('rnn_stack', reuse=tf.AUTO_REUSE,
                               regularizer=regularizer) as vs:
            if self.bidirectional:
                fw_cell = lambda state_size: cell_fn(state_size)
                bw_cell = lambda state_size: cell_fn(state_size)
                fw_cells = [fw_cell(self.state_size) for _ in
                            range(self.num_layers)]
                bw_cells = [bw_cell(self.state_size) for _ in
                            range(self.num_layers)]
                rnn_outputs, final_state_fw, final_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                    cells_fw=fw_cells,
                    cells_bw=bw_cells,
                    dtype=tf.float32,
                    sequence_length=sequence_length,
                    inputs=input_sequence
                )
            else:
                cell = lambda state_size: cell_fn(state_size)
                cells = MultiRNNCell(
                    [cell(self.state_size) for _ in range(self.num_layers)],
                    state_is_tuple=True)
                rnn_outputs, final_state = tf.nn.dynamic_rnn(
                    cells,
                    input_sequence,
                    sequence_length=sequence_length,
                    dtype=tf.float32)
            # Log every variable created inside this scope.
            for v in tf.global_variables():
                if v.name.startswith(vs.name):
                    logging.debug(' {}: {}'.format(v.name, v))
            logging.debug(' rnn_outputs: {0}'.format(rnn_outputs))
            rnn_output = reduce_sequence(rnn_outputs, self.reduce_output)
            logging.debug(' reduced_rnn_output: {0}'.format(rnn_output))
            if self.dropout and dropout_rate is not None:
                rnn_output = tf.layers.dropout(
                    rnn_output,
                    rate=dropout_rate,
                    training=is_training
                )
                logging.debug(' dropout_rnn: {0}'.format(rnn_output))
        return rnn_output, rnn_output.shape.as_list()[-1]
def recurrent_decoder(encoder_outputs, targets, max_sequence_length, vocab_size,
                      cell_type='rnn', state_size=256, embedding_size=50,
                      num_layers=1,
                      attention_mechanism=None, beam_width=1, projection=True,
                      tied_target_embeddings=True, embeddings=None,
                      initializer=None, regularizer=None,
                      is_timeseries=False):
    """Build a seq2seq recurrent decoder graph (TF1, tf.contrib.seq2seq).

    Decodes from ``encoder_outputs`` towards ``targets``, optionally with
    attention ('bahdanau'/'luong'), beam search, tied input/output
    embeddings, or real-valued (timeseries) outputs.  GO symbol id is
    ``vocab_size``; EOS/padding symbol is 0.

    Returns the tuple (predictions_sequence, predictions_sequence_scores,
    predictions_sequence_length_with_eos, targets_sequence_length_with_eos,
    eval_logits, train_logits, class_weights, class_biases).
    """
    with tf.variable_scope('rnn_decoder', reuse=tf.AUTO_REUSE,
                           regularizer=regularizer):
        if beam_width > 1 and is_timeseries:
            raise ValueError('Invalid beam_width: {}'.format(beam_width))
        GO_SYMBOL = vocab_size
        END_SYMBOL = 0
        batch_size = tf.shape(encoder_outputs)[0]
        # Project the encoder outputs to the decoder state size if needed.
        encoder_outputs_size = encoder_outputs.shape[-1]
        if projection and encoder_outputs_size != state_size:
            with tf.variable_scope('projection'):
                encoder_output_rank = len(encoder_outputs.shape)
                if encoder_output_rank > 2:
                    sequence_length = tf.shape(encoder_outputs)[1]
                    encoder_outputs = tf.reshape(encoder_outputs,
                                                 [-1, encoder_outputs_size])
                    encoder_outputs = fc_layer(encoder_outputs,
                                               encoder_outputs.shape[-1],
                                               state_size,
                                               activation=None,
                                               initializer=initializer)
                    encoder_outputs = tf.reshape(encoder_outputs,
                                                 [-1, sequence_length,
                                                  state_size])
                else:
                    encoder_outputs = fc_layer(encoder_outputs,
                                               encoder_outputs.shape[-1],
                                               state_size,
                                               activation=None,
                                               initializer=initializer)
        # Bracket targets with GO and EOS tokens.
        with tf.variable_scope('sequence'):
            targets_sequence_length = sequence_length_2D(targets)
            start_tokens = tf.tile([GO_SYMBOL], [batch_size])
            end_tokens = tf.tile([END_SYMBOL], [batch_size])
            if is_timeseries:
                start_tokens = tf.cast(start_tokens, tf.float32)
                end_tokens = tf.cast(end_tokens, tf.float32)
            targets_with_go = tf.concat([
                tf.expand_dims(start_tokens, 1),
                targets,
                tf.expand_dims(end_tokens, 1)], 1)
            logging.debug(' targets_with_go: {0}'.format(targets_with_go))
            # EOS symbol is 0, so +1 does not inflate the real length.
            targets_sequence_length_with_eos = targets_sequence_length + 1
        # ================ Embeddings ================
        if is_timeseries:
            targets_embedded = tf.expand_dims(targets_with_go, -1)
            targets_embeddings = None
        else:
            with tf.variable_scope('embedding'):
                if embeddings is not None:
                    embedding_size = embeddings.shape.as_list()[-1]
                    if tied_target_embeddings:
                        state_size = embedding_size
                elif tied_target_embeddings:
                    embedding_size = state_size
                if embeddings is not None:
                    embedding_go = tf.get_variable('embedding_GO',
                                                   initializer=tf.random_uniform(
                                                       [1, embedding_size],
                                                       -1.0, 1.0))
                    targets_embeddings = tf.concat([embeddings, embedding_go],
                                                   axis=0)
                else:
                    initializer_obj = get_initializer(initializer)
                    targets_embeddings = tf.get_variable(
                        'embeddings',
                        initializer=initializer_obj(
                            [vocab_size + 1, embedding_size]),
                        regularizer=regularizer
                    )
                logging.debug(
                    ' targets_embeddings: {0}'.format(targets_embeddings))
                targets_embedded = tf.nn.embedding_lookup(targets_embeddings,
                                                          targets_with_go,
                                                          name='decoder_input_embeddings')
        logging.debug(' targets_embedded: {0}'.format(targets_embedded))
        # ================ Class prediction ================
        if tied_target_embeddings:
            class_weights = tf.transpose(targets_embeddings)
        else:
            initializer_obj = get_initializer(initializer)
            class_weights = tf.get_variable(
                'class_weights',
                initializer=initializer_obj([state_size, vocab_size + 1]),
                regularizer=regularizer
            )
        logging.debug(' class_weights: {0}'.format(class_weights))
        class_biases = tf.get_variable('class_biases', [vocab_size + 1])
        logging.debug(' class_biases: {0}'.format(class_biases))
        projection_layer = Projection(class_weights, class_biases)
        # ================ RNN ================
        initial_state = encoder_outputs
        with tf.variable_scope('rnn_cells') as vs:
            # Cell
            cell_fun = get_cell_fun(cell_type)
            if num_layers == 1:
                cell = cell_fun(state_size)
                if cell_type.startswith('lstm'):
                    initial_state = LSTMStateTuple(c=initial_state,
                                                   h=initial_state)
            elif num_layers > 1:
                cell = MultiRNNCell(
                    [cell_fun(state_size) for _ in range(num_layers)],
                    state_is_tuple=True)
                if cell_type.startswith('lstm'):
                    initial_state = LSTMStateTuple(c=initial_state,
                                                   h=initial_state)
                initial_state = tuple([initial_state] * num_layers)
            else:
                raise ValueError('num_layers in recurrent decoser: {}. '
                                 'Number of layers in a recurrenct decoder cannot be <= 0'.format(
                    num_layers))
            # Attention
            if attention_mechanism is not None:
                if attention_mechanism == 'bahdanau':
                    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                        num_units=state_size, memory=encoder_outputs,
                        memory_sequence_length=sequence_length_3D(
                            encoder_outputs))
                elif attention_mechanism == 'luong':
                    attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                        num_units=state_size, memory=encoder_outputs,
                        memory_sequence_length=sequence_length_3D(
                            encoder_outputs))
                else:
                    raise ValueError(
                        'Attention mechanism {} not supported'.format(
                            attention_mechanism))
                cell = tf.contrib.seq2seq.AttentionWrapper(
                    cell, attention_mechanism, attention_layer_size=state_size)
                initial_state = cell.zero_state(dtype=tf.float32,
                                                batch_size=batch_size)
            for v in tf.global_variables():
                if v.name.startswith(vs.name):
                    logging.debug(' {}: {}'.format(v.name, v))
        # ================ Decoding ================
        def decode(initial_state, cell, helper, beam_width=1,
                   projection_layer=None):
            # The decoder itself
            if beam_width > 1:
                # Tile inputs for beam search decoder
                beam_initial_state = tf.contrib.seq2seq.tile_batch(
                    initial_state, beam_width)
                decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                    cell=cell,
                    embedding=targets_embeddings,
                    start_tokens=start_tokens,
                    end_token=END_SYMBOL,
                    initial_state=beam_initial_state,
                    beam_width=beam_width,
                    output_layer=projection_layer)
            else:
                decoder = BasicDecoder(
                    cell=cell, helper=helper,
                    initial_state=initial_state,
                    output_layer=projection_layer)
            # The decoding operation
            outputs = tf.contrib.seq2seq.dynamic_decode(
                decoder=decoder,
                output_time_major=False,
                impute_finished=False if beam_width > 1 else True,
                maximum_iterations=max_sequence_length
            )
            return outputs
        # ================ Decoding helpers ================
        if is_timeseries:
            train_helper = TimeseriesTrainingHelper(
                inputs=targets_embedded,
                sequence_length=targets_sequence_length_with_eos)
            final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
                initial_state,
                cell,
                train_helper,
                projection_layer=projection_layer)
            eval_logits = final_outputs_pred.rnn_output
            train_logits = final_outputs_pred.projection_input
            predictions_sequence = tf.reshape(eval_logits, [batch_size, -1])
            predictions_sequence_length_with_eos = final_sequence_lengths_pred
        else:
            train_helper = tf.contrib.seq2seq.TrainingHelper(
                inputs=targets_embedded,
                sequence_length=targets_sequence_length_with_eos)
            final_outputs_train, final_state_train, final_sequence_lengths_train, = decode(
                initial_state,
                cell,
                train_helper,
                projection_layer=projection_layer)
            eval_logits = final_outputs_train.rnn_output
            train_logits = final_outputs_train.projection_input
            # train_predictions = final_outputs_train.sample_id
            pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                embedding=targets_embeddings,
                start_tokens=start_tokens,
                end_token=END_SYMBOL)
            final_outputs_pred, final_state_pred, final_sequence_lengths_pred = decode(
                initial_state,
                cell,
                pred_helper,
                beam_width,
                projection_layer=projection_layer)
            if beam_width > 1:
                predictions_sequence = final_outputs_pred.beam_search_decoder_output.predicted_ids[
                                       :, :, 0]
                # final_outputs_pred..predicted_ids[:,:,0] would work too, but it contains -1s for padding
                predictions_sequence_scores = final_outputs_pred.beam_search_decoder_output.scores[
                                              :, :, 0]
                predictions_sequence_length_with_eos = final_sequence_lengths_pred[
                                                       :, 0]
            else:
                predictions_sequence = final_outputs_pred.sample_id
                predictions_sequence_scores = final_outputs_pred.rnn_output
                predictions_sequence_length_with_eos = final_sequence_lengths_pred
        logging.debug(' train_logits: {0}'.format(train_logits))
        logging.debug(' eval_logits: {0}'.format(eval_logits))
        logging.debug(' predictions_sequence: {0}'.format(predictions_sequence))
        logging.debug(' predictions_sequence_scores: {0}'.format(
            predictions_sequence_scores))
        return predictions_sequence, predictions_sequence_scores, predictions_sequence_length_with_eos, \
               targets_sequence_length_with_eos, eval_logits, train_logits, class_weights, class_biases
| true | true |
7900c550ff1e1959f9941584bd74e59841f443d9 | 8,106 | py | Python | layers.py | wangxiaoyunanne/TAGCN | 9f2df35e1586f49efcd6d4706e3edd2499c1c6f1 | [
"MIT"
] | 19 | 2018-02-17T23:21:33.000Z | 2021-03-06T00:41:52.000Z | layers.py | wangxiaoyunanne/TAGCN | 9f2df35e1586f49efcd6d4706e3edd2499c1c6f1 | [
"MIT"
] | 1 | 2020-10-24T12:15:34.000Z | 2021-04-21T08:55:43.000Z | layers.py | wangxiaoyunanne/TAGCN | 9f2df35e1586f49efcd6d4706e3edd2499c1c6f1 | [
"MIT"
] | 9 | 2018-04-02T08:04:15.000Z | 2019-12-10T09:29:06.000Z | from inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
    """Helper function, assigns unique layer IDs.

    Returns 1 for the first layer with a given name, then 2, 3, ...
    """
    _LAYER_UIDS[layer_name] = _LAYER_UIDS.get(layer_name, 0) + 1
    return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors.

    Keeps each nonzero entry with probability ``keep_prob`` and rescales
    the survivors by 1/keep_prob (inverted dropout).
    """
    keep_mask = tf.cast(
        tf.floor(keep_prob + tf.random_uniform(noise_shape)), dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1. / keep_prob)
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    matmul = tf.sparse_tensor_dense_matmul if sparse else tf.matmul
    return matmul(x, y)
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    Implementation inspired by keras (http://keras.io).
    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off
    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
        _log_vars(): Log all variables
    """
    def __init__(self, **kwargs):
        # Reject unknown keyword arguments early.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Auto-name, e.g. 'dense_1', 'dense_2', ... via the global UID map.
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}  # variable name -> tf.Variable, filled by subclasses
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.sparse_inputs = False
    def _call(self, inputs):
        # Identity by default; subclasses override with the real computation.
        return inputs
    def __call__(self, inputs):
        """Wrap _call() in a name scope and optionally log histograms.

        Input histograms are skipped for sparse inputs (tf.summary.histogram
        takes dense tensors).
        """
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs
    def _log_vars(self):
        # Histogram summary for every variable registered in self.vars.
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
    """Dense (fully connected) layer with optional sparse inputs, dropout
    and bias.

    ``placeholders`` must provide 'dropout' (rate) and
    'num_features_nonzero' (needed for sparse dropout).
    """
    def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
                 act=tf.nn.relu, bias=False, featureless=False, **kwargs):
        super(Dense, self).__init__(**kwargs)
        # When dropout is requested, the actual rate is fed via placeholder
        # so it can be disabled at evaluation time.
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act
        self.sparse_inputs = sparse_inputs
        # NOTE(review): `featureless` is stored but never used in _call of
        # this class (it is used by the convolution layers).
        self.featureless = featureless
        self.bias = bias
        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        """act(dropout(inputs) @ W (+ b))."""
        x = inputs
        # dropout (1 - rate is the keep probability expected by TF1 dropout)
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # transform
        output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
class GraphConvolution(Layer):
    """Graph convolution layer: y = act(sum_i support_i . (dropout(x) . W_i) [+ b])."""

    def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
                 sparse_inputs=False, act=tf.nn.relu, bias=False,
                 featureless=False, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)

        # Truthy `dropout` wires the layer to the graph-fed dropout
        # placeholder; otherwise dropout is disabled.
        self.dropout = placeholders['dropout'] if dropout else 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            # One weight matrix per support (adjacency approximation) term.
            for i in range(len(self.support)):
                self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
                                                        name='weights_' + str(i))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        # Dropout, picking the variant that matches the input representation.
        if self.sparse_inputs:
            x = sparse_dropout(inputs, 1 - self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(inputs, 1 - self.dropout)

        # Convolve: featureless layers skip the feature multiplication and
        # use W_i directly as the pre-support term.
        supports = []
        for i in range(len(self.support)):
            if self.featureless:
                pre_sup = self.vars['weights_' + str(i)]
            else:
                pre_sup = dot(x, self.vars['weights_' + str(i)],
                              sparse=self.sparse_inputs)
            supports.append(dot(self.support[i], pre_sup, sparse=True))
        output = tf.add_n(supports)

        if self.bias:
            output += self.vars['bias']
        return self.act(output)
class TAGraphConvolution(Layer):
    """Topology-adaptive graph convolution layer (TAGCN).

    Unlike ``GraphConvolution``, the ``support`` placeholder here is a dense
    rank-3 tensor whose last axis indexes successive filter taps; each tap
    ``k`` gets its own weight matrix ``G_k`` and the taps' outputs are summed:
    output = act(sum_k support[:, :, k] . (dropout(x) . G_k) [+ b]).

    The previously hard-coded two filter taps are generalized by the new
    ``num_weights`` keyword; its default of 2 preserves the old behavior.
    """

    def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
                 sparse_inputs=False, act=tf.nn.relu, bias=False,
                 featureless=False, num_weights=2, **kwargs):
        super(TAGraphConvolution, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        # Number of filter taps K; support must provide at least K slices
        # along its last axis.
        self.num_weights = num_weights

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            for k in range(self.num_weights):
                self.vars['weights_' + str(k)] = tf.get_variable(
                    shape=[input_dim, output_dim],
                    name=('weights_' + str(k)),
                    initializer=tf.contrib.layers.xavier_initializer())
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
                # NOTE(review): vestigial accumulator kept for attribute
                # compatibility -- nothing reads it anymore.
                self.conv = np.zeros(output_dim, dtype=np.float32)

        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        # dropout
        if self.sparse_inputs:
            x = sparse_dropout(inputs, 1 - self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(inputs, 1 - self.dropout)

        # convolve: output = sum_k W_k . (x . G_k)
        supports = list()
        for k in range(self.num_weights):
            w_k = self.support[:, :, k]
            G_k = self.vars['weights_' + str(k)]
            res = dot(x, G_k, sparse=self.sparse_inputs)
            res = dot(w_k, res)
            supports.append(res)
        output = tf.add_n(supports)

        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
| 31.297297 | 175 | 0.576733 | from inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
    """Return a fresh 1-based uid for layers named ``layer_name``."""
    # First call for a name yields 1; every later call increments the count.
    count = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = count
    return count
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for tf.SparseTensor input, rescaling survivors by 1/keep_prob."""
    # Entry i survives iff keep_prob + U[0,1) >= 1, i.e. with prob. keep_prob.
    keep_mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)),
                        dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1. / keep_prob)
def dot(x, y, sparse=False):
    """Matrix product x . y, dispatching on whether ``x`` is a tf.SparseTensor."""
    matmul = tf.sparse_tensor_dense_matmul if sparse else tf.matmul
    return matmul(x, y)
class Layer(object):
    """Base layer class.

    Subclasses implement _call(inputs); __call__ wraps it in a name scope
    and optional TensorBoard summary logging.
    """
    def __init__(self, **kwargs):
        # Only 'name' and 'logging' may be passed; anything else is a typo.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        # Auto-generate a unique name like "dense_1" when none is supplied.
        name = kwargs.get('name')
        if not name:
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        # Trainable variables of this layer, keyed by name.
        self.vars = {}
        logging = kwargs.get('logging', False)
        self.logging = logging
        # Subclasses that accept tf.SparseTensor inputs override this.
        self.sparse_inputs = False
    def _call(self, inputs):
        # Identity by default; subclasses override with the real computation.
        return inputs
    def __call__(self, inputs):
        # Sparse inputs cannot be histogrammed, hence the extra guard.
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs
    def _log_vars(self):
        # Emit a TensorBoard histogram summary for every layer variable.
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
    """Dense layer: y = act(dropout(x) . W [+ b])."""
    def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
                 act=tf.nn.relu, bias=False, featureless=False, **kwargs):
        super(Dense, self).__init__(**kwargs)
        # Truthy `dropout` wires in the graph-fed dropout placeholder.
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        x = inputs
        # dropout (variant matches the input representation)
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # transform
        output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
class GraphConvolution(Layer):
    """Graph convolution layer: y = act(sum_i support_i . (dropout(x) . W_i) [+ b])."""
    def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
                 sparse_inputs=False, act=tf.nn.relu, bias=False,
                 featureless=False, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        # Truthy `dropout` wires in the graph-fed dropout placeholder.
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            # One weight matrix per support (adjacency approximation) term.
            for i in range(len(self.support)):
                self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
                                                        name='weights_' + str(i))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        x = inputs
        # dropout
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # convolve; featureless layers use W_i directly instead of x . W_i
        supports = list()
        for i in range(len(self.support)):
            if not self.featureless:
                pre_sup = dot(x, self.vars['weights_' + str(i)],
                              sparse=self.sparse_inputs)
            else:
                pre_sup = self.vars['weights_' + str(i)]
            support = dot(self.support[i], pre_sup, sparse=True)
            supports.append(support)
        output = tf.add_n(supports)
        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
class TAGraphConvolution(Layer):
    """Topology-adaptive graph convolution (TAGCN) layer.

    ``support`` is a dense rank-3 tensor; its last axis indexes two filter
    taps, each with its own weight matrix G_k, and the taps' outputs are
    summed: output = act(sum_k support[:, :, k] . (dropout(x) . G_k) [+ b]).
    """
    def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
                 sparse_inputs=False, act=tf.nn.relu, bias=False,
                 featureless=False, **kwargs):
        super(TAGraphConvolution, self).__init__(**kwargs)
        # Truthy `dropout` wires in the graph-fed dropout placeholder.
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.
        self.act = act
        self.support = placeholders['support']
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias
        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']
        with tf.variable_scope(self.name + '_vars'):
            # Two filter taps, Xavier-initialized.
            for k in range(2):
                self.vars['weights_' + str(k)] = tf.get_variable(shape=[input_dim, output_dim], name=('weights_' + str(k)), initializer=tf.contrib.layers.xavier_initializer())
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')
                # NOTE(review): self.conv is never read in _call; looks vestigial.
                self.conv = np.zeros(output_dim,dtype=np.float32)
        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        x = inputs
        # dropout
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # convolve: output = sum_k support[:,:,k] . (x . G_k)
        supports = list()
        for k in range(2):
            w_k = self.support[:,:,k]
            G_k = self.vars['weights_' + str(k)]
            res = dot(x,G_k,sparse=self.sparse_inputs)
            res = dot(w_k,res)
            supports.append(res)
        output = tf.add_n(supports)
        # bias
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
| true | true |
7900c7bac81dcda626efb8e505fa4df9aa38aeb8 | 2,940 | py | Python | low_level_simulation/src/rosbridge_suite/rosbridge_library/src/rosbridge_library/rosbridge_protocol.py | abiantorres/autonomous-vehicles-system-simulation | 3f0112036b2b270f5055729c648a1310976df933 | [
"Apache-2.0"
] | 60 | 2021-09-07T12:42:48.000Z | 2022-03-12T09:30:36.000Z | low_level_simulation/src/rosbridge_suite/rosbridge_library/src/rosbridge_library/rosbridge_protocol.py | abiantorres/autonomous-vehicles-system-simulation | 3f0112036b2b270f5055729c648a1310976df933 | [
"Apache-2.0"
] | 1 | 2021-04-30T21:19:51.000Z | 2021-04-30T21:19:51.000Z | low_level_simulation/src/rosbridge_suite/rosbridge_library/src/rosbridge_library/rosbridge_protocol.py | abiantorres/autonomous-vehicles-system-simulation | 3f0112036b2b270f5055729c648a1310976df933 | [
"Apache-2.0"
] | 1 | 2021-09-14T07:39:48.000Z | 2021-09-14T07:39:48.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from rosbridge_library.protocol import Protocol
from rosbridge_library.capabilities.call_service import CallService
from rosbridge_library.capabilities.advertise import Advertise
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.subscribe import Subscribe
# imports for defragmentation
from rosbridge_library.capabilities.defragmentation import Defragment
# imports for external service_server
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.service_response import ServiceResponse
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
class RosbridgeProtocol(Protocol):
    """ Adds the handlers for the rosbridge opcodes """
    # Capability classes registered for every client connection; each one
    # implements a group of rosbridge opcodes (publish, subscribe, services,
    # defragmentation, ...).
    rosbridge_capabilities = [CallService, Advertise, Publish, Subscribe, Defragment, AdvertiseService, ServiceResponse, UnadvertiseService]
    # NOTE: these prints execute once, at class-definition (module import) time.
    print("registered capabilities (classes):")
    for cap in rosbridge_capabilities:
        print(" -", str(cap))
    # Optional per-connection configuration; overwritten per instance in __init__.
    parameters = None
    def __init__(self, client_id, parameters = None):
        # Store caller-supplied parameters, initialise the base Protocol,
        # then attach one instance of each capability to this protocol.
        self.parameters = parameters
        Protocol.__init__(self, client_id)
        for capability_class in self.rosbridge_capabilities:
            self.add_capability(capability_class)
| 46.666667 | 140 | 0.793537 |
from __future__ import print_function
from rosbridge_library.protocol import Protocol
from rosbridge_library.capabilities.call_service import CallService
from rosbridge_library.capabilities.advertise import Advertise
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.subscribe import Subscribe
from rosbridge_library.capabilities.defragmentation import Defragment
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.service_response import ServiceResponse
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
class RosbridgeProtocol(Protocol):
    """Protocol subclass that registers the handlers for the rosbridge opcodes."""
    # Capability classes registered for every client connection.
    rosbridge_capabilities = [CallService, Advertise, Publish, Subscribe, Defragment, AdvertiseService, ServiceResponse, UnadvertiseService]
    # NOTE: these prints execute once, at class-definition (module import) time.
    print("registered capabilities (classes):")
    for cap in rosbridge_capabilities:
        print(" -", str(cap))
    # Optional per-connection configuration; overwritten per instance in __init__.
    parameters = None
    def __init__(self, client_id, parameters = None):
        # Initialise the base Protocol, then attach one instance of each
        # capability to this protocol.
        self.parameters = parameters
        Protocol.__init__(self, client_id)
        for capability_class in self.rosbridge_capabilities:
            self.add_capability(capability_class)
| true | true |
7900c81da9f5cd9a737c8d27983858f055be3fde | 30,656 | py | Python | stacks/XIAOMATECH/1.0/services/BEACON/package/scripts/beacon.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | 3 | 2019-08-13T01:44:16.000Z | 2019-12-10T04:05:56.000Z | stacks/XIAOMATECH/1.0/services/BEACON/package/scripts/beacon.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | null | null | null | stacks/XIAOMATECH/1.0/services/BEACON/package/scripts/beacon.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | [
"Apache-2.0"
] | 7 | 2019-05-29T17:35:25.000Z | 2021-12-04T07:55:10.000Z | import os.path
import time
from resource_management.core.exceptions import Fail
from resource_management.core.source import Template
from resource_management.core.source import StaticFile
from resource_management.core.source import DownloadSource
from resource_management.core.resources import Execute
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.libraries.functions import get_user_call_output
from resource_management.libraries.functions import format
from resource_management.libraries.functions.show_logs import show_logs
from resource_management.libraries.functions.security_commons import update_credential_provider_path
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.core.logger import Logger
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
import beacon_utils
from resource_management.libraries.script import Script
import ranger_api_functions
def install_beacon():
    """Download and unpack the Beacon tarball, wire up conf/install symlinks.

    Idempotent-ish: the download/unpack steps only run when neither the
    versioned directory nor the install symlink already exists.
    """
    import params
    # Config directory owned by the beacon service user.
    Directory([params.etc_prefix_dir],
              owner=params.beacon_user,
              group=params.user_group,
              mode=0755,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists(
            params.install_dir):
        # Clean any partial previous install.
        # (operator precedence: '%s' % root binds first, then '/' + version_dir
        # is appended -- the resulting command is "rm -rf <root>/<version_dir>")
        Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        # Fetch and unpack the release tarball under the stack root.
        Execute(
            'wget ' + params.download_url + ' -O /tmp/' + params.filename,
            user=params.beacon_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C ' + Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir)
        # Move the shipped conf out to /etc and symlink it back.
        Execute(' cp -r ' + params.install_dir + '/conf/* ' + params.etc_prefix_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.etc_prefix_dir + ' ' + params.install_dir +
                '/conf')
        Execute('chown -R %s:%s %s/%s' %
                (params.beacon_user, params.user_group, params.stack_root, params.version_dir))
        Execute('chown -R %s:%s %s' % (params.beacon_user, params.user_group,
                                       params.install_dir))
    # Always drop the downloaded tarball, whether or not we unpacked this run.
    Execute('/bin/rm -f /tmp/' + params.filename)
def beacon(type, action=None, upgrade_type=None):
    """Configure, start or stop the Beacon server component.

    type: component kind; only 'server' triggers start/stop handling.
          (NOTE: shadows the builtin `type` -- kept for interface stability.)
    action: one of 'config', 'start', 'stop'.
    upgrade_type: accepted for interface compatibility; unused here.

    'start' additionally provisions users/policies in Ranger (Hive and Atlas
    plugins) when a Ranger admin is present; any failure in the start block
    is swallowed after dumping the Beacon logs (show_logs raises internally
    only for log display purposes).
    """
    import params
    if action == 'config':
        # Local/HDFS working directories plus the cloud credential store path
        # (format "<scheme>://<bucket>/<path>", split into scheme and path).
        create_directory(params.beacon_home_dir)
        create_directory(params.beacon_plugin_staging_dir)
        cloud_cred_provider = params.beacon_cloud_cred_provider_dir.split('://')[1]
        cloud_cred_parts = cloud_cred_provider.split('/', 1)
        create_directory("/" + cloud_cred_parts[1], cloud_cred_parts[0])
        if params.is_hive_installed:
            # Hive replication staging dirs; skipped when the config value
            # is not resolvable (UnknownConfiguration).
            if not isinstance(params.hive_repl_cmrootdir, UnknownConfiguration):
                beacon_utils.create_hdfs_directory(params.hive_repl_cmrootdir,
                                                   params.hive_user,
                                                   01777)
            if not isinstance(params.hive_repl_rootdir, UnknownConfiguration):
                beacon_utils.create_hdfs_directory(params.hive_repl_rootdir,
                                                   params.hive_user,
                                                   0700)
    # Runtime directories are (re)created for every action.
    Directory(params.beacon_pid_dir,
              owner=params.beacon_user,
              create_parents=True,
              mode=0755,
              cd_access="a",
              )
    Directory(params.beacon_data_dir,
              owner=params.beacon_user,
              create_parents=True,
              mode=0755,
              cd_access="a",
              )
    Directory(params.beacon_log_dir,
              owner=params.beacon_user,
              create_parents=True,
              mode=0755,
              cd_access="a",
              )
    Directory(params.beacon_webapp_dir,
              owner=params.beacon_user,
              create_parents=True)
    Directory(params.beacon_home,
              owner=params.beacon_user,
              create_parents=True)
    Directory(params.etc_prefix_dir,
              mode=0755,
              create_parents=True)
    Directory(params.beacon_conf_dir,
              owner=params.beacon_user,
              create_parents=True)
    # Environment passed to every beacon CLI invocation below.
    environment_dictionary = {
        "HADOOP_HOME": params.hadoop_home_dir,
        "JAVA_HOME": params.java_home,
        "BEACON_LOG_DIR": params.beacon_log_dir,
        "BEACON_PID_DIR": params.beacon_pid_dir,
        "BEACON_DATA_DIR": params.beacon_data_dir,
        "BEACON_CLUSTER": params.beacon_cluster_name,
        "HADOOP_CONF": params.hadoop_conf_dir
    }
    # Guard command used with `not_if` so start is a no-op when already running.
    pid = get_user_call_output.get_user_call_output(format("cat {server_pid_file}"), user=params.beacon_user,
                                                    is_checked_call=False)[1]
    process_exists = format("ls {server_pid_file} && ps -p {pid}")
    if type == 'server':
        if action == 'start':
            try:
                if params.credential_store_enabled:
                    # Copy the JCEKS credential store next to the Beacon conf.
                    if 'hadoop.security.credential.provider.path' in params.beacon_env:
                        credential_provider_path = params.beacon_env['hadoop.security.credential.provider.path']
                        credential_provider_src_path = credential_provider_path[len('jceks://file'):]
                        File(params.beacon_credential_provider_path[len('jceks://file'):],
                             owner=params.beacon_user,
                             group=params.user_group,
                             mode=0640,
                             content=StaticFile(credential_provider_src_path)
                             )
                    else:
                        Logger.error(
                            "hadoop.security.credential.provider.path property not found in beacon-env config-type")
                File(os.path.join(params.beacon_conf_dir, 'beacon.yml'),
                     owner='root',
                     group='root',
                     mode=0644,
                     content=Template("beacon.yml.j2")
                     )
                params.beacon_security_site = update_credential_provider_path(
                    params.beacon_security_site,
                    'beacon-security-site',
                    os.path.join(params.beacon_conf_dir, 'beacon-security-site.jceks'),
                    params.beacon_user,
                    params.user_group
                )
                XmlConfig("beacon-security-site.xml",
                          conf_dir=params.beacon_conf_dir,
                          configurations=params.beacon_security_site,
                          configuration_attributes=params.config['configuration_attributes']['beacon-security-site'],
                          owner=params.beacon_user,
                          group=params.user_group,
                          mode=0644
                          )
                Execute(format('{beacon_home}/bin/beacon setup'),
                        user=params.beacon_user,
                        path=params.hadoop_bin_dir,
                        environment=environment_dictionary
                        )
                if params.download_mysql_driver:
                    download_mysql_driver()
                Execute(format('{beacon_home}/bin/beacon start'),
                        user=params.beacon_user,
                        path=params.hadoop_bin_dir,
                        environment=environment_dictionary,
                        not_if=process_exists,
                        )
                if params.has_ranger_admin:
                    # Provision Beacon's users and policies in Ranger.
                    ranger_admin_url = params.config['configurations']['admin-properties']['policymgr_external_url']
                    ranger_admin_user = params.config['configurations']['ranger-env']['admin_username']
                    ranger_admin_passwd = params.config['configurations']['ranger-env']['admin_password']
                    if not params.security_enabled:
                        # Creating/Updating beacon.ranger.user with role "ROLE_SYS_ADMIN"
                        response_user = ranger_api_functions.get_user(ranger_admin_url, params.beacon_ranger_user,
                                                                      format(
                                                                          "{ranger_admin_user}:{ranger_admin_passwd}"))
                        if response_user is not None and response_user['name'] == params.beacon_ranger_user:
                            response_user_role = response_user['userRoleList'][0]
                            Logger.info(format(
                                "Beacon Ranger User with username {beacon_ranger_user} exists with role {response_user_role}"))
                            if response_user_role != "ROLE_SYS_ADMIN":
                                response_user_role = ranger_api_functions.update_user_role(ranger_admin_url,
                                                                                           params.beacon_ranger_user,
                                                                                           "ROLE_SYS_ADMIN", format(
                                        "{ranger_admin_user}:{ranger_admin_passwd}"))
                        else:
                            response_code = ranger_api_functions.create_user(ranger_admin_url,
                                                                             params.beacon_ranger_user,
                                                                             params.beacon_ranger_password,
                                                                             "ROLE_SYS_ADMIN", format(
                                    "{ranger_admin_user}:{ranger_admin_passwd}"))
                    # Updating beacon_user role depending upon cluster environment
                    # (Ranger usersync may lag, hence the bounded retry loop).
                    count = 0
                    while count < 10:
                        beacon_user_get = ranger_api_functions.get_user(ranger_admin_url, params.beacon_user, format(
                            "{ranger_admin_user}:{ranger_admin_passwd}"))
                        if beacon_user_get is not None:
                            break
                        else:
                            time.sleep(10)  # delay for 10 seconds
                            count = count + 1
                            Logger.error(
                                format('Retrying to fetch {beacon_user} user from Ranger Admin for {count} time(s)'))
                    if beacon_user_get is not None and beacon_user_get['name'] == params.beacon_user:
                        beacon_user_get_role = beacon_user_get['userRoleList'][0]
                        if params.security_enabled and beacon_user_get_role != "ROLE_SYS_ADMIN":
                            beacon_service_user = ranger_api_functions.update_user_role(ranger_admin_url,
                                                                                        params.beacon_user,
                                                                                        "ROLE_SYS_ADMIN", format(
                                    "{ranger_admin_user}:{ranger_admin_passwd}"))
                        elif not params.security_enabled and beacon_user_get_role != "ROLE_USER":
                            beacon_service_user = ranger_api_functions.update_user_role(ranger_admin_url,
                                                                                        params.beacon_user, "ROLE_USER",
                                                                                        format(
                                                                                            "{ranger_admin_user}:{ranger_admin_passwd}"))
                    if params.ranger_hive_plugin_enabled:
                        # Get Ranger Hive default policy for resource database, table, column
                        response_policy = ranger_api_functions.get_ranger_service_default_policy(ranger_admin_url,
                                                                                                 params.service_name,
                                                                                                 format(
                                                                                                     "{ranger_admin_user}:{ranger_admin_passwd}"),
                                                                                                 ['database', 'table',
                                                                                                  'column'])
                        if response_policy:
                            user_present = ranger_api_functions.check_user_policy(response_policy, params.beacon_user)
                            if not user_present and beacon_user_get is not None and beacon_user_get[
                                'name'] == params.beacon_user:
                                policy_id = response_policy['id']
                                beacon_user_policy_item = {'groups': [], 'conditions': [],
                                                           'users': [params.beacon_user],
                                                           'accesses': [{'isAllowed': True, 'type': 'all'},
                                                                        {'isAllowed': True, 'type': 'repladmin'}],
                                                           'delegateAdmin': False}
                                policy_data = ranger_api_functions.update_policy_item(response_policy,
                                                                                      beacon_user_policy_item)
                                update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, policy_id,
                                                                                            policy_data, format(
                                        "{ranger_admin_user}:{ranger_admin_passwd}"))
                        # Get Ranger Hive default policy for resource hiveservice
                        response_policy = ranger_api_functions.get_ranger_service_default_policy(ranger_admin_url,
                                                                                                 params.service_name,
                                                                                                 format(
                                                                                                     "{ranger_admin_user}:{ranger_admin_passwd}"),
                                                                                                 ['hiveservice'])
                        if response_policy:
                            user_present = ranger_api_functions.check_user_policy(response_policy, params.beacon_user)
                            if not user_present and beacon_user_get is not None and beacon_user_get[
                                'name'] == params.beacon_user:
                                # Updating beacon_user in Ranger Hive default policy for resource hiveservice
                                policy_id = response_policy['id']
                                beacon_user_policy_item = {'groups': [], 'conditions': [],
                                                           'users': [params.beacon_user],
                                                           'accesses': [{'isAllowed': True, 'type': 'serviceadmin'}],
                                                           'delegateAdmin': False}
                                policy_data = ranger_api_functions.update_policy_item(response_policy,
                                                                                      beacon_user_policy_item)
                                update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, policy_id,
                                                                                            policy_data, format(
                                        "{ranger_admin_user}:{ranger_admin_passwd}"))
                    if params.ranger_atlas_plugin_enabled:
                        # Creating beacon.atlas.user with role "ROLE_USER"
                        beacon_atlas_user_response = ranger_api_functions.get_user(ranger_admin_url,
                                                                                   params.beacon_atlas_user, format(
                                "{ranger_admin_user}:{ranger_admin_passwd}"))
                        if beacon_atlas_user_response is not None and beacon_atlas_user_response[
                            'name'] == params.beacon_atlas_user:
                            beacon_atlas_user_role = beacon_atlas_user_response['userRoleList'][0]
                            Logger.info(format(
                                "Beacon Atlas User with username {beacon_atlas_user} exists with role {beacon_atlas_user_role}"))
                        else:
                            beacon_atlas_user_create_response_code = ranger_api_functions.create_user(ranger_admin_url,
                                                                                                      params.beacon_atlas_user,
                                                                                                      params.beacon_atlas_password,
                                                                                                      "ROLE_USER",
                                                                                                      format(
                                                                                                          "{ranger_admin_user}:{ranger_admin_passwd}"))
                        # Kerberized clusters authenticate as the service user itself.
                        if params.security_enabled:
                            get_beacon_atlas_user = params.beacon_user
                        else:
                            get_beacon_atlas_user = params.beacon_atlas_user
                        if params.is_stack_3_0_or_further:
                            # Get Ranger Atlas default policy for ENTITY TYPE, ENTITY CLASSIFICATION and ENTITY ID resource
                            atlas_entity_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"),
                                ['entity', 'entity-classification', 'entity-type'])
                            if atlas_entity_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_entity_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    # Updating beacon atlas user in Ranger Atlas default policy for entity resource
                                    atlas_entity_policy_id = atlas_entity_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user], 'accesses': [
                                            {'type': 'entity-read', 'isAllowed': True},
                                            {'type': 'entity-create', 'isAllowed': True},
                                            {'type': 'entity-update', 'isAllowed': True}]}
                                    atlas_entity_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_entity_policy_response, beacon_atlas_user_policy_item)
                                    atlas_update_entity_policy_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_entity_policy_id, atlas_entity_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
                            # Get Ranger Atlas default policy for ATLAS SERVICE resource
                            atlas_service_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['atlas-service'])
                            if atlas_service_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_service_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    # Updating beacon atlas user in Ranger Atlas default policy for service resource
                                    atlas_service_policy_id = atlas_service_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user], 'accesses': [
                                            {'type': 'admin-export', 'isAllowed': True},
                                            {'type': 'admin-import', 'isAllowed': True}]}
                                    atlas_service_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_service_policy_response, beacon_atlas_user_policy_item)
                                    atlas_service_policy_update_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_service_policy_id, atlas_service_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
                            # Get Ranger Atlas default policy for TYPE CATEGORY and TYPE resource
                            atlas_type_category_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['type', 'type-category'])
                            if atlas_type_category_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_type_category_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    # Updating beacon atlas user in Ranger Atlas default policy for type category and type resource
                                    atlas_type_category_policy_id = atlas_type_category_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user], 'accesses': [
                                            {'type': 'type-create', 'isAllowed': True},
                                            {'type': 'type-update', 'isAllowed': True},
                                            {'type': 'type-delete', 'isAllowed': True}]}
                                    atlas_type_category_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_type_category_policy_response, beacon_atlas_user_policy_item)
                                    atlas_update_type_category_policy_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_type_category_policy_id,
                                        atlas_type_category_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
                        else:
                            # Get Ranger Atlas default policy for ENTITY resource
                            atlas_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['entity'])
                            if atlas_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    # Updating beacon atlas user in Ranger Atlas default policy for entity resource
                                    atlas_policy_id = atlas_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user],
                                                                     'accesses': [{'type': 'read', 'isAllowed': True},
                                                                                  {'type': 'create', 'isAllowed': True},
                                                                                  {'type': 'update', 'isAllowed': True},
                                                                                  {'type': 'delete', 'isAllowed': True},
                                                                                  {'type': 'all', 'isAllowed': True}]}
                                    atlas_policy_data = ranger_api_functions.update_policy_item(atlas_policy_response,
                                                                                                beacon_atlas_user_policy_item)
                                    atlas_update_policy_response = ranger_api_functions.update_policy(ranger_admin_url,
                                                                                                      atlas_policy_id,
                                                                                                      atlas_policy_data,
                                                                                                      format(
                                                                                                          "{ranger_admin_user}:{ranger_admin_passwd}"))
                            # Get Ranger Atlas default policy for OPERATION resource
                            atlas_operation_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['operation'])
                            if atlas_operation_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_operation_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    # Updating beacon atlas user in Ranger Atlas default policy for operation resource
                                    atlas_operation_policy_id = atlas_operation_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user],
                                                                     'accesses': [{'type': 'read', 'isAllowed': True},
                                                                                  {'type': 'create', 'isAllowed': True},
                                                                                  {'type': 'update', 'isAllowed': True},
                                                                                  {'type': 'delete', 'isAllowed': True},
                                                                                  {'type': 'all', 'isAllowed': True}]}
                                    atlas_operation_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_operation_policy_response, beacon_atlas_user_policy_item)
                                    atlas_operation_policy_update_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_operation_policy_id, atlas_operation_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
            except Exception as e:
                # NOTE(review): broad catch -- start failures are only surfaced
                # via the dumped logs, not re-raised. Confirm this is intended.
                show_logs(params.beacon_log_dir, params.beacon_user)
        if action == 'stop':
            try:
                Execute(format('{beacon_home}/bin/beacon stop'),
                        user=params.beacon_user,
                        path=params.hadoop_bin_dir,
                        environment=environment_dictionary)
            except:
                show_logs(params.beacon_log_dir, params.beacon_user)
            # Remove the stale pid file regardless of how the stop went.
            File(params.server_pid_file, action='delete')
def create_directory(directory, scheme=None):
import params
if (scheme is None or scheme == ''):
if params.is_hdfs_installed:
scheme = 'hdfs'
else:
scheme = 'file'
Logger.info("Creating directory {0}:/{1}".format(scheme, directory))
if scheme == 'file':
Directory(directory,
owner=params.beacon_user,
create_parents=True,
mode=0755,
cd_access="a")
elif scheme == 'hdfs':
beacon_utils.create_hdfs_directory(directory, params.beacon_user, 0775)
params.HdfsResource(None, action="execute")
def download_mysql_driver():
    """Fetch the MySQL JDBC driver jar from the Ambari server into Beacon's lib.

    Fails fast if no driver jar was configured on the ambari-server side.
    """
    import params
    if params.jdbc_jar_name is None:
        raise Fail("Mysql JDBC driver not installed on ambari-server")
    File(
        params.mysql_driver_target,
        content=DownloadSource(params.driver_source),
        mode=0644
    )
| 65.926882 | 151 | 0.479123 | import os.path
import time
from resource_management.core.exceptions import Fail
from resource_management.core.source import Template
from resource_management.core.source import StaticFile
from resource_management.core.source import DownloadSource
from resource_management.core.resources import Execute
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.libraries.functions import get_user_call_output
from resource_management.libraries.functions import format
from resource_management.libraries.functions.show_logs import show_logs
from resource_management.libraries.functions.security_commons import update_credential_provider_path
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.core.logger import Logger
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
import beacon_utils
from resource_management.libraries.script import Script
import ranger_api_functions
def install_beacon():
    """Download and unpack the Beacon tarball and wire up its conf dir.

    Idempotent: the download/unpack only runs when either the versioned
    stack directory or the install-dir symlink is missing.

    NOTE(review): the tarball is fetched with plain `wget` and no checksum
    verification, and stale trees are removed via shell `rm -rf` --
    presumably acceptable inside this managed stack; confirm.
    """
    import params
    Directory([params.etc_prefix_dir],
              owner=params.beacon_user,
              group=params.user_group,
              mode=0755,
              create_parents=True)
    # (Re)install only when the expected layout is absent.
    if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists(
            params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute(
            'wget ' + params.download_url + ' -O /tmp/' + params.filename,
            user=params.beacon_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C ' + Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir)
        # Move the shipped conf out of the install tree into /etc and symlink
        # it back, so config survives reinstalls of the binaries.
        Execute(' cp -r ' + params.install_dir + '/conf/* ' + params.etc_prefix_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.etc_prefix_dir + ' ' + params.install_dir +
                '/conf')
        Execute('chown -R %s:%s %s/%s' %
                (params.beacon_user, params.user_group, params.stack_root, params.version_dir))
        Execute('chown -R %s:%s %s' % (params.beacon_user, params.user_group,
                                       params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
def beacon(type, action=None, upgrade_type=None):
    """Configure / start / stop the Beacon server component.

    type: component kind; only 'server' triggers start/stop handling.
    action: 'config' (lay down dirs/config), 'start', or 'stop'.
    upgrade_type: unused here; kept for the Ambari handler signature.
    """
    import params
    # --- 'config': create working directories (local + HDFS + cloud creds).
    if action == 'config':
        create_directory(params.beacon_home_dir)
        create_directory(params.beacon_plugin_staging_dir)
        # beacon_cloud_cred_provider_dir looks like '<scheme>://<host>/<path>';
        # split off the scheme, then host vs path.
        cloud_cred_provider = params.beacon_cloud_cred_provider_dir.split('://')[1]
        cloud_cred_parts = cloud_cred_provider.split('/', 1)
        create_directory("/" + cloud_cred_parts[1], cloud_cred_parts[0])
        if params.is_hive_installed:
            # Hive replication dirs (skip when the config value is unknown).
            if not isinstance(params.hive_repl_cmrootdir, UnknownConfiguration):
                beacon_utils.create_hdfs_directory(params.hive_repl_cmrootdir,
                                                   params.hive_user,
                                                   01777)
            if not isinstance(params.hive_repl_rootdir, UnknownConfiguration):
                beacon_utils.create_hdfs_directory(params.hive_repl_rootdir,
                                                   params.hive_user,
                                                   0700)
    # Local directories needed for every action.
    Directory(params.beacon_pid_dir,
              owner=params.beacon_user,
              create_parents=True,
              mode=0755,
              cd_access="a",
              )
    Directory(params.beacon_data_dir,
              owner=params.beacon_user,
              create_parents=True,
              mode=0755,
              cd_access="a",
              )
    Directory(params.beacon_log_dir,
              owner=params.beacon_user,
              create_parents=True,
              mode=0755,
              cd_access="a",
              )
    Directory(params.beacon_webapp_dir,
              owner=params.beacon_user,
              create_parents=True)
    Directory(params.beacon_home,
              owner=params.beacon_user,
              create_parents=True)
    Directory(params.etc_prefix_dir,
              mode=0755,
              create_parents=True)
    Directory(params.beacon_conf_dir,
              owner=params.beacon_user,
              create_parents=True)
    # Environment exported to the beacon CLI invocations below.
    environment_dictionary = {
        "HADOOP_HOME": params.hadoop_home_dir,
        "JAVA_HOME": params.java_home,
        "BEACON_LOG_DIR": params.beacon_log_dir,
        "BEACON_PID_DIR": params.beacon_pid_dir,
        "BEACON_DATA_DIR": params.beacon_data_dir,
        "BEACON_CLUSTER": params.beacon_cluster_name,
        "HADOOP_CONF": params.hadoop_conf_dir
    }
    # Guard used as Execute(not_if=...): true when the pid file exists and
    # the recorded process is alive.
    pid = get_user_call_output.get_user_call_output(format("cat {server_pid_file}"), user=params.beacon_user,
                                                    is_checked_call=False)[1]
    process_exists = format("ls {server_pid_file} && ps -p {pid}")
    if type == 'server':
        if action == 'start':
            try:
                # Stage the JCEKS credential store next to the config.
                if params.credential_store_enabled:
                    if 'hadoop.security.credential.provider.path' in params.beacon_env:
                        credential_provider_path = params.beacon_env['hadoop.security.credential.provider.path']
                        # strip the 'jceks://file' prefix to get a plain path
                        credential_provider_src_path = credential_provider_path[len('jceks://file'):]
                        File(params.beacon_credential_provider_path[len('jceks://file'):],
                             owner=params.beacon_user,
                             group=params.user_group,
                             mode=0640,
                             content=StaticFile(credential_provider_src_path)
                             )
                    else:
                        Logger.error(
                            "hadoop.security.credential.provider.path property not found in beacon-env config-type")
                File(os.path.join(params.beacon_conf_dir, 'beacon.yml'),
                     owner='root',
                     group='root',
                     mode=0644,
                     content=Template("beacon.yml.j2")
                     )
                params.beacon_security_site = update_credential_provider_path(
                    params.beacon_security_site,
                    'beacon-security-site',
                    os.path.join(params.beacon_conf_dir, 'beacon-security-site.jceks'),
                    params.beacon_user,
                    params.user_group
                )
                XmlConfig("beacon-security-site.xml",
                          conf_dir=params.beacon_conf_dir,
                          configurations=params.beacon_security_site,
                          configuration_attributes=params.config['configuration_attributes']['beacon-security-site'],
                          owner=params.beacon_user,
                          group=params.user_group,
                          mode=0644
                          )
                # One-time schema/db setup, then start (skipped if running).
                Execute(format('{beacon_home}/bin/beacon setup'),
                        user=params.beacon_user,
                        path=params.hadoop_bin_dir,
                        environment=environment_dictionary
                        )
                if params.download_mysql_driver:
                    download_mysql_driver()
                Execute(format('{beacon_home}/bin/beacon start'),
                        user=params.beacon_user,
                        path=params.hadoop_bin_dir,
                        environment=environment_dictionary,
                        not_if=process_exists,
                        )
                # --- Ranger integration: make sure beacon users exist with
                # the right roles and are present in the default policies.
                if params.has_ranger_admin:
                    ranger_admin_url = params.config['configurations']['admin-properties']['policymgr_external_url']
                    ranger_admin_user = params.config['configurations']['ranger-env']['admin_username']
                    ranger_admin_passwd = params.config['configurations']['ranger-env']['admin_password']
                    if not params.security_enabled:
                        # Non-kerberized: ensure the dedicated beacon ranger
                        # user exists and is ROLE_SYS_ADMIN.
                        response_user = ranger_api_functions.get_user(ranger_admin_url, params.beacon_ranger_user,
                                                                      format(
                                                                          "{ranger_admin_user}:{ranger_admin_passwd}"))
                        if response_user is not None and response_user['name'] == params.beacon_ranger_user:
                            response_user_role = response_user['userRoleList'][0]
                            Logger.info(format(
                                "Beacon Ranger User with username {beacon_ranger_user} exists with role {response_user_role}"))
                            if response_user_role != "ROLE_SYS_ADMIN":
                                response_user_role = ranger_api_functions.update_user_role(ranger_admin_url,
                                                                                           params.beacon_ranger_user,
                                                                                           "ROLE_SYS_ADMIN", format(
                                        "{ranger_admin_user}:{ranger_admin_passwd}"))
                        else:
                            response_code = ranger_api_functions.create_user(ranger_admin_url,
                                                                             params.beacon_ranger_user,
                                                                             params.beacon_ranger_password,
                                                                             "ROLE_SYS_ADMIN", format(
                                    "{ranger_admin_user}:{ranger_admin_passwd}"))
                    # Poll (up to 10 x 10s) until the beacon service user
                    # shows up in Ranger.
                    count = 0
                    while count < 10:
                        beacon_user_get = ranger_api_functions.get_user(ranger_admin_url, params.beacon_user, format(
                            "{ranger_admin_user}:{ranger_admin_passwd}"))
                        if beacon_user_get is not None:
                            break
                        else:
                            time.sleep(10)
                            count = count + 1
                            Logger.error(
                                format('Retrying to fetch {beacon_user} user from Ranger Admin for {count} time(s)'))
                    if beacon_user_get is not None and beacon_user_get['name'] == params.beacon_user:
                        beacon_user_get_role = beacon_user_get['userRoleList'][0]
                        # Kerberized clusters want ROLE_SYS_ADMIN, else ROLE_USER.
                        if params.security_enabled and beacon_user_get_role != "ROLE_SYS_ADMIN":
                            beacon_service_user = ranger_api_functions.update_user_role(ranger_admin_url,
                                                                                        params.beacon_user,
                                                                                        "ROLE_SYS_ADMIN", format(
                                    "{ranger_admin_user}:{ranger_admin_passwd}"))
                        elif not params.security_enabled and beacon_user_get_role != "ROLE_USER":
                            beacon_service_user = ranger_api_functions.update_user_role(ranger_admin_url,
                                                                                        params.beacon_user, "ROLE_USER",
                                                                                        format(
                                                                                            "{ranger_admin_user}:{ranger_admin_passwd}"))
                    # Hive plugin: add beacon_user to the default db/table/col
                    # policy (all + repladmin) and the hiveservice policy.
                    if params.ranger_hive_plugin_enabled:
                        response_policy = ranger_api_functions.get_ranger_service_default_policy(ranger_admin_url,
                                                                                                 params.service_name,
                                                                                                 format(
                                                                                                     "{ranger_admin_user}:{ranger_admin_passwd}"),
                                                                                                 ['database', 'table',
                                                                                                  'column'])
                        if response_policy:
                            user_present = ranger_api_functions.check_user_policy(response_policy, params.beacon_user)
                            if not user_present and beacon_user_get is not None and beacon_user_get[
                                'name'] == params.beacon_user:
                                policy_id = response_policy['id']
                                beacon_user_policy_item = {'groups': [], 'conditions': [],
                                                           'users': [params.beacon_user],
                                                           'accesses': [{'isAllowed': True, 'type': 'all'},
                                                                        {'isAllowed': True, 'type': 'repladmin'}],
                                                           'delegateAdmin': False}
                                policy_data = ranger_api_functions.update_policy_item(response_policy,
                                                                                      beacon_user_policy_item)
                                update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, policy_id,
                                                                                            policy_data, format(
                                        "{ranger_admin_user}:{ranger_admin_passwd}"))
                        response_policy = ranger_api_functions.get_ranger_service_default_policy(ranger_admin_url,
                                                                                                 params.service_name,
                                                                                                 format(
                                                                                                     "{ranger_admin_user}:{ranger_admin_passwd}"),
                                                                                                 ['hiveservice'])
                        if response_policy:
                            user_present = ranger_api_functions.check_user_policy(response_policy, params.beacon_user)
                            if not user_present and beacon_user_get is not None and beacon_user_get[
                                'name'] == params.beacon_user:
                                policy_id = response_policy['id']
                                beacon_user_policy_item = {'groups': [], 'conditions': [],
                                                           'users': [params.beacon_user],
                                                           'accesses': [{'isAllowed': True, 'type': 'serviceadmin'}],
                                                           'delegateAdmin': False}
                                policy_data = ranger_api_functions.update_policy_item(response_policy,
                                                                                      beacon_user_policy_item)
                                update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, policy_id,
                                                                                            policy_data, format(
                                        "{ranger_admin_user}:{ranger_admin_passwd}"))
                    # Atlas plugin: ensure the beacon atlas user exists, then
                    # add it to the stack-version-specific default policies.
                    if params.ranger_atlas_plugin_enabled:
                        beacon_atlas_user_response = ranger_api_functions.get_user(ranger_admin_url,
                                                                                   params.beacon_atlas_user, format(
                                "{ranger_admin_user}:{ranger_admin_passwd}"))
                        if beacon_atlas_user_response is not None and beacon_atlas_user_response[
                            'name'] == params.beacon_atlas_user:
                            beacon_atlas_user_role = beacon_atlas_user_response['userRoleList'][0]
                            Logger.info(format(
                                "Beacon Atlas User with username {beacon_atlas_user} exists with role {beacon_atlas_user_role}"))
                        else:
                            beacon_atlas_user_create_response_code = ranger_api_functions.create_user(ranger_admin_url,
                                                                                                      params.beacon_atlas_user,
                                                                                                      params.beacon_atlas_password,
                                                                                                      "ROLE_USER",
                                                                                                      format(
                                                                                                          "{ranger_admin_user}:{ranger_admin_passwd}"))
                        # On kerberized clusters the service user itself is
                        # granted access instead of the dedicated atlas user.
                        if params.security_enabled:
                            get_beacon_atlas_user = params.beacon_user
                        else:
                            get_beacon_atlas_user = params.beacon_atlas_user
                        if params.is_stack_3_0_or_further:
                            # Stack >= 3.0: fine-grained entity / service /
                            # type policies.
                            atlas_entity_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"),
                                ['entity', 'entity-classification', 'entity-type'])
                            if atlas_entity_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_entity_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    atlas_entity_policy_id = atlas_entity_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user], 'accesses': [
                                            {'type': 'entity-read', 'isAllowed': True},
                                            {'type': 'entity-create', 'isAllowed': True},
                                            {'type': 'entity-update', 'isAllowed': True}]}
                                    atlas_entity_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_entity_policy_response, beacon_atlas_user_policy_item)
                                    atlas_update_entity_policy_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_entity_policy_id, atlas_entity_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
                            atlas_service_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['atlas-service'])
                            if atlas_service_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_service_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    atlas_service_policy_id = atlas_service_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user], 'accesses': [
                                            {'type': 'admin-export', 'isAllowed': True},
                                            {'type': 'admin-import', 'isAllowed': True}]}
                                    atlas_service_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_service_policy_response, beacon_atlas_user_policy_item)
                                    atlas_service_policy_update_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_service_policy_id, atlas_service_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
                            atlas_type_category_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['type', 'type-category'])
                            if atlas_type_category_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_type_category_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    atlas_type_category_policy_id = atlas_type_category_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user], 'accesses': [
                                            {'type': 'type-create', 'isAllowed': True},
                                            {'type': 'type-update', 'isAllowed': True},
                                            {'type': 'type-delete', 'isAllowed': True}]}
                                    atlas_type_category_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_type_category_policy_response, beacon_atlas_user_policy_item)
                                    atlas_update_type_category_policy_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_type_category_policy_id,
                                        atlas_type_category_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
                        else:
                            # Older stacks: coarse 'entity' + 'operation'
                            # policies.
                            atlas_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['entity'])
                            if atlas_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    atlas_policy_id = atlas_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user],
                                                                     'accesses': [{'type': 'read', 'isAllowed': True},
                                                                                  {'type': 'create', 'isAllowed': True},
                                                                                  {'type': 'update', 'isAllowed': True},
                                                                                  {'type': 'delete', 'isAllowed': True},
                                                                                  {'type': 'all', 'isAllowed': True}]}
                                    atlas_policy_data = ranger_api_functions.update_policy_item(atlas_policy_response,
                                                                                                beacon_atlas_user_policy_item)
                                    atlas_update_policy_response = ranger_api_functions.update_policy(ranger_admin_url,
                                                                                                     atlas_policy_id,
                                                                                                     atlas_policy_data,
                                                                                                     format(
                                                                                                         "{ranger_admin_user}:{ranger_admin_passwd}"))
                            atlas_operation_policy_response = ranger_api_functions.get_ranger_service_default_policy(
                                ranger_admin_url, params.ranger_atlas_service_name,
                                format("{ranger_admin_user}:{ranger_admin_passwd}"), ['operation'])
                            if atlas_operation_policy_response:
                                beacon_atlas_user_present = ranger_api_functions.check_user_policy(
                                    atlas_operation_policy_response, get_beacon_atlas_user)
                                if not beacon_atlas_user_present:
                                    atlas_operation_policy_id = atlas_operation_policy_response['id']
                                    beacon_atlas_user_policy_item = {'groups': [], 'conditions': [],
                                                                     'users': [get_beacon_atlas_user],
                                                                     'accesses': [{'type': 'read', 'isAllowed': True},
                                                                                  {'type': 'create', 'isAllowed': True},
                                                                                  {'type': 'update', 'isAllowed': True},
                                                                                  {'type': 'delete', 'isAllowed': True},
                                                                                  {'type': 'all', 'isAllowed': True}]}
                                    atlas_operation_policy_data = ranger_api_functions.update_policy_item(
                                        atlas_operation_policy_response, beacon_atlas_user_policy_item)
                                    atlas_operation_policy_update_response = ranger_api_functions.update_policy(
                                        ranger_admin_url, atlas_operation_policy_id, atlas_operation_policy_data,
                                        format("{ranger_admin_user}:{ranger_admin_passwd}"))
            # NOTE(review): this swallows every start-up failure after dumping
            # the logs -- the Ambari action will report success even when the
            # server did not start.  Presumably it should re-raise; confirm.
            except Exception as e:
                show_logs(params.beacon_log_dir, params.beacon_user)
        # --- 'stop': best-effort shutdown, then always remove the pid file.
        if action == 'stop':
            try:
                Execute(format('{beacon_home}/bin/beacon stop'),
                        user=params.beacon_user,
                        path=params.hadoop_bin_dir,
                        environment=environment_dictionary)
            except:
                show_logs(params.beacon_log_dir, params.beacon_user)
            File(params.server_pid_file, action='delete')
def create_directory(directory, scheme=None):
    """Create `directory` under the given scheme ('file' or 'hdfs').

    With no scheme, falls back to 'hdfs' when HDFS is installed and to the
    local filesystem otherwise.
    """
    import params
    if (scheme is None or scheme == ''):
        if params.is_hdfs_installed:
            scheme = 'hdfs'
        else:
            scheme = 'file'
    Logger.info("Creating directory {0}:/{1}".format(scheme, directory))
    if scheme == 'file':
        Directory(directory,
                  owner=params.beacon_user,
                  create_parents=True,
                  mode=0755,
                  cd_access="a")
    elif scheme == 'hdfs':
        # Queue the HDFS mkdir and flush it immediately.
        beacon_utils.create_hdfs_directory(directory, params.beacon_user, 0775)
        params.HdfsResource(None, action="execute")
def download_mysql_driver():
    """Fetch the MySQL JDBC driver staged on ambari-server; Fail if absent."""
    import params
    if params.jdbc_jar_name is None:
        raise Fail("Mysql JDBC driver not installed on ambari-server")
    File(
        params.mysql_driver_target,
        content=DownloadSource(params.driver_source),
        mode=0644
    )
| false | true |
7900c983b6c77702216c835dde7ae52ea81aa9b8 | 39,525 | py | Python | pandas/tests/indexes/datetimes/test_arithmetic.py | wla80/pandas | dccfee53ff68dfa2c42a7571f26ba640694aa547 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/datetimes/test_arithmetic.py | wla80/pandas | dccfee53ff68dfa2c42a7571f26ba640694aa547 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/datetimes/test_arithmetic.py | wla80/pandas | dccfee53ff68dfa2c42a7571f26ba640694aa547 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import operator
import pytest
import numpy as np
import pandas as pd
from pandas.compat.numpy import np_datetime64_compat
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning, NullFrequencyError
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
from pandas._libs import tslib
from pandas._libs.tslibs.offsets import shift_months
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
                        'US/Eastern', 'dateutil/Asia/Singapore',
                        'dateutil/US/Pacific'])
def tz(request):
    # Timezone fixture: None (tz-naive) plus pytz- and dateutil-backed zones.
    return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
                        np.timedelta64(2, 'h'), Timedelta(hours=2)],
                ids=str)
def delta(request):
    # Several ways of representing two hours:
    # DateOffset, datetime.timedelta, numpy.timedelta64, pandas.Timedelta.
    return request.param
@pytest.fixture(
    params=[
        datetime(2011, 1, 1),
        DatetimeIndex(['2011-01-01', '2011-01-02']),
        DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
        np.datetime64('2011-01-01'),
        Timestamp('2011-01-01')],
    ids=lambda x: type(x).__name__)
def addend(request):
    # Datetime-like values whose addition to a DatetimeIndex must raise.
    return request.param
class TestDatetimeIndexComparisons(object):
    """Comparison-operator tests for DatetimeIndex (tz-naive and tz-aware)."""

    @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
                                       Timestamp('2016-01-01'),
                                       np.datetime64('2016-01-01')])
    def test_dti_cmp_datetimelike(self, other, tz):
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            elif isinstance(other, Timestamp):
                other = other.tz_localize(dti.tzinfo)
            else:
                other = tslib._localize_pydatetime(other, dti.tzinfo)

        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

    def test_dti_cmp_non_datetime(self, tz):
        # GH#19301 by convention datetime.date is not considered comparable
        # to Timestamp or DatetimeIndex.  This may change in the future.
        # NOTE: renamed from `dti_cmp_non_datetime` -- without the `test_`
        # prefix pytest never collected or ran this test.
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)

        other = datetime(2016, 1, 1).date()
        assert not (dti == other).any()
        assert (dti != other).all()
        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_eq_null_scalar(self, other, tz):
        # GH#19301
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert not (dti == other).any()

    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_ne_null_scalar(self, other, tz):
        # GH#19301
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert (dti != other).all()

    @pytest.mark.parametrize('other', [None, np.nan])
    def test_dti_cmp_null_scalar_inequality(self, tz, other):
        # GH#19301: inequality comparison against None/nan must raise
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)

        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    def test_dti_cmp_nat(self):
        left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
                                 pd.Timestamp('2011-01-03')])
        right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])

        # Same expectations for datetime64 and object dtype
        for lhs, rhs in [(left, right),
                         (left.astype(object), right.astype(object))]:
            result = rhs == lhs
            expected = np.array([False, False, True])
            tm.assert_numpy_array_equal(result, expected)

            result = lhs != rhs
            expected = np.array([True, True, False])
            tm.assert_numpy_array_equal(result, expected)

            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT == rhs, expected)

            expected = np.array([True, True, True])
            tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT != lhs, expected)

            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT > lhs, expected)

    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])

        didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                  '2014-05-01', '2014-07-01'])
        didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
                                  '2014-06-01', '2014-07-01'])
        darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
                         np_datetime64_compat('2014-03-01 00:00Z'),
                         np_datetime64_compat('nat'), np.datetime64('nat'),
                         np_datetime64_compat('2014-06-01 00:00Z'),
                         np_datetime64_compat('2014-07-01 00:00Z')])

        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:

                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)

        # Comparison against the null scalar itself
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_comparison_tzawareness_compat(self, op):
        # GH#18162: mixing tz-aware and tz-naive must raise
        dr = pd.date_range('2016-01-01', periods=6)
        dz = dr.tz_localize('US/Pacific')

        with pytest.raises(TypeError):
            op(dr, dz)
        with pytest.raises(TypeError):
            op(dr, list(dz))
        with pytest.raises(TypeError):
            op(dz, dr)
        with pytest.raises(TypeError):
            op(dz, list(dr))

        # Check that there isn't a problem aware-aware and naive-naive do not
        # raise
        assert (dr == dr).all()
        assert (dr == list(dr)).all()
        assert (dz == dz).all()
        assert (dz == list(dz)).all()

        # Check comparisons against scalar Timestamps
        ts = pd.Timestamp('2000-03-14 01:59')
        ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')

        assert (dr > ts).all()
        with pytest.raises(TypeError):
            op(dr, ts_tz)

        assert (dz > ts_tz).all()
        with pytest.raises(TypeError):
            op(dz, ts)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_nat_comparison_tzawareness(self, op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                '2014-05-01', '2014-07-01'])
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

        result = op(dti.tz_localize('US/Pacific'), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

    def test_dti_cmp_int_raises(self):
        rng = date_range('1/1/2000', periods=10)

        # raise TypeError for now
        with pytest.raises(TypeError):
            rng < rng[3].value

    def test_dti_cmp_list(self):
        rng = date_range('1/1/2000', periods=10)

        result = rng == list(rng)
        expected = rng == rng
        tm.assert_numpy_array_equal(result, expected)
class TestDatetimeIndexArithmetic(object):
    def test_dti_add_timestamp_raises(self):
        # Adding a Timestamp to a DatetimeIndex is undefined and must raise.
        idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
        msg = "cannot add DatetimeIndex and Timestamp"
        with tm.assert_raises_regex(TypeError, msg):
            idx + Timestamp('2011-01-01')
    def test_dti_radd_timestamp_raises(self):
        # Reflected addition (Timestamp + DatetimeIndex) must raise too.
        idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
        msg = "cannot add DatetimeIndex and Timestamp"
        with tm.assert_raises_regex(TypeError, msg):
            Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
    def test_dti_add_int(self, tz, one):
        # Variants of `one` for #19012
        # Integer addition shifts the index by one frequency unit (1 hour).
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        result = rng + one
        expected = pd.date_range('2000-01-01 10:00', freq='H',
                                 periods=10, tz=tz)
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_int(self, tz, one):
        # In-place integer addition behaves like a +1-frequency shift.
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        expected = pd.date_range('2000-01-01 10:00', freq='H',
                                 periods=10, tz=tz)
        rng += one
        tm.assert_index_equal(rng, expected)
    def test_dti_sub_int(self, tz, one):
        # Integer subtraction shifts the index back one frequency unit.
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        result = rng - one
        expected = pd.date_range('2000-01-01 08:00', freq='H',
                                 periods=10, tz=tz)
        tm.assert_index_equal(result, expected)
    def test_dti_isub_int(self, tz, one):
        # In-place integer subtraction behaves like a -1-frequency shift.
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        expected = pd.date_range('2000-01-01 08:00', freq='H',
                                 periods=10, tz=tz)
        rng -= one
        tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# DatetimeIndex.shift is used in integer addition
def test_dti_shift_tzaware(self, tz):
# GH#9903
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00'
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00'
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00'
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
    def test_dti_shift_freqs(self):
        # test shift for DatetimeIndex and non DatetimeIndex
        # GH#8083
        # shift(n) moves by n own-freq periods; shift(n, freq) by n*freq.
        drange = pd.date_range('20130101', periods=5)
        result = drange.shift(1)
        expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
                                     '2013-01-05',
                                     '2013-01-06'], freq='D')
        tm.assert_index_equal(result, expected)

        result = drange.shift(-1)
        expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
                                     '2013-01-03', '2013-01-04'],
                                    freq='D')
        tm.assert_index_equal(result, expected)

        result = drange.shift(3, freq='2D')
        expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
                                     '2013-01-10',
                                     '2013-01-11'], freq='D')
        tm.assert_index_equal(result, expected)
    def test_dti_shift_int(self):
        # Integer add/sub on a freq-carrying index is equivalent to shift().
        rng = date_range('1/1/2000', periods=20)

        result = rng + 5
        expected = rng.shift(5)
        tm.assert_index_equal(result, expected)

        result = rng - 5
        expected = rng.shift(-5)
        tm.assert_index_equal(result, expected)
    def test_dti_shift_no_freq(self):
        # GH#19147: shifting a freq-less index has no defined step and raises.
        dti = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01'], freq=None)
        with pytest.raises(NullFrequencyError):
            dti.shift(2)
    @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
    def test_dti_shift_localized(self, tzstr):
        # Shifting a tz-localized index must preserve its timezone.
        dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
        dr_tz = dr.tz_localize(tzstr)

        result = dr_tz.shift(1, '10T')
        assert result.tz == dr_tz.tz
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
    def test_dti_add_timedeltalike(self, tz, delta):
        # Adding any 2-hour timedelta-like (see `delta` fixture) shifts +2h.
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        result = rng + delta
        expected = pd.date_range('2000-01-01 02:00',
                                 '2000-02-01 02:00', tz=tz)
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_timedeltalike(self, tz, delta):
        # In-place addition of a 2-hour timedelta-like shifts +2h.
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('2000-01-01 02:00',
                                 '2000-02-01 02:00', tz=tz)
        rng += delta
        tm.assert_index_equal(rng, expected)
    def test_dti_sub_timedeltalike(self, tz, delta):
        # Subtracting a 2-hour timedelta-like shifts -2h.
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('1999-12-31 22:00',
                                 '2000-01-31 22:00', tz=tz)
        result = rng - delta
        tm.assert_index_equal(result, expected)
    def test_dti_isub_timedeltalike(self, tz, delta):
        # In-place subtraction of a 2-hour timedelta-like shifts -2h.
        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('1999-12-31 22:00',
                                 '2000-01-31 22:00', tz=tz)
        rng -= delta
        tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
    def test_dti_add_tdi(self, tz):
        # GH 17558
        # DatetimeIndex + TimedeltaIndex (and + timedelta64 ndarray),
        # including the reflected versions, all give elementwise sums.
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)

        # add with TimdeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)

        result = tdi + dti
        tm.assert_index_equal(result, expected)

        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)

        result = tdi.values + dti
        tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz):
        # GH 17558
        # In-place += between DatetimeIndex and TimedeltaIndex / td64 array,
        # in both operand orders.
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)

        # iadd with TimdeltaIndex
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)

        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)

        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq', [None, 'D'])
def test_sub_period(self, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
    def test_datetimeindex_sub_timestamp_overflow(self):
        """Subtracting a scalar datetime (in several equivalent
        representations) must raise OverflowError when the resulting
        timedelta would leave the representable int64-ns range, and
        produce exact values when it stays inside it."""
        dtimax = pd.to_datetime(['now', pd.Timestamp.max])
        dtimin = pd.to_datetime(['now', pd.Timestamp.min])
        # Each scalar is exercised as Timestamp, datetime.datetime and
        # two datetime64 resolutions.
        tsneg = Timestamp('1950-01-01')
        ts_neg_variants = [tsneg,
                           tsneg.to_pydatetime(),
                           tsneg.to_datetime64().astype('datetime64[ns]'),
                           tsneg.to_datetime64().astype('datetime64[D]')]
        tspos = Timestamp('1980-01-01')
        ts_pos_variants = [tspos,
                           tspos.to_pydatetime(),
                           tspos.to_datetime64().astype('datetime64[ns]'),
                           tspos.to_datetime64().astype('datetime64[D]')]
        # Timestamp.max minus a pre-epoch date overflows ...
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError):
                dtimax - variant
        # ... but minus a post-epoch date stays in range.
        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected
        # Symmetric cases against Timestamp.min.
        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected
        for variant in ts_pos_variants:
            with pytest.raises(OverflowError):
                dtimin - variant
    @pytest.mark.parametrize('names', [('foo', None, None),
                                       ('baz', 'bar', None),
                                       ('bar', 'bar', 'bar')])
    @pytest.mark.parametrize('tz', [None, 'America/Chicago'])
    def test_dti_add_series(self, tz, names):
        """Series[timedelta] + DatetimeIndex keeps datetime dtype and
        follows the usual name-propagation rules (GH#13905)."""
        index = DatetimeIndex(['2016-06-28 05:30', '2016-06-28 05:31'],
                              tz=tz, name=names[0])
        ser = Series([Timedelta(seconds=5)] * 2,
                     index=index, name=names[1])
        expected = Series(index + Timedelta(seconds=5),
                          index=index, name=names[2])
        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_series_equal(result, expected)
        result2 = index + ser
        tm.assert_series_equal(result2, expected)
        # Adding the raw ndarray of timedeltas returns an Index instead.
        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)
def test_dti_add_offset_array(self, tz):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_add_offset_index(self, tz, names):
# GH#18849, GH#19744
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=names[2], freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
def test_dti_sub_offset_array(self, tz):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_sub_offset_index(self, tz, names):
# GH#18824, GH#19744
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=names[2], freq='infer')
tm.assert_index_equal(res, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_with_offset_series(self, tz, names):
        """DatetimeIndex +/- Series[DateOffset] returns a Series, warns
        about performance, and propagates names (GH#18849)."""
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                       name=names[1])
        expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
                              name=names[2])
        with tm.assert_produces_warning(PerformanceWarning):
            res = dti + other
        tm.assert_series_equal(res, expected_add)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + dti
        tm.assert_series_equal(res2, expected_add)
        expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
                              name=names[2])
        with tm.assert_produces_warning(PerformanceWarning):
            res3 = dti - other
        tm.assert_series_equal(res3, expected_sub)
    def test_dti_add_offset_tzaware(self):
        """Adding an Hour offset to a tz-aware index matches scalar
        addition and the equivalent timedelta forms (GH#6818)."""
        dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
        offset = dates + pd.offsets.Hour(5)
        assert dates[0] + pd.offsets.Hour(5) == offset[0]
        # GH#6818
        for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:
            dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')
            expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
                                      '2010-11-01 07:00'], freq='H', tz=tz)
            # Hour offset, np.timedelta64 and datetime.timedelta must all
            # yield the same shifted index.
            offset = dates + pd.offsets.Hour(5)
            tm.assert_index_equal(offset, expected)
            offset = dates + np.timedelta64(5, 'h')
            tm.assert_index_equal(offset, expected)
            offset = dates + timedelta(hours=5)
            tm.assert_index_equal(offset, expected)
@pytest.mark.parametrize('klass,assert_func', [
    (Series, tm.assert_series_equal),
    (DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_offset_array(klass, assert_func):
    """Adding a box (Series/Index) of DateOffsets applies each offset
    elementwise; mixed and uniform offsets behave the same (GH#10699)."""
    # GH#10699
    # array of offsets
    box = Series if klass is Series else pd.Index
    with tm.assert_produces_warning(PerformanceWarning):
        s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
        result = s + box([pd.offsets.DateOffset(years=1),
                          pd.offsets.MonthEnd()])
        exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')])
        assert_func(result, exp)

        # same offset
        result = s + box([pd.offsets.DateOffset(years=1),
                          pd.offsets.DateOffset(years=1)])
        exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
        assert_func(result, exp)
@pytest.mark.parametrize('klass,assert_func', [
    (Series, tm.assert_series_equal),
    (DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_DateOffsets_relativedelta(klass, assert_func):
    """Vectorized DateOffset arithmetic must agree with applying the
    same offset to each element individually (GH#10699)."""
    vec = klass([Timestamp('2000-01-05 00:15:00'),
                 Timestamp('2000-01-31 00:23:00'),
                 Timestamp('2000-01-01'),
                 Timestamp('2000-03-31'),
                 Timestamp('2000-02-29'),
                 Timestamp('2000-12-31'),
                 Timestamp('2000-05-15'),
                 Timestamp('2001-06-15')])
    # DateOffset relativedelta fastpath
    relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
                       ('hours', 5), ('minutes', 10), ('seconds', 2),
                       ('microseconds', 5)]
    for i, kwd in enumerate(relative_kwargs):
        # Single-field offset ...
        op = pd.DateOffset(**dict([kwd]))
        assert_func(klass([x + op for x in vec]), vec + op)
        assert_func(klass([x - op for x in vec]), vec - op)
        # ... and the cumulative combination of all fields so far.
        op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
        assert_func(klass([x + op for x in vec]), vec + op)
        assert_func(klass([x - op for x in vec]), vec - op)
@pytest.mark.parametrize('cls_and_kwargs', [
    'YearBegin', ('YearBegin', {'month': 5}),
    'YearEnd', ('YearEnd', {'month': 5}),
    'MonthBegin', 'MonthEnd',
    'SemiMonthEnd', 'SemiMonthBegin',
    'Week', ('Week', {'weekday': 3}),
    'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
    'CustomBusinessDay', 'CDay', 'CBMonthEnd',
    'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
    'BusinessHour', 'BYearBegin', 'BYearEnd',
    'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
    ('FY5253Quarter', {'qtr_with_extra_week': 1,
                       'startingMonth': 1,
                       'weekday': 2,
                       'variation': 'nearest'}),
    ('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}),
    ('WeekOfMonth', {'weekday': 2, 'week': 2}),
    'Easter', ('DateOffset', {'day': 4}),
    ('DateOffset', {'month': 5})])
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('klass,assert_func', [
    (Series, tm.assert_series_equal),
    (DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_and_kwargs):
    """For every offset class, vectorized +/- must match elementwise
    application (GH#10699)."""
    # GH#10699
    # assert these are equal on a piecewise basis
    vec = klass([Timestamp('2000-01-05 00:15:00'),
                 Timestamp('2000-01-31 00:23:00'),
                 Timestamp('2000-01-01'),
                 Timestamp('2000-03-31'),
                 Timestamp('2000-02-29'),
                 Timestamp('2000-12-31'),
                 Timestamp('2000-05-15'),
                 Timestamp('2001-06-15')])
    if isinstance(cls_and_kwargs, tuple):
        # If cls_name param is a tuple, then 2nd entry is kwargs for
        # the offset constructor
        cls_name, kwargs = cls_and_kwargs
    else:
        cls_name = cls_and_kwargs
        kwargs = {}
    offset_cls = getattr(pd.offsets, cls_name)
    # Some offsets warn (e.g. non-vectorized fallback); silence them so
    # only the equality assertions matter here.
    with warnings.catch_warnings(record=True):
        for n in [0, 5]:
            if (cls_name in ['WeekOfMonth', 'LastWeekOfMonth',
                             'FY5253Quarter', 'FY5253'] and n == 0):
                # passing n = 0 is invalid for these offset classes
                continue
            offset = offset_cls(n, normalize=normalize, **kwargs)
            assert_func(klass([x + offset for x in vec]), vec + offset)
            assert_func(klass([x - offset for x in vec]), vec - offset)
            assert_func(klass([offset + x for x in vec]), offset + vec)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
                                                  [tm.assert_series_equal,
                                                   tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
    """DateOffset +/- a datetime64 box works in both operand orders and
    preserves timezone information (GH#10699)."""
    s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
    result = s + pd.DateOffset(years=1)
    result2 = pd.DateOffset(years=1) + s
    exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
    result = s - pd.DateOffset(years=1)
    exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
    assert_func(result, exp)
    # tz-aware values with Day offset
    s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
               pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
    result = s + pd.offsets.Day()
    result2 = pd.offsets.Day() + s
    exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
                 Timestamp('2000-02-16', tz='US/Central')], name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
    # tz-aware values with MonthEnd offset
    s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
               pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
    result = s + pd.offsets.MonthEnd()
    result2 = pd.offsets.MonthEnd() + s
    exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
                 Timestamp('2000-02-29', tz='US/Central')], name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
@pytest.mark.parametrize('years', [-1, 0, 1])
@pytest.mark.parametrize('months', [-2, 0, 2])
def test_shift_months(years, months):
    # The low-level shift_months helper must agree with elementwise
    # DateOffset(years=..., months=...) arithmetic.
    dti = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
                         Timestamp('2000-01-31 00:23:00'),
                         Timestamp('2000-01-01'),
                         Timestamp('2000-02-29'),
                         Timestamp('2000-12-31')])
    actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))
    offset = pd.offsets.DateOffset(years=years, months=months)
    expected = DatetimeIndex([ts + offset for ts in dti])
    tm.assert_index_equal(actual, expected)
| 40.126904 | 79 | 0.559089 |
import warnings
from datetime import datetime, timedelta
import operator
import pytest
import numpy as np
import pandas as pd
from pandas.compat.numpy import np_datetime64_compat
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning, NullFrequencyError
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
from pandas._libs import tslib
from pandas._libs.tslibs.offsets import shift_months
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
                        'US/Eastern', 'dateutil/Asia/Singapore',
                        'dateutil/US/Pacific'])
def tz(request):
    # Timezone fixture: naive (None) plus pytz- and dateutil-backed zones.
    return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
                        np.timedelta64(2, 'h'), Timedelta(hours=2)],
                ids=str)
def delta(request):
    # Four equivalent representations of a two-hour timedelta.
    return request.param
@pytest.fixture(
    params=[
        datetime(2011, 1, 1),
        DatetimeIndex(['2011-01-01', '2011-01-02']),
        DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
        np.datetime64('2011-01-01'),
        Timestamp('2011-01-01')],
    ids=lambda x: type(x).__name__)
def addend(request):
    # Datetime-like values whose addition to a DatetimeIndex must raise.
    return request.param
class TestDatetimeIndexComparisons(object):
    """Comparison-operator behavior of DatetimeIndex (scalars, NaT,
    tz-awareness mismatches, and list-likes)."""

    @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
                                       Timestamp('2016-01-01'),
                                       np.datetime64('2016-01-01')])
    def test_dti_cmp_datetimelike(self, other, tz):
        # Elementwise comparison against a scalar datetime-like; the
        # scalar is localized to match when the index is tz-aware.
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tz-aware flavor of np.datetime64 exists
                return
            elif isinstance(other, Timestamp):
                other = other.tz_localize(dti.tzinfo)
            else:
                other = tslib._localize_pydatetime(other, dti.tzinfo)

        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

    def dti_cmp_non_datetime(self, tz):
        # BUG FIX: the construction of `dti` had been truncated to a
        # bare `tz=tz)` (a syntax error); restored the call from the
        # sibling tests.
        # NOTE(review): this method lacks the `test_` prefix, so pytest
        # does not collect it -- confirm whether it should be renamed.
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)

        # A datetime.date is not comparable: equality is all-False and
        # ordering comparisons raise.
        other = datetime(2016, 1, 1).date()
        assert not (dti == other).any()
        assert (dti != other).all()

        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_eq_null_scalar(self, other, tz):
        # Equality with a null scalar is all-False.
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert not (dti == other).any()

    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_ne_null_scalar(self, other, tz):
        # Inequality with a null scalar is all-True.
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert (dti != other).all()

    @pytest.mark.parametrize('other', [None, np.nan])
    def test_dti_cmp_null_scalar_inequality(self, tz, other):
        # Ordering comparisons against None/np.nan raise (pd.NaT is the
        # only null that supports ordering semantics).
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)

        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other

    def test_dti_cmp_nat(self):
        # NaT compares unequal (and un-ordered) with everything,
        # both on datetime64 and object dtype.
        left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
                                 pd.Timestamp('2011-01-03')])
        right = pd.DatetimeIndex([pd.NaT, pd.NaT,
                                  pd.Timestamp('2011-01-03')])

        for lhs, rhs in [(left, right),
                         (left.astype(object), right.astype(object))]:
            result = rhs == lhs
            expected = np.array([False, False, True])
            tm.assert_numpy_array_equal(result, expected)

            result = lhs != rhs
            expected = np.array([True, True, False])
            tm.assert_numpy_array_equal(result, expected)

            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT == rhs, expected)

            expected = np.array([True, True, True])
            tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT != lhs, expected)

            expected = np.array([False, False, False])
            tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
            tm.assert_numpy_array_equal(pd.NaT > lhs, expected)

    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        # NaT in datetime comparisons behaves exactly like NaN in float
        # comparisons: always False except for `!=`.
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])

        didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                  '2014-05-01', '2014-07-01'])
        didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
                                  '2014-06-01', '2014-07-01'])
        darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
                         np_datetime64_compat('2014-03-01 00:00Z'),
                         np_datetime64_compat('nat'), np.datetime64('nat'),
                         np_datetime64_compat('2014-06-01 00:00Z'),
                         np_datetime64_compat('2014-07-01 00:00Z')])

        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]

        # Index-vs-Index / Index-vs-array comparisons.
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:
                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)

        # Comparisons against the null scalar itself.
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False,
                                     False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

        # Comparisons against a non-null scalar.
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False,
                                     False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_comparison_tzawareness_compat(self, op):
        # Comparing naive with tz-aware values raises; like-with-like
        # does not.
        dr = pd.date_range('2016-01-01', periods=6)
        dz = dr.tz_localize('US/Pacific')

        with pytest.raises(TypeError):
            op(dr, dz)
        with pytest.raises(TypeError):
            op(dr, list(dz))
        with pytest.raises(TypeError):
            op(dz, dr)
        with pytest.raises(TypeError):
            op(dz, list(dr))

        # Like-with-like comparisons do not raise.
        assert (dr == dr).all()
        assert (dr == list(dr)).all()
        assert (dz == dz).all()
        assert (dz == list(dz)).all()

        # Check comparisons against scalar Timestamps
        ts = pd.Timestamp('2000-03-14 01:59')
        ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')

        assert (dr > ts).all()
        with pytest.raises(TypeError):
            op(dr, ts_tz)

        assert (dz > ts_tz).all()
        with pytest.raises(TypeError):
            op(dz, ts)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_nat_comparison_tzawareness(self, op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                '2014-05-01', '2014-07-01'])
        # Against NaT every comparison is False except `!=`.
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

        result = op(dti.tz_localize('US/Pacific'), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

    def test_dti_cmp_int_raises(self):
        rng = date_range('1/1/2000', periods=10)

        # raise TypeError for now
        with pytest.raises(TypeError):
            rng < rng[3].value

    def test_dti_cmp_list(self):
        # Comparing against a list is the same as against the index.
        rng = date_range('1/1/2000', periods=10)

        result = rng == list(rng)
        expected = rng == rng
        tm.assert_numpy_array_equal(result, expected)
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# DatetimeIndex.shift is used in integer addition
def test_dti_shift_tzaware(self, tz):
# GH#9903
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00'
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00'
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00'
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
    def test_dti_shift_freqs(self):
        """shift(n) moves by n*freq; shift(n, freq=...) moves by the
        given offset while keeping the original frequency."""
        # test shift for DatetimeIndex and non DatetimeIndex
        # GH#8083
        drange = pd.date_range('20130101', periods=5)
        result = drange.shift(1)
        expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
                                     '2013-01-05',
                                     '2013-01-06'], freq='D')
        tm.assert_index_equal(result, expected)
        result = drange.shift(-1)
        expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
                                     '2013-01-03', '2013-01-04'],
                                    freq='D')
        tm.assert_index_equal(result, expected)
        # shift by 3 steps of an explicit 2-day offset: +6 days total.
        result = drange.shift(3, freq='2D')
        expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
                                     '2013-01-10',
                                     '2013-01-11'], freq='D')
        tm.assert_index_equal(result, expected)
def test_dti_shift_int(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
tm.assert_index_equal(result, expected)
result = rng - 5
expected = rng.shift(-5)
tm.assert_index_equal(result, expected)
def test_dti_shift_no_freq(self):
# GH#19147
dti = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01'], freq=None)
with pytest.raises(NullFrequencyError):
dti.shift(2)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_shift_localized(self, tzstr):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(tzstr)
result = dr_tz.shift(1, '10T')
assert result.tz == dr_tz.tz
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz):
        """In-place DatetimeIndex += TimedeltaIndex / timedelta64 array,
        and the reflected TimedeltaIndex += DatetimeIndex case."""
        # GH 17558
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimdeltaIndex
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        # NOTE(review): the two lines below duplicate the TimedeltaIndex
        # case above; for the "timedelta64 array" section this was
        # presumably meant to exercise `dti.values` — confirm upstream.
        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
    def test_sub_dti_dti(self):
        """DatetimeIndex - DatetimeIndex returns a TimedeltaIndex.
        Mixing naive/aware (or different tz... only same-tz is allowed
        per the raises below), or mismatched lengths, raises; NaT propagates.
        """
        dti = date_range('20130101', periods=3)
        dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
        dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
        expected = TimedeltaIndex([0, 0, 0])
        # same-tz (or both naive) subtraction yields zero timedeltas
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        # naive vs aware (either order) and aware with different tz raise
        with pytest.raises(TypeError):
            dti_tz - dti
        with pytest.raises(TypeError):
            dti - dti_tz
        with pytest.raises(TypeError):
            dti_tz - dti_tz2
        # in-place subtraction mutates dti
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # length mismatch raises ValueError
        dti1 = date_range('20130101', periods=3)
        dti2 = date_range('20130101', periods=4)
        with pytest.raises(ValueError):
            dti1 - dti2
        # NaT in either operand propagates to the result
        dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
        dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
        expected = TimedeltaIndex(['1 days', np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq', [None, 'D'])
def test_sub_period(self, freq):
p = pd.Period('2011-01-01', freq='D')
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
    def test_ufunc_coercions(self):
        """Operator and numpy-ufunc forms of +/- with timedelta64 scalars
        and arrays must agree, return DatetimeIndex, and infer freq.
        """
        idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
        delta = np.timedelta64(1, 'D')
        # scalar timedelta: freq ('2D') is preserved
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'
        # elementwise timedelta64 array: resulting freq is re-inferred
        delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
                          np.timedelta64(3, 'D')])
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
                                freq='3D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '3D'
        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
                                freq='D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'D'
    def test_datetimeindex_sub_timestamp_overflow(self):
        """Subtracting a Timestamp must raise OverflowError when the
        resulting timedelta does not fit in int64, and succeed otherwise.
        """
        dtimax = pd.to_datetime(['now', pd.Timestamp.max])
        dtimin = pd.to_datetime(['now', pd.Timestamp.min])
        tsneg = Timestamp('1950-01-01')
        # the same instant expressed as Timestamp, datetime, and datetime64
        ts_neg_variants = [tsneg,
                           tsneg.to_pydatetime(),
                           tsneg.to_datetime64().astype('datetime64[ns]'),
                           tsneg.to_datetime64().astype('datetime64[D]')]
        tspos = Timestamp('1980-01-01')
        ts_pos_variants = [tspos,
                           tspos.to_pydatetime(),
                           tspos.to_datetime64().astype('datetime64[ns]'),
                           tspos.to_datetime64().astype('datetime64[D]')]
        # Timestamp.max minus a pre-epoch timestamp overflows int64 ns
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError):
                dtimax - variant
        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected
        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected
        # Timestamp.min minus a post-epoch timestamp overflows int64 ns
        for variant in ts_pos_variants:
            with pytest.raises(OverflowError):
                dtimin - variant
@pytest.mark.parametrize('names', [('foo', None, None),
('baz', 'bar', None),
('bar', 'bar', 'bar')])
@pytest.mark.parametrize('tz', [None, 'America/Chicago'])
def test_dti_add_series(self, tz, names):
index = DatetimeIndex(['2016-06-28 05:30', '2016-06-28 05:31'],
tz=tz, name=names[0])
ser = Series([Timedelta(seconds=5)] * 2,
index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5),
index=index, name=names[2])
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
def test_dti_add_offset_array(self, tz):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_add_offset_index(self, tz, names):
# GH#18849, GH#19744
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=names[2], freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
def test_dti_sub_offset_array(self, tz):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_sub_offset_index(self, tz, names):
# GH#18824, GH#19744
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=names[2], freq='infer')
tm.assert_index_equal(res, expected)
    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_with_offset_series(self, tz, names):
        """DatetimeIndex +/- Series-of-offsets returns a Series with the
        combined name, applying each offset elementwise (GH#18849).
        """
        # GH#18849
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                       name=names[1])
        expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
                              name=names[2])
        with tm.assert_produces_warning(PerformanceWarning):
            res = dti + other
        tm.assert_series_equal(res, expected_add)
        with tm.assert_produces_warning(PerformanceWarning):
            res2 = other + dti
        tm.assert_series_equal(res2, expected_add)
        # subtraction: only dti - other is defined, not other - dti
        expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
                              name=names[2])
        with tm.assert_produces_warning(PerformanceWarning):
            res3 = dti - other
        tm.assert_series_equal(res3, expected_sub)
def test_dti_add_offset_tzaware(self):
dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
# GH#6818
for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:
dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')
expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
'2010-11-01 07:00'], freq='H', tz=tz)
offset = dates + pd.offsets.Hour(5)
tm.assert_index_equal(offset, expected)
offset = dates + np.timedelta64(5, 'h')
tm.assert_index_equal(offset, expected)
offset = dates + timedelta(hours=5)
tm.assert_index_equal(offset, expected)
@pytest.mark.parametrize('klass,assert_func', [
    (Series, tm.assert_series_equal),
    (DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_offset_array(klass, assert_func):
    """Adding an array/Index/Series of offsets to a datetime64 box applies
    each offset elementwise (GH#10699).
    """
    # GH#10699
    # array of offsets
    box = Series if klass is Series else pd.Index
    with tm.assert_produces_warning(PerformanceWarning):
        s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
        result = s + box([pd.offsets.DateOffset(years=1),
                          pd.offsets.MonthEnd()])
        exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')])
        assert_func(result, exp)
    # same offset
    # NOTE(review): this second addition runs OUTSIDE the
    # assert_produces_warning block above -- confirm whether leaving the
    # PerformanceWarning unchecked here is intentional.
    result = s + box([pd.offsets.DateOffset(years=1),
                      pd.offsets.DateOffset(years=1)])
    exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
    assert_func(result, exp)
@pytest.mark.parametrize('klass,assert_func', [
    (Series, tm.assert_series_equal),
    (DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_DateOffsets_relativedelta(klass, assert_func):
    """Vectorized DateOffset arithmetic must match applying the same
    offset to each Timestamp individually (GH#10699).
    """
    # GH#10699
    vec = klass([Timestamp('2000-01-05 00:15:00'),
                 Timestamp('2000-01-31 00:23:00'),
                 Timestamp('2000-01-01'),
                 Timestamp('2000-03-31'),
                 Timestamp('2000-02-29'),
                 Timestamp('2000-12-31'),
                 Timestamp('2000-05-15'),
                 Timestamp('2001-06-15')])
    # DateOffset relativedelta fastpath
    relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
                       ('hours', 5), ('minutes', 10), ('seconds', 2),
                       ('microseconds', 5)]
    for i, kwd in enumerate(relative_kwargs):
        # single-keyword offset
        op = pd.DateOffset(**dict([kwd]))
        assert_func(klass([x + op for x in vec]), vec + op)
        assert_func(klass([x - op for x in vec]), vec - op)
        # cumulative combination of all keywords seen so far
        op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
        assert_func(klass([x + op for x in vec]), vec + op)
        assert_func(klass([x - op for x in vec]), vec - op)
@pytest.mark.parametrize('cls_and_kwargs', [
    'YearBegin', ('YearBegin', {'month': 5}),
    'YearEnd', ('YearEnd', {'month': 5}),
    'MonthBegin', 'MonthEnd',
    'SemiMonthEnd', 'SemiMonthBegin',
    'Week', ('Week', {'weekday': 3}),
    'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
    'CustomBusinessDay', 'CDay', 'CBMonthEnd',
    'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
    'BusinessHour', 'BYearBegin', 'BYearEnd',
    'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
    ('FY5253Quarter', {'qtr_with_extra_week': 1,
                       'startingMonth': 1,
                       'weekday': 2,
                       'variation': 'nearest'}),
    ('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}),
    ('WeekOfMonth', {'weekday': 2, 'week': 2}),
    'Easter', ('DateOffset', {'day': 4}),
    ('DateOffset', {'month': 5})])
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('klass,assert_func', [
    (Series, tm.assert_series_equal),
    (DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_and_kwargs):
    """For every offset class, vectorized +/- on a Series/DatetimeIndex must
    match applying the offset to each Timestamp individually (GH#10699).
    """
    # GH#10699
    # assert these are equal on a piecewise basis
    vec = klass([Timestamp('2000-01-05 00:15:00'),
                 Timestamp('2000-01-31 00:23:00'),
                 Timestamp('2000-01-01'),
                 Timestamp('2000-03-31'),
                 Timestamp('2000-02-29'),
                 Timestamp('2000-12-31'),
                 Timestamp('2000-05-15'),
                 Timestamp('2001-06-15')])
    if isinstance(cls_and_kwargs, tuple):
        # If cls_name param is a tuple, then 2nd entry is kwargs for
        # the offset constructor
        cls_name, kwargs = cls_and_kwargs
    else:
        cls_name = cls_and_kwargs
        kwargs = {}
    offset_cls = getattr(pd.offsets, cls_name)
    # offsets may emit their own warnings (e.g. onOffset deprecations);
    # suppress them so only the arithmetic is under test
    with warnings.catch_warnings(record=True):
        for n in [0, 5]:
            if (cls_name in ['WeekOfMonth', 'LastWeekOfMonth',
                             'FY5253Quarter', 'FY5253'] and n == 0):
                # passing n = 0 is invalid for these offset classes
                continue
            offset = offset_cls(n, normalize=normalize, **kwargs)
            assert_func(klass([x + offset for x in vec]), vec + offset)
            assert_func(klass([x - offset for x in vec]), vec - offset)
            assert_func(klass([offset + x for x in vec]), offset + vec)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
                                                  [tm.assert_series_equal,
                                                   tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
    """DateOffset arithmetic works on naive and tz-aware datetime64 boxes,
    in both operand orders, and preserves the name attribute.
    """
    s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
    result = s + pd.DateOffset(years=1)
    result2 = pd.DateOffset(years=1) + s
    exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
    result = s - pd.DateOffset(years=1)
    exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
    assert_func(result, exp)
    # tz-aware values with a Day offset
    s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
               pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
    result = s + pd.offsets.Day()
    result2 = pd.offsets.Day() + s
    exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
                 Timestamp('2000-02-16', tz='US/Central')], name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
    # tz-aware values with an anchored (MonthEnd) offset
    s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
               pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
    result = s + pd.offsets.MonthEnd()
    result2 = pd.offsets.MonthEnd() + s
    exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
                 Timestamp('2000-02-29', tz='US/Central')], name='a')
    assert_func(result, exp)
    assert_func(result2, exp)
@pytest.mark.parametrize('years', [-1, 0, 1])
@pytest.mark.parametrize('months', [-2, 0, 2])
def test_shift_months(years, months):
    """shift_months on the raw int64 (asi8) values must agree with adding
    DateOffset(years=..., months=...) to each Timestamp.
    """
    s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
                       Timestamp('2000-01-31 00:23:00'),
                       Timestamp('2000-01-01'),
                       Timestamp('2000-02-29'),
                       Timestamp('2000-12-31')])
    actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months))
    raw = [x + pd.offsets.DateOffset(years=years, months=months)
           for x in s]
    expected = DatetimeIndex(raw)
    tm.assert_index_equal(actual, expected)
| true | true |
7900c9cc64f98dcf97c1b398ea6136afc3e8ab40 | 67 | py | Python | params.py | iskrich/simplechat | 9692334dbc6db0491541947fd8495d2ac0c7205c | [
"MIT"
] | 1 | 2017-10-18T09:53:14.000Z | 2017-10-18T09:53:14.000Z | params.py | iskrich/simplechat | 9692334dbc6db0491541947fd8495d2ac0c7205c | [
"MIT"
] | null | null | null | params.py | iskrich/simplechat | 9692334dbc6db0491541947fd8495d2ac0c7205c | [
"MIT"
] | null | null | null | host = "localhost"
port = 1111
max_users = 100
buffer_size = 1024
| 11.166667 | 18 | 0.716418 | host = "localhost"
port = 1111
max_users = 100
buffer_size = 1024
| true | true |
7900cac2ad9844d2b8c439dd2e7e2b2e8737dd78 | 6,082 | py | Python | tests/test_codetesting.py | paullinnerud/fontbakery | 666b3425b14f6c59a43cddf30279ca2fdc6e714e | [
"Apache-2.0"
] | null | null | null | tests/test_codetesting.py | paullinnerud/fontbakery | 666b3425b14f6c59a43cddf30279ca2fdc6e714e | [
"Apache-2.0"
] | null | null | null | tests/test_codetesting.py | paullinnerud/fontbakery | 666b3425b14f6c59a43cddf30279ca2fdc6e714e | [
"Apache-2.0"
] | null | null | null | import os
from glyphsLib import GSFont
import pytest
from fontbakery.codetesting import (
assert_PASS,
assert_results_contain,
assert_SKIP,
GLYPHSAPP_TEST_FILE,
PATH_TEST_DATA,
portable_path,
TEST_FILE,
)
from fontbakery.message import Message
from fontbakery.status import PASS, FAIL, WARN, ERROR, INFO, SKIP, DEBUG
# portable_path converts "/"-separated paths to the host OS separator.
def test_portable_path():
    test_path = "dir/subdir/file"
    assert portable_path(test_path) == f"{os.sep}".join(test_path.split("/"))
# TEST_FILE prefixes a relative path with the test-data directory.
def test_TEST_FILE():
    file_path = "dir/file"
    assert TEST_FILE(file_path) == f"{PATH_TEST_DATA}{file_path}"
# GLYPHSAPP_TEST_FILE loads a .glyphs fixture as a glyphsLib GSFont.
def test_GLYPHSAPP_TEST_FILE():
    glyphs_filename = "Comfortaa.glyphs"
    gfile = GLYPHSAPP_TEST_FILE(glyphs_filename)
    assert isinstance(gfile, GSFont)
# assert_SKIP returns the SKIP message for a results list ending in SKIP
# and prints "Test SKIP <reason>" to stdout.
def test_assert_SKIP_success(capsys):
    skip_msg = "SKIP message"
    skip_reason = "SKIP reason"
    results = [
        (PASS,),
        (SKIP, skip_msg),
    ]
    assert assert_SKIP(results, skip_reason) == skip_msg
    captured = capsys.readouterr()
    assert captured.out == f"Test SKIP {skip_reason}\n"
# assert_SKIP raises AssertionError when the final result is not a SKIP.
def test_assert_SKIP_failure(capsys):
    pass_msg = "PASS message"
    skip_reason = "SKIP reason"
    results = [
        (SKIP,),
        (PASS, pass_msg),
    ]
    with pytest.raises(AssertionError):
        assert_SKIP(results, skip_reason)
    captured = capsys.readouterr()
    assert captured.out == f"Test SKIP {skip_reason}\n"
# assert_PASS returns the PASS message for a results list ending in PASS.
def test_assert_PASS_success(capsys):
    pass_msg = "PASS message"
    pass_reason = "with a good font..."
    results = [
        (SKIP,),
        (PASS, pass_msg),
    ]
    assert assert_PASS(results) == pass_msg
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n"
# assert_PASS raises AssertionError when the final result is not a PASS.
def test_assert_PASS_failure(capsys):
    skip_msg = "SKIP message"
    pass_reason = "with a good font..."
    results = [
        (PASS,),
        (SKIP, skip_msg),
    ]
    with pytest.raises(AssertionError):
        assert_PASS(results)
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n"
# With ignore_error set, assert_PASS tolerates a trailing ERROR result,
# prints the ignore message, and returns None.
def test_assert_PASS_ignore_error_true(capsys):
    error_msg = "ERROR message"
    pass_reason = "with a good font..."
    ignore = "an error"
    results = [
        (PASS,),
        (ERROR, error_msg),
    ]
    assert assert_PASS(results, ignore_error=ignore) is None
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n{ignore}\n"
# Without ignore_error, a trailing ERROR makes assert_PASS raise.
def test_assert_PASS_ignore_error_false(capsys):
    error_msg = "ERROR message"
    pass_reason = "with a good font..."
    results = [
        (PASS,),
        (ERROR, error_msg),
    ]
    with pytest.raises(AssertionError):
        assert_PASS(results)
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n"
# assert_results_contain rejects non-string message codes.
def test_assert_results_contain_expected_msgcode_string():
    bogus_msgcode = True
    with pytest.raises(Exception) as err:
        assert_results_contain([], PASS, bogus_msgcode)
    assert str(err.value) == "The expected message code must be a string"
# With ignore_error set, assert_results_contain tolerates an ERROR result,
# prints the ignore message, and returns None.
def test_assert_results_contain_ignore_error_true(capsys):
    msg_code = "a message code"
    ignore = "an error"
    expected_status = PASS
    results = [
        (ERROR, ""),
        (FAIL, ""),
    ]
    assert (
        assert_results_contain(results, expected_status, msg_code, ignore_error=ignore)
        is None
    )
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n{ignore}\n"
# A bare string (not a Message object) in the results is reported as such
# in the exception raised by assert_results_contain.
def test_assert_results_contain_bare_string(capsys):
    msg_code = "a message code"
    bare_str = "just a string"
    reason = "just because..."
    expected_status = PASS
    results = [
        (WARN, bare_str),
        (INFO, bare_str),
    ]
    with pytest.raises(Exception) as err:
        assert_results_contain(results, expected_status, msg_code, reason)
    assert f"(Bare string: {bare_str!r})" in str(err.value)
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} {reason}\n"
# A matching (status, code-string) pair makes assert_results_contain
# return the message string.
def test_assert_results_contain_success_string_msg(capsys):
    msg_code = "a message code"
    expected_status = PASS
    results = [
        (PASS, msg_code),
    ]
    assert assert_results_contain(results, expected_status, msg_code) == msg_code
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
# No result with the expected status/code: assert_results_contain raises
# with a descriptive exception message.
def test_assert_results_contain_failure_string_msg(capsys):
    msg_code = "a message code"
    expected_status = PASS
    results = [
        (DEBUG, msg_code),
    ]
    exception_message = (
        f"Expected to find {expected_status}, [code: {msg_code}]\n"
        f"But did not find it in:\n"
        f"{results}"
    )
    with pytest.raises(Exception) as err:
        assert_results_contain(results, expected_status, msg_code)
    assert str(err.value) == exception_message
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
# A matching Message object makes assert_results_contain return its
# human-readable text.
def test_assert_results_contain_success_message_msg(capsys):
    msg_code = "a message code"
    msg_human = "human readable message"
    message = Message(msg_code, msg_human)
    expected_status = FAIL
    results = [
        (FAIL, message),
    ]
    assert assert_results_contain(results, expected_status, msg_code) == msg_human
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
# A Message with the right code but wrong status still raises.
def test_assert_results_contain_failure_message_msg(capsys):
    msg_code = "a message code"
    msg_human = "human readable message"
    message = Message(msg_code, msg_human)
    expected_status = FAIL
    results = [
        (ERROR, message),
    ]
    exception_message = (
        f"Expected to find {expected_status}, [code: {msg_code}]\n"
        f"But did not find it in:\n"
        f"{results}"
    )
    with pytest.raises(Exception) as err:
        assert_results_contain(results, expected_status, msg_code)
    assert str(err.value) == exception_message
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
| 27.151786 | 87 | 0.66853 | import os
from glyphsLib import GSFont
import pytest
from fontbakery.codetesting import (
assert_PASS,
assert_results_contain,
assert_SKIP,
GLYPHSAPP_TEST_FILE,
PATH_TEST_DATA,
portable_path,
TEST_FILE,
)
from fontbakery.message import Message
from fontbakery.status import PASS, FAIL, WARN, ERROR, INFO, SKIP, DEBUG
# NOTE(review): the definitions below duplicate those earlier in this file
# (apparent artifact of file concatenation); dedupe if this is one module.
def test_portable_path():
    test_path = "dir/subdir/file"
    assert portable_path(test_path) == f"{os.sep}".join(test_path.split("/"))
def test_TEST_FILE():
    file_path = "dir/file"
    assert TEST_FILE(file_path) == f"{PATH_TEST_DATA}{file_path}"
def test_GLYPHSAPP_TEST_FILE():
    glyphs_filename = "Comfortaa.glyphs"
    gfile = GLYPHSAPP_TEST_FILE(glyphs_filename)
    assert isinstance(gfile, GSFont)
# assert_SKIP returns the SKIP message and prints "Test SKIP <reason>".
def test_assert_SKIP_success(capsys):
    skip_msg = "SKIP message"
    skip_reason = "SKIP reason"
    results = [
        (PASS,),
        (SKIP, skip_msg),
    ]
    assert assert_SKIP(results, skip_reason) == skip_msg
    captured = capsys.readouterr()
    assert captured.out == f"Test SKIP {skip_reason}\n"
# assert_SKIP raises when the final result is not a SKIP.
def test_assert_SKIP_failure(capsys):
    pass_msg = "PASS message"
    skip_reason = "SKIP reason"
    results = [
        (SKIP,),
        (PASS, pass_msg),
    ]
    with pytest.raises(AssertionError):
        assert_SKIP(results, skip_reason)
    captured = capsys.readouterr()
    assert captured.out == f"Test SKIP {skip_reason}\n"
# Duplicate of the assert_PASS tests earlier in this file (see note above
# them); kept byte-identical here.
def test_assert_PASS_success(capsys):
    pass_msg = "PASS message"
    pass_reason = "with a good font..."
    results = [
        (SKIP,),
        (PASS, pass_msg),
    ]
    assert assert_PASS(results) == pass_msg
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n"
def test_assert_PASS_failure(capsys):
    skip_msg = "SKIP message"
    pass_reason = "with a good font..."
    results = [
        (PASS,),
        (SKIP, skip_msg),
    ]
    with pytest.raises(AssertionError):
        assert_PASS(results)
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n"
def test_assert_PASS_ignore_error_true(capsys):
    error_msg = "ERROR message"
    pass_reason = "with a good font..."
    ignore = "an error"
    results = [
        (PASS,),
        (ERROR, error_msg),
    ]
    assert assert_PASS(results, ignore_error=ignore) is None
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n{ignore}\n"
def test_assert_PASS_ignore_error_false(capsys):
    error_msg = "ERROR message"
    pass_reason = "with a good font..."
    results = [
        (PASS,),
        (ERROR, error_msg),
    ]
    with pytest.raises(AssertionError):
        assert_PASS(results)
    captured = capsys.readouterr()
    assert captured.out == f"Test PASS {pass_reason}\n"
# Duplicate of the assert_results_contain tests earlier in this file;
# kept byte-identical here.
def test_assert_results_contain_expected_msgcode_string():
    bogus_msgcode = True
    with pytest.raises(Exception) as err:
        assert_results_contain([], PASS, bogus_msgcode)
    assert str(err.value) == "The expected message code must be a string"
def test_assert_results_contain_ignore_error_true(capsys):
    msg_code = "a message code"
    ignore = "an error"
    expected_status = PASS
    results = [
        (ERROR, ""),
        (FAIL, ""),
    ]
    assert (
        assert_results_contain(results, expected_status, msg_code, ignore_error=ignore)
        is None
    )
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n{ignore}\n"
def test_assert_results_contain_bare_string(capsys):
    msg_code = "a message code"
    bare_str = "just a string"
    reason = "just because..."
    expected_status = PASS
    results = [
        (WARN, bare_str),
        (INFO, bare_str),
    ]
    with pytest.raises(Exception) as err:
        assert_results_contain(results, expected_status, msg_code, reason)
    assert f"(Bare string: {bare_str!r})" in str(err.value)
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} {reason}\n"
# Duplicate of the string/Message-result tests earlier in this file;
# kept byte-identical here.
def test_assert_results_contain_success_string_msg(capsys):
    msg_code = "a message code"
    expected_status = PASS
    results = [
        (PASS, msg_code),
    ]
    assert assert_results_contain(results, expected_status, msg_code) == msg_code
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
def test_assert_results_contain_failure_string_msg(capsys):
    msg_code = "a message code"
    expected_status = PASS
    results = [
        (DEBUG, msg_code),
    ]
    exception_message = (
        f"Expected to find {expected_status}, [code: {msg_code}]\n"
        f"But did not find it in:\n"
        f"{results}"
    )
    with pytest.raises(Exception) as err:
        assert_results_contain(results, expected_status, msg_code)
    assert str(err.value) == exception_message
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
def test_assert_results_contain_success_message_msg(capsys):
    msg_code = "a message code"
    msg_human = "human readable message"
    message = Message(msg_code, msg_human)
    expected_status = FAIL
    results = [
        (FAIL, message),
    ]
    assert assert_results_contain(results, expected_status, msg_code) == msg_human
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
def test_assert_results_contain_failure_message_msg(capsys):
    msg_code = "a message code"
    msg_human = "human readable message"
    message = Message(msg_code, msg_human)
    expected_status = FAIL
    results = [
        (ERROR, message),
    ]
    exception_message = (
        f"Expected to find {expected_status}, [code: {msg_code}]\n"
        f"But did not find it in:\n"
        f"{results}"
    )
    with pytest.raises(Exception) as err:
        assert_results_contain(results, expected_status, msg_code)
    assert str(err.value) == exception_message
    captured = capsys.readouterr()
    assert captured.out == f"Test {expected_status} [{msg_code}]\n"
7900cb853894cab710d9bb81e68dd9a97f3bd9b0 | 18,534 | py | Python | caper/cromwell_rest_api.py | procha2/caper | e9ea0baa3517178ce7b850df8a59eba6479fbcb6 | [
"MIT"
] | 31 | 2019-06-20T15:34:23.000Z | 2022-03-19T13:58:42.000Z | caper/cromwell_rest_api.py | procha2/caper | e9ea0baa3517178ce7b850df8a59eba6479fbcb6 | [
"MIT"
] | 66 | 2019-06-25T20:12:16.000Z | 2022-03-29T17:07:50.000Z | caper/cromwell_rest_api.py | procha2/caper | e9ea0baa3517178ce7b850df8a59eba6479fbcb6 | [
"MIT"
] | 11 | 2019-10-21T20:35:10.000Z | 2021-09-08T22:15:38.000Z | import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
    """Re-raise ConnectionError with help message.
    Continue on HTTP 404 error (server is on but workflow doesn't exist).
    Otherwise, re-raise from None to hide nested tracebacks.
    """

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which matters for logging and debugging decorated API methods.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if err.response.status_code == 404:
                # Server reachable but workflow not found: warn and return None.
                logger.error("Workflow doesn't seem to exist.")
                return
            message = (
                '{err}\n\n'
                'Cromwell server is on but got an HTTP error other than 404. '
            ).format(err=err)
            # `from None` hides the nested traceback of the original error.
            raise HTTPError(message) from None
        except ConnectionError as err:
            message = (
                '{err}\n\n'
                'Failed to connect to Cromwell server. '
                'Check if Caper server is running. '
                'Also check if hostname and port are correct. '
                'method={method}, '
                'url={url}'.format(
                    err=err, method=err.request.method, url=err.request.url
                )
            )
            raise ConnectionError(message) from None

    return wrapper
def is_valid_uuid(workflow_id, version=4):
    """To validate Cromwell's UUID (lowercase only).
    This does not allow uppercase UUIDs.

    Args:
        workflow_id: Candidate workflow ID string.
        version: UUID version passed through to ``uuid.UUID``.

    Returns:
        True if ``workflow_id`` is a parseable UUID string with no
        uppercase characters, False otherwise.
    """
    if not isinstance(workflow_id, str):
        return False
    # str.islower() is False for strings with no cased characters at all
    # (e.g. an all-digit UUID such as '12345678-1234-...'), which would
    # wrongly reject valid lowercase IDs; compare with .lower() instead.
    if workflow_id != workflow_id.lower():
        return False
    try:
        UUID(workflow_id, version=version)
    except ValueError:
        return False
    return True
def has_wildcard(workflow_id_or_label):
    """Check if string or any element in list/tuple has
    a wildcard (? or *).

    Args:
        workflow_id_or_label:
            Workflow ID (str) or label (str).
            Or array (list, tuple) of them.
    """
    if workflow_id_or_label is None:
        return False
    if isinstance(workflow_id_or_label, (list, tuple)):
        # recurse into collections: any single wildcard element counts
        return any(has_wildcard(item) for item in workflow_id_or_label)
    return any(char in workflow_id_or_label for char in '?*')
class CromwellRestAPI:
    """Thin client for a Cromwell server's REST API (workflows/v1)."""

    # Base URL template; the endpoint paths below are appended to it.
    QUERY_URL = 'http://{hostname}:{port}'
    ENDPOINT_BACKEND = '/api/workflows/v1/backends'
    ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
    ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
    ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
    ENDPOINT_SUBMIT = '/api/workflows/v1'
    ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
    ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
    # Defaults for a locally running Cromwell server.
    DEFAULT_HOSTNAME = 'localhost'
    DEFAULT_PORT = 8000
    def __init__(
        self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
    ):
        """Create a client bound to one Cromwell server.

        Args:
            hostname: Cromwell server hostname.
            port: Cromwell server port.
            user: Username for HTTP auth (optional).
            password: Password for HTTP auth (optional).
        """
        self._hostname = hostname
        self._port = port
        self._user = user
        self._password = password
        # Builds the HTTP auth object from user/password
        # (helper defined elsewhere in this class).
        self.__init_auth()
    def submit(
        self,
        source,
        dependencies=None,
        inputs=None,
        options=None,
        labels=None,
        on_hold=False,
    ):
        """Submit a workflow.

        Args:
            source: Path to the workflow source (e.g. WDL) file.
            dependencies: Path to a zipped subworkflow dependencies file.
            inputs: Path to an inputs JSON file; defaults to empty JSON.
            options: Path to a workflow-options JSON file.
            labels: Path to a labels JSON file.
            on_hold: If True, submit the workflow in "On Hold" state.

        Returns:
            JSON Response from POST request submit a workflow
        """
        # Build the multipart manifest of in-memory file objects expected
        # by Cromwell's submit endpoint.
        manifest = {}
        with open(source) as fp:
            manifest['workflowSource'] = io.StringIO(fp.read())
        if dependencies:
            # dependencies are a zip archive, hence binary mode
            with open(dependencies, 'rb') as fp:
                manifest['workflowDependencies'] = io.BytesIO(fp.read())
        if inputs:
            with open(inputs) as fp:
                manifest['workflowInputs'] = io.StringIO(fp.read())
        else:
            manifest['workflowInputs'] = io.StringIO('{}')
        if options:
            with open(options) as fp:
                manifest['workflowOptions'] = io.StringIO(fp.read())
        if labels:
            with open(labels) as fp:
                manifest['labels'] = io.StringIO(fp.read())
        if on_hold:
            manifest['workflowOnHold'] = True
        r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
        logger.debug('submit: {r}'.format(r=r))
        return r
    def abort(self, workflow_ids=None, labels=None):
        """Abort workflows matching workflow IDs or labels

        Args:
            workflow_ids: List of workflow ID strings (wildcards allowed).
            labels: List of (key, value) label tuples to match.

        Returns:
            List of JSON responses from POST request
            for aborting workflows, or None if nothing matched.
        """
        valid_workflow_ids = self.find_valid_workflow_ids(
            workflow_ids=workflow_ids, labels=labels
        )
        if valid_workflow_ids is None:
            return
        result = []
        for workflow_id in valid_workflow_ids:
            r = self.__request_post(
                CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
            )
            result.append(r)
        logger.debug('abort: {r}'.format(r=result))
        return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
    def get_backends(self):
        """Retrieve available backend names and default backend name

        Returns:
            JSON response with keys "defaultBackend" and "supportedBackends"
            Example: {"defaultBackend":"Local","supportedBackends":
                      ["Local","aws","gcp","pbs","sge","slurm"]}
        """
        return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
    def find_valid_workflow_ids(
        self, workflow_ids=None, labels=None, exclude_subworkflow=True
    ):
        """Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
        If so then we don't have to send the server a query to get matching workflow IDs.

        Args:
            workflow_ids: List of workflow ID strings (may contain wildcards).
            labels: List of (key, value) label tuples to match.
            exclude_subworkflow: Exclude subworkflows when querying the server.

        Returns:
            List of workflow ID strings, or None if a server query found
            no matching workflows.
        """
        # Fast path: concrete UUIDs only and no label filter means no
        # server round-trip is needed.
        if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
            return workflow_ids
        else:
            workflows = self.find(
                workflow_ids=workflow_ids,
                labels=labels,
                exclude_subworkflow=exclude_subworkflow,
            )
            if not workflows:
                return
            return [w['id'] for w in workflows]
    def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
        """Retrieve metadata for workflows matching workflow IDs or labels

        Args:
            workflow_ids:
                List of workflows IDs to find workflows matched.
            labels:
                List of Caper's string labels to find workflows matched.
            embed_subworkflow:
                Recursively embed subworkflow's metadata in main
                workflow's metadata.
                This flag is to mimic behavior of Cromwell run mode with -m.
                Metadata JSON generated with Cromwell run mode
                includes all subworkflows embedded in main workflow's JSON file.

        Returns:
            List of metadata JSON dicts (one per matched workflow),
            or None if no workflow matched.
        """
        valid_workflow_ids = self.find_valid_workflow_ids(
            workflow_ids=workflow_ids, labels=labels
        )
        if valid_workflow_ids is None:
            return
        result = []
        for workflow_id in valid_workflow_ids:
            params = {}
            if embed_subworkflow:
                # Cromwell expands subworkflow metadata server-side.
                params['expandSubWorkflows'] = True
            m = self.__request_get(
                CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
                params=params,
            )
            if m:
                # normalize through CromwellMetadata before returning
                cm = CromwellMetadata(m)
                result.append(cm.metadata)
        return result
def get_labels(self, workflow_id):
"""Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
"""
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
"""Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
"""
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
"""Update labels for a specified workflow with
a list of (key, val) tuples
"""
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
"""
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUID in `workflows_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
"""Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
"""
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"""Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) in both parameters are allowed but Caper will
retrieve a list of all workflows, which can lead to HTTP 503 of
Cromwell server if there are many subworkflows and not `exclude_subworkflow`.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
"""
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
"""Init auth object
"""
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
"""GET request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
"""POST request
Returns:
JSON response
"""
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
| 33.334532 | 95 | 0.563721 | import fnmatch
import io
import logging
from uuid import UUID
import requests
from requests.exceptions import ConnectionError, HTTPError
from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as err:
if err.response.status_code == 404:
logger.error("Workflow doesn't seem to exist.")
return
message = (
'{err}\n\n'
'Cromwell server is on but got an HTTP error other than 404. '
).format(err=err)
raise HTTPError(message) from None
except ConnectionError as err:
message = (
'{err}\n\n'
'Failed to connect to Cromwell server. '
'Check if Caper server is running. '
'Also check if hostname and port are correct. '
'method={method}, '
'url={url}'.format(
err=err, method=err.request.method, url=err.request.url
)
)
raise ConnectionError(message) from None
return wrapper
def is_valid_uuid(workflow_id, version=4):
if not isinstance(workflow_id, str):
return False
if not workflow_id.islower():
return False
try:
UUID(workflow_id, version=version)
except ValueError:
return False
return True
def has_wildcard(workflow_id_or_label):
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
return self.get_backends()['defaultBackend']
def get_backends(self):
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
if workflow_id is None or not is_valid_uuid(workflow_id):
return
r = self.__request_get(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
)
if r is None:
return
return r['labels']
def get_label(self, workflow_id, key):
labels = self.get_labels(workflow_id)
if labels is None:
return
if key in labels:
return labels[key]
def update_labels(self, workflow_id, labels):
if workflow_id is None or labels is None:
return
r = self.__request_patch(
CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
)
logger.debug('update_labels: {r}'.format(r=r))
return r
def find_with_wildcard(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
result = []
if not workflow_ids and not labels:
return result
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
},
)
if resp and resp['results']:
for workflow in resp['results']:
matched = False
if 'id' not in workflow:
continue
if workflow_ids:
for wf_id in workflow_ids:
if fnmatch.fnmatchcase(workflow['id'], wf_id):
result.append(workflow)
matched = True
break
if matched:
continue
if labels and 'labels' in workflow:
for k, v in labels:
v_ = workflow['labels'].get(k)
if not v_:
continue
if isinstance(v_, str) and isinstance(v, str):
# matching with wildcards for str values only
if fnmatch.fnmatchcase(v_, v):
result.append(workflow)
break
elif v_ == v:
result.append(workflow)
break
logger.debug(
'find_with_wildcard: workflow_ids={workflow_ids}, '
'labels={labels}, result={result}'.format(
workflow_ids=workflow_ids, labels=labels, result=result
)
)
return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
if has_wildcard(workflow_ids):
raise ValueError(
'Wildcards are not allowed in workflow_ids. '
'ids={ids}'.format(ids=workflow_ids)
)
result = []
if workflow_ids:
# exclude invalid workflow UUIDs.
workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'id': workflow_ids,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_workflow_ids: workflow_ids={workflow_ids}, '
'result={result}'.format(workflow_ids=workflow_ids, result=result)
)
return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
if has_wildcard(labels):
raise ValueError(
'Wildcards are not allowed in labels. '
'labels={labels}'.format(labels=labels)
)
result = []
if labels:
# reformat labels with `:` notation. exclude pairs with empty value.
labels = [
'{key}:{val}'.format(key=key, val=val) for key, val in labels if val
]
resp = self.__request_get(
CromwellRestAPI.ENDPOINT_WORKFLOWS,
params={
'additionalQueryResultFields': 'labels',
'includeSubworkflows': not exclude_subworkflow,
'labelor': labels,
},
)
if resp and resp['results']:
result.extend(resp['results'])
logger.debug(
'find_by_labels: labels={labels}, result={result}'.format(
labels=labels, result=result
)
)
return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
wildcard_found_in_labels = has_wildcard(
[val for key, val in labels] if labels else None
)
if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
return self.find_with_wildcard(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
result = []
result_by_labels = self.find_by_labels(
labels=labels, exclude_subworkflow=exclude_subworkflow
)
result.extend(result_by_labels)
workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
result.extend(
[
workflow
for workflow in self.find_by_workflow_ids(
workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
)
if workflow['id'] not in workflow_ids_found_by_labels
]
)
return result
def __init_auth(self):
if self._user is not None and self._password is not None:
self._auth = (self._user, self._password)
else:
self._auth = None
@requests_error_handler
def __request_get(self, endpoint, params=None):
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.get(
url, auth=self._auth, params=params, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.post(
url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
)
resp.raise_for_status()
return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
url = (
CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
+ endpoint
)
resp = requests.patch(
url,
data=data,
auth=self._auth,
headers={'accept': 'application/json', 'content-type': 'application/json'},
)
resp.raise_for_status()
return resp.json()
| true | true |
7900cb93d03fe060411436ade4a2d3ad8af0f33a | 578 | py | Python | dataent/patches/v11_0/rename_email_alert_to_notification.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | dataent/patches/v11_0/rename_email_alert_to_notification.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | 6 | 2020-03-24T17:15:56.000Z | 2022-02-10T18:41:31.000Z | dataent/patches/v11_0/rename_email_alert_to_notification.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import dataent
from dataent.model.rename_doc import rename_doc
def execute():
if dataent.db.table_exists("Email Alert Recipient") and not dataent.db.table_exists("Notification Recipient"):
rename_doc('DocType', 'Email Alert Recipient', 'Notification Recipient')
dataent.reload_doc('email', 'doctype', 'notification_recipient')
if dataent.db.table_exists("Email Alert") and not dataent.db.table_exists("Notification"):
rename_doc('DocType', 'Email Alert', 'Notification')
dataent.reload_doc('email', 'doctype', 'notification')
| 44.461538 | 111 | 0.780277 | from __future__ import unicode_literals
import dataent
from dataent.model.rename_doc import rename_doc
def execute():
if dataent.db.table_exists("Email Alert Recipient") and not dataent.db.table_exists("Notification Recipient"):
rename_doc('DocType', 'Email Alert Recipient', 'Notification Recipient')
dataent.reload_doc('email', 'doctype', 'notification_recipient')
if dataent.db.table_exists("Email Alert") and not dataent.db.table_exists("Notification"):
rename_doc('DocType', 'Email Alert', 'Notification')
dataent.reload_doc('email', 'doctype', 'notification')
| true | true |
7900cc319516ba4743eb1499d221e0b3583ac6c5 | 27,165 | py | Python | zzh/mllib/model/_deep_fm.py | zhangzhenhu/zzh | ebacd9c0c46a0a537d014550bd2bff0a85452a6e | [
"MIT"
] | null | null | null | zzh/mllib/model/_deep_fm.py | zhangzhenhu/zzh | ebacd9c0c46a0a537d014550bd2bff0a85452a6e | [
"MIT"
] | null | null | null | zzh/mllib/model/_deep_fm.py | zhangzhenhu/zzh | ebacd9c0c46a0a537d014550bd2bff0a85452a6e | [
"MIT"
] | null | null | null | """
Tensorflow implementation of DeepFM
"""
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
from sklearn import metrics
# from yellowfin import YFOptimizer
import os
import sys
import json
"""
关于 X_i 和 X_v
为什么要把训练数据分成两个矩阵?
FM模型需要为每个特征训练一个embedding vector,
在模型计算过程中使用 embedding_lookup + index matrix 可以方便计算。
首先把特征分成两种,一种是不需要one hot(数值类),一种是需要one hot(枚举类)。
然后定义,one hot 之前的特征称为 field,one hot 之后的特征为 feature。
- X_i 表示 feat_index
- X_v 表示 feat_value
**feat_index**
feat_index 存储的是样本的 field 的"feature索引",shape=(N,field_size)。
feat_index[i,j]表示的是第i个样本第j个field的 feature_index。
如果当前 field 不需要 one hot,此 field 就只会映射成一个 feature;
如果当前 field 需要 one hot,此 field 就会被映射成多个 feature ,
每个枚举值是一个 feature,其实就是进行 one hot 编码。
比如 feat_index[i,j]=c,表示 第i个样本第j个 field 的对应着第c个feature,
c是 feature_index。
当然如果 field_j 是数值 field,所有样本的j列都是一样的值,因为 field_j 不需要onehot。
如果 field_j 需要one hot,c的值就是其原来的枚举值onehot后映射对应的 feature_index。
feat_index 是给 embedding_lookup是用的。
**feat_value**
feat_value 存储的是样本field的"值",shape=(N,field_size)。
feat_value[i,j]表示的是第i个样本第j个field的值。
如果当前field 不需要 one hot,feat_value[i,j]就是原始数据值;
如果当前field 需要 one hot,feat_value[i,j]就是常量1;
注意:这里有一个前提条件,就是 one_hot 的 field 变量只能取一个值,一个变量可以有多个取值的情况是不支持的。
"""
class DeepFM(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size,
embedding_size=8, dropout_fm=[1.0, 1.0],
deep_layers=[32, 32], dropout_deep=[0.5, 0.5, 0.5],
deep_layers_activation=tf.nn.relu,
epoch=10, batch_size=256,
learning_rate=0.001, optimizer_type="adam",
batch_norm=0, batch_norm_decay=0.995,
verbose=False, random_seed=2016,
use_fm=True, use_deep=True,
loss_type="logloss", eval_metric=roc_auc_score,
l2_reg=0.0, greater_is_better=True, threshold=0.5
):
assert (use_fm or use_deep)
assert loss_type in ["logloss", "mse"], \
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
self.feature_size = feature_size # 259 denote as M, size of the feature dictionary
self.field_size = field_size # 39 denote as F, size of the feature fields
self.embedding_size = embedding_size # 8 denote as K, size of the feature embedding
self.dropout_fm = dropout_fm
self.deep_layers = deep_layers
self.dropout_deep = dropout_deep
self.deep_layers_activation = deep_layers_activation
self.use_fm = use_fm
self.use_deep = use_deep
self.l2_reg = l2_reg
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer_type
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
self.verbose = verbose # 是否打印参数总量
self.random_seed = random_seed
self.loss_type = loss_type
self.eval_metric = eval_metric
self.greater_is_better = greater_is_better # 是否值越大越好
self.train_result, self.valid_result = [], []
self.sess = None
self.graph = None
self._config = None
self.threshold = threshold
def _make_config_pack(self):
self._config = {
"feature_size": self.feature_size, # 259 denote as M, size of the feature dictionary
"field_size ": self.field_size, # 39 denote as F, size of the feature fields
"embedding_size ": self.embedding_size, # 8 denote as K, size of the feature embedding
"dropout_fm ": self.dropout_fm,
"deep_layers ": self.deep_layers,
"dropout_deep ": self.dropout_deep,
"deep_layers_activation ": self.deep_layers_activation,
"use_fm ": self.use_fm,
"use_deep ": self.use_deep,
"l2_reg ": self.l2_reg,
"epoch ": self.epoch,
"batch_size ": self.batch_size,
"learning_rate ": self.learning_rate,
"optimizer_type ": self.optimizer_type,
"batch_norm ": self.batch_norm,
"batch_norm_decay ": self.batch_norm_decay,
"verbose ": self.verbose, # 是否打印参数总量
"random_seed ": self.random_seed,
"loss_type": self.loss_type,
"eval_metric ": self.eval_metric,
"greater_is_better ": self.greater_is_better, # 是否值越大越好
}
# self.model_path = '%s/deepfm' % (save_path)
# self._init_graph()
def init_graph(self):
if self.sess is not None:
return
self.graph = tf.Graph()
with self.graph.as_default():
tf1.set_random_seed(self.random_seed)
self.feat_index = tf1.placeholder(tf.int32, shape=[None, None],
name="feat_index") # None * F
self.feat_value = tf1.placeholder(tf.float32, shape=[None, None],
name="feat_value") # None * F
self.label = tf1.placeholder(tf.float32, shape=[None, 1], name="label") # None * 1
self.dropout_keep_fm = tf1.placeholder(tf.float32, shape=[None], name="dropout_keep_fm")
self.dropout_keep_deep = tf1.placeholder(tf.float32, shape=[None], name="dropout_keep_deep")
self.train_phase = tf1.placeholder(tf.bool, name="train_phase")
self.weights = self._initialize_weights()
# 每一个feature 有一个 embedding
# feature_embeddings.shape=(self.feature_size, self.embedding_size)
# feat_index[i,j] 存储的是 第i条样本第j个field 对应的 feature_index
# 1. 如果 field_j 是非 one hot 特征,则 field_j 不需要拆成多个 feature,
# feat_index[:,j] 所有样本行都是同一个值,对应同一个 feature_index。
# 2. 如果 field_j 是 one hot 特征,则 field_j 需要拆成多个 feature,每个枚举值独立成一个 feature,
# 此时 feat_index[:,j] 不同行是不同值,其值表示 枚举值Value(field_j) 对应的 feature_index.
# 比如,第i=3行样本,第j=5个field表示颜色,其值是红色,红色被 onehot成 feature_index=13.则 feat_index[3,5]=13
# shape=(N样本数量 * field_size * K)
# N 表示样本的数量
# K 是嵌入向量的长度,
# 取出所有样本,每个 feature 的嵌入向量
# 对于one_hot 的 field,相当于只取出来枚举值对应的 feature_index 的嵌入向量,
# 相当于每个 field 取一个,最终每条样本嵌入向量的数量还是 field 。
self.embeddings = tf.nn.embedding_lookup(
self.weights["feature_embeddings"], # shape=(self.feature_size, self.embedding_size)
self.feat_index # N * field_size
)
# shape=(None * F * 1)
#
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1]) # None * F * 1
# FM部分的公式是 (x_i * x_j)(v_i*v_j)=(x_i*v_i)(x_j*v_j)
# 这里先把每个特征的向量乘上其特征值。
self.embeddings = tf.multiply(self.embeddings, feat_value) # None * F * K
# ---------- first order term ----------
# 对于k维,tf.reduce_sum(x,axis=k-1)的结果是对最里面一维所有元素进行求和
self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"], self.feat_index) # None * F * 1
self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2) # None * F
self.y_first_order = tf.nn.dropout(self.y_first_order, rate=1 - self.dropout_keep_fm[0]) # None * F
# ---------- second order term ---------------
# sum_square part
self.summed_features_emb = tf.reduce_sum(self.embeddings, 1) # None * K
self.summed_features_emb_square = tf.square(self.summed_features_emb) # None * K
# square_sum part
self.squared_features_emb = tf.square(self.embeddings)
self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1) # None * K
# second order
self.y_second_order = 0.5 * tf.subtract(self.summed_features_emb_square,
self.squared_sum_features_emb) # None * K
self.y_second_order = tf.nn.dropout(self.y_second_order, rate=1 - self.dropout_keep_fm[1]) # None * K
# ---------- Deep component ----------
self.y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size]) # None * (F*K)
self.y_deep = tf.nn.dropout(self.y_deep, rate=1 - self.dropout_keep_deep[0])
for i in range(0, len(self.deep_layers)):
self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]),
self.weights["bias_%d" % i]) # None * layer[i] * 1
if self.batch_norm:
self.y_deep = self.batch_norm_layer(self.y_deep, train_phase=self.train_phase,
scope_bn="bn_%d" % i) # None * layer[i] * 1
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep,
rate=1 - self.dropout_keep_deep[1 + i]) # dropout at each Deep layer
# ---------- DeepFM ----------
if self.use_fm and self.use_deep:
concat_input = tf.concat([self.y_first_order, self.y_second_order, self.y_deep], axis=1)
elif self.use_fm:
concat_input = tf.concat([self.y_first_order, self.y_second_order], axis=1)
elif self.use_deep:
concat_input = self.y_deep
self.out = tf.add(tf.matmul(concat_input, self.weights["concat_projection"]), self.weights["concat_bias"])
# loss
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out, name='out')
self.loss = tf1.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
# l2 regularization on weights 正则
if self.l2_reg > 0:
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["concat_projection"])
if self.use_deep:
for i in range(len(self.deep_layers)):
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["layer_%d" % i])
# optimizer
# 这里可以使用现成的ftrl优化损失
# optimizer = tf.train.FtrlOptimizer(lr) # lr: learningRate
# gradients = optimizer.compute_gradients(loss) # cost
# train_op = optimizer.apply_gradients(gradients, global_step=global_step)
if self.optimizer_type == "adam":
self.optimizer = tf1.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf1.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf1.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(
self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf1.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(
self.loss)
# elif self.optimizer_type == "yellowfin":
# self.optimizer = YFOptimizer(learning_rate=self.learning_rate, momentum=0.0).minimize(
# self.loss)
# init
self.saver = tf1.train.Saver()
init = tf1.global_variables_initializer()
self.sess = self._init_session()
self.sess.run(init)
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _init_session(self):
config = tf1.ConfigProto(device_count={"gpu": 0})
config.gpu_options.allow_growth = True # 根据运行情况分配GPU内存
return tf1.Session(config=config)
def _initialize_weights(self):
weights = dict() # 定义参数字典
# embeddings
weights["feature_embeddings"] = tf.Variable(
tf.random.normal([self.feature_size, self.embedding_size], 0.0, 0.01),
# tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
name="feature_embeddings") # feature_size * K
weights["feature_bias"] = tf.Variable(
# tf.random_uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias") # feature_size * 1
tf.random.uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias") # feature_size * 1
# deep layers
num_layer = len(self.deep_layers) # 层数
input_size = self.field_size * self.embedding_size
glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0])) # 正态分布的标准差
weights["layer_0"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
dtype=np.float32) # 1 * layers[0]
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32) # layers[i-1] * layers[i]
weights["bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32) # 1 * layer[i]
# final concat projection layer
if self.use_fm and self.use_deep:
input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
elif self.use_fm:
input_size = self.field_size + self.embedding_size
elif self.use_deep:
input_size = self.deep_layers[-1]
glorot = np.sqrt(2.0 / (input_size + 1))
weights["concat_projection"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
dtype=np.float32) # layers[i-1]*layers[i]
weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)
return weights
def batch_norm_layer(self, x, train_phase, scope_bn):
bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=True, reuse=None, trainable=True, scope=scope_bn)
bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=False, reuse=True, trainable=True, scope=scope_bn)
z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
return z
def get_batch(self, Xi, Xv, y, batch_size, index):
start = index * batch_size
end = (index + 1) * batch_size
end = end if end < len(y) else len(y)
return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
# shuffle three lists simutaneously
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def fit_on_batch(self, Xi, Xv, y):
feed_dict = {self.feat_index: Xi,
self.feat_value: Xv,
self.label: y,
self.dropout_keep_fm: self.dropout_fm,
self.dropout_keep_deep: self.dropout_deep,
self.train_phase: True}
out, loss, opt = self.sess.run((self.out, self.loss, self.optimizer), feed_dict=feed_dict)
return out, loss
def fit(self, Xi_train, Xv_train, y_train,
Xi_valid=None, Xv_valid=None, y_valid=None,
early_stopping=False, refit=False):
"""
:param Xi_train: [[ind1_1, ind1_2, ...], [ind2_1, ind2_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param Xv_train: [[val1_1, val1_2, ...], [val2_1, val2_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features) or float (e.g., 10.24, for numerical features)
:param y_train: label of each sample in the training set
:param Xi_valid: list of list of feature indices of each sample in the validation set
:param Xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
"""
has_valid = Xv_valid is not None
Xi_train = Xi_train.copy()
Xv_train = Xv_train.copy()
y_train = y_train.copy()
for epoch in range(self.epoch):
t1 = time()
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
trian_out, train_loss = self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
# print(trian_out, file=sys.stderr)
if i % 1000 == 0:
# print(trian_out, file=sys.stderr)
print("epoch:%d batch:%d train_loss=%.4f" % (epoch, i, train_loss), file=sys.stderr)
# evaluate training and validation datasets
train_me = self.evaluate(Xi_train, Xv_train, y_train)
self.train_result.append(train_me)
if has_valid:
valid_me = self.evaluate(Xi_valid, Xv_valid, y_valid)
self.valid_result.append(valid_me)
if self.verbose > 0 and epoch % self.verbose == 0:
print("[%d] [train] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]"
% (epoch + 1,
train_me['auc'],
train_me['acc'],
train_me['mse'],
train_me['precision_1'],
train_me['recall_1'],
time() - t1))
if has_valid:
print(
"[%d] [valid] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]"
% (epoch + 1,
valid_me['auc'],
valid_me['acc'],
valid_me['mse'],
valid_me['precision_1'],
valid_me['recall_1'],
time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
# fit a few more epoch on train+valid until result reaches the best_train_score
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
Xi_train = Xi_train + Xi_valid
Xv_train = Xv_train + Xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train,
self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
# check
train_result = self.evaluate(Xi_train, Xv_train, y_train)
if abs(train_result - best_train_score) < 0.001 or \
(self.greater_is_better and train_result > best_train_score) or \
((not self.greater_is_better) and train_result < best_train_score):
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] and \
valid_result[-2] < valid_result[-3] and \
valid_result[-3] < valid_result[-4] and \
valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] \
and valid_result[-2] > valid_result[-3] \
and valid_result[-3] > valid_result[-4] \
and valid_result[-4] > valid_result[-5]:
return True
return False
    def predict(self, Xi, Xv):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample (1-D numpy array)
        """
        # Dummy labels only satisfy get_batch's signature; they are never fed
        # to the graph.
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        y_pred = None
        # get_batch returns empty slices once batch_index walks past the data,
        # which terminates the loop.
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            # Inference mode: keep-probabilities of 1.0 disable dropout and
            # train_phase=False selects the inference batch-norm branch.
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
                         self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)

            # Flatten each batch's output and append it to the running result.
            if batch_index == 0:
                y_pred = np.reshape(batch_out, (num_batch,))
            else:
                y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))

            batch_index += 1
            Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        return y_pred
    def evaluate(self, Xi, Xv, y_true):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :param y_true: labels as a numpy array (``.shape`` is used below)
        :return: dict of metrics: size, acc, precision_1, recall_1, auc, mse
        """
        size = y_true.shape[0]
        y_pred = self.predict(Xi, Xv)
        error = y_true - y_pred
        mse = (error * error).sum() / size
        # Binarize predicted probabilities at the configured threshold.
        y_pred_m = y_pred.copy()
        y_pred_m[y_pred_m >= self.threshold] = 1
        y_pred_m[y_pred_m < self.threshold] = 0
        # labels=[1, 0] puts the positive class in row/column 0 of the matrix.
        cm = metrics.confusion_matrix(y_true, y_pred_m, labels=[1, 0])
        # Number of actual positive samples.
        real_1_count = cm[0, :].sum()
        # Number of samples predicted positive.
        predict_1_count = cm[:, 0].sum()
        # Number of positives predicted correctly (true positives).
        right_1_count = cm[0, 0]
        if predict_1_count == 0:
            precision_1 = 0
        else:
            # Precision of the positive class.
            precision_1 = right_1_count / predict_1_count
        if real_1_count == 0:
            recall_1 = 0
        else:
            # Recall of the positive class.
            recall_1 = right_1_count / real_1_count
        return {
            "size": size,
            "acc": (cm[0, 0] + cm[1, 1]) / size,
            "precision_1": precision_1,
            "recall_1": recall_1,
            "auc": self.eval_metric(y_true, y_pred),
            "mse": mse
        }
def save(self, save_path):
model_prefix = os.path.join(save_path, 'deepfm')
print("Save model...", save_path, file=sys.stderr)
self.saver.save(self.sess, model_prefix)
if self._config is not None:
config_path = os.path.join(save_path, "config.json")
with open(config_path, 'w') as fp:
json.dump(fp)
print("Save model done.", save_path, file=sys.stderr)
    def load(self, model_path):
        """Restore a saved model: meta-graph, weights and optional config.json.

        :param model_path: directory containing the 'deepfm' checkpoint files.
        """
        # Drop any live session/graph before importing the saved meta-graph.
        if self.sess is not None:
            self.sess.close()
        if self.graph is not None:
            self.graph = None
        model_prefix = os.path.join(model_path, 'deepfm')
        print("Load model...", model_path, file=sys.stderr)
        self.sess = tf1.Session()
        saver = tf1.train.import_meta_graph(model_prefix + '.meta', clear_devices=True)
        saver.restore(self.sess, model_prefix)
        # Re-bind the placeholders/output tensor by the names they were given
        # when the graph was built, so predict()/evaluate() keep working.
        self.feat_index = tf1.get_default_graph().get_tensor_by_name('feat_index:0')
        self.feat_value = tf1.get_default_graph().get_tensor_by_name('feat_value:0')
        self.dropout_keep_fm = tf1.get_default_graph().get_tensor_by_name('dropout_keep_fm:0')
        self.dropout_keep_deep = tf1.get_default_graph().get_tensor_by_name('dropout_keep_deep:0')
        self.train_phase = tf1.get_default_graph().get_tensor_by_name('train_phase:0')
        self.out = tf1.get_default_graph().get_tensor_by_name('out:0')
        # The config is optional; older checkpoints may not ship one.
        config_path = os.path.join(model_path, "config.json")
        if os.path.exists(config_path):
            with open(config_path) as fp:
                self._config = json.load(fp)
        else:
            self._config = None
        print("Load model done", model_path, file=sys.stderr)
| 45.732323 | 146 | 0.57817 |
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
from sklearn import metrics
import os
import sys
import json
class DeepFM(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size,
embedding_size=8, dropout_fm=[1.0, 1.0],
deep_layers=[32, 32], dropout_deep=[0.5, 0.5, 0.5],
deep_layers_activation=tf.nn.relu,
epoch=10, batch_size=256,
learning_rate=0.001, optimizer_type="adam",
batch_norm=0, batch_norm_decay=0.995,
verbose=False, random_seed=2016,
use_fm=True, use_deep=True,
loss_type="logloss", eval_metric=roc_auc_score,
l2_reg=0.0, greater_is_better=True, threshold=0.5
):
assert (use_fm or use_deep)
assert loss_type in ["logloss", "mse"], \
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
self.feature_size = feature_size
self.field_size = field_size
self.embedding_size = embedding_size
self.dropout_fm = dropout_fm
self.deep_layers = deep_layers
self.dropout_deep = dropout_deep
self.deep_layers_activation = deep_layers_activation
self.use_fm = use_fm
self.use_deep = use_deep
self.l2_reg = l2_reg
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer_type
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
self.verbose = verbose
self.random_seed = random_seed
self.loss_type = loss_type
self.eval_metric = eval_metric
self.greater_is_better = greater_is_better
self.train_result, self.valid_result = [], []
self.sess = None
self.graph = None
self._config = None
self.threshold = threshold
def _make_config_pack(self):
self._config = {
"feature_size": self.feature_size,
"field_size ": self.field_size,
"embedding_size ": self.embedding_size,
"dropout_fm ": self.dropout_fm,
"deep_layers ": self.deep_layers,
"dropout_deep ": self.dropout_deep,
"deep_layers_activation ": self.deep_layers_activation,
"use_fm ": self.use_fm,
"use_deep ": self.use_deep,
"l2_reg ": self.l2_reg,
"epoch ": self.epoch,
"batch_size ": self.batch_size,
"learning_rate ": self.learning_rate,
"optimizer_type ": self.optimizer_type,
"batch_norm ": self.batch_norm,
"batch_norm_decay ": self.batch_norm_decay,
"verbose ": self.verbose,
"random_seed ": self.random_seed,
"loss_type": self.loss_type,
"eval_metric ": self.eval_metric,
"greater_is_better ": self.greater_is_better,
}
def init_graph(self):
if self.sess is not None:
return
self.graph = tf.Graph()
with self.graph.as_default():
tf1.set_random_seed(self.random_seed)
self.feat_index = tf1.placeholder(tf.int32, shape=[None, None],
name="feat_index")
self.feat_value = tf1.placeholder(tf.float32, shape=[None, None],
name="feat_value")
self.label = tf1.placeholder(tf.float32, shape=[None, 1], name="label")
self.dropout_keep_fm = tf1.placeholder(tf.float32, shape=[None], name="dropout_keep_fm")
self.dropout_keep_deep = tf1.placeholder(tf.float32, shape=[None], name="dropout_keep_deep")
self.train_phase = tf1.placeholder(tf.bool, name="train_phase")
self.weights = self._initialize_weights()
self.embeddings = tf.nn.embedding_lookup(
self.weights["feature_embeddings"],
self.feat_index
)
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
self.embeddings = tf.multiply(self.embeddings, feat_value)
self.y_first_order = tf.nn.embedding_lookup(self.weights["feature_bias"], self.feat_index)
self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 2)
self.y_first_order = tf.nn.dropout(self.y_first_order, rate=1 - self.dropout_keep_fm[0])
self.summed_features_emb = tf.reduce_sum(self.embeddings, 1)
self.summed_features_emb_square = tf.square(self.summed_features_emb)
self.squared_features_emb = tf.square(self.embeddings)
self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1)
self.y_second_order = 0.5 * tf.subtract(self.summed_features_emb_square,
self.squared_sum_features_emb)
self.y_second_order = tf.nn.dropout(self.y_second_order, rate=1 - self.dropout_keep_fm[1])
self.y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size])
self.y_deep = tf.nn.dropout(self.y_deep, rate=1 - self.dropout_keep_deep[0])
for i in range(0, len(self.deep_layers)):
self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]),
self.weights["bias_%d" % i])
if self.batch_norm:
self.y_deep = self.batch_norm_layer(self.y_deep, train_phase=self.train_phase,
scope_bn="bn_%d" % i)
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep,
rate=1 - self.dropout_keep_deep[1 + i])
if self.use_fm and self.use_deep:
concat_input = tf.concat([self.y_first_order, self.y_second_order, self.y_deep], axis=1)
elif self.use_fm:
concat_input = tf.concat([self.y_first_order, self.y_second_order], axis=1)
elif self.use_deep:
concat_input = self.y_deep
self.out = tf.add(tf.matmul(concat_input, self.weights["concat_projection"]), self.weights["concat_bias"])
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out, name='out')
self.loss = tf1.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
if self.l2_reg > 0:
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["concat_projection"])
if self.use_deep:
for i in range(len(self.deep_layers)):
self.loss += tf.contrib.layers.l2_regularizer(
self.l2_reg)(self.weights["layer_%d" % i])
if self.optimizer_type == "adam":
self.optimizer = tf1.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf1.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf1.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(
self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf1.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(
self.loss)
self.saver = tf1.train.Saver()
init = tf1.global_variables_initializer()
self.sess = self._init_session()
self.sess.run(init)
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _init_session(self):
config = tf1.ConfigProto(device_count={"gpu": 0})
config.gpu_options.allow_growth = True
return tf1.Session(config=config)
def _initialize_weights(self):
weights = dict()
weights["feature_embeddings"] = tf.Variable(
tf.random.normal([self.feature_size, self.embedding_size], 0.0, 0.01),
name="feature_embeddings")
weights["feature_bias"] = tf.Variable(
            tf.random.uniform([self.feature_size, 1], 0.0, 1.0), name="feature_bias")
num_layer = len(self.deep_layers)
input_size = self.field_size * self.embedding_size
glorot = np.sqrt(2.0 / (input_size + self.deep_layers[0]))
weights["layer_0"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
dtype=np.float32)
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32)
weights["bias_%d" % i] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32)
if self.use_fm and self.use_deep:
input_size = self.field_size + self.embedding_size + self.deep_layers[-1]
elif self.use_fm:
input_size = self.field_size + self.embedding_size
elif self.use_deep:
input_size = self.deep_layers[-1]
glorot = np.sqrt(2.0 / (input_size + 1))
weights["concat_projection"] = tf.Variable(
np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
dtype=np.float32)
weights["concat_bias"] = tf.Variable(tf.constant(0.01), dtype=np.float32)
return weights
def batch_norm_layer(self, x, train_phase, scope_bn):
bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=True, reuse=None, trainable=True, scope=scope_bn)
bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
is_training=False, reuse=True, trainable=True, scope=scope_bn)
z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
return z
def get_batch(self, Xi, Xv, y, batch_size, index):
start = index * batch_size
end = (index + 1) * batch_size
end = end if end < len(y) else len(y)
return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def fit_on_batch(self, Xi, Xv, y):
feed_dict = {self.feat_index: Xi,
self.feat_value: Xv,
self.label: y,
self.dropout_keep_fm: self.dropout_fm,
self.dropout_keep_deep: self.dropout_deep,
self.train_phase: True}
out, loss, opt = self.sess.run((self.out, self.loss, self.optimizer), feed_dict=feed_dict)
return out, loss
def fit(self, Xi_train, Xv_train, y_train,
Xi_valid=None, Xv_valid=None, y_valid=None,
early_stopping=False, refit=False):
has_valid = Xv_valid is not None
Xi_train = Xi_train.copy()
Xv_train = Xv_train.copy()
y_train = y_train.copy()
for epoch in range(self.epoch):
t1 = time()
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
trian_out, train_loss = self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
if i % 1000 == 0:
print("epoch:%d batch:%d train_loss=%.4f" % (epoch, i, train_loss), file=sys.stderr)
train_me = self.evaluate(Xi_train, Xv_train, y_train)
self.train_result.append(train_me)
if has_valid:
valid_me = self.evaluate(Xi_valid, Xv_valid, y_valid)
self.valid_result.append(valid_me)
if self.verbose > 0 and epoch % self.verbose == 0:
print("[%d] [train] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]"
% (epoch + 1,
train_me['auc'],
train_me['acc'],
train_me['mse'],
train_me['precision_1'],
train_me['recall_1'],
time() - t1))
if has_valid:
print(
"[%d] [valid] auc=%.4f acc=%.4f mse=%.4f precision_1=%.4f recall_1=%.4f [%.1f s]"
% (epoch + 1,
valid_me['auc'],
valid_me['acc'],
valid_me['mse'],
valid_me['precision_1'],
valid_me['recall_1'],
time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
Xi_train = Xi_train + Xi_valid
Xv_train = Xv_train + Xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train,
self.batch_size, i)
self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
train_result = self.evaluate(Xi_train, Xv_train, y_train)
if abs(train_result - best_train_score) < 0.001 or \
(self.greater_is_better and train_result > best_train_score) or \
((not self.greater_is_better) and train_result < best_train_score):
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] and \
valid_result[-2] < valid_result[-3] and \
valid_result[-3] < valid_result[-4] and \
valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] \
and valid_result[-2] > valid_result[-3] \
and valid_result[-3] > valid_result[-4] \
and valid_result[-4] > valid_result[-5]:
return True
return False
def predict(self, Xi, Xv):
dummy_y = [1] * len(Xi)
batch_index = 0
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
y_pred = None
while len(Xi_batch) > 0:
num_batch = len(y_batch)
feed_dict = {self.feat_index: Xi_batch,
self.feat_value: Xv_batch,
self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
self.train_phase: False}
batch_out = self.sess.run(self.out, feed_dict=feed_dict)
if batch_index == 0:
y_pred = np.reshape(batch_out, (num_batch,))
else:
y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
batch_index += 1
Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
return y_pred
def evaluate(self, Xi, Xv, y_true):
size = y_true.shape[0]
y_pred = self.predict(Xi, Xv)
error = y_true - y_pred
mse = (error * error).sum() / size
y_pred_m = y_pred.copy()
y_pred_m[y_pred_m >= self.threshold] = 1
y_pred_m[y_pred_m < self.threshold] = 0
cm = metrics.confusion_matrix(y_true, y_pred_m, labels=[1, 0])
real_1_count = cm[0, :].sum()
predict_1_count = cm[:, 0].sum()
right_1_count = cm[0, 0]
if predict_1_count == 0:
precision_1 = 0
else:
precision_1 = right_1_count / predict_1_count
if real_1_count == 0:
recall_1 = 0
else:
recall_1 = right_1_count / real_1_count
return {
"size": size,
"acc": (cm[0, 0] + cm[1, 1]) / size,
"precision_1": precision_1,
"recall_1": recall_1,
"auc": self.eval_metric(y_true, y_pred),
"mse": mse
}
def save(self, save_path):
model_prefix = os.path.join(save_path, 'deepfm')
print("Save model...", save_path, file=sys.stderr)
self.saver.save(self.sess, model_prefix)
if self._config is not None:
config_path = os.path.join(save_path, "config.json")
with open(config_path, 'w') as fp:
json.dump(fp)
print("Save model done.", save_path, file=sys.stderr)
def load(self, model_path):
if self.sess is not None:
self.sess.close()
if self.graph is not None:
self.graph = None
model_prefix = os.path.join(model_path, 'deepfm')
print("Load model...", model_path, file=sys.stderr)
self.sess = tf1.Session()
saver = tf1.train.import_meta_graph(model_prefix + '.meta', clear_devices=True)
saver.restore(self.sess, model_prefix)
self.feat_index = tf1.get_default_graph().get_tensor_by_name('feat_index:0')
self.feat_value = tf1.get_default_graph().get_tensor_by_name('feat_value:0')
self.dropout_keep_fm = tf1.get_default_graph().get_tensor_by_name('dropout_keep_fm:0')
self.dropout_keep_deep = tf1.get_default_graph().get_tensor_by_name('dropout_keep_deep:0')
self.train_phase = tf1.get_default_graph().get_tensor_by_name('train_phase:0')
self.out = tf1.get_default_graph().get_tensor_by_name('out:0')
config_path = os.path.join(model_path, "config.json")
if os.path.exists(config_path):
with open(config_path) as fp:
self._config = json.load(fp)
else:
self._config = None
print("Load model done", model_path, file=sys.stderr)
| true | true |
7900cceb71bac89841602d5cf4d5c758a3009a4d | 2,092 | py | Python | python/hsfs/constructor/join.py | DhananjayMukhedkar/feature-store-api | 8d3726911d56876a3ad5e2e55b0ac5e1b610d4dd | [
"Apache-2.0"
] | null | null | null | python/hsfs/constructor/join.py | DhananjayMukhedkar/feature-store-api | 8d3726911d56876a3ad5e2e55b0ac5e1b610d4dd | [
"Apache-2.0"
] | null | null | null | python/hsfs/constructor/join.py | DhananjayMukhedkar/feature-store-api | 8d3726911d56876a3ad5e2e55b0ac5e1b610d4dd | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hsfs import util
from hsfs.constructor import query
import humps
class Join:
    """Join between two feature-store queries, serializable for the backend."""

    # Supported join types; INNER is the fallback when none is specified.
    INNER = "INNER"
    LEFT = "LEFT"
    RIGHT = "RIGHT"
    FULL = "FULL"
    CROSS = "CROSS"
    LEFT_SEMI_JOIN = "LEFT_SEMI_JOIN"
    COMMA = "COMMA"

    def __init__(self, query, on, left_on, right_on, join_type, prefix):
        self._query = query
        self._on = util.parse_features(on)
        self._left_on = util.parse_features(left_on)
        self._right_on = util.parse_features(right_on)
        # Default to an inner join when no explicit type was given.
        self._join_type = join_type or self.INNER
        self._prefix = prefix

    def to_dict(self):
        """Serialize this join into the camel-cased payload the REST API expects."""
        payload = {"query": self._query}
        payload["on"] = self._on
        payload["leftOn"] = self._left_on
        payload["rightOn"] = self._right_on
        payload["type"] = self._join_type
        payload["prefix"] = self._prefix
        return payload

    @classmethod
    def from_response_json(cls, json_dict):
        """Build a Join from a backend response dict (camelCase keys)."""
        decamelized = humps.decamelize(json_dict)
        sub_query = query.Query.from_response_json(decamelized["query"])
        return cls(
            query=sub_query,
            on=decamelized.get("on", None),
            left_on=decamelized.get("left_on", None),
            right_on=decamelized.get("right_on", None),
            join_type=decamelized.get("join_type", None),
            prefix=decamelized.get("prefix", None),
        )

    @property
    def query(self):
        # Left-hand side query participating in the join.
        return self._query

    @query.setter
    def query(self, query):
        self._query = query
| 29.885714 | 76 | 0.640057 |
from hsfs import util
from hsfs.constructor import query
import humps
class Join:
INNER = "INNER"
LEFT = "LEFT"
RIGHT = "RIGHT"
FULL = "FULL"
CROSS = "CROSS"
LEFT_SEMI_JOIN = "LEFT_SEMI_JOIN"
COMMA = "COMMA"
def __init__(self, query, on, left_on, right_on, join_type, prefix):
self._query = query
self._on = util.parse_features(on)
self._left_on = util.parse_features(left_on)
self._right_on = util.parse_features(right_on)
self._join_type = join_type or self.INNER
self._prefix = prefix
def to_dict(self):
return {
"query": self._query,
"on": self._on,
"leftOn": self._left_on,
"rightOn": self._right_on,
"type": self._join_type,
"prefix": self._prefix,
}
@classmethod
def from_response_json(cls, json_dict):
json_decamelized = humps.decamelize(json_dict)
return cls(
query=query.Query.from_response_json(json_decamelized["query"]),
on=json_decamelized.get("on", None),
left_on=json_decamelized.get("left_on", None),
right_on=json_decamelized.get("right_on", None),
join_type=json_decamelized.get("join_type", None),
prefix=json_decamelized.get("prefix", None),
)
@property
def query(self):
return self._query
@query.setter
def query(self, query):
self._query = query
| true | true |
7900cf807768f81af7a8afeee1f467074b04189f | 16,579 | py | Python | official/nlp/transformer/utils/metrics.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 153 | 2020-10-25T13:58:04.000Z | 2022-03-07T06:01:54.000Z | official/nlp/transformer/utils/metrics.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 11 | 2020-07-13T08:29:00.000Z | 2022-03-24T07:21:09.000Z | official/nlp/transformer/utils/metrics.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 23 | 2020-10-25T14:44:47.000Z | 2021-03-31T02:12:13.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
  """Zero-pad x and y along axis 1 so both share the longer length.

  Args:
    x: 3-D tensor (e.g. logits of shape [batch_size, x_len, vocab_size]).
    y: 2-D tensor (e.g. labels of shape [batch_size, y_len]).

  Returns:
    Tuple (x, y) with both second dimensions equal to max(x_len, y_len).
  """
  with tf.name_scope("pad_to_same_length"):
    len_x = tf.shape(x)[1]
    len_y = tf.shape(y)[1]
    target = tf.maximum(len_x, len_y)
    padded_x = tf.pad(x, [[0, 0], [0, target - len_x], [0, 0]])
    padded_y = tf.pad(y, [[0, 0], [0, target - len_y]])
    return padded_x, padded_y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
  """Calculate cross entropy loss while ignoring padding.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
    smoothing: Label smoothing constant, used to determine the on and off values
    vocab_size: int size of the vocabulary

  Returns:
    Returns the cross entropy loss and weight tensors: float32 tensors with
      shape [batch_size, max(length_logits, length_labels)]
  """
  with tf.name_scope("loss", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)

    # Calculate smoothing cross entropy: the true class gets `confidence`
    # probability mass, the remaining mass is spread uniformly over the
    # other vocab_size - 1 classes.
    with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
      confidence = 1.0 - smoothing
      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
      xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          logits=logits, labels=soft_targets)

      # Calculate the best (lowest) possible value of cross entropy, and
      # subtract from the cross entropy loss. The 1e-20 guards against
      # log(0) when smoothing == 0.
      normalizing_constant = -(
          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
          low_confidence * tf.log(low_confidence + 1e-20))
      xentropy -= normalizing_constant

    # Weight of 0 on padding positions (label id 0) removes them from the loss.
    weights = tf.to_float(tf.not_equal(labels, 0))
    return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
  """Wrap a metric fn that returns scores and weights as an eval metric fn.

  The input metric_fn returns values for the current batch; the wrapper
  aggregates those values over all evaluated batches.

  Args:
    metric_fn: function returning (scores, weights) for the current batch's
      logits and predicted labels.

  Returns:
    Function that aggregates metric_fn's per-batch results.
  """
  def aggregated_metric_fn(*args):
    """Aggregate metric_fn's per-batch scores with a streaming weighted mean."""
    scores, weights = metric_fn(*args)
    # tf.metrics.mean keeps running totals, so batches are weighted correctly.
    return tf.metrics.mean(scores, weights)

  return aggregated_metric_fn
def get_eval_metrics(logits, labels, params):
  """Return dictionary of model evaluation metrics.

  Args:
    logits: model outputs, [batch_size, length, vocab_size].
    labels: target ids, [batch_size, length].
    params: dict with at least "vocab_size" and "use_tpu" keys.

  Returns:
    Dict mapping "metrics/<name>" to aggregated eval-metric ops.
  """
  metrics = {
      "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
      "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
          logits, labels),
      "accuracy_per_sequence": _convert_to_eval_metric(
          padded_sequence_accuracy)(logits, labels),
      "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
          logits, labels, params["vocab_size"]),
  }

  if not params["use_tpu"]:
    # TPU does not support tf.py_func, which the BLEU/ROUGE approximations
    # rely on, so those metrics are only added off-TPU.
    metrics.update({
        "approx_bleu_score": _convert_to_eval_metric(
            bleu_score)(logits, labels),
        "rouge_2_fscore": _convert_to_eval_metric(
            rouge_2_fscore)(logits, labels),
        "rouge_L_fscore": _convert_to_eval_metric(
            rouge_l_fscore)(logits, labels),
    })

  # Prefix each of the metric names with "metrics/". This allows the metric
  # graphs to display under the "metrics" category in TensorBoard.
  metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
  return metrics
def padded_accuracy(logits, labels):
  """Fraction of predictions matching labels, ignoring padding (label 0).

  Returns:
    (per-token correctness as floats, weights masking out padding positions).
  """
  with tf.variable_scope("padded_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    mask = tf.to_float(tf.not_equal(labels, 0))
    predictions = tf.to_int32(tf.argmax(logits, axis=-1))
    targets = tf.to_int32(labels)
    hits = tf.to_float(tf.equal(predictions, targets))
    return hits, mask
def padded_accuracy_topk(logits, labels, k):
  """Fraction of non-padding positions whose label is in the top-k logits."""
  with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    mask = tf.to_float(tf.not_equal(labels, 0))
    # k may exceed the vocabulary size; clamp it so top_k stays valid.
    effective_k = tf.minimum(k, tf.shape(logits)[-1])
    _, topk_ids = tf.nn.top_k(logits, k=effective_k)
    topk_ids = tf.to_int32(topk_ids)
    targets = tf.to_int32(labels)
    # Broadcast labels against the k candidates so they can be compared.
    targets = tf.expand_dims(targets, axis=-1) + tf.zeros_like(topk_ids)
    match_any = tf.reduce_sum(tf.to_float(tf.equal(topk_ids, targets)), axis=-1)
    return match_any, mask
def padded_accuracy_top5(logits, labels):
  """Percentage of times that the true label is within the top-5 logits.

  Padding positions (label 0) are masked out by padded_accuracy_topk.
  """
  return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
  """Fraction of sequences whose every non-padding token is predicted right."""
  with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    mask = tf.to_float(tf.not_equal(labels, 0))
    predictions = tf.to_int32(tf.argmax(logits, axis=-1))
    targets = tf.to_int32(labels)
    # A sequence is wrong if any unmasked position mismatches.
    mismatches = tf.to_float(tf.not_equal(predictions, targets)) * mask
    reduce_axes = list(range(1, len(predictions.get_shape())))
    any_mismatch = tf.minimum(1.0, tf.reduce_sum(mismatches, axis=reduce_axes))
    return 1.0 - any_mismatch, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
  """Average log-perplexity excluding padding 0s. No smoothing.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
    vocab_size: int, size of the output vocabulary.

  Returns:
    (negated per-token cross entropy, padding weights) pair.
  """
  # Smoothing of 0 makes this the plain (unsmoothed) cross entropy.
  num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
  return -num, den
def bleu_score(logits, labels):
  """Approximate BLEU between greedy predictions and reference labels.

  Approximate because token ids are scored directly: no word-piece gluing,
  no detokenization, no beam search. Uses 4-gram BLEU with brevity penalty.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch-size, length_labels]

  Returns:
    (scalar approximate BLEU, constant weight 1.0).
  """
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  score = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
  return score, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word
  pieces or decode the ids and tokenize the output.

  Args:
    logits: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score.
  """
  outputs = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  score = tf.py_func(rouge_n, (outputs, labels), tf.float32)
  return score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
  """ROUGE-L score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word
  pieces or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  predicted_ids = tf.to_int32(tf.argmax(predictions, axis=-1))
  score = tf.py_func(rouge_l_sentence_level, (predicted_ids, labels),
                     tf.float32)
  return score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
| 33.765784 | 93 | 0.701671 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
  """Pad x and y along axis 1 so both share the same second-dimension length.

  Args:
    x: tensor padded on axis 1; its pad spec has three axes, so x is rank 3
       (e.g. [batch, length, vocab] logits).
    y: tensor padded on axis 1; its pad spec has two axes, so y is rank 2
       (e.g. [batch, length] labels).

  Returns:
    (x, y) zero-padded along axis 1 up to the max of their original lengths.
  """
  with tf.name_scope("pad_to_same_length"):
    x_length = tf.shape(x)[1]
    y_length = tf.shape(y)[1]
    max_length = tf.maximum(x_length, y_length)
    # Zero-pad only the length axis; x keeps its trailing axis untouched.
    x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
    y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
    return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
  """Cross entropy with label smoothing, masking out padding (label 0).

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
    smoothing: label smoothing constant; the true label gets probability
      (1 - smoothing) and the rest is spread over the other labels.
    vocab_size: int, size of the output vocabulary.

  Returns:
    Tuple of (per-token smoothed cross entropy * weights, weights), where
    weights are 1.0 for real tokens and 0.0 for padding.
  """
  with tf.name_scope("loss", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
      confidence = 1.0 - smoothing
      # Probability mass given to each of the (vocab_size - 1) wrong labels.
      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
      xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          logits=logits, labels=soft_targets)
      # Calculate the best (lowest) possible value of cross entropy, and
      # subtract it from the cross entropy loss (1e-20 guards log(0)).
      normalizing_constant = -(
          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
          low_confidence * tf.log(low_confidence + 1e-20))
      xentropy -= normalizing_constant
    weights = tf.to_float(tf.not_equal(labels, 0))
    return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
def problem_metric_fn(*args):
(scores, weights) = metric_fn(*args)
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs)
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
  """Count all n-grams of orders 1..max_order occurring in `segment`.

  Args:
    segment: sequence of tokens.
    max_order: largest n-gram length to count.

  Returns:
    collections.Counter mapping n-gram tuples to occurrence counts.
  """
  ngram_counts = collections.Counter()
  for order in xrange(1, max_order + 1):
    # Slide a window of length `order` across the segment.
    for i in xrange(0, len(segment) - order + 1):
      ngram = tuple(segment[i:i + order])
      ngram_counts[ngram] += 1
  return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
  """Return the set of distinct n-grams (as token tuples) occurring in text.

  Args:
    n: n-gram order.
    text: sequence of tokens.

  Returns:
    set of n-token tuples; empty when len(text) < n.
  """
  ngram_set = set()
  text_length = len(text)
  max_index_ngram_start = text_length - n
  for i in range(max_index_ngram_start + 1):
    ngram_set.add(tuple(text[i:i + n]))
  return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
  """LCS-based F-measure (the ROUGE-L F score).

  Args:
    llcs: length of the longest common subsequence.
    m: number of words in the reference summary.
    n: number of words in the candidate summary.

  Returns:
    Float F_lcs = ((1 + beta^2) * R_lcs * P_lcs) / (R_lcs + beta^2 * P_lcs).
  """
  r_lcs = llcs / m
  p_lcs = llcs / n
  # The 1e-12 terms keep the ratios finite when the denominators are zero.
  beta = p_lcs / (r_lcs + 1e-12)
  num = (1 + (beta ** 2)) * r_lcs * p_lcs
  denom = r_lcs + ((beta ** 2) * p_lcs)
  f_lcs = num / (denom + 1e-12)
  return f_lcs
| true | true |
7900d070d6a3db889d18a3209b1393fcb34e551c | 3,446 | py | Python | kubernetes_py/utils/HttpRequest.py | Unacademy/kubernetes-py | ad6150c2e27369590dc7a7330fe296bc45755cff | [
"Apache-2.0"
] | null | null | null | kubernetes_py/utils/HttpRequest.py | Unacademy/kubernetes-py | ad6150c2e27369590dc7a7330fe296bc45755cff | [
"Apache-2.0"
] | null | null | null | kubernetes_py/utils/HttpRequest.py | Unacademy/kubernetes-py | ad6150c2e27369590dc7a7330fe296bc45755cff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import re
import base64
import json
import os
import tempfile
import requests
import urllib3
from kubernetes_py.utils.ConvertData import convert
from six.moves.urllib.parse import urlencode
# Matches "https://<dotted-quad IPv4>" hosts. Endpoints addressed by raw IP
# trip a TLS hostname issue, so HttpRequest.send disables verification for
# them (see the TODO there).
RE_VALID_SSL_IP = re.compile(
    r'^https://(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])')
class HttpRequest:
    """A single HTTP request against a Kubernetes API endpoint.

    Attributes mirror the constructor arguments; ``send()`` performs the
    request and returns a summary dict.
    """

    def __init__(self, method='GET', host='localhost:80', url='/', data=None, auth=None,
                 cert=None, ca_cert=None, ca_cert_data=None, token=None):
        """Store the request parameters.

        Args:
            method: HTTP verb, e.g. 'GET', 'POST', 'PUT', 'PATCH'.
            host: scheme + host (and optional port) the request is sent to.
            url: path component, appended to ``host`` by ``send()``.
            data: payload; query string for GET, JSON body otherwise.
            auth: optional ``requests``-style auth object/tuple.
            cert: optional client certificate for ``requests``.
            ca_cert: optional CA bundle path used for TLS verification.
            ca_cert_data: optional base64-encoded CA certificate content.
            token: optional bearer token for the Authorization header.
        """
        self.http_method = method
        self.http_host = host
        self.url = url
        self.data = data
        self.auth = auth
        self.cert = cert
        self.ca_cert = ca_cert
        self.ca_cert_data = ca_cert_data
        self.token = token

    def send(self):
        """Execute the request and summarize the response.

        Returns:
            dict with keys 'success' (True on any 2xx status), 'status'
            (int HTTP status), 'reason' (str) and 'data' (decoded JSON
            when possible, otherwise the raw text/bytes body).
        """
        state = dict(success=False, reason=None, status=None, data=None)
        http_headers = dict()
        http_headers['Accept'] = 'application/json'

        if self.http_method in ['PUT', 'POST', 'PATCH']:
            http_headers['Content-type'] = 'application/json'

        if self.token is not None:
            http_headers['Authorization'] = 'Bearer {token}'.format(token=self.token)

        if self.data is not None and self.http_method in ['GET']:
            # GET payloads travel in the query string, not the body.
            url = "{0}?{1}".format(self.url, urlencode(self.data))
            self.url = url

        self.url = self.http_host + self.url
        temp = None

        # NOTE(review): TLS verification defaults to OFF unless a CA is
        # supplied; callers should pass ca_cert/ca_cert_data where possible.
        verify = False
        if self.ca_cert is not None:
            verify = self.ca_cert

        if self.ca_cert_data is not None:
            # Materialize the in-memory CA cert to a temp file because
            # requests' `verify` parameter only accepts a file path.
            temp = tempfile.NamedTemporaryFile(delete=False)
            data = base64.b64decode(self.ca_cert_data)
            temp.write(data)
            temp.close()
            verify = temp.name

        # TODO: TLS issue with Python 2.7 and urllib3 when hostname is an IP address
        # A better fix should be found but I can't think of anything else for now.
        search_result = RE_VALID_SSL_IP.search(self.http_host)
        if search_result:
            verify = False
            urllib3.disable_warnings()

        try:
            response = requests.request(
                method=self.http_method,
                url=self.url,
                auth=self.auth,
                cert=self.cert,
                headers=http_headers,
                data="" if self.data is None else json.dumps(self.data),
                verify=verify
            )
        finally:
            # Always remove the temp CA file, even if the request raised.
            if temp is not None:
                os.unlink(temp.name)

        state['status'] = response.status_code
        state['reason'] = response.reason

        # There was an issue with "kubectl logs" type requests where returned
        # content is "text/plain" and we do have characters of unknown origin.
        try:
            resp_data = response.content.decode('utf-8')
        except UnicodeDecodeError:
            resp_data = response.content

        if len(resp_data) > 0:
            try:
                state['data'] = convert(data=json.loads(resp_data))
            except Exception:
                # Body is not JSON; hand back the raw payload.
                state['data'] = resp_data

        if 200 <= state['status'] <= 299:
            state['success'] = True

        return state
| 31.327273 | 120 | 0.578642 |
import re
import base64
import json
import os
import tempfile
import requests
import urllib3
from kubernetes_py.utils.ConvertData import convert
from six.moves.urllib.parse import urlencode
RE_VALID_SSL_IP = re.compile(
r'^https://(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])')
class HttpRequest:
def __init__(self, method='GET', host='localhost:80', url='/', data=None, auth=None,
cert=None, ca_cert=None, ca_cert_data=None, token=None):
self.http_method = method
self.http_host = host
self.url = url
self.data = data
self.auth = auth
self.cert = cert
self.ca_cert = ca_cert
self.ca_cert_data = ca_cert_data
self.token = token
def send(self):
state = dict(success=False, reason=None, status=None, data=None)
http_headers = dict()
http_headers['Accept'] = 'application/json'
if self.http_method in ['PUT', 'POST', 'PATCH']:
http_headers['Content-type'] = 'application/json'
if self.token is not None:
http_headers['Authorization'] = 'Bearer {token}'.format(token=self.token)
if self.data is not None and self.http_method in ['GET']:
url = "{0}?{1}".format(self.url, urlencode(self.data))
self.url = url
self.url = self.http_host + self.url
temp = None
verify = False
if self.ca_cert is not None:
verify = self.ca_cert
if self.ca_cert_data is not None:
temp = tempfile.NamedTemporaryFile(delete=False)
data = base64.b64decode(self.ca_cert_data)
temp.write(data)
temp.close()
verify = temp.name
search_result = RE_VALID_SSL_IP.search(self.http_host)
if search_result:
verify = False
urllib3.disable_warnings()
try:
response = requests.request(
method=self.http_method,
url=self.url,
auth=self.auth,
cert=self.cert,
headers=http_headers,
data="" if self.data is None else json.dumps(self.data),
verify=verify
)
except Exception as err:
raise err
finally:
if temp is not None:
os.unlink(temp.name)
state['status'] = response.status_code
state['reason'] = response.reason
# There was an issue with "kubectl logs" type requests where returned content is "text/plain" and
# we do have characters of unknown origin.
try:
resp_data = response.content.decode('utf-8')
except UnicodeDecodeError:
resp_data = response.content
if len(resp_data) > 0:
try:
state['data'] = convert(data=json.loads(resp_data))
except Exception:
state['data'] = resp_data
if 200 <= state['status'] <= 299:
state['success'] = True
return state
| true | true |
7900d0da85aba71c44cb6fb642ef10d47ab012c5 | 4,420 | py | Python | core/dbt/parser/read_files.py | tconbeer/dbt | bf867f6aff79fd9dad98ed36ceecd4aa181fe106 | [
"Apache-2.0"
] | null | null | null | core/dbt/parser/read_files.py | tconbeer/dbt | bf867f6aff79fd9dad98ed36ceecd4aa181fe106 | [
"Apache-2.0"
] | null | null | null | core/dbt/parser/read_files.py | tconbeer/dbt | bf867f6aff79fd9dad98ed36ceecd4aa181fe106 | [
"Apache-2.0"
] | null | null | null | from dbt.clients.system import load_file_contents
from dbt.contracts.files import (
FilePath, ParseFileType, SourceFile, FileHash, AnySourceFile, SchemaSourceFile
)
from dbt.parser.schemas import yaml_from_file
from dbt.parser.search import FilesystemSearcher
# This loads the files contents and creates the SourceFile object
def load_source_file(
        path: FilePath, parse_file_type: ParseFileType,
        project_name: str) -> AnySourceFile:
    """Read the file at ``path`` and wrap it in a SourceFile object.

    Schema (yaml) files become SchemaSourceFile with the parsed yaml
    attached as ``dfy``; every other file type becomes a plain SourceFile.
    """
    raw_contents = load_file_contents(path.absolute_path, strip=False)
    is_schema = parse_file_type == ParseFileType.Schema
    source_cls = SchemaSourceFile if is_schema else SourceFile
    source_file = source_cls(
        path=path,
        checksum=FileHash.from_contents(raw_contents),
        parse_file_type=parse_file_type,
        project_name=project_name,
    )
    source_file.contents = raw_contents.strip()
    if is_schema:
        source_file.dfy = yaml_from_file(source_file)
    return source_file
# Special processing for big seed files
def load_seed_source_file(match: FilePath, project_name) -> SourceFile:
if match.seed_too_large():
# We don't want to calculate a hash of this file. Use the path.
source_file = SourceFile.big_seed(match)
else:
file_contents = load_file_contents(match.absolute_path, strip=False)
checksum = FileHash.from_contents(file_contents)
source_file = SourceFile(path=match, checksum=checksum)
source_file.contents = ''
source_file.parse_file_type = ParseFileType.Seed
source_file.project_name = project_name
return source_file
# Use the FilesystemSearcher to get a bunch of FilePaths, then turn
# them into a bunch of FileSource objects
def get_source_files(project, paths, extension, parse_file_type):
# file path list
fp_list = list(FilesystemSearcher(
project, paths, extension
))
# file block list
fb_list = []
for fp in fp_list:
if parse_file_type == ParseFileType.Seed:
fb_list.append(load_seed_source_file(fp, project.project_name))
else:
fb_list.append(load_source_file(
fp, parse_file_type, project.project_name))
return fb_list
def read_files_for_parser(project, files, dirs, extension, parse_ft):
    """Load all matching files, register them in ``files``, return their ids."""
    file_ids = []
    for source_file in get_source_files(project, dirs, extension, parse_ft):
        files[source_file.file_id] = source_file
        file_ids.append(source_file.file_id)
    return file_ids
# This needs to read files for multiple projects, so the 'files'
# dictionary needs to be passed in. What determines the order of
# the various projects? Is the root project always last? Do the
# non-root projects need to be done separately in order?
def read_files(project, files, parser_files):
project_files = {}
project_files['MacroParser'] = read_files_for_parser(
project, files, project.macro_paths, '.sql', ParseFileType.Macro,
)
project_files['ModelParser'] = read_files_for_parser(
project, files, project.source_paths, '.sql', ParseFileType.Model,
)
project_files['SnapshotParser'] = read_files_for_parser(
project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot,
)
project_files['AnalysisParser'] = read_files_for_parser(
project, files, project.analysis_paths, '.sql', ParseFileType.Analysis,
)
project_files['DataTestParser'] = read_files_for_parser(
project, files, project.test_paths, '.sql', ParseFileType.Test,
)
project_files['SeedParser'] = read_files_for_parser(
project, files, project.data_paths, '.csv', ParseFileType.Seed,
)
project_files['DocumentationParser'] = read_files_for_parser(
project, files, project.docs_paths, '.md', ParseFileType.Documentation,
)
project_files['SchemaParser'] = read_files_for_parser(
project, files, project.all_source_paths, '.yml', ParseFileType.Schema,
)
# Also read .yaml files for schema files. Might be better to change
# 'read_files_for_parser' accept an array in the future.
yaml_files = read_files_for_parser(
project, files, project.all_source_paths, '.yaml', ParseFileType.Schema,
)
project_files['SchemaParser'].extend(yaml_files)
# Store the parser files for this particular project
parser_files[project.project_name] = project_files
| 37.457627 | 88 | 0.722398 | from dbt.clients.system import load_file_contents
from dbt.contracts.files import (
FilePath, ParseFileType, SourceFile, FileHash, AnySourceFile, SchemaSourceFile
)
from dbt.parser.schemas import yaml_from_file
from dbt.parser.search import FilesystemSearcher
def load_source_file(
path: FilePath, parse_file_type: ParseFileType,
project_name: str) -> AnySourceFile:
file_contents = load_file_contents(path.absolute_path, strip=False)
checksum = FileHash.from_contents(file_contents)
sf_cls = SchemaSourceFile if parse_file_type == ParseFileType.Schema else SourceFile
source_file = sf_cls(path=path, checksum=checksum,
parse_file_type=parse_file_type, project_name=project_name)
source_file.contents = file_contents.strip()
if parse_file_type == ParseFileType.Schema:
source_file.dfy = yaml_from_file(source_file)
return source_file
def load_seed_source_file(match: FilePath, project_name) -> SourceFile:
if match.seed_too_large():
source_file = SourceFile.big_seed(match)
else:
file_contents = load_file_contents(match.absolute_path, strip=False)
checksum = FileHash.from_contents(file_contents)
source_file = SourceFile(path=match, checksum=checksum)
source_file.contents = ''
source_file.parse_file_type = ParseFileType.Seed
source_file.project_name = project_name
return source_file
# Use the FilesystemSearcher to get a bunch of FilePaths, then turn
# them into a bunch of FileSource objects
def get_source_files(project, paths, extension, parse_file_type):
# file path list
fp_list = list(FilesystemSearcher(
project, paths, extension
))
# file block list
fb_list = []
for fp in fp_list:
if parse_file_type == ParseFileType.Seed:
fb_list.append(load_seed_source_file(fp, project.project_name))
else:
fb_list.append(load_source_file(
fp, parse_file_type, project.project_name))
return fb_list
def read_files_for_parser(project, files, dirs, extension, parse_ft):
parser_files = []
source_files = get_source_files(
project, dirs, extension, parse_ft
)
for sf in source_files:
files[sf.file_id] = sf
parser_files.append(sf.file_id)
return parser_files
# This needs to read files for multiple projects, so the 'files'
# dictionary needs to be passed in. What determines the order of
# the various projects? Is the root project always last? Do the
# non-root projects need to be done separately in order?
def read_files(project, files, parser_files):
project_files = {}
project_files['MacroParser'] = read_files_for_parser(
project, files, project.macro_paths, '.sql', ParseFileType.Macro,
)
project_files['ModelParser'] = read_files_for_parser(
project, files, project.source_paths, '.sql', ParseFileType.Model,
)
project_files['SnapshotParser'] = read_files_for_parser(
project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot,
)
project_files['AnalysisParser'] = read_files_for_parser(
project, files, project.analysis_paths, '.sql', ParseFileType.Analysis,
)
project_files['DataTestParser'] = read_files_for_parser(
project, files, project.test_paths, '.sql', ParseFileType.Test,
)
project_files['SeedParser'] = read_files_for_parser(
project, files, project.data_paths, '.csv', ParseFileType.Seed,
)
project_files['DocumentationParser'] = read_files_for_parser(
project, files, project.docs_paths, '.md', ParseFileType.Documentation,
)
project_files['SchemaParser'] = read_files_for_parser(
project, files, project.all_source_paths, '.yml', ParseFileType.Schema,
)
# Also read .yaml files for schema files. Might be better to change
# 'read_files_for_parser' accept an array in the future.
yaml_files = read_files_for_parser(
project, files, project.all_source_paths, '.yaml', ParseFileType.Schema,
)
project_files['SchemaParser'].extend(yaml_files)
# Store the parser files for this particular project
parser_files[project.project_name] = project_files
| true | true |
7900d11a0ef28b695663f599916955db69023cfb | 5,046 | py | Python | docs/source/conf.py | fccg/DeepCTR | ed5cd0dbef7c249087734b3aba0c8326988f367f | [
"Apache-2.0"
] | 1 | 2020-05-16T07:49:03.000Z | 2020-05-16T07:49:03.000Z | docs/source/conf.py | fccg/DeepCTR | ed5cd0dbef7c249087734b3aba0c8326988f367f | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | fccg/DeepCTR | ed5cd0dbef7c249087734b3aba0c8326988f367f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# Make the repository root importable so autodoc can find the package.
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'DeepCTR'
copyright = '2017-present, Weichen Shen'
author = 'Weichen Shen'
# The short X.Y version (left empty: only the full release string is used).
version = ''
# The full version, including alpha/beta/rc tags.
release = '0.7.4'
# -- General configuration ---------------------------------------------------
# Sphinx extension module names (builtin 'sphinx.ext.*' plus custom ones).
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Paths that contain templates, relative to this directory.
templates_path = ['_templates']
# Accept both reStructuredText and Markdown source files.
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# Language for autogenerated content (None falls back to English).
language = None
# Patterns, relative to the source directory, to ignore when looking for
# source files. Also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Assigned exactly once:
# an earlier duplicate assignment to 'alabaster' was dead configuration
# (it was silently overridden below) and has been removed.
html_theme = 'sphinx_rtd_theme'
# Paths with custom static files (e.g. style sheets); copied after the
# builtin static files, so "default.css" here overrides the builtin one.
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepCTRdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # Defaults are used for 'papersize', 'pointsize', 'preamble' and
    # 'figure_align'; add overrides here if needed.
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'DeepCTR.tex', 'DeepCTR Documentation',
     'Weichen Shen', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'deepctr', 'DeepCTR Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'DeepCTR', 'DeepCTR Documentation',
     author, 'DeepCTR', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
todo_include_todos = False
# Parse Markdown sources with recommonmark's CommonMark parser.
# NOTE(review): 'source_parsers' is deprecated in newer Sphinx releases
# (replaced by registering the parser via an extension); kept here for
# compatibility with the Sphinx version this project pins — confirm.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
| 29.682353 | 79 | 0.649822 |
# Sphinx configuration for the DeepCTR documentation.
# Make the repository root importable so autodoc can find the package.
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information ------------------------------------------------------
project = 'DeepCTR'
copyright = '2017-present, Weichen Shen'
author = 'Weichen Shen'
# Short X.Y version (unused here) and the full release string.
version = ''
release = '0.7.4'
# Sphinx extension modules to enable.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
templates_path = ['_templates']
# Both reStructuredText and Markdown sources are accepted.
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
# NOTE(review): this assignment is dead — it is overridden by the
# 'sphinx_rtd_theme' assignment near the end of this file.
html_theme = 'alabaster'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepCTRdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'DeepCTR.tex', 'DeepCTR Documentation',
     'Weichen Shen', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'deepctr', 'DeepCTR Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'DeepCTR', 'DeepCTR Documentation',
     author, 'DeepCTR', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
todo_include_todos = False
# Effective theme (overrides the 'alabaster' assignment above).
html_theme = 'sphinx_rtd_theme'
# Markdown parsing via recommonmark (deprecated key in newer Sphinx).
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
| true | true |
7900d1b99e0b3aa1837b18a6a088ca63cc1001b1 | 8,697 | py | Python | trains/utilities/args.py | noklam/trains | 70536544ed5e2b9aac8576ef2eaaef31c99ca670 | [
"Apache-2.0"
] | 1 | 2020-11-19T14:00:40.000Z | 2020-11-19T14:00:40.000Z | trains/utilities/args.py | noklam/trains | 70536544ed5e2b9aac8576ef2eaaef31c99ca670 | [
"Apache-2.0"
] | null | null | null | trains/utilities/args.py | noklam/trains | 70536544ed5e2b9aac8576ef2eaaef31c99ca670 | [
"Apache-2.0"
] | null | null | null | """ Argparse utilities"""
import sys
from six import PY2
from argparse import ArgumentParser
try:
from argparse import _SubParsersAction
except ImportError:
_SubParsersAction = type(None)
class PatchArgumentParser:
    """Monkey-patch container for ``argparse.ArgumentParser``.

    Holds the original ``parse_args`` / ``parse_known_args`` /
    ``add_subparsers`` implementations plus the patched replacements that
    record every parser/parse result and sync arguments to/from the current
    trains Task (installed by ``patch_argparse`` below). All state lives in
    class attributes; instances are never created.
    """
    # Original (unpatched) argparse methods, captured by patch_argparse().
    _original_parse_args = None
    _original_parse_known_args = None
    _original_add_subparsers = None
    # Counter used to generate unique 'dest' names for untitled subparsers.
    _add_subparsers_counter = 0
    # The Task to connect to (set via argparser_update_currenttask()).
    _current_task = None
    # Re-entrancy guard: True while we are syncing with the current task.
    _calling_current_task = False
    # Histories (lists, newest last) of parse results and parser instances.
    _last_parsed_args = None
    _last_arg_parser = None
    @staticmethod
    def add_subparsers(self, **kwargs):
        """Patched ``ArgumentParser.add_subparsers``: always provide 'dest'.

        A 'dest' is required so the chosen subparser appears in the parsed
        namespace (and can therefore be recorded/synced by the task).
        """
        if 'dest' not in kwargs:
            if kwargs.get('title'):
                kwargs['dest'] = '/' + kwargs['title']
            else:
                PatchArgumentParser._add_subparsers_counter += 1
                kwargs['dest'] = '/subparser%d' % PatchArgumentParser._add_subparsers_counter
        return PatchArgumentParser._original_add_subparsers(self, **kwargs)
    @staticmethod
    def parse_args(self, args=None, namespace=None):
        """Patched ``ArgumentParser.parse_args`` (self is the parser)."""
        return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_args,
                                                       self, args=args, namespace=namespace)
    @staticmethod
    def parse_known_args(self, args=None, namespace=None):
        """Patched ``ArgumentParser.parse_known_args`` (self is the parser)."""
        return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_known_args,
                                                       self, args=args, namespace=namespace)
    @staticmethod
    def _patched_parse_args(original_parse_fn, self, args=None, namespace=None):
        """Common implementation behind both patched parse entry points.

        Records the parser and its parse result, and — when a current task
        exists (or can be resolved while running remotely) — syncs the
        arguments with that task via ``Task._connect_argparse``.
        Returns the newest recorded parse result.
        """
        current_task = PatchArgumentParser._current_task
        # if we are running remotely, we always have a task id, so we better patch the argparser as soon as possible.
        if not current_task:
            from ..config import running_remotely, get_remote_task_id
            if running_remotely():
                # this will cause the current_task() to set PatchArgumentParser._current_task
                from trains import Task
                # noinspection PyBroadException
                try:
                    current_task = Task.get_task(task_id=get_remote_task_id())
                except Exception:
                    pass
        # automatically connect to current task:
        if current_task:
            from ..config import running_remotely
            if PatchArgumentParser._calling_current_task:
                # if we are here and running remotely by now we should try to parse the arguments
                if original_parse_fn:
                    PatchArgumentParser._add_last_parsed_args(original_parse_fn(self, args=args, namespace=namespace))
                return PatchArgumentParser._last_parsed_args[-1]
            PatchArgumentParser._calling_current_task = True
            # Store last instance and result
            PatchArgumentParser._add_last_arg_parser(self)
            parsed_args = None
            # parse if we are running in dev mode
            if not running_remotely() and original_parse_fn:
                parsed_args = original_parse_fn(self, args=args, namespace=namespace)
                PatchArgumentParser._add_last_parsed_args(parsed_args)
            # noinspection PyBroadException
            try:
                # sync to/from task
                # noinspection PyProtectedMember
                current_task._connect_argparse(
                    self, args=args, namespace=namespace,
                    parsed_args=parsed_args[0] if isinstance(parsed_args, tuple) else parsed_args
                )
            except Exception:
                pass
            # sync back and parse
            if running_remotely() and original_parse_fn:
                # if we are running python2 check if we have subparsers,
                # if we do we need to patch the args, because there is no default subparser
                if PY2:
                    import itertools

                    # Collect subparser actions that carry a non-empty
                    # 'default' attribute (recursively over nested parsers).
                    # NOTE(review): 'prev' is never read or mutated, so the
                    # mutable default is harmless here — confirm before
                    # refactoring it away.
                    def _get_sub_parsers_defaults(subparser, prev=[]):
                        actions_grp = [a._actions for a in subparser.choices.values()] if isinstance(
                            subparser, _SubParsersAction) else [subparser._actions]
                        sub_parsers_defaults = [[subparser]] if hasattr(
                            subparser, 'default') and subparser.default else []
                        for actions in actions_grp:
                            sub_parsers_defaults += [_get_sub_parsers_defaults(a, prev)
                                                     for a in actions if isinstance(a, _SubParsersAction) and
                                                     hasattr(a, 'default') and a.default]
                        return list(itertools.chain.from_iterable(sub_parsers_defaults))
                    sub_parsers_defaults = _get_sub_parsers_defaults(self)
                    if sub_parsers_defaults:
                        if args is None:
                            # args default to the system args
                            import sys as _sys
                            args = _sys.argv[1:]
                        else:
                            args = list(args)
                        # make sure we append the subparsers
                        for a in sub_parsers_defaults:
                            if a.default not in args:
                                args.append(a.default)
                PatchArgumentParser._add_last_parsed_args(original_parse_fn(self, args=args, namespace=namespace))
            else:
                PatchArgumentParser._add_last_parsed_args(parsed_args or {})
            PatchArgumentParser._calling_current_task = False
            return PatchArgumentParser._last_parsed_args[-1]
        # No current task: just record the parser and the (possibly empty) result.
        # Store last instance and result
        PatchArgumentParser._add_last_arg_parser(self)
        PatchArgumentParser._add_last_parsed_args(
            {} if not original_parse_fn else original_parse_fn(self, args=args, namespace=namespace))
        return PatchArgumentParser._last_parsed_args[-1]
    @staticmethod
    def _add_last_parsed_args(parsed_args):
        """Append a parse result to the history list (creates it if needed)."""
        PatchArgumentParser._last_parsed_args = (PatchArgumentParser._last_parsed_args or []) + [parsed_args]
    @staticmethod
    def _add_last_arg_parser(a_argparser):
        """Append a parser instance to the history list (creates it if needed)."""
        PatchArgumentParser._last_arg_parser = (PatchArgumentParser._last_arg_parser or []) + [a_argparser]
def patch_argparse():
    """Install the PatchArgumentParser hooks onto argparse.ArgumentParser.

    Idempotent: a marker attribute on the class guarantees the patch is
    applied at most once per process; a no-op if argparse was never imported.
    """
    argparse_module = sys.modules.get('argparse')
    if not argparse_module:
        return
    parser_cls = argparse_module.ArgumentParser
    if hasattr(parser_cls, '_parse_args_patched'):
        # already patched, nothing to do
        return
    # mark the class so a second call becomes a no-op
    parser_cls._parse_args_patched = True
    # keep the original implementations so they can still be invoked
    PatchArgumentParser._original_parse_args = parser_cls.parse_args
    PatchArgumentParser._original_parse_known_args = parser_cls.parse_known_args
    PatchArgumentParser._original_add_subparsers = parser_cls.add_subparsers
    # swap in the patched versions
    parser_cls.parse_args = PatchArgumentParser.parse_args
    parser_cls.parse_known_args = PatchArgumentParser.parse_known_args
    parser_cls.add_subparsers = PatchArgumentParser.add_subparsers
# Notice! we are patching argparse at import time, so we know if someone parsed arguments before connecting to a task
patch_argparse()
def call_original_argparser(self, args=None, namespace=None):
    """Invoke the unpatched ArgumentParser.parse_args, if it was captured.

    Returns None when argparse has not been patched (nothing captured yet).
    """
    original = PatchArgumentParser._original_parse_args
    if not original:
        return None
    return original(self, args=args, namespace=namespace)
def argparser_parseargs_called():
    """True once any (patched) parse_args / parse_known_args call was recorded."""
    recorded_parsers = PatchArgumentParser._last_arg_parser
    return recorded_parsers is not None
def argparser_update_currenttask(task):
    """Register *task* as the Task that future parse_args calls connect to."""
    setattr(PatchArgumentParser, '_current_task', task)
def get_argparser_last_args():
    """Return [(parser, parsed_namespace), ...] for every recorded parse call.

    parse_known_args results are (namespace, extras) tuples; only the
    namespace part is returned. Empty list when nothing was recorded yet.
    """
    parsers = PatchArgumentParser._last_arg_parser
    results = PatchArgumentParser._last_parsed_args
    if not parsers or not results:
        return []
    pairs = []
    for parser, parsed in zip(parsers, results):
        if isinstance(parsed, tuple):
            parsed = parsed[0]
        pairs.append((parser, parsed))
    return pairs
def add_params_to_parser(parser, params):
    """Add a ``--<name>`` argument to *parser* for every entry in *params*.

    Each value is coerced to the narrowest of (int, float, str) that accepts
    it; that type is used both as the argument's ``type`` converter and to
    build its default value.

    :param parser: the ArgumentParser to extend (modified in place).
    :param params: mapping of parameter name -> raw value (e.g. strings
        coming from the UI). Entries with an empty name are skipped; the
        input dict is no longer mutated (the previous implementation popped
        the empty key from the caller's dict).
    :return: the same *parser*, for call chaining.
    """
    assert isinstance(parser, ArgumentParser)
    assert isinstance(params, dict)

    def get_type_details(v):
        # Try the narrowest numeric type first; fall back to str so a
        # (type, value) pair is always returned. TypeError is caught as well
        # because e.g. int(None) raises TypeError, not ValueError (previously
        # this escaped and crashed the caller).
        for candidate in (int, float):
            try:
                return candidate, candidate(v)
            except (ValueError, TypeError):
                continue
        return str, str(v)

    for param, value in params.items():
        # Protection from UI problems sending empty dicts / empty names.
        if param == '':
            continue
        arg_type, default_value = get_type_details(value)
        parser.add_argument('--%s' % param, type=arg_type, default=default_value)
    return parser
| 44.147208 | 118 | 0.64505 | import sys
from six import PY2
from argparse import ArgumentParser
try:
from argparse import _SubParsersAction
except ImportError:
_SubParsersAction = type(None)
class PatchArgumentParser:
    """Monkey-patch container for ``argparse.ArgumentParser``.

    Holds the captured original parse/add_subparsers implementations, the
    patched replacements that record every parser and parse result, and the
    logic that syncs arguments to/from the current trains Task. All state
    lives in class attributes; instances are never created.
    """
    _original_parse_args = None
    _original_parse_known_args = None
    _original_add_subparsers = None
    _add_subparsers_counter = 0
    _current_task = None
    _calling_current_task = False
    _last_parsed_args = None
    _last_arg_parser = None
    @staticmethod
    def add_subparsers(self, **kwargs):
        """Patched add_subparsers: always supply a 'dest' so the chosen
        subparser shows up in the parsed namespace."""
        if 'dest' not in kwargs:
            if kwargs.get('title'):
                kwargs['dest'] = '/' + kwargs['title']
            else:
                PatchArgumentParser._add_subparsers_counter += 1
                kwargs['dest'] = '/subparser%d' % PatchArgumentParser._add_subparsers_counter
        return PatchArgumentParser._original_add_subparsers(self, **kwargs)
    @staticmethod
    def parse_args(self, args=None, namespace=None):
        """Patched ArgumentParser.parse_args (self is the parser)."""
        return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_args,
                                                       self, args=args, namespace=namespace)
    @staticmethod
    def parse_known_args(self, args=None, namespace=None):
        """Patched ArgumentParser.parse_known_args (self is the parser)."""
        return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_known_args,
                                                       self, args=args, namespace=namespace)
    @staticmethod
    def _patched_parse_args(original_parse_fn, self, args=None, namespace=None):
        """Record the parser/result and sync the arguments with the current
        task (resolving it from the remote task id when running remotely).
        Returns the newest recorded parse result."""
        current_task = PatchArgumentParser._current_task
        if not current_task:
            from ..config import running_remotely, get_remote_task_id
            if running_remotely():
                from trains import Task
                try:
                    current_task = Task.get_task(task_id=get_remote_task_id())
                except Exception:
                    pass
        if current_task:
            from ..config import running_remotely
            if PatchArgumentParser._calling_current_task:
                # re-entrant call while syncing: just parse and record
                if original_parse_fn:
                    PatchArgumentParser._add_last_parsed_args(original_parse_fn(self, args=args, namespace=namespace))
                return PatchArgumentParser._last_parsed_args[-1]
            PatchArgumentParser._calling_current_task = True
            PatchArgumentParser._add_last_arg_parser(self)
            parsed_args = None
            # in dev mode (not remote) parse before syncing to the task
            if not running_remotely() and original_parse_fn:
                parsed_args = original_parse_fn(self, args=args, namespace=namespace)
                PatchArgumentParser._add_last_parsed_args(parsed_args)
            try:
                current_task._connect_argparse(
                    self, args=args, namespace=namespace,
                    parsed_args=parsed_args[0] if isinstance(parsed_args, tuple) else parsed_args
                )
            except Exception:
                pass
            if running_remotely() and original_parse_fn:
                # Python 2 has no default subparser, so append the recorded
                # subparser defaults to the argument list before parsing.
                if PY2:
                    import itertools
                    def _get_sub_parsers_defaults(subparser, prev=[]):
                        actions_grp = [a._actions for a in subparser.choices.values()] if isinstance(
                            subparser, _SubParsersAction) else [subparser._actions]
                        sub_parsers_defaults = [[subparser]] if hasattr(
                            subparser, 'default') and subparser.default else []
                        for actions in actions_grp:
                            sub_parsers_defaults += [_get_sub_parsers_defaults(a, prev)
                                                     for a in actions if isinstance(a, _SubParsersAction) and
                                                     hasattr(a, 'default') and a.default]
                        return list(itertools.chain.from_iterable(sub_parsers_defaults))
                    sub_parsers_defaults = _get_sub_parsers_defaults(self)
                    if sub_parsers_defaults:
                        if args is None:
                            import sys as _sys
                            args = _sys.argv[1:]
                        else:
                            args = list(args)
                        for a in sub_parsers_defaults:
                            if a.default not in args:
                                args.append(a.default)
                PatchArgumentParser._add_last_parsed_args(original_parse_fn(self, args=args, namespace=namespace))
            else:
                PatchArgumentParser._add_last_parsed_args(parsed_args or {})
            PatchArgumentParser._calling_current_task = False
            return PatchArgumentParser._last_parsed_args[-1]
        # no current task: record the parser and the (possibly empty) result
        PatchArgumentParser._add_last_arg_parser(self)
        PatchArgumentParser._add_last_parsed_args(
            {} if not original_parse_fn else original_parse_fn(self, args=args, namespace=namespace))
        return PatchArgumentParser._last_parsed_args[-1]
    @staticmethod
    def _add_last_parsed_args(parsed_args):
        """Append a parse result to the history list (creates it if needed)."""
        PatchArgumentParser._last_parsed_args = (PatchArgumentParser._last_parsed_args or []) + [parsed_args]
    @staticmethod
    def _add_last_arg_parser(a_argparser):
        """Append a parser instance to the history list (creates it if needed)."""
        PatchArgumentParser._last_arg_parser = (PatchArgumentParser._last_arg_parser or []) + [a_argparser]
def patch_argparse():
    """Install the PatchArgumentParser hooks onto argparse.ArgumentParser.

    Idempotent: the '_parse_args_patched' marker attribute guarantees the
    patch is applied at most once; a no-op if argparse was never imported.
    """
    if not sys.modules.get('argparse') or hasattr(sys.modules['argparse'].ArgumentParser, '_parse_args_patched'):
        return
    sys.modules['argparse'].ArgumentParser._parse_args_patched = True
    # capture the originals, then swap in the patched versions
    PatchArgumentParser._original_parse_args = sys.modules['argparse'].ArgumentParser.parse_args
    PatchArgumentParser._original_parse_known_args = sys.modules['argparse'].ArgumentParser.parse_known_args
    PatchArgumentParser._original_add_subparsers = sys.modules['argparse'].ArgumentParser.add_subparsers
    sys.modules['argparse'].ArgumentParser.parse_args = PatchArgumentParser.parse_args
    sys.modules['argparse'].ArgumentParser.parse_known_args = PatchArgumentParser.parse_known_args
    sys.modules['argparse'].ArgumentParser.add_subparsers = PatchArgumentParser.add_subparsers
# Patch argparse at import time, so parses that happen before a task is
# connected are still recorded.
patch_argparse()
def call_original_argparser(self, args=None, namespace=None):
    """Invoke the unpatched ArgumentParser.parse_args if it was captured;
    returns None when argparse has not been patched yet."""
    if PatchArgumentParser._original_parse_args:
        return PatchArgumentParser._original_parse_args(self, args=args, namespace=namespace)
def argparser_parseargs_called():
    """True once any (patched) parse_args / parse_known_args was recorded."""
    return PatchArgumentParser._last_arg_parser is not None
def argparser_update_currenttask(task):
    """Register *task* as the Task that future parse_args calls connect to."""
    PatchArgumentParser._current_task = task
def get_argparser_last_args():
    """Return [(parser, parsed_namespace), ...] for every recorded parse
    call; parse_known_args tuples are reduced to their namespace part."""
    if not PatchArgumentParser._last_arg_parser or not PatchArgumentParser._last_parsed_args:
        return []
    return [(parser, args[0] if isinstance(args, tuple) else args)
            for parser, args in zip(PatchArgumentParser._last_arg_parser, PatchArgumentParser._last_parsed_args)]
def add_params_to_parser(parser, params):
    """Add a ``--<name>`` argument to *parser* for every entry in *params*.

    Each value is coerced to the narrowest of (int, float, str) that accepts
    it; that type is used both as the argument's ``type`` converter and to
    build its default value.

    :param parser: the ArgumentParser to extend (modified in place).
    :param params: mapping of parameter name -> raw value. Entries with an
        empty name are skipped; the input dict is no longer mutated (the
        previous implementation popped the empty key from the caller's dict).
    :return: the same *parser*, for call chaining.
    """
    assert isinstance(parser, ArgumentParser)
    assert isinstance(params, dict)

    def get_type_details(v):
        # Try the narrowest numeric type first; fall back to str so a
        # (type, value) pair is always returned. TypeError is caught as well
        # because e.g. int(None) raises TypeError, not ValueError (previously
        # this escaped and crashed the caller).
        for candidate in (int, float):
            try:
                return candidate, candidate(v)
            except (ValueError, TypeError):
                continue
        return str, str(v)

    for param, value in params.items():
        # Protection from UI problems sending empty dicts / empty names.
        if param == '':
            continue
        arg_type, default_value = get_type_details(value)
        parser.add_argument('--%s' % param, type=arg_type, default=default_value)
    return parser
| true | true |
7900d2473c3fa2ce82d560135117afc6aa006234 | 7,330 | py | Python | lib/loss/loss_contrast.py | wenguanwang/ContrastiveSeg | 9a381b9799c16d81e18d8f9f25ab509b93fb56de | [
"MIT"
] | 2 | 2021-02-08T12:19:29.000Z | 2021-02-08T12:44:39.000Z | lib/loss/loss_contrast.py | wenguanwang/ContrastiveSeg | 9a381b9799c16d81e18d8f9f25ab509b93fb56de | [
"MIT"
] | null | null | null | lib/loss/loss_contrast.py | wenguanwang/ContrastiveSeg | 9a381b9799c16d81e18d8f9f25ab509b93fb56de | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.loss.loss_helper import FSAuxCELoss, FSAuxRMILoss
from lib.utils.tools.logger import Logger as Log
class PixelContrastLoss(nn.Module, ABC):
    """Supervised pixel-wise contrastive loss with hard-anchor sampling.

    Embeddings of pixels sharing a ground-truth class are pulled together
    while embeddings of different classes are pushed apart. Anchors are
    sampled per class as a mix of "hard" pixels (ground truth == class but
    prediction differs) and "easy" pixels (correctly predicted).
    All hyper-parameters come from ``configer`` under the 'contrast' section.
    NOTE(review): intermediate tensors are created with ``.cuda()`` directly,
    so this module assumes a CUDA device is available.
    """
    def __init__(self, configer):
        super(PixelContrastLoss, self).__init__()
        self.configer = configer
        # Temperature for the similarity logits; base_temperature rescales
        # the final per-anchor loss.
        self.temperature = self.configer.get('contrast', 'temperature')
        self.base_temperature = self.configer.get('contrast', 'base_temperature')
        # Label excluded from anchor sampling (-1 unless the loss config
        # provides 'ce_ignore_index').
        self.ignore_label = -1
        if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
            self.ignore_label = self.configer.get('loss', 'params')['ce_ignore_index']
        # Total anchor budget over the batch and cap on views per class.
        self.max_samples = self.configer.get('contrast', 'max_samples')
        self.max_views = self.configer.get('contrast', 'max_views')
    def _hard_anchor_sampling(self, X, y_hat, y):
        """Sample up to n_view anchor embeddings per (image, class) pair.

        :param X: pixel embeddings, shape (batch, num_pixels, feat_dim).
        :param y_hat: ground-truth label per pixel, shape (batch, num_pixels).
        :param y: predicted label per pixel, shape (batch, num_pixels).
        :return: (X_, y_) with X_ of shape (total_classes, n_view, feat_dim)
            and y_ holding the class id of each row, or (None, None) when no
            class in the batch has more than self.max_views labelled pixels.
        """
        batch_size, feat_dim = X.shape[0], X.shape[-1]
        classes = []
        total_classes = 0
        for ii in range(batch_size):
            this_y = y_hat[ii]
            this_classes = torch.unique(this_y)
            # Keep classes above 0 (background excluded) that are not the
            # ignore label and that occupy more than max_views pixels.
            this_classes = [x for x in this_classes if x > 0 and x != self.ignore_label]
            this_classes = [x for x in this_classes if (this_y == x).nonzero().shape[0] > self.max_views]
            classes.append(this_classes)
            total_classes += len(this_classes)
        if total_classes == 0:
            return None, None
        # Spread the sampling budget evenly over the selected classes.
        n_view = self.max_samples // total_classes
        n_view = min(n_view, self.max_views)
        X_ = torch.zeros((total_classes, n_view, feat_dim), dtype=torch.float).cuda()
        y_ = torch.zeros(total_classes, dtype=torch.float).cuda()
        X_ptr = 0
        for ii in range(batch_size):
            this_y_hat = y_hat[ii]
            this_y = y[ii]
            this_classes = classes[ii]
            for cls_id in this_classes:
                # Hard: ground truth is cls_id but the prediction disagrees.
                hard_indices = ((this_y_hat == cls_id) & (this_y != cls_id)).nonzero()
                # Easy: ground truth and prediction both equal cls_id.
                easy_indices = ((this_y_hat == cls_id) & (this_y == cls_id)).nonzero()
                num_hard = hard_indices.shape[0]
                num_easy = easy_indices.shape[0]
                # Aim for a half/half hard-easy split; when one pool has
                # fewer than n_view / 2 pixels, take all of it and fill the
                # remainder from the other pool.
                if num_hard >= n_view / 2 and num_easy >= n_view / 2:
                    num_hard_keep = n_view // 2
                    num_easy_keep = n_view - num_hard_keep
                elif num_hard >= n_view / 2:
                    num_easy_keep = num_easy
                    num_hard_keep = n_view - num_easy_keep
                elif num_easy >= n_view / 2:
                    num_hard_keep = num_hard
                    num_easy_keep = n_view - num_hard_keep
                else:
                    # Unreachable: a class is selected only when it has more
                    # than max_views (>= n_view) pixels in total.
                    Log.info('this shoud be never touched! {} {} {}'.format(num_hard, num_easy, n_view))
                    raise Exception
                # Randomly subsample each pool to its kept size.
                perm = torch.randperm(num_hard)
                hard_indices = hard_indices[perm[:num_hard_keep]]
                perm = torch.randperm(num_easy)
                easy_indices = easy_indices[perm[:num_easy_keep]]
                indices = torch.cat((hard_indices, easy_indices), dim=0)
                X_[X_ptr, :, :] = X[ii, indices, :].squeeze(1)
                y_[X_ptr] = cls_id
                X_ptr += 1
        return X_, y_
    def _contrastive(self, feats_, labels_):
        """Supervised contrastive (InfoNCE-style) loss over sampled anchors.

        :param feats_: anchor embeddings, shape (anchor_num, n_view, feat_dim).
        :param labels_: class id per anchor row, shape (anchor_num,).
        :return: scalar loss tensor.
        """
        anchor_num, n_view = feats_.shape[0], feats_.shape[1]
        labels_ = labels_.contiguous().view(-1, 1)
        # mask[i, j] == 1 when anchors i and j share the same class.
        mask = torch.eq(labels_, torch.transpose(labels_, 0, 1)).float().cuda()
        contrast_count = n_view
        # Flatten the views: (anchor_num * n_view, feat_dim).
        contrast_feature = torch.cat(torch.unbind(feats_, dim=1), dim=0)
        anchor_feature = contrast_feature
        anchor_count = contrast_count
        # Temperature-scaled pairwise dot-product similarities.
        anchor_dot_contrast = torch.div(torch.matmul(anchor_feature, torch.transpose(contrast_feature, 0, 1)),
                                        self.temperature)
        # Subtract the per-row max for numerical stability.
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        mask = mask.repeat(anchor_count, contrast_count)
        neg_mask = 1 - mask
        # Zero out the self-similarity entries on the diagonal.
        logits_mask = torch.ones_like(mask).scatter_(1,
                                                     torch.arange(anchor_num * anchor_count).view(-1, 1).cuda(),
                                                     0)
        mask = mask * logits_mask
        # Denominator: each positive's own exp-logit plus the sum over negatives.
        neg_logits = torch.exp(logits) * neg_mask
        neg_logits = neg_logits.sum(1, keepdim=True)
        exp_logits = torch.exp(logits)
        log_prob = logits - torch.log(exp_logits + neg_logits)
        # Average log-probability over the positive set of each anchor.
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.mean()
        return loss
    def forward(self, feats, labels=None, predict=None):
        """Compute the pixel contrastive loss.

        :param feats: embedding map, shape (batch, feat_dim, H, W) — implied
            by the permute(0, 2, 3, 1) / flatten below.
        :param labels: ground-truth segmentation, shape (batch, H_gt, W_gt);
            downsampled here (nearest) to the feature resolution.
        :param predict: predicted label map; assumed to already match the
            feature resolution (it is only flattened, never resized).
        :return: scalar loss tensor.
        NOTE(review): _hard_anchor_sampling may return (None, None);
        _contrastive does not guard against that — confirm upstream configs
        make it unreachable.
        """
        labels = labels.unsqueeze(1).float().clone()
        labels = torch.nn.functional.interpolate(labels,
                                                 (feats.shape[2], feats.shape[3]), mode='nearest')
        labels = labels.squeeze(1).long()
        assert labels.shape[-1] == feats.shape[-1], '{} {}'.format(labels.shape, feats.shape)
        batch_size = feats.shape[0]
        # Flatten spatial dims: labels/predict -> (batch, H*W),
        # feats -> (batch, H*W, feat_dim).
        labels = labels.contiguous().view(batch_size, -1)
        predict = predict.contiguous().view(batch_size, -1)
        feats = feats.permute(0, 2, 3, 1)
        feats = feats.contiguous().view(feats.shape[0], -1, feats.shape[-1])
        feats_, labels_ = self._hard_anchor_sampling(feats, labels, predict)
        loss = self._contrastive(feats_, labels_)
        return loss
class ContrastAuxCELoss(nn.Module, ABC):
    """Combined segmentation loss: (aux + main) CE/RMI plus a weighted
    pixel contrastive term.

    ``preds`` must contain 'seg' and 'seg_aux' logit maps and may contain an
    'embedding' map; when the embedding is present, a PixelContrastLoss term
    weighted by the 'contrast.loss_weight' config value is added.
    """
    def __init__(self, configer=None):
        super(ContrastAuxCELoss, self).__init__()
        self.configer = configer
        # The ignore index is read here purely for logging.
        ignore_index = -1
        if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
            ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
        Log.info('ignore_index: {}'.format(ignore_index))
        self.loss_weight = self.configer.get('contrast', 'loss_weight')
        self.use_rmi = self.configer.get('contrast', 'use_rmi')
        # Main segmentation criterion: RMI variant when configured,
        # plain auxiliary cross-entropy otherwise.
        if self.use_rmi:
            self.seg_criterion = FSAuxRMILoss(configer=configer)
        else:
            self.seg_criterion = FSAuxCELoss(configer=configer)
        self.contrast_criterion = PixelContrastLoss(configer=configer)
    def forward(self, preds, target):
        """Compute the combined loss.

        :param preds: dict with 'seg' and 'seg_aux' logits (4-D, upsampled
            below to the target resolution) and optionally 'embedding'.
        :param target: ground-truth labels, shape (batch, H, W).
        :return: scalar loss tensor.
        """
        h, w = target.size(1), target.size(2)
        assert "seg" in preds
        assert "seg_aux" in preds
        seg = preds['seg']
        seg_aux = preds['seg_aux']
        embedding = preds['embedding'] if 'embedding' in preds else None
        # Upsample both prediction maps to the ground-truth resolution.
        pred = F.interpolate(input=seg, size=(h, w), mode='bilinear', align_corners=True)
        pred_aux = F.interpolate(input=seg_aux, size=(h, w), mode='bilinear', align_corners=True)
        loss = self.seg_criterion([pred_aux, pred], target)
        if embedding is not None:
            # Hard-anchor mining uses the argmax prediction at 'seg' resolution.
            _, predict = torch.max(seg, 1)
            loss_contrast = self.contrast_criterion(embedding, target, predict)
            return loss + self.loss_weight * loss_contrast
        return loss
| 37.783505 | 112 | 0.600136 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.loss.loss_helper import FSAuxCELoss, FSAuxRMILoss
from lib.utils.tools.logger import Logger as Log
class PixelContrastLoss(nn.Module, ABC):
    """Supervised pixel-wise contrastive loss with hard-anchor sampling.

    Same-class pixel embeddings are pulled together, different-class ones
    pushed apart; anchors mix "hard" (mis-predicted) and "easy" (correctly
    predicted) pixels. Uses .cuda() directly, so CUDA is assumed.
    """
    def __init__(self, configer):
        super(PixelContrastLoss, self).__init__()
        self.configer = configer
        self.temperature = self.configer.get('contrast', 'temperature')
        self.base_temperature = self.configer.get('contrast', 'base_temperature')
        # Label excluded from sampling (-1 unless 'ce_ignore_index' is set).
        self.ignore_label = -1
        if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
            self.ignore_label = self.configer.get('loss', 'params')['ce_ignore_index']
        self.max_samples = self.configer.get('contrast', 'max_samples')
        self.max_views = self.configer.get('contrast', 'max_views')
    def _hard_anchor_sampling(self, X, y_hat, y):
        """Sample up to n_view anchors per (image, class); X is
        (batch, num_pixels, feat_dim), y_hat ground truth, y predictions.
        Returns (None, None) when no class exceeds max_views pixels."""
        batch_size, feat_dim = X.shape[0], X.shape[-1]
        classes = []
        total_classes = 0
        for ii in range(batch_size):
            this_y = y_hat[ii]
            this_classes = torch.unique(this_y)
            this_classes = [x for x in this_classes if x > 0 and x != self.ignore_label]
            this_classes = [x for x in this_classes if (this_y == x).nonzero().shape[0] > self.max_views]
            classes.append(this_classes)
            total_classes += len(this_classes)
        if total_classes == 0:
            return None, None
        # Spread the anchor budget evenly over the selected classes.
        n_view = self.max_samples // total_classes
        n_view = min(n_view, self.max_views)
        X_ = torch.zeros((total_classes, n_view, feat_dim), dtype=torch.float).cuda()
        y_ = torch.zeros(total_classes, dtype=torch.float).cuda()
        X_ptr = 0
        for ii in range(batch_size):
            this_y_hat = y_hat[ii]
            this_y = y[ii]
            this_classes = classes[ii]
            for cls_id in this_classes:
                # hard: GT == cls_id but prediction differs; easy: both agree
                hard_indices = ((this_y_hat == cls_id) & (this_y != cls_id)).nonzero()
                easy_indices = ((this_y_hat == cls_id) & (this_y == cls_id)).nonzero()
                num_hard = hard_indices.shape[0]
                num_easy = easy_indices.shape[0]
                # Half/half hard-easy split, borrowing when one pool is short.
                if num_hard >= n_view / 2 and num_easy >= n_view / 2:
                    num_hard_keep = n_view // 2
                    num_easy_keep = n_view - num_hard_keep
                elif num_hard >= n_view / 2:
                    num_easy_keep = num_easy
                    num_hard_keep = n_view - num_easy_keep
                elif num_easy >= n_view / 2:
                    num_hard_keep = num_hard
                    num_easy_keep = n_view - num_hard_keep
                else:
                    # Unreachable given the > max_views selection above.
                    Log.info('this shoud be never touched! {} {} {}'.format(num_hard, num_easy, n_view))
                    raise Exception
                perm = torch.randperm(num_hard)
                hard_indices = hard_indices[perm[:num_hard_keep]]
                perm = torch.randperm(num_easy)
                easy_indices = easy_indices[perm[:num_easy_keep]]
                indices = torch.cat((hard_indices, easy_indices), dim=0)
                X_[X_ptr, :, :] = X[ii, indices, :].squeeze(1)
                y_[X_ptr] = cls_id
                X_ptr += 1
        return X_, y_
    def _contrastive(self, feats_, labels_):
        """InfoNCE-style supervised contrastive loss; feats_ is
        (anchor_num, n_view, feat_dim), labels_ the class id per row."""
        anchor_num, n_view = feats_.shape[0], feats_.shape[1]
        labels_ = labels_.contiguous().view(-1, 1)
        # mask[i, j] == 1 when anchors i and j share the same class.
        mask = torch.eq(labels_, torch.transpose(labels_, 0, 1)).float().cuda()
        contrast_count = n_view
        contrast_feature = torch.cat(torch.unbind(feats_, dim=1), dim=0)
        anchor_feature = contrast_feature
        anchor_count = contrast_count
        anchor_dot_contrast = torch.div(torch.matmul(anchor_feature, torch.transpose(contrast_feature, 0, 1)),
                                        self.temperature)
        # Row-max subtraction for numerical stability.
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        mask = mask.repeat(anchor_count, contrast_count)
        neg_mask = 1 - mask
        # Remove the self-similarity diagonal.
        logits_mask = torch.ones_like(mask).scatter_(1,
                                                     torch.arange(anchor_num * anchor_count).view(-1, 1).cuda(),
                                                     0)
        mask = mask * logits_mask
        neg_logits = torch.exp(logits) * neg_mask
        neg_logits = neg_logits.sum(1, keepdim=True)
        exp_logits = torch.exp(logits)
        log_prob = logits - torch.log(exp_logits + neg_logits)
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.mean()
        return loss
    def forward(self, feats, labels=None, predict=None):
        """feats: (batch, feat_dim, H, W) embeddings; labels: GT map resized
        (nearest) to the feature resolution; predict: prediction map already
        at the feature resolution (only flattened below)."""
        labels = labels.unsqueeze(1).float().clone()
        labels = torch.nn.functional.interpolate(labels,
                                                 (feats.shape[2], feats.shape[3]), mode='nearest')
        labels = labels.squeeze(1).long()
        assert labels.shape[-1] == feats.shape[-1], '{} {}'.format(labels.shape, feats.shape)
        batch_size = feats.shape[0]
        labels = labels.contiguous().view(batch_size, -1)
        predict = predict.contiguous().view(batch_size, -1)
        feats = feats.permute(0, 2, 3, 1)
        feats = feats.contiguous().view(feats.shape[0], -1, feats.shape[-1])
        feats_, labels_ = self._hard_anchor_sampling(feats, labels, predict)
        loss = self._contrastive(feats_, labels_)
        return loss
class ContrastAuxCELoss(nn.Module, ABC):
    """(aux + main) CE/RMI segmentation loss plus an optional weighted
    pixel contrastive term (added when preds contains 'embedding')."""
    def __init__(self, configer=None):
        super(ContrastAuxCELoss, self).__init__()
        self.configer = configer
        # Ignore index is read purely for logging.
        ignore_index = -1
        if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
            ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
        Log.info('ignore_index: {}'.format(ignore_index))
        self.loss_weight = self.configer.get('contrast', 'loss_weight')
        self.use_rmi = self.configer.get('contrast', 'use_rmi')
        # RMI variant when configured, plain auxiliary CE otherwise.
        if self.use_rmi:
            self.seg_criterion = FSAuxRMILoss(configer=configer)
        else:
            self.seg_criterion = FSAuxCELoss(configer=configer)
        self.contrast_criterion = PixelContrastLoss(configer=configer)
    def forward(self, preds, target):
        """preds: dict with 'seg'/'seg_aux' logits and optional 'embedding';
        target: ground-truth labels of shape (batch, H, W)."""
        h, w = target.size(1), target.size(2)
        assert "seg" in preds
        assert "seg_aux" in preds
        seg = preds['seg']
        seg_aux = preds['seg_aux']
        embedding = preds['embedding'] if 'embedding' in preds else None
        # Upsample predictions to the ground-truth resolution.
        pred = F.interpolate(input=seg, size=(h, w), mode='bilinear', align_corners=True)
        pred_aux = F.interpolate(input=seg_aux, size=(h, w), mode='bilinear', align_corners=True)
        loss = self.seg_criterion([pred_aux, pred], target)
        if embedding is not None:
            # Contrast mining uses the argmax prediction at 'seg' resolution.
            _, predict = torch.max(seg, 1)
            loss_contrast = self.contrast_criterion(embedding, target, predict)
            return loss + self.loss_weight * loss_contrast
        return loss
| true | true |
7900d46a2baa198770770d7af12bb901c90491f1 | 10,096 | py | Python | arvet/batch_analysis/tests/test_task.py | jskinn/arvet | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | [
"BSD-2-Clause"
] | 2 | 2021-05-27T21:48:34.000Z | 2021-06-12T02:58:44.000Z | arvet/batch_analysis/tests/test_task.py | jskinn/arvet | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | [
"BSD-2-Clause"
] | null | null | null | arvet/batch_analysis/tests/test_task.py | jskinn/arvet | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017, John Skinner
import unittest
import numpy as np
import arvet.database.tests.database_connection as dbconn
from arvet.config.path_manager import PathManager
import arvet.batch_analysis.task as task
class MockTask(task.Task):
    """Minimal concrete Task: the tests below exercise only state transitions."""
    def run_task(self, path_manager: PathManager):
        # Execution is irrelevant to these tests, so do nothing.
        pass
    def get_unique_name(self) -> str:
        # Unique per stored document, keyed on the database primary key.
        return "mock_task_{0}".format(self.pk)
class TestTaskDatabase(unittest.TestCase):
    """Round-trip persistence tests for Task documents."""

    @classmethod
    def setUpClass(cls):
        dbconn.connect_to_test_db()

    def setUp(self):
        # Start every test from an empty collection so the counts below hold.
        task.Task._mongometa.collection.drop()

    @classmethod
    def tearDownClass(cls):
        # Leave no test documents behind.
        task.Task._mongometa.collection.drop()

    def _assert_round_trip(self, obj):
        """Load all stored tasks, check *obj* comes back first, return them."""
        stored = list(task.Task.objects.all())
        self.assertGreaterEqual(len(stored), 1)
        self.assertEqual(stored[0], obj)
        return stored

    def test_stores_and_loads_simple(self):
        obj = MockTask(state=task.JobState.UNSTARTED)
        obj.save()
        stored = self._assert_round_trip(obj)
        stored[0].delete()

    def test_stores_and_loads_all_params(self):
        obj = MockTask(
            state=task.JobState.RUNNING,
            node_id='test-hpc',
            job_id=15,
            num_cpus=3,
            num_gpus=150,
            memory_requirements='4KB',
            expected_duration='100:00:00'
        )
        obj.save()
        stored = self._assert_round_trip(obj)
        stored[0].delete()

    def test_stores_and_loads_after_change_state(self):
        obj = MockTask(
            state=task.JobState.RUNNING,
            node_id='test-hpc',
            job_id=15,
            num_cpus=3,
            num_gpus=150,
            memory_requirements='4KB',
            expected_duration='100:00:00'
        )
        obj.save()
        self._assert_round_trip(obj)

        # The document must stay loadable after every state transition.
        obj.mark_job_failed()
        obj.save()
        self._assert_round_trip(obj)

        obj.mark_job_started('test_node', 143)
        obj.save()
        self._assert_round_trip(obj)

        obj.mark_job_complete()
        obj.save()
        self._assert_round_trip(obj)
class TestTask(unittest.TestCase):
    """State-machine tests for the Task job-state transitions."""

    def _assert_flags(self, subject, unstarted=None, running=None, finished=None):
        """Assert only the state flags given as True/False; None flags are skipped."""
        checks = (
            (unstarted, subject.is_unstarted),
            (running, subject.is_running),
            (finished, subject.is_finished),
        )
        for expected, actual in checks:
            if expected is True:
                self.assertTrue(actual)
            elif expected is False:
                self.assertFalse(actual)

    def _assert_no_job_info(self, subject):
        """A task that is not running must not remember a node or job id."""
        self.assertIsNone(subject.node_id)
        self.assertIsNone(subject.job_id)

    def test_mark_job_started_changes_unstarted_to_running(self):
        subject = MockTask(state=task.JobState.UNSTARTED)
        self._assert_flags(subject, unstarted=True, running=False)
        subject.mark_job_started('test', 12)
        self._assert_flags(subject, unstarted=False, running=True)

    def test_mark_job_started_doesnt_affect_already_running_jobs(self):
        subject = MockTask(state=task.JobState.RUNNING, node_id='external', job_id=3)
        self._assert_flags(subject, unstarted=False, running=True, finished=False)
        subject.mark_job_started('test', 12)
        self._assert_flags(subject, unstarted=False, running=True, finished=False)
        # The original job identity must be preserved.
        self.assertEqual('external', subject.node_id)
        self.assertEqual(3, subject.job_id)

    def test_mark_job_started_doesnt_affect_finished_jobs(self):
        subject = MockTask(state=task.JobState.DONE)
        self._assert_flags(subject, unstarted=False, running=False, finished=True)
        subject.mark_job_started('test', 12)
        self._assert_flags(subject, unstarted=False, running=False, finished=True)
        self._assert_no_job_info(subject)

    def test_mark_job_failed_changes_running_to_unstarted(self):
        subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5)
        self._assert_flags(subject, unstarted=False, running=True)
        subject.mark_job_failed()
        self._assert_flags(subject, unstarted=True, running=False)
        self._assert_no_job_info(subject)

    def test_mark_job_failed_increases_failed_count(self):
        subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5, failure_count=4)
        self._assert_flags(subject, unstarted=False, running=True)
        subject.mark_job_failed()
        self._assert_flags(subject, unstarted=True, running=False)
        self.assertEqual(5, subject.failure_count)

    def test_mark_job_failed_doesnt_affect_unstarted_jobs(self):
        subject = MockTask(state=task.JobState.UNSTARTED)
        self._assert_flags(subject, unstarted=True, finished=False)
        subject.mark_job_failed()
        self._assert_flags(subject, unstarted=True, finished=False)
        # Failing an unstarted job must not count as a failure.
        self.assertEqual(0, subject.failure_count)

    def test_mark_job_failed_doesnt_affect_finished_jobs(self):
        subject = MockTask(state=task.JobState.DONE)
        self._assert_flags(subject, unstarted=False, running=False, finished=True)
        subject.mark_job_failed()
        self._assert_flags(subject, unstarted=False, running=False, finished=True)
        self._assert_no_job_info(subject)

    def test_mark_job_complete_changes_running_to_finished(self):
        subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5)
        self._assert_flags(subject, unstarted=False, running=True)
        subject.mark_job_complete()
        self._assert_flags(subject, running=False, finished=True)
        self._assert_no_job_info(subject)

    def test_mark_job_complete_doesnt_affect_unstarted_jobs(self):
        subject = MockTask(state=task.JobState.UNSTARTED)
        self._assert_flags(subject, unstarted=True, running=False, finished=False)
        subject.mark_job_complete()
        self._assert_flags(subject, unstarted=True, running=False, finished=False)

    def test_mark_job_complete_doesnt_affect_finished_jobs(self):
        subject = MockTask(state=task.JobState.DONE)
        self._assert_flags(subject, unstarted=False, running=False, finished=True)
        subject.mark_job_complete()
        self._assert_flags(subject, unstarted=False, running=False, finished=True)
        self._assert_no_job_info(subject)

    def test_change_job_id_doesnt_affect_state(self):
        subject = MockTask(state=task.JobState.RUNNING)
        self._assert_flags(subject, unstarted=False, running=True, finished=False)
        subject.change_job_id('test', 12)
        self._assert_flags(subject, unstarted=False, running=True, finished=False)

    def test_change_job_id_changes_job_info(self):
        subject = MockTask(state=task.JobState.RUNNING, node_id='external', job_id=3)
        self.assertEqual('external', subject.node_id)
        self.assertEqual(3, subject.job_id)
        subject.change_job_id('test', 12)
        self.assertEqual('test', subject.node_id)
        self.assertEqual(12, subject.job_id)

    def test_change_job_id_doesnt_affect_unstarted_jobs(self):
        subject = MockTask(state=task.JobState.UNSTARTED)
        self._assert_flags(subject, unstarted=True)
        subject.change_job_id('test', 12)
        self._assert_flags(subject, unstarted=True)
        self._assert_no_job_info(subject)

    def test_change_job_id_doesnt_affect_finished_jobs(self):
        subject = MockTask(state=task.JobState.DONE, node_id='external', job_id=3)
        self._assert_flags(subject, finished=True)
        self.assertEqual('external', subject.node_id)
        self.assertEqual(3, subject.job_id)
        subject.change_job_id('test', 12)
        self._assert_flags(subject, finished=True)
        self.assertEqual('external', subject.node_id)
        self.assertEqual(3, subject.job_id)

    def test_state_remains_consistent(self):
        # Fixed seed keeps the fuzzed action sequence reproducible.
        rng = np.random.RandomState(144135)
        subject = MockTask(state=task.JobState.UNSTARTED)
        for step in range(50):
            # mark_job_complete only becomes a candidate action after step 30.
            action = rng.randint(0, 4 if step > 30 else 3)
            if step > 30 and action == 3:
                subject.mark_job_complete()
            elif action == 2:
                subject.change_job_id('external', rng.randint(0, 1000))
            elif action == 1:
                subject.mark_job_started('test', rng.randint(0, 1000))
            else:
                subject.mark_job_failed()
            # Invariant: node/job ids exist exactly while the task is running.
            if subject.is_unstarted or subject.is_finished:
                self._assert_no_job_info(subject)
            else:
                self.assertIsNotNone(subject.node_id)
                self.assertIsNotNone(subject.job_id)
| 38.830769 | 98 | 0.682448 |
import unittest
import numpy as np
import arvet.database.tests.database_connection as dbconn
from arvet.config.path_manager import PathManager
import arvet.batch_analysis.task as task
class MockTask(task.Task):
def run_task(self, path_manager: PathManager):
pass
def get_unique_name(self) -> str:
return "mock_task_{0}".format(self.pk)
class TestTaskDatabase(unittest.TestCase):
@classmethod
def setUpClass(cls):
dbconn.connect_to_test_db()
def setUp(self):
task.Task._mongometa.collection.drop()
@classmethod
def tearDownClass(cls):
task.Task._mongometa.collection.drop()
def test_stores_and_loads_simple(self):
obj = MockTask(state=task.JobState.UNSTARTED)
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
all_entities[0].delete()
def test_stores_and_loads_all_params(self):
obj = MockTask(
state=task.JobState.RUNNING,
node_id='test-hpc',
job_id=15,
num_cpus=3,
num_gpus=150,
memory_requirements='4KB',
expected_duration='100:00:00'
)
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
all_entities[0].delete()
def test_stores_and_loads_after_change_state(self):
obj = MockTask(
state=task.JobState.RUNNING,
node_id='test-hpc',
job_id=15,
num_cpus=3,
num_gpus=150,
memory_requirements='4KB',
expected_duration='100:00:00'
)
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
obj.mark_job_failed()
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
obj.mark_job_started('test_node', 143)
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
obj.mark_job_complete()
obj.save()
all_entities = list(task.Task.objects.all())
self.assertGreaterEqual(len(all_entities), 1)
self.assertEqual(all_entities[0], obj)
class TestTask(unittest.TestCase):
def test_mark_job_started_changes_unstarted_to_running(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
subject.mark_job_started('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
def test_mark_job_started_doesnt_affect_already_running_jobs(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='external', job_id=3)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
subject.mark_job_started('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
def test_mark_job_started_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
subject.mark_job_started('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_failed_changes_running_to_unstarted(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
subject.mark_job_failed()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_failed_increases_failed_count(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5, failure_count=4)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
subject.mark_job_failed()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertEqual(5, subject.failure_count)
def test_mark_job_failed_doesnt_affect_unstarted_jobs(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_finished)
subject.mark_job_failed()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_finished)
self.assertEqual(0, subject.failure_count)
def test_mark_job_failed_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
subject.mark_job_failed()
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_complete_changes_running_to_finished(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='test', job_id=5)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
subject.mark_job_complete()
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_mark_job_complete_doesnt_affect_unstarted_jobs(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertFalse(subject.is_finished)
subject.mark_job_complete()
self.assertTrue(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertFalse(subject.is_finished)
def test_mark_job_complete_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE)
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
subject.mark_job_complete()
self.assertFalse(subject.is_unstarted)
self.assertFalse(subject.is_running)
self.assertTrue(subject.is_finished)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_change_job_id_doesnt_affect_state(self):
subject = MockTask(state=task.JobState.RUNNING)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
subject.change_job_id('test', 12)
self.assertFalse(subject.is_unstarted)
self.assertTrue(subject.is_running)
self.assertFalse(subject.is_finished)
def test_change_job_id_changes_job_info(self):
subject = MockTask(state=task.JobState.RUNNING, node_id='external', job_id=3)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
subject.change_job_id('test', 12)
self.assertEqual('test', subject.node_id)
self.assertEqual(12, subject.job_id)
def test_change_job_id_doesnt_affect_unstarted_jobs(self):
subject = MockTask(state=task.JobState.UNSTARTED)
self.assertTrue(subject.is_unstarted)
subject.change_job_id('test', 12)
self.assertTrue(subject.is_unstarted)
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
def test_change_job_id_doesnt_affect_finished_jobs(self):
subject = MockTask(state=task.JobState.DONE, node_id='external', job_id=3)
self.assertTrue(subject.is_finished)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
subject.change_job_id('test', 12)
self.assertTrue(subject.is_finished)
self.assertEqual('external', subject.node_id)
self.assertEqual(3, subject.job_id)
def test_state_remains_consistent(self):
random = np.random.RandomState(144135)
subject = MockTask(state=task.JobState.UNSTARTED)
for idx in range(50):
change = random.randint(0, 4 if idx > 30 else 3)
if idx > 30 and change == 3:
subject.mark_job_complete()
elif change == 2:
subject.change_job_id('external', random.randint(0, 1000))
elif change == 1:
subject.mark_job_started('test', random.randint(0, 1000))
else:
subject.mark_job_failed()
if subject.is_unstarted or subject.is_finished:
self.assertIsNone(subject.node_id)
self.assertIsNone(subject.job_id)
else:
self.assertIsNotNone(subject.node_id)
self.assertIsNotNone(subject.job_id)
| true | true |
7900d49d1d2eaf87ea485b7e74eda877b00c7350 | 221 | py | Python | mocks/colorlamp/driver/handler.py | NetSys/dspace | c3e2942501288ae06b41d2daf1b81424c812b34d | [
"Apache-2.0"
] | 8 | 2021-05-28T13:17:34.000Z | 2021-11-16T07:55:52.000Z | mocks/colorlamp/driver/handler.py | digi-project/sosp21-artifact | 1b4a11c648e456c9ff9d74f16b09f4238d6694a0 | [
"BSD-3-Clause"
] | 15 | 2021-05-25T18:07:13.000Z | 2022-01-03T20:00:59.000Z | mocks/colorlamp/driver/handler.py | isabella232/dspace | c3e2942501288ae06b41d2daf1b81424c812b34d | [
"Apache-2.0"
] | 4 | 2021-05-23T21:40:45.000Z | 2021-05-31T12:27:44.000Z | import digi
import digi.on as on
@on.control
def h0(c):
    """Reconcile each control entry: copy its "intent" into "status",
    falling back to the existing "status" (or "undef") when no intent is set.
    """
    # The keys are never used, so iterate the values directly
    # (the original unpacked .items() and discarded the key).
    for spec in c.values():
        spec["status"] = spec.get("intent", spec.get("status", "undef"))
# Start the digi driver event loop when executed as a script.
if __name__ == '__main__':
    digi.run()
| 15.785714 | 53 | 0.502262 | import digi
import digi.on as on
@on.control
def h0(c):
for k, v in c.items():
v["status"] = v.get("intent",
v.get("status", "undef"))
if __name__ == '__main__':
digi.run()
| true | true |
7900d5ab0506baaaa16a58f67a048474145f0bb8 | 1,115 | py | Python | plugins/speaker_sonos.py | mrusme/melon | c84fcf7762e1f2a3bd6ac904889107fa6d0240e6 | [
"MIT"
] | 4 | 2019-04-15T22:36:26.000Z | 2022-03-19T05:18:27.000Z | plugins/speaker_sonos.py | mrusme/melon | c84fcf7762e1f2a3bd6ac904889107fa6d0240e6 | [
"MIT"
] | null | null | null | plugins/speaker_sonos.py | mrusme/melon | c84fcf7762e1f2a3bd6ac904889107fa6d0240e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf8
from soco import SoCo
import socket
# http://docs.python-soco.com/en/latest/getting_started.html
class SpeakerSonos:
    """Drives a Sonos speaker through the ``soco`` library."""

    def __init__(self):
        print("SpeakerSonos initialized!")

    def do(self, params):
        """Apply the actions named in *params* to the speaker at params['host'].

        Each recognised key triggers one speaker action; keys are checked in a
        fixed order, and 'next'/'previous' and 'play'/'pause' are mutually
        exclusive pairs.
        """
        device = SoCo(socket.gethostbyname(params['host']))
        print(device.groups)

        if 'volume' in params:
            device.volume = params['volume']
        if 'clear_queue' in params:
            device.clear_queue()
        if 'add_playlist_id_to_queue' in params:
            # The value is an index into the speaker's stored Sonos playlists.
            chosen = device.get_sonos_playlists()[params['add_playlist_id_to_queue']]
            device.add_uri_to_queue(chosen.resources[0].uri)
        if 'switch_to_tv' in params:
            device.switch_to_tv()
        if 'next' in params:
            device.next()
        elif 'previous' in params:
            device.previous()
        if 'play' in params:
            device.play()
        elif 'pause' in params:
            device.pause()
        if 'set_sleep_timer' in params:
            # Value is given in minutes; the soco API expects seconds.
            device.set_sleep_timer(60 * params['set_sleep_timer'])
| 27.195122 | 88 | 0.613453 |
from soco import SoCo
import socket
class SpeakerSonos:
def __init__(self):
print("SpeakerSonos initialized!")
def do(self, params):
speaker = SoCo(socket.gethostbyname(params['host']))
print(speaker.groups)
if 'volume' in params:
speaker.volume = params['volume']
if 'clear_queue' in params:
speaker.clear_queue()
if 'add_playlist_id_to_queue' in params:
playlist = speaker.get_sonos_playlists()[params['add_playlist_id_to_queue']]
speaker.add_uri_to_queue(playlist.resources[0].uri)
if 'switch_to_tv' in params:
speaker.switch_to_tv()
if 'next' in params:
speaker.next()
elif 'previous' in params:
speaker.previous()
if 'play' in params:
speaker.play()
elif 'pause' in params:
speaker.pause()
if 'set_sleep_timer' in params:
speaker.set_sleep_timer(params['set_sleep_timer'] * 60)
| true | true |
7900d64adfee9736390458fffaa5666126ab6721 | 673 | py | Python | app_common/maps/type_bank.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 1 | 2020-06-21T04:08:26.000Z | 2020-06-21T04:08:26.000Z | app_common/maps/type_bank.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 13 | 2019-10-18T17:19:32.000Z | 2022-01-13T00:44:43.000Z | app_common/maps/type_bank.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 5 | 2019-02-07T03:15:16.000Z | 2021-09-04T14:06:28.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: type_bank.py
@time: 2019-08-17 18:23
"""
from __future__ import unicode_literals
from flask_babel import lazy_gettext as _
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT, DEFAULT_SELECT_CHOICES_INT
# Bank account types (1: basic account, 2: general account)
TYPE_BANK_BASIC = 1
TYPE_BANK_GENERAL = 2

TYPE_BANK_DICT = {
    TYPE_BANK_BASIC: _('Basic Account'),  # basic (corporate) account
    TYPE_BANK_GENERAL: _('General Account'),  # general (corporate) account
}

# list(...) is required on Python 3, where dict.items() returns a view that
# cannot be concatenated to a list; on Python 2 it is a harmless copy.
TYPE_BANK_SELECT_CHOICES = DEFAULT_SELECT_CHOICES_INT + list(TYPE_BANK_DICT.items())  # select options
TYPE_BANK_SEARCH_CHOICES = DEFAULT_SEARCH_CHOICES_INT + list(TYPE_BANK_DICT.items())  # search options
| 24.035714 | 90 | 0.768202 |
from __future__ import unicode_literals
from flask_babel import lazy_gettext as _
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT, DEFAULT_SELECT_CHOICES_INT
TYPE_BANK_BASIC = 1
TYPE_BANK_GENERAL = 2
TYPE_BANK_DICT = {
TYPE_BANK_BASIC: _('Basic Account'),
TYPE_BANK_GENERAL: _('General Account'),
}
TYPE_BANK_SELECT_CHOICES = DEFAULT_SELECT_CHOICES_INT + TYPE_BANK_DICT.items()
TYPE_BANK_SEARCH_CHOICES = DEFAULT_SEARCH_CHOICES_INT + TYPE_BANK_DICT.items()
| true | true |
7900d6acf9be3cb2028ad605adb2bd8c32e6bb7f | 784 | py | Python | ws2122-lspm/Lib/site-packages/pm4py/visualization/decisiontree/__init__.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | ws2122-lspm/Lib/site-packages/pm4py/visualization/decisiontree/__init__.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | ws2122-lspm/Lib/site-packages/pm4py/visualization/decisiontree/__init__.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.visualization.decisiontree import variants, visualizer
| 43.555556 | 76 | 0.741071 | from pm4py.visualization.decisiontree import variants, visualizer
| true | true |
7900d777362f253e961712b251030149b01865cf | 948 | py | Python | cpu.py | SudoSpartanDan/CribbagePythonGame | 1c71663b721a4048713d9616d1540953c6729bd8 | [
"MIT"
] | null | null | null | cpu.py | SudoSpartanDan/CribbagePythonGame | 1c71663b721a4048713d9616d1540953c6729bd8 | [
"MIT"
] | null | null | null | cpu.py | SudoSpartanDan/CribbagePythonGame | 1c71663b721a4048713d9616d1540953c6729bd8 | [
"MIT"
] | null | null | null | import random
from player import Player
from hand import Hand
class CPU(Player):
    """Computer-controlled cribbage player that chooses its cards at random."""

    def __init__(self, name: str):
        super().__init__(name)
        self.hand = Hand()

    def discard(self):
        """Remove and return a random card from the hand.

        Raises:
            RuntimeError: if the hand is missing or empty.
        """
        if self.hand is None or len(self.hand) <= 0:
            raise RuntimeError('No cards to discard')
        return self.hand.pop(random.randrange(len(self.hand)))

    def play(self, currentPlayPointLimit):
        """Play a random card whose point value fits under the current limit.

        Raises:
            RuntimeError: if no play hand exists, it is empty, or no card fits
                under the limit.
        """
        print('{0}\'s Hand: {1}'.format(self.name, str(self.playHand)))
        if self.playHand is None or len(self.playHand) <= 0:
            raise RuntimeError('No play hand was created or it is empty')
        # Indexes of the cards that can legally be played without busting.
        playable = [i for i, card in enumerate(self.playHand)
                    if card.valuePoints <= currentPlayPointLimit]
        if not playable:
            # The original fell through to random.randrange(0), which raised a
            # bare ValueError; fail with an explicit, descriptive error instead.
            raise RuntimeError('No playable cards under the current point limit')
        return self.playHand.pop(random.choice(playable))
from player import Player
from hand import Hand
class CPU(Player):
def __init__(self, name: str):
super().__init__(name)
self.hand = Hand()
def discard(self):
if(self.hand == None or len(self.hand) <= 0):
raise RuntimeError('No cards to discard')
return self.hand.pop(random.randrange(len(self.hand)))
def play(self, currentPlayPointLimit):
print('{0}\'s Hand: {1}'.format(self.name, str(self.playHand)))
if(self.playHand == None or len(self.playHand) <= 0):
raise RuntimeError('No play hand was created or it is empty')
playableCardIndexes = []
for i, card in enumerate(self.playHand):
if(card.valuePoints <= currentPlayPointLimit):
playableCardIndexes.append(i)
cardToPlayIndex = playableCardIndexes[random.randrange(len(playableCardIndexes))]
return self.playHand.pop(cardToPlayIndex) | true | true |
7900d8592362e59cdda73d392170065dfa23de31 | 1,572 | py | Python | qcschema/dev/wavefunction/result_wavefunction.py | bennybp/QCSchema | 25454ee1f4b971db7dc929b0861070bb8535bf51 | [
"BSD-3-Clause"
] | 58 | 2018-10-18T18:28:45.000Z | 2022-01-15T12:48:47.000Z | qcschema/dev/wavefunction/result_wavefunction.py | chenxin199261/QCSchema | 54fabe98ae3f31994371e0bfdfc6739dc5a84581 | [
"BSD-3-Clause"
] | 37 | 2017-06-12T21:21:07.000Z | 2018-09-10T15:29:33.000Z | qcschema/dev/wavefunction/result_wavefunction.py | chenxin199261/QCSchema | 54fabe98ae3f31994371e0bfdfc6739dc5a84581 | [
"BSD-3-Clause"
] | 22 | 2017-06-14T21:35:50.000Z | 2018-06-21T09:39:17.000Z | """
The primary specified return wavefunction quantities.
"""
# Schema entries for the wavefunction quantities of the primary return.
# Each quantity comes in an alpha/beta spin pair: orbitals, densities, Fock
# matrices, orbital eigenvalues and orbital occupations.
result_wavefunction = {
    # Orbitals
    "orbitals_a": {
        "type": "string",
        "description": "Alpha-spin orbitals in the AO basis of the primary return. "
    },
    "orbitals_b": {
        "type": "string",
        "description": "Beta-spin orbitals in the AO basis of the primary return."
    },
    # Density
    "density_a": {
        "type": "string",
        "description": "Alpha-spin density in the AO basis of the primary return."
    },
    "density_b": {
        "type": "string",
        "description": "Beta-spin density in the AO basis of the primary return."
    },
    # Fock matrix
    "fock_a": {
        "type": "string",
        "description": "Alpha-spin Fock matrix in the AO basis of the primary return."
    },
    "fock_b": {
        "type": "string",
        "description": "Beta-spin Fock matrix in the AO basis of the primary return."
    },
    # Eigenvalues
    "eigenvalues_a": {
        "type": "string",
        "description": "Alpha-spin orbital eigenvalues of the primary return."
    },
    "eigenvalues_b": {
        "type": "string",
        "description": "Beta-spin orbital eigenvalues of the primary return."
    },
    # Occupations
    "occupations_a": {
        "type": "string",
        "description": "Alpha-spin orbital occupations of the primary return."
    },
    "occupations_b": {
        "type": "string",
        "description": "Beta-spin orbital occupations of the primary return."
    },
}
| 22.782609 | 82 | 0.683206 |
result_wavefunction = {}
result_wavefunction["orbitals_a"] = {
"type": "string",
"description": "Alpha-spin orbitals in the AO basis of the primary return. "
}
result_wavefunction["orbitals_b"] = {
"type": "string",
"description": "Beta-spin orbitals in the AO basis of the primary return."
}
result_wavefunction["density_a"] = {
"type": "string",
"description": "Alpha-spin density in the AO basis of the primary return."
}
result_wavefunction["density_b"] = {
"type": "string",
"description": "Beta-spin density in the AO basis of the primary return."
}
result_wavefunction["fock_a"] = {
"type": "string",
"description": "Alpha-spin Fock matrix in the AO basis of the primary return."
}
result_wavefunction["fock_b"] = {
"type": "string",
"description": "Beta-spin Fock matrix in the AO basis of the primary return."
}
result_wavefunction["eigenvalues_a"] = {
"type": "string",
"description": "Alpha-spin orbital eigenvalues of the primary return."
}
result_wavefunction["eigenvalues_b"] = {
"type": "string",
"description": "Beta-spin orbital eigenvalues of the primary return."
}
result_wavefunction["occupations_a"] = {
"type": "string",
"description": "Alpha-spin orbital occupations of the primary return."
}
result_wavefunction["occupations_b"] = {
"type": "string",
"description": "Beta-spin orbital occupations of the primary return."
}
| true | true |
7900d8af5798146104427537eab428dc891105e5 | 270 | py | Python | Lib/site-packages/jupyterhub/apihandlers/__init__.py | KarmaScripter/PiggyPy | 25ba1d0c8933a0cb655f09db6c228f74f4d52894 | [
"MIT"
] | null | null | null | Lib/site-packages/jupyterhub/apihandlers/__init__.py | KarmaScripter/PiggyPy | 25ba1d0c8933a0cb655f09db6c228f74f4d52894 | [
"MIT"
] | null | null | null | Lib/site-packages/jupyterhub/apihandlers/__init__.py | KarmaScripter/PiggyPy | 25ba1d0c8933a0cb655f09db6c228f74f4d52894 | [
"MIT"
] | null | null | null | from . import auth
from . import groups
from . import hub
from . import proxy
from . import services
from . import users
from .base import *
# Aggregate the handler tables contributed by each API submodule into one list.
default_handlers = [
    handler
    for mod in (auth, hub, proxy, users, groups, services)
    for handler in mod.default_handlers
]
| 22.5 | 55 | 0.744444 | from . import auth
from . import groups
from . import hub
from . import proxy
from . import services
from . import users
from .base import *
default_handlers = []
for mod in (auth, hub, proxy, users, groups, services):
default_handlers.extend(mod.default_handlers)
| true | true |
7900d960bb77c55b384e70a32bb712279682d68e | 2,002 | py | Python | Score/Cosine_Score.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 8 | 2020-08-26T13:32:56.000Z | 2022-01-18T21:05:46.000Z | Score/Cosine_Score.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 1 | 2020-07-24T17:06:16.000Z | 2020-07-24T17:06:16.000Z | Score/Cosine_Score.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 5 | 2020-12-11T03:31:15.000Z | 2021-11-23T15:57:55.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: Cosine.py
@Time: 19-6-26 下午9:43
@Overview: Implement Cosine Score for speaker identification!
Enrollment set files will be in the 'Data/enroll_set.npy' and the classes-to-index file is 'Data/enroll_classes.npy'
Test set files are in the 'Data/test_set.npy' and the utterances-to-index file is 'Data/test_classes.npy'
"""
import numpy as np
import torch.nn as nn
# Paths to the pre-extracted x-vector embedding dumps and the label maps
# (the *_CLASS files map names/utterance ids to the numeric labels used below).
ENROLL_FILE = "Data/xvector/enroll/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
ENROLL_CLASS = "Data/enroll_classes.npy"
TEST_FILE = "Data/xvector/test/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
TEST_CLASS = "Data/test_classes.npy"
# test_vec = np.array([1,2,3,4])
# refe_vec = np.array([8,3,3,4])
def normalize(narray, order=2, axis=1):
    """Normalize *narray* to unit norm along *axis*.

    A small epsilon is added to the norm so that all-zero vectors do not
    divide by zero.  (The original added eps to the quotient instead, which
    biased every normalized component and still allowed division by zero.)
    """
    norm = np.linalg.norm(narray, ord=order, axis=axis, keepdims=True)
    return narray / (norm + np.finfo(np.float32).eps)


def cos_dis(test_vec, refe_vec):
    """Cosine-similarity matrix between *test_vec* and *refe_vec*.

    Both inputs are normalized along axis 0 before the inner product.
    """
    unit_test = normalize(test_vec, axis=0)
    unit_refe = normalize(refe_vec, axis=0)
    return np.matmul(unit_test, unit_refe.T)
# Load the serialized embeddings and label maps produced by the extraction step.
enroll_features = np.load(ENROLL_FILE, allow_pickle=True)
enroll_classes = np.load(ENROLL_CLASS, allow_pickle=True).item()  # dict: name -> numeric label
test_features = np.load(TEST_FILE, allow_pickle=True)
test_classes = np.load(TEST_CLASS, allow_pickle=True)
# Average all enrolment embeddings belonging to the same class to build one
# 512-dim model vector per enrolled speaker.
enroll_dict = dict()
for item in enroll_classes:
    num=0
    feat = np.zeros([512], dtype=float)
    for (label, feature) in enroll_features:
        if label==enroll_classes[item]:
            feat += feature.detach().numpy()
            num += 1
    # NOTE(review): crashes with ZeroDivisionError if a class has no features.
    enroll_dict[item] = feat/num
# Score every test utterance against every enrolled model.
# NOTE(review): despite the file's stated purpose, this uses the Euclidean
# distance (np.linalg.norm of the difference), not the cos_dis() helper
# defined above -- confirm which metric is intended.
similarity = {}
for (label, feature) in test_features:
    utt = {}
    for item in enroll_dict:
        utt[item] = np.linalg.norm(feature.detach().numpy()-enroll_dict[item])
    # Map the numeric label back to its utterance id for reporting.
    for utterance in test_classes:
        if int(utterance[1])==label.item():
            test_id = utterance[0]
    similarity[test_id]=utt
print(similarity)
# cos_dis(test_vec, refe_vec)
| 31.777778 | 116 | 0.708791 |
import numpy as np
import torch.nn as nn
ENROLL_FILE = "Data/xvector/enroll/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
ENROLL_CLASS = "Data/enroll_classes.npy"
TEST_FILE = "Data/xvector/test/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
TEST_CLASS = "Data/test_classes.npy"
def normalize(narray, order=2, axis=1):
norm = np.linalg.norm(narray, ord=order, axis=axis, keepdims=True)
return(narray/norm + np.finfo(np.float32).eps)
def cos_dis(test_vec, refe_vec):
vec1 = normalize(test_vec, axis=0)
vec2 = normalize(refe_vec, axis=0)
dis = np.matmul(vec1, vec2.T)
return(dis)
enroll_features = np.load(ENROLL_FILE, allow_pickle=True)
enroll_classes = np.load(ENROLL_CLASS, allow_pickle=True).item()
test_features = np.load(TEST_FILE, allow_pickle=True)
test_classes = np.load(TEST_CLASS, allow_pickle=True)
enroll_dict = dict()
for item in enroll_classes:
num=0
feat = np.zeros([512], dtype=float)
for (label, feature) in enroll_features:
if label==enroll_classes[item]:
feat += feature.detach().numpy()
num += 1
enroll_dict[item] = feat/num
similarity = {}
for (label, feature) in test_features:
utt = {}
for item in enroll_dict:
utt[item] = np.linalg.norm(feature.detach().numpy()-enroll_dict[item])
for utterance in test_classes:
if int(utterance[1])==label.item():
test_id = utterance[0]
similarity[test_id]=utt
print(similarity)
| true | true |
7900d9a40824d06c9887dc864384391121ea5c4d | 2,455 | py | Python | qiskit/extensions/standard/rzz.py | christians94/qiskit-sdk-py | 5c1c68a5aa3dcccdf5c10f9eb307383ebb40826b | [
"Apache-2.0"
] | null | null | null | qiskit/extensions/standard/rzz.py | christians94/qiskit-sdk-py | 5c1c68a5aa3dcccdf5c10f9eb307383ebb40826b | [
"Apache-2.0"
] | 1 | 2018-08-08T17:56:06.000Z | 2018-08-08T17:56:06.000Z | qiskit/extensions/standard/rzz.py | christians94/qiskit-sdk-py | 5c1c68a5aa3dcccdf5c10f9eb307383ebb40826b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
two-qubit ZZ-rotation gate.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
class RZZGate(Gate):
    """Two-qubit ZZ-rotation gate."""

    def __init__(self, theta, ctl, tgt, circ=None):
        """Create a new rzz gate with angle theta on qubits ctl and tgt."""
        super().__init__("rzz", [theta], [ctl, tgt], circ)

    def qasm(self):
        """Return the OPENQASM string for this gate."""
        qubit_a, qubit_b = self.arg
        angle = self.param[0]
        text = "rzz(%s) %s[%d],%s[%d];" % (angle,
                                           qubit_a[0].name, qubit_a[1],
                                           qubit_b[0].name, qubit_b[1])
        return self._qasmif(text)

    def inverse(self):
        """Invert this gate in place by negating the rotation angle."""
        self.param[0] = -self.param[0]
        return self

    def reapply(self, circ):
        """Reapply this gate to the corresponding qubits in circ."""
        self._modifiers(circ.rzz(self.param[0], self.arg[0], self.arg[1]))
def rzz(self, theta, ctl, tgt):
    """Apply RZZ to circuit."""
    both_registers = (isinstance(ctl, QuantumRegister)
                      and isinstance(tgt, QuantumRegister)
                      and len(ctl) == len(tgt))
    if both_registers:
        # Broadcast: one rzz per paired qubit of the two registers.
        instructions = InstructionSet()
        for index in range(ctl.size):
            instructions.add(self.rzz(theta, (ctl, index), (tgt, index)))
        return instructions
    # Single qubit pair: validate operands, then attach one gate instance.
    self._check_qubit(ctl)
    self._check_qubit(tgt)
    self._check_dups([ctl, tgt])
    return self._attach(RZZGate(theta, ctl, tgt, self))


# Expose rzz as a method on circuits and composite gates.
QuantumCircuit.rzz = rzz
CompositeGate.rzz = rzz
| 33.630137 | 79 | 0.619552 |
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.extensions.standard import header
class RZZGate(Gate):
def __init__(self, theta, ctl, tgt, circ=None):
super().__init__("rzz", [theta], [ctl, tgt], circ)
def qasm(self):
ctl = self.arg[0]
tgt = self.arg[1]
theta = self.param[0]
return self._qasmif("rzz(%s) %s[%d],%s[%d];" % (theta,
ctl[0].name, ctl[1],
tgt[0].name, tgt[1]))
def inverse(self):
self.param[0] = -self.param[0]
return self
def reapply(self, circ):
self._modifiers(circ.rzz(self.param[0], self.arg[0], self.arg[1]))
def rzz(self, theta, ctl, tgt):
if isinstance(ctl, QuantumRegister) and \
isinstance(tgt, QuantumRegister) and len(ctl) == len(tgt):
instructions = InstructionSet()
for i in range(ctl.size):
instructions.add(self.rzz(theta, (ctl, i), (tgt, i)))
return instructions
self._check_qubit(ctl)
self._check_qubit(tgt)
self._check_dups([ctl, tgt])
return self._attach(RZZGate(theta, ctl, tgt, self))
QuantumCircuit.rzz = rzz
CompositeGate.rzz = rzz
| true | true |
7900da9e36f2ba7266ac4dc95795cd987e6d7000 | 6,502 | py | Python | Pipes/pipeline-scripts/NGOPT/ngopt.py | niaid/Nephele | 13f58f86cdb5afc449bdaa26616865b535cf3b25 | [
"Unlicense"
] | 7 | 2017-11-29T02:55:29.000Z | 2021-06-09T19:44:07.000Z | Pipes/pipeline-scripts/NGOPT/ngopt.py | niaid/Nephele | 13f58f86cdb5afc449bdaa26616865b535cf3b25 | [
"Unlicense"
] | 1 | 2018-07-12T18:18:14.000Z | 2018-07-12T18:18:14.000Z | Pipes/pipeline-scripts/NGOPT/ngopt.py | niaid/Nephele | 13f58f86cdb5afc449bdaa26616865b535cf3b25 | [
"Unlicense"
] | 8 | 2017-10-10T09:26:19.000Z | 2021-02-26T21:47:23.000Z | #!/usr/bin/env python
##############################################################
# $Id$
# Project: MiSeq Metagenomic Assembly pipeline for Nephele project
# Language: Python 2.7
# Author: Alex Levitsky
# History: July 2015 Start of development
##############################################################
__author__ = "Alex Levitsky"
__copyright__ = ""
__credits__ = ["Alex Levitsky"]
__license__ = ""
__version__ = "1.0.1-dev"
__maintainer__ = "Alex Levitsky"
__email__ = "levitskyaa@niaid.nih.gov"
__status__ = "Development"
import sys, os, random, time, glob
syscall = lambda cmd: (os.popen(cmd).read()).rstrip("\n")
def read_config( file_name, config ): #########################
   """Parse a comma-separated config file into the *config* dict.

   Each non-empty, non-comment line must look like ``key,value``; the
   pair is stored as ``config[key] = value``. Lines starting with '#'
   and blank/whitespace-only lines are ignored. Fields after the second
   comma are discarded (matching the original behaviour).
   """
   # `with` guarantees the file handle is closed even on an exception.
   with open(file_name, 'r') as config_file:
      for line in config_file:
         # The original called `s.strip()` without keeping the result, so a
         # whitespace-only line slipped past the emptiness check and crashed
         # below on the missing second field. Assigning the stripped value
         # fixes that and also removes the trailing newline in one step.
         s = line.strip()
         if "" == s: # ignore empty lines
            continue
         if "#" == s[:1]: # ignore comments
            continue
         fields = s.split(',')
         config[fields[0]] = fields[1]
### read_config ###
def send2log( message, log_file ): #######################
   """Append *message* to *log_file* prefixed with the current US/Eastern
   date; abort the whole run (exit 777) if the shell append fails."""
   date = syscall("TZ='America/New_York' date")
   # Blank separator line first, then the time-stamped message.
   os.system( "echo >> "+log_file)
   append_cmd = "echo '"+date+' '+message+"' >>"+log_file
   if os.system(append_cmd) != 0:
      sys.exit(777)
### send2log ###
def exec_sys(cmd): #######################
   """Run *cmd* through the shell; abort the whole run (exit 777) on a
   non-zero exit status. (Python 2 `print >>` syntax: this module
   targets Python 2.7.)"""
   #print >> sys.stderr, "Executing:",cmd
   if 0!=os.system(cmd):
      print >> sys.stderr, "ERROR when executing:",cmd
      sys.exit(777)
### exec_sys ###
########### main ##############################
def main():
   """Drive the MiSeq metagenomic assembly pipeline.

   Reads a CSV config file (see read_config), unpacks the reads archive,
   runs the a5 assembly pipeline (either on a pair of FASTQ files or on a
   LIB file), and optionally BLASTs the result. All external work is done
   through shell commands; progress goes to logfile.txt in the cwd.
   """
   if len( sys.argv ) < 2:
      print >> sys.stderr, "\n\n\nUsage: " + sys.argv[0] + " <configuration file>\n\n\n"
      sys.exit(551)
   # Read config file
   conf_file = sys.argv[1]
   if not os.path.isfile( conf_file ):
      print >> sys.stderr, "ERROR: no config file:" + conf_file
      sys.exit(555)
   config = {}
   read_config( conf_file,config )
   work_dir=os.getcwd()
   config['LOG_FILE']='logfile.txt'
   log_file=work_dir+'/'+config['LOG_FILE']
   ##### Define optional and default parameters
   for key in ['INPUT_TYPE', 'R1', 'R2', 'ZIP_FILE', 'LIB_FILE', 'BLAST_STEP','PREFIX']:
      if(key not in config.keys()):
         config[key]=''
   ##### Predefined and calculated options
   # No LIB file means the reads come as a pair of FASTQ files.
   if(''==config['LIB_FILE']):
      config['INPUT_TYPE']='FASTQ_FILES'
   if(''==config['PREFIX']):
      config['PREFIX']='MiSEQ_metagenomic'
   if(''==config['BLAST_STEP']):
      config['BLAST_STEP']='YES'
   send2log( 'MiSeq Metagenomic Assembly pipeline started', log_file )
   # get env.json if available
   if os.path.isfile('./env.json'):
      send2log( 'env.json=', log_file )
      syscall( 'cat ./env.json >> '+log_file)
   # get number of cores (leave one core free when more than one is present)
   config['NUM_OF_PROC']=syscall('cat /proc/cpuinfo | grep processor | wc -l')
   num_proc=int(config['NUM_OF_PROC'])
   if(num_proc > 1):
      num_proc-=1
   config['NUM_OF_PROC']=str(num_proc)
   send2log( 'number of cores='+config['NUM_OF_PROC'], log_file )
   # get machine's memory
   config['MEMORY']=syscall("cat /proc/meminfo | grep MemTotal | awk '{ print $2 }'")
   # NOTE(review): mem is computed but never used below — dead variable?
   mem=int(config['MEMORY'])
   send2log( 'Memory='+config['MEMORY']+'KB', log_file )
   # Sanitize config values (quotes would break the shell commands built
   # below) and echo the effective configuration to the log.
   w="MiSeq Metagenomic Assembly pipeline configuration\n"
   for k in sorted(config.keys()):
      if 'UseCode'==k:
         continue
      config[k]=config[k].replace("\"", "_")
      config[k]=config[k].replace("\'", "_")
      w=w+k+','+config[k]+"\n"
   # print configuration to log file
   send2log( w, log_file )
   ####################################################
   os.chdir(work_dir)
   # unzip reads
   if os.path.isfile(work_dir+'/'+config['ZIP_FILE']):
      # check files extension
      w=''
      if config['ZIP_FILE'][-4:]=='.zip':
         send2log( 'unzip -oqj '+config['ZIP_FILE'], log_file )
         w=syscall('unzip -oqj '+config['ZIP_FILE'])
         send2log( w, log_file )
      if (config['ZIP_FILE'][-7:]=='.tar.gz') or (config['ZIP_FILE'][-4:]=='.tgz'):
         send2log( 'tar -zxvf '+config['ZIP_FILE'], log_file )
         w=syscall('tar -zxvf '+config['ZIP_FILE'])
         send2log( w, log_file )
      if config['ZIP_FILE'][-8:]=='.tar.bz2':
         send2log( 'tar -jxvf '+config['ZIP_FILE'], log_file )
         w=syscall('tar -jxvf '+config['ZIP_FILE'])
         send2log( w, log_file )
      # unzip gzip files if any
      w=''
      w=syscall('ls *.gz')
      if len(w)>3:
         send2log( 'running gzip -d for *.gz files', log_file )
         w=''
         w=syscall('gzip -d *.gz')
   else:
      send2log( "ERROR: no zip archive with reads. Can not continue\n", log_file)
      sys.exit(777)
   if 'FASTQ_FILES'==config['INPUT_TYPE']:
      # check reads files: expect the first two ls entries to be R1/R2
      w=''
      w=syscall('ls *.fastq')
      if len(w)<3:
         w=''
         w=syscall('ls *.fq')
         if len(w)<3:
            send2log( "ERROR: no reads files. Can not continue\n", log_file)
            sys.exit(777)
      l=[]
      l=w.split('\n')
      config['R1']=l[0]
      config['R2']=l[1]
      if not( os.path.exists(work_dir+'/'+config['R1']) and os.path.exists(work_dir+'/'+config['R2']) ):
         send2log( "ERROR: no reads files. Can not continue\n", log_file)
         sys.exit(777)
      cmd='./bin/a5_pipeline.pl '+'--threads='+config['NUM_OF_PROC']+' --end=5 '+config['R1']+' '+config['R2']+' '+config['PREFIX']
      send2log( "Running pipeline:\n"+cmd, log_file )
      w=''
      w=syscall( cmd+' 2>&1' )
      send2log( w, log_file )
   else:
      if os.path.isfile(work_dir+'/'+config['LIB_FILE']):
         send2log("contents of LIB file:", log_file)
         syscall( 'cat '+config['LIB_FILE']+ ' >> ' +log_file)
         send2log("\n", log_file)
      else:
         send2log( "ERROR: no LIB file. Can not continue\n", log_file)
         sys.exit(777)
      #cmd='./bin/a5_pipeline.pl '+config['LIB_FILE']+' '+config['PREFIX']
      # NOTE(review): this branch uses the absolute /opt/a5 path while the
      # FASTQ branch above uses ./bin — confirm which install is intended.
      cmd='/opt/a5/bin/a5_pipeline.pl '+'--threads='+config['NUM_OF_PROC']+' --end=5 '+config['LIB_FILE']+' '+config['PREFIX']
      send2log( "Running pipeline: \n"+cmd, log_file )
      w=''
      w=syscall( cmd+' 2>&1' )
      send2log( w, log_file )
   if 'YES'==config['BLAST_STEP']:
      cmd ='./blast2nr.sh '+config['PREFIX']+' '+config['NUM_OF_PROC']
      send2log( 'Executing:'+cmd, log_file)
      w=syscall(cmd)
      send2log( w, log_file )
   send2log( 'MiSeq Metagenomic Assembly pipeline DONE',log_file )
if __name__ == "__main__":
   main()
| 33.515464 | 131 | 0.557521 | true | true | |
7900dbc6aa4b4d75c778bd208f6acd60c9380ba2 | 758 | py | Python | rssbot/utils.py | autogestion/pubgate-rssbot | 3d87bc0554fadb3e0a3d09d1331478de4188b242 | [
"BSD-3-Clause"
] | 4 | 2018-11-01T05:54:22.000Z | 2022-03-01T13:35:51.000Z | rssbot/utils.py | autogestion/pubgate-rssbot | 3d87bc0554fadb3e0a3d09d1331478de4188b242 | [
"BSD-3-Clause"
] | null | null | null | rssbot/utils.py | autogestion/pubgate-rssbot | 3d87bc0554fadb3e0a3d09d1331478de4188b242 | [
"BSD-3-Clause"
] | 2 | 2018-11-12T14:54:04.000Z | 2020-12-14T19:39:53.000Z | import re
# Matches a self-closing <img .../> tag and captures its src URL.
find_image_scheme = re.compile(r'(?P<image_construction><img\b[^>]*src="(?P<image_url>[^"]+?)"[^>]*?\/>)')


def move_image_to_attachment(content, attachment_object):
    """Strip inline <img> tags out of *content* and mirror each one as a
    Document entry appended to *attachment_object* (mutated in place).

    Returns the content with the image markup removed."""
    matches = find_image_scheme.findall(content)
    if not matches:
        return content
    # Remove the image markup from the post body itself.
    content = find_image_scheme.sub("", content)
    # One attachment per stripped image; findall yields (construction, url).
    for _construction, image_url in matches:
        attachment_object.append({
            "type": "Document",
            "mediaType": "image/jpeg",
            "url": image_url,
            "name": "null"
        })
    return content
| 28.074074 | 106 | 0.604222 | import re
find_image_scheme = re.compile(r'(?P<image_construction><img\b[^>]*src="(?P<image_url>[^"]+?)"[^>]*?\/>)')
# find_link_around_image_scheme = re.compile(r"<a\b[^>]*>(.*?)<img\b(.*?)<\/a>")
def move_image_to_attachment(content, attachment_object):
# collect images from the post body
intext_image_list = re.findall(find_image_scheme, content)
if intext_image_list:
# delete images form text
content = re.sub(find_image_scheme, r"", content)
# insert link to image into attachments
attachment_object += [{
"type": "Document",
"mediaType": "image/jpeg",
"url": image[1],
"name": "null"
} for image in intext_image_list]
return content
| true | true |
7900dc7a3aa3d85c5256af3c449f044adaee7723 | 3,160 | py | Python | tools/calibration/process_dataset_callbacks/collect_results_callback.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | tools/calibration/process_dataset_callbacks/collect_results_callback.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | tools/calibration/process_dataset_callbacks/collect_results_callback.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | """
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import openvino.inference_engine as ie
from ..infer_raw_results import InferRawResults
from ..aggregated_statistics import AggregatedStatistics
class CollectResultsCallback:
    """Accumulates per-inference data during calibration runs: aggregated
    layer statistics, (optionally filtered) raw outputs, and latencies."""

    def __init__(
        self,
        network: ie.IENetwork,
        exec_network: ie.ExecutableNetwork,
        collect_resuls: bool = True,
        collect_layers: set = None,
        collect_aggregated_statistics: bool = True,
        iterations_count: int = 1,
        dataset_size: int = 1):
        if not network:
            raise ValueError("network is not specified")
        if not exec_network:
            raise ValueError("exec_network is not specified")

        self._network = network
        self._exec_network = exec_network
        # Collection switches and sizing used when statistics are created lazily.
        self._collect_results = collect_resuls
        self._collect_layers = collect_layers
        self._collect_aggregated_statistics = collect_aggregated_statistics
        self._iterations_count = iterations_count
        self._dataset_size = dataset_size
        # Accumulators; aggregated statistics are created on first callback.
        self._aggregated_statistics = None
        self._infer_raw_results = InferRawResults() if collect_resuls else None
        self._latencies = list()

    def callback(self, value, latency = None):
        """Record one inference result (a mapping of layer name -> data)."""
        if self._collect_aggregated_statistics:
            # Lazy creation so sizing parameters are only used when needed.
            if not self._aggregated_statistics:
                self._aggregated_statistics = AggregatedStatistics(
                    iterations_count = self._iterations_count,
                    dataset_size = self._dataset_size)
            self._aggregated_statistics.add(self._network, self._exec_network, value)

        if self._collect_results:
            if self._collect_layers:
                # Keep only the layers the caller asked for.
                filtered = {name: value[name] for name in value
                            if name in self._collect_layers}
                self._infer_raw_results.add(filtered)
            else:
                self._infer_raw_results.add(value)

        if latency:
            self._latencies.append(latency)

    @property
    def aggregated_statistics(self) -> AggregatedStatistics:
        return self._aggregated_statistics

    @property
    def infer_raw_result(self) -> InferRawResults:
        return self._infer_raw_results

    @property
    def latencies(self) -> list:
        return self._latencies

    def release(self):
        """Free the accumulated statistics and raw results, if any."""
        if self._aggregated_statistics:
            self._aggregated_statistics.release()
        if self._infer_raw_results:
            self._infer_raw_results.release()

    def get_accuracy_drop(self):
        # This collector does not compute an accuracy drop.
        return None
import openvino.inference_engine as ie
from ..infer_raw_results import InferRawResults
from ..aggregated_statistics import AggregatedStatistics
class CollectResultsCallback:
def __init__(
self,
network: ie.IENetwork,
exec_network: ie.ExecutableNetwork,
collect_resuls: bool = True,
collect_layers: set = None,
collect_aggregated_statistics: bool = True,
iterations_count: int = 1,
dataset_size: int = 1):
if not network:
raise ValueError("network is not specified")
if not exec_network:
raise ValueError("exec_network is not specified")
self._network = network
self._exec_network = exec_network
self._aggregated_statistics = None
self._iterations_count = iterations_count
self._dataset_size = dataset_size
self._collect_results = collect_resuls
self._collect_layers = collect_layers
self._collect_aggregated_statistics = collect_aggregated_statistics
self._infer_raw_results = InferRawResults() if collect_resuls else None
self._latencies = list()
def callback(self, value, latency = None):
if self._collect_aggregated_statistics:
if not self._aggregated_statistics:
self._aggregated_statistics = AggregatedStatistics(
iterations_count = self._iterations_count,
dataset_size = self._dataset_size)
self._aggregated_statistics.add(self._network, self._exec_network, value)
if self._collect_results:
if self._collect_layers:
collect_value = dict()
for layer_name in value:
if layer_name in self._collect_layers:
collect_value[layer_name] = value[layer_name]
self._infer_raw_results.add(collect_value)
else:
self._infer_raw_results.add(value)
if latency:
self._latencies.append(latency)
@property
def aggregated_statistics(self) -> AggregatedStatistics:
return self._aggregated_statistics
@property
def infer_raw_result(self) -> InferRawResults:
return self._infer_raw_results
@property
def latencies(self) -> list:
return self._latencies
def release(self):
if self._aggregated_statistics:
self._aggregated_statistics.release()
if self._infer_raw_results:
self._infer_raw_results.release()
def get_accuracy_drop(self):
return None | true | true |
7900dd2b02d354a18e38927e3530f070279d236b | 9,162 | py | Python | compiler/characterizer/measurements.py | lekez2005/OpenRAM | 608e4b81f1763727e7efe087d591c76956869fe6 | [
"BSD-3-Clause"
] | null | null | null | compiler/characterizer/measurements.py | lekez2005/OpenRAM | 608e4b81f1763727e7efe087d591c76956869fe6 | [
"BSD-3-Clause"
] | null | null | null | compiler/characterizer/measurements.py | lekez2005/OpenRAM | 608e4b81f1763727e7efe087d591c76956869fe6 | [
"BSD-3-Clause"
] | null | null | null | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
from tech import drc, parameter, spice
from abc import ABC, abstractmethod
from .stimuli import *
from .charutils import *
class spice_measurement(ABC):
    """Base class for spice stimulus measurements."""
    def __init__(self, measure_name, measure_scale=None, has_port=True):
        """measure_name scales/identifies the .meas statement; measure_scale,
        if set, multiplies the parsed float result; has_port marks whether
        the measurement is duplicated per port (used for error checking)."""
        #Names must be unique for correct spice simulation, but not enforced here.
        self.name = measure_name
        self.measure_scale = measure_scale
        self.has_port = has_port #Needed for error checking
        #Some meta values used externally. variables are added here for consistency accross the objects
        self.meta_str = None
        self.meta_add_delay = False

    @abstractmethod
    def get_measure_function(self):
        """Return the stimuli.* writer function for this measurement type."""
        return None

    @abstractmethod
    def get_measure_values(self):
        """Return the argument tuple to pass to the writer function."""
        return None

    def write_measure(self, stim_obj, input_tuple):
        """Write this measurement into the stimulus, using the subclass's
        writer function and argument builder."""
        measure_func = self.get_measure_function()
        # PEP 8: identity comparison with None (was `== None`).
        if measure_func is None:
            debug.error("Did not set measure function",1)
        measure_vals = self.get_measure_values(*input_tuple)
        measure_func(stim_obj, *measure_vals)

    def retrieve_measure(self, port=None):
        """Parse the simulator output for this measurement; scale numeric
        results by measure_scale when one was given."""
        self.port_error_check(port)
        if port is not None:
            value = parse_spice_list("timing", "{0}{1}".format(self.name.lower(), port))
        else:
            value = parse_spice_list("timing", "{0}".format(self.name.lower()))
        # Non-float results (e.g. parse failures) and unscaled values pass
        # through unchanged; the exact-type check mirrors parse_spice_list's
        # float-or-string return contract.
        if type(value)!=float or self.measure_scale is None:
            return value
        else:
            return value*self.measure_scale

    def port_error_check(self, port):
        """Validate that a port argument was supplied iff has_port is set."""
        if self.has_port and port is None:
            debug.error("Cannot retrieve measurement, port input was expected.",1)
        elif not self.has_port and port is not None:
            debug.error("Unexpected port input received during measure retrieval.",1)
class delay_measure(spice_measurement):
    """Generates a spice measurement for the delay of 50%-to-50% points of two signals."""

    def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, targ_dir_str,
                 trig_vdd=0.5, targ_vdd=0.5, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd)

    def get_measure_function(self):
        return stimuli.gen_meas_delay

    def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
        """Store the invariant parts of the measurement: signal name
        templates, edge directions, and threshold fractions of vdd."""
        self.trig_name_no_port = trig_name
        self.targ_name_no_port = targ_name
        self.trig_dir_str = trig_dir_str
        self.targ_dir_str = targ_dir_str
        self.trig_val_of_vdd = trig_vdd
        self.targ_val_of_vdd = targ_vdd

    def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
        """Build the argument tuple for gen_meas_delay; time delays, supply
        voltage, and port are the per-simulation variant inputs."""
        self.port_error_check(port)
        trig_val = vdd_voltage * self.trig_val_of_vdd
        targ_val = vdd_voltage * self.targ_val_of_vdd
        if port is None:
            meas_name = self.name
            trig_name = self.trig_name_no_port
            targ_name = self.targ_name_no_port
        else:
            # The measurement name is suffixed with the port (dictionary
            # indexing), while signal templates substitute it via format().
            meas_name = "{}{}".format(self.name, port)
            trig_name = self.trig_name_no_port.format(port)
            targ_name = self.targ_name_no_port.format(port)
        return (meas_name, trig_name, targ_name, trig_val, targ_val,
                self.trig_dir_str, self.targ_dir_str, trig_td, targ_td)
class slew_measure(delay_measure):
    """Measures the 10%-to-90% slew of a single signal edge."""

    def __init__(self, measure_name, signal_name, slew_dir_str, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(signal_name, slew_dir_str)

    def set_meas_constants(self, signal_name, slew_dir_str):
        """Configure the trigger/target thresholds for a rising or falling
        slew; both edges refer to the same signal."""
        self.trig_dir_str = slew_dir_str
        self.targ_dir_str = slew_dir_str
        if slew_dir_str == "RISE":
            self.trig_val_of_vdd, self.targ_val_of_vdd = 0.1, 0.9
        elif slew_dir_str == "FALL":
            self.trig_val_of_vdd, self.targ_val_of_vdd = 0.9, 0.1
        else:
            debug.error("Unrecognised slew measurement direction={}".format(slew_dir_str),1)
        # Trigger and target are the same signal measured at two thresholds.
        self.trig_name_no_port = signal_name
        self.targ_name_no_port = signal_name
class power_measure(spice_measurement):
    """Generates a spice measurement for the average power between two time points."""

    def __init__(self, measure_name, power_type="", measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(power_type)

    def get_measure_function(self):
        return stimuli.gen_meas_power

    def set_meas_constants(self, power_type):
        """power_type ("RISE"/"FALL") is lib-file metadata only; it does not
        affect the measurement statement itself."""
        self.power_type = power_type

    def get_measure_values(self, t_initial, t_final, port=None):
        """Build the argument tuple for gen_meas_power; the averaging window
        endpoints and port vary per simulation."""
        self.port_error_check(port)
        meas_name = self.name if port is None else "{}{}".format(self.name, port)
        return (meas_name, t_initial, t_final)
class voltage_when_measure(spice_measurement):
    """Measures the voltage of one signal at the instant another signal
    crosses a threshold (a fraction of vdd)."""

    def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, trig_vdd, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(trig_name, targ_name, trig_dir_str, trig_vdd)

    def get_measure_function(self):
        return stimuli.gen_meas_find_voltage

    def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
        """Store trigger edge direction, threshold fraction of vdd, and the
        trigger/target signal name templates."""
        self.trig_dir_str = trig_dir_str
        self.trig_val_of_vdd = trig_vdd
        self.trig_name_no_port = trig_name
        self.targ_name_no_port = targ_name

    def get_measure_values(self, trig_td, vdd_voltage, port=None):
        """Build the argument tuple for gen_meas_find_voltage; trigger delay,
        supply voltage, and port vary per simulation."""
        self.port_error_check(port)
        if port is None:
            meas_name = self.name
            trig_name = self.trig_name_no_port
            targ_name = self.targ_name_no_port
        else:
            # Name is suffixed with the port (dictionary indexing); signal
            # templates substitute the port via format().
            meas_name = "{}{}".format(self.name, port)
            trig_name = self.trig_name_no_port.format(port)
            targ_name = self.targ_name_no_port.format(port)
        trig_voltage = self.trig_val_of_vdd*vdd_voltage
        return (meas_name, trig_name, targ_name, trig_voltage,
                self.trig_dir_str, trig_td)
class voltage_at_measure(spice_measurement):
    """Measures a signal's voltage at one specific time; the time is a
    variant input because it changes with the simulation period."""

    def __init__(self, measure_name, targ_name, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(targ_name)

    def get_measure_function(self):
        return stimuli.gen_meas_find_voltage_at_time

    def set_meas_constants(self, targ_name):
        """Remember the (possibly port-templated) target signal name."""
        self.targ_name_no_port = targ_name

    def get_measure_values(self, time_at, port=None):
        """Build the argument tuple for gen_meas_find_voltage_at_time."""
        self.port_error_check(port)
        if port is None:
            meas_name = self.name
            targ_name = self.targ_name_no_port
        else:
            # Name is suffixed with the port; the signal template formats it.
            meas_name = "{}{}".format(self.name, port)
            targ_name = self.targ_name_no_port.format(port)
        return (meas_name, targ_name, time_at)
| 45.356436 | 118 | 0.697337 |
import debug
from tech import drc, parameter, spice
from abc import ABC, abstractmethod
from .stimuli import *
from .charutils import *
class spice_measurement(ABC):
def __init__(self, measure_name, measure_scale=None, has_port=True):
self.name = measure_name
self.measure_scale = measure_scale
self.has_port = has_port
self.meta_str = None
self.meta_add_delay = False
@abstractmethod
def get_measure_function(self):
return None
@abstractmethod
def get_measure_values(self):
return None
def write_measure(self, stim_obj, input_tuple):
measure_func = self.get_measure_function()
if measure_func == None:
debug.error("Did not set measure function",1)
measure_vals = self.get_measure_values(*input_tuple)
measure_func(stim_obj, *measure_vals)
def retrieve_measure(self, port=None):
self.port_error_check(port)
if port != None:
value = parse_spice_list("timing", "{0}{1}".format(self.name.lower(), port))
else:
value = parse_spice_list("timing", "{0}".format(self.name.lower()))
if type(value)!=float or self.measure_scale == None:
return value
else:
return value*self.measure_scale
def port_error_check(self, port):
if self.has_port and port == None:
debug.error("Cannot retrieve measurement, port input was expected.",1)
elif not self.has_port and port != None:
debug.error("Unexpected port input received during measure retrieval.",1)
class delay_measure(spice_measurement):
def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, targ_dir_str,\
trig_vdd=0.5, targ_vdd=0.5, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd)
def get_measure_function(self):
return stimuli.gen_meas_delay
def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
self.trig_dir_str = trig_dir_str
self.targ_dir_str = targ_dir_str
self.trig_val_of_vdd = trig_vdd
self.targ_val_of_vdd = targ_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name
def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
self.port_error_check(port)
trig_val = self.trig_val_of_vdd * vdd_voltage
targ_val = self.targ_val_of_vdd * vdd_voltage
if port != None:
meas_name = "{}{}".format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
return (meas_name,trig_name,targ_name,trig_val,targ_val,self.trig_dir_str,self.targ_dir_str,trig_td,targ_td)
class slew_measure(delay_measure):
def __init__(self, measure_name, signal_name, slew_dir_str, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(signal_name, slew_dir_str)
def set_meas_constants(self, signal_name, slew_dir_str):
self.trig_dir_str = slew_dir_str
self.targ_dir_str = slew_dir_str
if slew_dir_str == "RISE":
self.trig_val_of_vdd = 0.1
self.targ_val_of_vdd = 0.9
elif slew_dir_str == "FALL":
self.trig_val_of_vdd = 0.9
self.targ_val_of_vdd = 0.1
else:
debug.error("Unrecognised slew measurement direction={}".format(slew_dir_str),1)
self.trig_name_no_port = signal_name
self.targ_name_no_port = signal_name
class power_measure(spice_measurement):
def __init__(self, measure_name, power_type="", measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(power_type)
def get_measure_function(self):
return stimuli.gen_meas_power
def set_meas_constants(self, power_type):
self.power_type = power_type
def get_measure_values(self, t_initial, t_final, port=None):
self.port_error_check(port)
if port != None:
meas_name = "{}{}".format(self.name, port)
else:
meas_name = self.name
return (meas_name,t_initial,t_final)
class voltage_when_measure(spice_measurement):
def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, trig_vdd, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(trig_name, targ_name, trig_dir_str, trig_vdd)
def get_measure_function(self):
return stimuli.gen_meas_find_voltage
def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
self.trig_dir_str = trig_dir_str
self.trig_val_of_vdd = trig_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name
def get_measure_values(self, trig_td, vdd_voltage, port=None):
self.port_error_check(port)
if port != None:
meas_name = "{}{}".format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
trig_voltage = self.trig_val_of_vdd*vdd_voltage
return (meas_name,trig_name,targ_name,trig_voltage,self.trig_dir_str,trig_td)
class voltage_at_measure(spice_measurement):
    """Spice measurement of a target signal's voltage at a fixed time."""

    def __init__(self, measure_name, targ_name, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(targ_name)

    def get_measure_function(self):
        """Return the stimuli writer that emits this .measure statement."""
        return stimuli.gen_meas_find_voltage_at_time

    def set_meas_constants(self, targ_name):
        # Signal-name template; a port number may be substituted in later.
        self.targ_name_no_port = targ_name

    def get_measure_values(self, time_at, port=None):
        """Build the (name, target, time) tuple for the measure writer."""
        self.port_error_check(port)
        # Identity comparison with None instead of `!= None`.
        if port is not None:
            meas_name = "{}{}".format(self.name, port)
            targ_name = self.targ_name_no_port.format(port)
        else:
            meas_name = self.name
            targ_name = self.targ_name_no_port
        return (meas_name, targ_name, time_at)
| true | true |
7900dd9d0973ef7c36be228897c5f941063ca9e6 | 17,766 | py | Python | webots/controllers/ur_controller/kinematics/inverse.py | EmilRyberg/P8LH7Grounding | 406fdf4ce9afd160df3d7105fedea563a284974b | [
"MIT"
] | 1 | 2021-02-09T12:13:28.000Z | 2021-02-09T12:13:28.000Z | webots/controllers/ur_controller/kinematics/inverse.py | EmilRyberg/P8LH7Grounding | 406fdf4ce9afd160df3d7105fedea563a284974b | [
"MIT"
] | null | null | null | webots/controllers/ur_controller/kinematics/inverse.py | EmilRyberg/P8LH7Grounding | 406fdf4ce9afd160df3d7105fedea563a284974b | [
"MIT"
] | null | null | null | import math
import numpy as np
from kinematics.forward import ForwardKinematics
from kinematics.kinematics import Kinematics
from kinematics.solution import InverseKinematicsShoulderSolution, InverseKinematicsSpecificSolution, \
InverseKinematicsSolution, InverseKinematicsWristSolution
class InverseKinematics(Kinematics):
    """Closed-form inverse kinematics for a 6-DOF (UR-style) arm.

    A reachable pose admits up to eight joint solutions, identified here
    by a config id in [0, 8):

        id // 4      : shoulder left (0) / shoulder right (1)
        (id % 4) // 2: wrist up (0) / wrist down (1)
        id % 2       : elbow up (0) / elbow down (1)
    """

    # Human-readable labels indexed by config id (see class docstring).
    _CONFIG_LABELS = (
        "Shoulder left, wrist up, elbow up",
        "Shoulder left, wrist up, elbow down",
        "Shoulder left, wrist down, elbow up",
        "Shoulder left, wrist down, elbow down",
        "Shoulder right, wrist up, elbow up",
        "Shoulder right, wrist up, elbow down",
        "Shoulder right, wrist down, elbow up",
        "Shoulder right, wrist down, elbow down",
    )

    def __init__(self):
        super().__init__()
        self.forward_kinematics = ForwardKinematics()

    def __clamp_cos_sin_within_threshold(self, cos_or_sin):
        """Clamp a cos/sin value slightly outside [-1, 1] (numerical noise)
        back onto the unit interval.

        Values beyond +-1.2 are left untouched so genuinely unreachable
        poses still fail the subsequent range check.
        """
        if 1 < cos_or_sin <= 1.2:
            return 1.0
        if -1.2 <= cos_or_sin < -1:
            return -1.0
        return cos_or_sin

    def __compute_solution_for_theta_1(self, T06, theta_1, debug=False):
        """Solve theta_5 (wrist up/down) for a fixed theta_1 and solve the
        remaining joints for each reachable wrist branch."""
        wrist_solution = InverseKinematicsWristSolution()

        # Theta 5: cos(theta_5) follows from the end-effector position P06.
        P06 = T06[:, 3]
        theta_5_1 = None
        theta_5_2 = None
        theta_5_cos = (P06[0] * math.sin(theta_1) - P06[1] * np.cos(
            theta_1) - self.joint4_dh.d) / self.joint6_dh.d
        theta_5_cos = self.__clamp_cos_sin_within_threshold(theta_5_cos)
        if -1 <= theta_5_cos <= 1:
            theta_5_1 = math.acos(theta_5_cos)
            theta_5_2 = -math.acos(theta_5_cos)

        # sin(theta_5) ~ 0 is a wrist singularity (theta_6 undefined);
        # treat that branch as invalid.
        sigma = 0.00001
        if theta_5_1 is not None and not -sigma <= math.sin(theta_5_1) <= sigma:
            wrist_solution.solution_wrist_up = self.__compute_solution_for_wrist(theta_1, theta_5_1, T06)
        else:
            wrist_solution.solution_wrist_up.is_valid_solution = False
        if theta_5_2 is not None and not -sigma <= math.sin(theta_5_2) <= sigma:
            wrist_solution.solution_wrist_down = self.__compute_solution_for_wrist(theta_1, theta_5_2, T06)
        else:
            wrist_solution.solution_wrist_down.is_valid_solution = False

        if not wrist_solution.solution_wrist_up.is_valid_solution \
                and not wrist_solution.solution_wrist_down.is_valid_solution:
            wrist_solution.is_valid_solution = False

        # Guard the debug print: formatting None with :.3f would raise.
        if debug and theta_5_1 is not None:
            print(f"Theta 5: {theta_5_1:.3f}, {theta_5_2:.3f}")
        return wrist_solution

    def __compute_solution_for_wrist(self, theta_1, theta_5, T06, debug=False):
        """Solve theta_6 and theta_3 (elbow up/down) for fixed theta_1 and
        theta_5, returning a shoulder solution with both elbow branches."""
        shoulder_solution = InverseKinematicsShoulderSolution()

        # Theta 6 from the orientation columns of the inverse transform.
        T60 = np.linalg.inv(T06)
        X60 = T60[:, 0]
        Y60 = T60[:, 1]
        theta_6_cos = (X60[0] * math.sin(theta_1) - Y60[0] * math.cos(theta_1)) / math.sin(theta_5)
        theta_6_sin = (-X60[1] * math.sin(theta_1) + Y60[1] * math.cos(theta_1)) / math.sin(theta_5)
        theta_6 = math.atan2(theta_6_sin, theta_6_cos)
        if debug:
            print(f"Theta 6: {theta_6:.3f}")

        # Intermediate transforms, shared by both elbow branches.
        T01 = self.compute_transformation_matrix(theta_1, self.joint1_dh)
        T45 = self.compute_transformation_matrix(theta_5, self.joint5_dh)
        T56 = self.compute_transformation_matrix(theta_6, self.joint6_dh)
        T46 = np.matmul(T45, T56)
        T64 = np.linalg.inv(T46)
        T10 = np.linalg.inv(T01)
        T14 = np.matmul(np.matmul(T10, T06), T64)
        P14 = T14[:, 3]
        tm_dict = {
            "T06": T06,
            "T01": T01,
            "T45": T45,
            "T56": T56,
            "T64": T64,
            "T10": T10,
            "T14": T14,
            "P14": P14,
        }

        # Theta 3 via the law of cosines on the two arm links (a3, a4).
        theta_3_cos = (math.sqrt(
            P14[0] ** 2 + P14[2] ** 2) ** 2 - self.joint3_dh.a ** 2 - self.joint4_dh.a ** 2) / (
                              2 * (-self.joint3_dh.a) * (-self.joint4_dh.a))
        if debug:
            print("theta3_cos: ", theta_3_cos)
        theta_3_cos = self.__clamp_cos_sin_within_threshold(theta_3_cos)
        if not -1 <= theta_3_cos <= 1:
            # Pose is out of reach for this branch.
            shoulder_solution.is_valid_solution = False
            return shoulder_solution
        theta_3_up = math.acos(theta_3_cos)
        theta_3_down = -math.acos(theta_3_cos)
        if debug:
            print(f"Theta 3: Up: {theta_3_up:.3f} Down: {theta_3_down:.3f}")

        shoulder_solution.solution_elbow_up = self.__compute_specific_solution(
            theta_1, theta_3_up, theta_5, theta_6, tm_dict)
        shoulder_solution.solution_elbow_down = self.__compute_specific_solution(
            theta_1, theta_3_down, theta_5, theta_6, tm_dict)
        return shoulder_solution

    def __compute_specific_solution(self, theta_1, theta_3, theta_5, theta_6, tm_dict, debug=False):
        """Solve the remaining theta_2 and theta_4 once every branch choice
        has been made, producing the final six joint angles."""
        specific_solution = InverseKinematicsSpecificSolution()
        P14 = tm_dict["P14"]
        phi_1 = math.atan2(-P14[2], -P14[0])
        phi_2 = math.asin((-self.joint4_dh.a * math.sin(theta_3)) / math.sqrt(P14[0]**2 + P14[2]**2))
        theta_2 = phi_1 - phi_2
        if debug:
            print(f"Theta 2: {theta_2:.3f}")

        T01 = tm_dict["T01"]
        T12 = self.compute_transformation_matrix(theta_2, self.joint2_dh)
        T23 = self.compute_transformation_matrix(theta_3, self.joint3_dh)
        T06 = tm_dict["T06"]
        T03 = np.matmul(np.matmul(T01, T12), T23)
        T30 = np.linalg.inv(T03)
        T64 = tm_dict["T64"]
        T34 = np.matmul(np.matmul(T30, T06), T64)
        X34 = T34[:, 0]
        theta_4 = math.atan2(X34[1], X34[0])
        if debug:
            print(f"Theta 4: {theta_4:.3f}")

        specific_solution.thetas = [theta_1, theta_2, theta_3, theta_4, theta_5, theta_6]
        return specific_solution

    def __print_all_solutions(self, solution):
        """Print the joint angles of every valid configuration."""
        print("Inverse Solutions:")
        for config_id, label in enumerate(self._CONFIG_LABELS):
            # Original code printed elbow-up thetas under two elbow-down
            # labels; routing through the config-id helpers fixes that.
            if self.is_valid_solution_by_config_id(solution, config_id):
                print("{}: {}".format(label, self.get_solution_for_config_id(solution, config_id)))

    def __branch_for_theta_1(self, T06, theta_1, debug):
        """Return the wrist solution for one shoulder branch, or an
        explicitly invalid one when theta_1 could not be computed (NaN)."""
        if not math.isnan(theta_1):
            return self.__compute_solution_for_theta_1(T06, theta_1, debug)
        # The original chained assignment stored the bool False on the
        # solution instead of an invalid solution object, which crashed
        # later attribute accesses; build a proper invalid object.
        invalid = InverseKinematicsWristSolution()
        invalid.is_valid_solution = False
        return invalid

    def compute_joint_angles(self, T06, debug=False):
        """Compute all eight candidate joint solutions for the pose T06.

        Returns an InverseKinematicsSolution whose branches are flagged
        valid/invalid individually.
        """
        solution = InverseKinematicsSolution()

        # Theta 1 from the wrist-center position P05; two shoulder branches.
        P05 = np.dot(T06, [0, 0, -self.joint6_dh.d, 1])
        phi_1 = math.atan2(P05[1], P05[0])
        phi_2_cos = self.joint4_dh.d / math.sqrt(P05[0]**2 + P05[1]**2)
        phi_2 = math.acos(phi_2_cos)
        theta_1_1 = phi_1 + phi_2 + (np.pi / 2)
        theta_1_2 = phi_1 - phi_2 + (np.pi / 2)
        if debug:
            print(f"Theta 1: {theta_1_1:.3f}, {theta_1_2:.3f}")

        solution.solution_shoulder_left = self.__branch_for_theta_1(T06, theta_1_1, debug)
        solution.solution_shoulder_right = self.__branch_for_theta_1(T06, theta_1_2, debug)

        if debug:
            self.__print_all_solutions(solution)
        return solution

    def __specific_solution(self, solution, config_id):
        """Navigate shoulder/wrist/elbow branches for config_id.

        Performs no validity checks; raises for an out-of-range id.
        """
        if not 0 <= config_id < 8:
            raise Exception("invalid config solution id")
        branch = (solution.solution_shoulder_left if config_id < 4
                  else solution.solution_shoulder_right)
        sub = (branch.solution_wrist_up if config_id % 4 < 2
               else branch.solution_wrist_down)
        return sub.solution_elbow_up if config_id % 2 == 0 else sub.solution_elbow_down

    def get_solution_for_config_id(self, solution, config_id):
        """Return the six joint angles of the requested configuration.

        Raises:
            Exception: if config_id is outside [0, 8).
        """
        return self.__specific_solution(solution, config_id).thetas

    def get_best_solution_for_config_id(self, T06, config_id):
        """Return the joint angles for config_id if valid; otherwise the
        first valid configuration scanning forward (with wrap-around).
        Returns None when no configuration is valid."""
        solution = self.compute_joint_angles(T06)
        for offset in range(8):
            candidate = (config_id + offset) % 8
            if self.is_valid_solution_by_config_id(solution, candidate):
                return self.get_solution_for_config_id(solution, candidate)
        print('Found no valid solutions..')
        return None

    def is_valid_solution_by_config_id(self, solution, config_id):
        """True when every level (shoulder, wrist, elbow) of config_id is
        flagged valid; False otherwise (including out-of-range ids)."""
        if not 0 <= config_id < 8:
            return False
        branch = (solution.solution_shoulder_left if config_id < 4
                  else solution.solution_shoulder_right)
        # getattr guards against a branch that was replaced by a bare
        # invalid marker rather than a solution object.
        if not getattr(branch, "is_valid_solution", False):
            return False
        sub = (branch.solution_wrist_up if config_id % 4 < 2
               else branch.solution_wrist_down)
        if not sub.is_valid_solution:
            return False
        # Original code tested object truthiness (always True) for some
        # elbow-down branches; test the flag explicitly.
        specific = sub.solution_elbow_up if config_id % 2 == 0 else sub.solution_elbow_down
        return bool(specific.is_valid_solution)

    def get_current_configuration_id(self, joint_angles):
        """Return the config id (0-7) whose IK solution for the pose implied
        by joint_angles is closest (sum of absolute joint differences)."""
        T06 = self.forward_kinematics.compute_0_to_6_matrix(joint_angles)
        solution = self.compute_joint_angles(T06)
        # Float dtype: the original int array silently truncated the
        # fractional angle differences on item assignment.
        differences = np.full(8, 1000.0)
        for config_id in range(8):
            if not self.is_valid_solution_by_config_id(solution, config_id):
                continue
            thetas = self.get_solution_for_config_id(solution, config_id)
            differences[config_id] = sum(
                abs(joint_angles[i] - thetas[i]) for i in range(6))
        print(differences)
        return np.argmin(differences)
| 53.836364 | 147 | 0.672295 | import math
import numpy as np
from kinematics.forward import ForwardKinematics
from kinematics.kinematics import Kinematics
from kinematics.solution import InverseKinematicsShoulderSolution, InverseKinematicsSpecificSolution, \
InverseKinematicsSolution, InverseKinematicsWristSolution
class InverseKinematics(Kinematics):
def __init__(self):
super().__init__()
self.forward_kinematics = ForwardKinematics()
def __clamp_cos_sin_within_threshold(self, cos_or_sin):
new_val = cos_or_sin
if 1 < new_val <= 1.2:
new_val = 1.0
elif -1.2 <= new_val < -1:
new_val = -1.0
return new_val
def __compute_solution_for_theta_1(self, T06, theta_1, debug=False):
wrist_solution = InverseKinematicsWristSolution()
P06 = T06[:, 3]
theta_5_1 = None
theta_5_2 = None
theta_5_cos = (P06[0] * math.sin(theta_1) - P06[1] * np.cos(
theta_1) - self.joint4_dh.d) / self.joint6_dh.d
theta_5_cos = self.__clamp_cos_sin_within_threshold(theta_5_cos)
if -1 <= theta_5_cos <= 1:
theta_5_1 = math.acos(theta_5_cos)
theta_5_2 = -math.acos(theta_5_cos)
sigma = 0.00001
if theta_5_1 is not None and not -sigma <= math.sin(theta_5_1) <= sigma:
wrist_solution.solution_wrist_up = self.__compute_solution_for_wrist(theta_1, theta_5_1, T06)
else:
wrist_solution.solution_wrist_up.is_valid_solution = False
if theta_5_2 is not None and not -sigma <= math.sin(theta_5_2) <= sigma:
wrist_solution.solution_wrist_down = self.__compute_solution_for_wrist(theta_1, theta_5_2, T06)
else:
wrist_solution.solution_wrist_down.is_valid_solution = False
if not wrist_solution.solution_wrist_up.is_valid_solution and not wrist_solution.solution_wrist_down.is_valid_solution:
wrist_solution.is_valid_solution = False
if debug:
print(f"Theta 5: {theta_5_1:.3f}, {theta_5_2:.3f}")
return wrist_solution
def __compute_solution_for_wrist(self, theta_1, theta_5, T06, debug=False):
shoulder_solution = InverseKinematicsShoulderSolution()
T60 = np.linalg.inv(T06)
X60 = T60[:, 0]
Y60 = T60[:, 1]
theta_6_cos = (X60[0] * math.sin(theta_1) - Y60[0] * math.cos(theta_1)) / math.sin(
theta_5)
theta_6_sin = (-X60[1] * math.sin(theta_1) + Y60[1] * math.cos(theta_1)) / math.sin(
theta_5) # only using one of the theta 5's for now..
theta_6 = math.atan2(theta_6_sin, theta_6_cos)
if debug:
print(f"Theta 6: {theta_6:.3f}")
tm_dict = {}
T01 = self.compute_transformation_matrix(theta_1, self.joint1_dh)
T45 = self.compute_transformation_matrix(theta_5, self.joint5_dh)
T56 = self.compute_transformation_matrix(theta_6, self.joint6_dh)
T46 = np.matmul(T45, T56)
T64 = np.linalg.inv(T46)
T10 = np.linalg.inv(T01)
T14 = np.matmul(np.matmul(T10, T06), T64)
P14 = T14[:, 3]
tm_dict["T06"] = T06
tm_dict["T01"] = T01
tm_dict["T45"] = T45
tm_dict["T56"] = T56
tm_dict["T64"] = T64
tm_dict["T10"] = T10
tm_dict["T14"] = T14
tm_dict["P14"] = P14
theta_3_cos = (math.sqrt(
P14[0] ** 2 + P14[2] ** 2) ** 2 - self.joint3_dh.a ** 2 - self.joint4_dh.a ** 2) / (
2 * (-self.joint3_dh.a) * (-self.joint4_dh.a))
if debug:
print("theta3_cos: ", theta_3_cos)
theta_3_cos = self.__clamp_cos_sin_within_threshold(theta_3_cos)
if not -1 <= theta_3_cos <= 1:
shoulder_solution.is_valid_solution = False
return shoulder_solution
theta_3_up = math.acos(theta_3_cos)
theta_3_down = -math.acos(theta_3_cos)
if debug:
print(f"Theta 3: Up: {theta_3_up:.3f} Down: {theta_3_down:.3f}")
shoulder_solution.solution_elbow_up = self.__compute_specific_solution(theta_1, theta_3_up, theta_5, theta_6, tm_dict)
shoulder_solution.solution_elbow_down = self.__compute_specific_solution(theta_1, theta_3_down, theta_5, theta_6, tm_dict)
return shoulder_solution
def __compute_specific_solution(self, theta_1, theta_3, theta_5, theta_6, tm_dict, debug=False):
specific_solution = InverseKinematicsSpecificSolution()
P14 = tm_dict["P14"]
phi_1 = math.atan2(-P14[2], -P14[0])
phi_2 = math.asin((-self.joint4_dh.a * math.sin(theta_3)) / math.sqrt(P14[0]**2 + P14[2]**2))
theta_2 = phi_1 - phi_2
if debug:
print(f"Theta 2: {theta_2:.3f}")
T01 = tm_dict["T01"]
T12 = self.compute_transformation_matrix(theta_2, self.joint2_dh)
T23 = self.compute_transformation_matrix(theta_3, self.joint3_dh)
T45 = tm_dict["T45"]
T56 = tm_dict["T56"]
T06 = tm_dict["T06"]
T03 = np.matmul(np.matmul(T01, T12), T23)
T30 = np.linalg.inv(T03)
T64 = tm_dict["T64"]
T34 = np.matmul(np.matmul(T30, T06), T64)
X34 = T34[:, 0]
theta_4 = math.atan2(X34[1], X34[0])
if debug:
print(f"Theta 4: {theta_4:.3f}")
specific_solution.thetas = [theta_1, theta_2, theta_3, theta_4, theta_5, theta_6]
return specific_solution
def __print_all_solutions(self, solution):
print("Inverse Solutions:")
if solution.solution_shoulder_left.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
print(
f"Shoulder left, wrist up, elbow up: {solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.thetas}")
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
print(
f"Shoulder left, wrist up, elbow down: {solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.thetas}")
if solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
print(
f"Shoulder left, wrist down, elbow up: {solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.thetas}")
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down:
print(
f"Shoulder left, wrist down, elbow down: {solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.thetas}")
if solution.solution_shoulder_right.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
print(
f"Shoulder right, wrist up, elbow up: {solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas}")
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
print(
f"Shoulder right, wrist up, elbow down: {solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas}")
if solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
print(
f"Shoulder right, wrist down, elbow up: {solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas}")
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
print(
f"Shoulder right, wrist down, elbow down: {solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas}")
def compute_joint_angles(self, T06, debug=False):
solution = InverseKinematicsSolution()
P05 = np.dot(T06, [0, 0, -self.joint6_dh.d, 1])
phi_1 = math.atan2(P05[1], P05[0])
phi_2_cos = self.joint4_dh.d / math.sqrt(P05[0]**2 + P05[1]**2)
phi_2 = math.acos(phi_2_cos)
theta_1_1 = phi_1 + phi_2 + (np.pi / 2)
theta_1_2 = phi_1 - phi_2 + (np.pi / 2)
if debug:
print(f"Theta 1: {theta_1_1:.3f}, {theta_1_2:.3f}")
if not math.isnan(theta_1_1):
solution.solution_shoulder_left = self.__compute_solution_for_theta_1(T06, theta_1_1, debug)
else:
solution.solution_shoulder_left = InverseKinematicsWristSolution().is_valid_solution = False
if not math.isnan(theta_1_2):
solution.solution_shoulder_right = self.__compute_solution_for_theta_1(T06, theta_1_2, debug)
else:
solution.solution_shoulder_right = InverseKinematicsWristSolution().is_valid_solution = False
if debug:
self.__print_all_solutions(solution)
return solution
def get_solution_for_config_id(self, solution, config_id):
if config_id == 0:
return solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.thetas
elif config_id == 1:
return solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.thetas
elif config_id == 2:
return solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.thetas
elif config_id == 3:
return solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.thetas
elif config_id == 4:
return solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas
elif config_id == 5:
return solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.thetas
elif config_id == 6:
return solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas
elif config_id == 7:
return solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.thetas
else:
raise Exception("invalid config solution id")
def get_best_solution_for_config_id(self, T06, config_id):
solution = self.compute_joint_angles(T06)
if self.is_valid_solution_by_config_id(solution, config_id):
return self.get_solution_for_config_id(solution, config_id)
else:
index = config_id + 1
checked_all = False
while not checked_all:
if index >= 8:
index = 0
if index == config_id:
print('Found no valid solutions..')
return None
if self.is_valid_solution_by_config_id(solution, index):
return self.get_solution_for_config_id(solution, index)
index += 1
def is_valid_solution_by_config_id(self, solution, config_id):
if 0 <= config_id < 4 and solution.solution_shoulder_left.is_valid_solution:
if 0 <= config_id < 2 and solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if config_id == 0 and solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
return True
if config_id == 1 and solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
return True
if 2 <= config_id < 4 and solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if config_id == 2 and solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
return True
if config_id == 3 and solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down:
return True
if 4 <= config_id < 8 and solution.solution_shoulder_right.is_valid_solution:
if 4 <= config_id < 6 and solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if config_id == 4 and solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
return True
if config_id == 5 and solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
return True
if 6 <= config_id < 8 and solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if config_id == 6 and solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
return True
if config_id == 7 and solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
return True
else:
return False
def get_current_configuration_id(self, joint_angles):
T06 = self.forward_kinematics.compute_0_to_6_matrix(joint_angles)
solution = self.compute_joint_angles(T06)
differences = np.full(8, 1000)
if solution.solution_shoulder_left.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[0] = 0
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[1] = 0
if solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[2] = 0
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down:
differences[3] = 0
if solution.solution_shoulder_right.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[4] = 0
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[5] = 0
if solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[6] = 0
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
differences[7] = 0
for i in range(6):
if solution.solution_shoulder_left.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[0] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_up.solution_elbow_up.thetas[i])
if solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[1] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_up.solution_elbow_down.thetas[i])
if solution.solution_shoulder_left.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[2] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_down.solution_elbow_up.thetas[i])
if solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down:
differences[3] += abs(joint_angles[i] - solution.solution_shoulder_left.solution_wrist_down.solution_elbow_down.thetas[i])
if solution.solution_shoulder_right.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.is_valid_solution:
differences[4] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_up.solution_elbow_up.thetas[i])
if solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.is_valid_solution:
differences[5] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_up.solution_elbow_down.thetas[i])
if solution.solution_shoulder_right.solution_wrist_down.is_valid_solution:
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.is_valid_solution:
differences[6] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_down.solution_elbow_up.thetas[i])
if solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.is_valid_solution:
differences[7] += abs(joint_angles[i] - solution.solution_shoulder_right.solution_wrist_down.solution_elbow_down.thetas[i])
print(differences)
return np.argmin(differences)
| true | true |
7900dde40c879fb214488626b1afbfb9444c7685 | 1,364 | py | Python | account/models.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | account/models.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | account/models.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# Create your models here.
class Profile(models.Model):
    """Per-user profile data, created automatically for every new User
    (see the post_save handler at the bottom of this module)."""

    # on_delete=CASCADE matches the implicit default of Django < 2.0 and
    # must be passed explicitly on Django >= 2.0.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    date_of_birth = models.DateField(blank=True, null=True)
    photo = models.ImageField(upload_to='users/%Y/%m/%d', blank=True)
    # Bookmarks recording where the user left off in the course material.
    course_bookmark = models.CharField(max_length=100, default='the-strategy')
    module_bookmark = models.PositiveIntegerField(default=0)

    def __str__(self):
        return 'Profile for user {}'.format(self.user.username)
class Contact(models.Model):
    """A follow relationship: ``user_from`` follows ``user_to``."""

    # on_delete=CASCADE matches the implicit default of Django < 2.0 and
    # must be passed explicitly on Django >= 2.0.
    user_from = models.ForeignKey(User, related_name='rel_from_set', on_delete=models.CASCADE)
    user_to = models.ForeignKey(User, related_name='rel_to_set', on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True, db_index=True)

    class Meta:
        # Newest relationships first.
        ordering = ('-created',)

    def __str__(self):
        return '{} follows {}'.format(self.user_from, self.user_to)
# Monkey-patch a self-referential many-to-many onto the built-in User model
# so `user.following` / `user.followers` resolve through the Contact table.
User.add_to_class(
    'following',
    models.ManyToManyField(
        'self',
        through=Contact,
        related_name='followers',
        symmetrical=False,
    ),
)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save handler: give every newly created User a Profile."""
    if not created:
        return
    Profile.objects.create(user=instance)


post_save.connect(create_user_profile, sender=User)
| 30.311111 | 75 | 0.744868 | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Profile(models.Model):
    """Extra per-user data keyed one-to-one to the auth user model."""
    # NOTE(review): OneToOneField has no explicit on_delete; this only
    # works on Django < 2.0 where CASCADE was the implicit default.
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    date_of_birth = models.DateField(blank=True, null=True)
    photo = models.ImageField(upload_to='users/%Y/%m/%d', blank=True)
    # Course/module the user last visited, restored on next login.
    course_bookmark = models.CharField(max_length=100, default='the-strategy')
    module_bookmark = models.PositiveIntegerField(default=0)
    def __str__(self):
        return 'Profile for user {}'.format(self.user.username)
class Contact(models.Model):
    """A directed 'user_from follows user_to' relationship."""
    # NOTE(review): ForeignKeys lack explicit on_delete; this only works
    # on Django < 2.0 where CASCADE was the implicit default.
    user_from = models.ForeignKey(User, related_name='rel_from_set')
    user_to = models.ForeignKey(User, related_name='rel_to_set')
    created = models.DateTimeField(auto_now_add=True, db_index=True)
    class Meta:
        # Newest follows first.
        ordering = ('-created',)
    def __str__(self):
        return '{} follows {}'.format(self.user_from, self.user_to)
# Monkey-patch a non-symmetrical 'following' many-to-many onto the stock
# User model (through the Contact table) so that `user.following` and the
# reverse `user.followers` both work without subclassing User.
User.add_to_class('following', models.ManyToManyField('self',
                                                      through=Contact,
                                                      related_name='followers',
                                                      symmetrical=False))
# Signal handler: every newly created User automatically gets a Profile.
def create_user_profile(sender, instance, created, **kwargs):
    """Create a matching Profile the first time a User is saved."""
    if created:
        Profile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
| true | true |
7900de374b0b26464a2f4577d5e9c14335ef0962 | 330 | py | Python | shots/admin.py | leigh90/TheLumiere | 779ce93f2b27fd83f891803bdc5304b14767c794 | [
"MIT"
] | 1 | 2021-07-30T03:43:50.000Z | 2021-07-30T03:43:50.000Z | shots/admin.py | leigh90/TheLumiere | 779ce93f2b27fd83f891803bdc5304b14767c794 | [
"MIT"
] | null | null | null | shots/admin.py | leigh90/TheLumiere | 779ce93f2b27fd83f891803bdc5304b14767c794 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Location,Category,Image
# Register your models here.
class ImageAdmin(admin.ModelAdmin):
    """Admin options for the Image model."""

    # Trailing comma is required: ('image_category') is just a string,
    # while search_fields must be a list/tuple of field names.
    search_fields = ('image_category',)


# Register the models; ImageAdmin is attached to Image here.  (The
# original defined `class Image(admin.ModelAdmin)` after registering,
# shadowing the imported Image model and never using the admin class.)
admin.site.register(Location)
admin.site.register(Category)
admin.site.register(Image, ImageAdmin)
from __future__ import unicode_literals
from django.contrib import admin
from .models import Location,Category,Image
class ImageAdmin(admin.ModelAdmin):
    """Admin options for the Image model."""

    # Trailing comma is required: ('image_category') is just a string,
    # while search_fields must be a list/tuple of field names.
    search_fields = ('image_category',)


# Register the models; ImageAdmin is attached to Image here.  (The
# original defined `class Image(admin.ModelAdmin)` after registering,
# shadowing the imported Image model and never using the admin class.)
admin.site.register(Location)
admin.site.register(Category)
admin.site.register(Image, ImageAdmin)
7900deb515d717fd4af09ce55feb3c93fa384b5e | 40,611 | py | Python | unit_tests/events/plugins/test_zaza_events_plugins_conncheck.py | wolsen/zaza | 351f3580b7b1ce4e74bd3b40caacbce218110476 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2018-02-09T16:32:02.000Z | 2021-05-18T14:19:23.000Z | unit_tests/events/plugins/test_zaza_events_plugins_conncheck.py | wolsen/zaza | 351f3580b7b1ce4e74bd3b40caacbce218110476 | [
"ECL-2.0",
"Apache-2.0"
] | 243 | 2018-03-23T02:10:26.000Z | 2022-03-25T12:32:31.000Z | unit_tests/events/plugins/test_zaza_events_plugins_conncheck.py | wolsen/zaza | 351f3580b7b1ce4e74bd3b40caacbce218110476 | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2017-12-07T08:10:32.000Z | 2022-02-04T13:12:58.000Z | # Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for zaza.events.plugins.conncheck.py."""
import mock
import subprocess
import unit_tests.utils as tests_utils
import zaza.events.plugins.conncheck as conncheck
class TestAutoConfigureFunction(tests_utils.BaseTestCase):
    """Tests for conncheck.auto_configure_with_collection()."""
    def setUp(self):
        super().setUp()
        # Silence module logging and intercept the manager lookup.
        self.patch_object(conncheck, 'logger', name='mock_logger')
        self.patch_object(
            conncheck, 'get_plugin_manager', name='mock_get_plugin_manager')
        self.mock_collection = mock.Mock()
        self.mock_conncheck_manager = mock.Mock()
    def test_autoconfigure_no_config(self):
        """With no config the DEFAULT manager is fetched and attached."""
        self.mock_get_plugin_manager.return_value = self.mock_conncheck_manager
        conncheck.auto_configure_with_collection(self.mock_collection)
        self.mock_get_plugin_manager.assert_called_once_with('DEFAULT')
        self.mock_collection.add_logging_manager.assert_called_once_with(
            self.mock_conncheck_manager)
    def test_autoconfigure_with_config(self):
        """A config dict selects the manager name and module source."""
        self.mock_get_plugin_manager.return_value = self.mock_conncheck_manager
        conncheck.auto_configure_with_collection(
            self.mock_collection,
            config={'manager-name': 'a-manager', 'source': 'a-source'})
        self.mock_get_plugin_manager.assert_called_once_with('a-manager')
        self.mock_collection.add_logging_manager.assert_called_once_with(
            self.mock_conncheck_manager)
        self.mock_conncheck_manager.configure.assert_called_once_with(
            module_source='a-source')
class TestGetConncheckManager(tests_utils.BaseTestCase):
    """Tests for conncheck.get_conncheck_manager()."""
    def test_get_conncheck_manager(self):
        """The manager name is read from options and then resolved."""
        self.patch_object(conncheck, 'get_plugin_manager',
                          name='mock_get_plugin_manager')
        self.patch_object(conncheck, 'get_option', name='mock_get_option')
        self.mock_get_option.return_value = 'a-name'
        self.mock_get_plugin_manager.return_value = 'a-manager'
        self.assertEqual(conncheck.get_conncheck_manager(), 'a-manager')
        self.mock_get_option.assert_called_once_with(
            'zaza-events.modules.conncheck.manager-name', 'DEFAULT')
        self.mock_get_plugin_manager.assert_called_once_with('a-name')
class TestGetPluginManager(tests_utils.BaseTestCase):
    """Tests for conncheck.get_plugin_manager() and its cache."""
    def test_get_plugin_manager(self):
        """With no name a DEFAULT-managed plugin manager is built."""
        self.patch_object(conncheck, 'ConnCheckPluginManager',
                          name='mock_ConnCheckPluginManager')
        self.patch_object(conncheck, '_conncheck_plugin_managers', new={})
        self.mock_ConnCheckPluginManager.return_value = 'a-manager'
        self.assertEqual(conncheck.get_plugin_manager(), 'a-manager')
        self.mock_ConnCheckPluginManager.assert_called_once_with(
            managed_name='DEFAULT')
    def test_get_plugin_manager_non_default(self):
        """An explicit name is passed through as the managed name."""
        self.patch_object(conncheck, 'ConnCheckPluginManager',
                          name='mock_ConnCheckPluginManager')
        self.patch_object(conncheck, '_conncheck_plugin_managers', new={})
        self.mock_ConnCheckPluginManager.return_value = 'a-manager'
        self.assertEqual(conncheck.get_plugin_manager('a-name'), 'a-manager')
        self.mock_ConnCheckPluginManager.assert_called_once_with(
            managed_name='a-name')
    def test_get_plugin_manager_check_caches(self):
        """A cached manager is returned without constructing a new one."""
        self.patch_object(conncheck, '_conncheck_plugin_managers', new={},
                          name='mock__conncheck_plugin_managers')
        self.mock__conncheck_plugin_managers['a-name'] = 'a-manager'
        self.patch_object(conncheck, 'ConnCheckPluginManager',
                          name='mock_ConnCheckPluginManager')
        self.mock_ConnCheckPluginManager.return_value = 'the-manager'
        self.assertEqual(conncheck.get_plugin_manager('a-name'), 'a-manager')
        self.mock_ConnCheckPluginManager.assert_not_called()
class TestConnCheckPluginManager(tests_utils.BaseTestCase):
    """Tests for the ConnCheckPluginManager adapter."""
    def setUp(self):
        super().setUp()
        # Replace the real manager class so each test can inspect calls.
        self.patch_object(conncheck, 'ConnCheckManager',
                          name='mock_ConnCheckManager')
        self.mock_conncheck_manager = mock.Mock()
        self.mock_ConnCheckManager.return_value = self.mock_conncheck_manager
        # Stand-in for a zaza-events collection with the attributes that
        # configure_plugin() reads.
        self.mock_collection_object = mock.Mock()
        self.mock_collection_object.logs_dir = "a-logs-dir"
        self.mock_collection_object.log_format = conncheck.LogFormats.InfluxDB
        self.mock_collection_object.collection = 'a-collection'
    def test_init(self):
        """managed_name defaults to DEFAULT; an explicit name is kept."""
        cpm = conncheck.ConnCheckPluginManager()
        self.assertEqual(cpm.managed_name, 'DEFAULT')
        self.assertEqual(cpm._conncheck_manager, self.mock_conncheck_manager)
        cpm = conncheck.ConnCheckPluginManager(managed_name='a-manager')
        self.assertEqual(cpm.managed_name, 'a-manager')
    def test_configure(self):
        """configure() delegates to configure_plugin()."""
        cpm = conncheck.ConnCheckPluginManager()
        self.patch_object(
            cpm, 'configure_plugin', name='mock_cpm_configure_plugin')
        cpm.configure(collection_object=self.mock_collection_object)
        self.mock_cpm_configure_plugin.assert_called_once_with()
    def test_configure_plugin(self):
        """configure_plugin() forwards collection attrs to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.configure(collection_object=self.mock_collection_object)
        self.mock_conncheck_manager.configure.assert_called_once_with(
            collection='a-collection',
            logs_dir='a-logs-dir',
            module_source='a-source',
            tags='abc')
    def test_manager_property(self):
        """manager returns the wrapped manager; asserts when unset."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        self.assertEqual(cpm.manager, self.mock_conncheck_manager)
        cpm._conncheck_manager = None
        with self.assertRaises(AssertionError):
            cpm.manager
    def test_add_instance(self):
        """add_instance() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.add_instance('a-spec', this='that')
        self.mock_conncheck_manager.add_instance.assert_called_once_with(
            'a-spec', this='that')
    def test_get_instance(self):
        """get_instance() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        self.mock_conncheck_manager.get_instance.return_value = 'an-instance'
        self.assertEqual(cpm.get_instance('a-spec'), 'an-instance')
        self.mock_conncheck_manager.get_instance.assert_called_once_with(
            'a-spec')
    def test_start(self):
        """start() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.start('a-spec')
        self.mock_conncheck_manager.start.assert_called_once_with('a-spec')
    def test_stop(self):
        """stop() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.stop('a-spec')
        self.mock_conncheck_manager.stop.assert_called_once_with('a-spec')
    def test_finalise(self):
        """finalise() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.finalise()
        self.mock_conncheck_manager.finalise.assert_called_once_with()
    def test_log_files(self):
        """log_files() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.log_files()
        self.mock_conncheck_manager.log_files.assert_called_once_with()
    def test_clean_up(self):
        """clean_up() is passed straight through to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.clean_up()
        self.mock_conncheck_manager.clean_up.assert_called_once_with()
    def test_reset(self):
        """reset() cleans up and drops the wrapped manager reference."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.reset()
        self.mock_conncheck_manager.clean_up.assert_called_once_with()
        self.assertIsNone(cpm._conncheck_manager)
class TestConnCheckManager(tests_utils.BaseTestCase):
    """Tests for the collection-level ConnCheckManager."""
    def setUp(self):
        super().setUp()
        self.c = conncheck.ConnCheckManager(
            collection='a-collection',
            logs_dir='/some/dir',
            tags=['tag1'])
    def test_init(self):
        """Constructor stores collection, logs_dir and tags verbatim."""
        self.assertEqual(self.c.collection, 'a-collection')
        self.assertEqual(self.c.logs_dir, '/some/dir')
        self.assertEqual(self.c.tags, ['tag1'])
    def test_add_instance(self):
        """add_instance() builds via make_instance_with; dupes raise."""
        self.patch_object(self.c, 'make_instance_with',
                          name='mock_make_instance_with')
        self.mock_make_instance_with.return_value = 'an-instance'
        self.c.add_instance('juju:0', this='that', some='thing')
        self.mock_make_instance_with.assert_called_once_with(
            'juju:0', this='that', some='thing', module_source='conncheck',
            collection='a-collection')
        self.assertIn('juju:0', self.c._instances)
        self.assertEqual(self.c._instances['juju:0'], 'an-instance')
        # add again to check for error
        with self.assertRaises(RuntimeError):
            self.c.add_instance('juju:0', this='that', some='thing')
    def test_get_instance(self):
        """get_instance() returns the stored instance for a spec."""
        self.c._instances['juju:0'] = 'an-instance'
        self.assertEqual(self.c.get_instance('juju:0'), 'an-instance')
    def test_start(self):
        """start() hits one named instance, or every instance if unnamed."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        self.c.start('i1')
        mock_instance1.start.assert_called_once_with()
        mock_instance2.start.assert_not_called()
        mock_instance1.reset_mock()
        self.c.start()
        mock_instance1.start.assert_called_once_with()
        mock_instance2.start.assert_called_once_with()
    def test_stop(self):
        """stop() hits one named instance, or every instance if unnamed."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        self.c.stop('i1')
        mock_instance1.stop.assert_called_once_with()
        mock_instance2.stop.assert_not_called()
        mock_instance1.reset_mock()
        self.c.stop()
        mock_instance1.stop.assert_called_once_with()
        mock_instance2.stop.assert_called_once_with()
    def test_finalise(self):
        """finalise() stops/finalises all instances exactly once."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        self.c.finalise()
        mock_instance1.finalise.assert_called_once_with()
        mock_instance2.finalise.assert_called_once_with()
        mock_instance1.stop.assert_called_once_with()
        mock_instance2.stop.assert_called_once_with()
        mock_instance1.reset_mock()
        mock_instance2.reset_mock()
        # A second finalise() must be a no-op.
        self.c.finalise()
        mock_instance1.stop.assert_not_called()
        mock_instance2.stop.assert_not_called()
        mock_instance1.finalise.assert_not_called()
        mock_instance2.finalise.assert_not_called()
    def test_log_files(self):
        """log_files() yields (name, format, file) and caches the copy."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        mock_instance1.get_logfile_to_local.return_value = 'i1.log'
        mock_instance1.log_format = 'f'
        mock_instance2.get_logfile_to_local.return_value = 'i2.log'
        mock_instance2.log_format = 'f'
        log_specs = list(self.c.log_files())
        mock_instance1.finalise.assert_called_once_with()
        mock_instance2.finalise.assert_called_once_with()
        mock_instance1.get_logfile_to_local.assert_called_once_with(
            '/some/dir')
        mock_instance2.get_logfile_to_local.assert_called_once_with(
            '/some/dir')
        self.assertEqual(
            log_specs,
            [('i1', 'f', 'i1.log'),
             ('i2', 'f', 'i2.log')])
        mock_instance1.get_logfile_to_local.reset_mock()
        mock_instance2.get_logfile_to_local.reset_mock()
        # Second call must come from the cache, not re-copy the logs.
        log_specs = list(self.c.log_files())
        mock_instance1.get_logfile_to_local.assert_not_called()
        mock_instance2.get_logfile_to_local.assert_not_called()
        self.assertEqual(
            log_specs,
            [('i1', 'f', 'i1.log'),
             ('i2', 'f', 'i2.log')])
    def test_clean_up(self):
        """clean_up() just finalises."""
        self.patch_object(self.c, 'finalise', name='mock_finalise')
        self.c.clean_up()
        self.mock_finalise.assert_called_once_with()
    def test_register_spec_handler(self):
        """Handlers register once per spec prefix; dupes raise."""
        self.patch_object(conncheck.ConnCheckManager,
                          '_spec_handlers',
                          name='mock_cls__spec_handlers',
                          new={})
        def handler():
            pass
        conncheck.ConnCheckManager.register_spec_handler('juju', handler)
        self.assertIn('juju', conncheck.ConnCheckManager._spec_handlers)
        self.assertEqual(conncheck.ConnCheckManager._spec_handlers['juju'],
                         handler)
        # verify can't be added twice.
        with self.assertRaises(RuntimeError):
            conncheck.ConnCheckManager.register_spec_handler('juju', handler)
    def test_make_instance_with(self):
        """make_instance_with() validates and dispatches on spec prefix."""
        mock_handler = mock.Mock()
        mock_handler.return_value = 'an-instance'
        self.patch_object(conncheck.ConnCheckManager,
                          '_spec_handlers',
                          name='mock_cls__spec_handlers',
                          new={})
        conncheck.ConnCheckManager.register_spec_handler('juju', mock_handler)
        # first check for ':' in spec
        with self.assertRaises(ValueError):
            self.c.make_instance_with('i')
        # Now check for unhandled spec
        with self.assertRaises(KeyError):
            self.c.make_instance_with('some:thing')
        # finally make one with juju
        self.assertEqual(
            self.c.make_instance_with('juju:0', this='that', some='thing'),
            'an-instance')
        mock_handler.assert_called_once_with('0', this='that', some='thing')
class TestConnCheckInstanceBase(tests_utils.BaseTestCase):
    """Tests for ConnCheckInstanceBase, the shared instance behaviour."""
    def setUp(self):
        super().setUp()
        self.c = conncheck.ConnCheckInstanceBase(
            name='base',
            module_source='/some/source',
            collection='a-collection')
    def test_init(self):
        """Constructor honours explicit arguments and sane defaults."""
        c = conncheck.ConnCheckInstanceBase(
            name='a-name',
            log_format=conncheck.LogFormats.CSV,
            config_file='thing.yaml',
            install_dir='/opt',
            module_source='/some/other/source',
            install_user='a-user')
        self.assertEqual(c.name, 'a-name')
        self.assertEqual(c.log_format, conncheck.LogFormats.CSV)
        self.assertEqual(c.config_file, 'thing.yaml')
        self.assertEqual(c.install_dir, '/opt')
        self.assertEqual(c.module_source, '/some/other/source')
        self.assertEqual(c.install_user, 'a-user')
        self.assertEqual(self.c.name, 'base')
        self.assertEqual(self.c.log_format, conncheck.LogFormats.InfluxDB)
        self.assertEqual(self.c.config_file, 'config.yaml')
        self.assertEqual(self.c.install_dir, '.')
        self.assertEqual(self.c.module_source, '/some/source')
        self.assertEqual(self.c.install_user, 'conncheck')
    def test__validate_not_existing_listener(self):
        """Unknown protocols assert; duplicate (proto, port) raises."""
        with self.assertRaises(AssertionError):
            self.c._validate_not_existing_listener('thing', 1024)
        self.c._validate_not_existing_listener('udp', 1024)
        self.c._listeners = {('udp', 1024): None}
        with self.assertRaises(RuntimeError):
            self.c._validate_not_existing_listener('udp', 1024)
        self.c._validate_not_existing_listener('udp', 1023)
    def test_add_listener(self):
        """Base class add_listener() is abstract."""
        with self.assertRaises(NotImplementedError):
            self.c.add_listener()
    def test_add_listener_spec(self):
        """add_listener_spec() records the spec and rewrites config."""
        self.patch_object(self.c, 'write_configuration',
                          name='mock_c_write_configuration')
        self.c.add_listener_spec('udp', 1024, '0.0.0.0', reply_size=50)
        self.assertIn(('udp', 1024), self.c._listeners)
        self.assertEqual(self.c._listeners[('udp', 1024)],
                         {'name': 'base:listen:udp:0.0.0.0:1024',
                          'ipv4': '0.0.0.0',
                          'port': 1024,
                          'protocol': 'udp',
                          'reply-size': 50})
        self.mock_c_write_configuration.assert_called_once_with()
    def test_add_speaker(self):
        """add_speaker() resolves the peer address then adds the spec."""
        self.patch_object(self.c, '_get_remote_address',
                          name='mock__get_remote_address')
        self.mock__get_remote_address.return_value = '1.2.3.4'
        self.patch_object(self.c, 'add_speaker_spec',
                          name='mock_add_speaker_spec')
        self.c.add_speaker('udp', 1024, instance='an-instance', address=None,
                           wait=10, interval=20, send_size=5)
        self.mock__get_remote_address.assert_called_once_with(
            'an-instance', 'udp', 1024)
        self.mock_add_speaker_spec.assert_called_once_with(
            'udp', 1024, '1.2.3.4', wait=10, interval=20, send_size=5)
    def test__validate_not_existing_speaker(self):
        """Unknown protocols assert; duplicate (proto, ip, port) raises."""
        with self.assertRaises(AssertionError):
            self.c._validate_not_existing_speaker('thing', '1.2.3.4', 1024)
        self.c._validate_not_existing_speaker('udp', '1.2.3.4', 1024)
        self.c._speakers = {('udp', '1.2.3.4', 1024): None}
        with self.assertRaises(RuntimeError):
            self.c._validate_not_existing_speaker('udp', '1.2.3.4', 1024)
        self.c._validate_not_existing_speaker('udp', '1.2.3.4', 1023)
    def test_add_speaker_spec(self):
        """udp and http speakers get distinct specs; others assert."""
        self.patch_object(self.c, 'write_configuration',
                          name='mock_c_write_configuration')
        self.c.add_speaker_spec('udp', 1024, '1.2.3.4', send_size=50)
        self.assertIn(('udp', '1.2.3.4', 1024), self.c._speakers)
        self.assertEqual(self.c._speakers[('udp', '1.2.3.4', 1024)],
                         {'name': 'base:send:udp:1.2.3.4:1024',
                          'ipv4': '1.2.3.4',
                          'port': 1024,
                          'protocol': 'udp',
                          'send-size': 50,
                          'wait': 5,
                          'interval': 10})
        self.mock_c_write_configuration.assert_called_once_with()
        self.mock_c_write_configuration.reset_mock()
        self.c.add_speaker_spec('http', 1024, '1.2.3.4', send_size=50)
        self.assertIn(('http', '1.2.3.4', 1024), self.c._speakers)
        self.assertEqual(self.c._speakers[('http', '1.2.3.4', 1024)],
                         {'name': 'base:request:http:1.2.3.4:1024',
                          'url': 'http://1.2.3.4:1024/{uuid}',
                          'protocol': 'http',
                          'wait': 5,
                          'interval': 10})
        self.mock_c_write_configuration.assert_called_once_with()
        self.mock_c_write_configuration.reset_mock()
        with self.assertRaises(AssertionError):
            self.c.add_speaker_spec('thing', 1024, '1.2.3.4', send_size=50)
    def test__get_remote_address(self):
        """The peer address comes from the remote instance's listener."""
        mock_instance = mock.Mock()
        mock_instance._listeners = {('udp', 1024): {'ipv4': '1.2.3.4'}}
        self.assertEqual(
            self.c._get_remote_address(mock_instance, 'udp', 1024), '1.2.3.4')
    def test__conncheck_home_dir(self):
        """The home dir is looked up once and then cached."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.assertEqual(self.c._conncheck_home_dir, '/some/dir')
        self.mock_user_directory.assert_called_once_with(
            None, 'conncheck')
        self.mock_user_directory.reset_mock()
        # check property caches
        self.assertEqual(self.c._conncheck_home_dir, '/some/dir')
        self.mock_user_directory.assert_not_called()
    def test_install_no_user_relative_homedir(self):
        """install() creates the user when missing and uses its homedir."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch('zaza.utilities.installers.user_exists',
                   name='mock_user_exists')
        self.patch('zaza.utilities.installers.create_user',
                   name='mock_create_user')
        self.mock_create_user.return_value = '/home/conncheck'
        self.patch('zaza.utilities.installers.install_module_in_venv',
                   name='mock_install_module_in_venv')
        self.patch('zaza.utilities.installers.SystemdControl',
                   name='mock_SystemdControl')
        mock__systemd = mock.Mock()
        self.mock_SystemdControl.return_value = mock__systemd
        self.c._ssh_fn = 'ssh-fn'
        self.c._scp_fn = 'scp-fn'
        self.mock_user_exists.return_value = False
        self.c.install()
        self.mock_user_exists.assert_called_once_with('ssh-fn', 'conncheck')
        self.mock_create_user.assert_called_once_with('ssh-fn', 'conncheck')
        self.mock_install_module_in_venv.assert_called_once_with(
            '/some/source', '/home/conncheck/.', 'scp-fn', 'ssh-fn',
            run_user='conncheck')
        mock__systemd.install.assert_called_once_with()
        self.assertTrue(self.c._installed)
    def test_install_user_exists_absolute_homedir(self):
        """install() skips user creation and honours an absolute dir."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch('zaza.utilities.installers.user_exists',
                   name='mock_user_exists')
        self.patch('zaza.utilities.installers.create_user',
                   name='mock_create_user')
        self.mock_create_user.return_value = '/home/conncheck'
        self.patch('zaza.utilities.installers.install_module_in_venv',
                   name='mock_install_module_in_venv')
        self.patch('zaza.utilities.installers.SystemdControl',
                   name='mock_SystemdControl')
        mock__systemd = mock.Mock()
        self.mock_SystemdControl.return_value = mock__systemd
        self.c._ssh_fn = 'ssh-fn'
        self.c._scp_fn = 'scp-fn'
        self.mock_user_exists.return_value = True
        self.c.install_dir = '/fixed'
        self.c.install()
        self.mock_user_exists.assert_called_once_with('ssh-fn', 'conncheck')
        self.mock_create_user.assert_not_called()
        self.mock_install_module_in_venv.assert_called_once_with(
            '/some/source', '/fixed', 'scp-fn', 'ssh-fn',
            run_user='conncheck')
        mock__systemd.install.assert_called_once_with()
        self.assertTrue(self.c._installed)
    def test__verify_systemd_not_none(self):
        """The guard passes when _systemd is set and asserts when None."""
        self.c._systemd = 'thing'
        self.c._verify_systemd_not_none()
        self.c._systemd = None
        with self.assertRaises(AssertionError):
            self.c._verify_systemd_not_none()
    def test_remote_log_filename_property(self):
        """Remote log lives in the conncheck user's home directory."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.assertEqual(self.c.remote_log_filename, '/some/dir/conncheck.log')
    def test_local_log_filename_property(self):
        """Base class local_log_filename is abstract."""
        with self.assertRaises(NotImplementedError):
            self.c.local_log_filename
    def test_get_logfile_to_local(self):
        """The remote log is scp'd down into the given directory."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        mock_scp_fn = mock.Mock()
        self.c._scp_fn = mock_scp_fn
        with mock.patch.object(
                conncheck.ConnCheckInstanceBase, 'local_log_filename',
                new_callable=mock.PropertyMock) as mock_local_log_filename:
            mock_local_log_filename.return_value = 'some-filename'
            self.assertEqual(self.c.get_logfile_to_local('/a/dir'),
                             '/a/dir/some-filename')
            mock_scp_fn.assert_called_once_with('/some/dir/conncheck.log',
                                                '/a/dir/some-filename',
                                                copy_from=True)
    def test_write_configuration_not_installed_not_running(self):
        """First write installs, uploads config, but doesn't restart."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch_object(self.c, 'install', name='mock_c_install')
        self.patch_object(self.c, 'is_running', name='mock_c_is_running')
        self.mock_c_is_running.return_value = False
        self.patch_object(self.c, 'restart', name='mock_c_restart')
        mock_scp_fn = mock.Mock()
        self.c._scp_fn = mock_scp_fn
        mock_ssh_fn = mock.Mock()
        self.c._ssh_fn = mock_ssh_fn
        self.patch('yaml.dump', name='mock_yaml_dump')
        self.patch('tempfile.TemporaryDirectory',
                   name='mock_TemporaryDirectory')
        mock_td = mock.MagicMock()
        mock_td.__enter__.return_value = '/target'
        self.mock_TemporaryDirectory.return_value = mock_td
        with tests_utils.patch_open() as (mock_open, mock_file):
            self.c.write_configuration()
        self.mock_c_install.assert_called_once_with()
        mock_open.assert_called_once_with('/target/config.yaml', 'wt')
        expected_config = {
            'name': 'base',
            'file-log-path': '/some/dir/conncheck.log',
            'collection': 'a-collection',
            'log-format': 'InfluxDB',
            'listeners': [],
            'speakers': []
        }
        self.mock_yaml_dump.assert_called_once_with(expected_config, mock_file)
        mock_scp_fn.assert_called_once_with('/target/config.yaml',
                                            'config.yaml')
        mock_ssh_fn.assert_called_once_with(
            ['sudo', 'mv', 'config.yaml', '/some/dir/config.yaml'])
        self.mock_c_is_running.assert_called_once_with()
        self.mock_c_restart.assert_not_called()
    def test_write_configuration_installed_and_running(self):
        """A re-write skips install and restarts the running service."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch_object(self.c, 'install', name='mock_c_install')
        self.patch_object(self.c, 'is_running', name='mock_c_is_running')
        self.mock_c_is_running.return_value = True
        self.patch_object(self.c, 'restart', name='mock_c_restart')
        mock_scp_fn = mock.Mock()
        self.c._scp_fn = mock_scp_fn
        mock_ssh_fn = mock.Mock()
        self.c._ssh_fn = mock_ssh_fn
        self.patch('yaml.dump', name='mock_yaml_dump')
        self.patch('tempfile.TemporaryDirectory',
                   name='mock_TemporaryDirectory')
        mock_td = mock.MagicMock()
        mock_td.__enter__.return_value = '/target'
        self.mock_TemporaryDirectory.return_value = mock_td
        self.c._installed = True
        with tests_utils.patch_open() as (mock_open, mock_file):
            self.c.write_configuration()
        self.mock_c_install.assert_not_called()
        mock_open.assert_called_once_with('/target/config.yaml', 'wt')
        expected_config = {
            'name': 'base',
            'file-log-path': '/some/dir/conncheck.log',
            'collection': 'a-collection',
            'log-format': 'InfluxDB',
            'listeners': [],
            'speakers': []
        }
        self.mock_yaml_dump.assert_called_once_with(expected_config, mock_file)
        mock_scp_fn.assert_called_once_with('/target/config.yaml',
                                            'config.yaml')
        mock_ssh_fn.assert_called_once_with(
            ['sudo', 'mv', 'config.yaml', '/some/dir/config.yaml'])
        self.mock_c_is_running.assert_called_once_with()
        self.mock_c_restart.assert_called_once_with()
    def test_is_running(self):
        """is_running() guards on systemd and delegates to it."""
        self.patch_object(self.c, '_verify_systemd_not_none',
                          name='mock__verify_systemd_not_none')
        mock__systemd = mock.Mock()
        mock__systemd.is_running.return_value = False
        self.c._systemd = mock__systemd
        self.assertFalse(self.c.is_running())
        self.mock__verify_systemd_not_none.assert_called_once_with()
        mock__systemd.is_running.assert_called_once_with()
    def test_start(self):
        """start() guards on systemd and delegates to it."""
        self.patch_object(self.c, '_verify_systemd_not_none',
                          name='mock__verify_systemd_not_none')
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.c.start()
        self.mock__verify_systemd_not_none.assert_called_once_with()
        mock__systemd.start.assert_called_once_with()
    def test_stop(self):
        """stop() only logs when systemd is unset, else delegates."""
        self.patch_object(conncheck, 'logger', name='mock_logger')
        self.c._systemd = None
        self.c.stop()
        self.mock_logger.debug.assert_called_once_with(mock.ANY, self.c)
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.mock_logger.reset_mock()
        self.c.stop()
        mock__systemd.stop.assert_called_once_with()
    def test_restart(self):
        """restart() guards on systemd and delegates to it."""
        self.patch_object(self.c, '_verify_systemd_not_none',
                          name='mock__verify_systemd_not_none')
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.c.restart()
        self.mock__verify_systemd_not_none.assert_called_once_with()
        mock__systemd.restart.assert_called_once_with()
    def test_finalise(self):
        """finalise() stops and disables only when installed."""
        self.c._installed = False
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.patch_object(self.c, 'stop', name='mock_c_stop')
        self.c.finalise()
        self.mock_c_stop.assert_not_called()
        mock__systemd.disable.assert_not_called()
        self.c._installed = True
        self.c.finalise()
        self.mock_c_stop.assert_called_once_with()
        mock__systemd.disable.assert_called_once_with()
    def test_clean_up(self):
        """clean_up() additionally removes the unit when installed."""
        self.c._installed = False
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.patch_object(self.c, 'stop', name='mock_c_stop')
        self.c.clean_up()
        self.mock_c_stop.assert_not_called()
        mock__systemd.disable.assert_not_called()
        mock__systemd.remove.assert_not_called()
        self.c._installed = True
        self.c.clean_up()
        self.mock_c_stop.assert_called_once_with()
        mock__systemd.disable.assert_called_once_with()
        mock__systemd.remove.assert_called_once_with()
class TestConnCheckInstanceJuju(tests_utils.BaseTestCase):
def setUp(self):
super().setUp()
self.patch('zaza.utilities.installers.make_juju_ssh_fn',
name='mock_make_juju_ssh_fn')
self.mock_ssh_fn = mock.Mock()
self.mock_make_juju_ssh_fn = self.mock_ssh_fn
self.patch('zaza.utilities.installers.make_juju_scp_fn',
name='mock_make_juju_scp_fn')
self.mock_scp_fn = mock.Mock()
self.mock_make_juju_scp_fn = self.mock_scp_fn
self.c = conncheck.ConnCheckInstanceJuju(
'0',
model='some-model',
user='a-user',
module_source='/some/source',
collection='a-collection')
def test_init(self):
c = conncheck.ConnCheckInstanceJuju(
'0/lxd/15',
log_format=conncheck.LogFormats.CSV,
config_file='thing.yaml',
install_dir='/opt',
module_source='/some/other/source',
install_user='a-user')
self.assertEqual(c.machine_or_unit_spec, '0/lxd/15')
self.assertEqual(c.name, '0/lxd/15')
self.assertEqual(c.log_format, conncheck.LogFormats.CSV)
self.assertEqual(c.config_file, 'thing.yaml')
self.assertEqual(c.install_dir, '/opt')
self.assertEqual(c.module_source, '/some/other/source')
self.assertEqual(c.install_user, 'a-user')
self.assertEqual(self.c.machine_or_unit_spec, '0')
self.assertEqual(self.c.name, '0')
self.assertEqual(self.c.log_format, conncheck.LogFormats.InfluxDB)
self.assertEqual(self.c.config_file, 'config.yaml')
self.assertEqual(self.c.install_dir, '.')
self.assertEqual(self.c.module_source, '/some/source')
self.assertEqual(self.c.install_user, 'conncheck')
def test_local_log_filename(self):
self.assertEqual(self.c.local_log_filename, '0.log')
self.c.machine_or_unit_spec = '0/lxd/15'
self.assertEqual(self.c.local_log_filename, '0_lxd_15.log')
def test__validate_spec(self):
MACHINE = self.c.JujuTypes.MACHINE
UNIT = self.c.JujuTypes.UNIT
valid_specs = (('0', MACHINE),
('9', MACHINE),
('15', MACHINE),
('0/lxd/10', MACHINE),
('1/LXD/4', MACHINE),
('some-unit-0/14', UNIT),
('other/23', UNIT))
invalid_specs = ('b', '1/spec/2', 'other-unit', 'd/10/10')
for spec, type_ in valid_specs:
self.c.machine_or_unit_spec = spec
self.c._validate_spec()
self.assertEqual(self.c._juju_type, type_)
for spec in invalid_specs:
self.c.machine_or_unit_spec = spec
with self.assertRaises(ValueError):
self.c._validate_spec()
def test_add_listener(self):
self.patch_object(self.c, '_validate_not_existing_listener',
name='mock__validate_not_existing_listener')
self.patch_object(self.c, '_get_address', name='mock__get_address')
self.mock__get_address.return_value = '1.2.3.4'
self.patch_object(self.c, 'add_listener_spec',
name='mock_add_listener_spec')
self.c.add_listener('udp', 1024, space='default', cidr='cidr')
self.mock__validate_not_existing_listener.assert_called_once_with(
'udp', 1024)
self.mock__get_address('default', 'cidr')
self.mock_add_listener_spec.assert_called_once_with(
'udp', 1024, '1.2.3.4', reply_size=1024)
def test__get_address(self):
self.patch_object(self.c, '_get_address_unit',
name='mock__get_address_unit')
self.mock__get_address_unit.return_value = '1.2.3.4'
self.patch_object(self.c, '_get_address_machine',
name='mock__get_address_machine')
self.mock__get_address_machine.return_value = '5.6.7.8'
self.c._juju_type = self.c.JujuTypes.UNIT
self.assertEqual(self.c._get_address(None, 'cidr'), '1.2.3.4')
self.mock__get_address_unit.assert_called_once_with(
'juju-info', 'cidr')
self.mock__get_address_unit.reset_mock()
self.c._juju_type = self.c.JujuTypes.MACHINE
self.assertEqual(self.c._get_address(None, 'cidr'), '5.6.7.8')
self.mock__get_address_machine.assert_called_once_with('cidr')
self.c._juju_type = None
with self.assertRaises(RuntimeError):
self.c._get_address(None, 'cidr')
def test__get_address_unit_single_address(self):
self.patch('subprocess.check_output', name='mock_check_output')
self.patch_object(conncheck, 'logger', name='mock_logger')
self.patch('yaml.safe_load', name='mock_yaml_safe_load')
self.mock_check_output.return_value = b'1.2.3.4'
self.mock_yaml_safe_load.return_value = '1.2.3.4\n'
self.assertEqual(self.c._get_address_unit('a-space', 'a-cidr'),
'1.2.3.4')
self.mock_check_output.assert_called_once_with(
['juju', 'run', '-u', '0', '--', 'network-get', '--format',
'yaml', '--bind-address', 'a-space'])
self.mock_yaml_safe_load.assert_called_once_with('1.2.3.4')
def test__get_address_unit_multiple_address(self):
    """Multiple candidate addresses are not yet supported."""
    self.patch('subprocess.check_output', name='mock_check_output')
    self.patch_object(conncheck, 'logger', name='mock_logger')
    self.patch('yaml.safe_load', name='mock_yaml_safe_load')
    self.mock_check_output.return_value = b'1.2.3.4'
    # A list (rather than a single scalar) from network-get triggers
    # NotImplementedError.
    self.mock_yaml_safe_load.return_value = ['1.2.3.4', '5.6.7.8']
    with self.assertRaises(NotImplementedError):
        self.c._get_address_unit('a-space', 'a-cidr')
def test__get_address_unit_network_get_fails(self):
    """A failing 'juju run ... network-get' propagates the error."""
    self.patch('subprocess.check_output', name='mock_check_output')
    self.patch_object(conncheck, 'logger', name='mock_logger')
    self.patch('yaml.safe_load', name='mock_yaml_safe_load')
    self.mock_check_output.return_value = b'1.2.3.4'

    # Simulate the juju command failing at the subprocess level.
    def raise_(*args):
        raise subprocess.CalledProcessError(cmd='bang', returncode=1)

    self.mock_check_output.side_effect = raise_
    with self.assertRaises(subprocess.CalledProcessError):
        self.c._get_address_unit('a-space', 'a-cidr')
def test__get_address_machine(self):
    """_get_address_machine() is currently unimplemented."""
    self.assertRaises(NotImplementedError, self.c._get_address_machine)
class TestConnCheckInstanceSSH(tests_utils.BaseTestCase):
    """Tests for ConnCheckInstanceSSH (SSH-addressed conncheck instance)."""

    def setUp(self):
        super().setUp()
        self.patch('zaza.utilities.installers.make_ssh_fn',
                   name='mock_make_ssh_fn')
        self.mock_ssh_fn = mock.Mock()
        # NOTE(review): this rebinds the attribute holding the patched
        # make_ssh_fn mock rather than configuring its return_value;
        # presumably 'self.mock_make_ssh_fn.return_value = self.mock_ssh_fn'
        # was intended -- confirm against the installers API usage.
        self.mock_make_ssh_fn = self.mock_ssh_fn
        self.patch('zaza.utilities.installers.make_scp_fn',
                   name='mock_make_scp_fn')
        self.mock_scp_fn = mock.Mock()
        # NOTE(review): same rebinding pattern as make_ssh_fn above.
        self.mock_make_scp_fn = self.mock_scp_fn
        self.c = conncheck.ConnCheckInstanceSSH(
            address='1.2.3.4',
            key_file='a-file',
            user='a-user',
            module_source='/some/source',
            collection='a-collection')

    def test_init(self):
        """Constructor stores explicit kwargs; defaults fill the rest."""
        c = conncheck.ConnCheckInstanceSSH(
            '5.6.7.8',
            'my-key-file',
            log_format=conncheck.LogFormats.CSV,
            config_file='thing.yaml',
            install_dir='/opt',
            module_source='/some/other/source',
            install_user='a-user')
        self.assertEqual(c.address, '5.6.7.8')
        self.assertEqual(c.key_file, 'my-key-file')
        # The instance name defaults to its address.
        self.assertEqual(c.name, '5.6.7.8')
        self.assertEqual(c.log_format, conncheck.LogFormats.CSV)
        self.assertEqual(c.config_file, 'thing.yaml')
        self.assertEqual(c.install_dir, '/opt')
        self.assertEqual(c.module_source, '/some/other/source')
        self.assertEqual(c.install_user, 'a-user')
        # Defaults for the setUp() instance: InfluxDB log format,
        # 'config.yaml', install dir '.', and the 'conncheck' user.
        self.assertEqual(self.c.address, '1.2.3.4')
        self.assertEqual(self.c.key_file, 'a-file')
        self.assertEqual(self.c.name, '1.2.3.4')
        self.assertEqual(self.c.log_format, conncheck.LogFormats.InfluxDB)
        self.assertEqual(self.c.config_file, 'config.yaml')
        self.assertEqual(self.c.install_dir, '.')
        self.assertEqual(self.c.module_source, '/some/source')
        self.assertEqual(self.c.install_user, 'conncheck')

    def test_local_log_filename(self):
        """'@', '.' etc. in the address are sanitised for the log name."""
        self.c.address = 'user@1.2.3.4'
        self.assertEqual(self.c.local_log_filename, 'user_1-2-3-4.log')

    def test_add_listener(self):
        """add_listener() defaults the bind address to 0.0.0.0."""
        self.patch_object(self.c, '_validate_not_existing_listener',
                          name='mock__validate_not_existing_listener')
        self.patch_object(self.c, 'add_listener_spec',
                          name='mock_add_listener_spec')
        self.c.add_listener('udp', 1024)
        self.mock__validate_not_existing_listener.assert_called_once_with(
            'udp', 1024)
        self.mock_add_listener_spec.assert_called_once_with(
            'udp', 1024, '0.0.0.0', reply_size=1024)
| 43.203191 | 79 | 0.641698 |
import mock
import subprocess
import unit_tests.utils as tests_utils
import zaza.events.plugins.conncheck as conncheck
class TestAutoConfigureFunction(tests_utils.BaseTestCase):
    """Tests for conncheck.auto_configure_with_collection()."""

    def setUp(self):
        super().setUp()
        self.patch_object(conncheck, 'logger', name='mock_logger')
        self.patch_object(
            conncheck, 'get_plugin_manager', name='mock_get_plugin_manager')
        self.mock_collection = mock.Mock()
        self.mock_conncheck_manager = mock.Mock()

    def test_autoconfigure_no_config(self):
        """Without config the DEFAULT manager is added to the collection."""
        self.mock_get_plugin_manager.return_value = self.mock_conncheck_manager
        conncheck.auto_configure_with_collection(self.mock_collection)
        self.mock_get_plugin_manager.assert_called_once_with('DEFAULT')
        self.mock_collection.add_logging_manager.assert_called_once_with(
            self.mock_conncheck_manager)

    def test_autoconfigure_with_config(self):
        """Config supplies the manager name and module source."""
        self.mock_get_plugin_manager.return_value = self.mock_conncheck_manager
        config = {
            'manager-name': 'a-manager',
            'source': 'a-source',
        }
        conncheck.auto_configure_with_collection(self.mock_collection,
                                                 config=config)
        self.mock_get_plugin_manager.assert_called_once_with('a-manager')
        self.mock_collection.add_logging_manager.assert_called_once_with(
            self.mock_conncheck_manager)
        # 'source' from the config is passed through as module_source.
        self.mock_conncheck_manager.configure.assert_called_once_with(
            module_source='a-source')
class TestGetConncheckManager(tests_utils.BaseTestCase):
    """Tests for conncheck.get_conncheck_manager()."""

    def test_get_conncheck_manager(self):
        """The manager name is read from options, then resolved."""
        self.patch_object(conncheck, 'get_option', name='mock_get_option')
        self.mock_get_option.return_value = 'a-name'
        self.patch_object(conncheck, 'get_plugin_manager',
                          name='mock_get_plugin_manager')
        self.mock_get_plugin_manager.return_value = 'a-manager'
        self.assertEqual(conncheck.get_conncheck_manager(), 'a-manager')
        # 'DEFAULT' is the fallback when the option is unset.
        self.mock_get_option.assert_called_once_with(
            'zaza-events.modules.conncheck.manager-name', 'DEFAULT')
        self.mock_get_plugin_manager.assert_called_once_with('a-name')
class TestGetPluginManager(tests_utils.BaseTestCase):
    """Tests for conncheck.get_plugin_manager() and its instance cache."""

    def test_get_plugin_manager(self):
        """Default call creates a manager named 'DEFAULT'."""
        self.patch_object(conncheck, '_conncheck_plugin_managers', new={})
        self.patch_object(conncheck, 'ConnCheckPluginManager',
                          name='mock_ConnCheckPluginManager')
        self.mock_ConnCheckPluginManager.return_value = 'a-manager'
        self.assertEqual(conncheck.get_plugin_manager(), 'a-manager')
        self.mock_ConnCheckPluginManager.assert_called_once_with(
            managed_name='DEFAULT')

    def test_get_plugin_manager_non_default(self):
        """An explicit name is passed through as managed_name."""
        self.patch_object(conncheck, '_conncheck_plugin_managers', new={})
        self.patch_object(conncheck, 'ConnCheckPluginManager',
                          name='mock_ConnCheckPluginManager')
        self.mock_ConnCheckPluginManager.return_value = 'a-manager'
        self.assertEqual(conncheck.get_plugin_manager('a-name'), 'a-manager')
        self.mock_ConnCheckPluginManager.assert_called_once_with(
            managed_name='a-name')

    def test_get_plugin_manager_check_caches(self):
        """A cached manager is returned without constructing a new one."""
        self.patch_object(conncheck, '_conncheck_plugin_managers', new={},
                          name='mock__conncheck_plugin_managers')
        self.mock__conncheck_plugin_managers['a-name'] = 'a-manager'
        self.patch_object(conncheck, 'ConnCheckPluginManager',
                          name='mock_ConnCheckPluginManager')
        self.mock_ConnCheckPluginManager.return_value = 'the-manager'
        # Cache hit: the pre-seeded value wins; no construction happens.
        self.assertEqual(conncheck.get_plugin_manager('a-name'), 'a-manager')
        self.mock_ConnCheckPluginManager.assert_not_called()
class TestConnCheckPluginManager(tests_utils.BaseTestCase):
    """Tests for ConnCheckPluginManager.

    The underlying ConnCheckManager is patched out, so these tests verify
    only that the plugin manager delegates to it correctly.
    """

    def setUp(self):
        super().setUp()
        self.patch_object(conncheck, 'ConnCheckManager',
                          name='mock_ConnCheckManager')
        self.mock_conncheck_manager = mock.Mock()
        self.mock_ConnCheckManager.return_value = self.mock_conncheck_manager
        # A stand-in events collection providing the attributes that
        # configure_plugin() reads.
        self.mock_collection_object = mock.Mock()
        self.mock_collection_object.logs_dir = "a-logs-dir"
        self.mock_collection_object.log_format = conncheck.LogFormats.InfluxDB
        self.mock_collection_object.collection = 'a-collection'

    def test_init(self):
        """managed_name defaults to 'DEFAULT' and is overridable."""
        cpm = conncheck.ConnCheckPluginManager()
        self.assertEqual(cpm.managed_name, 'DEFAULT')
        self.assertEqual(cpm._conncheck_manager, self.mock_conncheck_manager)
        cpm = conncheck.ConnCheckPluginManager(managed_name='a-manager')
        self.assertEqual(cpm.managed_name, 'a-manager')

    def test_configure(self):
        """configure() chains into configure_plugin()."""
        cpm = conncheck.ConnCheckPluginManager()
        self.patch_object(
            cpm, 'configure_plugin', name='mock_cpm_configure_plugin')
        cpm.configure(collection_object=self.mock_collection_object)
        self.mock_cpm_configure_plugin.assert_called_once_with()

    def test_configure_plugin(self):
        """Collection attributes and ctor kwargs reach the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.configure(collection_object=self.mock_collection_object)
        self.mock_conncheck_manager.configure.assert_called_once_with(
            collection='a-collection',
            logs_dir='a-logs-dir',
            module_source='a-source',
            tags='abc')

    def test_manager_property(self):
        """.manager asserts that a manager is present."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        self.assertEqual(cpm.manager, self.mock_conncheck_manager)
        cpm._conncheck_manager = None
        with self.assertRaises(AssertionError):
            cpm.manager

    def test_add_instance(self):
        """add_instance() delegates spec and kwargs to the manager."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.add_instance('a-spec', this='that')
        self.mock_conncheck_manager.add_instance.assert_called_once_with(
            'a-spec', this='that')

    def test_get_instance(self):
        """get_instance() returns whatever the manager resolves."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        self.mock_conncheck_manager.get_instance.return_value = 'an-instance'
        self.assertEqual(cpm.get_instance('a-spec'), 'an-instance')
        self.mock_conncheck_manager.get_instance.assert_called_once_with(
            'a-spec')

    def test_start(self):
        """start() is forwarded with the spec."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.start('a-spec')
        self.mock_conncheck_manager.start.assert_called_once_with('a-spec')

    def test_stop(self):
        """stop() is forwarded with the spec."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.stop('a-spec')
        self.mock_conncheck_manager.stop.assert_called_once_with('a-spec')

    def test_finalise(self):
        """finalise() is forwarded."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.finalise()
        self.mock_conncheck_manager.finalise.assert_called_once_with()

    def test_log_files(self):
        """log_files() is forwarded."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.log_files()
        self.mock_conncheck_manager.log_files.assert_called_once_with()

    def test_clean_up(self):
        """clean_up() is forwarded."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.clean_up()
        self.mock_conncheck_manager.clean_up.assert_called_once_with()

    def test_reset(self):
        """reset() cleans up and drops the manager reference."""
        cpm = conncheck.ConnCheckPluginManager(
            module_source='a-source', tags='abc')
        cpm.reset()
        self.mock_conncheck_manager.clean_up.assert_called_once_with()
        self.assertIsNone(cpm._conncheck_manager)
class TestConnCheckManager(tests_utils.BaseTestCase):
    """Tests for ConnCheckManager (instance registry and lifecycle)."""

    def setUp(self):
        super().setUp()
        self.c = conncheck.ConnCheckManager(
            collection='a-collection',
            logs_dir='/some/dir',
            tags=['tag1'])

    def test_init(self):
        """Constructor stores collection, logs_dir and tags."""
        self.assertEqual(self.c.collection, 'a-collection')
        self.assertEqual(self.c.logs_dir, '/some/dir')
        self.assertEqual(self.c.tags, ['tag1'])

    def test_add_instance(self):
        """add_instance() builds and registers; duplicate specs error."""
        self.patch_object(self.c, 'make_instance_with',
                          name='mock_make_instance_with')
        self.mock_make_instance_with.return_value = 'an-instance'
        self.c.add_instance('juju:0', this='that', some='thing')
        # module_source and collection are injected automatically.
        self.mock_make_instance_with.assert_called_once_with(
            'juju:0', this='that', some='thing', module_source='conncheck',
            collection='a-collection')
        self.assertIn('juju:0', self.c._instances)
        self.assertEqual(self.c._instances['juju:0'], 'an-instance')
        # Re-adding the same spec is rejected.
        with self.assertRaises(RuntimeError):
            self.c.add_instance('juju:0', this='that', some='thing')

    def test_get_instance(self):
        """get_instance() looks up the registered instance by spec."""
        self.c._instances['juju:0'] = 'an-instance'
        self.assertEqual(self.c.get_instance('juju:0'), 'an-instance')

    def test_start(self):
        """start(spec) starts one instance; start() starts them all."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        self.c.start('i1')
        mock_instance1.start.assert_called_once_with()
        mock_instance2.start.assert_not_called()
        mock_instance1.reset_mock()
        self.c.start()
        mock_instance1.start.assert_called_once_with()
        mock_instance2.start.assert_called_once_with()

    def test_stop(self):
        """stop(spec) stops one instance; stop() stops them all."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        self.c.stop('i1')
        mock_instance1.stop.assert_called_once_with()
        mock_instance2.stop.assert_not_called()
        mock_instance1.reset_mock()
        self.c.stop()
        mock_instance1.stop.assert_called_once_with()
        mock_instance2.stop.assert_called_once_with()

    def test_finalise(self):
        """finalise() stops/finalises every instance exactly once."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        self.c.finalise()
        mock_instance1.finalise.assert_called_once_with()
        mock_instance2.finalise.assert_called_once_with()
        mock_instance1.stop.assert_called_once_with()
        mock_instance2.stop.assert_called_once_with()
        mock_instance1.reset_mock()
        mock_instance2.reset_mock()
        # A second finalise() is a no-op.
        self.c.finalise()
        mock_instance1.stop.assert_not_called()
        mock_instance2.stop.assert_not_called()
        mock_instance1.finalise.assert_not_called()
        mock_instance2.finalise.assert_not_called()

    def test_log_files(self):
        """log_files() pulls each log once, then serves cached results."""
        mock_instance1 = mock.Mock()
        mock_instance2 = mock.Mock()
        self.c._instances = {'i1': mock_instance1,
                             'i2': mock_instance2}
        mock_instance1.get_logfile_to_local.return_value = 'i1.log'
        mock_instance1.log_format = 'f'
        mock_instance2.get_logfile_to_local.return_value = 'i2.log'
        mock_instance2.log_format = 'f'
        log_specs = list(self.c.log_files())
        # Logs are finalised and copied into the manager's logs_dir.
        mock_instance1.finalise.assert_called_once_with()
        mock_instance2.finalise.assert_called_once_with()
        mock_instance1.get_logfile_to_local.assert_called_once_with(
            '/some/dir')
        mock_instance2.get_logfile_to_local.assert_called_once_with(
            '/some/dir')
        self.assertEqual(
            log_specs,
            [('i1', 'f', 'i1.log'),
             ('i2', 'f', 'i2.log')])
        mock_instance1.get_logfile_to_local.reset_mock()
        mock_instance2.get_logfile_to_local.reset_mock()
        # Second call: same result from cache, no new copies.
        log_specs = list(self.c.log_files())
        mock_instance1.get_logfile_to_local.assert_not_called()
        mock_instance2.get_logfile_to_local.assert_not_called()
        self.assertEqual(
            log_specs,
            [('i1', 'f', 'i1.log'),
             ('i2', 'f', 'i2.log')])

    def test_clean_up(self):
        """clean_up() finalises the manager."""
        self.patch_object(self.c, 'finalise', name='mock_finalise')
        self.c.clean_up()
        self.mock_finalise.assert_called_once_with()

    def test_register_spec_handler(self):
        """Handlers register per prefix; re-registering a prefix errors."""
        self.patch_object(conncheck.ConnCheckManager,
                          '_spec_handlers',
                          name='mock_cls__spec_handlers',
                          new={})

        def handler():
            pass

        conncheck.ConnCheckManager.register_spec_handler('juju', handler)
        self.assertIn('juju', conncheck.ConnCheckManager._spec_handlers)
        self.assertEqual(conncheck.ConnCheckManager._spec_handlers['juju'],
                         handler)
        with self.assertRaises(RuntimeError):
            conncheck.ConnCheckManager.register_spec_handler('juju', handler)

    def test_make_instance_with(self):
        """Specs are 'prefix:rest'; the prefix selects the handler."""
        mock_handler = mock.Mock()
        mock_handler.return_value = 'an-instance'
        self.patch_object(conncheck.ConnCheckManager,
                          '_spec_handlers',
                          name='mock_cls__spec_handlers',
                          new={})
        conncheck.ConnCheckManager.register_spec_handler('juju', mock_handler)
        # first check for ':' in spec
        with self.assertRaises(ValueError):
            self.c.make_instance_with('i')
        # Now check for unhandled spec
        with self.assertRaises(KeyError):
            self.c.make_instance_with('some:thing')
        # finally make one with juju
        self.assertEqual(
            self.c.make_instance_with('juju:0', this='that', some='thing'),
            'an-instance')
        # The handler receives only the part after the prefix.
        mock_handler.assert_called_once_with('0', this='that', some='thing')
class TestConnCheckInstanceBase(tests_utils.BaseTestCase):
    """Tests for ConnCheckInstanceBase (shared instance behaviour)."""

    def setUp(self):
        super().setUp()
        self.c = conncheck.ConnCheckInstanceBase(
            name='base',
            module_source='/some/source',
            collection='a-collection')

    def test_init(self):
        """Constructor stores explicit kwargs; defaults fill the rest."""
        c = conncheck.ConnCheckInstanceBase(
            name='a-name',
            log_format=conncheck.LogFormats.CSV,
            config_file='thing.yaml',
            install_dir='/opt',
            module_source='/some/other/source',
            install_user='a-user')
        self.assertEqual(c.name, 'a-name')
        self.assertEqual(c.log_format, conncheck.LogFormats.CSV)
        self.assertEqual(c.config_file, 'thing.yaml')
        self.assertEqual(c.install_dir, '/opt')
        self.assertEqual(c.module_source, '/some/other/source')
        self.assertEqual(c.install_user, 'a-user')
        # Defaults for the setUp() instance: InfluxDB format,
        # 'config.yaml', install dir '.', user 'conncheck'.
        self.assertEqual(self.c.name, 'base')
        self.assertEqual(self.c.log_format, conncheck.LogFormats.InfluxDB)
        self.assertEqual(self.c.config_file, 'config.yaml')
        self.assertEqual(self.c.install_dir, '.')
        self.assertEqual(self.c.module_source, '/some/source')
        self.assertEqual(self.c.install_user, 'conncheck')

    def test__validate_not_existing_listener(self):
        """Unknown protocols and duplicate (proto, port) pairs fail."""
        with self.assertRaises(AssertionError):
            self.c._validate_not_existing_listener('thing', 1024)
        self.c._validate_not_existing_listener('udp', 1024)
        self.c._listeners = {('udp', 1024): None}
        with self.assertRaises(RuntimeError):
            self.c._validate_not_existing_listener('udp', 1024)
        # A different port on the same protocol is fine.
        self.c._validate_not_existing_listener('udp', 1023)

    def test_add_listener(self):
        """The base class does not implement add_listener()."""
        with self.assertRaises(NotImplementedError):
            self.c.add_listener()

    def test_add_listener_spec(self):
        """A listener spec is recorded and the config is rewritten."""
        self.patch_object(self.c, 'write_configuration',
                          name='mock_c_write_configuration')
        self.c.add_listener_spec('udp', 1024, '0.0.0.0', reply_size=50)
        self.assertIn(('udp', 1024), self.c._listeners)
        self.assertEqual(self.c._listeners[('udp', 1024)],
                         {'name': 'base:listen:udp:0.0.0.0:1024',
                          'ipv4': '0.0.0.0',
                          'port': 1024,
                          'protocol': 'udp',
                          'reply-size': 50})
        self.mock_c_write_configuration.assert_called_once_with()

    def test_add_speaker(self):
        """add_speaker() resolves the peer address, then adds the spec."""
        self.patch_object(self.c, '_get_remote_address',
                          name='mock__get_remote_address')
        self.mock__get_remote_address.return_value = '1.2.3.4'
        self.patch_object(self.c, 'add_speaker_spec',
                          name='mock_add_speaker_spec')
        self.c.add_speaker('udp', 1024, instance='an-instance', address=None,
                           wait=10, interval=20, send_size=5)
        self.mock__get_remote_address.assert_called_once_with(
            'an-instance', 'udp', 1024)
        self.mock_add_speaker_spec.assert_called_once_with(
            'udp', 1024, '1.2.3.4', wait=10, interval=20, send_size=5)

    def test__validate_not_existing_speaker(self):
        """Unknown protocols and duplicate (proto, addr, port) fail."""
        with self.assertRaises(AssertionError):
            self.c._validate_not_existing_speaker('thing', '1.2.3.4', 1024)
        self.c._validate_not_existing_speaker('udp', '1.2.3.4', 1024)
        self.c._speakers = {('udp', '1.2.3.4', 1024): None}
        with self.assertRaises(RuntimeError):
            self.c._validate_not_existing_speaker('udp', '1.2.3.4', 1024)
        self.c._validate_not_existing_speaker('udp', '1.2.3.4', 1023)

    def test_add_speaker_spec(self):
        """udp specs get send-size; http specs get a URL template."""
        self.patch_object(self.c, 'write_configuration',
                          name='mock_c_write_configuration')
        self.c.add_speaker_spec('udp', 1024, '1.2.3.4', send_size=50)
        self.assertIn(('udp', '1.2.3.4', 1024), self.c._speakers)
        self.assertEqual(self.c._speakers[('udp', '1.2.3.4', 1024)],
                         {'name': 'base:send:udp:1.2.3.4:1024',
                          'ipv4': '1.2.3.4',
                          'port': 1024,
                          'protocol': 'udp',
                          'send-size': 50,
                          'wait': 5,
                          'interval': 10})
        self.mock_c_write_configuration.assert_called_once_with()
        self.mock_c_write_configuration.reset_mock()
        # http uses a 'request' spec with a url, and no send-size.
        self.c.add_speaker_spec('http', 1024, '1.2.3.4', send_size=50)
        self.assertIn(('http', '1.2.3.4', 1024), self.c._speakers)
        self.assertEqual(self.c._speakers[('http', '1.2.3.4', 1024)],
                         {'name': 'base:request:http:1.2.3.4:1024',
                          'url': 'http://1.2.3.4:1024/{uuid}',
                          'protocol': 'http',
                          'wait': 5,
                          'interval': 10})
        self.mock_c_write_configuration.assert_called_once_with()
        self.mock_c_write_configuration.reset_mock()
        with self.assertRaises(AssertionError):
            self.c.add_speaker_spec('thing', 1024, '1.2.3.4', send_size=50)

    def test__get_remote_address(self):
        """The remote address comes from the peer's listener entry."""
        mock_instance = mock.Mock()
        mock_instance._listeners = {('udp', 1024): {'ipv4': '1.2.3.4'}}
        self.assertEqual(
            self.c._get_remote_address(mock_instance, 'udp', 1024), '1.2.3.4')

    def test__conncheck_home_dir(self):
        """The home dir is looked up once and then cached."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.assertEqual(self.c._conncheck_home_dir, '/some/dir')
        self.mock_user_directory.assert_called_once_with(
            None, 'conncheck')
        self.mock_user_directory.reset_mock()
        # check property caches
        self.assertEqual(self.c._conncheck_home_dir, '/some/dir')
        self.mock_user_directory.assert_not_called()

    def test_install_no_user_relative_homedir(self):
        """Missing user: created; relative install_dir joins the homedir."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch('zaza.utilities.installers.user_exists',
                   name='mock_user_exists')
        self.patch('zaza.utilities.installers.create_user',
                   name='mock_create_user')
        self.mock_create_user.return_value = '/home/conncheck'
        self.patch('zaza.utilities.installers.install_module_in_venv',
                   name='mock_install_module_in_venv')
        self.patch('zaza.utilities.installers.SystemdControl',
                   name='mock_SystemdControl')
        mock__systemd = mock.Mock()
        self.mock_SystemdControl.return_value = mock__systemd
        self.c._ssh_fn = 'ssh-fn'
        self.c._scp_fn = 'scp-fn'
        self.mock_user_exists.return_value = False
        self.c.install()
        self.mock_user_exists.assert_called_once_with('ssh-fn', 'conncheck')
        self.mock_create_user.assert_called_once_with('ssh-fn', 'conncheck')
        # Relative install dir '.' is resolved under the created homedir.
        self.mock_install_module_in_venv.assert_called_once_with(
            '/some/source', '/home/conncheck/.', 'scp-fn', 'ssh-fn',
            run_user='conncheck')
        mock__systemd.install.assert_called_once_with()
        self.assertTrue(self.c._installed)

    def test_install_user_exists_absolute_homedir(self):
        """Existing user: not recreated; absolute install_dir used as-is."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch('zaza.utilities.installers.user_exists',
                   name='mock_user_exists')
        self.patch('zaza.utilities.installers.create_user',
                   name='mock_create_user')
        self.mock_create_user.return_value = '/home/conncheck'
        self.patch('zaza.utilities.installers.install_module_in_venv',
                   name='mock_install_module_in_venv')
        self.patch('zaza.utilities.installers.SystemdControl',
                   name='mock_SystemdControl')
        mock__systemd = mock.Mock()
        self.mock_SystemdControl.return_value = mock__systemd
        self.c._ssh_fn = 'ssh-fn'
        self.c._scp_fn = 'scp-fn'
        self.mock_user_exists.return_value = True
        self.c.install_dir = '/fixed'
        self.c.install()
        self.mock_user_exists.assert_called_once_with('ssh-fn', 'conncheck')
        self.mock_create_user.assert_not_called()
        self.mock_install_module_in_venv.assert_called_once_with(
            '/some/source', '/fixed', 'scp-fn', 'ssh-fn',
            run_user='conncheck')
        mock__systemd.install.assert_called_once_with()
        self.assertTrue(self.c._installed)

    def test__verify_systemd_not_none(self):
        """_verify_systemd_not_none() asserts _systemd is set."""
        self.c._systemd = 'thing'
        self.c._verify_systemd_not_none()
        self.c._systemd = None
        with self.assertRaises(AssertionError):
            self.c._verify_systemd_not_none()

    def test_remote_log_filename_property(self):
        """The remote log lives in the conncheck user's directory."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.assertEqual(self.c.remote_log_filename, '/some/dir/conncheck.log')

    def test_local_log_filename_property(self):
        """Subclasses must provide local_log_filename."""
        with self.assertRaises(NotImplementedError):
            self.c.local_log_filename

    def test_get_logfile_to_local(self):
        """The remote log is scp'd (copy_from) to the local dir."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        mock_scp_fn = mock.Mock()
        self.c._scp_fn = mock_scp_fn
        with mock.patch.object(
                conncheck.ConnCheckInstanceBase, 'local_log_filename',
                new_callable=mock.PropertyMock) as mock_local_log_filename:
            mock_local_log_filename.return_value = 'some-filename'
            self.assertEqual(self.c.get_logfile_to_local('/a/dir'),
                             '/a/dir/some-filename')
        mock_scp_fn.assert_called_once_with('/some/dir/conncheck.log',
                                            '/a/dir/some-filename',
                                            copy_from=True)

    def test_write_configuration_not_installed_not_running(self):
        """First write installs the module; no restart if not running."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch_object(self.c, 'install', name='mock_c_install')
        self.patch_object(self.c, 'is_running', name='mock_c_is_running')
        self.mock_c_is_running.return_value = False
        self.patch_object(self.c, 'restart', name='mock_c_restart')
        mock_scp_fn = mock.Mock()
        self.c._scp_fn = mock_scp_fn
        mock_ssh_fn = mock.Mock()
        self.c._ssh_fn = mock_ssh_fn
        self.patch('yaml.dump', name='mock_yaml_dump')
        self.patch('tempfile.TemporaryDirectory',
                   name='mock_TemporaryDirectory')
        mock_td = mock.MagicMock()
        mock_td.__enter__.return_value = '/target'
        self.mock_TemporaryDirectory.return_value = mock_td
        with tests_utils.patch_open() as (mock_open, mock_file):
            self.c.write_configuration()
        self.mock_c_install.assert_called_once_with()
        # The config is written locally, scp'd up, then moved into place
        # with sudo on the remote side.
        mock_open.assert_called_once_with('/target/config.yaml', 'wt')
        expected_config = {
            'name': 'base',
            'file-log-path': '/some/dir/conncheck.log',
            'collection': 'a-collection',
            'log-format': 'InfluxDB',
            'listeners': [],
            'speakers': []
        }
        self.mock_yaml_dump.assert_called_once_with(expected_config, mock_file)
        mock_scp_fn.assert_called_once_with('/target/config.yaml',
                                            'config.yaml')
        mock_ssh_fn.assert_called_once_with(
            ['sudo', 'mv', 'config.yaml', '/some/dir/config.yaml'])
        self.mock_c_is_running.assert_called_once_with()
        self.mock_c_restart.assert_not_called()

    def test_write_configuration_installed_and_running(self):
        """Already installed: no reinstall; running service restarts."""
        self.patch('zaza.utilities.installers.user_directory',
                   name='mock_user_directory')
        self.mock_user_directory.return_value = '/some/dir'
        self.patch_object(self.c, 'install', name='mock_c_install')
        self.patch_object(self.c, 'is_running', name='mock_c_is_running')
        self.mock_c_is_running.return_value = True
        self.patch_object(self.c, 'restart', name='mock_c_restart')
        mock_scp_fn = mock.Mock()
        self.c._scp_fn = mock_scp_fn
        mock_ssh_fn = mock.Mock()
        self.c._ssh_fn = mock_ssh_fn
        self.patch('yaml.dump', name='mock_yaml_dump')
        self.patch('tempfile.TemporaryDirectory',
                   name='mock_TemporaryDirectory')
        mock_td = mock.MagicMock()
        mock_td.__enter__.return_value = '/target'
        self.mock_TemporaryDirectory.return_value = mock_td
        self.c._installed = True
        with tests_utils.patch_open() as (mock_open, mock_file):
            self.c.write_configuration()
        self.mock_c_install.assert_not_called()
        mock_open.assert_called_once_with('/target/config.yaml', 'wt')
        expected_config = {
            'name': 'base',
            'file-log-path': '/some/dir/conncheck.log',
            'collection': 'a-collection',
            'log-format': 'InfluxDB',
            'listeners': [],
            'speakers': []
        }
        self.mock_yaml_dump.assert_called_once_with(expected_config, mock_file)
        mock_scp_fn.assert_called_once_with('/target/config.yaml',
                                            'config.yaml')
        mock_ssh_fn.assert_called_once_with(
            ['sudo', 'mv', 'config.yaml', '/some/dir/config.yaml'])
        self.mock_c_is_running.assert_called_once_with()
        self.mock_c_restart.assert_called_once_with()

    def test_is_running(self):
        """is_running() checks systemd (after verifying it is set)."""
        self.patch_object(self.c, '_verify_systemd_not_none',
                          name='mock__verify_systemd_not_none')
        mock__systemd = mock.Mock()
        mock__systemd.is_running.return_value = False
        self.c._systemd = mock__systemd
        self.assertFalse(self.c.is_running())
        self.mock__verify_systemd_not_none.assert_called_once_with()
        mock__systemd.is_running.assert_called_once_with()

    def test_start(self):
        """start() delegates to systemd."""
        self.patch_object(self.c, '_verify_systemd_not_none',
                          name='mock__verify_systemd_not_none')
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.c.start()
        self.mock__verify_systemd_not_none.assert_called_once_with()
        mock__systemd.start.assert_called_once_with()

    def test_stop(self):
        """stop() logs and returns when systemd is unset; else stops."""
        self.patch_object(conncheck, 'logger', name='mock_logger')
        self.c._systemd = None
        self.c.stop()
        self.mock_logger.debug.assert_called_once_with(mock.ANY, self.c)
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.mock_logger.reset_mock()
        self.c.stop()
        mock__systemd.stop.assert_called_once_with()

    def test_restart(self):
        """restart() delegates to systemd."""
        self.patch_object(self.c, '_verify_systemd_not_none',
                          name='mock__verify_systemd_not_none')
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.c.restart()
        self.mock__verify_systemd_not_none.assert_called_once_with()
        mock__systemd.restart.assert_called_once_with()

    def test_finalise(self):
        """finalise() stops/disables only when installed."""
        self.c._installed = False
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.patch_object(self.c, 'stop', name='mock_c_stop')
        self.c.finalise()
        self.mock_c_stop.assert_not_called()
        mock__systemd.disable.assert_not_called()
        self.c._installed = True
        self.c.finalise()
        self.mock_c_stop.assert_called_once_with()
        mock__systemd.disable.assert_called_once_with()

    def test_clean_up(self):
        """clean_up() also removes the systemd unit when installed."""
        self.c._installed = False
        mock__systemd = mock.Mock()
        self.c._systemd = mock__systemd
        self.patch_object(self.c, 'stop', name='mock_c_stop')
        self.c.clean_up()
        self.mock_c_stop.assert_not_called()
        mock__systemd.disable.assert_not_called()
        mock__systemd.remove.assert_not_called()
        self.c._installed = True
        self.c.clean_up()
        self.mock_c_stop.assert_called_once_with()
        mock__systemd.disable.assert_called_once_with()
        mock__systemd.remove.assert_called_once_with()
class TestConnCheckInstanceJuju(tests_utils.BaseTestCase):
def setUp(self):
super().setUp()
self.patch('zaza.utilities.installers.make_juju_ssh_fn',
name='mock_make_juju_ssh_fn')
self.mock_ssh_fn = mock.Mock()
self.mock_make_juju_ssh_fn = self.mock_ssh_fn
self.patch('zaza.utilities.installers.make_juju_scp_fn',
name='mock_make_juju_scp_fn')
self.mock_scp_fn = mock.Mock()
self.mock_make_juju_scp_fn = self.mock_scp_fn
self.c = conncheck.ConnCheckInstanceJuju(
'0',
model='some-model',
user='a-user',
module_source='/some/source',
collection='a-collection')
def test_init(self):
c = conncheck.ConnCheckInstanceJuju(
'0/lxd/15',
log_format=conncheck.LogFormats.CSV,
config_file='thing.yaml',
install_dir='/opt',
module_source='/some/other/source',
install_user='a-user')
self.assertEqual(c.machine_or_unit_spec, '0/lxd/15')
self.assertEqual(c.name, '0/lxd/15')
self.assertEqual(c.log_format, conncheck.LogFormats.CSV)
self.assertEqual(c.config_file, 'thing.yaml')
self.assertEqual(c.install_dir, '/opt')
self.assertEqual(c.module_source, '/some/other/source')
self.assertEqual(c.install_user, 'a-user')
self.assertEqual(self.c.machine_or_unit_spec, '0')
self.assertEqual(self.c.name, '0')
self.assertEqual(self.c.log_format, conncheck.LogFormats.InfluxDB)
self.assertEqual(self.c.config_file, 'config.yaml')
self.assertEqual(self.c.install_dir, '.')
self.assertEqual(self.c.module_source, '/some/source')
self.assertEqual(self.c.install_user, 'conncheck')
def test_local_log_filename(self):
self.assertEqual(self.c.local_log_filename, '0.log')
self.c.machine_or_unit_spec = '0/lxd/15'
self.assertEqual(self.c.local_log_filename, '0_lxd_15.log')
def test__validate_spec(self):
MACHINE = self.c.JujuTypes.MACHINE
UNIT = self.c.JujuTypes.UNIT
valid_specs = (('0', MACHINE),
('9', MACHINE),
('15', MACHINE),
('0/lxd/10', MACHINE),
('1/LXD/4', MACHINE),
('some-unit-0/14', UNIT),
('other/23', UNIT))
invalid_specs = ('b', '1/spec/2', 'other-unit', 'd/10/10')
for spec, type_ in valid_specs:
self.c.machine_or_unit_spec = spec
self.c._validate_spec()
self.assertEqual(self.c._juju_type, type_)
for spec in invalid_specs:
self.c.machine_or_unit_spec = spec
with self.assertRaises(ValueError):
self.c._validate_spec()
def test_add_listener(self):
self.patch_object(self.c, '_validate_not_existing_listener',
name='mock__validate_not_existing_listener')
self.patch_object(self.c, '_get_address', name='mock__get_address')
self.mock__get_address.return_value = '1.2.3.4'
self.patch_object(self.c, 'add_listener_spec',
name='mock_add_listener_spec')
self.c.add_listener('udp', 1024, space='default', cidr='cidr')
self.mock__validate_not_existing_listener.assert_called_once_with(
'udp', 1024)
self.mock__get_address('default', 'cidr')
self.mock_add_listener_spec.assert_called_once_with(
'udp', 1024, '1.2.3.4', reply_size=1024)
def test__get_address(self):
self.patch_object(self.c, '_get_address_unit',
name='mock__get_address_unit')
self.mock__get_address_unit.return_value = '1.2.3.4'
self.patch_object(self.c, '_get_address_machine',
name='mock__get_address_machine')
self.mock__get_address_machine.return_value = '5.6.7.8'
self.c._juju_type = self.c.JujuTypes.UNIT
self.assertEqual(self.c._get_address(None, 'cidr'), '1.2.3.4')
self.mock__get_address_unit.assert_called_once_with(
'juju-info', 'cidr')
self.mock__get_address_unit.reset_mock()
self.c._juju_type = self.c.JujuTypes.MACHINE
self.assertEqual(self.c._get_address(None, 'cidr'), '5.6.7.8')
self.mock__get_address_machine.assert_called_once_with('cidr')
self.c._juju_type = None
with self.assertRaises(RuntimeError):
self.c._get_address(None, 'cidr')
def test__get_address_unit_single_address(self):
self.patch('subprocess.check_output', name='mock_check_output')
self.patch_object(conncheck, 'logger', name='mock_logger')
self.patch('yaml.safe_load', name='mock_yaml_safe_load')
self.mock_check_output.return_value = b'1.2.3.4'
self.mock_yaml_safe_load.return_value = '1.2.3.4\n'
self.assertEqual(self.c._get_address_unit('a-space', 'a-cidr'),
'1.2.3.4')
self.mock_check_output.assert_called_once_with(
['juju', 'run', '-u', '0', '--', 'network-get', '--format',
'yaml', '--bind-address', 'a-space'])
self.mock_yaml_safe_load.assert_called_once_with('1.2.3.4')
    def test__get_address_unit_multiple_address(self):
        """A YAML list of addresses is not yet supported -> NotImplementedError."""
        self.patch('subprocess.check_output', name='mock_check_output')
        self.patch_object(conncheck, 'logger', name='mock_logger')
        self.patch('yaml.safe_load', name='mock_yaml_safe_load')
        self.mock_check_output.return_value = b'1.2.3.4'
        # Two candidate addresses: the implementation must refuse to pick one.
        self.mock_yaml_safe_load.return_value = ['1.2.3.4', '5.6.7.8']
        with self.assertRaises(NotImplementedError):
            self.c._get_address_unit('a-space', 'a-cidr')
    def test__get_address_unit_network_get_fails(self):
        """A failing `juju run` propagates subprocess.CalledProcessError."""
        self.patch('subprocess.check_output', name='mock_check_output')
        self.patch_object(conncheck, 'logger', name='mock_logger')
        self.patch('yaml.safe_load', name='mock_yaml_safe_load')
        self.mock_check_output.return_value = b'1.2.3.4'
        # side_effect helper: simulate the subprocess failing.
        def raise_(*args):
            raise subprocess.CalledProcessError(cmd='bang', returncode=1)
        self.mock_check_output.side_effect = raise_
        with self.assertRaises(subprocess.CalledProcessError):
            self.c._get_address_unit('a-space', 'a-cidr')
    def test__get_address_machine(self):
        """_get_address_machine() is a stub: it must raise NotImplementedError."""
        with self.assertRaises(NotImplementedError):
            self.c._get_address_machine()
class TestConnCheckInstanceSSH(tests_utils.BaseTestCase):
    """Tests for conncheck.ConnCheckInstanceSSH (SSH-reached instances)."""
    def setUp(self):
        """Patch the ssh/scp factory functions and build a default instance."""
        super().setUp()
        self.patch('zaza.utilities.installers.make_ssh_fn',
                   name='mock_make_ssh_fn')
        self.mock_ssh_fn = mock.Mock()
        # NOTE(review): this rebinds the attribute that self.patch() just set,
        # replacing the patcher's mock reference with a fresh Mock.  Possibly
        # intended as `self.mock_make_ssh_fn.return_value = self.mock_ssh_fn`
        # — confirm against the patch() helper's semantics.
        self.mock_make_ssh_fn = self.mock_ssh_fn
        self.patch('zaza.utilities.installers.make_scp_fn',
                   name='mock_make_scp_fn')
        self.mock_scp_fn = mock.Mock()
        # NOTE(review): same rebinding pattern as above for the scp factory.
        self.mock_make_scp_fn = self.mock_scp_fn
        self.c = conncheck.ConnCheckInstanceSSH(
            address='1.2.3.4',
            key_file='a-file',
            user='a-user',
            module_source='/some/source',
            collection='a-collection')
    def test_init(self):
        """Constructor stores explicit kwargs and applies documented defaults."""
        # Fully-specified construction: every attribute mirrors its kwarg.
        c = conncheck.ConnCheckInstanceSSH(
            '5.6.7.8',
            'my-key-file',
            log_format=conncheck.LogFormats.CSV,
            config_file='thing.yaml',
            install_dir='/opt',
            module_source='/some/other/source',
            install_user='a-user')
        self.assertEqual(c.address, '5.6.7.8')
        self.assertEqual(c.key_file, 'my-key-file')
        self.assertEqual(c.name, '5.6.7.8')
        self.assertEqual(c.log_format, conncheck.LogFormats.CSV)
        self.assertEqual(c.config_file, 'thing.yaml')
        self.assertEqual(c.install_dir, '/opt')
        self.assertEqual(c.module_source, '/some/other/source')
        self.assertEqual(c.install_user, 'a-user')
        # Defaults on the setUp() instance (InfluxDB log format, conncheck
        # user, cwd install dir, config.yaml).
        self.assertEqual(self.c.address, '1.2.3.4')
        self.assertEqual(self.c.key_file, 'a-file')
        self.assertEqual(self.c.name, '1.2.3.4')
        self.assertEqual(self.c.log_format, conncheck.LogFormats.InfluxDB)
        self.assertEqual(self.c.config_file, 'config.yaml')
        self.assertEqual(self.c.install_dir, '.')
        self.assertEqual(self.c.module_source, '/some/source')
        self.assertEqual(self.c.install_user, 'conncheck')
    def test_local_log_filename(self):
        """'@' and '.' in the address are sanitised for the local log name."""
        self.c.address = 'user@1.2.3.4'
        self.assertEqual(self.c.local_log_filename, 'user_1-2-3-4.log')
    def test_add_listener(self):
        """SSH variant binds listeners on 0.0.0.0 (no per-space address)."""
        self.patch_object(self.c, '_validate_not_existing_listener',
                          name='mock__validate_not_existing_listener')
        self.patch_object(self.c, 'add_listener_spec',
                          name='mock_add_listener_spec')
        self.c.add_listener('udp', 1024)
        self.mock__validate_not_existing_listener.assert_called_once_with(
            'udp', 1024)
        self.mock_add_listener_spec.assert_called_once_with(
            'udp', 1024, '0.0.0.0', reply_size=1024)
| true | true |
7900df3bf1d0564e2953ce25cfb4f96a4dab68f2 | 10,914 | py | Python | scripts/slave/extract_build.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/extract_build.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/extract_build.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract a build, executed by a buildbot slave.
"""
import optparse
import os
import shutil
import sys
import traceback
import urllib
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
class ExtractHandler(object):
  """Value object pairing a build-archive source URL with the local file
  name the archive should be saved under.  Subclasses implement download().
  """

  def __init__(self, url, archive_name):
    # Keep both attributes exactly as given; subclasses read them directly.
    self.url, self.archive_name = url, archive_name
class GSHandler(ExtractHandler):
  """Downloads a build archive from Google Storage (gs:// URLs) via gsutil."""
  def download(self):
    """Copy self.url into the cwd and rename it to self.archive_name.

    Returns:
      True on success, False if the gsutil copy returned non-zero.
    """
    status = slave_utils.GSUtilCopy(self.url, '.')
    if 0 != status:
      return False
    try:
      shutil.move(os.path.basename(self.url), self.archive_name)
    except OSError:
      # Destination already exists (move cannot overwrite on this platform):
      # delete the stale archive and retry the move.
      os.remove(self.archive_name)
      shutil.move(os.path.basename(self.url), self.archive_name)
    return True
class WebHandler(ExtractHandler):
  """Downloads a build archive over HTTP (Python 2 urllib)."""
  @chromium_utils.RunAndPrintDots
  def download(self):
    """Fetch self.url into self.archive_name.

    Returns:
      False on IOError; otherwise the (filename, headers) tuple from
      urllib.urlretrieve, which is truthy — callers only test truthiness.
    """
    try:
      rc = urllib.urlretrieve(self.url, self.archive_name)
      print '\nDownload complete'
    except IOError:
      print '\nFailed to download build'
      return False
    return rc
def GetBuildUrl(options, build_revision, webkit_revision=None):
  """Compute the url to download the build from. This will use as a base
  string, in order of preference:
  0) options.build_archive_url
  1) options.build_url
  2) options.factory_properties.build_url
  3) build url constructed from build_properties. This last type of
     construction is not compatible with the 'force build' button.

  Args:
    options: options object as specified by parser below.
    build_revision: Revision for the build.
    webkit_revision: WebKit revision (optional)

  Returns:
    (url, archive_name) tuple; archive_name is None when
    options.build_archive_url is used verbatim.
  """
  if options.build_archive_url:
    return options.build_archive_url, None
  base_filename, version_suffix = slave_utils.GetZipFileNames(
      options.master_name,
      options.build_number,
      options.parent_build_number,
      build_revision, webkit_revision, extract=True)
  # Values interpolated into the %(...)s placeholders of the URL template.
  replace_dict = {
      'base_filename': base_filename,
      'parentname': options.parent_builder_name,
      'parentslavename': options.parent_slave_name,
      'parent_builddir': options.parent_build_dir,
  }
  # If builddir isn't specified, assume buildbot used the builder name
  # as the root folder for the build.
  if not replace_dict.get('parent_builddir') and replace_dict.get('parentname'):
    replace_dict['parent_builddir'] = replace_dict.get('parentname', '')
  url = options.build_url
  if not url:
    # Default template: fetch from the parent slave's staging directory.
    url = ('http://%(parentslavename)s/b/build/slave/%(parent_builddir)s/'
           'chrome_staging')
  if url[-4:] != '.zip':  # assume filename not specified
    # Append the filename to the base URL. First strip any trailing slashes.
    url = url.rstrip('/')
    url = '%s/%s' % (url, '%(base_filename)s.zip')
  url = url % replace_dict
  archive_name = url.split('/')[-1]
  # The versioned url embeds build numbers/revisions before '.zip'.
  versioned_url = url.replace('.zip', version_suffix + '.zip')
  return versioned_url, archive_name
def real_main(options):
  """ Download a build, extract it to build\\BuildDir\\full-build-win32
  and rename it to build\\BuildDir\\Target

  Returns 0 on success, slave_utils.WARNING_EXIT_CODE if only the
  unversioned fallback build could be used, slave_utils.ERROR_EXIT_CODE
  after three failed attempts (or immediately with --halt-on-missing-build).
  """
  abs_build_dir = os.path.abspath(
      build_directory.GetBuildOutputDirectory(options.src_dir))
  target_build_output_dir = os.path.join(abs_build_dir, options.target)
  # Generic name for the archive.
  archive_name = 'full-build-%s.zip' % chromium_utils.PlatformName()
  # Just take the zip off the name for the output directory name.
  output_dir = os.path.join(abs_build_dir, archive_name.replace('.zip', ''))
  src_dir = os.path.dirname(abs_build_dir)
  if not options.build_revision and not options.build_archive_url:
    # No explicit revision given: derive it from the local checkout.
    (build_revision, webkit_revision) = slave_utils.GetBuildRevisions(
        src_dir, options.webkit_dir, options.revision_dir)
  else:
    build_revision = options.build_revision
    webkit_revision = options.webkit_revision
  url, archive_name = GetBuildUrl(options, build_revision, webkit_revision)
  if archive_name is None:
    archive_name = 'build.zip'
    base_url = None
  else:
    # Unversioned "latest build" fallback URL next to the versioned one.
    base_url = '/'.join(url.split('/')[:-1] + [archive_name])
  if url.startswith('gs://'):
    handler = GSHandler(url=url, archive_name=archive_name)
  else:
    handler = WebHandler(url=url, archive_name=archive_name)
  # We try to download and extract 3 times.
  for tries in range(1, 4):
    print 'Try %d: Fetching build from %s...' % (tries, url)
    failure = False
    # If the url is valid, we download the file.
    # NOTE(review): `if not failure:` is always true here (failure was just
    # set False) — vestigial guard from an earlier version.
    if not failure:
      if not handler.download():
        if options.halt_on_missing_build:
          return slave_utils.ERROR_EXIT_CODE
        failure = True
    # If the versioned url failed, we try to get the latest build.
    # `failure` stays True after a successful fallback download, which is
    # what later triggers the revision-file read and WARNING exit code.
    if failure:
      if url.startswith('gs://') or not base_url:
        continue
      else:
        print 'Fetching latest build at %s' % base_url
        base_handler = handler.__class__(base_url, handler.archive_name)
        if not base_handler.download():
          continue
    print 'Extracting build %s to %s...' % (archive_name, abs_build_dir)
    try:
      chromium_utils.RemoveDirectory(target_build_output_dir)
      chromium_utils.ExtractZip(archive_name, abs_build_dir)
      # For Chrome builds, the build will be stored in chrome-win32.
      if 'full-build-win32' in output_dir:
        chrome_dir = output_dir.replace('full-build-win32', 'chrome-win32')
        if os.path.exists(chrome_dir):
          output_dir = chrome_dir
      print 'Moving build from %s to %s' % (output_dir, target_build_output_dir)
      shutil.move(output_dir, target_build_output_dir)
    except (OSError, IOError, chromium_utils.ExternalError):
      print 'Failed to extract the build.'
      # Print out the traceback in a nice format
      traceback.print_exc()
      # Try again...
      continue
    # If we got the latest build, then figure out its revision number.
    if failure:
      print "Trying to determine the latest build's revision number..."
      try:
        build_revision_file_name = os.path.join(
            target_build_output_dir,
            chromium_utils.FULL_BUILD_REVISION_FILENAME)
        build_revision_file = open(build_revision_file_name, 'r')
        print 'Latest build is revision: %s' % build_revision_file.read()
        build_revision_file.close()
      except IOError:
        print "Could not determine the latest build's revision number"
    if failure:
      # We successfully extracted the archive, but it was the generic one.
      return slave_utils.WARNING_EXIT_CODE
    return 0
  # If we get here, that means that it failed 3 times. We return a failure.
  return slave_utils.ERROR_EXIT_CODE
def main():
  """Parse command-line options (with build/factory-property fallbacks)
  and delegate to real_main().  Returns a process exit code.
  """
  option_parser = optparse.OptionParser()
  option_parser.add_option('--target',
                           help='build target to archive (Debug or Release)')
  option_parser.add_option('--src-dir', default='src',
                           help='path to the top-level sources directory')
  option_parser.add_option('--build-dir', help='ignored')
  option_parser.add_option('--master-name', help='Name of the buildbot master.')
  option_parser.add_option('--build-number', type=int,
                           help='Buildbot build number.')
  option_parser.add_option('--parent-build-dir',
                           help='Path to build directory on parent buildbot '
                                'builder.')
  option_parser.add_option('--parent-builder-name',
                           help='Name of parent buildbot builder.')
  option_parser.add_option('--parent-slave-name',
                           help='Name of parent buildbot slave.')
  option_parser.add_option('--parent-build-number', type=int,
                           help='Buildbot parent build number.')
  option_parser.add_option('--build-url',
                           help='Base url where to find the build to extract')
  option_parser.add_option('--build-archive-url',
                           help='Exact url where to find the build to extract')
  # TODO(cmp): Remove --halt-on-missing-build when the buildbots are upgraded
  # to not use this argument.
  option_parser.add_option('--halt-on-missing-build', action='store_true',
                           help='whether to halt on a missing build')
  option_parser.add_option('--build_revision',
                           help='Revision of the build that is being '
                                'archived. Overrides the revision found on '
                                'the local disk')
  option_parser.add_option('--webkit_revision',
                           help='Webkit revision of the build that is being '
                                'archived. Overrides the revision found on '
                                'the local disk')
  option_parser.add_option('--webkit-dir', help='WebKit directory path, '
                                                'relative to the src/ dir.')
  option_parser.add_option('--revision-dir',
                           help=('Directory path that shall be used to decide '
                                 'the revision number for the archive, '
                                 'relative to the src/ dir.'))
  option_parser.add_option('--build-output-dir', help='ignored')
  chromium_utils.AddPropertiesOptions(option_parser)
  options, args = option_parser.parse_args()
  if args:
    print 'Unknown options: %s' % args
    return 1
  # Each flag not given on the command line falls back to the corresponding
  # buildbot build/factory property (legacy invocation path).
  if not options.master_name:
    options.master_name = options.build_properties.get('mastername', '')
  if not options.build_number:
    options.build_number = options.build_properties.get('buildnumber')
  if not options.parent_build_dir:
    options.parent_build_dir = options.build_properties.get('parent_builddir')
  if not options.parent_builder_name:
    options.parent_builder_name = options.build_properties.get('parentname')
  if not options.parent_slave_name:
    options.parent_slave_name = options.build_properties.get('parentslavename')
  if not options.parent_build_number:
    options.parent_build_number = options.build_properties.get(
        'parent_buildnumber')
  if not options.build_url:
    options.build_url = options.factory_properties.get('build_url')
  if not options.halt_on_missing_build:
    options.halt_on_missing_build = options.factory_properties.get(
        'halt_on_missing_build')
  if not options.target:
    options.target = options.factory_properties.get('target', 'Release')
  if not options.webkit_dir:
    options.webkit_dir = options.factory_properties.get('webkit_dir')
  if not options.revision_dir:
    options.revision_dir = options.factory_properties.get('revision_dir')
  options.src_dir = (options.factory_properties.get('extract_build_src_dir')
                     or options.src_dir)
  return real_main(options)


if '__main__' == __name__:
  sys.exit(main())
| 39.400722 | 80 | 0.678761 |
"""A tool to extract a build, executed by a buildbot slave.
"""
import optparse
import os
import shutil
import sys
import traceback
import urllib
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
class ExtractHandler(object):
def __init__(self, url, archive_name):
self.url = url
self.archive_name = archive_name
class GSHandler(ExtractHandler):
def download(self):
status = slave_utils.GSUtilCopy(self.url, '.')
if 0 != status:
return False
try:
shutil.move(os.path.basename(self.url), self.archive_name)
except OSError:
os.remove(self.archive_name)
shutil.move(os.path.basename(self.url), self.archive_name)
return True
class WebHandler(ExtractHandler):
@chromium_utils.RunAndPrintDots
def download(self):
try:
rc = urllib.urlretrieve(self.url, self.archive_name)
print '\nDownload complete'
except IOError:
print '\nFailed to download build'
return False
return rc
def GetBuildUrl(options, build_revision, webkit_revision=None):
"""Compute the url to download the build from. This will use as a base
string, in order of preference:
0) options.build_archive_url
1) options.build_url
2) options.factory_properties.build_url
3) build url constructed from build_properties. This last type of
construction is not compatible with the 'force build' button.
Args:
options: options object as specified by parser below.
build_revision: Revision for the build.
webkit_revision: WebKit revision (optional)
"""
if options.build_archive_url:
return options.build_archive_url, None
base_filename, version_suffix = slave_utils.GetZipFileNames(
options.master_name,
options.build_number,
options.parent_build_number,
build_revision, webkit_revision, extract=True)
replace_dict = {
'base_filename': base_filename,
'parentname': options.parent_builder_name,
'parentslavename': options.parent_slave_name,
'parent_builddir': options.parent_build_dir,
}
# as the root folder for the build.
if not replace_dict.get('parent_builddir') and replace_dict.get('parentname'):
replace_dict['parent_builddir'] = replace_dict.get('parentname', '')
url = options.build_url
if not url:
url = ('http://%(parentslavename)s/b/build/slave/%(parent_builddir)s/'
'chrome_staging')
if url[-4:] != '.zip': # assume filename not specified
# Append the filename to the base URL. First strip any trailing slashes.
url = url.rstrip('/')
url = '%s/%s' % (url, '%(base_filename)s.zip')
url = url % replace_dict
archive_name = url.split('/')[-1]
versioned_url = url.replace('.zip', version_suffix + '.zip')
return versioned_url, archive_name
def real_main(options):
""" Download a build, extract it to build\\BuildDir\\full-build-win32
and rename it to build\\BuildDir\\Target
"""
abs_build_dir = os.path.abspath(
build_directory.GetBuildOutputDirectory(options.src_dir))
target_build_output_dir = os.path.join(abs_build_dir, options.target)
# Generic name for the archive.
archive_name = 'full-build-%s.zip' % chromium_utils.PlatformName()
# Just take the zip off the name for the output directory name.
output_dir = os.path.join(abs_build_dir, archive_name.replace('.zip', ''))
src_dir = os.path.dirname(abs_build_dir)
if not options.build_revision and not options.build_archive_url:
(build_revision, webkit_revision) = slave_utils.GetBuildRevisions(
src_dir, options.webkit_dir, options.revision_dir)
else:
build_revision = options.build_revision
webkit_revision = options.webkit_revision
url, archive_name = GetBuildUrl(options, build_revision, webkit_revision)
if archive_name is None:
archive_name = 'build.zip'
base_url = None
else:
base_url = '/'.join(url.split('/')[:-1] + [archive_name])
if url.startswith('gs://'):
handler = GSHandler(url=url, archive_name=archive_name)
else:
handler = WebHandler(url=url, archive_name=archive_name)
# We try to download and extract 3 times.
for tries in range(1, 4):
print 'Try %d: Fetching build from %s...' % (tries, url)
failure = False
# If the url is valid, we download the file.
if not failure:
if not handler.download():
if options.halt_on_missing_build:
return slave_utils.ERROR_EXIT_CODE
failure = True
# If the versioned url failed, we try to get the latest build.
if failure:
if url.startswith('gs://') or not base_url:
continue
else:
print 'Fetching latest build at %s' % base_url
base_handler = handler.__class__(base_url, handler.archive_name)
if not base_handler.download():
continue
print 'Extracting build %s to %s...' % (archive_name, abs_build_dir)
try:
chromium_utils.RemoveDirectory(target_build_output_dir)
chromium_utils.ExtractZip(archive_name, abs_build_dir)
# For Chrome builds, the build will be stored in chrome-win32.
if 'full-build-win32' in output_dir:
chrome_dir = output_dir.replace('full-build-win32', 'chrome-win32')
if os.path.exists(chrome_dir):
output_dir = chrome_dir
print 'Moving build from %s to %s' % (output_dir, target_build_output_dir)
shutil.move(output_dir, target_build_output_dir)
except (OSError, IOError, chromium_utils.ExternalError):
print 'Failed to extract the build.'
# Print out the traceback in a nice format
traceback.print_exc()
# Try again...
continue
# If we got the latest build, then figure out its revision number.
if failure:
print "Trying to determine the latest build's revision number..."
try:
build_revision_file_name = os.path.join(
target_build_output_dir,
chromium_utils.FULL_BUILD_REVISION_FILENAME)
build_revision_file = open(build_revision_file_name, 'r')
print 'Latest build is revision: %s' % build_revision_file.read()
build_revision_file.close()
except IOError:
print "Could not determine the latest build's revision number"
if failure:
# We successfully extracted the archive, but it was the generic one.
return slave_utils.WARNING_EXIT_CODE
return 0
# If we get here, that means that it failed 3 times. We return a failure.
return slave_utils.ERROR_EXIT_CODE
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--target',
help='build target to archive (Debug or Release)')
option_parser.add_option('--src-dir', default='src',
help='path to the top-level sources directory')
option_parser.add_option('--build-dir', help='ignored')
option_parser.add_option('--master-name', help='Name of the buildbot master.')
option_parser.add_option('--build-number', type=int,
help='Buildbot build number.')
option_parser.add_option('--parent-build-dir',
help='Path to build directory on parent buildbot '
'builder.')
option_parser.add_option('--parent-builder-name',
help='Name of parent buildbot builder.')
option_parser.add_option('--parent-slave-name',
help='Name of parent buildbot slave.')
option_parser.add_option('--parent-build-number', type=int,
help='Buildbot parent build number.')
option_parser.add_option('--build-url',
help='Base url where to find the build to extract')
option_parser.add_option('--build-archive-url',
help='Exact url where to find the build to extract')
# TODO(cmp): Remove --halt-on-missing-build when the buildbots are upgraded
# to not use this argument.
option_parser.add_option('--halt-on-missing-build', action='store_true',
help='whether to halt on a missing build')
option_parser.add_option('--build_revision',
help='Revision of the build that is being '
'archived. Overrides the revision found on '
'the local disk')
option_parser.add_option('--webkit_revision',
help='Webkit revision of the build that is being '
'archived. Overrides the revision found on '
'the local disk')
option_parser.add_option('--webkit-dir', help='WebKit directory path, '
'relative to the src/ dir.')
option_parser.add_option('--revision-dir',
help=('Directory path that shall be used to decide '
'the revision number for the archive, '
'relative to the src/ dir.'))
option_parser.add_option('--build-output-dir', help='ignored')
chromium_utils.AddPropertiesOptions(option_parser)
options, args = option_parser.parse_args()
if args:
print 'Unknown options: %s' % args
return 1
if not options.master_name:
options.master_name = options.build_properties.get('mastername', '')
if not options.build_number:
options.build_number = options.build_properties.get('buildnumber')
if not options.parent_build_dir:
options.parent_build_dir = options.build_properties.get('parent_builddir')
if not options.parent_builder_name:
options.parent_builder_name = options.build_properties.get('parentname')
if not options.parent_slave_name:
options.parent_slave_name = options.build_properties.get('parentslavename')
if not options.parent_build_number:
options.parent_build_number = options.build_properties.get(
'parent_buildnumber')
if not options.build_url:
options.build_url = options.factory_properties.get('build_url')
if not options.halt_on_missing_build:
options.halt_on_missing_build = options.factory_properties.get(
'halt_on_missing_build')
if not options.target:
options.target = options.factory_properties.get('target', 'Release')
if not options.webkit_dir:
options.webkit_dir = options.factory_properties.get('webkit_dir')
if not options.revision_dir:
options.revision_dir = options.factory_properties.get('revision_dir')
options.src_dir = (options.factory_properties.get('extract_build_src_dir')
or options.src_dir)
return real_main(options)
if '__main__' == __name__:
sys.exit(main())
| false | true |
7900df46c3f5fbc6ffed783bcf49342a16fafa3b | 8,813 | py | Python | EQUATIONS/InternalEnergyEquation.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [
"BSD-2-Clause"
] | 4 | 2019-04-22T11:43:47.000Z | 2020-09-16T00:28:15.000Z | EQUATIONS/InternalEnergyEquation.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [
"BSD-2-Clause"
] | 34 | 2019-07-01T09:11:00.000Z | 2022-03-30T13:35:43.000Z | EQUATIONS/InternalEnergyEquation.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [
"BSD-2-Clause"
] | 1 | 2020-09-16T00:28:17.000Z | 2020-09-16T00:28:17.000Z | import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class InternalEnergyEquation(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, fext, intc, tke_diss, data_prefix):
super(InternalEnergyEquation, self).__init__(ig)
# load data to structured array
eht = self.customLoad(filename)
# load grid
xzn0 = self.getRAdata(eht, 'xzn0')
nx = self.getRAdata(eht, 'nx')
# pick equation-specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd = self.getRAdata(eht, 'dd')[intc]
ux = self.getRAdata(eht, 'ux')[intc]
pp = self.getRAdata(eht, 'pp')[intc]
ddux = self.getRAdata(eht, 'ddux')[intc]
ddei = self.getRAdata(eht, 'ddei')[intc]
ddeiux = self.getRAdata(eht, 'ddeiux')[intc]
divu = self.getRAdata(eht, 'divu')[intc]
ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
ddenuc1 = self.getRAdata(eht, 'ddenuc1')[intc]
ddenuc2 = self.getRAdata(eht, 'ddenuc2')[intc]
# store time series for time derivatives
t_timec = self.getRAdata(eht, 'timec')
t_dd = self.getRAdata(eht, 'dd')
t_ddei = self.getRAdata(eht, 'ddei')
t_fht_ei = t_ddei / t_dd
# construct equation-specific mean fields
fht_ux = ddux / dd
fht_ei = ddei / dd
fei = ddeiux - ddux * ddei / dd
##########################
# INTERNAL ENERGY EQUATION
##########################
# LHS -dq/dt
self.minus_dt_dd_fht_ei = -self.dt(t_dd * t_fht_ei, xzn0, t_timec, intc)
# LHS -div dd fht_ux fht_ei
self.minus_div_dd_fht_ux_fht_ei = -self.Div(dd * fht_ux * fht_ei, xzn0)
# RHS -div fei
self.minus_div_fei = -self.Div(fei, xzn0)
# RHS -div ftt (not included) heat flux
self.minus_div_ftt = -np.zeros(nx)
# RHS -P d = - pp Div ux
self.minus_pp_div_ux = -pp * self.Div(ux, xzn0)
# RHS -Wp = -eht_ppf_df
self.minus_eht_ppf_df = -(ppdivu - pp * divu)
# RHS source + dd enuc
self.plus_dd_fht_enuc = ddenuc1 + ddenuc2
# RHS dissipated turbulent kinetic energy
self.plus_disstke = +tke_diss
# -res
self.minus_resEiEquation = -(self.minus_dt_dd_fht_ei + self.minus_div_dd_fht_ux_fht_ei +
self.minus_div_fei + self.minus_div_ftt + self.minus_pp_div_ux + self.minus_eht_ppf_df +
self.plus_dd_fht_enuc + self.plus_disstke)
##############################
# END INTERNAL ENERGY EQUATION
##############################
# assign global data to be shared across whole class
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.fht_ei = fht_ei
self.fext = fext
    def plot_ei(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
        """Plot mean Favrian internal energy stratification in the model.

        Args:
            LAXIS: axis-limit mode forwarded to set_plt_axis.
            bconv, tconv: bottom/top convective boundary radii (dashed lines).
            xbl, xbr, ybu, ybd: manual axis limits forwarded to set_plt_axis.
            ilg: matplotlib legend location code.
        """
        # only cartesian (1) and spherical (2) geometries are supported
        if self.ig != 1 and self.ig != 2:
            print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
            sys.exit()
        # load x GRID
        grd1 = self.xzn0
        # load DATA to plot
        plt1 = self.fht_ei
        # create FIGURE
        plt.figure(figsize=(7, 6))
        # format AXIS, make sure it is exponential
        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
        # set plot boundaries
        to_plot = [plt1]
        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
        # plot DATA
        plt.title(r'internal energy')
        plt.plot(grd1, plt1, color='brown', label=r'$\widetilde{\varepsilon}_I$')
        # convective boundary markers
        plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
        plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
        # define and show x/y LABELS
        if self.ig == 1:
            setxlabel = r"x (cm)"
            setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
            plt.xlabel(setxlabel)
            plt.ylabel(setylabel)
        elif self.ig == 2:
            setxlabel = r"r (cm)"
            setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
            plt.xlabel(setxlabel)
            plt.ylabel(setylabel)
        # show LEGEND
        plt.legend(loc=ilg, prop={'size': 18})
        # display PLOT
        plt.show(block=False)
        # save PLOT
        if self.fext == 'png':
            plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.png')
        elif self.fext == 'eps':
            plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.eps')
    def plot_ei_equation(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
        """Plot all terms of the internal energy equation in the model.

        Args:
            LAXIS: axis-limit mode forwarded to set_plt_axis.
            bconv, tconv: bottom/top convective boundary radii (dashed lines).
            xbl, xbr, ybu, ybd: manual axis limits forwarded to set_plt_axis.
            ilg: matplotlib legend location code.
        """
        # only cartesian (1) and spherical (2) geometries are supported
        if self.ig != 1 and self.ig != 2:
            print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
            sys.exit()
        # load x GRID
        grd1 = self.xzn0
        # equation terms assembled in __init__
        lhs0 = self.minus_dt_dd_fht_ei
        lhs1 = self.minus_div_dd_fht_ux_fht_ei
        rhs0 = self.minus_div_fei
        rhs1 = self.minus_div_ftt
        rhs2 = self.minus_pp_div_ux
        rhs3 = self.minus_eht_ppf_df
        rhs4 = self.plus_dd_fht_enuc
        rhs5 = self.plus_disstke
        res = self.minus_resEiEquation
        # create FIGURE
        plt.figure(figsize=(7, 6))
        # format AXIS, make sure it is exponential
        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
        # set plot boundaries
        to_plot = [lhs0, lhs1, rhs0, rhs1, rhs2, rhs3, rhs4, rhs5, res]
        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
        # plot DATA (labels differ only in the coordinate symbol x vs r)
        plt.title('internal energy equation')
        if self.ig == 1:
            plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
            plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_x (\overline{\rho}\widetilde{u}_x \widetilde{\epsilon}_I$)")
            plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_x f_I $")
            plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_x f_T$ (not incl.)")
            plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
            plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
            plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
            plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
            plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
        elif self.ig == 2:
            plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
            plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_r (\overline{\rho}\widetilde{u}_r \widetilde{\epsilon}_I$)")
            plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_r f_I $")
            plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_r f_T$ (not incl.)")
            plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
            plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
            plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
            plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
            plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
        # convective boundary markers
        plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
        plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
        # define and show x/y LABELS
        if self.ig == 1:
            setxlabel = r"x (cm)"
            setylabel = r"erg cm$^{-3}$ s$^{-1}$"
            plt.xlabel(setxlabel)
            plt.ylabel(setylabel)
        elif self.ig == 2:
            setxlabel = r"r (cm)"
            setylabel = r"erg cm$^{-3}$ s$^{-1}$"
            plt.xlabel(setxlabel)
            plt.ylabel(setylabel)
        # show LEGEND
        plt.legend(loc=ilg, prop={'size': 10}, ncol=2)
        # display PLOT
        plt.show(block=False)
        # save PLOT
        if self.fext == 'png':
            plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.png')
        elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.eps') | 36.874477 | 125 | 0.572336 | import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
class InternalEnergyEquation(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, fext, intc, tke_diss, data_prefix):
super(InternalEnergyEquation, self).__init__(ig)
eht = self.customLoad(filename)
xzn0 = self.getRAdata(eht, 'xzn0')
nx = self.getRAdata(eht, 'nx')
dd = self.getRAdata(eht, 'dd')[intc]
ux = self.getRAdata(eht, 'ux')[intc]
pp = self.getRAdata(eht, 'pp')[intc]
ddux = self.getRAdata(eht, 'ddux')[intc]
ddei = self.getRAdata(eht, 'ddei')[intc]
ddeiux = self.getRAdata(eht, 'ddeiux')[intc]
divu = self.getRAdata(eht, 'divu')[intc]
ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
ddenuc1 = self.getRAdata(eht, 'ddenuc1')[intc]
ddenuc2 = self.getRAdata(eht, 'ddenuc2')[intc]
t_timec = self.getRAdata(eht, 'timec')
t_dd = self.getRAdata(eht, 'dd')
t_ddei = self.getRAdata(eht, 'ddei')
t_fht_ei = t_ddei / t_dd
fht_ux = ddux / dd
fht_ei = ddei / dd
fei = ddeiux - ddux * ddei / dd
ht_ei +
self.minus_div_fei + self.minus_div_ftt + self.minus_pp_div_ux + self.minus_eht_ppf_df +
self.plus_dd_fht_enuc + self.plus_disstke)
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$\widetilde{\varepsilon}_I$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
plt.legend(loc=ilg, prop={'size': 18})
plt.show(block=False)
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ei.eps')
def plot_ei_equation(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
if self.ig != 1 and self.ig != 2:
print("ERROR(InternalEnergyEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
grd1 = self.xzn0
lhs0 = self.minus_dt_dd_fht_ei
lhs1 = self.minus_div_dd_fht_ux_fht_ei
rhs0 = self.minus_div_fei
rhs1 = self.minus_div_ftt
rhs2 = self.minus_pp_div_ux
rhs3 = self.minus_eht_ppf_df
rhs4 = self.plus_dd_fht_enuc
rhs5 = self.plus_disstke
res = self.minus_resEiEquation
plt.figure(figsize=(7, 6))
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
to_plot = [lhs0, lhs1, rhs0, rhs1, rhs2, rhs3, rhs4, rhs5, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
plt.title('internal energy equation')
if self.ig == 1:
plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_x (\overline{\rho}\widetilde{u}_x \widetilde{\epsilon}_I$)")
plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_x f_I $")
plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_x f_T$ (not incl.)")
plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
elif self.ig == 2:
plt.plot(grd1, lhs0, color='#FF6EB4', label=r"$-\partial_t (\overline{\rho} \widetilde{\epsilon}_I )$")
plt.plot(grd1, lhs1, color='k', label=r"$-\nabla_r (\overline{\rho}\widetilde{u}_r \widetilde{\epsilon}_I$)")
plt.plot(grd1, rhs0, color='#FF8C00', label=r"$-\nabla_r f_I $")
plt.plot(grd1, rhs1, color='c', label=r"$-\nabla_r f_T$ (not incl.)")
plt.plot(grd1, rhs2, color='#802A2A', label=r"$-\bar{P} \bar{d}$")
plt.plot(grd1, rhs3, color='r', label=r"$-W_P$")
plt.plot(grd1, rhs4, color='b', label=r"$+\overline{\rho}\widetilde{\epsilon}_{nuc}$")
plt.plot(grd1, rhs5, color='m', label=r"$+\varepsilon_k$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N_\epsilon$")
plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"erg cm$^{-3}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"erg cm$^{-3}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
plt.legend(loc=ilg, prop={'size': 10}, ncol=2)
plt.show(block=False)
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'ei_eq.eps') | true | true |
7900e013a5cc4ed433eea3300f9c40a98db9911f | 12,327 | py | Python | cif_tools.py | cwaitt/zse | 4330397ddf84dafaa0af7bddd25756e008cb3ff5 | [
"MIT"
] | 3 | 2021-07-08T19:38:40.000Z | 2022-02-18T10:51:11.000Z | cif_tools.py | cwaitt/zse | 4330397ddf84dafaa0af7bddd25756e008cb3ff5 | [
"MIT"
] | null | null | null | cif_tools.py | cwaitt/zse | 4330397ddf84dafaa0af7bddd25756e008cb3ff5 | [
"MIT"
] | 6 | 2020-09-29T18:19:54.000Z | 2022-03-18T14:44:15.000Z | __all__ = ['read_cif','cif_site_labels']
from ase.io import read
from ase.spacegroup import spacegroup
import sys
import os
import logging
from math import *
import numpy as np
import pkg_resources
import warnings
warnings.filterwarnings("ignore")
path = '.temp_files/'
filepath = pkg_resources.resource_filename(__name__,path)
'''
NOTE ABOUT CIF FILE FORMATS:
CIFs must include '_symmetry_Int_Taables_number' to be read by ASE.
If this is not included please edit your CIF file to include this information.
'''
def get_atom_lines(alllines):
    """Locate the atom-record loop in a CIF and map its columns.

    Returns ``(start, end, cols)``: the inclusive line-index range of the
    atom records, and the column positions of the site label, type symbol
    and fractional x/y/z coordinates (in that order, when present).
    """
    header_lines = []
    for idx, text in enumerate(alllines):
        if '_atom' in text:
            header_lines.append(text)
            start = idx + 1
    # Records run from the line after the last header to the first blank
    # line, or to the end of the file if no blank line follows.
    end = None
    for offset, text in enumerate(alllines[start:]):
        if not text.split():
            end = start + offset - 1
            break
    if end is None:
        end = len(alllines) - 1
    cols = []
    for position, header in enumerate(header_lines):
        for key in ('site_label', 'site_type_symbol',
                    'fract_x', 'fract_y', 'fract_z'):
            if key in header:
                cols.append(position)
    return start, end, cols
def fix_cif(cif):
    """Patch known CIF formatting quirks and parse the file with ASE.

    Fixes applied to the raw text before parsing:
      * ``IT_coordinate_system_code`` lines are rewritten as
        ``_symmetry_space_group_setting`` so ASE can read the setting.
      * If ``_atom_site_type_symbol`` appears directly before
        ``_atom_site_label``, the two header lines are swapped so the
        label column comes first.

    The patched text is written to a temporary file inside ``filepath``,
    parsed, and the temporary file is removed.

    Returns:
        (atoms, alllines): the parsed ``ase.Atoms`` object and the
        patched list of file lines.
    """
    with open(cif, "r") as f:
        alllines = f.readlines()
    for i, line in enumerate(alllines):
        if 'IT_coordinate_system_code' in line:
            fields = line.split()
            alllines[i] = '_symmetry_space_group_setting {0} \n'.format(fields[-1])
        if '_atom_site_type_symbol' in line and '_atom_site_label' in alllines[i+1]:
            alllines[i], alllines[i+1] = alllines[i+1], alllines[i]
    # BUG FIX: str.rstrip('.cif') strips ANY trailing '.', 'c', 'i', 'f'
    # characters (e.g. 'mfi.cif' -> 'm'); remove the extension properly.
    file_name = cif[:-4] if cif.endswith('.cif') else cif
    temp_file = '{0}/{1}_temp.cif'.format(filepath, file_name.split('/')[-1])
    with open(temp_file, "w") as f:
        f.writelines(alllines)
    atoms = read(temp_file)
    os.remove(temp_file)
    return atoms, alllines
def get_tsites(cif):
    """Identify the unique T-sites (tetrahedral framework atoms) in a CIF.

    Returns:
        tsites: labels of the unique T-sites (``Si``-style labels are
            normalized to ``T`` labels)
        tmults: multiplicity of each unique T-site
        tinds: atom index of the first representative of each T-site
    """
    from ase.geometry import get_distances
    tsites = []
    tpos = []
    z,alllines = fix_cif(cif)
    # Every non-oxygen atom is treated as a T-site atom.
    si = [atom.index for atom in z if atom.symbol!='O']
    start,end,order = get_atom_lines(alllines)
    for line in alllines[start:end+1]:
        if 'Si' in line or 'T' in line:
            line = line.split()
            temp_label = line[order[0]]
            # A bare element symbol carries no digits; in that case the
            # site label lives in the second mapped column instead.
            if not any(str.isdigit(c) for c in temp_label):
                temp_label = line[order[1]]
            if 'Si' in temp_label:
                temp_label = temp_label.replace('Si','T')
            tsites.append(temp_label)
            pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
            tpos.append([round(num,2) for num in pos])
    tpos = np.array(tpos)
    pos = z[si].get_scaled_positions()
    tinds = []
    tmults = []
    t_class = []
    # Match each unique T-site's fractional position against the expanded
    # structure to find the first atom index belonging to that site
    # (0.03 tolerance on the summed coordinate difference).
    for tp in tpos:
        for i,p in enumerate(pos):
            p = [round(num,2) for num in p]
            diff = abs(tp-p)
            if sum(diff) <= 0.03:
                tinds.append(si[i])
    # Consecutive first-atom indices give each site's multiplicity; this
    # assumes ASE lists symmetry-equivalent atoms contiguously per site
    # (TODO confirm for non-IZA CIFs).
    for i in range(1,len(tsites)):
        tmults.append(tinds[i]-tinds[i-1])
    tmults.append(si[-1]-tinds[-1]+1)
    # Sanity check: multiplicities must add up to the total T-atom count.
    n = len(si)
    sn = sum(tmults)
    if n != sn:
        print('Something Went Wrong With T Sites')
    return tsites, tmults, tinds
def get_osites(cif):
    """Identify the unique O-sites (framework oxygens) in a CIF.

    Mirrors :func:`get_tsites` for the oxygen sublattice.

    Returns:
        osites: labels of the unique O-sites
        omults: multiplicity of each unique O-site
        oinds: atom index of the first representative of each O-site
    """
    from ase.geometry import get_distances
    osites = []
    opos = []
    z,alllines = fix_cif(cif)
    start,end,order = get_atom_lines(alllines)
    for line in alllines[start:end+1]:
        if 'O' in line:
            line = line.split()
            temp_label = line[order[0]]
            # A bare element symbol carries no digits; fall back to the
            # second mapped column for the site label.
            if not any(str.isdigit(c) for c in temp_label):
                temp_label = line[order[1]]
            osites.append(temp_label)
            pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
            opos.append([round(num,2) for num in pos])
    opos = np.array(opos)
    pos = z.get_scaled_positions()
    oinds = []
    omults = []
    o_class = []
    si = [atom.index for atom in z if atom.symbol=='Si']
    o = [atom.index for atom in z if atom.symbol=='O']
    o_pos = z[o].get_scaled_positions()
    # Match each unique O-site position against the expanded structure
    # (0.02 tolerance on the summed coordinate difference).
    for op in opos:
        for i,p in enumerate(o_pos):
            p = np.array([round(num,2) for num in p])
            diff = abs(op-p)
            if sum(diff) <= 0.02:
                oinds.append(o[i])
    # Consecutive first-atom indices give multiplicities; assumes ASE lists
    # symmetry-equivalent oxygens contiguously per site (TODO confirm).
    for i in range(1,len(osites)):
        omults.append(oinds[i]-oinds[i-1])
    omults.append(o[-1]-oinds[-1]+1)
    # Sanity check: multiplicities must add up to the oxygen count.
    n = len(o)
    sn = sum(omults)
    if n != sn:
        print('Something Went Wrong With O Sites')
    return osites, omults, oinds
def read_cif(cif):
    """Parse a CIF and bundle its T-site and O-site information.

    Returns (atoms, tsites, tmults, tinds, osites, omults, oinds).
    """
    atoms, _ = fix_cif(cif)
    t_names, t_mults, t_first = get_tsites(cif)
    o_names, o_mults, o_first = get_osites(cif)
    return atoms, t_names, t_mults, t_first, o_names, o_mults, o_first
def cif_site_labels(cif):
    """Map every atom index in the structure to its crystallographic site label."""
    _, t_names, t_mults, t_first, o_names, o_mults, o_first = read_cif(cif)
    labels = {}
    # T-sites and O-sites follow the same layout: each unique site starts
    # at a first index and spans `mult` consecutive atom indices.
    for names, mults, firsts in ((t_names, t_mults, t_first),
                                 (o_names, o_mults, o_first)):
        for name, mult, first in zip(names, mults, firsts):
            for offset in range(mult):
                labels[first + offset] = name
    return labels
''' DEPRECRATED FUNCTIONS'''
def float_with_error(x):
    """Parse a CIF numeric value, discarding an uncertainty suffix.

    CIF values may carry an error in parentheses, e.g. ``"1.234(5)"``;
    everything from the ``(`` onward is dropped.  An unknown value
    (``"?"``) yields 0.
    """
    if "?" in x:
        return 0
    head, _sep, _rest = x.partition("(")
    return float(head)
def get_mults(cif):
    """Compute T-site and O-site multiplicities directly from a CIF.

    Parses the symmetry operators out of the raw CIF text, expands the
    asymmetric-unit atoms with those operators, and counts how many
    symmetry copies each labelled site generates.

    Returns (tsites, tmults, osites, omults).
    """
    # read the cif file
    F = open(cif,"r")
    alllines = F.readlines()
    F.close()
    # Parse out data from the cif file
    # NOTE: the cell parameters (La/Lb/Lc, alpha/beta/gamma) are parsed
    # but never used below; `n` ends up pointing at the symop header.
    for i,line in enumerate(alllines):
        if '_cell_length_a' in line:
            fields = line.split()
            field = fields[-1]
            field = float_with_error(field)
            La = field
        if '_cell_length_b' in line:
            fields = line.split()
            field = fields[-1]
            field = float_with_error(field)
            Lb = field
        if '_cell_length_c' in line:
            fields = line.split()
            field = fields[-1]
            field = float_with_error(field)
            Lc = field
        if '_cell_angle_alpha' in line:
            fields = line.split()
            field = fields[-1]
            field = float_with_error(field)
            alpha = field
        if '_cell_angle_beta' in line:
            fields = line.split()
            field = fields[-1]
            field = float_with_error(field)
            beta = field
        if '_cell_angle_gamma' in line:
            fields = line.split()
            field = fields[-1]
            field = float_with_error(field)
            gamma = field
        if '_space_group_symop' in line or '_symmetry_equiv_pos' in line or '_space_group' in line:
            n = i
    lastline = len(alllines)
    loops = []
    for i,line in enumerate(alllines):
        if 'loop' in line:
            loops.append(i)
    # Symmetry operators live between the symop header and the second loop_.
    ops = []
    for i in range(n+1,loops[1]):
        n+=1
        line = alllines[i]
        if 'x' in line or 'X' in line:
            ops.append(line.replace("'",''))
    # Rewrite integer fractions (1/2, 2/3, ...) as float division so the
    # operator strings evaluate with true division semantics.
    for i in range(len(ops)):
        ops[i] = ops[i].replace("0/", "0./") # also for e.g. 10/9
        ops[i] = ops[i].replace("1/", "1./")
        ops[i] = ops[i].replace("2/", "2./")
        ops[i] = ops[i].replace("3/", "3./")
        ops[i] = ops[i].replace("4/", "4./")
        ops[i] = ops[i].replace("5/", "5./")
        ops[i] = ops[i].replace("6/", "6./")
        ops[i] = ops[i].replace("7/", "7./")
        ops[i] = ops[i].replace("8/", "8./")
        ops[i] = ops[i].replace("9/", "9./")
    # Collect the asymmetric-unit atoms: label + fractional coordinates.
    osites = []
    tsites = []
    atoms = []
    for j in range(n,lastline):
        line = alllines[j]
        if '_' not in line:
            fields = line.split()
            if len(fields) >3:
                tmp = (fields[0],float(fields[2]),float(fields[3]),float(fields[4]))
                if 'O' in fields[0]:
                    osites.append(fields[0])
                if 'T' in fields[0]:
                    tsites.append(fields[0])
                atoms.append(tmp)
    # Wrap all fractional coordinates into [0, 1).
    for i in range(len(atoms)):
        (name,xn,yn,zn) = atoms[i]
        xn = (xn + 10.0) % 1.0
        yn = (yn + 10.0) % 1.0
        zn = (zn + 10.0) % 1.0
        atoms[i] = (name,xn,yn,zn)
    # Perform the symmetry operations, appending each genuinely new image.
    label_list = []
    symbols = []
    positions = []
    for i in atoms:
        label_list.append(i[0])
    eps = 0.01
    imax = len(atoms)
    i=0
    while (i<imax):
        label,x,y,z=atoms[i]
        for op in ops:
            op = op.replace("'",'')
            op = op.lower()
            # SECURITY NOTE: eval() runs operator strings taken from the
            # CIF file -- only use this on trusted CIFs.
            xn,yn,zn = eval(op)
            xn = (xn + 10.0) % 1.0
            yn = (yn + 10.0) % 1.0
            zn = (zn + 10.0) % 1.0
            new_atom = True
            for at in atoms:
                # Duplicate if within eps of an existing atom ...
                if (abs(at[1]-xn) < eps and abs(at[2]-yn) < eps and abs(at[3]-zn) < eps):
                    new_atom = False
                if new_atom:
                    # ... or if it differs from one by a full lattice
                    # translation in exactly one coordinate.
                    p1 = np.array([at[1],at[2],at[3]])
                    p2 = np.array([xn,yn,zn])
                    diff = abs(p1-p2)
                    diff = np.round(diff,2)
                    count = np.count_nonzero(diff)
                    if count ==1 and 1 in diff:
                        new_atom = False
            if new_atom:
                atoms.append( (label,xn,yn,zn) )
                label_list.append(label)
        i += 1
        imax =len(atoms)
    # Count the generated copies of each labelled site.
    label_list = sorted(label_list)
    omults = []
    for o in osites:
        count = label_list.count(o)
        omults.append(count)
    tmults = []
    for t in tsites:
        count = label_list.count(t)
        tmults.append(count)
    return tsites, tmults, osites, omults
def get_indices(cif):
    '''
    This is a tool that will read a CIF file and return the unique T-sites,
    their multiplicities, and an example atom index.

    It also does the same for the unique O-sites in the framework.

    This tool only works on CIFs that are formatted the same way as the IZA
    Structure Database CIFs.

    Returns (tsites, tmults, first_ts, osites, omults, first_os).
    '''
    tsites, tmults, osites, omults = get_mults(cif)
    # BUG FIX: f.read() returned one string, so the loop below iterated
    # characters and the setting patch never applied (item assignment on a
    # str would also raise TypeError); read the file as a list of lines.
    with open(cif, "r") as f:
        alllines = f.readlines()
    for i, line in enumerate(alllines):
        if 'IT_coordinate_system_code' in line:
            fields = line.split()
            alllines[i] = '_symmetry_space_group_setting {0}'.format(fields[-1])
    atoms = read(cif)
    oinds = [atom.index for atom in atoms if atom.symbol=='O']
    index = 0
    first_os = []
    # First atom index of each unique O-site, stepping by multiplicity.
    for i,m in enumerate(omults):
        first_os.append(oinds[index])
        index+=m
    tinds = [atom.index for atom in atoms if atom.symbol !='O']
    index = 0
    first_ts = []
    # Same walk over the T-site sublattice.
    for i,m, in enumerate(tmults):
        first_ts.append(tinds[index])
        index+=m
    return tsites,tmults,first_ts, osites, omults, first_os
| 29.632212 | 99 | 0.532814 | __all__ = ['read_cif','cif_site_labels']
from ase.io import read
from ase.spacegroup import spacegroup
import sys
import os
import logging
from math import *
import numpy as np
import pkg_resources
import warnings
warnings.filterwarnings("ignore")
path = '.temp_files/'
filepath = pkg_resources.resource_filename(__name__,path)
def get_atom_lines(alllines):
order = []
for i,line in enumerate(alllines):
if '_atom' in line:
order.append(line)
start = i+1
end = None
for i,line in enumerate(alllines[start:]):
if len(line.split()) == 0:
end = start+i-1
break
if not end:
end = len(alllines)-1
new_order = []
for i,o in enumerate(order):
if 'site_label' in o:
new_order.append(i)
if 'site_type_symbol' in o:
new_order.append(i)
if 'fract_x' in o:
new_order.append(i)
if 'fract_y' in o:
new_order.append(i)
if 'fract_z' in o:
new_order.append(i)
return start,end,new_order
def fix_cif(cif):
f = open(cif,"r")
alllines = f.readlines()
f.close()
for i, line in enumerate(alllines):
if 'IT_coordinate_system_code' in line:
fields = line.split()
alllines[i] = '_symmetry_space_group_setting {0} \n'.format(fields[-1])
if '_atom_site_type_symbol' in line and '_atom_site_label' in alllines[i+1]:
alllines[i],alllines[i+1] = alllines[i+1],alllines[i]
file_name = cif.rstrip('.cif')
temp_file = '{0}/{1}_temp.cif'.format(filepath,file_name.split('/')[-1])
f = open(temp_file,"w")
f.writelines(alllines)
f.close()
atoms = read(temp_file);
os.remove(temp_file)
return atoms, alllines
def get_tsites(cif):
from ase.geometry import get_distances
tsites = []
tpos = []
z,alllines = fix_cif(cif)
si = [atom.index for atom in z if atom.symbol!='O']
start,end,order = get_atom_lines(alllines)
for line in alllines[start:end+1]:
if 'Si' in line or 'T' in line:
line = line.split()
temp_label = line[order[0]]
if not any(str.isdigit(c) for c in temp_label):
temp_label = line[order[1]]
if 'Si' in temp_label:
temp_label = temp_label.replace('Si','T')
tsites.append(temp_label)
pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
tpos.append([round(num,2) for num in pos])
tpos = np.array(tpos)
pos = z[si].get_scaled_positions()
tinds = []
tmults = []
t_class = []
for tp in tpos:
for i,p in enumerate(pos):
p = [round(num,2) for num in p]
diff = abs(tp-p)
if sum(diff) <= 0.03:
tinds.append(si[i])
for i in range(1,len(tsites)):
tmults.append(tinds[i]-tinds[i-1])
tmults.append(si[-1]-tinds[-1]+1)
n = len(si)
sn = sum(tmults)
if n != sn:
print('Something Went Wrong With T Sites')
return tsites, tmults, tinds
def get_osites(cif):
from ase.geometry import get_distances
osites = []
opos = []
z,alllines = fix_cif(cif)
start,end,order = get_atom_lines(alllines)
for line in alllines[start:end+1]:
if 'O' in line:
line = line.split()
temp_label = line[order[0]]
if not any(str.isdigit(c) for c in temp_label):
temp_label = line[order[1]]
osites.append(temp_label)
pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
opos.append([round(num,2) for num in pos])
opos = np.array(opos)
pos = z.get_scaled_positions()
oinds = []
omults = []
o_class = []
si = [atom.index for atom in z if atom.symbol=='Si']
o = [atom.index for atom in z if atom.symbol=='O']
o_pos = z[o].get_scaled_positions()
for op in opos:
for i,p in enumerate(o_pos):
p = np.array([round(num,2) for num in p])
diff = abs(op-p)
if sum(diff) <= 0.02:
oinds.append(o[i])
for i in range(1,len(osites)):
omults.append(oinds[i]-oinds[i-1])
omults.append(o[-1]-oinds[-1]+1)
n = len(o)
sn = sum(omults)
if n != sn:
print('Something Went Wrong With O Sites')
return osites, omults, oinds
def read_cif(cif):
atoms, alllines = fix_cif(cif)
ts,tm,tinds = get_tsites(cif)
os,om,oinds = get_osites(cif)
return atoms,ts,tm,tinds,os,om,oinds
def cif_site_labels(cif):
atoms,ts,tm,tinds,os,om,oinds = read_cif(cif)
labels = {}
for i,t in enumerate(ts):
for j in range(tm[i]):
labels[tinds[i]+j] = t
for i,o in enumerate(os):
for j in range(om[i]):
labels[oinds[i]+j] = o
return labels
def float_with_error(x):
if "?" in x:
return 0
pos = x.find("(")
if pos >= 0:
x = x[:pos]
return float(x)
def get_mults(cif):
F = open(cif,"r")
alllines = F.readlines()
F.close()
for i,line in enumerate(alllines):
if '_cell_length_a' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
La = field
if '_cell_length_b' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
Lb = field
if '_cell_length_c' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
Lc = field
if '_cell_angle_alpha' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
alpha = field
if '_cell_angle_beta' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
beta = field
if '_cell_angle_gamma' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
gamma = field
if '_space_group_symop' in line or '_symmetry_equiv_pos' in line or '_space_group' in line:
n = i
lastline = len(alllines)
loops = []
for i,line in enumerate(alllines):
if 'loop' in line:
loops.append(i)
ops = []
for i in range(n+1,loops[1]):
n+=1
line = alllines[i]
if 'x' in line or 'X' in line:
ops.append(line.replace("'",''))
for i in range(len(ops)):
ops[i] = ops[i].replace("0/", "0./") # also for e.g. 10/9
ops[i] = ops[i].replace("1/", "1./")
ops[i] = ops[i].replace("2/", "2./")
ops[i] = ops[i].replace("3/", "3./")
ops[i] = ops[i].replace("4/", "4./")
ops[i] = ops[i].replace("5/", "5./")
ops[i] = ops[i].replace("6/", "6./")
ops[i] = ops[i].replace("7/", "7./")
ops[i] = ops[i].replace("8/", "8./")
ops[i] = ops[i].replace("9/", "9./")
osites = []
tsites = []
atoms = []
for j in range(n,lastline):
line = alllines[j]
if '_' not in line:
fields = line.split()
if len(fields) >3:
tmp = (fields[0],float(fields[2]),float(fields[3]),float(fields[4]))
if 'O' in fields[0]:
osites.append(fields[0])
if 'T' in fields[0]:
tsites.append(fields[0])
atoms.append(tmp)
for i in range(len(atoms)):
(name,xn,yn,zn) = atoms[i]
xn = (xn + 10.0) % 1.0
yn = (yn + 10.0) % 1.0
zn = (zn + 10.0) % 1.0
atoms[i] = (name,xn,yn,zn)
# perfrom symmetry operations
label_list = []
symbols = []
positions = []
for i in atoms:
label_list.append(i[0])
eps = 0.01
imax = len(atoms)
i=0
while (i<imax):
label,x,y,z=atoms[i]
for op in ops:
op = op.replace("'",'')
op = op.lower()
xn,yn,zn = eval(op)
xn = (xn + 10.0) % 1.0
yn = (yn + 10.0) % 1.0
zn = (zn + 10.0) % 1.0
new_atom = True
for at in atoms:
if (abs(at[1]-xn) < eps and abs(at[2]-yn) < eps and abs(at[3]-zn) < eps):
new_atom = False
if new_atom:
p1 = np.array([at[1],at[2],at[3]])
p2 = np.array([xn,yn,zn])
diff = abs(p1-p2)
diff = np.round(diff,2)
count = np.count_nonzero(diff)
if count ==1 and 1 in diff:
new_atom = False
if new_atom:
atoms.append( (label,xn,yn,zn) )
label_list.append(label)
i += 1
imax =len(atoms)
label_list = sorted(label_list)
omults = []
for o in osites:
count = label_list.count(o)
omults.append(count)
tmults = []
for t in tsites:
count = label_list.count(t)
tmults.append(count)
return tsites, tmults, osites, omults
def get_indices(cif):
tsites, tmults, osites, omults = get_mults(cif)
f = open(cif,"r")
alllines = f.read()
f.close()
for i, line in enumerate(alllines):
if 'IT_coordinate_system_code' in line:
fields = line.split()
alllines[i] = '_symmetry_space_group_setting {0}'.format(fields[-1])
atoms = read(cif)
oinds = [atom.index for atom in atoms if atom.symbol=='O']
index = 0
first_os = []
for i,m in enumerate(omults):
first_os.append(oinds[index])
index+=m
tinds = [atom.index for atom in atoms if atom.symbol !='O']
index = 0
first_ts = []
for i,m, in enumerate(tmults):
first_ts.append(tinds[index])
index+=m
return tsites,tmults,first_ts, osites, omults, first_os
| true | true |
7900e0ee2d6332f9b7ff856016da19bff7c11496 | 74 | py | Python | jacdac/midi_output/__init__.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-15T21:30:36.000Z | 2022-02-15T21:30:36.000Z | jacdac/midi_output/__init__.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | null | null | null | jacdac/midi_output/__init__.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-08T19:32:45.000Z | 2022-02-08T19:32:45.000Z | # Autogenerated file.
from .client import MidiOutputClient # type: ignore
| 24.666667 | 51 | 0.797297 |
from .client import MidiOutputClient
| true | true |
7900e18324371e2870e0cf7dd9a4d0470bb97f1c | 672 | py | Python | electeez_auth/test_otp.py | Joneswn/Baloti | c499666dd9e2553fac88130dea2b6e9df8278234 | [
"MIT"
] | 1 | 2022-02-24T17:30:53.000Z | 2022-02-24T17:30:53.000Z | electeez_auth/test_otp.py | Joneswn/Baloti | c499666dd9e2553fac88130dea2b6e9df8278234 | [
"MIT"
] | null | null | null | electeez_auth/test_otp.py | Joneswn/Baloti | c499666dd9e2553fac88130dea2b6e9df8278234 | [
"MIT"
] | 2 | 2021-10-06T11:52:41.000Z | 2022-01-20T11:07:27.000Z | from datetime import timedelta
import pytest
from django.utils import timezone
from electeez_auth.models import User
@pytest.mark.django_db
def test_otp(client):
    """OTP links redirect once, then become invalid; expired links never work."""
    account = User.objects.create(email='otp@example.com')
    # A fresh token redirects to the requested target.
    first = account.otp_new(redirect='valid')
    assert client.post(first.path)['Location'] == 'valid'
    # Reusing the same link must be rejected.
    assert client.post(first.path)['Location'] != 'valid'
    # A token past its expiry must also be rejected.
    stale = account.otp_new()
    stale.otp_expiry = timezone.now() - timedelta(minutes=1)
    stale.save()
    assert client.post(stale.path)['Location'] != 'valid'
| 25.846154 | 60 | 0.696429 | from datetime import timedelta
import pytest
from django.utils import timezone
from electeez_auth.models import User
@pytest.mark.django_db
def test_otp(client):
user = User.objects.create(email='otp@example.com')
token = user.otp_new(redirect='valid')
response = client.post(token.path)
assert response['Location'] == 'valid'
response = client.post(token.path)
assert response['Location'] != 'valid'
# try expired link
token = user.otp_new()
token.otp_expiry = timezone.now() - timedelta(minutes=1)
token.save()
response = client.post(token.path)
assert response['Location'] != 'valid'
| true | true |
7900e27a0a7531447f93206bf2dcde0bf5f2b194 | 819 | py | Python | docker/test/integration/minifi/core/MqttBrokerContainer.py | rustammendel/nifi-minifi-cpp | 3a3615debb9129e7b954827debccaecc68b66006 | [
"Apache-2.0"
] | null | null | null | docker/test/integration/minifi/core/MqttBrokerContainer.py | rustammendel/nifi-minifi-cpp | 3a3615debb9129e7b954827debccaecc68b66006 | [
"Apache-2.0"
] | null | null | null | docker/test/integration/minifi/core/MqttBrokerContainer.py | rustammendel/nifi-minifi-cpp | 3a3615debb9129e7b954827debccaecc68b66006 | [
"Apache-2.0"
] | null | null | null | import logging
from .Container import Container
class MqttBrokerContainer(Container):
    """Integration-test container running a Mosquitto MQTT broker on port 1883."""
    def __init__(self, name, vols, network, image_store, command=None):
        # Engine id 'mqtt-broker' selects the broker image from the image store.
        super().__init__(name, 'mqtt-broker', vols, network, image_store, command)
    def get_startup_finished_log_entry(self):
        """Regex matched against container logs to detect broker readiness."""
        return "mosquitto version [0-9\\.]+ running"
    def deploy(self):
        """Start the broker container, publishing 1883/tcp on the host.

        Idempotent: does nothing if the container was already deployed.
        """
        if not self.set_deployed():
            return
        logging.info('Creating and running MQTT broker docker container...')
        # self.client is presumably the docker SDK client provided by the
        # Container base class -- confirm against Container.__init__.
        self.client.containers.run(
            self.image_store.get_image(self.get_engine()),
            detach=True,
            name=self.name,
            network=self.network.name,
            ports={'1883/tcp': 1883},
            entrypoint=self.command)
        logging.info('Added container \'%s\'', self.name)
| 32.76 | 82 | 0.634921 | import logging
from .Container import Container
class MqttBrokerContainer(Container):
def __init__(self, name, vols, network, image_store, command=None):
super().__init__(name, 'mqtt-broker', vols, network, image_store, command)
def get_startup_finished_log_entry(self):
return "mosquitto version [0-9\\.]+ running"
def deploy(self):
if not self.set_deployed():
return
logging.info('Creating and running MQTT broker docker container...')
self.client.containers.run(
self.image_store.get_image(self.get_engine()),
detach=True,
name=self.name,
network=self.network.name,
ports={'1883/tcp': 1883},
entrypoint=self.command)
logging.info('Added container \'%s\'', self.name)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.