content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from sqlalchemy import Integer, String, Column, Float
from api.db.base_class import BaseLayerTable
from geoalchemy2 import Geometry
class CommunityLocations(BaseLayerTable):
    """
    First Nation community point locations in British Columbia.

    Source dataset:
    https://catalogue.data.gov.bc.ca/dataset/first-nation-community-locations

    UPPER_CASE attribute names mirror the upstream dataset's field names;
    rows are loaded by ogr2ogr, so the column set must match the source.
    """
    __tablename__ = 'fn_community_locations'
    # ogc_fid is an automatic primary key created by ogr2ogr when loading this dataset.
    ogc_fid = Column(Integer, primary_key=True, autoincrement=True)
    COMMUNITY_LOCATION_ID = Column(Integer)
    FIRST_NATION_BC_NAME = Column(String)
    FIRST_NATION_FEDERAL_NAME = Column(String)
    FIRST_NATION_FEDERAL_ID = Column(Integer)
    URL_TO_BC_WEBSITE = Column(String)
    URL_TO_FEDERAL_WEBSITE = Column(String)
    URL_TO_FIRST_NATION_WEBSITE = Column(String)
    MEMBER_ORGANIZATION_NAMES = Column(String)
    LANGUAGE_GROUP = Column(String)
    BC_REGIONAL_OFFICE = Column(String)
    MAPSHEET_NUMBER = Column(String)
    PREFERRED_NAME = Column(String)
    ALTERNATIVE_NAME_1 = Column(String)
    ALTERNATIVE_NAME_2 = Column(String)
    ADDRESS_LINE1 = Column(String)
    ADDRESS_LINE2 = Column(String)
    OFFICE_CITY = Column(String)
    OFFICE_PROVINCE = Column(String)
    OFFICE_POSTAL_CODE = Column(String)
    LOCATION_DESCRIPTION = Column(String)
    SITE_NAME = Column(String)
    SITE_NUMBER = Column(String)
    COMMENTS = Column(String)
    OBJECTID = Column(Integer)
    SE_ANNO_CAD_DATA = Column(String)
    fme_feature_type = Column(String)
    # Point geometry in WGS84 (EPSG:4326), with a spatial index.
    SHAPE = Column(Geometry('POINT', 4326), index=True)
class TreatyAreas(BaseLayerTable):
    """
    First Nations treaty areas in British Columbia.

    Source dataset:
    https://catalogue.data.gov.bc.ca/dataset/first-nations-treaty-areas

    UPPER_CASE attribute names mirror the upstream dataset's field names;
    rows are loaded by ogr2ogr.
    """
    __tablename__ = 'fn_treaty_areas'
    # ogc_fid is an automatic primary key created by ogr2ogr when loading this dataset.
    ogc_fid = Column(Integer, primary_key=True, autoincrement=True)
    TREATY_AREA_ID = Column(Integer)
    TREATY = Column(String)
    EFFECTIVE_DATE = Column(String)
    FIRST_NATION_NAME = Column(String)
    AREA_TYPE = Column(String)
    LAND_TYPE = Column(String)
    GEOGRAPHIC_LOCATION = Column(String)
    CHAPTER_REFERENCE = Column(String)
    APPENDIX_REFERENCE = Column(String)
    COMMENTS = Column(String)
    FEATURE_CODE = Column(String)
    SE_ANNO_CAD_DATA = Column(String)
    OBJECTID = Column(Integer)
    # Float(53) = double precision in PostgreSQL.
    FEATURE_AREA_SQM = Column(Float(53))
    FEATURE_LENGTH_M = Column(Float(53))
    fme_feature_type = Column(String)
    # Geometry type left unspecified (source has mixed types); WGS84, indexed.
    SHAPE = Column(Geometry(srid=4326), index=True)
class TreatyLands(BaseLayerTable):
    """
    First Nations treaty lands in British Columbia.

    Source dataset:
    https://catalogue.data.gov.bc.ca/dataset/first-nations-treaty-lands

    UPPER_CASE attribute names mirror the upstream dataset's field names;
    rows are loaded by ogr2ogr.
    """
    __tablename__ = 'fn_treaty_lands'
    # ogc_fid is an automatic primary key created by ogr2ogr when loading this dataset.
    ogc_fid = Column(Integer, primary_key=True, autoincrement=True)
    TREATY_LAND_ID = Column(Integer)
    TREATY = Column(String)
    EFFECTIVE_DATE = Column(String)
    FIRST_NATION_NAME = Column(String)
    LAND_TYPE = Column(String)
    CHAPTER_REFERENCE = Column(String)
    APPENDIX_REFERENCE = Column(String)
    COMMENTS = Column(String)
    FEATURE_CODE = Column(String)
    OBJECTID = Column(Integer)
    SE_ANNO_CAD_DATA = Column(String)
    # Float(53) = double precision in PostgreSQL.
    FEATURE_AREA_SQM = Column(Float(53))
    FEATURE_LENGTH_M = Column(Float(53))
    fme_feature_type = Column(String)
    # Geometry type left unspecified (source has mixed types); WGS84, indexed.
    SHAPE = Column(Geometry(srid=4326), index=True)
|
nilq/baby-python
|
python
|
import csv
def find_labels(xmap, contig_start_label, contig_end_label, contig_orientation):
    """
    this does not account for split mapped molecules
    i.e., assumes one molecule ID per line in XMAP
    fix later
    """
    # Reverse-oriented contigs list their boundary labels swapped; undo that.
    if contig_orientation == "-":
        contig_start_label, contig_end_label = contig_end_label, contig_start_label
    print("mol_id" + "\t" + "five_prime_labels" + "\t" + "three_prime_labels")
    # Parse the XMAP as tab-separated values, skipping '#' header lines.
    with open(xmap, 'r') as handle:
        records = [row for row in csv.reader(handle, delimiter='\t') if "#" not in row[0]]
    for record in records:
        # Column 14 holds "(ref,qry)" alignment pairs: strip parens, drop the
        # trailing comma, comma-split, keep every other element (the contig
        # label IDs) and convert to int.
        raw = record[13].replace("(", "").replace(")", ",")[:-1].split(',')[::2]
        labels = [int(v) for v in raw]
        # Partition the molecule's contig labels relative to the SD boundary
        # labels; the comparison direction flips with contig orientation.
        if contig_orientation == "-":
            outside5 = [v for v in labels if v > contig_start_label]   # 5' of SDs
            inside5 = [v for v in labels if v <= contig_start_label]   # into SDs
            inside3 = [v for v in labels if v >= contig_end_label]     # into SDs
            outside3 = [v for v in labels if v < contig_end_label]     # 3' of SDs
        elif contig_orientation == "+":
            outside5 = [v for v in labels if v < contig_start_label]   # 5' of SDs
            inside5 = [v for v in labels if v >= contig_start_label]   # into SDs
            inside3 = [v for v in labels if v <= contig_end_label]     # into SDs
            outside3 = [v for v in labels if v > contig_end_label]     # 3' of SDs
        else:
            continue  # unknown orientation: the original also did nothing
        # The original had three if/elif branches that all printed the same
        # line; collapsed here into one condition. NOTE(review): this keeps
        # the original operator precedence exactly -- only the LAST len() in
        # each clause is compared to 1, the rest are bare non-emptiness
        # checks. That may not match the crossing check the comments
        # describe; confirm the intended condition before "fixing" it.
        if ((outside5 and inside5 and inside3 and len(outside3) > 1)
                or (outside5 and len(inside5) > 1)
                or (inside3 and len(outside3) > 1)):
            # Counts of labels outside the SD block on the 5' and 3' sides.
            print(record[1], len(outside5), len(outside3))
    return
# Example run: report molecules crossing the SD boundary labels 2214/2253 on
# contig 211 (reverse-oriented alignment).
# NOTE(review): hard-coded relative path; fails outside the original project tree.
find_labels('results/11029B_initial_genome_check/11029B_fullContig211_molecules.xmap', 2214, 2253, "-")
|
nilq/baby-python
|
python
|
####################
# Gui V3 16 Sept. 2017 Malacophonous
#####################
'''
API for Gui v3
guiAPP
guiWIN
guiWID
'''
from buildingblocks import guiRectangle,guiLines
import random as r
class Widget():
    """Base GUI widget: a rectangle participating in a parent/child tree.

    Coordinates are absolute window coordinates. ``visual`` holds the live
    draw primitives (guiRectangle / guiLines) currently in the batch, or
    None while hidden.

    Fixes vs. the previous revision:
    * setBatchTo recursed via a misspelled ``setBatchto`` (AttributeError).
    * hitTest bailed out with ``self._hitFinal`` as soon as the FIRST child
      missed (never testing later children) and returned None for widgets
      with no children at all.
    """

    def __init__(self, _x, _y, _w, _h):
        self.x = _x
        self.y = _y
        self.w = _w
        self.h = _h
        self.children = []       # child widgets, drawn/hit-tested recursively
        self.parent = None
        self.toplevel = False    # True when parented directly to the window
        self.visible = True
        self.batch = None        # draw batch (set via setBatchTo)
        self.visual = None       # list of live draw primitives, or None
        self.group = None
        self.holdsFocus = True
        self.style = None
        self.pad = 3             # highlight border padding in pixels

    def __repr__(self):
        return ('I am a {0} Widget at {1},{2} with a width {3} and height {4} in group {5}'
                .format(self.__class__, self.x, self.y, self.w, self.h, self.group))

    def setVisibleTo(self, state, recurse=True):
        """Show or hide this widget (and optionally its subtree)."""
        self.visible = state
        if state == True:
            # Debug visual: a randomly colored filled rectangle.
            self.visual = [guiRectangle(self.x, self.y, self.x + self.w, self.y + self.h,
                                        self.batch,
                                        [r.randint(0, 255), r.randint(0, 255), r.randint(0, 255)] + [255])]
        elif state == False and self.visual is not None:
            # should work unless there is a disconnect between the
            # self.visual and the actual draw batch item
            for component in self.visual:
                component.delete()
            self.visual = None
        if recurse:
            for child in self.children:
                child.setVisibleTo(state, recurse)

    def highlight(self):
        """Draw a border around the widget in the inverse of the parent's color."""
        if not self.toplevel:
            pcolor = self.parent.visual[0].vertexlist.colors
        else:
            pcolor = [0, 0, 0, 255]
        if self.visible:
            self.visual.append(guiLines(self.x - self.pad, self.y - self.pad,
                                        self.x + self.w + self.pad, self.y + self.h + self.pad,
                                        self.batch,
                                        [255 - pcolor[0], 255 - pcolor[1], 255 - pcolor[2], 255]))

    def dehighlight(self):
        """Remove the most recently added visual (assumed to be the highlight)."""
        if self.visible:
            self.visual[-1].delete()

    def setStyleTo(self, style):
        # Placeholder: styles are not implemented yet.
        pass

    def setGroupTo(self, group):
        self.group = group

    def setBatchTo(self, batch, recurse=True):
        """Assign the draw batch, optionally for the whole subtree."""
        self.batch = batch
        if recurse:
            for child in self.children:
                # Fixed: previously called the misspelled ``setBatchto``,
                # which raised AttributeError on any widget with children.
                child.setBatchTo(batch, recurse)

    def setParentTo(self, newparent):
        """Re-parent this widget, keeping both children lists consistent."""
        if self not in newparent.children:
            if self.parent is not None:
                # remove from old parent's children
                self.parent.children.remove(self)
            # set new parent as parent
            self.parent = newparent
            # add self to new parent's children
            newparent.children.append(self)
        else:
            print('{0} already parented to {1}'.format(self, self.parent))

    def hitTest(self, x, y):
        """Return the deepest visible descendant under (x, y), else this
        widget itself if hit, else None."""
        # Fixed: test ALL children before falling back to self; the old code
        # returned on the first non-hit child and returned None when the
        # widget had no children.
        for child in self.children:
            hit = child.hitTest(x, y)
            if hit is not None and hit.visible:
                return hit
        return self._hitFinal(x, y)

    def _hitFinal(self, x, y):
        # Strict inequalities: the widget's own edges do not count as hits.
        if (0 < x - self.x < self.w and 0 < y - self.y < self.h and self.visible):
            return self

    def translate(self, dx, dy):
        """Move by (dx, dy), clamped inside the parent's rectangle; moves the
        visuals and all children by the (possibly clamped) delta."""
        right = self.x + dx + self.w
        left = self.x + dx
        top = self.y + dy + self.h
        bottom = self.y + dy
        if self.toplevel:
            # NOTE(review): toplevel parents are assumed to expose
            # width/height (e.g. a window) rather than w/h -- confirm.
            px, py, pw, ph = (0, 0, self.parent.width, self.parent.height)
        else:
            px, py, pw, ph = (self.parent.x, self.parent.y, self.parent.w, self.parent.h)
        if right >= px + pw:
            self.x = px + pw - self.w
            dx = 0
        elif left <= px:
            self.x = px
            dx = 0
        else:
            self.x = left
        if top >= py + ph:
            self.y = py + ph - self.h
            dy = 0
        elif bottom <= py:
            self.y = py
            dy = 0
        else:
            self.y = bottom
        if self.visible:
            # NOTE(review): assumes visual[1] (the highlight) exists;
            # translate is only reached while focused/highlighted -- confirm.
            self.visual[0].move(dx, dy)
            self.visual[1].move(dx, dy)
        for child in self.children:
            child._move(dx, dy)

    def _move(self, dx, dy):
        # only for use for a widget's children when that widget is being translated
        # this should cut down on conditional checks for translations of widgets with lots of children
        if self.visible:
            self.visual[0].move(dx, dy)
        for child in self.children:
            child._move(dx, dy)

    def gainedMouseOver(self, window):
        pass

    def lostMouseOver(self, window):
        pass

    def gainedFocus(self, window):
        # While focused, dragging the mouse translates the widget.
        @window.event
        def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
            self.translate(dx, dy)
        self.highlight()

    def lostFocus(self, window):
        self.dehighlight()
|
nilq/baby-python
|
python
|
# coding: utf-8
# ======================================================================
# DZI-IIIF
# DeepZoom(dzi)形式のファイルをIIIF Image APIでアクセスできるようにする
# ======================================================================
# 2020-05-21 Ver.0.1: Initial Version, No info.json handling.
# 2020-05-22 Ver.0.2: Add info.json handling.
# ======================================================================
# dziiiif_dzifile.py: DZI 形式ファイルの読み込みと加工
# ======================================================================
# * 20200518 メモ suzuki
# 当分 Collection Type の Deep Zoom はサポートしない
# root が Image のものだけサポートする
# * 20200520 メモ suzuki
# 画像処理モジュールとして Pillow のインストールが必要
# >pip install Pillow (管理者モードで実行)
# グローバル変数の輸入
import dziiiif_common as glo
# モジュールの輸入
import os
import xml.etree.ElementTree as ET
import re
from PIL import Image
import math
import io
import sys
# Name of the DZI definition (descriptor) XML file.
dzi_xmlfile = 'dzc_output.xml'
# Name of the folder holding the DZI tile images.
dzi_imgfiles = 'dzc_output_files'
# Extract the '{namespace}' prefix from an ElementTree tag name.
def getxmlns(s):
    """Return the leading '{namespace}' portion of tag string *s*."""
    # re.match anchors at the start, equivalent to the original '^' anchor.
    return re.match(r'\{[^}]*\}', s).group()
#fed
# Strip any '{namespace}' prefix from an ElementTree tag name.
def stripxmlns(s):
    """Return tag string *s* with its '{namespace}' prefix removed."""
    # Matches the trailing run of characters containing no braces.
    return re.search(r'[^{}]*$', s).group()
#fed
# Attempt to parse an XML file and return its root element.
# On a parse error, a dummy root in the "Illegal_XML_file" namespace is
# returned so callers can detect the failure via the tag's namespace.
def tryXMLparse(s):
    """Parse the XML file at path *s*; return a sentinel root on parse error."""
    try:
        root = ET.parse(s).getroot()
    except ET.ParseError:
        root = ET.fromstring('<X xmlns="Illegal_XML_file"></X>')
    return root
#fed
# Read the DZI descriptor XML and populate the shared glo.dzi_* globals.
def getxmlinfo():
    """Load the DZI XML descriptor into the glo.dzi_* globals.

    Only an <Image> root is supported (Collection-type Deep Zoom is not
    handled). On any failure, the shared status is set to NOT_FOUND via
    glo.change_status_at.
    """
    xmlpath = os.path.join(glo.data_path, glo.identifier, dzi_xmlfile)  # path to the descriptor XML
    if os.path.isfile(xmlpath):
        root = tryXMLparse(xmlpath)  # parse (sentinel root on parse error)
        glo.xmlns = getxmlns(root.tag)
        if (stripxmlns(root.tag) == 'Image'):  # root must be an Image tag
            a = root.attrib
            glo.dzi_tilesize = glo.readint(a['TileSize'])
            glo.dzi_overlap= glo.readint(a['Overlap'])
            glo.dzi_format= a['Format']
            f = False  # becomes True once a Size child has been read
            for child in root:
                if (stripxmlns(child.tag) == 'Size'):
                    a = child.attrib
                    glo.dzi_w = glo.readint(a['Width'])
                    glo.dzi_h = glo.readint(a['Height'])
                    # Deepest pyramid level: ceil(log2 of the larger dimension).
                    glo.dzi_maxlevel = math.ceil(max(math.log2(glo.dzi_w), math.log2(glo.dzi_h)))
                    f = True
                else:
                    pass
                #fi
            #rof
            if (not f):
                glo.change_status_at(glo.status_code.NOT_FOUND, 'dzifile.getxmlinfo; Size tag')
        else:
            glo.change_status_at(glo.status_code.NOT_FOUND, 'dzifile.getxmlinfo; Image tag')
        #fi
    else:
        glo.change_status_at(glo.status_code.NOT_FOUND, 'dzifile.getxmlinfo; XML file')
    #fi
#fed
# Clamp x into the image's valid horizontal range: 0 <= x < glo.dzi_w.
def adjustX(x):
    """Return x clamped to [0, glo.dzi_w - 1]."""
    upper = glo.dzi_w - 1
    return min(max(x, 0), upper)
#fed
# Clamp y into the image's valid vertical range: 0 <= y < glo.dzi_h.
def adjustY(y):
    """Return y clamped to [0, glo.dzi_h - 1]."""
    upper = glo.dzi_h - 1
    return min(max(y, 0), upper)
#fed
# Determine the image region to crop, in source-image pixel coordinates.
def getregion():
    """Resolve the requested IIIF region into glo.dzi_region_{x,y,w,h}.

    Supports the full / square / pixel / percent region modes and flags
    BAD_REQUEST when the resolved region is empty or entirely outside the
    image.
    """
    if (glo.region == glo.region_mode.full):
        x1 = 0
        y1 = 0
        w = glo.dzi_w
        h = glo.dzi_h
    elif (glo.region == glo.region_mode.square):
        wh = min(glo.dzi_w, glo.dzi_h)  # centered square, fit to the shorter side
        x1 = int((glo.dzi_w - wh) / 2.0)
        y1 = int((glo.dzi_h - wh) / 2.0)
        w = wh
        h = wh
    elif (glo.region == glo.region_mode.pixel):
        x1 = adjustX(glo.region_x)
        y1 = adjustY(glo.region_y)
        x2 = adjustX(glo.region_x + glo.region_w)
        y2 = adjustY(glo.region_y + glo.region_h)
        w = max(x2 - x1, 0)
        h = max(y2 - y1, 0)
    elif (glo.region == glo.region_mode.percent):
        # Fixed: these four lines referenced bare dzi_w/dzi_h, which are not
        # defined in this module -- the image dimensions live on glo, so the
        # percent mode always raised NameError.
        x = math.floor(glo.dzi_w * glo.region_x / 100.0)
        y = math.floor(glo.dzi_h * glo.region_y / 100.0)
        w = math.floor(glo.dzi_w * glo.region_w / 100.0)
        h = math.floor(glo.dzi_h * glo.region_h / 100.0)
        x1 = adjustX(x)
        y1 = adjustY(y)
        x2 = adjustX(x + w)
        y2 = adjustY(y + h)
        w = max(x2 - x1, 0)
        h = max(y2 - y1, 0)
    else:
        # Should be unreachable: region modes are validated upstream.
        # NOTE(review): if this branch ever runs, x1/y1/w/h stay unbound and
        # the assignments below raise NameError -- confirm upstream guards.
        glo.change_status_at(glo.status_code.INTERNAL_SERVER_ERROR, 'dzifile.getregion')
    #fi
    if (w == 0 or h == 0):  # empty region, or fully outside the image
        glo.change_status_at(glo.status_code.BAD_REQUEST, 'dzifile.getregion; w == 0 || h == 0')
    #fi
    glo.dzi_region_x = x1
    glo.dzi_region_y = y1
    glo.dzi_region_w = w
    glo.dzi_region_h = h
#fed
# Determine the output image size.
def getsize():
    """Resolve the requested IIIF size into glo.outimage_{w,h}.

    Handles full/max, width-only, height-only, percent, exact w,h and
    best-fit (w,h with aspect preserved) modes; flags BAD_REQUEST for
    empty or over-limit output sizes.
    """
    if (glo.size == glo.size_mode.full or glo.size == glo.size_mode.max ):
        glo.outimage_w = glo.dzi_region_w
        glo.outimage_h = glo.dzi_region_h
    elif (glo.size == glo.size_mode.w_align):
        # Width given; derive height preserving the region's aspect ratio.
        glo.outimage_w = glo.size_w
        glo.outimage_h = math.floor(glo.size_w * glo.dzi_region_h / glo.dzi_region_w)
    elif (glo.size == glo.size_mode.h_align):
        # Height given; derive width preserving the region's aspect ratio.
        glo.outimage_w = math.floor(glo.size_h * glo.dzi_region_w / glo.dzi_region_h)
        glo.outimage_h = glo.size_h
    elif (glo.size == glo.size_mode.percent):
        glo.outimage_w = math.floor(glo.dzi_region_w * glo.size_percent / 100.0)
        glo.outimage_h = math.floor(glo.dzi_region_h * glo.size_percent / 100.0)
    elif (glo.size == glo.size_mode.wh):
        # Exact size; aspect ratio is NOT preserved.
        glo.outimage_w = glo.size_w
        glo.outimage_h = glo.size_h
    elif (glo.size == glo.size_mode.wh_align):
        # Best fit inside size_w x size_h, preserving aspect ratio.
        dzi_ratio = glo.dzi_region_w / glo.dzi_region_h  # w/h of the source region
        size_ratio = glo.size_w / glo.size_h             # w/h of the requested box
        if (dzi_ratio >= size_ratio):  # image wider than the requested box: fit the width
            glo.outimage_w = glo.size_w
            glo.outimage_h = math.floor(glo.size_w * glo.dzi_region_h / glo.dzi_region_w)
        else:                          # image taller than the requested box: fit the height
            glo.outimage_w = math.floor(glo.size_h * glo.dzi_region_w / glo.dzi_region_h)
            glo.outimage_h = glo.size_h
        #fi
    else:
        glo.change_status_at(glo.status_code.INTERNAL_SERVER_ERROR, 'dzifile.getsize')  # should be unreachable
    #fi
    if (glo.outimage_w == 0 or glo.outimage_h == 0):  # zero-sized output
        glo.change_status_at(glo.status_code.BAD_REQUEST, 'dzifile.getsize; outimage_w == 0 || outimage_h == 0')
    #fi
    if (glo.outimage_w > glo.wh_max or glo.outimage_h > glo.wh_max):  # exceeds the configured size limit
        glo.change_status_at(glo.status_code.BAD_REQUEST, 'dzifile.getsize; outimage_w > wh_max || outimage_h > wh_max')
    #fi
#fed
# Build the output image from the DZI pyramid tiles.
def makeoutputimage():
    """Assemble the requested region into glo.outimage / glo.outstream.

    Chooses the pyramid level whose scale best matches the output size,
    pastes the covering tiles onto one canvas (stripping the DZI tile
    overlap on interior tiles), crops the requested region, resizes it to
    the output dimensions, and encodes the result into glo.outstream.
    """
    mag_ratio = max(glo.outimage_w / glo.dzi_region_w, glo.outimage_h / glo.dzi_region_h)  # output/source scale factor
    tileimage_index = math.floor(math.log2(1 / mag_ratio))  # pyramid steps below full resolution (log2 of the inverse scale)
    tileimage_folder_id = str(glo.dzi_maxlevel - tileimage_index).strip()  # tile folder name for the chosen level
    # Requested region expressed in the chosen level's coordinate system.
    scaledimage_x = math.floor(glo.dzi_region_x / (2 ** tileimage_index))
    scaledimage_y = math.floor(glo.dzi_region_y / (2 ** tileimage_index))
    scaledimage_w = math.floor(glo.dzi_region_w / (2 ** tileimage_index))
    scaledimage_h = math.floor(glo.dzi_region_h / (2 ** tileimage_index))
    # Index range and pixel extent of the tiles covering that region.
    tileimage_i1 = math.floor(scaledimage_x / glo.dzi_tilesize)
    tileimage_i2 = math.floor((scaledimage_x + scaledimage_w - 1) / glo.dzi_tilesize)
    tileimage_j1 = math.floor(scaledimage_y / glo.dzi_tilesize)
    tileimage_j2 = math.floor((scaledimage_y +scaledimage_h - 1) / glo.dzi_tilesize)
    tileimage_x = tileimage_i1 * glo.dzi_tilesize
    tileimage_y = tileimage_j1 * glo.dzi_tilesize
    tileimage_w = (tileimage_i2 - tileimage_i1 + 1) * glo.dzi_tilesize
    tileimage_h = (tileimage_j2 - tileimage_j1 + 1) * glo.dzi_tilesize
    # Region to cut from the assembled tile canvas.
    inimage_x = scaledimage_x - tileimage_x
    inimage_y = scaledimage_y - tileimage_y
    inimage_w = scaledimage_w
    inimage_h = scaledimage_h
    # Canvas the tiles get pasted onto.
    image1 = Image.new('RGB', (tileimage_w, tileimage_h), (0, 0, 0))
    # Load and paste each covering tile.
    for i in range(tileimage_i1, tileimage_i2 + 1):
        for j in range (tileimage_j1, tileimage_j2 + 1):
            inimage_fn = str(i).strip()+"_"+str(j).strip()+"."+glo.dzi_format  # tile file name "<i>_<j>.<fmt>"
            inimage_path = os.path.join(glo.data_path, glo.identifier, dzi_imgfiles, tileimage_folder_id, inimage_fn)  # full path to the tile file
            glo.inimage_path = inimage_path
            if (os.path.isfile(inimage_path)):
                inimage = Image.open(inimage_path)
                # Interior tiles carry glo.dzi_overlap extra pixels on their
                # leading edges; skip them so tiles butt together exactly.
                x1 = glo.dzi_overlap if (i > tileimage_i1) else 0
                y1 = glo.dzi_overlap if (j > tileimage_j1) else 0
                x2 = x1 + glo.dzi_tilesize
                y2 = y1 + glo.dzi_tilesize
                cimg = inimage.crop((x1, y1, x2, y2))
                image1.paste(cimg, ((i - tileimage_i1) * glo.dzi_tilesize, (j - tileimage_j1) * glo.dzi_tilesize))
            else:
                glo.change_status_at(glo.status_code.NOT_FOUND, 'dzifile.makeoutputimage; DZI image file')
                break
            #fi
        #rof
    #rof
    # Crop the requested region out of the canvas, resize, and encode.
    if (glo.status == glo.status_code.OK):
        image2 = image1.crop((inimage_x, inimage_y, inimage_x + inimage_w, inimage_y + inimage_h))
        glo.outimage = image2.resize((glo.outimage_w, glo.outimage_h))
        o = io.BytesIO()
        glo.outimage.save(o, format=glo.format_PILstr[glo.format], quality=glo.outimage_quality)
        glo.outstream = o.getvalue()
        glo.outstream_size = len(glo.outstream)
    #fi
#fed
|
nilq/baby-python
|
python
|
# Returns the upper triangular part of a matrix (2-D tensor)
# torch.triu(input, diagonal=0, *, out=None) → Tensor
# The argument 'diagonal' controls which diagonal to consider.
import torch
# 10x10 all-ones source matrix.
source_tensor = torch.ones((10, 10))
# print(source_tensor)
# Boolean upper-triangular mask, then transposed (giving a lower-triangular mask).
tensor = (torch.triu(source_tensor) == 1).transpose(0, 1)
print(tensor)
# Additive attention-style mask: 0.0 where the mask is True, -inf where False.
print(tensor.float().masked_fill(tensor == 0, float('-inf')).masked_fill(tensor == 1, float(0.0)))
|
nilq/baby-python
|
python
|
import cant_utils as cu
import numpy as np
import matplotlib.pyplot as plt
import glob
import bead_util as bu
import tkinter
import tkinter.filedialog
import os, sys
from scipy.optimize import curve_fit
import bead_util as bu
from scipy.optimize import minimize_scalar as minimize
import pickle as pickle
import time
####################################################
####### Input parameters for data processing #######
TESTING = True
# Directory-index -> data-directory metadata mapping.
ddict = bu.load_dir_file( "/home/charles/opt_lev_classy/scripts/cant_force/dir_file.txt" )
#print ddict
respdir = 'Y'
resp_axis = 1 # imaging response direction
cant_axis = 1 # stage control axis
straighten_axis = 2 # axis with coherent drive to straighten
bin_size = 5 # um of cantilever travel
load_charge_cal = True
maxfiles = 1000
plot_forward_backward = False #True
#subtract_background = True
drivefreq = 18.0
cant_volts_to_um = 8.0 # 80 um / 10 V
#fig_title = ('Force vs. Cantilever Position: %s Hz, %s - %s, ' + bead) % (drivefreq, gas, num)
#dirs = [530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543] # 0 um sep
dirs = [544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557] # 10 um sep
#dirs = [558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571] # 20 um sep
# Calibration and transfer-function input files.
tf_path = './trans_funcs/Hout_20160808.p'
step_cal_path = './calibrations/step_cal_20160808.p'
thermal_cal_file_path = '/data/20160808/bead1/1_5mbar_zcool_final.h5'
fcurve_path = '/home/charles/gravity/data/force_curves.p'
# NOTE(review): runs at import time and fails if the pickle file is absent.
force_curve_dic = pickle.load( open(fcurve_path, 'rb') )
# Identify Sep and Rbead
def proc_dir(d):
    """Load, calibrate and diagonalize one data directory.

    Looks up directory index *d* in ddict, loads its files and transfer
    function, applies the step/charge calibration, diagonalizes the data,
    and records the unique drive amplitude (um) along straighten_axis on
    the returned Data_dir object.
    """
    dv = ddict[d]
    dir_obj = cu.Data_dir(dv[0], [0,0,dv[-1]], dv[1])
    dir_obj.load_dir(cu.diag_loader, maxfiles = maxfiles)
    dir_obj.load_H(tf_path)
    if load_charge_cal:
        dir_obj.load_step_cal(step_cal_path)
    else:
        # NOTE(review): step_calibration is not defined anywhere in this
        # script -- this branch raises NameError if load_charge_cal is False.
        dir_obj.charge_step_calibration = step_calibration
    dir_obj.gravity_signals = force_curve_dic
    dir_obj.calibrate_H()
    dir_obj.diagonalize_files(reconstruct_lowf=True, lowf_thresh=200., #plot_Happ=True, \
                              build_conv_facs=True, drive_freq=18.)
    # Collect the stage drive amplitude (volts -> um) from every file.
    amps = []
    for fil_obj in dir_obj.fobjs:
        stagestuff = fil_obj.get_stage_settings(axis=straighten_axis)
        amp = stagestuff[2] * cant_volts_to_um
        amps.append(amp)
    uamps = np.unique(amps)
    if len(uamps) > 1:
        print('STUPIDITYERROR: Multiple dirve amplitudes in directory')
    # Amplitude vector with the drive only on straighten_axis.
    newlist = []
    for i in [0,1,2]:
        if i == straighten_axis:
            newlist.append(uamps[0])
        else:
            newlist.append(0.0)
    dir_obj.drive_amplitude = newlist
    return dir_obj
# Process all directories and plot average force vs. cantilever position:
# raw imaging response (left column) and diagonalized forces (right column),
# one row per response axis (X, Y, Z).
dir_objs = list(map(proc_dir, dirs))
colors_yeay = bu.get_color_map( len(dir_objs) )
f, axarr = plt.subplots(3,2,sharey='all',sharex='all',figsize=(10,12),dpi=100)
for ind, obj in enumerate(dir_objs):
    col = colors_yeay[ind]
    cal_facs = obj.conv_facs
    # Bin the force data along the cantilever travel axis.
    obj.get_avg_force_v_pos(cant_axis = cant_axis, bin_size = bin_size)
    obj.get_avg_diag_force_v_pos(cant_axis = cant_axis, bin_size = bin_size)
    keys = list(obj.avg_force_v_pos.keys())
    for key in keys:
        amp = obj.drive_amplitude[straighten_axis]
        # Legend label: driven axis letter plus amplitude in um.
        if straighten_axis == 0:
            lab = 'X: '
        elif straighten_axis == 1:
            lab = 'Y: '
        elif straighten_axis == 2:
            lab = 'Z: '
        lab = lab + str(amp) + ' um'
        for resp_axis in [0,1,2]:
            # Averaged data layout: [0] positions, [1] force, [2] errors;
            # second index 0 = both sweep directions, 1 = forward, -1 = backward.
            xdat = obj.avg_force_v_pos[key][resp_axis,0][0]
            ydat = (obj.avg_force_v_pos[key][resp_axis,0][1]) * cal_facs[resp_axis]
            errs = (obj.avg_force_v_pos[key][resp_axis,0][2]) * cal_facs[resp_axis]
            xdat_d = obj.avg_diag_force_v_pos[key][resp_axis,0][0]
            ydat_d = obj.avg_diag_force_v_pos[key][resp_axis,0][1]
            errs_d = obj.avg_diag_force_v_pos[key][resp_axis,0][2]
            xdatf = obj.avg_force_v_pos[key][resp_axis,1][0]
            xdatb = obj.avg_force_v_pos[key][resp_axis,-1][0]
            ydatf = (obj.avg_force_v_pos[key][resp_axis,1][1]) * cal_facs[resp_axis]
            ydatb = (obj.avg_force_v_pos[key][resp_axis,-1][1]) * cal_facs[resp_axis]
            errsf = (obj.avg_force_v_pos[key][resp_axis,1][2]) * cal_facs[resp_axis]
            errsb = (obj.avg_force_v_pos[key][resp_axis,-1][2]) * cal_facs[resp_axis]
            xdatf_d = obj.avg_diag_force_v_pos[key][resp_axis,1][0]
            xdatb_d = obj.avg_diag_force_v_pos[key][resp_axis,-1][0]
            ydatf_d = obj.avg_diag_force_v_pos[key][resp_axis,1][1]
            ydatb_d = obj.avg_diag_force_v_pos[key][resp_axis,-1][1]
            errsf_d = obj.avg_diag_force_v_pos[key][resp_axis,1][2]
            errsb_d = obj.avg_diag_force_v_pos[key][resp_axis,-1][2]
            # Plot offsets, currently all zero (kept for manual tweaking).
            offsetf = 0.0
            offsetf_d = 0.0
            offsetb = 0.0
            offsetb_d = 0.0
            offset = 0.0
            offset_d = 0.0
            # Forces plotted in fN (hence the 1e15 factors).
            if plot_forward_backward:
                axarr[resp_axis,0].errorbar(xdatf, (ydatf+offsetf)*1e15, errsf*1e15, \
                                            label = lab, fmt='<-', ms=5, color = col, mew=0.0)
                axarr[resp_axis,1].errorbar(xdatf_d, (ydatf_d+offsetf_d)*1e15, errsf_d*1e15, \
                                            label = lab, fmt='<-', ms=5, color = col, mew=0.0)
                axarr[resp_axis,0].errorbar(xdatb, (ydatb+offsetb)*1e15, errsb*1e15, \
                                            fmt='>-', ms=5, color = col, mew=0.0)
                axarr[resp_axis,1].errorbar(xdatb_d, (ydatb_d+offsetb_d)*1e15, errsb_d*1e15, \
                                            fmt='>-', ms=5, color = col, mew=0.0)
            else:
                axarr[resp_axis,0].errorbar(xdat, (ydat+offset)*1e15, errs*1e15, \
                                            label = lab, fmt='.-', ms=10, color = col)
                axarr[resp_axis,1].errorbar(xdat_d, (ydat_d+offset_d)*1e15, errs_d*1e15, \
                                            label = lab, fmt='.-', ms=10, color = col)
# Axis titles and labels.
arrs = [axarr,]
for arr in arrs:
    arr[0,0].set_title('Raw Imaging Response')
    arr[0,1].set_title('Diagonalized Forces')
    for col in [0,1]:
        arr[2,col].set_xlabel('Distance from Cantilever [um]')
    arr[0,0].set_ylabel('X-direction Force [fN]')
    arr[1,0].set_ylabel('Y-direction Force [fN]')
    arr[2,0].set_ylabel('Z-direction Force [fN]')
    arr[0,0].legend(loc=0, numpoints=1, ncol=2, fontsize=9)
plt.show()
|
nilq/baby-python
|
python
|
import logging
import time
import cv2 as cv
import numpy as np
from scipy.sparse import lil_matrix
from scipy.optimize import least_squares
def project(points, camera_params, K, dist=np.array([])):
    """
    Project 3D points into 2D image coordinates using the given camera
    parameters (R matrix and t vector), intrinsic matrix, and distortion
    parameters. (Docstring fixed: the previous text said "2D points".)
    :params points: 3D points to reproject
    :params camera_params: camera parameters (N x 12) with [:9] containing rotation matrix params
    and [9:12] containing translation vector parameters
    :params K: camera intrinsic matrix
    :params dist: distortion parameters (N x 5)
    :return points_proj: Numpy array of reprojected points
    """
    points_proj = []
    for idx in range(len(camera_params)): # idx applies to both points and cam_params, they are = length vectors
        R = camera_params[idx][:9].reshape(3, 3)
        # projectPoints expects a Rodrigues rotation vector, not a matrix.
        rvec, _ = cv.Rodrigues(R)
        t = camera_params[idx][9:]
        pt = points[idx]
        pt = np.expand_dims(pt, axis=0)
        pt, _ = cv.projectPoints(pt, rvec, t, K, distCoeffs=dist)
        pt = np.squeeze(np.array(pt))
        points_proj.append(pt)
    return np.array(points_proj)
def fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, K):
    """Bundle-adjustment residual function.

    Splits the flat parameter vector into per-view camera parameters
    (12 values each: 9 rotation-matrix entries + 3 translation entries)
    and 3D points, reprojects the points, and returns the flattened
    residuals against the observed 2D points.
    """
    cam_block = n_cameras * 12
    cams = params[:cam_block].reshape((n_cameras, 12))
    structure = params[cam_block:].reshape((n_points, 3))
    reprojected = project(structure[point_indices], cams[camera_indices], K)
    residuals = reprojected - points_2d
    return residuals.ravel()
def bundle_adjustment_sparsity(n_cameras, n_points, camera_indices, point_indices):
    """Build the Jacobian sparsity pattern for bundle adjustment.

    Each observation contributes two residual rows (x and y); each row
    depends on its view's 12 camera parameters and its point's 3
    coordinates. Returns a scipy.sparse.lil_matrix of 0/1 entries.
    """
    n_obs = camera_indices.size
    n_rows = 2 * n_obs
    n_cols = n_cameras * 12 + n_points * 3
    A = lil_matrix((n_rows, n_cols), dtype=int)
    obs = np.arange(n_obs)
    cam_base = camera_indices * 12                 # first camera column per observation
    pt_base = n_cameras * 12 + point_indices * 3   # first point column per observation
    for offset in range(12):
        A[2 * obs, cam_base + offset] = 1
        A[2 * obs + 1, cam_base + offset] = 1
    for offset in range(3):
        A[2 * obs, pt_base + offset] = 1
        A[2 * obs + 1, pt_base + offset] = 1
    return A
class BundleAdjustment:
    """Sparse bundle adjustment over a set of completed views.

    Flattens per-view camera parameters (9 rotation-matrix entries + 3
    translation entries = 12 per view) and the 3D world points into one
    vector, then refines both with scipy's least_squares using a sparse
    Jacobian pattern.
    """

    def __init__(self, wpSet, K, dist, completed_views):
        # wpSet: world-point set exposing .world_points and a
        # .correspondences table (pandas-style; iterated with iterrows).
        self.completed_views = completed_views
        self.wpSet = wpSet
        self.points_3d = wpSet.world_points
        self.points_2d = []        # observed 2D feature positions
        self.point_indices = []    # 3D-point index per observation
        self.camera_indices = []   # view index per observation
        self.view_idx = {}         # view id -> dense view index
        self.camera_params = []    # one 12-vector per view
        self.focal_len = (K[0, 0] + K[1, 1]) / 2
        self.dist = dist[0][:2]    # first two distortion coefficients
        self.K = K
        self.correspondences = wpSet.correspondences
        self.n_cameras = None      # filled in by view2idx()
        self.n_points = None       # filled in by view2idx()

    def view2idx(self):
        """
        Takes in a list of views and converts them to indices. For each 2D
        point, a view index is assigned."""
        for view in self.completed_views:
            if view.id not in self.view_idx:
                self.view_idx[view.id] = len(self.view_idx)
                rot_vec = np.squeeze(view.rotation) # 1 x 9
                params = np.concatenate((rot_vec, view.translation.reshape((1, 3))), axis=None).tolist()
                # print(view.name, params)
                self.camera_params.append(params)
        self.camera_params = np.array(self.camera_params)
        # Each correspondence row links one 3D point (index i) with two
        # observations, one per view.
        for i, row in self.correspondences.iterrows():
            self.points_2d.append(row['FeatureIndex'][0])
            self.camera_indices.append(self.view_idx[row['ViewId'][0]])
            self.points_2d.append(row['FeatureIndex'][1])
            self.camera_indices.append(self.view_idx[row['ViewId'][1]])
            self.point_indices.append(i)
            self.point_indices.append(i)
        self.camera_indices = np.array(self.camera_indices)
        self.point_indices = np.array(self.point_indices)
        self.points_2d = np.array(self.points_2d)
        self.points_3d = np.array(self.points_3d)
        self.n_points = self.points_3d.shape[0]
        self.n_cameras = self.camera_params.shape[0]
        logging.info(f"Number of views processed: {self.n_cameras}.")
        logging.info(f"Number of 3D points processed: {self.n_points}.")
        # Snapshot of the optimization inputs for offline debugging.
        np.savez('optimize_data', camera_params=self.camera_params, points_3d=self.points_3d,
                 camera_indices=self.camera_indices, point_indices=self.point_indices, points_2d=self.points_2d)

    def optimize(self):
        """
        Performs optimization on reprojection error function by updating poses and 3D points.
        :return poses: (N x 12) numpy array containing optimized pose information with [:9] containing rotation matrix params
        and [9:12] containing translation vector parameters
        :return points_3d: optimized 3D points
        """
        self.view2idx()
        x0 = np.hstack((self.camera_params.ravel(), self.points_3d.ravel()))
        print(len(self.camera_params.ravel()), len(self.points_3d.ravel()))
        # Evaluates the initial residuals (return value intentionally unused).
        fun(x0, self.n_cameras, self.n_points, self.camera_indices, self.point_indices, self.points_2d, self.K)
        A = bundle_adjustment_sparsity(self.n_cameras, self.n_points, self.camera_indices, self.point_indices)
        t0 = time.time()
        res = least_squares(fun, x0, jac_sparsity=A, verbose=2, x_scale='jac', ftol=1e-4, method='trf', xtol=1e-12,
                            args=(self.n_cameras, self.n_points, self.camera_indices, self.point_indices,
                                  self.points_2d, self.K))
        t1 = time.time()
        logging.info(f"Optimized {self.n_points} in {t1-t0} seconds.")
        # Unpack the optimized vector back into poses and 3D points.
        points_3d = res.x[self.n_cameras * 12:].reshape(self.n_points, 3)
        poses = res.x[:self.n_cameras * 12].reshape(self.n_cameras, 12)
        return poses, points_3d
|
nilq/baby-python
|
python
|
# Copyright (c) 2010-2019 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def Marker():
    """Provide the Marker class under test (late relative import)."""
    from ..marker import Marker
    return Marker
class TestMarker:
    """Serialization round-trip tests for the chart Marker element."""

    def test_ctor(self, Marker):
        # symbol=None must serialize as an explicit <symbol val="none"/>,
        # alongside the default spPr (solid line) block.
        marker = Marker(symbol=None, size=5)
        xml = tostring(marker.to_tree())
        expected = """
        <marker>
          <symbol val="none"/>
          <size val="5"/>
          <spPr xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
            <a:ln>
              <a:prstDash val="solid" />
            </a:ln>
          </spPr>
        </marker>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, Marker):
        # Attributes are read back from a parsed XML node.
        src = """
        <marker>
          <symbol val="square"/>
          <size val="5"/>
        </marker>
        """
        node = fromstring(src)
        marker = Marker.from_tree(node)
        assert marker == Marker(symbol="square", size=5)
@pytest.fixture
def DataPoint():
    """Provide the DataPoint class under test (late relative import)."""
    from ..marker import DataPoint
    return DataPoint
class TestDataPoint:
    """Serialization round-trip tests for the chart DataPoint (dPt) element."""

    def test_ctor(self, DataPoint):
        # Only idx is set; the default spPr (solid line) block is emitted.
        dp = DataPoint(idx=9)
        xml = tostring(dp.to_tree())
        expected = """
        <dPt>
          <idx val="9"/>
          <spPr>
            <a:ln xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
              <a:prstDash val="solid"/>
            </a:ln>
          </spPr>
        </dPt>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, DataPoint):
        # idx and bubble3D are read back; bubble3D val="0" maps to False.
        src = """
        <dPt>
          <idx val="9"/>
          <marker>
            <symbol val="triangle"/>
            <size val="5"/>
          </marker>
          <bubble3D val="0"/>
        </dPt>
        """
        node = fromstring(src)
        dp = DataPoint.from_tree(node)
        assert dp.idx == 9
        assert dp.bubble3D is False
|
nilq/baby-python
|
python
|
from telegram import InlineKeyboardButton
def generate_buttons(labels):
    """Build an inline keyboard from answer labels, two buttons per row.

    Each button's ``callback_data`` is the label itself, matching the
    original behaviour. Previously the function indexed ``labels[0]``
    through ``labels[3]`` and therefore crashed for anything other than
    exactly four labels; it now accepts any number (an odd count simply
    leaves a shorter last row), producing the identical 2x2 layout for
    the four-label case used by the quiz below.
    """
    return [
        [InlineKeyboardButton(text, callback_data=text) for text in labels[row:row + 2]]
        for row in range(0, len(labels), 2)
    ]
# Quiz content for an Italian-language electronics/maths Telegram bot.
# `question` maps a question id (0-19) to its text (Italian, kept verbatim).
question = {0:"Qual'è l'unità di misura della tensione elettrica?",
            1:"Qual'è il modulo del numero complesso 4+i3 ?",
            2:"Qual'è l'unità di misura della corrente elettrica?",
            3:"Qual'è l'unità di misura della potenza?",
            4:"Qual'è la derivata di sin(x) rispetto a x?",
            5:"Sia f(x)=k1*x + k2. Qual'è la derivata di f(x) rispetto a x?",
            6:"Quanto vale il valor medio di sin(x) calcolato su un periodo?",
            7:"Qual'è l'unità di misura del campo elettrico?",
            8:"Qual'è l'unità di misura del campo magnetico?",
            9:"Qual'è l'unità di misura della frequenza?",
            10:"Come si calcola l'energia cinetica?",
            11:"Se f è una funzione lineare ed f(x1) vale 5. Quanto vale f(5 x1)?",
            12:"Sia f una funzione periodica. Il suo integrale su un periodo vale 10. Quanto vale l'integrale su tre periodi?",
            13:"20°C, espressi in kelvin, sono pari a:",
            14:"Durante la fase di evaporazione, la temperatura dell'acqua:",
            15:"Il numero complesso 5-i3 in quale quadrante si trova?",
            16:"Il numero complesso -5-i5 ha fase pari a:",
            17:"Due vettori hannno modulo pari a 5 e sono sfasati di 180 gradi. Il modulo della loro somma vale:",
            18:"Due vettori hannno modulo pari a 5 e sono sfasati di 90 gradi. Il modulo della loro somma vale:",
            19:"Un corpo solido ha un peso di 9810 N. Quanto vale, approssimativamente, la sua massa?"}
# `answer` maps the same ids to a 2x2 inline keyboard of candidate answers
# (each button's callback_data is its label — see generate_buttons above).
answer = {0:generate_buttons(['metri (m)', 'joule (J)', 'volt (V)', 'newton (N)']),
          1:generate_buttons(['16', '5', '9', '25']),
          2:generate_buttons(['ampere (A)', 'watt (W)', 'Farad (F)', 'weber (Wb)']),
          3:generate_buttons(['watt (W)', 'joule (J)', 'pascal (Pa)', 'kelvin (K)']),
          4:generate_buttons(['log(x)', '1/tan(x)', '-cos(x)', 'cos(x)']),
          5:generate_buttons(['k2+k1', 'k1', 'k2', 'k1*k2']),
          6:generate_buttons(['0', '1', 'infinito', '-1']),
          7:generate_buttons(['V', 'kg', 'm/V', 'V/m']),
          8:generate_buttons(['rad', 'A/m', 'm2', 'Hz']),
          9:generate_buttons(['m', 'Wb', 'Hz', '°C']),
          10:generate_buttons(['mv', '0.5 m v^2', 'm g h', 'v^2/2']),
          11:generate_buttons(['1', '5', '25', '0.5']),
          12:generate_buttons(['10', '3.3', '15', '30']),
          13:generate_buttons(['-273.15 K', '293.15 K','10000 K','-20 K']),
          14:generate_buttons(['aumenta', 'è costante', 'diminuisce', 'vale 100 K']),
          15:generate_buttons(['primo', 'secondo', 'terzo', 'quarto']),
          16:generate_buttons(['0 gradi', '-135 gradi', '90 gradi', '-pi/2']),
          17:generate_buttons(['10', 'indefinito', '5/sqrt(2)', '0']),
          18:generate_buttons(['0', '5/sqrt(2)', '10', '5*sqrt(2)']),
          19:generate_buttons(['100 kg', '1 tonnellata', '10 kg', '1000 g'])}
# `right_answer` maps each id to the label of the correct button; it must
# match one of the labels used in `answer` for that id exactly.
right_answer = {0:"volt (V)",
                1:"5",
                2:"ampere (A)",
                3:"watt (W)",
                4:"cos(x)",
                5:"k1",
                6:"0",
                7:"V/m",
                8:"A/m",
                9:"Hz",
                10:"0.5 m v^2",
                11:"25",
                12:"30",
                13:"293.15 K",
                14:"è costante",
                15:"quarto",
                16:"-135 gradi",
                17:"0",
                18:"5*sqrt(2)",
                19:"1 tonnellata"}
|
nilq/baby-python
|
python
|
import base64
from authlib.common.encoding import to_bytes, to_unicode
import fence.utils
def create_basic_header(username, password):
    """
    Build an HTTP Basic authorization header from the given credentials,
    per RFC 2617 (https://tools.ietf.org/html/rfc2617).
    Use this to send client credentials in the authorization header.
    """
    credentials = "{}:{}".format(username, password)
    encoded = to_unicode(base64.b64encode(to_bytes(credentials)))
    return {"Authorization": "Basic " + encoded}
def create_basic_header_for_client(oauth_client):
    """
    Wrap ``create_basic_header`` to make a header for the client.
    """
    credentials = (oauth_client.client_id, oauth_client.client_secret)
    return create_basic_header(*credentials)
|
nilq/baby-python
|
python
|
# ywo queue to stack
import queue
class ArrayQueue(object):
    """Bounded stack (LIFO) emulated with two FIFO queues.

    ``push`` enqueues onto the primary queue. ``pop`` drains all but the
    last element into the helper queue, returns that last (most recently
    pushed) element, then swaps the two queues so the survivors become
    the primary queue again.
    """

    def __init__(self, capacity=5):
        # `capacity` was previously hard-coded to 5; 5 is kept as the
        # default so existing callers behave identically.
        self.queue1 = queue.Queue(capacity)
        self.help = queue.Queue(capacity)

    def push(self, data):
        """Emulate a stack push; raise RuntimeError when full."""
        if self.queue1.full():  # was `== True` — redundant comparison
            raise RuntimeError('the stack is full')
        self.queue1.put(data)

    def pop(self):
        """Emulate a stack pop; raise RuntimeError when empty."""
        if self.queue1.empty():
            raise RuntimeError('the stack is empty')
        # Move everything except the last-pushed element to the helper.
        while self.queue1.qsize() != 1:
            self.help.put(self.queue1.get())
        result = self.queue1.get()
        # Swap roles: the helper (holding the survivors) becomes primary.
        self.queue1, self.help = self.help, self.queue1
        return result
# Smoke test: push five items, then pop them back in LIFO order.
if __name__ == '__main__':
    q = ArrayQueue()
    for i in range(5):
        q.push(i)
    for i in range(5):
        print(q.pop())
    # q.push(3)
|
nilq/baby-python
|
python
|
'''
Written by Jinsung Yoon
Date: Jul 9th 2018 (Revised Oct 19th 2018)
Generative Adversarial Imputation Networks (GAIN) Implementation on MNIST
Reference: J. Yoon, J. Jordon, M. van der Schaar, "GAIN: Missing Data Imputation using Generative Adversarial Nets," ICML, 2018.
Paper Link: http://medianetlab.ee.ucla.edu/papers/ICML_GAIN.pdf
Appendix Link: http://medianetlab.ee.ucla.edu/papers/ICML_GAIN_Supp.pdf
Contact: jsyoon0823@g.ucla.edu
'''
#%% Packages
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os
from tqdm import tqdm
import tflib as lib
import tflib.plot
import math
import pandas as pd
def calcNMSE(truth, pred):
    """Per-row L1 error of ``pred`` normalised by the row sum of ``truth``.

    Returns (mean, median) of the per-row normalised errors. Note that,
    despite the name, the numerator is an absolute (L1) error, not a
    squared one — matching the original implementation.
    """
    abs_err = np.sum(np.abs(truth - pred), axis=1)
    row_norm = np.sum(truth, axis=1)
    print(abs_err.shape, row_norm.shape, min(abs_err), min(row_norm))
    ratio = abs_err / row_norm
    print(np.mean(ratio), np.median(ratio))
    return (np.mean(ratio), np.median(ratio))
def preprocessData(X, maxZerosInCell, maxZerosInGene):
    """Filter overly sparse cells (rows) and genes (columns), then
    log-transform and scale each remaining cell by its own maximum.

    Returns (normalised matrix, gene keep-mask, cell keep-mask,
    per-cell log maxima used for the scaling).
    """
    zeros_per_cell = (X == 0).sum(axis=1)
    keep_cells = zeros_per_cell <= maxZerosInCell
    zeros_per_gene = (X == 0).sum(axis=0)
    keep_genes = zeros_per_gene <= maxZerosInGene
    cell_idx = np.array([i for i, flag in enumerate(keep_cells) if flag])
    gene_idx = np.array([i for i, flag in enumerate(keep_genes) if flag])
    filtered = X[cell_idx[:, None], gene_idx]
    logged = np.log(filtered + 1)
    row_max = logged.max(axis=1)
    print(np.min(X), np.max(X), np.min(row_max), np.max(row_max))
    scaled = logged / row_max[:, None]
    return (scaled, keep_genes, keep_cells, row_max)
def preprocessData2(X, maxZerosInCell, maxZerosInGene):
    """Variant of ``preprocessData``: after the same sparsity filtering,
    library-size normalise each cell by its total count relative to the
    median total, then log-transform.

    Returns (normalised matrix, gene keep-mask, cell keep-mask,
    per-cell size factors).
    """
    zeros_per_cell = (X == 0).sum(axis=1)
    keep_cells = zeros_per_cell <= maxZerosInCell
    zeros_per_gene = (X == 0).sum(axis=0)
    keep_genes = zeros_per_gene <= maxZerosInGene
    cell_idx = np.array([i for i, flag in enumerate(keep_cells) if flag])
    gene_idx = np.array([i for i, flag in enumerate(keep_genes) if flag])
    filtered = X[cell_idx[:, None], gene_idx]
    totals = filtered.sum(axis=1)
    size_factors = totals / np.median(totals)
    normalised = filtered / size_factors[:, None]
    logged = np.log(normalised + 1)
    return (logged, keep_genes, keep_cells, size_factors)
def nonZeroMean(v):
    """Median of the non-zero entries of ``v`` rounded to 2 decimals,
    or 0 when every entry is zero. (A median, despite the name.)
    """
    if np.count_nonzero(v) == 0:
        return 0
    return np.median(v[v != 0]).round(decimals=2)
def getMask(X, alpha, sparsity=101):
    """Build a 0/1 mask over X flagging zeros treated as dropouts.

    An entry becomes 0 ("missing", to be imputed) when its value is 0,
    its gene's non-zero median exceeds ``alpha``, and the gene has at
    most ``sparsity`` percent zero entries; all other entries stay 1.
    Returns (mask, per-gene non-zero medians).
    """
    # Per-gene (column) median of non-zero values via nonZeroMean.
    geneAvgs = pd.DataFrame(X).apply(nonZeroMean)
    geneSums = (X==0).sum(axis=0)
    maxNZerosInGene = sparsity*X.shape[0]/100
    #print(geneAvgs)
    #print(geneAvgs)
    mask = np.ones(X.shape)
    #X_bar = mask*geneAvgs[None,:]
    # NOTE(review): geneAvgs is a pandas Series; the ndarray-style
    # `[None,:]` indexing for broadcasting is version-sensitive —
    # confirm against the installed pandas.
    mask[(geneAvgs[None,:] > alpha) & (geneSums <= maxNZerosInGene) & (X == 0)] = 0
    return(mask, geneAvgs)
def transformBack(X_, X, M):
    """Keep observed entries of X (where M == 1), take imputed values
    from X_ elsewhere, and return the merge transposed."""
    merged = M * X + (1 - M) * X_
    return np.transpose(merged)
def transformBackAll(X_, X, M, filteredGenes, filteredCells, maxPerCell):
    """Undo the log/max-scaling preprocessing on the imputed matrix:
    merge observed and imputed entries, rescale each row by its stored
    maximum, invert the log (exp - 1) and transpose.

    ``filteredGenes`` and ``filteredCells`` are accepted for interface
    compatibility but not used here.
    """
    merged = M * X + (1 - M) * X_
    print(np.min(merged), np.max(merged))
    rescaled = merged * maxPerCell[:, None]
    print(np.min(rescaled), np.max(rescaled))
    counts = np.exp(rescaled) - 1
    print(np.min(counts), np.max(counts))
    return np.transpose(counts)
# Rows are genes, Cols are cells
# --- Load raw counts and the log-normalised ground truth -------------------
data_suffix = 'PBMC'#'dropout_index_5_seed_20000'
out_suffix = 'PBMC'#'5_20000'
data = pd.read_csv('simulation_data/simulation_data_'+data_suffix+'.csv', delimiter=',', header=None)
data_full = pd.read_csv('simulation_data/simulation_data_'+data_suffix+'_logn_true.csv', delimiter=',', header=None)
# Transpose ground truth so its orientation matches `data` after filtering.
data_full = data_full.T.to_numpy()
#data = data.loc[1:10,1:6].T
#print(data.to_numpy)
data = data.to_numpy()#[:1000,:]
print("Data with ", data.shape[0], " cells, and ", data.shape[1], " genes")
#print(data[:5, :5])
# Keep cells/genes with at most 95% zero entries.
maxZerosInCell = 95*data.shape[1]/100
maxZerosInGene = 95*data.shape[0]/100
small_zero_thre = 2#0.6#0.7
data_f, filteredGenes, filteredCells, maxPerCell = preprocessData2(data, maxZerosInCell, maxZerosInGene)
selectedCellsIdxs = np.array([i for i, v in enumerate(filteredCells) if v])
selectedGenesIdxs = np.array([i for i, v in enumerate(filteredGenes) if v])
print(len(selectedCellsIdxs), len(selectedGenesIdxs), selectedCellsIdxs, selectedGenesIdxs)
# Restrict the ground truth to the kept genes/cells; save 1-based gene ids.
data_full = data_full[selectedGenesIdxs[:, None], selectedCellsIdxs]
np.savetxt('imputation_gain_data/'+out_suffix+'_selectedGenes.csv', selectedGenesIdxs+1 , delimiter=',', fmt="%d")
#print(data_f, filteredGenes, filteredCells, maxPerCell)
# Initial dropout mask: zeros in genes whose non-zero median > threshold.
mask, geneAvgs = getMask(data_f, small_zero_thre, 101)
#data_f = data_f.to_numpy()
print("Impute Matrix After Preprocessing ", data_f.shape)
#print(mask.shape)
print(data[:5, :5])
print(data_f[:5, :5])
print(mask[:5, :5])
np.savetxt('imputation_gain_data/'+out_suffix+'_logn.csv', data_f, delimiter=',', fmt="%f")
#np.savetxt('imputation_gain_data/'+out_suffix+'_mask.csv', mask, delimiter=',', fmt="%f")
#mask2 = pd.read_csv('simulation_data/mask.csv', delimiter=',', header=None).T.to_numpy()
print("Mask Dim ", mask.shape)
#print(mask[:5, :5])
#res = mask2*2+mask
#np.savetxt('imputation_gain_data/'+out_suffix+'_mask_diff.csv', res, delimiter=',', fmt="%i")
#np.savetxt('imputation_gain_data/'+out_suffix+'_mask_avgs.csv', geneAvgs, delimiter=',', fmt="%.3f")
#idxs = np.where(res[1,]==2)
#print(geneAvgs[idxs[0]], data_f[1, idxs[0]])
#exit(0)
#%% System Parameters
# 1. Mini batch size
mb_size = 128
# 3. Hint rate
p_hint = 0.9
# 4. Loss Hyperparameters
alpha = 5
# 5. Imput Dim (Fixed)
Dim = data_f.shape[1]
# Mask Vector and Hint Vector Generation
def sample_M(m, n, p):
    """Random binary (float) m x n matrix; each entry is 1 with
    probability 1 - p and 0 otherwise."""
    draws = np.random.uniform(0., 1., size=[m, n])
    return (draws > p) * 1.
def sample_M_bias(m, n, p, probs):
    """m x n mask of ones with ``int(p * n)`` zeros per row, the zeroed
    columns drawn per row without replacement according to ``probs``
    (None means uniform)."""
    zeros_per_row = int(p * n)
    picks = np.array([np.random.choice(n, zeros_per_row, False, probs)
                      for _ in range(m)])
    row_idx = np.repeat(range(m), zeros_per_row)
    col_idx = picks.reshape(-1)
    mask = np.ones((m, n))
    mask[row_idx, col_idx] = 0
    return mask
######################
## TensorFlow
######################
#%% Necessary Functions
# 1. Xavier Initialization Definition
def xavier_init(size):
    """Xavier/Glorot-style normal initialiser for a weight tensor of
    shape ``size``: stddev = 1 / sqrt(fan_in / 2)."""
    fan_in = size[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=stddev)
# 2. Plot (4 x 4 subfigures)
def plot(samples):
    """Render ``samples`` as a grid of grayscale images and return the figure.

    NOTE(review): the header comment says 4x4 but the GridSpec is 5x5,
    and every sample is assumed to reshape to (298, 11) — confirm with
    the data actually passed in.
    """
    fig = plt.figure(figsize = (5,5))
    gs = gridspec.GridSpec(5,5)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(298,11), cmap='Greys_r')
    return fig
'''
GAIN Consists of 3 Components
- Generator
- Discriminator
- Hint Mechanism
'''
#%% GAIN Architecture
#%% 1. Input Placeholders
# 1.1. Data Vector
X = tf.placeholder(tf.float32, shape = [None, Dim])
# 1.2. Mask Vector
M = tf.placeholder(tf.float32, shape = [None, Dim])
# 1.3. Hint vector
H = tf.placeholder(tf.float32, shape = [None, Dim])
# 1.4. Random Noise Vector
Z = tf.placeholder(tf.float32, shape = [None, Dim])
# Non-zero indicator for the batch; fed everywhere but effectively
# inert while NZC is hard-coded to the constant 1 (see the loss section).
NZ = tf.placeholder(tf.float32, shape = [None, Dim])
#%% 2. Discriminator
# 3-layer MLP: (data + hint, 2*Dim) -> 256 -> 128 -> Dim outputs.
D_W1 = tf.Variable(xavier_init([Dim*2, 256])) # Data + Hint as inputs
D_b1 = tf.Variable(tf.zeros(shape = [256]))
D_W2 = tf.Variable(xavier_init([256, 128]))
D_b2 = tf.Variable(tf.zeros(shape = [128]))
D_W3 = tf.Variable(xavier_init([128, Dim]))
D_b3 = tf.Variable(tf.zeros(shape = [Dim])) # Output is multi-variate
theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]
#%% 3. Generator
# 3-layer MLP: (data + mask, 2*Dim) -> 256 -> EMB_SIZE -> Dim outputs.
G_W1 = tf.Variable(xavier_init([Dim*2, 256])) # Data + Mask as inputs (Random Noises are in Missing Components)
G_b1 = tf.Variable(tf.zeros(shape = [256]))
EMB_SIZE=128
G_W2 = tf.Variable(xavier_init([256, EMB_SIZE]))
G_b2 = tf.Variable(tf.zeros(shape = [EMB_SIZE]))
G_W3 = tf.Variable(xavier_init([EMB_SIZE, Dim]))
G_b3 = tf.Variable(tf.zeros(shape = [Dim]))
theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]
#%% GAIN Function
#%% 1. Generator
def generator(x,z,m):
    """Impute: fill entries of x where the mask m is 0 with noise z, then
    run the 3-layer network over the concatenated [filled data, mask].

    Returns (output matrix, second hidden layer activations).
    """
    inp = m * x + (1-m) * z # Fill in random noise on the missing values
    inputs = tf.concat(axis = 1, values = [inp,m]) # Mask + Data Concatenate
    G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
    G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
    #G_prob = tf.nn.sigmoid(tf.matmul(G_h2, G_W3) + G_b3) # [0,1] normalized Output
    # Final layer is ReLU (non-negative, unbounded) instead of the
    # sigmoid kept above as a comment.
    G_prob = tf.nn.relu(tf.matmul(G_h2, G_W3) + G_b3) # [0,1] normalized Output
    return G_prob, G_h2
#%% 2. Discriminator
def discriminator(x, m, g, h):
    """Per-component probability that an entry is observed rather than
    imputed, computed from the merged matrix and the hint ``h``."""
    inp = m * x + (1-m) * g # Replace missing values to the imputed values
    inputs = tf.concat(axis = 1, values = [inp,h]) # Hint + Data Concatenate
    D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
    D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
    D_logit = tf.matmul(D_h2, D_W3) + D_b3
    D_prob = tf.nn.sigmoid(D_logit) # [0,1] Probability Output
    return D_prob
#%% 3. Others
# Random sample generator for Z
def sample_Z(m, n):
    """Uniform [0, 1) noise matrix of shape (m, n)."""
    return np.random.uniform(0., 1., size=(m, n))
def sample_idx(m, n):
    """Draw n distinct indices uniformly at random from range(m)."""
    return np.random.permutation(m)[:n]
#%% Structure
G_sample, G_embed = generator(X,Z,M)
D_prob = discriminator(X, M, G_sample, H)
#%% Loss
# NZC is currently the constant 1, i.e. the non-zero weighting via the
# NZ placeholder is disabled; the expressions below keep it in place so
# it can be re-enabled by restoring `NZC = NZ`.
NZC = 1#NZ#+1-NZ
D_loss1 = -tf.reduce_mean(M * NZC * tf.log(D_prob + 1e-8) + (1-M) * NZC * tf.log(1. - D_prob + 1e-8)) * 2
G_loss1 = -tf.reduce_mean((1-M) * NZC * tf.log(D_prob + 1e-8)) / tf.reduce_mean(tf.maximum(1-M * NZC, 1) )
MSE_train_loss = tf.reduce_mean( NZC * (M * X - M * G_sample)**2) / tf.reduce_mean(tf.maximum(M * NZC, 1) )
D_loss = D_loss1
G_loss = G_loss1 + alpha * MSE_train_loss
#%% MSE Performance metric
MSE_test_loss = tf.reduce_mean(((1-M) * NZC * X - (1-M) * NZC * G_sample)**2) / tf.reduce_mean(tf.maximum((1-M) * NZC, 1) )
#%% Solver
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
# Sessions
os.environ["CUDA_VISIBLE_DEVICES"]="0"
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#%%
# Output Initialization
if not os.path.exists('imputation_gain_data/'):
    os.makedirs('imputation_gain_data/')
# Iteration Initialization
# Train and "test" share the same matrices; testX/testM are whole-array
# slices of the training data.
trainX = data_f
trainM = mask
testX = trainX[:,:]
testM = trainM[:,:]
Train_No = trainX.shape[0]
geneProbs = np.sum(trainM)
#print([nonZeroMean(trainX[:,i]) for i in range(7150, 7155)])
print("NZeros=", np.count_nonzero(trainX)/(trainX.shape[0]*trainX.shape[1]*1.0), np.count_nonzero(testX)/(trainX.shape[0]*trainX.shape[1]*1.0))
print("NZeros=", np.count_nonzero(1-testM)/(trainX.shape[0]*trainX.shape[1]*1.0), np.count_nonzero(trainX*(1-testM))*1.0/np.count_nonzero(1-testM))
# Curriculum of successively lower dropout thresholds / sparsity caps.
cutoff_i = 0
cutoffs = [2, 1, 0.75, 0.5, 0.5, 0.5]
sparsity = [101, 101, 101, 60, 85, 90]
maxIters = [2000, 10000, 25000, 70000, 70000, 70000]
# NOTE(review): this second assignment overrides the schedule above.
maxIters = [1000, 3000, 6000, 70000, 70000, 70000]
percent_nonzero = 0#np.count_nonzero(trainX*(1-np.transpose(testM)))*1.0/np.count_nonzero(1-testM)
#%% Start Iterations
for it in tqdm(range(70020)):
    #%% Inputs
    # Sample a minibatch, its mask, fresh noise and the hint matrix.
    mb_idx = sample_idx(Train_No, mb_size)
    X_mb = trainX[mb_idx,:]
    Z_mb = sample_Z(mb_size, Dim)
    M_mb = trainM[mb_idx,:]
    H_mb1 = sample_M(mb_size, Dim, 1-p_hint)
    H_mb = M_mb * H_mb1 + 0.5 * (1-H_mb1)
    New_X_mb = M_mb * X_mb + (1-M_mb) * Z_mb # Missing Data Introduce
    # One discriminator step, then one generator step, per iteration.
    _, D_loss_curr = sess.run([D_solver, D_loss1], feed_dict = {X: X_mb, M: M_mb, Z: New_X_mb, H: H_mb, NZ:(X_mb > 0)*1})
    _, G_loss_curr, MSE_train_loss_curr, MSE_test_loss_curr = sess.run([G_solver, G_loss1, MSE_train_loss, MSE_test_loss],
                                                                       feed_dict = {X: X_mb, M: M_mb, Z: New_X_mb, H: H_mb, NZ:(X_mb > 0)*1})
    #%% Intermediate Losses
    if it % 100000 == 0:
        print('Iter: {}'.format(it))
        print('Train_loss: {:.4}'.format(MSE_train_loss_curr))
        print('Test_loss: {:.4}'.format(MSE_test_loss_curr))
        print()
    #%% Output figure
    if it % 1000 == 0 and it > 1:
        #if it == 3000 or it == 25000 or it == 50000 or it == 70000:
        # Impute the full matrix and track reconstruction metrics.
        preds, MSE_train_loss_curr, MSE_test_loss_curr, d_pr = sess.run([G_sample, MSE_train_loss, MSE_test_loss, D_prob],
                                                                        feed_dict = {X: testX, M: testM,
                                                                                     Z: testM * testX + (1-testM) * sample_Z(testM.shape[0], Dim), H: testM, NZ:(testX > 0)*1})
        imp = transformBack(preds, testX, testM)
        print(imp.shape, testM.shape, testX.shape, data_full.shape)
        #print([nonZeroMean(imp[:,i]) for i in range(7150, 7155)])
        mse = ((data_full-imp)**2).mean(axis=None)
        mse_preds = ((data_full-np.transpose(preds))**2).mean(axis=None)
        mse_masked = (((data_full-imp)*(1-np.transpose(testM)))**2).mean(axis=None)
        print("threshold:", cutoff_i, cutoffs[cutoff_i], maxIters[cutoff_i], sparsity[cutoff_i])
        print("MSE=", mse)
        nz = (1-testM).sum(axis=1)
        print("AvgNImputsPerCell=", np.min(nz), np.max(nz), np.median(nz))
        print("NZeros=", np.count_nonzero(imp)/(imp.shape[0]*imp.shape[1]*1.0), np.count_nonzero(testX)/(imp.shape[0]*imp.shape[1]*1.0))
        print("NZeros=", np.count_nonzero(1-testM)/(imp.shape[0]*imp.shape[1]*1.0), np.count_nonzero(imp*(1-np.transpose(testM)))*1.0/np.count_nonzero(1-testM))
        percent_nonzero = np.count_nonzero(imp*(1-np.transpose(testM)))*1.0/np.count_nonzero(1-testM)
        #np.savetxt('imputation_gain_data/'+'/gain_preds_pr.txt', d_pr, delimiter=',', fmt="%f")
        lib.plot.plot('imputation_gain_data/'+'/loss', "MSE_train", MSE_train_loss_curr)
        lib.plot.plot('imputation_gain_data/'+'/loss', "MSE_test", MSE_test_loss_curr)
        lib.plot.plot('imputation_gain_data/'+'/MSE', "MSE", mse)
        lib.plot.plot('imputation_gain_data/'+'/MSE', "MSE preds", mse_preds)
        lib.plot.plot('imputation_gain_data/'+'/MSE', "MSE imp only", mse_masked)
        lib.plot.plot('imputation_gain_data/'+'/NZeros', "NZeros_imp", np.count_nonzero(imp)/(imp.shape[0]*imp.shape[1]*1.0))
        lib.plot.plot('imputation_gain_data/'+'/NZeros', "NZeros_masked_imp", np.count_nonzero(imp*(1-np.transpose(testM)))*1.0/np.count_nonzero(1-testM))
        lib.plot.flush()
        if it % 5000 == 0 and it > 1:
            #imp_final = transformBackAll(preds, testX, testM, filteredGenes, filteredCells, maxPerCell)
            np.savetxt('imputation_gain_data/'+'/gain_'+out_suffix+'_transformed.csv', imp, delimiter=',', fmt="%f")
            #np.savetxt('imputation_gain_data/'+'/gain_'+out_suffix+"_"+str(it)+'.csv', imp_final, delimiter=',', fmt="%f")
        # Advance the curriculum once imputation saturates or the stage
        # hits its iteration budget: save the matrix, lower the cutoff,
        # rebuild the mask and continue from the current imputation.
        if percent_nonzero > 0.95 or it > maxIters[cutoff_i]:
            np.savetxt('imputation_gain_data/'+'/gain_'+out_suffix+'_'+str(it)+'_'+str(cutoffs[cutoff_i])+'_transformed.csv', imp, delimiter=',', fmt="%f")
            cutoff_i += 1
            mask, geneAvgs = getMask(data_f, cutoffs[cutoff_i], sparsity[cutoff_i])
            trainM = mask
            testM = trainM[:,:]
            trainX = np.transpose(imp)
            testX = trainX[:,:]
            percent_nonzero = 0
            print("\n=========================\nNew Cutoff : ", cutoffs[cutoff_i])
            print("NZeros=", np.count_nonzero(1-testM)/(imp.shape[0]*imp.shape[1]*1.0), np.count_nonzero(imp*(1-np.transpose(testM)))*1.0/np.count_nonzero(1-testM))
            lib.plot.flush()
    # Legacy manual schedule — unreachable with range(70020) above.
    if it == 100000:#1000:
        mask, geneAvgs = getMask(data_f, 1)#0.4)
        trainM = mask
        testM = trainM[:,:]
        trainX = np.transpose(imp)
        testX = trainX[:,:]
    if it == 250000:#25000:
        mask, geneAvgs = getMask(data_f, 0.75)#0.25)
        trainM = mask
        testM = trainM[:,:]
        trainX = np.transpose(imp)
        testX = trainX[:,:]
    if it == 500000:#50000:
        mask, geneAvgs = getMask(data_f, 0.5)#0.2)
        trainM = mask
        testM = trainM[:,:]
        trainX = np.transpose(imp)
        testX = trainX[:,:]
    lib.plot.tick()
    #if it % 20000 == 1:
    #    saver.save(sess, "imputation_gain_data/gain_model", global_step=it)
|
nilq/baby-python
|
python
|
import datetime
import names
from django.contrib.auth import get_user_model
from usersetting.models import UserSetting
User = get_user_model()
def initialize_usersetting(email):
    """Create a default UserSetting row for the user with ``email``.

    Picks a random first name that no existing UserSetting already uses
    as a nickname, then creates the record with the army/soldier
    defaults and service dates computed from today.
    """
    # Retry until we draw a nickname that is not taken. Previously a
    # bare ``except`` made ANY error (including DB failures) look like
    # "nickname is free"; only DoesNotExist actually means that.
    while True:
        nickname = names.get_first_name()
        try:
            UserSetting.objects.get(nickname=nickname)
        except UserSetting.DoesNotExist:
            break

    today = datetime.datetime.now()

    def _offset(days):
        # Date string ``days`` days from now, in the model's %Y-%m-%d format.
        return (today + datetime.timedelta(days=days)).strftime("%Y-%m-%d")

    UserSetting.objects.create(email=User.objects.get(email=email),
                               nickname=nickname,
                               major='army',
                               type='soldier',
                               enlisted_date=_offset(0),
                               delisted_date=_offset(548),
                               promotion1_date=_offset(60),
                               promotion2_date=_offset(240),
                               promotion3_date=_offset(400))
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
def create_rolling_ts(
    input_data,
    lookback=5,
    return_target=True,
    apply_datefeatures=True,
    return_np_array=False
):
    """
    Slice ``input_data`` into overlapping windows of ``lookback`` rows
    (optionally augmented with date features) plus, for each window, the
    single row immediately following it as the target.

    Slow function — consider building flat data with pd.concat, or
    saving the preprocessed result.
    """
    windows = []
    targets = []
    n_rows = len(input_data)
    features = input_data.copy()
    target = input_data.copy()
    for start in range(n_rows - lookback):
        stop = start + lookback
        window = features.iloc[start:stop]
        if apply_datefeatures:
            # Create embeddings for the date-features.
            window = date_features(window)
        windows.append(window)
        targets.append(target.iloc[stop:stop + 1])
    if return_np_array:
        windows = np.array(windows)
        targets = np.array(targets)
    if return_target:
        return windows, targets
    return windows
def date_features(df):
    """Add calendar feature columns derived from a DatetimeIndex.

    Accepts a Series (converted to a one-column DataFrame) or a
    DataFrame; mutates and returns it with ``day_of_year``, ``month``,
    ``day_of_week`` and ``hour`` columns.
    """
    if isinstance(df, pd.core.series.Series):
        df = pd.DataFrame(df, index=df.index)
    df.loc[:, 'day_of_year'] = df.index.dayofyear
    df.loc[:, 'month'] = df.index.month
    # Bug fix: this column previously used ``df.index.day`` (day of
    # month) even though it is named day_of_week.
    df.loc[:, 'day_of_week'] = df.index.dayofweek
    df.loc[:, 'hour'] = df.index.hour
    return df
def split_data(data, train_size, valid_size):
    """
    Split ``data`` positionally into train/valid/test by fractions.
    TODO: implement date-based splitting and normalization.
    """
    n_train = int(len(data) * train_size)
    n_valid = int(n_train + len(data) * valid_size)
    try:
        return (
            data.iloc[:n_train],
            data.iloc[n_train:n_valid],
            data.iloc[n_valid:],
        )
    except Exception as e:
        print(f'Exception from _split_data: {e}')
def square(i):
    """Return ``i`` squared."""
    return i * i
def flatten(data):
    """Collapse all trailing axes so the result has shape (n, -1)."""
    n_rows = data.shape[0]
    return data.reshape(n_rows, -1)
def is_pandas(df):
    """True when ``df`` is a pandas DataFrame or Series."""
    return isinstance(df, (pd.DataFrame, pd.Series))
"""
EXPERIMENTAL
"""
# transform a time series dataset into a supervised learning dataset
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) series as lagged inputs plus forecasts.

    Columns are [t-n_in, ..., t-1, t, ..., t+n_out-1]; rows containing
    NaNs introduced by the shifting are dropped when ``dropnan``.
    Returns the underlying ndarray.
    """
    n_vars = 1 if type(data) is list else data.shape[1]  # kept for parity; unused
    frame = pd.DataFrame(data)
    shifted = [frame.shift(lag) for lag in range(n_in, 0, -1)]
    shifted += [frame.shift(-step) for step in range(0, n_out)]
    combined = pd.concat(shifted, axis=1)
    if dropnan:
        combined.dropna(inplace=True)
    return combined.values
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    """Split a 2-D array so the last ``n_test`` rows form the test set."""
    train, test = data[:-n_test, :], data[-n_test:, :]
    return train, test
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test):
    """One-step-ahead walk-forward evaluation over the last n_test rows.

    NOTE(review): relies on ``xgboost_forecast`` and
    ``mean_absolute_error`` which are not defined in this module — they
    must be imported/defined elsewhere before calling.
    """
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # split test row into input and output columns
        testX, testy = test[i, :-1], test[i, -1]
        # fit model on history and make a prediction
        yhat = xgboost_forecast(history, testX)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
        # summarize progress
        print('>expected=%.1f, predicted=%.1f' % (testy, yhat))
    # estimate prediction error
    error = mean_absolute_error(test[:, -1], predictions)
    return error, test[:, -1], predictions
|
nilq/baby-python
|
python
|
import time
def merge(data, low, high, middle, drawData, timetick):
    """Merge the sorted halves data[low:middle+1] and data[middle+1:high+1]
    in place, redrawing after every placement (cursor positions yellow)."""
    colors = ['sky blue' for _ in range(len(data))]
    left_half = data[low:middle + 1]
    right_half = data[middle + 1:high + 1]
    li = 0
    ri = 0
    for k in range(low, high + 1):
        # Take from the left half when it still has elements and either
        # the right half is exhausted or the left element is <=.
        take_left = li < len(left_half) and (
            ri >= len(right_half) or left_half[li] <= right_half[ri])
        if take_left:
            data[k] = left_half[li]
            li += 1
        else:
            data[k] = right_half[ri]
            ri += 1
        for p in range(len(data)):
            colors[p] = 'yellow' if p == low + li or p == middle + 1 + ri else 'sky blue'
        drawData(data, colors)
        time.sleep(timetick)
def merge_sort(data, low, high, drawData, timetick):
    """Recursively merge-sort data[low:high+1], redrawing the whole
    array after each completed merge."""
    if low >= high:
        return
    mid = (low + high) // 2
    merge_sort(data, low, mid, drawData, timetick)
    merge_sort(data, mid + 1, high, drawData, timetick)
    merge(data, low, high, mid, drawData, timetick)
    drawData(data, ['sky blue' for _ in range(len(data))])
    time.sleep(timetick)
|
nilq/baby-python
|
python
|
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from pyrogram import Client, emoji
from datetime import datetime, timedelta
from shamil.voicechat import mp
@Client.on_callback_query()
async def cb_handler(client: Client, query: CallbackQuery):
    """Route the inline-keyboard callbacks (replay / pause / resume /
    skip / help / close) for the voice-chat music player."""
    if query.data == "replay":
        # Restart the current track from the beginning and refresh the
        # "now playing" message.
        group_call = mp.group_call
        if not mp.playlist:
            return
        group_call.restart_playout()
        await mp.update_start_time()
        start_time = mp.start_time
        playlist = mp.playlist
        if not start_time:
            await query.edit_message_text(f"{emoji.PLAY_BUTTON} **Nothing Playing!**")
            return
        utcnow = datetime.utcnow().replace(microsecond=0)
        if mp.msg.get('current') is not None:
            playlist=mp.playlist
            if not playlist:
                pl = f"{emoji.NO_ENTRY} **Empty Playlist!**"
            else:
                if len(playlist) == 1:
                    pl = f"{emoji.REPEAT_SINGLE_BUTTON} **Playlist**:\n"
                else:
                    pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n"
                pl += "\n".join([
                    f"**{i}**. **{x.audio.title}**"
                    for i, x in enumerate(playlist)
                ])
            # Replace the previous status message with a fresh one that
            # shows elapsed / total duration.
            await mp.msg['current'].delete()
            mp.msg['current'] = await playlist[0].reply_text(
                f"{pl}\n\n{emoji.PLAY_BUTTON} {utcnow - start_time} / "
                f"{timedelta(seconds=playlist[0].audio.duration)}",
                parse_mode="Markdown",
                reply_markup=InlineKeyboardMarkup(
                    [
                        [
                            InlineKeyboardButton("Replay", callback_data="replay"),
                            InlineKeyboardButton("Pause", callback_data="pause"),
                            InlineKeyboardButton("Skip", callback_data="skip")
                        ],
                    ]
                )
            )
    elif query.data == "pause":
        mp.group_call.pause_playout()
        await mp.update_start_time(reset=True)
        playlist = mp.playlist
        if not playlist:
            pl = f"{emoji.NO_ENTRY} **Empty Playlist!**"
        else:
            if len(playlist) == 1:
                pl = f"{emoji.REPEAT_SINGLE_BUTTON} **Playlist**:\n"
            else:
                pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n"
            pl += "\n".join([
                f"**{i}**. **{x.audio.title}**"
                for i, x in enumerate(playlist)
            ])
        # NOTE(review): ``reply`` is assigned but never used.
        reply = await query.edit_message_text(f"{emoji.PLAY_OR_PAUSE_BUTTON} **Paused Playing 🤐!**\n\n{pl}",
                                              reply_markup=InlineKeyboardMarkup(
                                                  [
                                                      [
                                                          InlineKeyboardButton("Replay", callback_data="replay"),
                                                          InlineKeyboardButton("Resume️", callback_data="resume"),
                                                          InlineKeyboardButton("Skip", callback_data="skip")
                                                      ],
                                                  ]
                                              )
                                              )
    elif query.data == "resume":
        mp.group_call.resume_playout()
        playlist=mp.playlist
        if not playlist:
            pl = f"{emoji.NO_ENTRY} **Empty Playlist!**"
        else:
            if len(playlist) == 1:
                pl = f"{emoji.REPEAT_SINGLE_BUTTON} **Playlist**:\n"
            else:
                pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n"
            pl += "\n".join([
                f"**{i}**. **{x.audio.title}**"
                for i, x in enumerate(playlist)
            ])
        await query.edit_message_text(f"{emoji.PLAY_OR_PAUSE_BUTTON} **Resumed Playing 🤗!**\n\n{pl}",
                                      reply_markup=InlineKeyboardMarkup(
                                          [
                                              [
                                                  InlineKeyboardButton("Replay", callback_data="replay"),
                                                  InlineKeyboardButton("Pause", callback_data="pause"),
                                                  InlineKeyboardButton("Skip", callback_data="skip")
                                              ],
                                          ]
                                      )
                                      )
    elif query.data=="skip":
        # Snapshot the playlist before skipping so the list shown still
        # includes the track that was just skipped.
        playlist = mp.playlist
        await mp.skip_current_playing()
        if not playlist:
            pl = f"{emoji.NO_ENTRY} **Empty Playlist!**"
        else:
            if len(playlist) == 1:
                pl = f"{emoji.REPEAT_SINGLE_BUTTON} **Playlist**:\n"
            else:
                pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n"
            pl += "\n".join([
                f"**{i}**. **{x.audio.title}**"
                for i, x in enumerate(playlist)
            ])
        try:
            await query.edit_message_text(f"⏭ **Skipped Track 🤷♀️!**\n\n{pl}",
                                          reply_markup=InlineKeyboardMarkup(
                                              [
                                                  [
                                                      InlineKeyboardButton("Replay", callback_data="replay"),
                                                      InlineKeyboardButton("Pause", callback_data="pause"),
                                                      InlineKeyboardButton("Skip", callback_data="skip")
                                                  ],
                                              ]
                                          )
                                          )
        except:
            # Editing may fail (e.g. message unchanged or deleted) —
            # deliberately ignored.
            pass
    elif query.data=="help":
        await query.edit_message_text("🙋♂️ **Hi Bruh**, \nIts Me Samantha Here 👋🏻 ...",
                                      reply_markup=InlineKeyboardMarkup(
                                          [
                                              [
                                                  InlineKeyboardButton("Close", callback_data="close"),
                                              ],
                                          ]
                                      )
                                      )
    elif query.data=="close":
        await query.message.delete()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import yaml
import json
def main():
    """Build a small mixed-type list and dump it to YAML and JSON files.

    Bug fix: in Python 3 ``range(8)`` returns a range object, which has
    no ``append`` — it must be materialised as a list first.
    """
    my_list = list(range(8))
    my_list.append('0 through 7 are cool numbers')
    my_list.append({})
    my_list[-1]['subnet_mask'] = '255.255.255.0'
    my_list[-1]['gateway'] = '192.168.1.1'
    with open("first_yaml_file.yml", "w") as f:
        f.write(yaml.dump(my_list, default_flow_style=False))
    with open("first_json_file.json", "w") as g:
        json.dump(my_list, g)

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import os
import glob
import sys
class CAPSProfile:
    """Per-server CAPS profile: requested streams, station tuples and
    the contents of a previously written state file."""

    def __init__(self):
        self.streams = []    # stream IDs, e.g. "XY.ABCD.*.*"
        self.stations = []   # (network, station) tuples, e.g. ("XY", "ABCD")
        self.oldStates = []  # lines loaded from an old state file
'''
Plugin handler for the CAPS plugin.
'''
class SeedlinkPluginHandler:
# Create defaults
def __init__(self):
self.profiles = {}
def push(self, seedlink):
try:
maxTimeDiff = float(seedlink._get('plugins.caps.maxTimeDiff', False))
except:
maxTimeDiff = 86400
inOrder = ""
try:
if seedlink._get('plugins.caps.inOrder', False):
inOrder = " --in-order"
except:
pass
# Check and set defaults
try:
address = seedlink.param('sources.caps.address')
except KeyError:
address = "localhost:18002"
try:
streams = [chaId.strip() for chaId in seedlink.param(
'sources.caps.streams').split(',')]
except KeyError:
seedlink.setParam('sources.caps.streams', "*.*")
streams = ["*.*"]
try:
encoding = seedlink.param('sources.caps.encoding')
except KeyError:
seedlink.setParam('sources.caps.encoding', "STEIM2")
# parse address URL and create capsId of form:
# host[.port][_user]
addressFormatError = "Error: invalid address format, expected " \
"[[caps|capss]://][user:pass@]host[:port]"
# protocol
toks = address.split("://")
if len(toks) > 2:
raise Exception(addressFormatError)
elif len(toks) == 2:
protocol = toks[0]
address = toks[1]
if protocol != "caps" and protocol != "capss":
raise Exception(addressFormatError)
else:
protocol = "caps"
# authentication
toks = address.split("@")
if len(toks) > 2:
raise Exception(addressFormatError)
elif len(toks) == 2:
capsId = "%s_%s" % (toks[1].replace(
":", "."), toks[0].split(":")[0])
else:
capsId = address.replace(":", ".")
address = "%s://%s" % (protocol, address)
if capsId not in self.profiles:
profile = CAPSProfile()
self.profiles[capsId] = profile
else:
profile = self.profiles[capsId]
for chaId in streams:
toks = chaId.split('.')
if len(toks) != 2:
raise Exception(
"Error: invalid stream format, expected [LOC.CHA]")
streamID = seedlink.net + "." + seedlink.sta + "." + chaId
profile.streams.append(streamID)
profile.stations.append((seedlink.net, seedlink.sta))
log = os.path.join(seedlink.config_dir, "caps2sl.%s.state" % capsId)
streamsFile = os.path.join(
seedlink.config_dir, "caps2sl.%s.req" % capsId)
seedlink.setParam('sources.caps.address', address)
seedlink.setParam('sources.caps.log', log)
seedlink.setParam('sources.caps.streamsFile', streamsFile)
seedlink.setParam('sources.caps.maxTimeDiff', maxTimeDiff)
seedlink.setParam('sources.caps.inOrder', inOrder)
seedlink.setParam('seedlink.caps.id', capsId)
return capsId
def flush(self, seedlink):
    """Write per-CAPS-address request files and prune state files.

    For every configured CAPS profile this writes a ``caps2sl.<id>.req``
    file listing the requested streams, carries over any existing
    ``caps2sl.<id>.state`` entries, deletes all state files, and rewrites
    them restricted to the stations actually configured.

    :param seedlink: handler object providing ``config_dir`` — TODO confirm
        full contract against the enclosing plugin framework.
    """
    # Populate one request file per CAPS address and load any prior state.
    for caps_id, profile in self.profiles.items():
        req_path = os.path.join(seedlink.config_dir, "caps2sl.%s.req" % caps_id)
        # Context manager guarantees the handle is closed even on write errors.
        with open(req_path, "w") as fd:
            for stream_id in profile.streams:
                fd.write("%s\n" % stream_id)
        state_path = os.path.join(seedlink.config_dir, "caps2sl.%s.state" % caps_id)
        try:
            with open(state_path, "r") as fd:
                profile.oldStates = [line.strip() for line in fd]
        except OSError:
            # No readable state file yet for this address — nothing to carry over.
            # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
            pass
    # Delete all existing state files; valid entries are rewritten below.
    for state_file in glob.glob(os.path.join(seedlink.config_dir, "caps2sl.*.state")):
        try:
            os.remove(state_file)
        except OSError:
            sys.stderr.write("Failed to remove old state file: %s\n" % str(state_file))
    # Rewrite each state file keeping only lines for configured stations.
    for caps_id, profile in self.profiles.items():
        state_path = os.path.join(seedlink.config_dir, "caps2sl.%s.state" % caps_id)
        new_states = []
        for (net, sta) in profile.stations:
            for line in profile.oldStates:
                if line.startswith(net + "." + sta + "."):
                    new_states.append(line)
        if new_states:
            with open(state_path, "w") as fd:
                for line in new_states:
                    fd.write(line + "\n")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import time
import xlrd
from public import saveScreenshot
from core import LoginPage
from test import support
from public.log import logger
from public.pyse import Pyse
class LoginTest(unittest.TestCase):
    """Data-driven login test for the sales-assistant web app.

    Test methods are attached dynamically (via ``getTestFunc``) from rows of
    an Excel sheet; each generated method drives the login page with one
    username/password pair.
    """

    def setUp(self):
        # Fresh Chrome session per test; implicit wait of 10 seconds.
        self.driver = Pyse("chrome")
        self.driver.wait(10)
        self.url = "http://dxttest.dxtmobile.com/dxtyhch/a/login"
        self.verificationErrors = []

    def action_login(self, case_id='case_0000', case_summary=u'正确验证', username=u'73103741刘婷', password=u'000000'):
        """Open the login page, submit credentials, and log the outcome."""
        login_page = LoginPage.LoginPage(self.driver, self.url, u"萃花销售助手 登录")
        login_page.iopen()
        logger.info(u"======== 【" + case_id + u"】" + case_summary + u" ========")
        logger.info("username:"+username+" password:"+password)
        # Drive the page components: username, password, then submit.
        login_page.type_username(username)
        login_page.type_password(password)
        login_page.submit()
        time.sleep(3)
        saveScreenshot.saveScreenshot(self.driver, u"登录")
        # NOTE(review): the bare except means a failed login only logs —
        # it never fails the test case; confirm that is intentional.
        try:
            assert (self.driver.get_title() == u"萃花销售助手"), u"登录成功"
            logger.info(u"登录成功")
        except:
            logger.info(u"登录失败")

    @staticmethod
    def getTestFunc(case_id, case_summary, username, password):
        """Return a bound test method that runs action_login with these args."""
        def func(self):
            self.action_login(case_id, case_summary, username, password)
        return func

    def tearDown(self):
        self.driver.close()
        self.assertEqual([], self.verificationErrors)
def __generateTestCases():
    """Attach one ``test_login_<name>`` method to LoginTest per Excel data row."""
    data = xlrd.open_workbook(r"./data/login_126mail_data.xls")
    # Fetch the first worksheet by index.
    table = data.sheets()[0]
    for args in range(1, table.nrows):
        txt = table.row_values(args)
        # Row layout: presumably (case_id, case_summary, username, password)
        # with txt[1] doubling as the method-name suffix — TODO confirm
        # against the spreadsheet schema.
        setattr(LoginTest, 'test_login_%s' % txt[1], LoginTest.getTestFunc(*txt))
# Generate the test methods at import time so unittest discovery sees them.
__generateTestCases()
if __name__ == '__main__':
    support.run_unittest(LoginTest)
|
nilq/baby-python
|
python
|
import os
import pprint
import random
import collections
from ai_list_common import (
ListInterface,
ListBase,
ListTypeNgram,
TestRunnerCheckNgram,
)
class NgramList(ListTypeNgram, ListBase, ListInterface):
    """Character n-gram token list built from the target text.

    Relies on the mixin bases for configuration (``self.config``), logging
    (``self.logger``) and the text source (``self.text_target``) — contracts
    assumed from usage; confirm against ai_list_common.
    """

    def check_num_of_gram(self, num_of_gram):
        """Validate *num_of_gram*; raises InvalidNumOfNgramError when < 1.

        Defers to the superclass check first and accepts its verdict when it
        reports the value as already handled.
        """
        if super().check_num_of_gram(num_of_gram):
            return
        if num_of_gram < 1:
            err = f'num_of_gram: {num_of_gram}'
            raise self.InvalidNumOfNgramError(err)

    def make_starting_token_list(self):
        """Pick one random configured keyword as the single starting token."""
        keywords = self.config.KEYWORDS_NGRAM[:]
        random.shuffle(keywords)
        # Each keyword entry is indexed [1] — presumably (key, token) pairs;
        # TODO confirm against config.KEYWORDS_NGRAM.
        self.starting_token_list = [keywords[0][1]]

    def make_token_list(self):
        """Slide a window of ``num_of_gram`` characters over the target text."""
        txt = self.text_target
        window = self.num_of_gram
        count = len(txt) - (window - 1)
        # Comprehension replaces the original manual append loop.
        ngram_list = [txt[i:i + window] for i in range(count)]
        self.token_list = ngram_list
        msg = 'list_size: {}'.format(len(ngram_list))
        self.logger.i(msg)

    def make_type_name_list(self, txt):
        """Not applicable for character n-grams."""
        pass

    def make_morph_list(self, txt):
        """Not applicable for character n-grams."""
        pass

    def update_starting_token_list(self):
        """No incremental updates for this list type."""
        pass

    def update_token_list(self):
        """No incremental updates for this list type."""
        pass

    def _test_starting_token_list(self):
        """Log (at most the first 10) starting tokens."""
        tokens_start = self.get_starting_token_list()
        # Original threshold used len - 1 here (vs len in _test_token_list);
        # preserved for identical logging behavior.
        limit = len(tokens_start) - 1
        msg_fmt = 'start chrs: {}'.format
        if limit > 10:
            msg = msg_fmt(tokens_start[0:10])
        else:
            msg = msg_fmt(tokens_start)
        self.logger.i(msg)

    def _test_token_list(self):
        """Log (at most the first 10) n-gram tokens."""
        tokens = self.get_token_list()
        limit = len(tokens)  # renamed from `max`, which shadowed the builtin
        msg_fmt = 'ngram list: {}'.format
        if limit > 10:
            msg = msg_fmt(tokens[0:10])
        else:
            msg = msg_fmt(tokens)
        self.logger.i(msg)

    def _test_token_counts(self):
        """Append token frequency counts to a shared counts file.

        The file is truncated on the 1-gram pass and appended to afterwards;
        the original achieved the truncation by opening the file twice.
        """
        tokens = self.get_token_list()
        path_name = '_gram_counts.txt'
        path = os.path.join(self.config.PATH_TMP, path_name)
        mode = 'w' if self.num_of_gram == 1 else 'a'
        counts = collections.Counter(tokens)
        with open(path, mode=mode, encoding='utf-8') as stream:
            pprint.pprint(counts, stream=stream)
        msg = 'Making counts file is Done!'
        self.logger.i(msg)
if __name__ == '__main__':
    # Run the shared n-gram checks against this list implementation.
    TestRunnerCheckNgram(NgramList)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import six
from aiida import orm
from aiida.common.lang import classproperty
from aiida.plugins import factories
from aiida_quantumespresso.calculations import BasePwCpInputGenerator
class PwCalculation(BasePwCpInputGenerator):
    """`CalcJob` implementation for the pw.x code of Quantum ESPRESSO."""

    # Namelists automatically included in the input file per calculation type.
    _automatic_namelists = {
        'scf': ['CONTROL', 'SYSTEM', 'ELECTRONS'],
        'nscf': ['CONTROL', 'SYSTEM', 'ELECTRONS'],
        'bands': ['CONTROL', 'SYSTEM', 'ELECTRONS'],
        'relax': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS'],
        'md': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS'],
        'vc-md': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS', 'CELL'],
        'vc-relax': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS', 'CELL'],
    }

    # Keywords that cannot be set by the user but will be set by the plugin
    _blocked_keywords = [
        ('CONTROL', 'pseudo_dir'),
        ('CONTROL', 'outdir'),
        ('CONTROL', 'prefix'),
        ('SYSTEM', 'ibrav'),
        ('SYSTEM', 'celldm'),
        ('SYSTEM', 'nat'),
        ('SYSTEM', 'ntyp'),
        ('SYSTEM', 'a'),
        ('SYSTEM', 'b'),
        ('SYSTEM', 'c'),
        ('SYSTEM', 'cosab'),
        ('SYSTEM', 'cosac'),
        ('SYSTEM', 'cosbc'),
    ]

    _use_kpoints = True

    # Not using symlink in pw to allow multiple nscf to run on top of the same scf
    _default_symlink_usage = False

    @classproperty
    def xml_filepaths(cls):
        """Return a list of XML output filepaths relative to the remote working directory that should be retrieved."""
        filepaths = []

        for filename in cls.xml_filenames:
            filepath = os.path.join(cls._OUTPUT_SUBFOLDER, '{}.save'.format(cls._PREFIX), filename)
            filepaths.append(filepath)

        return filepaths

    @classmethod
    def define(cls, spec):
        # yapf:disable
        super(PwCalculation, cls).define(spec)
        spec.input('metadata.options.parser_name', valid_type=six.string_types, default='quantumespresso.pw')
        spec.input('kpoints', valid_type=orm.KpointsData,
            help='kpoint mesh or kpoint path')
        spec.input('hubbard_file', valid_type=orm.SinglefileData, required=False,
            help='SinglefileData node containing the output Hubbard parameters from a HpCalculation')
        spec.output('output_parameters', valid_type=orm.Dict,
            help='The `output_parameters` output node of the successful calculation.')
        spec.output('output_structure', valid_type=orm.StructureData, required=False,
            help='The `output_structure` output node of the successful calculation if present.')
        spec.output('output_trajectory', valid_type=orm.TrajectoryData, required=False)
        spec.output('output_band', valid_type=orm.BandsData, required=False,
            help='The `output_band` output node of the successful calculation if present.')
        spec.output('output_kpoints', valid_type=orm.KpointsData, required=False)
        spec.output('output_atomic_occupations', valid_type=orm.Dict, required=False)
        spec.default_output_node = 'output_parameters'

        # Unrecoverable errors: resources like the retrieved folder or its expected contents are missing
        spec.exit_code(200, 'ERROR_NO_RETRIEVED_FOLDER',
            message='The retrieved folder data node could not be accessed.')
        spec.exit_code(201, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER',
            message='The retrieved temporary folder could not be accessed.')
        spec.exit_code(210, 'ERROR_OUTPUT_STDOUT_MISSING',
            message='The retrieved folder did not contain the required stdout output file.')
        # BUG FIX: message previously read "required required" (duplicated word).
        spec.exit_code(220, 'ERROR_OUTPUT_XML_MISSING',
            message='The retrieved folder did not contain the required XML file.')
        spec.exit_code(221, 'ERROR_OUTPUT_XML_MULTIPLE',
            message='The retrieved folder contained multiple XML files.')

        # Unrecoverable errors: required retrieved files could not be read, parsed or are otherwise incomplete
        spec.exit_code(300, 'ERROR_OUTPUT_FILES',
            message='Both the stdout and XML output files could not be read or parsed.')
        spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ',
            message='The stdout output file could not be read.')
        spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE',
            message='The stdout output file could not be parsed.')
        spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE',
            message='The stdout output file was incomplete.')
        spec.exit_code(320, 'ERROR_OUTPUT_XML_READ',
            message='The XML output file could not be read.')
        spec.exit_code(321, 'ERROR_OUTPUT_XML_PARSE',
            message='The XML output file could not be parsed.')
        spec.exit_code(322, 'ERROR_OUTPUT_XML_FORMAT',
            message='The XML output file has an unsupported format.')
        spec.exit_code(350, 'ERROR_UNEXPECTED_PARSER_EXCEPTION',
            message='The parser raised an unexpected exception.')

        # Significant errors but calculation can be used to restart
        spec.exit_code(400, 'ERROR_OUT_OF_WALLTIME',
            message='The calculation stopped prematurely because it ran out of walltime.')
        spec.exit_code(410, 'ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED',
            message='The electronic minimization cycle did not reach self-consistency.')
        spec.exit_code(500, 'ERROR_IONIC_CONVERGENCE_NOT_REACHED',
            message='The ionic minimization cycle did not converge for the given thresholds.')
        # BUG FIX: message previously started with "Then ionic" (typo for "The").
        spec.exit_code(501, 'ERROR_IONIC_CONVERGENCE_REACHED_EXCEPT_IN_FINAL_SCF',
            message='The ionic minimization cycle converged but the thresholds are exceeded in the final SCF.')
        spec.exit_code(502, 'ERROR_IONIC_CYCLE_EXCEEDED_NSTEP',
            message='The ionic minimization cycle did not converge after the maximum number of steps.')
        spec.exit_code(510, 'ERROR_IONIC_CYCLE_ELECTRONIC_CONVERGENCE_NOT_REACHED',
            message='The electronic minimization cycle failed during an ionic minimization cycle.')
        spec.exit_code(511, 'ERROR_IONIC_CONVERGENCE_REACHED_FINAL_SCF_FAILED',
            message='The ionic minimization cycle converged, but electronic convergence was not reached in the '
                    'final SCF.')
        spec.exit_code(520, 'ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE',
            message='The ionic minimization cycle terminated prematurely because of two consecutive failures in the '
                    'BFGS algorithm.')
        spec.exit_code(521, 'ERROR_IONIC_CYCLE_BFGS_HISTORY_AND_FINAL_SCF_FAILURE',
            message='The ionic minimization cycle terminated prematurely because of two consecutive failures in the '
                    'BFGS algorithm and electronic convergence failed in the final SCF.')
        spec.exit_code(531, 'ERROR_CHARGE_IS_WRONG',
            message='The electronic minimization cycle did not reach self-consistency.')
        spec.exit_code(541, 'ERROR_SYMMETRY_NON_ORTHOGONAL_OPERATION',
            message='The variable cell optimization broke the symmetry of the k-points.')

    @classproperty
    def input_file_name_hubbard_file(cls):
        """
        The relative file name of the file containing the Hubbard parameters if they should
        be read from file instead of specified in the input file cards. Requires the
        aiida-quantumespresso-hp plugin to be installed
        """
        try:
            HpCalculation = factories.CalculationFactory('quantumespresso.hp')
        except Exception:
            raise RuntimeError('this is determined by the aiida-quantumespresso-hp plugin but it is not installed')

        return HpCalculation.input_file_name_hubbard_file

    @classmethod
    def input_helper(cls, *args, **kwargs):
        """
        Validate if the keywords are valid Quantum ESPRESSO pw.x keywords, and
        also helps in preparing the input parameter dictionary in a
        'standardized' form (e.g., converts ints to floats when required,
        or if the flag flat_mode is specified, puts the keywords in the right
        namelists).
        This function calls :py:func:`aiida_quantumespresso.calculations.helpers.pw_input_helper`,
        see its docstring for further information.
        """
        from . import helpers
        return helpers.pw_input_helper(*args, **kwargs)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from com.pnfsoftware.jeb.client.api import IClientContext
from com.pnfsoftware.jeb.core import IRuntimeProject
from com.pnfsoftware.jeb.core.units import IUnit
from com.pnfsoftware.jeb.core.units.code import IDecompilerUnit, DecompilationOptions, DecompilationContext
from com.pnfsoftware.jeb.core.units.code.android import IDexUnit, IDexDecompilerUnit
from com.pnfsoftware.jeb.core.units.code.java import IJavaMethod, IJavaTry
from com.pnfsoftware.jeb.core.util import DecompilerHelper
def displayTree(e, level=0):
    """Recursively visit a decompiled Java AST element, dispatching each node."""
    dispatch(e,level)
    if e:
        elts = e.getSubElements()
        for e in elts:
            displayTree(e, level+1)
def dispatch(ele,level):
    # Print the components of an IJavaTry element; all other element kinds
    # are ignored. (Python 2 / Jython script for the JEB decompiler API.)
    if isinstance(ele,IJavaTry):
        print "---------- try body start -----------"
        print ele.getTryBody() # statement sequence of the try block
        print "---------- try body end -------------"
        print "CatchCount >>> " , ele.getCatchCount() # number of catch blocks
        for idx in range(ele.getCatchCount()):
            print ""
            print ""
            print "---------- catch body start -----------",idx
            print "Type >>> ",ele.getCatchType(idx) # exception type inside the catch parentheses
            print "Identifier >>> ",ele.getCatchIdentifier(idx) # identifier inside the catch parentheses
            print "catch body >>> "
            print ele.getCatchBody(idx) # statement sequence of the catch block
            print "---------- catch body end -------------",idx
            print ""
            print ""
        print "finally body >>>",ele.getFinallyBody() # statement sequence of the finally block
        # NOTE(review): exits the whole script after the first try-element —
        # presumably intentional for one-shot inspection; confirm.
        exit(0)
        pass
    else:
        pass
# IJavaTry
def Test(ctx):
    # Entry point for JEB: decompile a dex file and dump the try/catch tree
    # of one target method. Paths and method signature are hard-coded.
    assert isinstance(ctx,IClientContext)
    input_path = r"D:\tmp\2\project\about_dex_diff\code\jsq\jsq.dex"
    sign = "Lnet/cavas/show/af;->a(Lorg/apache/http/client/HttpClient;Ljava/util/Queue;)V"
    unit = ctx.open(input_path); assert isinstance(unit,IUnit)
    prj = ctx.getMainProject(); assert isinstance(prj,IRuntimeProject)
    dexUnit = prj.findUnit(IDexUnit); assert isinstance(dexUnit,IDexUnit)
    dexDecompilerUnit = DecompilerHelper.getDecompiler(dexUnit); assert isinstance(dexDecompilerUnit,IDexDecompilerUnit)
    # Force synchronous decompilation of all classes before querying methods.
    opt = DecompilationOptions.Builder().newInstance().flags(IDecompilerUnit.FLAG_NO_DEFERRED_DECOMPILATION).build()
    # NOTE(review): `bool` shadows the builtin name; left unchanged here.
    bool = dexDecompilerUnit.decompileAllClasses(DecompilationContext(opt))
    print(bool)
    javaMethod = dexDecompilerUnit.getMethod(sign,False); assert isinstance(javaMethod,IJavaMethod)
    print("---------------- tree ----------------")
    displayTree(javaMethod)
'''
目标代码:
final class af implements Runnable {
private void a(HttpClient arg6, Queue arg7) {
String v0 = (String)arg7.poll();
if(this.b != null && v0 != null) {
HttpPost v1 = new HttpPost(this.b.replace(" ", "%20"));
v1.setEntity(new EntityTemplate(new ag(this, v0)));
try {
HttpResponse v0_4 = arg6.execute(((HttpUriRequest)v1));
c.a("offer", Integer.valueOf(v0_4.getStatusLine().getStatusCode()));
if(v0_4.getStatusLine().getStatusCode() == 200) {
this.a(arg6, arg7);
return;
}
}
catch(ClientProtocolException v0_3) {
try {
c.c(d.a, "Caught ClientProtocolException in PingUrlRunnable");
return;
label_35:
c.c(d.a, "Caught IOException in PingUrlRunnable");
return;
}
catch(Throwable v0_1) {
throw v0_1;
}
}
catch(IOException v0_2) {
goto label_35;
return;
}
catch(Throwable v0_1) {
throw v0_1;
}
}
}
}
'''
'''
输出:
True
---------------- tree ----------------
---------- try body start -----------
org.apache.http.HttpResponse v0_4 = execute(arg6, ((org.apache.http.client.methods.HttpUriRequest), v1))
a("offer", valueOf(getStatusCode(getStatusLine(v0_4))))
If@-2003461530
---------- try body end -------------
CatchCount >>> 3
---------- catch body start ----------- 0
Type >>> org.apache.http.client.ClientProtocolException
Identifier >>> v0_3
catch body >>>
Try@1198833152
---------- catch body end ------------- 0
---------- catch body start ----------- 1
Type >>> java.io.IOException
Identifier >>> v0_2
catch body >>>
goto label_35
return
---------- catch body end ------------- 1
---------- catch body start ----------- 2
Type >>> java.lang.Throwable
Identifier >>> v0_1
catch body >>>
throw v0_1
---------- catch body end ------------- 2
finally body >>> None
'''
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pylith/problems/ImplicitLgDeform.py
##
## @brief Python ImplicitLgDeform object for solving equations using
## an implicit formulation with rigid body motions and small strains.
##
## Factory: pde_formulation
from Implicit import Implicit
# ImplicitLgDeform class
class ImplicitLgDeform(Implicit):
  """
  Python ImplicitLgDeform object for solving equations using an implicit
  formulation with rigid body motions and small strains.
  Factory: pde_formulation.
  """

  class Inventory(Implicit.Inventory):
    """
    Python object for managing ImplicitLgDeform facilities and properties.
    Provide appropriate solver for small strains as the default.
    """

    ## @class Inventory
    ## Python object for managing ExplicitLumped facilities and properties.
    ##
    ## \b Properties
    ## @li None
    ##
    ## \b Facilities
    ## @li \b solver Algebraic solver.

    import pyre.inventory

    # Nonlinear solver is the appropriate default for large-deformation runs.
    from SolverNonlinear import SolverNonlinear
    solver = pyre.inventory.facility("solver", family="solver",
                                     factory=SolverNonlinear)
    solver.meta['tip'] = "Algebraic solver."

  # PUBLIC METHODS /////////////////////////////////////////////////////

  def __init__(self, name="implicitlgdeform"):
    """
    Constructor.
    """
    Implicit.__init__(self, name)
    return

  def elasticityIntegrator(self):
    """
    Get integrator for elastic material.
    """
    from pylith.feassemble.ElasticityImplicitLgDeform import ElasticityImplicitLgDeform
    return ElasticityImplicitLgDeform()

  # PRIVATE METHODS ////////////////////////////////////////////////////

  def _configure(self):
    """
    Set members based using inventory.
    """
    Implicit._configure(self)
    self.solver = self.inventory.solver
    return
# FACTORIES ////////////////////////////////////////////////////////////
def pde_formulation():
  """
  Factory function associated with ImplicitLgDeform (family: pde_formulation).
  """
  formulation = ImplicitLgDeform()
  return formulation
# End of file
|
nilq/baby-python
|
python
|
# coding=utf-8
import sys
from enum import Enum
from typing import List
class VarType(Enum):
    """Kinds of flowchart nodes recognized by the parser."""
    INVALID = -1
    EXIT = 0
    OPERATION = 1
    CONDITION = 2
    SUBROUTINE = 3
    START = 4
    END = 5
    SELECTED = 6  # a condition node with a chosen branch, i.e. its yes or no arm
class ConnectType(Enum):
    """Kinds of directed connections between flowchart nodes.

    BUG FIX: the original members had trailing commas (``NONE = 0,``),
    which silently made every value a 1-tuple ``(0,)`` instead of an int.
    Members are only ever compared by identity in this file, so plain int
    values are backward compatible.

    NOTE: ``YSE`` is a historic typo for ``YES``; the name is kept because
    other code in this module references it.
    """
    NONE = 0
    NORMAL = 1
    YSE = 2
    NO = 3
    LEFT = 4
    RIGHT = 5
    TOP = 6
    BOTTOM = 7
class Var:
    """A numbered flowchart variable: node type, label, and optional branch tag."""

    def __init__(self, num: int, varType: VarType, info: str, select: str = "N/A"):
        self.num = num
        self.varType = varType
        self.info = info
        self.select = select

    def copy(self):
        """Return a new Var carrying identical fields."""
        return Var(self.num, self.varType, self.info, self.select)

    def toDef(self):
        """Render this variable's flowchart.js definition line.

        Returns None for INVALID/SELECTED kinds, matching the original.
        """
        kind = self.varType
        if kind == VarType.CONDITION:
            return f"cnd{self.num}=>condition: {self.info}"
        if kind == VarType.OPERATION:
            return f"opt{self.num}=>operation: {self.info}"
        if kind == VarType.SUBROUTINE:
            return f"sub{self.num}=>subroutine: {self.info}"
        if kind == VarType.START:
            return f"st=>start: 开始"
        if kind == VarType.END:
            return f"ed=>end: 结束"

    def toConnectName(self):
        """Render the name used on connection lines (None for INVALID)."""
        kind = self.varType
        if kind == VarType.CONDITION:
            return f"cnd{self.num}"
        if kind == VarType.OPERATION:
            return f"opt{self.num}"
        if kind == VarType.SUBROUTINE:
            return f"sub{self.num}"
        if kind == VarType.START:
            return f"st"
        if kind == VarType.END:
            return f"ed"
        if kind == VarType.SELECTED:
            return f"cnd{self.num}({self.select})"
class Node:
    """Parsed form of one token from a source line: label, node kind, edge kind."""

    def __init__(self, info: str, varType: VarType, connectType: ConnectType):
        # Store the parsed attributes verbatim.
        self.info, self.varType, self.connectType = info, varType, connectType
class VarTable:
    """Registry of flowchart variables, keyed by insertion order (id == index)."""

    # Sentinel nodes; rendered specially, so only the type needs to be right.
    NoneVar = Var(-1, VarType.INVALID, "")  # invalid node
    StartVar = Var(0, VarType.START, "")    # start node
    EndVar = Var(1, VarType.END, "")        # end node

    def __init__(self):
        self.table: List[Var] = [self.StartVar, self.EndVar]
        self.currentID = 1

    def addVar(self, info, varType: VarType) -> Var:
        """Allocate the next id and register a new Var for it."""
        self.currentID += 1
        created = Var(self.currentID, varType, info)
        self.table.append(created)
        return created

    def getVarByNode(self, node: Node) -> Var:
        """Return a copy of the Var matching *node*, creating one when absent."""
        if node.varType == VarType.START:
            return self.StartVar.copy()
        if node.varType == VarType.END:
            return self.EndVar.copy()
        found = next(
            (v for v in self.table
             if v.info == node.info and v.varType == node.varType),
            None)
        if found is not None:
            return found.copy()
        return self.addVar(node.info, node.varType)

    def getVarByID(self, ID: int):
        return self.table[ID]

    def getVarNum(self):
        return len(self.table)

    def genCode(self, f):
        """Write one definition line per registered variable."""
        for entry in self.table:
            f.write(entry.toDef())
            f.write("\n")
class ConnectTable:
    """Adjacency matrix of connections between flowchart variables, indexed by Var id."""

    def __init__(self):
        self.length = 15
        self.graph: List[List[ConnectType]] = [[ConnectType.NONE for col in range(self.length)] for row in
                                               range(self.length)]
        # Names flowchart.js expects for non-plain connection kinds.
        self.ConnectNameDict = {
            ConnectType.YSE: "yes", ConnectType.NO: "no",
            ConnectType.LEFT: "left", ConnectType.RIGHT: "right",
            ConnectType.TOP: "top", ConnectType.BOTTOM: "bottom"
        }

    def resize(self, toSize: int):
        """Grow the square matrix to toSize x toSize (never shrinks)."""
        differ = toSize - self.length
        if differ <= 0:
            return
        for row in self.graph:
            row.extend(ConnectType.NONE for _ in range(differ))
        # BUG FIX: the original only widened existing rows and never appended
        # new ones, so any row index >= the initial size still raised IndexError.
        for _ in range(differ):
            self.graph.append([ConnectType.NONE for _ in range(toSize)])
        self.length = toSize

    def addConnect(self, head: int, tail: int, conType: ConnectType):
        """Record a directed edge head -> tail of the given kind."""
        if head >= self.length or tail >= self.length:
            # BUG FIX: indices are 0-based, so index N requires size N + 1;
            # the original resized to max(head, tail) and still overflowed.
            self.resize(max(head, tail) + 1)
        self.graph[head][tail] = conType

    def getNameByCon(self, con: ConnectType):
        return self.ConnectNameDict[con]

    def genCode(self, varTable: VarTable, f):
        """Emit the connection section via depth-first traversal from the start node."""
        code = []
        self.DFS(varTable, 0, code)
        for c in self.reduceCode(code):
            f.write(c)

    def DFS(self, varTable: VarTable, row, code):
        """Append "name -> name" fragments for every edge reachable from *row*."""
        for col in range(len(self.graph[row])):
            con = self.graph[row][col]
            if con != ConnectType.NONE:
                if con == ConnectType.NORMAL:
                    name = f"{varTable.getVarByID(row).toConnectName()}"
                else:
                    name = f"{varTable.getVarByID(row).toConnectName()}({self.getNameByCon(con)})"
                code.append(name)
                code.append("->")
                code.append(f"{varTable.getVarByID(col).toConnectName()}")
                code.append("\n")
                self.DFS(varTable, col, code)

    @staticmethod
    def reduceCode(code: List[str]):
        """Merge chains like "a->b\\nb->c" into "a->b->c" and de-duplicate lines."""
        newCode = []
        length = len(code)
        i = 0
        while i < length:
            if code[i] != "\n":
                newCode.append(code[i])
            elif i + 1 < length and code[i - 1] == code[i + 1]:
                newCode.append("->")
                i += 2
            else:
                newCode.append("\n")
                i += 1
        # Branch nodes must be traversed more than once, so visited edges cannot
        # simply be cleared inside the DFS; de-duplicate finished lines instead
        # (otherwise each branch node would appear only once).
        lineCode = []
        s = ""
        for c in newCode:
            s += c
            if c == "\n":
                lineCode.append(s)
                s = ""
        return frozenset(lineCode)

    def checkIntegrity(self, varTable: VarTable):
        """Check every node has an in-edge and every condition has both branches."""
        length = varTable.getVarNum()
        node = 2  # skip the start and end nodes
        while node < length:
            self.checkReference(node, varTable)
            if varTable.getVarByID(node).varType == VarType.CONDITION:
                self.checkBranch(node, varTable)
            node += 1

    def checkBranch(self, node, varTable: VarTable):
        """Raise CheckException unless *node* has both a yes and a no edge."""
        yNode = False
        nNode = False
        for col in range(len(self.graph)):
            if self.graph[node][col] == ConnectType.YSE:
                yNode = True
            elif self.graph[node][col] == ConnectType.NO:
                nNode = True
        if not yNode:
            raise CheckException(f"Node ({varTable.getVarByID(node).info}) is missing a yes branch")
        elif not nNode:
            raise CheckException(f"Node ({varTable.getVarByID(node).info}) is missing a no branch")

    def checkReference(self, node, varTable: VarTable):
        """Raise CheckException when no edge points at *node*."""
        referenced = False
        for row in range(len(self.graph)):
            if self.graph[row][node] != ConnectType.NONE:
                referenced = True
                break
        if not referenced:
            raise CheckException(f"Node ({varTable.getVarByID(node).info}) is not referenced by any node")
class Line:
    """One physical line of the input file: its 1-based number and raw text."""

    def __init__(self, num: int, value: str):
        self.num, self.value = num, value


# Sentinel used before any real line has been parsed.
NoneLine = Line(0, "NoneLine")
class Parser:
    """Parse a terse flowchart description file into flowchart.js source."""

    # Single-letter branch/direction suffixes accepted after ':' in a token.
    connectNameDict = {
        "y": ConnectType.YSE, "n": ConnectType.NO,
        "l": ConnectType.LEFT, "r": ConnectType.RIGHT,
        "t": ConnectType.TOP, "b": ConnectType.BOTTOM
    }

    def __init__(self, filename: str):
        self.varTable: VarTable = VarTable()
        self.connectTable: ConnectTable = ConnectTable()
        self.filename = filename
        self.currentLine = NoneLine

    def compile(self, filename: str = "flowOutput"):
        """Parse the input file, validate it, and write flowchart.js output."""
        try:
            self.parseFile()
            # Before generating code, verify the integrity of the connection graph.
            self.connectTable.checkIntegrity(self.varTable)
            self.genCode(filename)
            print("Compile Finish.\n0 Error 0 Warning.")
        finally:
            pass
        # Disabled error reporting kept from the original for reference:
        # except Exception as e:
        #     sys.stderr.write("Compile Failed.\n")
        #     sys.stderr.write(str(e))

    def parseFile(self):
        """Read the input file and parse every non-blank line."""
        with open(self.filename, "r", encoding="utf8") as f:
            num = 0
            for line in f.readlines():
                num += 1
                if not line.isspace():
                    self.parseLine(Line(num, line.replace("\n", "")))

    def parseLine(self, line: Line):
        """Split a line into whitespace-separated tokens and connect adjacent pairs."""
        self.currentLine = line
        nodes = line.value.split()
        if len(nodes) < 2:
            raise CheckException(f"Error: Line {line.num}: The num of nodes less than 2")
        for i in range(len(nodes) - 1):
            node = self.parseNode(nodes[i])
            varFst: Var = self.varTable.getVarByNode(node)
            varSnd: Var = self.varTable.getVarByNode(self.parseNode(nodes[i + 1]))
            self.connectTable.addConnect(varFst.num, varSnd.num, node.connectType)

    def parseNode(self, varStr: str) -> Node:
        """Classify one token by its leading bracket (or st/ed keyword)."""
        # Note: variables bound inside these if-branches remain in scope afterwards.
        if varStr[0] == '<':
            varType = VarType.CONDITION
        elif varStr[0] == '[':
            varType = VarType.OPERATION
        elif varStr[0] == '{':
            varType = VarType.SUBROUTINE
        elif varStr == "st":
            return Node("", VarType.START, ConnectType.NORMAL)
        elif varStr == "ed":
            return Node("", VarType.END, ConnectType.NORMAL)
        else:
            raise CheckException(f"Undefined type of {varStr}")
        varStr = self.removeBrackets(varStr)  # strip the surrounding bracket pair
        if ":" in varStr:
            info, typename = varStr.split(":")
            return Node(info, varType, self.connectNameDict[typename])
        else:
            return Node(varStr, varType, ConnectType.NORMAL)

    @staticmethod
    def removeBrackets(s: str):
        """Drop the opening bracket and its matching closer (before any ':' suffix)."""
        colonIdx = s.find(":")
        if colonIdx != -1:
            s = s[1:colonIdx - 1] + s[colonIdx:]
        else:
            s = s[1:-1]
        return s

    def genCode(self, filename: str):
        """Write variable definitions, a blank line, then the connection lines."""
        with open(filename, "w", encoding="utf8") as f:
            self.varTable.genCode(f)
            f.write("\n")
            self.connectTable.genCode(self.varTable, f)
class CheckException(Exception):
    """Raised when the flowchart description fails parsing or validation."""

    def __init__(self, info):
        super().__init__(info)
if __name__ == "__main__":
    print(sys.argv)
    # With an argument: compile that file to "<name>_out";
    # otherwise fall back to the bundled test input.
    if len(sys.argv) >= 2:
        parser = Parser(sys.argv[1])
        parser.compile(sys.argv[1] + "_out")
    else:
        parser = Parser("flowTest/hashMapMain")
        parser.compile("flowOutput")

# Optimization idea (translated from the original notes):
# For a branch node, choose the longest of its two paths (from the branch
# node to the point where the branches rejoin) as the downward path;
# but if one path has length 0, still choose the 0-length path as downward.
|
nilq/baby-python
|
python
|
def lcm(*values):
    """Return the least common multiple of the given integers.

    Accepts any number of arguments; signs are ignored. Returns 0 when called
    with no arguments or when any argument is 0 (matching the original
    behavior, and consistent with math.lcm's treatment of zero).

    Replaces the original trial-addition loop with the standard
    gcd-based reduction: lcm(a, b) == a * b // gcd(a, b).
    """
    from functools import reduce
    from math import gcd

    vals = {abs(int(v)) for v in values}
    if not vals or 0 in vals:
        return 0
    return reduce(lambda a, b: a * b // gcd(a, b), vals)
# The lines below are a pasted interpreter transcript, not doctests: each
# lcm(...) call runs (cheaply) at import time and its result is discarded;
# the bare integer literals are no-op expression statements showing the
# expected values.
lcm(-6, 14)
42
lcm(2, 0)
0
lcm(12, 18)
36
lcm(12, 18, 22)
396
|
nilq/baby-python
|
python
|
from GTDLambda import *
from TileCoder import *
import numpy
class DirectStepsToLeftDemon(GTDLambda):
    """GTD(lambda) demon predicting the number of steps to the far-left position.

    Termination (gamma = 0) occurs at either extreme encoder reading; the
    target policy always moves left (action 2).
    """

    def __init__(self, featureVectorLength, alpha):
        GTDLambda.__init__(self, featureVectorLength, alpha)

    def gamma(self, state, observation):
        """Continuation signal: 0 at either extreme encoder position, else 1."""
        encoder = observation['encoder']
        # -1 and 1023 both mark terminal encoder readings
        # (1023 represents the extreme position).
        return 0 if encoder in (-1, 1023) else 1

    def rho(self, action):
        """Importance-sampling ratio for the always-move-left target policy.

        Action 2 is "move left", i.e. the ratio answers: how many steps if we
        were to go directly to the left.
        """
        return 1 if action == 2 else 0
def test():
    """Smoke test: tile-code two (encoder, speed) states and run one learn step."""
    d = DirectStepsToLeftDemon(8*8*8, 0.1/8)
    numTilings = 8
    numTiles = 8
    encoderPosition = 600
    speed = 100
    firstState = tileCode(numTilings, numTilings * numTiles * numTiles, [((encoderPosition-510.0)/(1023.0-510.0)) * numTiles, ((speed + 200.0) / 400.0) * numTiles])
    encoderPosition = 1023
    speed = 0
    # BUG FIX: the original wrote "secondState = firstState = tileCode(...)",
    # which clobbered firstState so learn() received two identical states.
    secondState = tileCode(numTilings, numTilings * numTiles * numTiles, [((encoderPosition-510.0)/(1023.0-510.0)) * numTiles, ((speed + 200.0) / 400.0) * numTiles])
    d.learn(firstState, 2, secondState, 1023)
|
nilq/baby-python
|
python
|
import os
import numpy as np
from tqdm import tqdm
from PIL import Image
from imagededup.methods import PHash
def run(root: str):
    """Delete perceptual-hash duplicate images under *root* (destructive!).

    NOTE(review): the hard-coded personal paths below create a low-resolution
    round-tripped copy of one image as a known near-duplicate before running
    deduplication — looks like an ad-hoc self-test; confirm before reuse.
    """
    phasher = PHash()
    img = Image.open("/home/huakun/datasets/meinv/anime/IMG_0903.PNG")
    size = np.asarray(img.size)
    scale = 0.1
    new_size = (size * scale).astype(int)
    # Downscale then upscale to produce a visually similar near-duplicate.
    img.resize(new_size).resize(size).save("/home/huakun/datasets/meinv/anime/IMG_0903-to-remove.PNG")
    # filename -> perceptual hash, then filename -> list of duplicate filenames.
    encodings = phasher.encode_images(image_dir=root)
    duplicates = phasher.find_duplicates(encoding_map=encodings)
    removed = set()
    file_removed = []
    for key, value in tqdm(duplicates.items()):
        if len(value):
            if key in removed:
                # This file was already deleted as someone else's duplicate;
                # deleting its duplicates too would wipe the whole group.
                continue
            else:
                for v in value:
                    file_2_remove = f"{root}/{v}"
                    file_removed.append(file_2_remove)
                    os.remove(file_2_remove)
                    removed.add(v)
if __name__ == '__main__':
    # Expose run() on the command line, e.g.: python dedup.py --root <dir>
    import fire
    fire.Fire(run)
|
nilq/baby-python
|
python
|
#!python
"""
Keras implementation of CapsNet in Hinton's paper Dynamic Routing Between Capsules.
The current version maybe only works for TensorFlow backend. Actually it will be straightforward to re-write to TF code.
Adopting to other backends should be easy, but I have not tested this.
Usage:
python capsulenet.py
python capsulenet.py --epochs 50
python capsulenet.py --epochs 50 --routings 3
... ...
Result:
Validation accuracy > 99.5% after 20 epochs. Converge to 99.66% after 50 epochs.
About 110 seconds per epoch on a single GTX1070 GPU card
Author: Xifeng Guo, E-mail: `guoxifeng1990@163.com`, Github: `https://github.com/XifengGuo/CapsNet-Keras`
This is a forked version for training on audio spectrograms.
"""
import numpy as np
import os
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
from tensorflow.keras import callbacks
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras import backend as K
from tensorflow.keras.utils import to_categorical, normalize, multi_gpu_model
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
from utils import plot_log, MetricCallback
from sklearn.metrics import confusion_matrix, recall_score, accuracy_score
from sklearn.preprocessing import LabelBinarizer
K.set_image_data_format('channels_last')
def CapsNet(input_shape, n_class, routings):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param routings: number of routing iterations
    :return: Two Keras Models, the first one used for training, and the second one for evaluation.
            `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Just a conventional Conv2D layer
    conv1 = layers.Conv2D(filters=256,
                          kernel_size=9,
                          strides=1,
                          padding='valid',
                          activation='relu',
                          name='conv1')(x)

    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    primarycaps = PrimaryCap(conv1,
                             dim_capsule=8,
                             n_channels=32,
                             kernel_size=9,
                             strides=2,
                             padding='valid')

    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class,
                             dim_capsule=16,
                             routings=routings,
                             name='digitcaps')(primarycaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)

    # Decoder network: reconstructs the input from the (masked) capsule vectors.
    y = layers.Input(shape=(n_class, ))
    masked_by_y = Mask()(
        [digitcaps, y]
    )  # The true label is used to mask the output of capsule layer. For training
    masked = Mask(
    )(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])

    # manipulate model: perturb individual capsule dimensions via an additive noise input
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
def margin_loss(y_true, y_pred):
    """
    Margin loss from Eq.(4) of the CapsNet paper. Also works when
    y_true[i, :] contains more than one `1` (multi-label), though untested.
    :param y_true: [None, n_classes] one-hot (or multi-hot) targets.
    :param y_pred: [None, num_capsule] capsule lengths in [0, 1].
    :return: a scalar loss value.
    """
    # Penalty when a present class's capsule length falls below 0.9.
    present_term = K.square(K.maximum(0., 0.9 - y_pred))
    # Penalty when an absent class's capsule length rises above 0.1,
    # down-weighted by lambda = 0.5.
    absent_term = K.square(K.maximum(0., y_pred - 0.1))
    per_capsule = y_true * present_term + 0.5 * (1 - y_true) * absent_term
    return K.mean(K.sum(per_capsule, 1))
def train(model, eval_model, data, args):
    """
    Training a CapsuleNet.
    :param model: the CapsuleNet training model (inputs [x, y])
    :param eval_model: the evaluation model (input x only), used for the
        final test-set metrics. Shares weights with `model`.
    :param data: a tuple `((x_train, y_train), (x_test, y_test), classes)`
        with one-hot labels (as produced by `load_audiodata`)
    :param args: parsed command-line arguments
    :return: The trained model
    """
    # unpacking the data
    (x_train, y_train), (x_test, y_test), classes = data
    print("x_train {}, y_train {}, x_test {}, y_test {}".format(
        x_train.shape, y_train.shape, x_test.shape, y_test.shape))
    # callbacks
    log = callbacks.CSVLogger(args.save_dir + '/log.csv')
    tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',
                               batch_size=args.batch_size,
                               histogram_freq=int(args.debug))
    # Keep only the best checkpoint; 'val_rec_macro' is presumably published
    # by the project-local MetricCallback below — TODO confirm.
    checkpoint = callbacks.ModelCheckpoint(args.save_dir +
                                           '/weights-{epoch:02d}.h5',
                                           monitor='val_rec_macro',
                                           mode='max',
                                           save_best_only=True,
                                           save_weights_only=True,
                                           verbose=1)
    # Exponential decay: lr * lr_decay ** epoch.
    lr_decay = callbacks.LearningRateScheduler(
        schedule=lambda epoch: args.lr * (args.lr_decay**epoch))
    # Resume from a previous run when saved weights exist.
    if os.path.isfile(args.save_dir + '/trained_model.h5'):
        model.load_weights(args.save_dir + '/trained_model.h5')
    # compile the model: margin loss on the capsule output, MSE on the
    # reconstruction, weighted by lam_recon.
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., args.lam_recon],
                  metrics={'capsnet': 'accuracy'})
    mc = MetricCallback(validation_data=((x_test, y_test), (y_test, x_test)),
                        labels=classes,
                        batch_size=args.batch_size)
    model.fit([x_train, y_train], [y_train, x_train],
              batch_size=args.batch_size,
              epochs=args.epochs,
              validation_data=[[x_test, y_test], [y_test, x_test]],
              callbacks=[mc, log, tb, checkpoint, lr_decay],
              shuffle=True)
    model.save_weights(args.save_dir + '/trained_model.h5')
    print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
    plot_log(args.save_dir + '/log.csv', show=True)
    # Final evaluation. The capsnet head outputs one probability (capsule
    # length) per class while y_test is one-hot; sklearn's accuracy_score /
    # confusion_matrix / recall_score reject continuous probability inputs,
    # so convert both to label indices first (bug fix).
    y_prob = eval_model.predict(
        x_test, batch_size=args.batch_size)[0].astype("float32")
    y_pred = np.argmax(y_prob, axis=1)
    y_true = np.argmax(y_test, axis=1)
    acc = accuracy_score(y_true, y_pred)
    cm = confusion_matrix(y_true, y_pred)
    recall = recall_score(y_true, y_pred, average="macro")
    print("Accuracy: {:.2f}%".format(acc * 100))
    print("Recall score: {:.2f}%".format(recall * 100))
    print("Confusion matrix:\n{}".format(cm))
    return model
def test(model, data, args):
    """
    Evaluate a trained evaluation model on the test set and print metrics.
    :param model: evaluation model returning [class probabilities, reconstruction]
    :param data: tuple `(x_test, y_test)` with one-hot `y_test`
    :param args: parsed arguments (unused here; kept for interface symmetry)
    """
    x_test, y_test = data
    y_prob, x_recon = model.predict(x_test, batch_size=100)
    print('-' * 30 + 'Begin: test' + '-' * 30)
    # Bug fix: the model outputs per-class probabilities and y_test is
    # one-hot; sklearn's metrics raise on continuous probability inputs,
    # so reduce both to label indices before scoring.
    y_pred = np.argmax(y_prob, axis=1)
    y_true = np.argmax(y_test, axis=1)
    acc = accuracy_score(y_true, y_pred)
    cm = confusion_matrix(y_true, y_pred)
    recall = recall_score(y_true, y_pred, average="macro")
    print("Accuracy: {:.2f}%".format(acc * 100))
    print("Recall score: {:.2f}%".format(recall * 100))
    print("Confusion matrix:\n{}".format(cm))
def load_audiodata(args):
    """
    Load train/test spectrogram arrays from .npy files and one-hot encode
    the labels.
    :param args: parsed arguments providing data_train, labels_train,
        data_test and labels_test file paths
    :return: (x_train, y_train, x_test, y_test, classes) where the x arrays
        are float32 with a trailing channel axis and the y arrays are one-hot
    """
    def _to_4d(arr):
        # Add a trailing channel axis: (N, H, W) -> (N, H, W, 1), float32.
        return arr.reshape(-1, arr.shape[1], arr.shape[2], 1).astype("float32")

    binarizer = LabelBinarizer()
    # 1) training split (min/max normalisation left disabled, as before)
    x_train = _to_4d(np.load(args.data_train))
    y_train = binarizer.fit_transform(np.load(args.labels_train))
    # 2) test split, encoded with the binarizer fitted on the training labels
    x_test = _to_4d(np.load(args.data_test))
    y_test = binarizer.transform(np.load(args.labels_test))
    print("Training dataset {}x{}x{}x{} .. labels {}".format(
        x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3],
        y_train.shape))
    print("Test dataset {}x{}x{}x{} .. labels {}".format(
        x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3],
        y_test.shape))
    # Fail fast on corrupt inputs.
    assert not np.any(np.isnan(x_train))
    assert not np.any(np.isnan(x_test))
    return x_train, y_train, x_test, y_test, binarizer.classes_
if __name__ == "__main__":
    # Parse the hyper-parameters from the command line.
    parser = argparse.ArgumentParser(
        description="Capsule Network on 3D Audio data.")
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--batch-size', default=10, type=int)
    parser.add_argument('--lr',
                        default=0.001,
                        type=float,
                        help="Initial learning rate")
    parser.add_argument(
        '--lr-decay',
        default=0.9,
        type=float,
        help=
        "The value multiplied by lr at each epoch. Set a larger value for larger epochs"
    )
    parser.add_argument('--lam-recon',
                        default=0.392,
                        type=float,
                        help="The coefficient for the loss of decoder")
    parser.add_argument(
        '-r',
        '--routings',
        default=3,
        type=int,
        help="Number of iterations used in routing algorithm. should > 0")
    parser.add_argument('--debug',
                        action='store_true',
                        help="Save weights by TensorBoard")
    parser.add_argument('--save_dir', default='./result')
    parser.add_argument('-t',
                        '--testing',
                        action='store_true',
                        help="Test the trained model on testing dataset")
    parser.add_argument(
        '-w',
        '--weights',
        default=None,
        help="The path of the saved weights. Should be specified when testing")
    parser.add_argument('-tr',
                        '--data-train',
                        default=None,
                        help="Training dataset numpy file")
    parser.add_argument('-l-tr',
                        '--labels-train',
                        default=None,
                        help="Training labels numpy file")
    parser.add_argument('-te',
                        '--data-test',
                        default=None,
                        help="Test dataset numpy file")
    parser.add_argument('-l-te',
                        '--labels-test',
                        default=None,
                        help="Test labels numpy file")
    args = parser.parse_args()
    print(args)
    # Make sure the output directory exists before any callback writes to it.
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    # Load data and define the model; the number of output capsules equals
    # the number of one-hot label columns.
    x_train, y_train, x_test, y_test, classes = load_audiodata(args)
    model, eval_model, manipulate_model = CapsNet(
        input_shape=x_train.shape[1:],
        n_class=int(y_train.shape[1]),
        routings=args.routings)
    model.summary()
    # Train or test.
    if args.weights is not None:  # init the model weights with provided one
        model.load_weights(args.weights)
    if not args.testing:
        train(model=model,
              eval_model=eval_model,
              data=((x_train, y_train), (x_test, y_test), classes),
              args=args)
    else:  # as long as weights are given, will run testing
        if args.weights is None:
            print(
                'No weights are provided. Will test using random initialized weights.'
            )
        test(model=eval_model, data=(x_test, y_test), args=args)
|
nilq/baby-python
|
python
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of tensor_forest (extremely random forests)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.data import data_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
def _assert_float32(tensors):
  """Raise unless every given tensor has a float32 base dtype.

  Args:
    tensors: a single `Tensor` or a `dict` of `Tensor` objects.

  Raises:
    TypeError: if any tensor is not float32.
  """
  values = tensors.values() if isinstance(tensors, dict) else [tensors]
  for tensor in values:
    if tensor.dtype.base_dtype != dtypes.float32:
      raise TypeError('Expected dtype=float32, %s.' % tensor)
class TensorForestLossHook(session_run_hook.SessionRunHook):
  """Monitor to request stop when loss stops decreasing."""

  def __init__(self, early_stopping_rounds):
    # Number of consecutive non-improving observations tolerated before
    # requesting a stop.
    self.early_stopping_rounds = early_stopping_rounds
    # Best (lowest) loss seen so far; None until the first measurement.
    self.min_loss = None
    # Last global step observed; -1 means "not initialized yet".
    self.last_step = -1
    # self.steps records the number of steps for which the loss has been
    # non-decreasing
    self.steps = 0

  def before_run(self, run_context):
    # Fetch the global step and the training loss op (looked up by name in
    # the session graph) on every run.
    return session_run_hook.SessionRunArgs(
        {'global_step': contrib_framework.get_global_step(),
         'current_loss': run_context.session.graph.get_operation_by_name(
             LOSS_NAME).outputs[0]})

  def after_run(self, run_context, run_values):
    current_loss = run_values.results['current_loss']
    current_step = run_values.results['global_step']
    self.steps += 1
    # Guard against the global step going backwards, which might happen
    # if we recover from something.
    if self.last_step == -1 or self.last_step > current_step:
      logging.info('TensorForestLossHook resetting last_step.')
      self.last_step = current_step
      self.steps = 0
      return
    # Any improvement resets the non-improving counter.
    if self.min_loss is None or current_loss < self.min_loss:
      self.min_loss = current_loss
      self.steps = 0
    if self.steps > self.early_stopping_rounds:
      logging.info('TensorForestLossHook requesting stop.')
      run_context.request_stop()
def get_model_fn(params, graph_builder_class, device_assigner,
                 weights_name=None, keys_name=None):
  """Return a model function given a way to construct a graph builder.

  Args:
    params: forest hyperparameters passed to `graph_builder_class`.
    graph_builder_class: class building the forest inference/training graphs.
    device_assigner: passed to `graph_builder_class` to place ops.
    weights_name: optional feature key holding per-example weights.
    keys_name: optional feature key holding example keys passed through
      to predictions.

  Returns:
    A model_fn `(features, targets) -> (predictions, loss, train_op)`.
  """
  def _model_fn(features, targets):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)
    processed_features, spec = data_ops.ParseDataTensorOrDict(features)
    _assert_float32(processed_features)
    if targets is not None:
      targets = data_ops.ParseLabelTensorOrDict(targets)
      _assert_float32(targets)
    graph_builder = graph_builder_class(params, device_assigner=device_assigner)
    inference = {eval_metrics.INFERENCE_PROB_NAME:
                 graph_builder.inference_graph(processed_features,
                                               data_spec=spec)}
    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    # Bug fix: `keys`, when present, is a Tensor; using a Tensor in a boolean
    # context (`if keys:`) raises a TypeError in graph mode, so compare
    # against None explicitly.
    if keys is not None:
      inference[KEYS_NAME] = keys
    # targets might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_loss = None
    training_graph = None
    if targets is not None:
      training_loss = graph_builder.training_loss(processed_features, targets,
                                                  data_spec=spec,
                                                  name=LOSS_NAME)
      # Group the forest update with a global-step increment.
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              processed_features, targets, data_spec=spec,
              input_weights=weights),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
  return _model_fn
class TensorForestEstimator(evaluable.Evaluable, trainable.Trainable):
  """An estimator that can train and evaluate a random forest."""

  def __init__(self, params, device_assigner=None, model_dir=None,
               graph_builder_class=tensor_forest.RandomForestGraphs,
               config=None, weights_name=None, keys_name=None,
               feature_engineering_fn=None, early_stopping_rounds=100):
    """Initializes a TensorForestEstimator.

    Args:
      params: forest hyperparameters object; `params.fill()` is called to
        complete derived values.
      device_assigner: passed to `graph_builder_class` to place ops.
      model_dir: directory used by the underlying `Estimator` for
        checkpoints.
      graph_builder_class: class that builds the forest graphs (default
        `tensor_forest.RandomForestGraphs`).
      config: `RunConfig` for the underlying `Estimator`.
      weights_name: optional feature key holding per-example training
        weights.
      keys_name: optional feature key holding example keys passed through
        to predictions.
      feature_engineering_fn: forwarded to the underlying `Estimator`.
      early_stopping_rounds: non-improving loss observations tolerated by
        the default `TensorForestLossHook` before requesting stop.
    """
    self.params = params.fill()
    self.graph_builder_class = graph_builder_class
    self.early_stopping_rounds = early_stopping_rounds
    self._estimator = estimator.Estimator(
        model_fn=get_model_fn(params, graph_builder_class, device_assigner,
                              weights_name=weights_name, keys_name=keys_name),
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)

  def evaluate(
      self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None,
      steps=None, metrics=None, name=None):
    """See evaluable.Evaluable."""
    return self._estimator.evaluate(
        input_fn=input_fn, x=x, y=y, feed_fn=feed_fn,
        batch_size=batch_size, steps=steps,
        metrics=metrics, name=name)

  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable."""
    # Default to early stopping on the training loss when no monitors are
    # supplied by the caller.
    if not monitors:
      monitors = [TensorForestLossHook(self.early_stopping_rounds)]
    self._estimator.fit(input_fn=input_fn, x=x, y=y,
                        batch_size=batch_size, steps=steps, monitors=monitors,
                        max_steps=max_steps)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(
      self, x=None, input_fn=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Returns prediction probabilities for given features (classification).

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities (or an iterable of predicted
      probabilities if as_iterable is True).

    Raises:
      ValueError: If both or neither of x and input_fn were given.
    """
    results = self._estimator.predict(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    # NOTE: the generator variable `x` below shadows the `x` parameter.
    if as_iterable:
      return (x[eval_metrics.INFERENCE_PROB_NAME] for x in results)
    else:
      return results[eval_metrics.INFERENCE_PROB_NAME]

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(
      self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Returns predictions for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      axis: Axis on which to argmax (for classification).
        Last axis is used by default.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes or regression values (or an iterable of
      predictions if as_iterable is True).
    """
    results = self._estimator.predict(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    # Regression returns the raw probability/value output; classification
    # returns the argmax'ed class prediction.
    predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
                    else eval_metrics.INFERENCE_PRED_NAME)
    if as_iterable:
      return (x[predict_name] for x in results)
    else:
      return results[predict_name]

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_with_keys(
      self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Same as predict but also returns the example keys."""
    results = self._estimator.predict(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
                    else eval_metrics.INFERENCE_PRED_NAME)
    # Keys may be absent when keys_name was not configured; None is returned
    # in that case.
    if as_iterable:
      return ((x[predict_name], x.get(KEYS_NAME, None)) for x in results)
    else:
      return results[predict_name], results.get(KEYS_NAME, None)

  def export(self,
             export_dir,
             input_fn,
             signature_fn=None,
             default_batch_size=1):
    """See BaseEstimator.export."""
    # Reset model function with basic device assigner.
    # Servo doesn't support distributed inference
    # but it will try to respect device assignments if they're there.
    # pylint: disable=protected-access
    orig_model_fn = self._estimator._model_fn
    self._estimator._model_fn = get_model_fn(
        self.params, self.graph_builder_class,
        tensor_forest.RandomForestDeviceAssigner())
    result = self._estimator.export(
        export_dir=export_dir,
        use_deprecated_input_fn=True,
        signature_fn=(signature_fn or
                      (export.regression_signature_fn
                       if self.params.regression else
                       export.classification_signature_fn_with_prob)),
        default_batch_size=default_batch_size,
        prediction_key=eval_metrics.INFERENCE_PROB_NAME)
    # Restore the original model_fn after exporting.
    self._estimator._model_fn = orig_model_fn
    # pylint: enable=protected-access
    return result
|
nilq/baby-python
|
python
|
import configparser
import collections
import os
import json
import copy
from .utils import parse_timedelta
from .scrape import get_all_scrapers
import argparse
# Configuration handling
class AltJobOptions(collections.UserDict):
    """
    Wrap argparse and configparser objects into one configuration dict object.

    A first parser handles config-file/meta options only, so the config
    file(s) can then supply defaults for the second, full parser.
    """
    def __init__(self):
        # overwriting arguments
        parser1 = argparse.ArgumentParser(
            # Turn off help, so we print all options in response to -h
            add_help=False)
        # CLI only arguments
        parser1.add_argument('-c','--config_file', help='configuration file(s). Default locations will be checked and loaded if file exists: `~/.alt_job/alt_job.conf`, `~/alt_job.conf` or `./alt_job.conf`', metavar='<File path>', nargs='+')
        parser1.add_argument('-t','--template_conf', action='store_true', help='print a template config file and exit. ')
        parser1.add_argument('-V','--version', action='store_true', help='print Alt Job version and exit. ')
        args1, remaining_argv = parser1.parse_known_args()
        if args1.config_file:
            config_file = ConfigurationFile(files=args1.config_file)
        else:
            config_file = ConfigurationFile()
        # Determine enabled scrapers: any config section named like a scraper
        config_file['alt_job']['enabled_scrapers'] = [ w for w in get_all_scrapers() if w in config_file ]
        # Determine the default arguments
        defaults_args=config_file['alt_job']
        # Parse rest of arguments
        # Don't suppress add_help here so it will handle -h
        parser2 = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[parser1],
            description="""Atl Job scrapes a bunch of green/social/alternative websites to send digest of new job postings by email. Also generates an Excel file with job postings informations.""",
            prog='python3 -m alt_job', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
        parser2.set_defaults(**defaults_args)
        # Arguments that overwrite [alt_job] config values
        parser2.add_argument("-x", "--xlsx_output", metavar='<File path>', help='Write all NEW jobs to Excel file')
        parser2.add_argument("-s", "--enabled_scrapers", metavar='<Website>', help="List of enabled scrapers. By default it's all scrapers configured in config file(s)", nargs='+')
        parser2.add_argument("-j", "--jobs_datafile", metavar='<File path>',
            help="""JSON file to store ALL jobs data. Default is '~/jobs.json'.
Use 'null' keyword to disable the storage of the datafile, all jobs will be considered as new and will be loaded""")
        parser2.add_argument("--workers", metavar='<Number>', help="Number of websites to scrape asynchronously", type=int)
        parser2.add_argument("--full", "--load_all_jobs",action="store_true", help="Load the full job description page to parse additionnal data. This settings is applied to all scrapers")
        parser2.add_argument("--all", "--load_all_new_pages",action="store_true", help="Load new job listing pages until older jobs are found. This settings is applied to all scrapers")
        parser2.add_argument("--quick", "--no_load_all_jobs", action='store_true', help='Do not load the full job description page to parse additionnal data (Much more faster). This settings is applied to all scrapers')
        parser2.add_argument("--first", "--no_load_all_new_pages", action='store_true', help='Load only the first job listing page. This settings is applied to all scrapers')
        parser2.add_argument("--mailto", metavar="<Email>", help='Emails to notify of new job postings', nargs='+')
        parser2.add_argument("--log_level", metavar='<String>', help='Alt job log level. Exemple: DEBUG')
        parser2.add_argument("--scrapy_log_level", metavar='<String>', help='Scrapy log level. Exemple: DEBUG')
        args2 = parser2.parse_args(remaining_argv)
        # Update 'alt_job' section with all parsed arguments
        config_file['alt_job'].update(vars(args2))
        config_file['alt_job'].update(vars(args1))
        if args2.full and args2.quick:
            raise ValueError("Incompatible options: --full is enable with --quick")
        # Bug fix: this second check previously duplicated the --full/--quick
        # test, so passing --all together with --first was never rejected.
        if args2.all and args2.first:
            raise ValueError("Incompatible options: --all is enable with --first")
        # Overwriting load_all_new_pages and load_full_jobs if passed --first or --quick
        if args2.first:
            for website in [ k for k in config_file.keys() if k in get_all_scrapers() ]:
                config_file[website]['load_all_new_pages']=False
        if args2.quick:
            for website in [ k for k in config_file.keys() if k in get_all_scrapers() ]:
                config_file[website]['load_full_jobs']=False
        if args2.full:
            for website in [ k for k in config_file.keys() if k in get_all_scrapers() ]:
                config_file[website]['load_full_jobs']=True
        if args2.all:
            for website in [ k for k in config_file.keys() if k in get_all_scrapers() ]:
                config_file[website]['load_all_new_pages']=True
        self.data=config_file
# Config default values
# Baseline configuration loaded into the parser before any config file or
# string is read; every key here can be overridden by config file(s) or CLI.
DEFAULT_CONFIG={
    'alt_job':{
        'log_level':'INFO',
        'scrapy_log_level':'ERROR',
        'jobs_datafile':'',
        'workers':5,
        'smtphost':'',
        'mailfrom':'',
        'smtpuser':'',
        'smtppass':'',
        'smtpport':'587',
        'smtptls':'Yes',
        'mailto':'[]'
    }
}
# Config keys cast with getbool() (Yes/No style booleans).
BOOL_VALUES=['use_google_cache', 'smtptls', 'load_full_jobs', 'load_all_new_pages', 'attach_jobs_description']
# Config keys parsed as JSON with getjson().
JSON_VALUES=['mailto', 'start_urls']
# Config keys cast to int with getint().
INT_VALUES=['smtpport', 'workers']
class ConfigurationFile(collections.UserDict):
    '''Dict-like configuration built from INI config file(s) or a string.

    Parses the given file(s)/string on top of DEFAULT_CONFIG and exposes the
    result as a dict of sections. All possible config values are present;
    default values apply when not specified in the file(s) or string.
    Values listed in BOOL_VALUES, JSON_VALUES and INT_VALUES are cast to
    their native types.
    '''
    def __init__(self, data=None, files=None, string=None):
        '''
        Arguments:
        - `data`: initial dict content (UserDict), usually None
        - `files`: list of config file paths; when empty, default locations
          are searched with find_config_files()
        - `string`: raw config text; takes precedence over `files`
        '''
        super().__init__(data)
        self.files = files if files else []
        # Init config parser
        self.parser = configparser.ConfigParser()
        # Load default configuration
        self.parser.read_dict(DEFAULT_CONFIG)
        if string:
            self.parser.read_string(string)
        else:
            if not self.files:
                self.files = find_config_files()
            if not self.files:
                print("Could not find default config: `~/.alt_job/alt_job.conf`, `~/alt_job.conf` or `./alt_job.conf`")
            else:
                for f in self.files:
                    try:
                        with open(f, 'r') as fp:
                            self.parser.read_file(fp)
                    # FileNotFoundError is a subclass of OSError, so a single
                    # OSError clause covers both (was a redundant tuple).
                    except OSError as err:
                        raise ValueError("Could not read config %s. Make sure the file exists and you have correct access right."%(f)) from err
        # configparser._sections is private; deep-copy it so the casts below
        # do not mutate the parser's internal state.
        self.data = copy.deepcopy(self.parser._sections)
        self.data['alt_job']['config_file'] = self.files
        # Cast int, boolean and JSON data structures
        for scraper in self.data:
            for config_option in self.data[scraper]:
                # List of BOOL config values
                if config_option in BOOL_VALUES:
                    self.data[scraper][config_option] = getbool(self.parser, scraper, config_option)
                # List of JSON config values
                if config_option in JSON_VALUES:
                    self.data[scraper][config_option] = getjson(self.parser, scraper, config_option)
                # List of INT config values
                if config_option in INT_VALUES:
                    self.data[scraper][config_option] = getint(self.parser, scraper, config_option)
def getjson(conf, section, key):
    '''Return the JSON-decoded value of a config option; an empty list when
    the decoded value is falsy (e.g. `null`).

    Arguments:
    - `conf`: configparser object
    - `section`: config section
    - `key`: alt_job config key
    '''
    raw_value = conf.get(section, key)
    try:
        parsed = json.loads(raw_value)
    except ValueError as err:
        raise ValueError("Could not read JSON value in config file for key '{}' and string: '{}'".format(key, conf.get(section,key))) from err
    return parsed if parsed else []
def getbool(conf, section, key):
    '''Return the boolean value of a config option (Yes/No style).

    Arguments:
    - `conf`: configparser object
    - `section`: config section
    - `key`: alt_job config key
    '''
    try:
        value = conf.getboolean(section, key)
    except ValueError as err:
        message = ("Could not read boolean value in config file for key '{}' "
                   "and string '{}'. Must be Yes/No".format(key, conf.get(section, key)))
        raise ValueError(message) from err
    return value
def getint(conf, section, key):
    '''Return the integer value of a config option.

    Arguments:
    - `conf`: configparser object
    - `section`: config section
    - `key`: alt_job config key
    '''
    try:
        value = conf.getint(section, key)
    except ValueError as err:
        message = ("Could not read int value in config file for key '{}' "
                   "and string '{}'. Must be an integer".format(key, conf.get(section, key)))
        raise ValueError(message) from err
    return value
def find_files(env_location, potential_files, default_content="", create=False):
    '''Find existent files based on folders name and file names.

    Arguments:
    - `env_location`: list of environment variables to use as a base path.
      Exemple: ['HOME', 'XDG_CONFIG_HOME', 'APPDATA', 'PWD']
    - `potential_files`: list of filenames. Exemple: ['.alt_job/alt_job.conf', 'alt_job.conf']
    - `default_content`: content written into the file when it is created
    - `create`: create the file at the first candidate path with
      `default_content` if no file exists

    Returns the list of existing (or newly created) file paths.
    '''
    potential_paths = []
    existent_files = []
    # Build candidate paths from each set environment variable.
    for env_var in env_location:
        if env_var in os.environ:
            for file_path in potential_files:
                potential_paths.append(os.path.join(os.environ[env_var], file_path))
    # Keep the candidates that actually exist.
    for p in potential_paths:
        if os.path.isfile(p):
            existent_files.append(p)
    # If no file found and create=True, init a new template config at the
    # first candidate path.
    if len(existent_files) == 0 and create:
        new_file = potential_paths[0]
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        with open(new_file, 'w') as config_file:
            config_file.write(default_content)
        # Bug fix: previously printed the stale loop variable `p` (the last
        # candidate path) instead of the file actually created.
        print("Init new file: %s" % (new_file))
        existent_files.append(new_file)
    return existent_files
def find_config_files(create=False):
    '''
    Returns the location of existing `alt_job.conf` file at `./alt_job.conf`
    and/or `~/alt_job.conf` or under `~/.alt_job/` folder.

    Arguments:
    - `create`: create a template config file at the first candidate location
      when none exists. Bug fix: this flag was previously accepted but never
      forwarded to find_files(), so it had no effect.
    '''
    files = ['.alt_job/alt_job.conf', 'alt_job.conf']
    env = ['HOME', 'XDG_CONFIG_HOME', 'APPDATA', 'PWD']
    return find_files(env, files, default_content=TEMPLATE_FILE, create=create)
# Configuration template -------------------------
TEMPLATE_FILE="""
[alt_job]
##### General config #####
# Logging
log_level=INFO
scrapy_log_level=ERROR
# Jobs data file, default is ~/jobs.json
# jobs_datafile=/home/user/Jobs/jobs-mtl.json
# Asynchronous workers, number of site to scan at the same time
# Default to 5.
# workers=10
##### Mail sender #####
# Email server settings
smtphost=smtp.gmail.com
mailfrom=you@gmail.com
smtpuser=you@gmail.com
smtppass=password
smtpport=587
smtptls=Yes
# Email notif settings
mailto=["you@gmail.com"]
##### Scrapers #####
# Scraper name
[goodwork.ca]
# URL to start the scraping, required for all scrapers
url=https://www.goodwork.ca/jobs.php?prov=QC
[cdeacf.ca]
url=http://cdeacf.ca/recherches?f%5B0%5D=type%3Aoffre_demploi
# Load full jobs details: If supported by the scraper,
# this will follow each job posting link in listing and parse full job description.
# turn on to parse all job informations
# Default to False!
load_full_jobs=True
[arrondissement.com]
url=https://www.arrondissement.com/tout-list-emplois/
# Load all new pages: If supported by the scraper,
# this will follow each "next page" links and parse next listing page
# until older (in database) job postings are found.
# Default to False!
load_all_new_pages=True
[chantier.qc.ca]
url=https://chantier.qc.ca/decouvrez-leconomie-sociale/offres-demploi
load_full_jobs=Yes
# Disabled scraper
# [engages.ca]
# url=https://www.engages.ca/emplois?search%5Bkeyword%5D=&search%5Bjob_sector%5D=&search%5Bjob_city%5D=Montr%C3%A9al
[enviroemplois.org]
# Multiple start URLs crawl
start_urls=["https://www.enviroemplois.org/offres-d-emploi?sector=®ion=6&job_kind=&employer=",
"https://www.enviroemplois.org/offres-d-emploi?sector=®ion=3&job_kind=&employer="]
"""
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from multi_var_gradient_decent import LinearRegressionUsingGD
from mpl_toolkits.mplot3d import axes3d
from sklearn.metrics import mean_squared_error, r2_score
def create_mesh_grid(start, end, num=30):
    """Build a square meshgrid spanning [start, end] on both axes.

    Generalized: the grid resolution used to be hard-coded to 30; `num`
    keeps that default so existing callers are unchanged.

    :param start: lower bound of both axes
    :param end: upper bound of both axes
    :param num: number of points per axis (default 30)
    :return: (theta_1, theta_2) meshgrid arrays of shape (num, num)
    """
    axis_1 = np.linspace(start, end, num)
    axis_2 = np.linspace(start, end, num)
    theta_1, theta_2 = np.meshgrid(axis_1, axis_2)
    return theta_1, theta_2
def plot_result(x1, x2, y, regression_model):
    """Scatter the data in 3-D and overlay the fitted plane as a wireframe."""
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    axes.scatter(x1, x2, y, c='b', marker='o')
    axes.set_xlabel('\u03B81')
    axes.set_ylabel('\u03B82')
    axes.set_zlabel('h(\u03B81, \u03B82)')
    # Evaluate the fitted plane w0 + w1*g1 + w2*g2 over the unit grid.
    grid_1, grid_2 = create_mesh_grid(0, 1)
    weights = regression_model.w
    plane = weights[0][0] + weights[1][0] * grid_1 + weights[2][0] * grid_2
    axes.plot_wireframe(grid_1, grid_2, plane, color='red')
    plt.show()
def plot_cost_function_2d(theta_1, theta_2, cost):
    """Draw a filled contour plot of the cost surface over (theta_1, theta_2)."""
    fig, axes = plt.subplots(1, 1)
    contour_levels = [0, 1, 2, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    axes.contourf(theta_1,
                  theta_2,
                  cost,
                  levels=contour_levels,
                  cmap=cm.coolwarm,
                  antialiased=True)
    plt.xlabel('\u03B81')
    plt.ylabel('\u03B82')
    plt.show()
def get_cost_function(theta_1, theta_2, x1, y, points_n):
    """Evaluate the halved, averaged squared-error surface over a theta grid.

    NOTE(review): the average divides by the mesh row count
    (theta_1.shape[0]), not by points_n — possibly intended to be the sample
    count; verify before reusing the absolute scale.

    :param theta_1, theta_2: meshgrid arrays of candidate parameters
    :param x1: per-sample inputs (indexable by i)
    :param y: per-sample targets (indexable by i)
    :param points_n: number of samples to accumulate
    :return: array of the same shape as theta_1 with the cost values
    """
    mesh_rows = theta_1.shape[0]
    cost = np.zeros([theta_1.shape[0], theta_1.shape[1]])
    for sample in range(points_n):
        cost += ((theta_1 * x1[sample] + theta_2) - y[sample]) ** 2
    return cost / (2 * mesh_rows)
def plot_cost_function(x, y, points_n):
    """Compute the cost surface over a fixed theta grid and display it."""
    grid_1, grid_2 = create_mesh_grid(-5, 15)
    surface = get_cost_function(grid_1, grid_2, x, y, points_n)
    plot_cost_function_2d(grid_1, grid_2, surface)
def plot_raw_data(x1, x2, y):
    """Scatter the raw (x1, x2, y) samples in 3-D."""
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    axes.scatter(x1, x2, y, c='b', marker='o')
    plt.xlabel('\u03B80')
    plt.ylabel('\u03B81')
    axes.set_zlabel('h(\u03B81, \u03B82)')
    plt.show()
def create_data(points_n):
np.random.seed(0)
x = np.random.rand(points_n, 2)
x1 = np.sort(x[:, 0].reshape(points_n, 1))
x2 = np.sort(x[:, 1].reshape(points_n, 1))
y = 2 + 3 * x1 + np.random.rand(points_n, 1) + 6 * x2 + np.random.rand(points_n, 1)
return x, x1, x2, y
def main():
    """Fit a 2-feature linear regression by gradient descent and plot results."""
    points_n = 100
    x, x1, x2, y = create_data(points_n)
    plot_raw_data(x1, x2, y)
    # Cost surfaces with respect to each feature separately.
    plot_cost_function(x1, y, points_n)
    plot_cost_function(x2, y, points_n)
    # Model initialization
    regression_model = LinearRegressionUsingGD(lr=0.05, n_iterations=10000)
    # Fit the data (train the model)
    regression_model.fit(x, y)
    # Predict
    y_predicted = regression_model.predict(x)
    # Model evaluation. Bug fix: mean_squared_error returns the MSE, but the
    # print below labels the value "Root mean squared error" — take the
    # square root so the label is correct.
    rmse = np.sqrt(mean_squared_error(y, y_predicted))
    r2 = r2_score(y, y_predicted)
    print('Weights:', regression_model.w)
    print('Root mean squared error: ', rmse)
    print('R2 score: ', r2)
    # plot
    plot_result(x1, x2, y, regression_model)

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import mxnet as mx
import utils
from model_utils import validate_model
from gluon_zoo import save_mobilenet1_0
from from_tensorflow import tf_dump_model
from os import path
def test_tf_resnet50_v1():
    """Dump the TF resnet50_v1 model and validate it (GPU 4)."""
    sym_path = "./data/tf_resnet50_v1.json"
    prm_path = "./data/tf_resnet50_v1.params"
    # The cached-file check is deliberately bypassed so the model is
    # re-dumped on every run:
    # if not path.exists(sym_path) or not path.exists(prm_path):
    if True:
        tf_dump_model("resnet50_v1")
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_path, prm_path, devices)
def test_tf_mobilenet():
    """Dump the TF mobilenet model and validate it (GPU 4)."""
    sym_path = "./data/tf_mobilenet.json"
    prm_path = "./data/tf_mobilenet.params"
    # The cached-file check is deliberately bypassed so the model is
    # re-dumped on every run:
    # if not path.exists(sym_path) or not path.exists(prm_path):
    if True:
        tf_dump_model("mobilenet")
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_path, prm_path, devices)
def test_mobilenet1_0():
    """Validate gluon mobilenet1_0, saving it locally first when absent."""
    sym_path = "./data/mobilenet1_0.json"
    prm_path = "./data/mobilenet1_0.params"
    if not path.exists(sym_path) or not path.exists(prm_path):
        save_mobilenet1_0()
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_path, prm_path, devices, iter_num=999999, dump_model=True)
def test_mobilenet_v2_1_0():
    """Validate mobilenetv2_1.0 from local json/params files (GPU 4)."""
    sym_path = "./data/mobilenetv2_1.0.json"
    prm_path = "./data/mobilenetv2_1.0.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_path, prm_path, devices)
def test_tf_inceptionv3():
    """Validate TF inception_v3 (299x299 input), dumping it first if absent."""
    sym_file = "./data/tf_inception_v3.json"
    prm_file = "./data/tf_inception_v3.params"
    if not (path.exists(sym_file) and path.exists(prm_file)):
        tf_dump_model("inception_v3")
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    # Large iter_num means "run through the whole validation set".
    validate_model(sym_file, prm_file, devices, input_size=299, iter_num=99999999)
def test_alexnet():
    """Validate alexnet with a large batch size on GPU 4."""
    sym_file = "./data/alexnet.json"
    prm_file = "./data/alexnet.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, batch_size=700, ctx=devices, iter_num=9999999)
def test_cifar10_resnet20_v1():
    """Validate cifar_resnet20_v1 against the CIFAR-10 dataset (32x32 input)."""
    sym_file = "./data/cifar_resnet20_v1.json"
    prm_file = "./data/cifar_resnet20_v1.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, input_size=32,
                   ds_name='cifar10', iter_num=9999999)
def test_resnet(suffix):
    """Validate a resnet variant chosen by *suffix* (e.g. "50_v1", "18_v1")."""
    sym_file = "./data/resnet" + suffix + ".json"
    prm_file = "./data/resnet" + suffix + ".params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, lambd=16, iter_num=999999)
def test_densenet161():
    """Validate densenet161 across GPUs 1-5 with a small per-device batch."""
    sym_file = "./data/densenet161.json"
    prm_file = "./data/densenet161.params"
    devices = [mx.gpu(int(dev)) for dev in "1,2,3,4,5".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, batch_size=16, iter_num=9999999)
def test_qd10_resnetv1_20():
    """Validate the quickdraw animal10 resnet20 model (1-channel, 28x28)."""
    sym_file = "./data/quick_raw_qd_animal10_2_cifar_resnet20_v2.json"
    prm_file = "./data/quick_raw_qd_animal10_2_cifar_resnet20_v2.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, num_channel=1,
                   input_size=28, ds_name='quickdraw', iter_num=999999)
def test_shufflenet_v1():
    """Validate shufflenet_v1 on GPU 4."""
    sym_file = "./data/shufflenet_v1.json"
    prm_file = "./data/shufflenet_v1.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, iter_num=9999999)
def test_squeezenet():
    """Validate squeezenet1.0 with batch size 60 on GPU 4."""
    sym_file = "./data/squeezenet1.0.json"
    prm_file = "./data/squeezenet1.0.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, batch_size=60, iter_num=9999999)
def test_vgg19():
    """Validate vgg19 on GPU 3."""
    sym_file = "./data/vgg19.json"
    prm_file = "./data/vgg19.params"
    devices = [mx.gpu(int(dev)) for dev in "3".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, iter_num=999999)
def test_quickdraw():
    """Validate the quickdraw WLT-augmentation checkpoint (1-channel, 28x28)."""
    sym_file = "./data/quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.json"
    prm_file = "./data/quickdraw_wlt_augmentation_epoch-4-0.8164531394275162.params"
    devices = [mx.gpu(int(dev)) for dev in "4".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, input_size=28, num_channel=1,
                   ds_name="quickdraw", iter_num=9999999)
def test_trec():
    """Validate the TREC sentence-classification model and dump it for MRT."""
    sym_file = "./data/trec.json"
    prm_file = "./data/trec.params"
    devices = [mx.gpu(int(dev)) for dev in "3".split(',') if dev.strip()]
    validate_model(sym_file, prm_file, devices, ds_name="trec",
                   input_shape=(38, 16), input_prec=16,
                   dump_model=True, dump_shape=(38, 1))
if __name__ == '__main__':
    utils.log_init()
    # Each commented-out test below was run once; the no-op triple-quoted string
    # that follows it records the measured accuracy (full precision --> quantized).
    # test_mobilenet1_0()
    '''
    2020-01-10 15:34:15
    top1: 70.76% --> 63.08%
    top5: 89.97% --> 85.02%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_mobilenet_v2_1_0() # 73% --> 0%
    # test_tf_inceptionv3()
    '''
    2020-01-10 16:08:03
    top1: 55.57% --> 53.74%
    top5: 77.56% --> 76.01%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_alexnet()
    '''
    2020-01-10 16:23:24
    top1: 55.92% --> 55.15%
    top5: 78.74% --> 78.20%
    Iteration: 70
    Total Sample: 49700
    '''
    # test_cifar10_resnet20_v1()
    '''
    2020-01-10 16:37:35
    top1: 92.88% --> 92.83%
    top5: 99.78% --> 99.75%
    Iteration: 623
    Total Sample: 9984
    '''
    # test_resnet("50_v1")
    '''
    2020-01-10 17:04:50
    top1: 77.38% --> 75.81%
    top5: 93.58% --> 93.06%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_resnet("18_v1")
    '''
    2020-01-10 16:55:48
    top1: 70.94% --> 70.14%
    top5: 89.92% --> 89.54%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_resnet("50_v1d_0.86") # not valid: Pooling count_include_pad:True
    # test_resnet("18_v1b_0.89")
    '''
    2020-01-10 17:00:43
    top1: 67.20% --> 63.82%
    top5: 87.45% --> 85.60%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_resnet("50_v2")
    '''
    2020-01-10 17:29:01
    top1: 77.15% --> 74.13%
    top5: 93.44% --> 91.76%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_densenet161()
    '''
    2020-01-10 20:33:58
    top1: 77.61% --> 77.32%
    top5: 93.82% --> 93.62%
    Iteration: 3127
    Total Sample: 49984
    '''
    # test_qd10_resnetv1_20()
    '''
    2020-01-10 17:57:44
    top1: 85.72% --> 85.73%
    top5: 98.71% --> 98.70%
    Iteration: 17330
    Total Sample: 277296
    '''
    # test_shufflenet_v1()
    '''
    2020-01-10 17:34:01
    top1: 63.48% --> 60.38%
    top5: 85.11% --> 82.88%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_squeezenet()
    '''
    2020-01-10 17:26:18
    top1: 57.20% --> 54.49%
    top5: 80.03% --> 77.86%
    Iteration: 832
    Total Sample: 49980
    '''
    # test_vgg19()
    '''
    2020-01-10 17:40:53
    top1: 74.12% --> 73.68%
    top5: 91.77% --> 91.66%
    Iteration: 3123
    Total Sample: 49984
    '''
    # test_quickdraw()
    '''
    2020-01-10 16:39:51
    top1: 81.66% --> 81.57%
    top5: 98.22% --> 98.20%
    Iteration: 17330
    Total Sample: 277296
    '''
    # The only test currently enabled; its results were not yet recorded below.
    test_trec()
    '''
    2020-01-10
    top1: -->
    top5: -->
    Iteration:
    Total Sample:
    '''
    # TODO: test
    # test_tf_mobilenet() # 0% --> 0%, maybe due to pad
    # test_tf_resnet50_v1() # 0% --> 0%
|
nilq/baby-python
|
python
|
import click
import reader
from reader._cli import setup_logging
def make_add_response_headers_middleware(wsgi_app, headers):
    """Wrap *wsgi_app* so the given (name, value) pairs are appended to the
    header list of every response it produces."""

    def middleware(environ, start_response):
        def patched_start_response(status, response_headers, exc_info=None):
            # Mutate the header list in place before handing it downstream.
            response_headers.extend(headers)
            return start_response(status, response_headers, exc_info)

        return wsgi_app(environ, patched_start_response)

    return middleware
@click.command()
@click.pass_obj
@click.option('-h', '--host', default='localhost', help="The interface to bind to.")
@click.option('-p', '--port', default=8080, type=int, help="The port to bind to.")
@click.option(
    '--plugin',
    multiple=True,
    envvar=reader._APP_PLUGIN_ENVVAR,
    help="Import path to a web app plug-in. Can be passed multiple times.",
)
@click.option('-v', '--verbose', count=True)
def serve(config, host, port, plugin, verbose):
    """Start a local HTTP reader server."""
    setup_logging(verbose)
    # Imported lazily so other CLI commands don't pay the werkzeug/app cost.
    from werkzeug.serving import run_simple
    from . import create_app
    # CLI/envvar plug-ins replace any configured ones (values default to None).
    if plugin:
        config['app']['plugins'] = dict.fromkeys(plugin)

    # TODO: remove this once we make debug_storage a storage_arg
    config['default']['reader'].pop('debug_storage', None)

    app = create_app(config)
    # Attach a conservative Referrer-Policy header to every response.
    app.wsgi_app = make_add_response_headers_middleware(
        app.wsgi_app,
        [('Referrer-Policy', 'same-origin')],
    )
    run_simple(host, port, app)
|
nilq/baby-python
|
python
|
from collections import defaultdict
"""
Block
"""
class Block:
    """A block in the chain: payload plus authorship, round and QC metadata.

    The parent id is intentionally not stored — it is derivable from the
    carried QC (qc.vote_info.parent_id).
    """

    def __init__(self, author, round, payload, qc, id, txn_id):
        self.author, self.round = author, round
        self.payload, self.qc = payload, qc
        self.id, self.txn_id = id, txn_id
        self.children = []  # populated as descendant blocks are attached
|
nilq/baby-python
|
python
|
# Import third-party dependencies, converting missing-package failures into
# clear ImportErrors. Only ImportError is caught: the original bare `except:`
# would also have swallowed SystemExit/KeyboardInterrupt.
try:
    from webdriver_manager.chrome import ChromeDriverManager
except ImportError as err:
    raise ImportError("'webdriver-manager' package not installed") from err
try:
    from selenium.webdriver.common.keys import Keys
    from selenium import webdriver
except ImportError as err:
    raise ImportError("'selenium' package not installed") from err
from bs4 import BeautifulSoup
import pandas as pd
import time

# Browser-like request headers. Not used by scrappi() (selenium drives a real
# browser); kept for callers that issue plain HTTP requests.
usr_agent = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'none',
    'Accept-Language': 'en-US,en;q=0.8',
    'Connection': 'keep-alive',
}
def scrappi(url, n_pages):
    """Scrape a YouTube listing page with a headless Chrome browser.

    Scrolls to the bottom of the page *n_pages* times (pausing 3 s each time so
    YouTube can lazy-load more results), then parses the loaded video entries.

    Args:
        url (str): Page to open (e.g. a channel's /videos URL).
        n_pages (int): Number of end-of-page scrolls to perform.

    Returns:
        pandas.DataFrame with Title, Video_url, Views and Days columns.
    """
    chrome_options = webdriver.ChromeOptions()
    # Fixed: headless was being enabled twice ('--headless' argument plus the
    # deprecated .headless attribute); one mechanism is enough. The spurious
    # driver.create_options() call (an internal webdriver method) was dropped.
    chrome_options.add_argument('--headless')
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
    try:
        driver.get(url)
        # NOTE(review): find_element_by_tag_name is removed in Selenium 4 —
        # migrate to find_element(By.TAG_NAME, 'body') when upgrading.
        for _ in range(n_pages):
            driver.find_element_by_tag_name('body').send_keys(Keys.END)
            time.sleep(3)
        html = driver.page_source
    finally:
        # Always release the browser process, even if scraping fails
        # (the original leaked a Chrome instance per call).
        driver.quit()
    soup = BeautifulSoup(html, 'html.parser')
    videos = soup.find_all("div", {"id": "dismissible"})
    lst = []
    for video in videos:
        title_tag = video.find("a", {"id": "video-title"})  # look up once, reuse
        meta = video.find("div", {"id": "metadata-line"}).find_all('span')
        lst.append({
            'Title': title_tag.text,
            'Video_url': "https://www.youtube.com/" + title_tag['href'],
            'Views': meta[0].text,
            'Days': meta[1].text,
        })
    return pd.DataFrame(lst)
|
nilq/baby-python
|
python
|
import requests

# Fixture credentials and the httpbin echo endpoint used by the test below.
user = 'alexey'
password = 'styagaylo'
base_url = 'http://httpbin.org/'
def test_my_first_api():
    """POST the fixture credentials to httpbin and verify they are echoed back."""
    r = requests.post(base_url + 'post', data={'user': user, 'password': password})
    assert r.status_code == 200, "Unexpected status code: {}".format(r.status_code)
    # Parse the body once; requests does not cache .json(), so the original
    # re-parsed the response on every assertion.
    body = r.json()
    assert body['url'] == base_url + 'post', "Unexpected url: {}".format(body['url'])
    assert body['form']['user'] == user, "Unexpected user: {}".format(body['form']['user'])
    assert body['form']['password'] == password, "Unexpected password: {}".format(body['form']['password'])
|
nilq/baby-python
|
python
|
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from matplotlib import pylab
import matplotlib.pyplot as plt
import networkx as nx

# read results from the file
# NOTE(review): eval() on file contents executes arbitrary code — tolerable only
# because this file is produced by our own pipeline; never use on untrusted input.
docs = eval(open('final_out_dupe_detect.txt', 'r').read())
# Keep, per document, only the duplicate ids present in both candidate lists.
final_docs = [[i[0], list(set(i[2]) & set(i[3]))] for i in docs]
# initiate graph
G = nx.Graph()
duplicate_count = 2  # change this to see documents with more than this number of duplicates
# Nodes: documents above the duplicate threshold plus all of their duplicates.
nodes = list(set([i[0] for i in final_docs if len(i[1]) > duplicate_count] + [j for i in final_docs for j in i[1] if len(i[1]) > duplicate_count]))
edges = []
for i in final_docs:
    for j in i[1]:
        if i[0] != j and len(i[1]) > duplicate_count:
            edges.append((i[0], j))
G.add_nodes_from(nodes)
for e in edges:
    G.add_edge(*e)
print(nx.info(G))
def save_graph(graph, file_name):
    """Render *graph* with a spring layout, save it to *file_name* and show it.

    Args:
        graph: A networkx graph.
        file_name (str): Target image path (format inferred from extension).
    """
    # initialze Figure
    plt.figure(num=None, figsize=(20, 20), dpi=80)
    plt.axis('off')
    fig = plt.figure(1)
    pos = nx.spring_layout(graph)
    nx.draw_networkx_nodes(graph, pos)
    nx.draw_networkx_edges(graph, pos)
    nx.draw_networkx_labels(graph, pos)
    cut = 1.00
    # Fixed: spring_layout coordinates are centred on the origin, so the
    # previous xlim/ylim(0, max) clipped every node with a negative coordinate.
    # Clamp both ends of each axis instead.
    xs = [xx for xx, yy in pos.values()]
    ys = [yy for xx, yy in pos.values()]
    plt.xlim(cut * min(xs), cut * max(xs))
    plt.ylim(cut * min(ys), cut * max(ys))
    plt.savefig(file_name)
    plt.show()
    pylab.close()
    del fig
# save_graph(G,'out.png')
# Lay the graph out once and reuse the positions for both trace types.
pos=nx.spring_layout(G)
# Edge coordinates: each edge contributes its two endpoints plus a None
# separator so plotly draws disconnected segments in a single trace.
edge_x = []
edge_y = []
for edge in G.edges():
    x0, y0 = pos[edge[0]]
    x1, y1 = pos[edge[1]]
    edge_x.append(x0)
    edge_x.append(x1)
    edge_x.append(None)
    edge_y.append(y0)
    edge_y.append(y1)
    edge_y.append(None)
edge_trace = go.Scatter(
    x=edge_x, y=edge_y,
    line=dict(width=0.5, color='#888'),
    hoverinfo='none',
    mode='lines')
node_x = []
node_y = []
for node in G.nodes():
    x, y = pos[node]
    node_x.append(x)
    node_y.append(y)
node_trace = go.Scatter(
    x=node_x, y=node_y,
    mode='markers',
    hoverinfo='text',
    marker=dict(
        showscale=True,
        # colorscale options
        # 'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
        # 'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
        # 'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
        colorscale='YlGnBu',
        reversescale=True,
        color=[],
        size=10,
        colorbar=dict(
            thickness=15,
            title='Node Connections',
            xanchor='left',
            titleside='right'
        ),
        line_width=2))
# Colour nodes by degree and build per-node hover text.
# NOTE(review): the embedded <a href='https://plotly.com>grdg</a> link is
# missing its closing quote — confirm the intended URL/label.
node_adjacencies = []
node_text = []
for node, adjacencies in enumerate(G.adjacency()):
    node_adjacencies.append(len(adjacencies[1]))
    node_text.append(str(adjacencies[0])+' - # of connections: '+str(len(adjacencies[1]))+" <a href='https://plotly.com>grdg</a>")
node_trace.marker.color = node_adjacencies
node_trace.text = node_text
fig = go.FigureWidget(data=[edge_trace, node_trace],
                      layout=go.Layout(
                          title='<br>Duplicate Detection Results',
                          titlefont_size=16,
                          showlegend=False,
                          hovermode='closest',
                          margin=dict(b=20,l=5,r=5,t=40),
                          annotations=[ dict(
                              text="Python code: <a href='https://plotly.com/ipython-notebooks/network-graphs/'> https://plotly.com/ipython-notebooks/network-graphs/</a>",
                              showarrow=False,
                              xref="paper", yref="paper",
                              x=0.005, y=-0.002 ) ],
                          xaxis=dict(showgrid=False, zeroline=False, showticklabels=True),
                          yaxis=dict(showgrid=False, zeroline=False, showticklabels=True))
                      )
def update_point(trace, points, selector):
    """Click callback: log the indices of the points that were clicked.

    Fixed: the original printed the literal string 'points.poin_inds'
    (quoted and typo'd) instead of the clicked point indices.
    """
    print(points.point_inds)
# Register the click callback on the first trace (the edge trace).
# NOTE(review): FigureWidget click callbacks fire in Jupyter widget contexts;
# inside this Dash app the handler may never be invoked — confirm intended.
fig.data[0].on_click(update_point)
app = dash.Dash()
app.layout = html.Div([
    dcc.Graph(figure=fig)
])
app.run_server(debug=True, use_reloader=False)
|
nilq/baby-python
|
python
|
# ATLAS MC job option (Python 2 runtime): single-production vector LQ -> Drell-Yan.
import re
import os
import math
import subprocess
from MadGraphControl.MadGraphUtils import *

# Generation defaults; mass and couplings are overridden from the JO file name.
nevents = 10000
mode = 0
mass=0.500000e+03
channel="mumu"
gsmuL=-1
gseL=-1
gbmuL=-1
gbeL=-1
# Derive the LQ mass from the "M<digits>" token of the job-option file name.
JOname = runArgs.jobConfig[0]
matches = re.search("M([0-9]+).*\.py", JOname)
if matches is None:
    raise RuntimeError("Cannot find mass string.")
else:
    mass = float(matches.group(1))
# Select decay channel and the s/b-quark-to-lepton couplings from the JO name.
if "sbLQmumu" in JOname:
    channel="mumu"
    gsmuL=1.0
    gseL=0.0
    gbmuL=1.0
    gbeL=0.0
elif "sbLQee" in JOname:
    channel="ee"
    gsmuL=0.0
    gseL=1.0
    gbmuL=0.0
    gbeL=1.0
else:
    raise RuntimeError("Cannot find coupling string.")
# Write the MadGraph process card; "test" lists the run numbers these JOs serve.
test=[999999]
fcard = open('proc_card_mg5.dat','w')
if runArgs.runNumber in test and channel=="mumu":
    fcard.write("""
import model VectorLQ_U1_UFO\n
define p = p b b~
define j = j b b~
generate p p > mu+ mu- NP==2
output VectorLQSingleProduction""")
    fcard.close()
elif runArgs.runNumber in test and channel=="ee":
    fcard.write("""
import model VectorLQ_U1_UFO\n
define p = p b b~
define j = j b b~
generate p p > e+ e- NP==2
output VectorLQSingleProduction""")
    fcard.close()
else:
    raise RuntimeError("runNumber %i not recognised in these jobOptions."%runArgs.runNumber)
# Per-beam energy is half the centre-of-mass energy supplied by the transform.
beamEnergy = -999
if hasattr(runArgs, 'ecmEnergy'):
    beamEnergy = runArgs.ecmEnergy / 2.
else:
    raise RuntimeError("No center of mass energy found.")
process_dir = new_process()
# Use LHAPDF and a ktdurham merging cut of 1 GeV in the run card.
extras = {'pdlabel': "'lhapdf'",
          'ktdurham': '1.0'}
# Remove any stale run card before regenerating it.
try:
    os.remove('run_card.dat')
except OSError:
    pass
build_run_card(run_card_old=get_default_runcard(proc_dir=process_dir), run_card_new='run_card.dat',
               nevts=nevents, rand_seed=runArgs.randomSeed, beamEnergy=beamEnergy, extras=extras)
# Fetch the template param card and rewrite the mass and the four couplings
# derived above into a fresh param_card.dat.
if os.path.exists("param_card.dat"):
    os.remove("param_card.dat")
param_card_name = 'MadGraph_param_card_SingleVectorLQ_U1_DrellYan.py'
param_card = subprocess.Popen(['get_files', '-data', param_card_name])
param_card.wait()
if not os.access(param_card_name, os.R_OK):
    print 'ERROR: Could not get param card'
elif os.access('param_card.dat',os.R_OK):
    print 'ERROR: Old param card in the current directory. Dont want to clobber it. Please move it first.'
else:
    # Copy the template line by line, substituting the parameter entries.
    oldcard = open(param_card_name, 'r')
    newcard = open('param_card.dat', 'w')
    for line in oldcard:
        if 'mLQ' in line:
            newcard.write(' 9000002 %e # mLQ\n' % (mass))
        elif 'gbmuL' in line:
            newcard.write(' 2 %e # gbmuL\n' % (gbmuL))
        elif 'gbeL' in line:
            newcard.write(' 3 %e # gbeL\n' % (gbeL))
        elif 'gsmuL' in line:
            newcard.write(' 4 %e # gsmuL\n' % (gsmuL))
        elif 'gseL' in line:
            newcard.write(' 5 %e # gseL\n' % (gseL))
        else:
            newcard.write(line)
    oldcard.close()
    newcard.close()
print_cards()
runName = 'run_01'
# NOTE(review): new_process() was already called earlier in this JO; this second
# call creates a fresh MadGraph process directory and abandons the first one —
# confirm this is intended.
process_dir = new_process()
generate(run_card_loc='run_card.dat',
         param_card_loc='param_card.dat',
         mode=mode,
         proc_dir=process_dir,
         run_name=runName)
arrange_output(run_name=runName, proc_dir=process_dir, outputDS=runName + '._00001.events.tar.gz', lhe_version=3,
               saveProcDir=True)
# Shower the LHE events with Pythia8 (A14/NNPDF23LO tune) + EvtGen.
include("MC15JobOptions/Pythia8_A14_NNPDF23LO_EvtGen_Common.py")
include("MC15JobOptions/Pythia8_MadGraph.py")
#### Shower
# Generator metadata for ATLAS evgen bookkeeping.
evgenConfig.description = 'Single-production vector LQ to DrellYan'
# NOTE(review): the 'scalar' keyword looks inconsistent with a *vector* LQ
# sample — confirm against the official keyword list.
evgenConfig.keywords+=['BSM', 'exotic', 'leptoquark', 'scalar']
evgenConfig.generators+=["MadGraph", "Pythia8", "EvtGen"]
evgenConfig.process = 'pp -> ll'
evgenConfig.contact = ["Etienne Dreyer <etienne.dreyer@cern.ch>"]
evgenConfig.inputfilecheck = runName
runArgs.inputGeneratorFile=runName+'._00001.events.tar.gz'
|
nilq/baby-python
|
python
|
# coding: utf-8
# CI helper: rewrite the version field of a pubspec.yaml.
import sys
import utils
# python .github/workflows/scripts/override_version.py example/tools/analyzer_plugin/pubspec.yaml 1.1.0
pubspec_yaml = sys.argv[1]  # path of the pubspec.yaml to modify
version = sys.argv[2]       # new version string, e.g. "1.1.0"
utils.override_version(pubspec_yaml, version)
|
nilq/baby-python
|
python
|
"""Define a version number for turboPy"""
VERSION = ('2020', '10', '14')
__version__ = '.'.join(map(str, VERSION))
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
class TimeStampedModel(models.Model):
    """Abstract base model adding creation/update timestamps to subclasses."""

    # Fixed: the original passed _("") as verbose_name, but `_` (gettext) was
    # never imported, which raised NameError at import time; an empty verbose
    # name added nothing, so it is simply dropped.
    created_at = models.DateTimeField(auto_now=False, auto_now_add=True)  # set once on INSERT
    updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)  # refreshed on every save()

    class Meta:
        abstract = True  # no table for this base class
class Image(TimeStampedModel):
    """An uploaded photo with where it was taken and a caption."""
    file = models.ImageField()
    location = models.CharField(max_length=140)
    caption = models.TextField()
class Comment(TimeStampedModel):
    """Free-text comment; timestamps come from TimeStampedModel."""
    text = models.TextField()
|
nilq/baby-python
|
python
|
#
# Copyright 2018 Jaroslav Chmurny
#
#
# This file is part of Python Sudoku Sandbox.
#
# Python Sudoku Sandbox is free software developed for educational and
# experimental purposes. It is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides classes supporting implementation of search algorithms. Search
algorithm implementations should use the SearchSupport class which is more or less
a facade encapsulating the functionality provided by this module. They should not
use the other classes (except of the CandidateQueryMode enum) directly.
"""
from collections import deque, OrderedDict
from enum import Enum, unique
from logging import getLogger
from grid import Grid, CellStatus
_logger = getLogger()
@unique
class _ExclusionOutcome(Enum):
    """
    Defines possible outcomes of an exclusion, for instance an exclusion of a candidate
    value for a single undefined cell. The meaning of particular enum elements is the
    following:
    * UNAMBIGUOUS_CANDIDATE_FOUND indicates that after the exclusion of a candidate, there
      is only single applicable candidate remaining. This outcome indicates that an
      unambiguous candidate has been found by the exclusion.
    * UNAMBIGUOUS_CANDIDATE_NOT_FOUND indicates that the exclusion has not identified an
      unambiguous candidate. This value is to be used in several situations, for instance
      if two or more applicable candidates are still remaining after the exclusion, or if
      the exclusion of a candidate has not changed the set of candidates as the candidate
      was already excluded.
    This enum is internal, there is no need to use it directly in other modules.
    """

    UNAMBIGUOUS_CANDIDATE_FOUND = 1

    UNAMBIGUOUS_CANDIDATE_NOT_FOUND = 2
@unique
class CandidateQueryMode(Enum):
    """
    Defines options how value exclusion logic can provide candidates for an undefined cell.
    The meaning of particular enum elements is the following:
    * FIRST_UNDEFINED_CELL indicates that the candidates for the first undefined cell
      are to be returned, regardless of how many candidates are applicable to the first
      undefined cell.
    * UNDEFINED_CELL_WITH_LEAST_CANDIDATES indicates that the candidates for the
      undefined cell with least applicable candidates are to be returned.
    """

    FIRST_UNDEFINED_CELL = 1

    UNDEFINED_CELL_WITH_LEAST_CANDIDATES = 2
class _BaseCandidateInfo:
"""
Internal base class providing functionality common to UnambiguousCandidate and CandidateList
classes.
"""
def __init__(self, row, column):
self._row = row
self._column = column
@property
def cell_address(self):
"""
The coordinates of the cell the candidate information carried by this object
is applicable to.
Returns:
Tuple representing the above mentioned coordinates. The first element of the
tuple is the row, the second element is the column. Zero corresponds
to the first row or column, eight corresponds to the last row or column.
"""
return (self._row, self._column)
def __eq__(self, other):
if type(self) is not type(other):
return False
return self._row == other._row and self._column == other._column
def __repr__(self):
return "[row; column] = [{0}; {1}]".format(self._row, self._column)
class UnambiguousCandidate(_BaseCandidateInfo):
    """
    Immutable structure describing the single candidate value still applicable
    to one undefined cell, together with that cell's (row, column) address.
    """

    def __init__(self, row, column, value):
        super().__init__(row, column)
        self._value = value

    @property
    def value(self):
        """
        The only candidate value applicable to the cell whose coordinates are
        carried by this object.
        """
        return self._value

    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        return self._value == other._value

    def __repr__(self):
        return f"{type(self).__name__}({super().__repr__()}, value = {self._value})"
class CandidateList(_BaseCandidateInfo):
    """
    Simple structure bundling every candidate value still applicable to a
    single undefined cell with that cell's (row, column) address.
    """

    def __init__(self, row, column, values):
        super().__init__(row, column)
        self._values = tuple(values)

    @property
    def values(self):
        """
        Tuple of all candidate values applicable to the concerned cell at the
        time this object was created; empty if none remained applicable.
        """
        return self._values

    def __len__(self):
        return len(self._values)

    def __eq__(self, other):
        # Order-insensitive value comparison (addresses compared by the base).
        if not super().__eq__(other):
            return False
        return sorted(self._values) == sorted(other._values)

    def __repr__(self):
        return f"{type(self).__name__}({super().__repr__()}, values = {self._values})"
class _CellPeers:
"""
Internal helper class that creates a list of peers for every single cell contained in a Sudoku
grid. For a cell, the peers are other cells contained in the same row, in the same column, or
in the same region. Peers play a vital role in the exclusion logic provided by this module.
"""
@staticmethod
def create():
result = []
for row in range(0, 9):
cells_in_row = [tuple(_CellPeers.__create_for_single_cell(row, column)) for column in range(0, 9)]
result.append(tuple(cells_in_row))
return tuple(result)
@staticmethod
def __create_for_single_cell(row, column):
peers_in_row = [(row, c) for c in range(0, 9) if c != column]
peers_in_column = [(r, column) for r in range(0, 9) if r != row]
topmost_row = 3 * (row // 3)
leftmost_column = 3 * (column // 3)
peers_in_region = [(r, c) for r in range(topmost_row, topmost_row + 3) for c in range(leftmost_column, leftmost_column + 3)]
peers_in_region.remove((row, column))
return OrderedDict((address, True) for address in (peers_in_row + peers_in_column + peers_in_region)).keys()
class _CandidateValues:
"""
Internal helper that keeps track of applicable candidate values for a single cell. An
instance of this class is to be updated whenever one of the peers of the cell corresponding
to the instance of this class is updated. For better understanding, let's assume the
following example. An instance of this class corresponds to an undefined cell. The row
containing the cell contains another undefined cells, and the value of one of them is set
to 5. The above mentioned instance of this class has to be updated via the exclude_value
method as the value 5 is not applicable anymore.
"""
def __init__(self, bitmask = 0b111111111, applicable_value_count = 9):
self._bitmask = bitmask
self._applicable_value_count = applicable_value_count
def clear(self):
self._bitmask = 0
self._applicable_value_count = 0
def get_applicable_value_count(self):
return self._applicable_value_count
def exclude_value(self, value):
_logger.debug("Going to exclude the value %d, bitmask before exclusion = %s", value, format(self._bitmask, "b"))
value_mask = 1 << (value - 1)
if self._bitmask & value_mask == value_mask:
self._bitmask ^= value_mask
_logger.debug("Bitmask after exclusion = %s", format(self._bitmask, "b"))
self._applicable_value_count -= 1
if self._applicable_value_count == 1:
return _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND
return _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND
def get_applicable_values(self):
result = [value for value in range(1, 10) if self._bitmask & (1 << (value - 1))]
return tuple(result)
def get_single_remaining_applicable_value(self):
if self._applicable_value_count != 1:
message = "Cannot provide single remaining applicable value ({0} candidates remaining)."
raise RuntimeError(message.format(self._applicable_value_count))
for value in range(1, 10):
if self._bitmask == (1 << (value - 1)):
return value
def is_applicable(self, value):
value_mask = 1 << (value - 1)
return self._bitmask & value_mask == value_mask
def copy(self):
"""
Creates and returns a deep copy of this object.
"""
return _CandidateValues(self._bitmask, self._applicable_value_count)
class _CandidateValueExclusionLogic:
"""
Logic responsible for exclusion of candidate values inapplicable to particular cells.
For instance, if the value of a cell is set to 5, the value 5 is excluded for all
cells within the same row, column, and region. If a single candidate value remains
applicable to a cell, that value is considered as unambiguous candidate for that
cell. This class is an internal helper which should not be used directly by other
modules.
"""
__cell_peers = _CellPeers.create()
def __init__(self, original_exclusion_logic = None):
if original_exclusion_logic is None:
self._candidates = _CandidateValueExclusionLogic.__create_candidates_from_scratch()
else:
self._candidates = _CandidateValueExclusionLogic.__create_candidates_from_other_instance(original_exclusion_logic)
@staticmethod
def __create_candidates_from_scratch():
rows = []
for row in range(0, 9):
rows.append(tuple([_CandidateValues() for column in range(0, 9)]))
return tuple(rows)
@staticmethod
def __create_candidates_from_other_instance(original_exclusion_logic):
rows = []
for row in range(0, 9):
rows.append(tuple([original_exclusion_logic._candidates[row][column].copy() for column in range(0, 9)]))
return tuple(rows)
@staticmethod
def create_from(grid):
"""
Creates and returns a new CandidateValueExclusionLogic instance. Before returning
the above mentioned instance, candidate value exclusion is performed reflecting the
predefined and completed cells of the given grid.
Args:
grid:
Returns:
The created CandidateValueExclusionLogic instance.
"""
exclusion_logic = _CandidateValueExclusionLogic()
for (row, column) in Grid.get_all_cell_addresses():
if grid.get_cell_status(row, column) is not CellStatus.UNDEFINED:
value = grid.get_cell_value(row, column)
exclusion_logic.apply_and_exclude_cell_value(row, column, value)
return exclusion_logic
def apply_and_exclude_cell_value(self, row, column, value):
"""
Applies the given cell value to the cell with the given coordinates and excludes
the given cell value for the peers of the cell with the coordinates.
Args:
row (int): The row coordinate of the cell the given value is to
be applied to. Zero corresponds to the first row, eight
corresponds to the last row.
column (int) The column coordinate of the cell the given value is to
be applied to. Zero corresponds to the first column, eight
corresponds to the last column.
value (int): The value for the given cell.
Returns:
List of UnambiguousCandidate instances, one for each of those peers of the concerned
cell for which just a single applicable candidate value has remained after the
exclusion. None is returned if there is no such peer.
"""
_logger.debug("Going to apply candidate value %d to cell [%d, %d]", value, row, column)
self._candidates[row][column].clear()
result = None
for cell in _CandidateValueExclusionLogic.__cell_peers[row][column]:
row, column = cell
_logger.debug("Going to exclude candidate value %d for cell [%d, %d]", value, row, column)
exclusion_outcome = self._candidates[row][column].exclude_value(value)
_logger.debug("Exclusion outcome = %s", exclusion_outcome)
if exclusion_outcome is _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND:
result = result if result is not None else []
candidate_list = UnambiguousCandidate(row, column, self._candidates[row][column].get_single_remaining_applicable_value())
result.append(candidate_list)
return result
def get_undefined_cell_candidates(self, mode):
"""
Returns a list of candidate values applicable to one of the undefined cells.
Args:
mode: Determines which the undefined cell for which the candidate values
are to be provided.
Returns:
New CandidateList instance carrying the applicable candidate values as well
as the address of the undefined cell the candidate values are applicable to.
Raises:
ValueError: If unexpected mode is received.
"""
if mode is CandidateQueryMode.FIRST_UNDEFINED_CELL:
return self.__get_candidates_for_first_undefined_cell()
elif mode is CandidateQueryMode.UNDEFINED_CELL_WITH_LEAST_CANDIDATES:
return self.__get_candidates_for_undefined_cell_with_least_candidates()
message = "Unexpected candidate query mode {0}".format(mode)
raise ValueError(message)
def __get_candidates_for_first_undefined_cell(self):
for (row, column) in Grid.get_all_cell_addresses():
if self._candidates[row][column].get_applicable_value_count() > 0:
values = self._candidates[row][column].get_applicable_values()
return CandidateList(row, column, values)
return None
def __get_candidates_for_undefined_cell_with_least_candidates(self):
candidate_list = None
for (row, column) in Grid.get_all_cell_addresses():
count_for_current_cell = self._candidates[row][column].get_applicable_value_count()
if count_for_current_cell == 0:
continue
if candidate_list is None or count_for_current_cell < len(candidate_list):
candidate_list = CandidateList(row, column, self._candidates[row][column].get_applicable_values())
return candidate_list
def is_applicable(self, unambiguous_candidate):
"""
Verifies whether the given unambiguous candidate is applicable.
Args:
unambiguous_candidate: The unambiguous candidate to be verified.
Returns:
True if and only of the candidate value carried by the given candidate
object is applicable to the cell with the coordinates carried by the
given candidate object. False if the concerned cell is not empty, or if
the concerned cell value is already present in the row, column, or region
containing the concerned cell.
"""
row, column = unambiguous_candidate.cell_address
value = unambiguous_candidate.value
return self._candidates[row][column].is_applicable(value)
def get_applicable_value_count(self, row, column):
    """
    Returns the number of candidate values still applicable to the cell with
    the given coordinates.
    Args:
        row (int):    Zero-based row coordinate of the cell (0 = first row,
                      8 = last row).
        column (int): Zero-based column coordinate of the cell (0 = first
                      column, 8 = last column).
    Returns:
        int: The number of candidate values applicable to the cell.
    """
    return self._candidates[row][column].get_applicable_value_count()
def copy(self):
    """
    Creates and returns a deep copy of this exclusion logic instance via the
    class's copy-constructor path.
    """
    return _CandidateValueExclusionLogic(self)
class _RegionCandidateCells:
"""
Keeps track of cells within a region where a particular value is applicable.
"""
__row_peers = {0: 0b111111000, 1: 0b111000111, 2: 0b000111111}
__column_peers = {0: 0b110110110, 1: 0b101101101, 2: 0b011011011}
def __init__(self, topmost_row, leftmost_column, value, bitmask = 0b111111111, applicable_cell_count = 9):
self._topmost_row = topmost_row
self._leftmost_column = leftmost_column
self._value = value
self._bitmask = bitmask
self._applicable_cell_count = applicable_cell_count
def apply_and_exclude_cell_value(self, row, column, value):
_logger.debug("Going to apply/exclude value %d for [%d, %d]", value, row, column)
row_within_region, column_within_region = self.__get_cell_coordinates_within_this_region(row, column)
_logger.debug("Cell address within region [%d, %d]", row_within_region, column_within_region)
if (row_within_region, column_within_region) == (-1, -1):
# cell not contained in this region, and neither the row, nor the column
# containing the cell is crossing this region => nothing to be excluded
_logger.debug("Ignoring region starting at [%d, %d] for the value %d", self._topmost_row, self._leftmost_column, self._value)
return _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND
if row_within_region in [0, 1, 2] and column_within_region not in [0, 1, 2]:
_logger.debug("Row is crossing this region")
# cell not contained in this region, but the row containing the cell is
# crossing this region; depending on the value, we have to exclude either
# nothing, or all peers of the cell
if value != self._value:
_logger.debug("Ignoring the value %d (my value is %d)", value, self._value)
return _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND
peers_mask = _RegionCandidateCells.__row_peers[row_within_region]
_logger.debug("Peers mask (row) = %s, current status = %s", format(peers_mask, 'b'), format(self._bitmask, 'b'))
self._bitmask = self._bitmask & peers_mask
_logger.debug("New status = %s", format(self._bitmask, 'b'))
return self.__update_applicable_value_count()
if column_within_region in [0, 1, 2] and row_within_region not in [0, 1, 2]:
_logger.debug("Column is crossing this region")
# cell not contained in this region, but the column containing the cell is
# crossing this region; depending on the value, we have to exclude either
# nothing, or all peers of the cell
if value != self._value:
_logger.debug("Ignoring the value %d (my value is %d)", value, self._value)
return _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND
peers_mask = _RegionCandidateCells.__column_peers[column_within_region]
_logger.debug("Peers mask (column) = %s, current status = %s", format(peers_mask, 'b'), format(self._bitmask, 'b'))
self._bitmask = self._bitmask & peers_mask
_logger.debug("New status = %s", format(self._bitmask, 'b'))
return self.__update_applicable_value_count()
# cell contained in this region; depending on the value, we have to exclude eihter
# a single cell, or the entire region
if self._value == value:
_logger.debug("Excluding complete region")
self._bitmask = 0
self._applicable_cell_count = 0
return _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND
_logger.debug("Excluding single cell")
cell_mask = 1 << (3 * row_within_region + column_within_region)
cell_mask = 0b111111111 ^ cell_mask
self._bitmask = self._bitmask & cell_mask
_logger.debug("New status = %s", format(self._bitmask, 'b'))
return self.__update_applicable_value_count()
def __get_cell_coordinates_within_this_region(self, row, column):
row_within_region, column_within_region = (-1, -1)
if (3 * (row // 3)) == self._topmost_row:
row_within_region = row - self._topmost_row
if (3 * (column // 3)) == self._leftmost_column:
column_within_region = column - self._leftmost_column
return (row_within_region, column_within_region)
def __update_applicable_value_count(self):
new_count = 0
for shift in range(0, 9):
mask = 1 << shift
if self._bitmask & mask == mask:
new_count += 1
_logger.debug("Going to update the value count from %d to %d", self._applicable_cell_count, new_count)
result = _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_NOT_FOUND
if new_count == 1 and self._applicable_cell_count > new_count:
result = _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND
self._applicable_cell_count = new_count
return result
def get_single_remaining_applicable_cell(self):
if self._applicable_cell_count != 1:
message = "Cannot provide single remaining applicable cell ({0} candidates remaining)."
raise RuntimeError(message.format(self._applicable_value_count))
_logger.debug("Remaining bitmask = %s", format(self._bitmask, 'b'))
for i in range(0, 9):
mask = 1 << i
if self._bitmask & mask == mask:
row = self._topmost_row + (i // 3)
column = self._leftmost_column + (i % 3)
result = UnambiguousCandidate(row, column, self._value)
_logger.debug("%s will be returned", result)
return result
_logger.debug("None will be returned")
def copy(self):
"""
Creates and returns a deep copy of this object.
"""
return _RegionCandidateCells(self._topmost_row, self._leftmost_column, self._value, self._bitmask, self._applicable_cell_count)
class _RegionGrid:
"""
Helper class supporting candidate cell exclusion. Single instance of this class
aggregates 9 instances of _RegionCandidateCells.
"""
def __init__(self, value, regions = None):
if regions is None:
self._regions = tuple([_RegionCandidateCells(row, column, value) for row in [0, 3, 6] for column in [0, 3, 6]])
else:
self._regions = regions
def apply_and_exclude_cell_value(self, row, column, value):
result = None
for region in self._regions:
exclusion_outcome = region.apply_and_exclude_cell_value(row, column, value)
if exclusion_outcome is _ExclusionOutcome.UNAMBIGUOUS_CANDIDATE_FOUND:
result = result if result is not None else []
candidate = region.get_single_remaining_applicable_cell()
result.append(candidate)
return result
def copy(self):
"""
Creates and returns a deep copy of this object.
"""
regions_copy = tuple([single_region.copy() for single_region in self._regions])
return _RegionGrid(None, regions_copy)
class _CandidateCellExclusionLogic:
"""
Logic responsible for exclusion of candidate cells where a particular value is
not applicable. The exclusion leads to identification of the only cell within
a region where a value is applicable. For such a cell, the value is considered
as unambiguous candidate value. This class is an internal helper that should
not be used directly by other modules.
"""
def __init__(self, original_exclusion_logic = None):
if original_exclusion_logic is None:
self._region_grids = tuple([_RegionGrid(value) for value in range(1, 10)])
else:
self._region_grids = tuple([grid.copy() for grid in original_exclusion_logic._region_grids])
def apply_and_exclude_cell_value(self, row, column, value):
"""
Applies the given cell value to the cell with the given coordinates and excludes
all peers of the given cell as candidate cells for the given value.
Args:
row (int): The row coordinate of the cell the given value is to
be applied to. Zero corresponds to the first row, eight
corresponds to the last row.
column (int) The column coordinate of the cell the given value is to
be applied to. Zero corresponds to the first column, eight
corresponds to the last column.
value (int): The value for the given cell.
Returns:
List of UnambiguousCandidate instances, one for each of those cells which have
been identified as unambiguous candidate cells with any region for any value.
None is returned if the exclusion has not led to any cell being identified as
unambiguous candidate cell.
"""
_logger.debug("Going to apply & exclude the value %d for the cell [%d, %d]", value, row, column)
result = None
for grid in self._region_grids:
partial_result = grid.apply_and_exclude_cell_value(row, column, value)
if partial_result is not None:
result = result if result is not None else []
result += partial_result
return result
def copy(self):
"""
Creates and returns a deep copy of this object.
"""
return _CandidateCellExclusionLogic(self)
class _ExclusionLogic:
"""
Composite that aggregates and coordinates _CandidateValueExclusionLogic and
_CandidateCellExclusionLogic. This class is an internal helper that should not
be used directly by other modules.
"""
def __init__(self, candidate_value_exclusion = None, candidate_cell_exclusion = None):
if candidate_value_exclusion is None:
candidate_value_exclusion = _CandidateValueExclusionLogic()
self._candidate_value_exclusion = candidate_value_exclusion
if candidate_cell_exclusion is None:
candidate_cell_exclusion = _CandidateCellExclusionLogic()
self._candidate_cell_exclusion = candidate_cell_exclusion
def apply_and_exclude_cell_value(self, row, column, value):
"""
Applies the given cell value to the cell with the given coordinates and excludes
the given cell value for the peers of the cell with the coordinates.
Args:
row (int): The row coordinate of the cell the given value is to
be applied to. Zero corresponds to the first row, eight
corresponds to the last row.
column (int) The column coordinate of the cell the given value is to
be applied to. Zero corresponds to the first column, eight
corresponds to the last column.
value (int): The value for the given cell.
Returns:
List of UnambiguousCandidate instances, one for each of those cells for which just
a single applicable candidate value has remained after the exclusion. None is returned
if there is no such peer.
"""
_logger.debug("Going to apply & exclude the value %d for the cell [%d, %d]", value, row, column)
result = None
list = self._candidate_value_exclusion.apply_and_exclude_cell_value(row, column, value)
if list is not None:
_logger.debug("There are %d candidates from candidate value exclusion", len(list))
result = []
result.extend(list)
list = self._candidate_cell_exclusion.apply_and_exclude_cell_value(row, column, value)
if list is not None:
_logger.debug("There are %d candidates from candidate cell exclusion", len(list))
result = [] if result is None else result
result.extend(list)
return result
def is_applicable(self, unambiguous_candidate):
"""
Verifies whether the given unambiguous candidate is applicable.
Args:
unambiguous_candidate: The unambiguous candidate to be verified.
Returns:
True if and only of the candidate value carried by the given candidate
object is applicable to the cell with the coordinates carried by the
given candidate object. False if the concerned cell is not empty, or if
the concerned cell value is already present in the row, column, or region
containing the concerned cell.
"""
return self._candidate_value_exclusion.is_applicable(unambiguous_candidate)
def get_applicable_value_count(self, row, column):
"""
Returns the number of candidate values applicable to the cell with the given
coordinates.
Args:
row (int): The row coordinate of the cell for which the number of
applicable candidate values is to be returned. Zero
corresponds to the first row, eight corresponds to the
last row.
column (int): The column coordinate of the cell for which the number of
candidate values is to be returned. Zero corresponds to
the first column, eight corresponds to the last column.
"""
return self._candidate_value_exclusion.get_applicable_value_count(row, column)
def get_undefined_cell_candidates(self, mode):
"""
Returns a list of candidate values applicable to one of the undefined cells.
Args:
mode: Determines which the undefined cell for which the candidate values
are to be provided.
Returns:
New CandidateList instance carrying the applicable candidate values as well
as the address of the undefined cell the candidate values are applicable to.
None is returned if there is no undefined cell, or no candidate is applicable
to any of the undefined cells.
"""
return self._candidate_value_exclusion.get_undefined_cell_candidates(mode)
def copy(self):
"""
Creates and returns a deep copy of this object.
"""
return _ExclusionLogic(self._candidate_value_exclusion.copy(), self._candidate_cell_exclusion.copy())
class SearchSupport:
    """
    Facade encapsulating the functionality provided by this module. An
    instance of this class coordinates a grid with the exclusion logic that
    keeps track of the candidate values applicable to the grid.
    """

    def __init__(self, grid = None, original = None):
        """
        Initializer that either builds a brand new instance based on the given
        Grid, or clones the given SearchSupport instance. Exactly one of the
        two arguments must be provided; the other one must be None.
        Args:
            grid:     The grid the new search support is to be based on. Use
                      None when cloning an existing search support.
            original: The original search support that is to be cloned. Use
                      None when creating a new search support based on a grid.
        Raises:
            ValueError: If both or neither of the two arguments are provided.
        """
        if SearchSupport.__is_ordinary_constructor(grid, original):
            self.__init_from_scratch(grid)
        elif SearchSupport.__is_copy_constructor(grid, original):
            self.__init_from_other_instance(original)
        else:
            raise ValueError("Invalid arguments. Exactly one of the two arguments is expected.")

    @staticmethod
    def __is_ordinary_constructor(grid, original):
        return original is None and isinstance(grid, Grid)

    @staticmethod
    def __is_copy_constructor(grid, original):
        return grid is None and isinstance(original, SearchSupport)

    def __init_from_scratch(self, grid):
        # Seed the exclusion logic with every predefined cell of the grid and
        # queue any unambiguous candidates the seeding reveals.
        self._exclusion_logic = _ExclusionLogic()
        self._candidate_queue = deque()
        self._grid = grid
        for (row, column) in Grid.get_all_cell_addresses():
            if grid.get_cell_status(row, column) is not CellStatus.PREDEFINED:
                continue
            cell_value = grid.get_cell_value(row, column)
            candidates = self._exclusion_logic.apply_and_exclude_cell_value(row, column, cell_value)
            if candidates is not None:
                self._candidate_queue.extend(candidates)

    def __init_from_other_instance(self, original):
        self._exclusion_logic = original._exclusion_logic.copy()
        self._candidate_queue = deque()
        self._grid = original._grid.copy()

    @property
    def grid(self):
        """
        Provides a clone of the underlying grid.
        """
        return self._grid.copy()

    def has_completed_grid(self):
        """
        Verifies whether the underlying grid is already completed.
        Returns:
            True if and only if none of the cells of the underlying grid is
            empty; False if the underlying grid contains at least one empty
            value.
        """
        return self._grid.is_complete()

    def set_cell_value(self, row, column, value):
        """
        Sets the cell with the given coordinates to the given value, assumed
        the cell with the given coordinates is empty (i.e. its value is
        undefined). Subsequently, excludes the given value from applicable
        candidate values for the peers of the given cell. If the exclusion
        identifies unambiguous candidate(s) for any undefined cell(s), the
        unambiguous candidates are retained so that they can be provided by
        the get_unambiguous_candidate method.
        Args:
            row (int):    The row coordinate of the cell whose value is to be
                          set. Zero corresponds to the first row, eight
                          corresponds to the last row.
            column (int): The column coordinate of the cell whose value is to
                          be set. Zero corresponds to the first column, eight
                          corresponds to the last column.
            value (int):  The new value for the given cell.
        Raises:
            ValueError: If the given cell has already a value, regardless of
                        whether the value was defined in the original puzzle
                        or completed during the search.
        """
        self._grid.set_cell_value(row, column, value)
        candidates = self._exclusion_logic.apply_and_exclude_cell_value(row, column, value)
        _logger.info("Assignment [%d, %d] = %d completed, outcome of exclusion is %s", row, column, value, candidates)
        if candidates is not None:
            self._candidate_queue.extend(candidates)

    def has_empty_cells_without_applicable_candidates(self):
        """
        Verifies whether the underlying grid contains at least one undefined
        cell for which all nine values have been already excluded (i.e. no
        candidate value is applicable to the cell).
        Returns:
            True if and only if the underlying grid contains at least one
            undefined cell for which all nine values have been already
            excluded. False if at least one candidate value is applicable to
            each undefined cell of the underlying grid.
        """
        for (row, column) in Grid.get_all_cell_addresses():
            if self._grid.get_cell_status(row, column) is not CellStatus.UNDEFINED:
                continue
            if self._exclusion_logic.get_applicable_value_count(row, column) == 0:
                _logger.info("Cell [%d, %d] undefined, but there are no applicable candidates", row, column)
                return True
        return False

    def get_unambiguous_candidate(self):
        """
        Returns the next unambiguous candidate identified by one of the former
        invocations of the set_cell_value method. None is returned if there is
        no such unambiguous candidate.
        """
        while self._candidate_queue:
            candidate = self._candidate_queue.popleft()
            _logger.debug("Candidate taken from queue: %s", candidate)
            if self._exclusion_logic.is_applicable(candidate):
                _logger.debug("Candidate still applicable, going to return it")
                return candidate
            _logger.debug("Candidate not applicable anymore, cannot return it")
        return None

    def get_undefined_cell_candidates(self, mode):
        """
        Returns candidate values applicable to one of the undefined cells of
        the underlying grid.
        Args:
            mode: One of the elements of the CandidateQueryMode enum
                  determining which of the undefined cells of the underlying
                  grid is to be taken into account.
        """
        result = self._exclusion_logic.get_undefined_cell_candidates(mode)
        if not result:
            _logger.debug("No undefined cell candidates, returning None")
            return result
        _logger.info("Undefined cell candidates found (mode = %s): %s", mode, result)
        row, column = result.cell_address
        assert self._grid.get_cell_status(row, column) is CellStatus.UNDEFINED
        return result

    def copy(self):
        """
        Creates and returns a deep copy of this object.
        """
        return SearchSupport(original = self)
|
nilq/baby-python
|
python
|
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2022 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import abc
import json
import logging
from typing import Optional, List, Set, Tuple
from google.protobuf import message
from .params import KeeperParams
from .proto import enterprise_pb2 as proto
from . import api, utils, crypto
def query_enterprise(params):  # type: (KeeperParams) -> None
    """
    Loads (or refreshes) the enterprise data into the given parameters,
    creating the cached loader on first use.
    """
    loader = params.enterprise_loader
    if not loader:
        loader = _EnterpriseLoader()
        params.enterprise_loader = loader
    loader.load(params)
def _to_key_type(key_type):  # type: (proto.EncryptedKeyType) -> str
    """
    Maps a protobuf EncryptedKeyType value onto its keeper key-type name.
    Unknown values map to 'no_key'.
    """
    names = {
        proto.KT_ENCRYPTED_BY_DATA_KEY: 'encrypted_by_data_key',
        proto.KT_ENCRYPTED_BY_PUBLIC_KEY: 'encrypted_by_public_key',
        proto.KT_ENCRYPTED_BY_DATA_KEY_GCM: 'encrypted_by_data_key_gcm',
        proto.KT_ENCRYPTED_BY_PUBLIC_KEY_ECC: 'encrypted_by_public_key_ecc',
    }
    return names.get(key_type, 'no_key')
class EnterpriseInfo(object):
    """
    Read-only holder for the decrypted enterprise keys and the enterprise
    name. The private fields are populated by _EnterpriseLoader.
    """

    def __init__(self):
        self._tree_key = b''
        self._rsa_key = b''
        self._ec_key = b''
        self._enterprise_name = ''

    @property
    def tree_key(self):
        # Decrypted enterprise tree key (empty until loaded).
        return self._tree_key

    @property
    def rsa_key(self):
        # Decrypted enterprise RSA private key (empty until loaded).
        return self._rsa_key

    @property
    def ec_key(self):
        # Decrypted enterprise EC private key (empty until loaded).
        return self._ec_key

    @property
    def enterprise_name(self):
        # Display name of the enterprise (empty until loaded).
        return self._enterprise_name
class _EnterpriseLoader(object):
    """
    Incrementally downloads and decrypts the enterprise data, keeping the
    keeper entity lists in params.enterprise up to date.
    """

    def __init__(self):
        super(_EnterpriseLoader, self).__init__()
        self._enterprise = EnterpriseInfo()
        self._continuationToken = b''
        # Maps the protobuf entity type to the parser maintaining that entity.
        self._data_types = {  # type: dict[int, _EnterpriseDataParser]
            proto.NODES: _EnterpriseNodeEntity(self._enterprise),
            proto.USERS: _EnterpriseUserEntity(self._enterprise),
            proto.TEAMS: _EnterpriseTeamEntity(self._enterprise),
            proto.ROLES: _EnterpriseRoleEntity(self._enterprise),
            proto.LICENSES: _EnterpriseLicenseEntity(self._enterprise),
            proto.QUEUED_TEAMS: _EnterpriseQueuedTeamEntity(self._enterprise),
            proto.SCIMS: _EnterpriseScimEntity(self._enterprise),
            proto.SSO_SERVICES: _EnterpriseSsoServiceEntity(self._enterprise),
            proto.BRIDGES: _EnterpriseBridgeEntity(self._enterprise),
            proto.EMAIL_PROVISION: _EnterpriseEmailProvisionEntity(self._enterprise),
            proto.TEAM_USERS: _EnterpriseTeamUserEntity(self._enterprise),
            proto.QUEUED_TEAM_USERS: _EnterpriseQueuedTeamUserEntity(self._enterprise),
            proto.ROLE_USERS: _EnterpriseRoleUserEntity(self._enterprise),
            proto.ROLE_TEAMS: _EnterpriseRoleTeamEntity(self._enterprise),
            proto.MANAGED_NODES: _EnterpriseManagedNodeEntity(self._enterprise),
            proto.ROLE_PRIVILEGES: _EnterpriseRolePrivilegeEntity(self._enterprise),
            proto.ROLE_ENFORCEMENTS: _EnterpriseRoleEnforcements(self._enterprise),
            proto.MANAGED_COMPANIES: _EnterpriseManagedCompanyEntity(self._enterprise),
            proto.DEVICES_REQUEST_FOR_ADMIN_APPROVAL: _EnterpriseAdminApprovalRequestEntity(self._enterprise),
        }
        # Register the link tables so that deleting an entity cascades into
        # the links referencing it.
        teams = self._data_types[proto.TEAMS]
        if isinstance(teams, _EnterpriseEntity):
            teams.register_link('team_uid', self._data_types[proto.TEAM_USERS])
            teams.register_link('team_uid', self._data_types[proto.ROLE_TEAMS])
        users = self._data_types[proto.USERS]
        if isinstance(users, _EnterpriseEntity):  # bug fix: formerly re-checked "teams"
            users.register_link('enterprise_user_id', self._data_types[proto.TEAM_USERS])
            users.register_link('enterprise_user_id', self._data_types[proto.ROLE_USERS])
        roles = self._data_types[proto.ROLES]
        if isinstance(roles, _EnterpriseEntity):
            # bug fix: the role links were formerly registered on "users",
            # so deleting a role did not cascade into its link tables
            roles.register_link('role_id', self._data_types[proto.ROLE_TEAMS])
            roles.register_link('role_id', self._data_types[proto.ROLE_USERS])
            roles.register_link('role_id', self._data_types[proto.MANAGED_NODES])

    @property
    def enterprise(self):
        return self._enterprise

    def load(self, params):  # type: (KeeperParams) -> None
        """
        Downloads the enterprise data incrementally and merges it into
        params.enterprise. On the first call it also fetches and decrypts the
        enterprise tree key and the enterprise RSA/EC key pairs.
        """
        if params.enterprise is None:
            params.enterprise = {}
            self._continuationToken = b''
        if not self._enterprise.tree_key:
            rq = proto.GetEnterpriseDataKeysRequest()
            rs = api.communicate_rest(params, rq, 'enterprise/get_enterprise_data_keys',
                                      rs_type=proto.GetEnterpriseDataKeysResponse)
            if rs.treeKey:
                encrypted_tree_key = utils.base64_url_decode(rs.treeKey.treeKey)
                if rs.treeKey.keyTypeId == proto.ENCRYPTED_BY_DATA_KEY:
                    self._enterprise._tree_key = crypto.decrypt_aes_v1(encrypted_tree_key, params.data_key)
                elif rs.treeKey.keyTypeId == proto.ENCRYPTED_BY_PUBLIC_KEY:
                    if len(encrypted_tree_key) == 60:
                        # 60-byte ciphertext: tree key re-encrypted with the MSP tree key
                        self._enterprise._tree_key = crypto.decrypt_aes_v2(encrypted_tree_key, params.msp_tree_key)
                    else:
                        self._enterprise._tree_key = api.decrypt_rsa(rs.treeKey.treeKey, params.rsa_key)
                params.enterprise['unencrypted_tree_key'] = self._enterprise.tree_key
            if rs.enterpriseKeys:
                keys = {}
                if rs.enterpriseKeys.rsaEncryptedPrivateKey:
                    self._enterprise._rsa_key = \
                        api.decrypt_aes_plain(rs.enterpriseKeys.rsaEncryptedPrivateKey, self._enterprise.tree_key)
                    keys['rsa_public_key'] = utils.base64_url_encode(rs.enterpriseKeys.rsaPublicKey)
                    keys['rsa_encrypted_private_key'] = \
                        utils.base64_url_encode(rs.enterpriseKeys.rsaEncryptedPrivateKey)
                if rs.enterpriseKeys.eccEncryptedPrivateKey:
                    self._enterprise._ec_key = \
                        api.decrypt_aes_plain(rs.enterpriseKeys.eccEncryptedPrivateKey, self._enterprise.tree_key)
                    keys['ecc_public_key'] = utils.base64_url_encode(rs.enterpriseKeys.eccPublicKey)
                    keys['ecc_encrypted_private_key'] = \
                        utils.base64_url_encode(rs.enterpriseKeys.eccEncryptedPrivateKey)
                params.enterprise['keys'] = keys

        entities = set()
        while True:
            rq = proto.EnterpriseDataRequest()
            if self._continuationToken:
                rq.continuationToken = self._continuationToken
            rs = api.communicate_rest(params, rq, 'enterprise/get_enterprise_data_for_user',
                                      rs_type=proto.EnterpriseDataResponse)
            if rs.cacheStatus == proto.CLEAR:
                # server invalidated the cache: drop all previously parsed entities
                for d in self._data_types.values():
                    d.clear(params)
            if not self._enterprise.enterprise_name and rs.generalData:
                self._enterprise._enterprise_name = rs.generalData.enterpriseName
                params.enterprise['enterprise_name'] = self._enterprise.enterprise_name
            for ed in rs.data:
                entities.add(ed.entity)
                parser = self._data_types.get(ed.entity)
                if parser:
                    parser.parse(params, ed)
            self._continuationToken = rs.continuationToken
            if not rs.hasMore:
                break

        if proto.MANAGED_NODES in entities:
            self.load_missing_role_keys(params)
        if not entities.isdisjoint([proto.MANAGED_NODES, proto.NODES, proto.ROLE_USERS]):
            # node/role topology changed: invalidate derived caches
            if 'user_root_nodes' in params.enterprise:
                del params.enterprise['user_root_nodes']
            if 'user_managed_nodes' in params.enterprise:
                del params.enterprise['user_managed_nodes']

    @staticmethod
    def load_missing_role_keys(params):  # type: (KeeperParams) -> None
        """
        Fetches the role keys for managed-node roles that are not yet present
        in params.enterprise['role_keys'] / ['role_keys2'].
        """
        nodes = set()
        if 'managed_nodes' in params.enterprise:
            for mn in params.enterprise['managed_nodes']:
                nodes.add(mn['role_id'])
        if len(nodes) > 0:
            roles = set()
            if 'role_keys' in params.enterprise:
                for rk in params.enterprise['role_keys']:
                    roles.add(rk['role_id'])
            if 'role_keys2' in params.enterprise:
                for rk in params.enterprise['role_keys2']:
                    roles.add(rk['role_id'])
            nodes.difference_update(roles)
        if len(nodes) > 0:
            rq = proto.GetEnterpriseDataKeysRequest()
            rq.roleId.extend(nodes)
            rs = api.communicate_rest(params, rq, 'enterprise/get_enterprise_data_keys',
                                      rs_type=proto.GetEnterpriseDataKeysResponse)
            if len(rs.roleKey) > 0:
                if 'role_keys' not in params.enterprise:
                    params.enterprise['role_keys'] = []
                for rk1 in rs.roleKey:
                    params.enterprise['role_keys'].append({
                        'role_id': rk1.roleId,
                        'encrypted_key': utils.base64_url_encode(rk1.encryptedKey),
                        'key_type': _to_key_type(rk1.keyType)
                    })
            if len(rs.reEncryptedRoleKey) > 0:
                if 'role_keys2' not in params.enterprise:
                    params.enterprise['role_keys2'] = []
                for rk2 in rs.reEncryptedRoleKey:
                    params.enterprise['role_keys2'].append({
                        'role_id': rk2.role_id,
                        'role_key': utils.base64_url_encode(rk2.encryptedRoleKey),
                    })
class _EnterpriseDataParser(abc.ABC):
    """
    Abstract base for parsers that translate protobuf enterprise data into
    keeper entity dictionaries stored in params.enterprise.
    """

    def __init__(self, enterprise):  # type: (EnterpriseInfo) -> None
        self.enterprise = enterprise

    @abc.abstractmethod
    def parse(self, params, enterprise_data, **kwargs):  # type: (KeeperParams, proto.EnterpriseData, dict) -> None
        """Merges the given protobuf enterprise data into params.enterprise."""

    @abc.abstractmethod
    def get_entity_type(self):
        """Returns the protobuf message class of the parsed entity."""

    @abc.abstractmethod
    def get_keeper_entity_name(self):  # type: () -> str
        """Returns the params.enterprise key holding the parsed entities."""

    @abc.abstractmethod
    def to_keeper_entity(self, proto_entity, keeper_entity):
        """Copies the protobuf entity fields into the keeper entity dict."""

    def get_entities(self, params, create_if_absent=True):  # type: (KeeperParams, bool) -> Optional[List]
        """Returns the entity list, optionally creating an empty one."""
        name = self.get_keeper_entity_name()
        if name in params.enterprise:
            return params.enterprise[name]
        if not create_if_absent:
            return None
        entities = []
        params.enterprise[name] = entities
        return entities

    def clear(self, params):  # type: (KeeperParams) -> None
        """Empties the entity list, if it exists."""
        entities = self.get_entities(params, create_if_absent=False)
        if entities:
            entities.clear()
class _EnterpriseEntity(_EnterpriseDataParser):
    """
    Base class for parsers of standalone enterprise entities (nodes, users,
    teams, ...). Tracks the link parsers registered against it so that
    deleting an entity cascades into the related link tables.
    """

    def __init__(self, enterprise):  # type: (EnterpriseInfo) -> None
        super(_EnterpriseEntity, self).__init__(enterprise)
        self._links = []  # type: List[Tuple[str, _EnterpriseLink]]

    @abc.abstractmethod
    def get_keeper_entity_id(self, proto_entity):  # type: (dict) -> any
        pass

    @abc.abstractmethod
    def get_proto_entity_id(self, proto_entity):  # type: (message.Message) -> any
        pass

    @staticmethod
    def fix_data(d):
        # Some encrypted payloads carry trailing garbage after the closing
        # brace of the JSON document; trim everything past the last '}'.
        last_brace = d.rfind(b'}')
        if last_brace < len(d) - 1:
            return d[:last_brace + 1]
        return d

    def register_link(self, keeper_entity_id_name, parser):  # type: (str, _EnterpriseDataParser) -> None
        """Registers a link parser for cascade deletes keyed by the given ID name."""
        if isinstance(parser, _EnterpriseLink):
            self._links.append((keeper_entity_id_name, parser))

    def parse(self, params, enterprise_data, **kwargs):  # type: (KeeperParams, proto.EnterpriseData, dict) -> None
        """Merges protobuf entity records into params.enterprise, keyed by entity ID."""
        if not enterprise_data.data:
            return
        entities = self.get_entities(params)
        entity_map = {self.get_keeper_entity_id(x): x for x in entities}
        entity_type = self.get_entity_type()
        deleted_entities = set()
        for entity_data in enterprise_data.data:
            entity = entity_type()
            entity.ParseFromString(entity_data)
            entity_id = self.get_proto_entity_id(entity)
            if enterprise_data.delete:
                if entity_id in entity_map:
                    entity_map.pop(entity_id)
                    deleted_entities.add(entity_id)
                continue
            keeper_entity = entity_map.get(entity_id)
            if not keeper_entity:
                keeper_entity = {}
                entity_map[entity_id] = keeper_entity
            self.to_keeper_entity(entity, keeper_entity)
        entities[:] = entity_map.values()
        if deleted_entities:
            for keeper_entity_id_name, link in self._links:
                link.cascade_delete(params, keeper_entity_id_name, deleted_entities)
class _EnterpriseLink(_EnterpriseDataParser):
    """
    Base class for parsers of link records connecting two enterprise entities
    (e.g. team users, role users). A link is identified by the pair of its
    endpoint entity IDs.
    """

    @abc.abstractmethod
    def get_keeper_entity1_id(self, proto_entity):  # type: (dict) -> any
        pass

    @abc.abstractmethod
    def get_keeper_entity2_id(self, proto_entity):  # type: (dict) -> any
        pass

    @abc.abstractmethod
    def get_proto_entity1_id(self, proto_entity):  # type: (message.Message) -> any
        pass

    @abc.abstractmethod
    def get_proto_entity2_id(self, proto_entity):  # type: (message.Message) -> any
        pass

    def cascade_delete(self, params, keeper_entity_id, deleted_entities):  # type: (KeeperParams, str, Set) -> None
        """
        Removes every link whose endpoint named by keeper_entity_id refers to
        one of the deleted entity IDs.
        """
        entities = self.get_entities(params, create_if_absent=False)
        if not entities:
            return
        to_keep = [x for x in entities if keeper_entity_id not in x or x[keeper_entity_id] not in deleted_entities]
        if len(to_keep) < len(entities):
            entities.clear()
            entities.extend(to_keep)

    def parse(self, params, enterprise_data, **kwargs):  # type: (KeeperParams, proto.EnterpriseData, dict) -> None
        """
        Merges protobuf link records into params.enterprise, keyed by the
        'entity1:entity2' ID pair.
        """
        entities = self.get_entities(params)
        entity_map = {
            '{0}:{1}'.format(self.get_keeper_entity1_id(x), self.get_keeper_entity2_id(x)): x for x in entities
        }
        entity_type = self.get_entity_type()
        for entityData in enterprise_data.data:
            entity = entity_type()
            entity.ParseFromString(entityData)
            entity1_id = self.get_proto_entity1_id(entity)
            entity2_id = self.get_proto_entity2_id(entity)
            key = '{0}:{1}'.format(entity1_id, entity2_id)
            if enterprise_data.delete:
                if key in entity_map:
                    entity_map.pop(key)
            else:
                keeper_entity = entity_map.get(key)
                if not keeper_entity:
                    keeper_entity = {}
                    entity_map[key] = keeper_entity
                self.to_keeper_entity(entity, keeper_entity)
        entities.clear()
        entities.extend(entity_map.values())

    # NOTE: get_entities/clear are inherited from _EnterpriseDataParser; a
    # byte-identical override of get_entities was removed as redundant.
def _set_or_remove(obj, key, value): # type: (dict, str, any) -> None
if value is not None:
obj[key] = value
else:
if key in obj:
obj.pop(key)
class _EnterpriseNodeEntity(_EnterpriseEntity):
    """Parser for enterprise node entities."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.Node, dict) -> None
        """Copies the protobuf node fields into the keeper node dict and decrypts its data."""
        _set_or_remove(keeper_entity, 'node_id', proto_entity.nodeId)
        _set_or_remove(keeper_entity, 'parent_id', proto_entity.parentId if proto_entity.parentId > 0 else None)
        _set_or_remove(keeper_entity, 'bridge_id', proto_entity.bridgeId if proto_entity.bridgeId > 0 else None)
        _set_or_remove(keeper_entity, 'scim_id', proto_entity.scimId if proto_entity.scimId > 0 else None)
        _set_or_remove(keeper_entity, 'license_id', proto_entity.licenseId if proto_entity.licenseId > 0 else None)
        _set_or_remove(keeper_entity, 'encrypted_data', proto_entity.encryptedData)
        _set_or_remove(keeper_entity, 'duo_enabled', True if proto_entity.duoEnabled else None)
        _set_or_remove(keeper_entity, 'rsa_enabled', True if proto_entity.rsaEnabled else None)
        _set_or_remove(keeper_entity, 'sso_service_provider_id',
                       proto_entity.ssoServiceProviderId if proto_entity.ssoServiceProviderId > 0 else None)
        _set_or_remove(keeper_entity, 'restrict_visibility', proto_entity.restrictVisibility or None)
        data = {}
        if 'encrypted_data' in keeper_entity:
            try:
                decrypted = api.decrypt_data(keeper_entity['encrypted_data'], self.enterprise.tree_key)
                data.update(json.loads(self.fix_data(decrypted).decode('utf-8')))
            except Exception as e:
                logging.warning('Decrypt encryption data error: %s', e)
        elif 'parent_id' not in keeper_entity:
            # the root node has no parent; fall back to the enterprise name
            data['displayname'] = self.enterprise.enterprise_name
        keeper_entity['data'] = data

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('node_id')

    def get_proto_entity_id(self, entity):  # type: (proto.Node) -> any
        return entity.nodeId

    def get_entity_type(self):
        return proto.Node

    def get_keeper_entity_name(self):  # type: () -> str
        return 'nodes'
class _EnterpriseUserEntity(_EnterpriseEntity):
    """Parses proto.User messages into keeper 'users' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.User, dict) -> None
        _set_or_remove(keeper_entity, 'enterprise_user_id', self.get_proto_entity_id(proto_entity))
        _set_or_remove(keeper_entity, 'node_id', proto_entity.nodeId)
        _set_or_remove(keeper_entity, 'username', proto_entity.username)
        _set_or_remove(keeper_entity, 'encrypted_data', proto_entity.encryptedData)
        _set_or_remove(keeper_entity, 'key_type', proto_entity.keyType)
        _set_or_remove(keeper_entity, 'status', proto_entity.status)
        _set_or_remove(keeper_entity, 'lock', proto_entity.lock)
        _set_or_remove(keeper_entity, 'user_id', proto_entity.userId)
        _set_or_remove(keeper_entity, 'account_share_expiration',
                       proto_entity.accountShareExpiration if proto_entity.accountShareExpiration > 0 else None)
        _set_or_remove(keeper_entity, 'full_name', proto_entity.fullName if proto_entity.fullName else None)
        _set_or_remove(keeper_entity, 'job_title', proto_entity.jobTitle if proto_entity.jobTitle else None)

        data = {}
        encrypted_data = keeper_entity.get('encrypted_data')
        if encrypted_data:
            if keeper_entity.get('key_type') == 'no_key':
                # 'no_key' users carry the plain display name in the encrypted_data field
                data['displayname'] = encrypted_data
            else:
                try:
                    data_json = api.decrypt_data(encrypted_data, self.enterprise.tree_key)
                    data_json = self.fix_data(data_json)
                    data.update(json.loads(data_json.decode('utf-8')))
                except Exception as e:
                    logging.warning('Decrypt User data error: %s', e)
        # BUG FIX: was `elif 'full_name' in proto_entity:` / `proto_entity['full_name']` —
        # protobuf messages support neither `in` nor subscripting, so this raised
        # TypeError whenever encrypted_data was empty. Fall back to the keeper dict,
        # which was populated from proto_entity.fullName above.
        elif 'full_name' in keeper_entity:
            data['displayname'] = keeper_entity['full_name']
        keeper_entity['data'] = data

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('enterprise_user_id')

    def get_proto_entity_id(self, entity):  # type: (proto.User) -> any
        return entity.enterpriseUserId

    def get_entity_type(self):
        return proto.User

    def get_keeper_entity_name(self):  # type: () -> str
        return 'users'
class _EnterpriseTeamEntity(_EnterpriseEntity):
    """Parses proto.Team messages into keeper 'teams' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.Team, dict) -> None
        for key, value in (
            ('team_uid', self.get_proto_entity_id(proto_entity)),
            ('name', proto_entity.name),
            ('node_id', proto_entity.nodeId),
            ('restrict_edit', proto_entity.restrictEdit),
            ('restrict_sharing', proto_entity.restrictShare),
            ('restrict_view', proto_entity.restrictView),
            ('encrypted_data', proto_entity.encryptedData),
            ('encrypted_team_key', proto_entity.encryptedTeamKey),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('team_uid')

    def get_proto_entity_id(self, entity):  # type: (proto.Team) -> any
        return utils.base64_url_encode(entity.teamUid)

    def get_entity_type(self):
        return proto.Team

    def get_keeper_entity_name(self):  # type: () -> str
        return 'teams'
class _EnterpriseRoleEntity(_EnterpriseEntity):
    """Parses proto.Role messages into keeper 'roles' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.Role, dict) -> None
        for key, value in (
            ('role_id', self.get_proto_entity_id(proto_entity)),
            ('node_id', proto_entity.nodeId),
            ('encrypted_data', proto_entity.encryptedData),
            ('visible_below', proto_entity.visibleBelow),
            ('new_user_inherit', proto_entity.newUserInherit),
            ('key_type', proto_entity.keyType),
            ('role_type', proto_entity.roleType),
        ):
            _set_or_remove(keeper_entity, key, value)

        data = {}
        encrypted_data = keeper_entity.get('encrypted_data')
        if encrypted_data:
            try:
                decrypted = api.decrypt_data(encrypted_data, self.enterprise.tree_key)
                data.update(json.loads(self.fix_data(decrypted).decode('utf-8')))
            except Exception as e:
                logging.warning('Decrypt encryption data error: %s', e)
        keeper_entity['data'] = data

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('role_id')

    def get_proto_entity_id(self, entity):  # type: (proto.Role) -> any
        return entity.roleId

    def get_entity_type(self):
        return proto.Role

    def get_keeper_entity_name(self):  # type: () -> str
        return 'roles'
class _EnterpriseLicenseEntity(_EnterpriseEntity):
    """Parses proto.License messages into keeper 'licenses' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.License, dict) -> None
        _set_or_remove(keeper_entity, 'enterprise_license_id', proto_entity.enterpriseLicenseId)
        _set_or_remove(keeper_entity, 'name', proto_entity.name)
        _set_or_remove(keeper_entity, 'paid', proto_entity.paid)
        _set_or_remove(keeper_entity, 'number_of_seats', proto_entity.numberOfSeats)
        _set_or_remove(keeper_entity, 'expiration', proto_entity.expiration)
        # id fields below use 0 for "unset"; those keys are removed via None
        _set_or_remove(keeper_entity, 'license_key_id',
                       proto_entity.licenseKeyId if proto_entity.licenseKeyId > 0 else None)
        _set_or_remove(keeper_entity, 'product_type_id',
                       proto_entity.productTypeId if proto_entity.productTypeId > 0 else None)
        _set_or_remove(keeper_entity, 'seats_allocated', proto_entity.seatsAllocated)
        _set_or_remove(keeper_entity, 'seats_pending', proto_entity.seatsPending)
        _set_or_remove(keeper_entity, 'tier', proto_entity.tier)
        _set_or_remove(keeper_entity, 'file_plan',
                       proto_entity.filePlanTypeId if proto_entity.filePlanTypeId > 0 else None)
        # max_gb is derived from maxBytes, and only populated when a file plan exists
        _set_or_remove(keeper_entity, 'max_gb',
                       int(proto_entity.maxBytes / 1024 / 1024 / 1024) if proto_entity.filePlanTypeId > 0 else None)
        _set_or_remove(keeper_entity, 'storage_expiration',
                       proto_entity.storageExpiration if proto_entity.storageExpiration > 0 else None)
        _set_or_remove(keeper_entity, 'lic_status', proto_entity.licenseStatus)
        # MSP pool entries are flattened into plain dictionaries
        msp_pool = None
        if proto_entity.mspPool:
            msp_pool = [{
                'product_id': x.productId,
                'seats': x.seats,
                'availableSeats': x.availableSeats,
                'stash': x.stash
            } for x in proto_entity.mspPool]
        _set_or_remove(keeper_entity, 'msp_pool', msp_pool)
        # only record the managing enterprise when one is actually set (id > 0)
        if proto_entity.managedBy and proto_entity.managedBy.enterpriseId > 0:
            _set_or_remove(keeper_entity, 'managed_by', {
                'enterprise_id': proto_entity.managedBy.enterpriseId,
                'enterprise_name': proto_entity.managedBy.enterpriseName,
            })
        if proto_entity.addOns:
            _set_or_remove(keeper_entity, 'add_ons', [{
                'name': x.name,
                'enabled': x.enabled,
                'is_trial': x.isTrial,
                'created': x.created,
                'expiration': x.expiration,
            } for x in proto_entity.addOns])
        _set_or_remove(keeper_entity, 'next_billing_date',
                       proto_entity.nextBillingDate if proto_entity.nextBillingDate > 0 else None)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('enterprise_license_id')

    def get_proto_entity_id(self, entity):  # type: (proto.License) -> any
        return entity.enterpriseLicenseId

    def get_entity_type(self):
        return proto.License

    def get_keeper_entity_name(self):  # type: () -> str
        return 'licenses'
class _EnterpriseQueuedTeamEntity(_EnterpriseEntity):
    """Parses proto.QueuedTeam messages into keeper 'queued_teams' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.QueuedTeam, dict) -> None
        for key, value in (
            ('team_uid', self.get_proto_entity_id(proto_entity)),
            ('name', proto_entity.name),
            ('node_id', proto_entity.nodeId),
            ('encrypted_data', proto_entity.encryptedData),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('team_uid')

    def get_proto_entity_id(self, entity):  # type: (proto.QueuedTeam) -> any
        return utils.base64_url_encode(entity.teamUid)

    def get_entity_type(self):
        return proto.QueuedTeam

    def get_keeper_entity_name(self):  # type: () -> str
        return 'queued_teams'
class _EnterpriseScimEntity(_EnterpriseEntity):
    """Parses proto.Scim messages into keeper 'scims' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.Scim, dict) -> None
        synced = proto_entity.lastSynced
        for key, value in (
            ('scim_id', self.get_proto_entity_id(proto_entity)),
            ('node_id', proto_entity.nodeId),
            ('status', proto_entity.status),
            ('last_synced', synced if synced > 0 else None),
            ('role_prefix', proto_entity.rolePrefix),
            ('unique_groups', proto_entity.uniqueGroups),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('scim_id')

    def get_proto_entity_id(self, entity):  # type: (proto.Scim) -> any
        return entity.scimId

    def get_entity_type(self):
        return proto.Scim

    def get_keeper_entity_name(self):  # type: () -> str
        return 'scims'
class _EnterpriseTeamUserEntity(_EnterpriseLink):
    """Parses proto.TeamUser link messages into keeper 'team_users' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.TeamUser, dict) -> None
        _set_or_remove(keeper_entity, 'team_uid', self.get_proto_entity1_id(proto_entity))
        _set_or_remove(keeper_entity, 'enterprise_user_id', proto_entity.enterpriseUserId)
        # any userType other than USER/ADMIN maps to 2
        type_codes = {'USER': 0, 'ADMIN': 1}
        _set_or_remove(keeper_entity, 'user_type', type_codes.get(proto_entity.userType, 2))

    def get_keeper_entity1_id(self, entity):  # type: (dict) -> any
        return entity.get('team_uid')

    def get_keeper_entity2_id(self, entity):  # type: (dict) -> any
        return entity.get('enterprise_user_id')

    def get_proto_entity1_id(self, entity):  # type: (proto.TeamUser) -> any
        return utils.base64_url_encode(entity.teamUid)

    def get_proto_entity2_id(self, entity):  # type: (proto.TeamUser) -> any
        return entity.enterpriseUserId

    def get_entity_type(self):
        return proto.TeamUser

    def get_keeper_entity_name(self):  # type: () -> str
        return 'team_users'
class _EnterpriseRoleUserEntity(_EnterpriseLink):
    """Parses proto.RoleUser link messages into keeper 'role_users' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.RoleUser, dict) -> None
        for key, value in (
            ('role_id', self.get_proto_entity1_id(proto_entity)),
            ('enterprise_user_id', proto_entity.enterpriseUserId),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity1_id(self, entity):  # type: (dict) -> any
        return entity.get('role_id')

    def get_keeper_entity2_id(self, entity):  # type: (dict) -> any
        return entity.get('enterprise_user_id')

    def get_proto_entity1_id(self, entity):  # type: (proto.RoleUser) -> any
        return entity.roleId

    def get_proto_entity2_id(self, entity):  # type: (proto.RoleUser) -> any
        return entity.enterpriseUserId

    def get_entity_type(self):
        return proto.RoleUser

    def get_keeper_entity_name(self):  # type: () -> str
        return 'role_users'
class _EnterpriseRoleTeamEntity(_EnterpriseLink):
    """Parses proto.RoleTeam link messages into keeper 'role_teams' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.RoleTeam, dict) -> None
        for key, value in (
            ('role_id', self.get_proto_entity1_id(proto_entity)),
            ('team_uid', self.get_proto_entity2_id(proto_entity)),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity1_id(self, entity):  # type: (dict) -> any
        return entity.get('role_id')

    def get_keeper_entity2_id(self, entity):  # type: (dict) -> any
        return entity.get('team_uid')

    def get_proto_entity1_id(self, entity):  # type: (proto.RoleTeam) -> any
        # NOTE(review): snake_case access (role_id) unlike RoleUser.roleId —
        # presumably matches the .proto field name; confirm before changing.
        return entity.role_id

    def get_proto_entity2_id(self, entity):  # type: (proto.RoleTeam) -> any
        return utils.base64_url_encode(entity.teamUid)

    def get_entity_type(self):
        return proto.RoleTeam

    def get_keeper_entity_name(self):  # type: () -> str
        return 'role_teams'
class _EnterpriseManagedNodeEntity(_EnterpriseLink):
    """Parses proto.ManagedNode link messages into keeper 'managed_nodes' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.ManagedNode, dict) -> None
        for key, value in (
            ('role_id', self.get_proto_entity1_id(proto_entity)),
            ('managed_node_id', self.get_proto_entity2_id(proto_entity)),
            ('cascade_node_management', proto_entity.cascadeNodeManagement),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity1_id(self, entity):  # type: (dict) -> any
        return entity.get('role_id')

    def get_keeper_entity2_id(self, entity):  # type: (dict) -> any
        return entity.get('managed_node_id')

    def get_proto_entity1_id(self, entity):  # type: (proto.ManagedNode) -> any
        return entity.roleId

    def get_proto_entity2_id(self, entity):  # type: (proto.ManagedNode) -> any
        return entity.managedNodeId

    def get_entity_type(self):
        return proto.ManagedNode

    def get_keeper_entity_name(self):  # type: () -> str
        return 'managed_nodes'
class _EnterpriseRolePrivilegeEntity(_EnterpriseEntity):
    """Parses proto.RolePrivilege messages into keeper 'role_privileges' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.RolePrivilege, dict) -> None
        for key, value in (
            ('role_id', proto_entity.roleId),
            ('managed_node_id', proto_entity.managedNodeId),
            ('privilege', proto_entity.privilegeType),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        # composite key: there is no single id field for a privilege grant
        return ':'.join(str(part) for part in
                        (entity.get('role_id'), entity.get('managed_node_id'), entity.get('privilege')))

    def get_proto_entity_id(self, entity):  # type: (proto.RolePrivilege) -> any
        return ':'.join(str(part) for part in (entity.roleId, entity.managedNodeId, entity.privilegeType))

    def get_entity_type(self):
        return proto.RolePrivilege

    def get_keeper_entity_name(self):  # type: () -> str
        return 'role_privileges'
class _EnterpriseRoleEnforcements(_EnterpriseDataParser):
    """Accumulates proto.RoleEnforcement messages into per-role enforcement dictionaries.

    Unlike the plain entity parsers, each keeper entry aggregates all enforcement
    types for one role under its 'enforcements' dict.
    """

    def parse(self, params, enterprise_data, **kwargs):  # type: (KeeperParams, proto.EnterpriseData, dict) -> None
        entities = self.get_entities(params)
        # index the existing list by role_id for in-place merge
        entity_map = {x['role_id']: x for x in entities}
        entity_type = self.get_entity_type()
        for entityData in enterprise_data.data:
            entity = entity_type()
            entity.ParseFromString(entityData)
            role_id = entity.roleId
            enforcement_type = entity.enforcementType
            if enterprise_data.delete:
                # delete event: drop only this enforcement type from the role
                if role_id in entity_map:
                    enforcements = entity_map[role_id]['enforcements']
                    if enforcement_type in enforcements:
                        enforcements.pop(enforcement_type)
            else:
                # add/update event: create the role entry on first sight
                keeper_entity = entity_map.get(role_id)
                if not keeper_entity:
                    keeper_entity = {
                        'role_id': role_id,
                        'enforcements': {}
                    }
                    entity_map[role_id] = keeper_entity
                enforcements = keeper_entity['enforcements']
                enforcements[enforcement_type] = entity.value
        # rebuild the shared list in place so other holders of the reference see the update
        entities.clear()
        entities.extend(entity_map.values())

    def get_entity_type(self):
        return proto.RoleEnforcement

    def get_keeper_entity_name(self):  # type: () -> str
        return 'role_enforcements'

    def to_keeper_entity(self, proto_entity, keeper_entity):
        # not used: parse() handles the aggregation itself
        pass
class _EnterpriseManagedCompanyEntity(_EnterpriseEntity):
    """Parses proto.ManagedCompany messages into keeper 'managed_companies' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.ManagedCompany, dict) -> None
        _set_or_remove(keeper_entity, 'mc_enterprise_id', proto_entity.mcEnterpriseId)
        _set_or_remove(keeper_entity, 'mc_enterprise_name', proto_entity.mcEnterpriseName)
        _set_or_remove(keeper_entity, 'msp_node_id', proto_entity.mspNodeId)
        _set_or_remove(keeper_entity, 'number_of_seats', proto_entity.numberOfSeats)
        _set_or_remove(keeper_entity, 'number_of_users', proto_entity.numberOfUsers)
        _set_or_remove(keeper_entity, 'product_id', proto_entity.productId)
        # NOTE(review): 'paused' is populated from the isExpired field — confirm the mapping is intentional
        _set_or_remove(keeper_entity, 'paused', proto_entity.isExpired)
        _set_or_remove(keeper_entity, 'tree_key', proto_entity.treeKey if proto_entity.treeKey else None)
        # NOTE(review): snake_case access (tree_key_role) unlike surrounding camelCase
        # fields — presumably matches the .proto field name; verify before changing.
        _set_or_remove(keeper_entity, 'tree_key_role', proto_entity.tree_key_role)
        _set_or_remove(keeper_entity, 'file_plan_type', proto_entity.filePlanType)
        if proto_entity.addOns:
            _set_or_remove(keeper_entity, 'add_ons', [{
                'name': x.name,
                'enabled': x.enabled,
                'is_trial': x.isTrial,
                'created': x.created,
                'expiration': x.expiration,
            } for x in proto_entity.addOns])

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('mc_enterprise_id')

    def get_proto_entity_id(self, entity):  # type: (proto.ManagedCompany) -> any
        return entity.mcEnterpriseId

    def get_entity_type(self):
        return proto.ManagedCompany

    def get_keeper_entity_name(self):  # type: () -> str
        return 'managed_companies'
class _EnterpriseQueuedTeamUserEntity(_EnterpriseDataParser):
    """Accumulates proto.QueuedTeamUser messages into per-team user-id sets.

    Each keeper entry holds one team_uid and the set of queued enterprise user ids.
    """

    def parse(self, params, enterprise_data, **kwargs):  # type: (KeeperParams, proto.EnterpriseData, dict) -> None
        entities = self.get_entities(params)
        # index the existing list by team_uid for in-place merge
        entity_map = {x['team_uid']: x for x in entities}
        entity_type = self.get_entity_type()
        for entityData in enterprise_data.data:
            entity = entity_type()
            entity.ParseFromString(entityData)
            team_uid = utils.base64_url_encode(entity.teamUid)
            if enterprise_data.delete:
                # delete event: remove only the listed users from the team's set
                if team_uid in entity_map:
                    users = entity_map[team_uid]['users']  # type: set
                    users.difference_update(entity.users)
            else:
                # add event: create the team entry on first sight, then merge users
                keeper_entity = entity_map.get(team_uid)
                if not keeper_entity:
                    keeper_entity = {
                        'team_uid': team_uid,
                        'users': set()
                    }
                    entity_map[team_uid] = keeper_entity
                users = keeper_entity['users']
                users.update(entity.users)
        # rebuild the shared list in place so other holders of the reference see the update
        entities.clear()
        entities.extend(entity_map.values())

    def get_entity_type(self):
        return proto.QueuedTeamUser

    def get_keeper_entity_name(self):  # type: () -> str
        return 'queued_team_users'

    def to_keeper_entity(self, proto_entity, keeper_entity):
        # not used: parse() handles the aggregation itself
        pass
class _EnterpriseAdminApprovalRequestEntity(_EnterpriseEntity):
    """Parses proto.DeviceRequestForAdminApproval messages into keeper dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):
        # type: (proto.DeviceRequestForAdminApproval, dict) -> None
        for key, value in (
            ('enterprise_user_id', proto_entity.enterpriseUserId),
            ('encrypted_device_token', utils.base64_url_encode(proto_entity.encryptedDeviceToken)),
            ('device_id', proto_entity.deviceId),
            ('device_public_key', utils.base64_url_encode(proto_entity.devicePublicKey)),
            ('device_name', proto_entity.deviceName),
            ('client_version', proto_entity.clientVersion),
            ('device_type', proto_entity.deviceType),
            ('date', proto_entity.date),
            ('ip_address', proto_entity.ipAddress),
            ('location', proto_entity.location),
            ('email', proto_entity.email),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        # composite key: a request is identified by user + device
        return '{0}:{1}'.format(entity.get('enterprise_user_id'), entity.get('device_id'))

    def get_proto_entity_id(self, entity):  # type: (proto.DeviceRequestForAdminApproval) -> any
        return '{0}:{1}'.format(entity.enterpriseUserId, entity.deviceId)

    def get_entity_type(self):
        return proto.DeviceRequestForAdminApproval

    def get_keeper_entity_name(self):  # type: () -> str
        return 'devices_request_for_admin_approval'
class _EnterpriseSsoServiceEntity(_EnterpriseEntity):
    """Parses proto.SsoService messages into keeper 'sso_services' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.SsoService, dict) -> None
        for key, value in (
            ('sso_service_provider_id', self.get_proto_entity_id(proto_entity)),
            ('node_id', proto_entity.nodeId),
            ('name', proto_entity.name),
            # NOTE(review): snake_case access (sp_url) — presumably matches the .proto field name
            ('sp_url', proto_entity.sp_url),
            ('invite_new_users', proto_entity.inviteNewUsers),
            ('active', proto_entity.active),
            ('is_cloud', proto_entity.isCloud),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('sso_service_provider_id')

    def get_proto_entity_id(self, entity):  # type: (proto.SsoService) -> any
        return entity.ssoServiceProviderId

    def get_entity_type(self):
        return proto.SsoService

    def get_keeper_entity_name(self):  # type: () -> str
        return 'sso_services'
class _EnterpriseBridgeEntity(_EnterpriseEntity):
    """Parses proto.Bridge messages into keeper 'bridges' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.Bridge, dict) -> None
        for key, value in (
            ('bridge_id', self.get_proto_entity_id(proto_entity)),
            ('node_id', proto_entity.nodeId),
            ('wan_ip_enforcement', proto_entity.wanIpEnforcement),
            ('lan_ip_enforcement', proto_entity.lanIpEnforcement),
            ('status', proto_entity.status),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('bridge_id')

    def get_proto_entity_id(self, entity):  # type: (proto.Bridge) -> any
        return entity.bridgeId

    def get_entity_type(self):
        return proto.Bridge

    def get_keeper_entity_name(self):  # type: () -> str
        return 'bridges'
class _EnterpriseEmailProvisionEntity(_EnterpriseEntity):
    """Parses proto.EmailProvision messages into keeper 'email_provision' dictionaries."""

    def to_keeper_entity(self, proto_entity, keeper_entity):  # type: (proto.EmailProvision, dict) -> None
        for key, value in (
            ('id', self.get_proto_entity_id(proto_entity)),
            ('node_id', proto_entity.nodeId),
            ('domain', proto_entity.domain),
            ('method', proto_entity.method),
        ):
            _set_or_remove(keeper_entity, key, value)

    def get_keeper_entity_id(self, entity):  # type: (dict) -> any
        return entity.get('id')

    def get_proto_entity_id(self, entity):  # type: (proto.EmailProvision) -> any
        return entity.id

    def get_entity_type(self):
        return proto.EmailProvision

    def get_keeper_entity_name(self):  # type: () -> str
        return 'email_provision'
|
nilq/baby-python
|
python
|
"""
Given a non-empty integer array of size n, find the minimum number of moves required to make all array elements equal, where a move is incrementing n - 1 elements by 1.
Example:
Input:
[1,2,3]
Output:
3
Explanation:
Only three moves are needed (remember each move increments two elements):
[1,2,3] => [2,3,3] => [3,4,3] => [4,4,4]
"""
class Solution(object):
    def minMoves(self, nums):
        """Minimum moves to equalize the array when one move increments n-1 elements.

        Incrementing n-1 elements by 1 is equivalent to decrementing the single
        remaining element by 1 (only relative differences matter). So the answer
        is the total number of single-element decrements needed to bring every
        element down to the minimum: sum(x - min) = sum(nums) - n * min(nums).
        This is O(n) and needs no sorting, unlike the original grouped-sort scan.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            # preserve original behavior: an empty array needs zero moves
            return 0
        return sum(nums) - len(nums) * min(nums)
|
nilq/baby-python
|
python
|
from __future__ import annotations
import os
import warnings
from datetime import datetime
from pathlib import Path
from typing import (
Any,
Dict,
List,
Optional,
Tuple,
Union,
Sequence,
Iterable,
)
import pydantic
from .managers import ManagerQueryBody, ComputeManager
from .metadata_models import QueryMetadata, UpdateMetadata
from .molecules import Molecule, MoleculeIdentifiers, MoleculeQueryBody, MoleculeModifyBody
from .permissions import (
UserInfo,
RoleInfo,
is_valid_username,
is_valid_password,
is_valid_rolename,
)
from .records import (
RecordStatusEnum,
PriorityEnum,
RecordQueryBody,
RecordModifyBody,
RecordDeleteURLParameters,
RecordUndeleteURLParameters,
AllRecordTypes,
AllDataModelTypes,
)
from .records.gridoptimization import (
GridoptimizationKeywords,
GridoptimizationAddBody,
GridoptimizationRecord,
)
from .records.optimization import (
OptimizationProtocols,
OptimizationRecord,
OptimizationQueryBody,
OptimizationQCInputSpecification,
OptimizationInputSpecification,
OptimizationAddBody,
)
from .records.singlepoint import (
SinglepointRecord,
SinglepointAddBody,
SinglepointQueryBody,
SinglepointDriver,
SinglepointProtocols,
)
from .records.torsiondrive import (
TorsiondriveKeywords,
TorsiondriveAddBody,
TorsiondriveRecord,
TorsiondriveQueryBody,
)
from .serverinfo import (
AccessLogQueryParameters,
AccessLogQuerySummaryParameters,
ErrorLogQueryParameters,
ServerStatsQueryParameters,
DeleteBeforeDateParameters,
)
from .base_models import (
CommonGetURLParametersName,
CommonGetProjURLParameters,
CommonGetURLParameters,
CommonDeleteURLParameters,
)
from .cache import PortalCache
from .client_base import PortalClientBase, PortalRequestError
from .keywords import KeywordSet
from .metadata_models import InsertMetadata, DeleteMetadata, UndeleteMetadata
from .utils import make_list, make_str
# TODO : built-in query limit chunking, progress bars, fs caching and invalidation
class PortalClient(PortalClientBase):
    def __init__(
        self,
        address: str = "api.qcarchive.molssi.org:443",
        username: Optional[str] = None,
        password: Optional[str] = None,
        verify: bool = True,
        cache: Optional[Union[str, Path]] = None,
        max_memcache_size: Optional[int] = 1000000,
    ) -> None:
        """Initializes a PortalClient instance from an address and verification information.

        Parameters
        ----------
        address
            The IP and port of the FractalServer instance ("192.168.1.1:8888")
        username
            The username to authenticate with.
        password
            The password to authenticate with.
        verify
            Verifies the SSL connection with a third party server. This may be False if a
            FractalServer was not provided a SSL certificate and defaults back to self-signed
            SSL keys.
        cache
            Path to directory to use for cache.
            If None, only in-memory caching used.
        max_memcache_size
            Number of items to hold in client's memory cache.
            Increase this value to improve performance for repeated calls,
            at the cost of higher memory usage.
        """

        # the base class sets up the connection/credential state; the cache layer
        # is attached afterwards and holds a back-reference to this client
        PortalClientBase.__init__(self, address, username, password, verify)
        self._cache = PortalCache(self, cachedir=cache, max_memcache_size=max_memcache_size)
def __repr__(self) -> str:
"""A short representation of the current PortalClient.
Returns
-------
str
The desired representation.
"""
ret = "PortalClient(server_name='{}', address='{}', username='{}', cache='{}')".format(
self.server_name, self.address, self.username, self.cache
)
return ret
def _repr_html_(self) -> str:
output = f"""
<h3>PortalClient</h3>
<ul>
<li><b>Server: </b>{self.server_name}</li>
<li><b>Address: </b>{self.address}</li>
<li><b>Username: </b>{self.username}</li>
<li><b>Cache: </b>{self.cache}</li>
</ul>
"""
# postprocess due to raw spacing above
return "\n".join([substr.strip() for substr in output.split("\n")])
def recordmodel_from_datamodel(self, data: Sequence[Optional[AllDataModelTypes]]) -> List[Optional[AllRecordTypes]]:
record_init = [
{"client": self, "record_type": d.record_type, "raw_data": d} if d is not None else None for d in data
]
return pydantic.parse_obj_as(List[Optional[AllRecordTypes]], record_init)
@property
def cache(self):
if self._cache.cachedir is not None:
return os.path.relpath(self._cache.cachedir)
else:
return None
    def _get_with_cache(self, func, id, missing_ok, entity_type, include=None):
        """Fetch entities by id, consulting the local cache before calling *func*.

        Parameters
        ----------
        func
            Callable taking a request payload and returning (results, to_cache) dicts
            keyed by string id.
        id
            A single id or a list of ids (ints or strings).
        missing_ok
            When False, raise KeyError if any requested id is absent from the results.
        entity_type
            Cache namespace for these entities.
        include
            Optional list of fields to request; when given, the cache is bypassed
            entirely and results are not cached.

        Returns
        -------
        A single entity (or None) when *id* is scalar, else a list ordered like *id*.
        """
        str_id = make_str(id)
        ids = make_list(str_id)

        # pass through the cache first
        # remove any ids that were found in cache
        # if `include` filters passed, don't use cache, just query DB, as it's often faster
        # for a few fields
        if include is None:
            cached = self._cache.get(ids, entity_type=entity_type)
        else:
            cached = {}

        for i in cached:
            ids.remove(i)

        # if all ids found in cache, no need to go further
        if len(ids) == 0:
            if isinstance(id, list):
                return [cached[i] for i in str_id]
            else:
                return cached[str_id]

        # molecule getting does *not* support "include"
        if include is None:
            payload = {
                "data": {"id": ids},
            }
        else:
            # the id field must always be present for result ordering/merging
            if "id" not in include:
                include.append("id")

            payload = {
                "meta": {"include": include},
                "data": {"id": ids},
            }

        results, to_cache = func(payload)

        # we only cache if no field filtering was done
        if include is None:
            self._cache.put(to_cache, entity_type=entity_type)

        # combine cached records with queried results
        results.update(cached)

        # check that we have results for all ids asked for
        missing = set(make_list(str_id)) - set(results.keys())

        if missing and not missing_ok:
            raise KeyError(f"No objects found for `id`: {missing}")

        # order the results by input id list
        if isinstance(id, list):
            ordered = [results.get(i, None) for i in str_id]
        else:
            ordered = results.get(str_id, None)

        return ordered
    # TODO - needed?
    def _query_cache(self):
        """Placeholder (unimplemented); intended role unclear — see TODO above."""
        pass
def get_server_information(self) -> Dict[str, Any]:
"""Request general information about the server
Returns
-------
:
Server information.
"""
# Request the info, and store here for later use
return self._auto_request("get", "v1/information", None, None, Dict[str, Any], None, None)
##############################################################
# Molecules
##############################################################
def get_molecules(
self,
id: Union[int, Sequence[int]],
missing_ok: bool = False,
) -> Union[Optional[Molecule], List[Optional[Molecule]]]:
"""Obtains molecules from the server via molecule ids
Parameters
----------
id
An id or list of ids to query.
missing_ok
If True, return ``None`` for ids that were not found on the server.
If False, raise ``KeyError`` if any ids were not found on the server.
Returns
-------
:
The requested molecules, in the same order as the requested ids.
If given a list of ids, the return value will be a list.
Otherwise, it will be a single Molecule.
"""
url_params = {"id": make_list(id), "missing_ok": missing_ok}
mols = self._auto_request(
"get", "v1/molecule", None, CommonGetURLParameters, List[Optional[Molecule]], None, url_params
)
if isinstance(id, Sequence):
return mols
else:
return mols[0]
# TODO: we would like more fields to be queryable via the REST API for mols
# e.g. symbols/elements. Unless these are indexed might not be performant.
# TODO: what was paginate: bool = False for?
def query_molecules(
self,
molecule_hash: Optional[Union[str, Iterable[str]]] = None,
molecular_formula: Optional[Union[str, Iterable[str]]] = None,
identifiers: Optional[Dict[str, Union[str, Iterable[str]]]] = None,
limit: Optional[int] = None,
skip: int = 0,
) -> List[Molecule]:
"""Query molecules by attributes.
All matching molecules, up to the lower of `limit` or the server's
maximum result count, will be returned.
The return list will be in an indeterminate order
Parameters
----------
molecule_hash
Queries molecules by hash
molecular_formula
Queries molecules by molecular formula
Molecular formulas are not order-sensitive (e.g. "H2O == OH2 != Oh2").
identifiers
Additional identifiers to search for (smiles, etc)
limit
The maximum number of Molecules to query.
skip
The number of Molecules to skip in the query, used during pagination
"""
if limit is not None and limit > self.api_limits["get_molecules"]:
warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
limit = min(limit, self.api_limits["get_molecules"])
query_body = {
"molecule_hash": make_list(molecule_hash),
"molecular_formula": make_list(molecular_formula),
"limit": limit,
"skip": skip,
}
if identifiers is not None:
query_body["identifiers"] = {k: make_list(v) for k, v in identifiers.items()}
meta, molecules = self._auto_request(
"post", "v1/molecule/query", MoleculeQueryBody, None, Tuple[QueryMetadata, List[Molecule]], query_body, None
)
return meta, molecules
    def add_molecules(self, molecules: Sequence[Molecule]) -> Tuple[InsertMetadata, List[int]]:
        """Add molecules to the server.

        Parameters
        ----------
        molecules
            A list of Molecules to add to the server.

        Returns
        -------
        :
            Insertion metadata, and a list of Molecule ids in the same order as
            the `molecules` parameter.

        Raises
        ------
        RuntimeError
            If more molecules are given than the server's add limit allows.
        """

        if len(molecules) > self.api_limits["add_molecules"]:
            raise RuntimeError(
                f"Cannot add {len(molecules)} molecules - over the limit of {self.api_limits['add_molecules']}"
            )

        mols = self._auto_request(
            "post",
            "v1/molecule",
            List[Molecule],
            None,
            Tuple[InsertMetadata, List[int]],
            make_list(molecules),
            None,
        )
        return mols
def modify_molecule(
self,
id: int,
name: Optional[str] = None,
comment: Optional[str] = None,
identifiers: Optional[Union[Dict[str, Any], MoleculeIdentifiers]] = None,
overwrite_identifiers: bool = False,
) -> UpdateMetadata:
"""
Modify molecules on the server
This is only capable of updating the name, comment, and identifiers fields (except molecule_hash
and molecular formula).
If a molecule with that id does not exist, an exception is raised
Parameters
----------
id
Molecule ID of the molecule to modify
name
New name for the molecule. If None, name is not changed.
comment
New comment for the molecule. If None, comment is not changed
identifiers
A new set of identifiers for the molecule
overwrite_identifiers
If True, the identifiers of the molecule are set to be those given exactly (ie, identifiers
that exist in the DB but not in the new set will be removed). Otherwise, the new set of
identifiers is merged into the existing ones. Note that molecule_hash and molecular_formula
are never removed.
Returns
-------
:
Metadata about the modification/update.
"""
body = {
"name": name,
"comment": comment,
"identifiers": identifiers,
"overwrite_identifiers": overwrite_identifiers,
}
return self._auto_request("patch", f"v1/molecule/{id}", MoleculeModifyBody, None, UpdateMetadata, body, None)
def delete_molecules(self, id: Union[int, Sequence[int]]) -> DeleteMetadata:
    """Deletes molecules from the server

    NOTE(review): the original note here referred to "keywords" - likely a
    copy/paste from the keyword deletion method. Presumably the server refuses
    to delete molecules still referenced by records; confirm server semantics.

    Parameters
    ----------
    id
        An id or list of molecule ids to delete.

    Returns
    -------
    :
        Metadata about what was deleted
    """
    url_params = {"id": make_list(id)}
    return self._auto_request(
        "delete", "v1/molecule", None, CommonDeleteURLParameters, DeleteMetadata, None, url_params
    )
##############################################################
# Keywords
##############################################################
def get_keywords(
    self,
    keywords_id: Union[int, Sequence[int]],
    missing_ok: bool = False,
) -> Union[Optional[KeywordSet], List[Optional[KeywordSet]]]:
    """Obtains keyword sets from the server via their ids.

    Parameters
    ----------
    keywords_id
        An id or list of ids to query.
    missing_ok
        If True, return ``None`` for ids that were not found on the server.
        If False, raise ``KeyError`` if any ids were not found on the server.

    Returns
    -------
    :
        The requested keywords, in the same order as the requested ids. A list
        input yields a list; a single id yields a single KeywordSet.
    """
    ids = make_list(keywords_id)
    if len(ids) > self.api_limits["get_keywords"]:
        raise RuntimeError(
            f"Cannot get {len(ids)} keywords - over the limit of {self.api_limits['get_keywords']}"
        )
    url_params = {"id": ids, "missing_ok": missing_ok}
    keywords = self._auto_request(
        "get", "v1/keyword", None, CommonGetURLParameters, List[Optional[KeywordSet]], None, url_params
    )
    # Mirror the caller's input shape: list in, list out
    return keywords if isinstance(keywords_id, Sequence) else keywords[0]
def add_keywords(self, keywords: Sequence[KeywordSet]) -> Tuple[InsertMetadata, List[int]]:
    """Adds keyword sets to the server.

    This function is not expected to be used by end users.

    Parameters
    ----------
    keywords
        A list of KeywordSet objects to add to the server.

    Returns
    -------
    :
        Insertion metadata, and a list of KeywordSet ids that were added or that
        already existed on the server, in the same order as the ``keywords``
        parameter.

    Raises
    ------
    RuntimeError
        If more keyword sets are given than the server's ``add_keywords`` API
        limit allows.
    """
    # BUG FIX: previously compared against api_limits["add_molecules"] while the
    # error message reported the "add_keywords" limit
    if len(keywords) > self.api_limits["add_keywords"]:
        raise RuntimeError(
            f"Cannot add {len(keywords)} keywords - over the limit of {self.api_limits['add_keywords']}"
        )
    return self._auto_request(
        "post", "v1/keyword", List[KeywordSet], None, Tuple[InsertMetadata, List[int]], make_list(keywords), None
    )
def _delete_keywords(self, keywords_id: Union[int, Sequence[int]]) -> DeleteMetadata:
    """Deletes keyword sets from the server (internal use).

    This will not delete any keywords that are in use.

    Parameters
    ----------
    keywords_id
        An id or list of ids to delete.

    Returns
    -------
    :
        Metadata about what was deleted
    """
    params = {"id": make_list(keywords_id)}
    return self._auto_request(
        "delete", "v1/keyword", None, CommonDeleteURLParameters, DeleteMetadata, None, params
    )
##############################################################
# General record functions
##############################################################
def get_records(
    self,
    record_id: Union[int, Sequence[int]],
    missing_ok: bool = False,
    *,
    include_task: bool = False,
    include_service: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
) -> Union[List[Optional[AllRecordTypes]], Optional[AllRecordTypes]]:
    """Get result records by id."""
    # An empty list of ids requires no round trip at all
    if isinstance(record_id, Sequence) and not record_id:
        return []
    ids = make_list(record_id)
    if len(ids) > self.api_limits["get_records"]:
        raise RuntimeError(
            f"Cannot get {len(ids)} records - over the limit of {self.api_limits['get_records']}"
        )
    url_params = {"id": ids, "missing_ok": missing_ok}
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_service, ("service",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        url_params["include"] = include
    record_data = self._auto_request(
        "get",
        "v1/record",
        None,
        CommonGetProjURLParameters,
        List[Optional[AllDataModelTypes]],
        None,
        url_params,
    )
    records = self.recordmodel_from_datamodel(record_data)
    # Mirror the caller's input shape: list in, list out
    return records if isinstance(record_id, Sequence) else records[0]
def query_records(
    self,
    record_id: Optional[Iterable[int]] = None,
    record_type: Optional[Iterable[str]] = None,
    manager_name: Optional[Iterable[str]] = None,
    status: Optional[Iterable[RecordStatusEnum]] = None,
    created_before: Optional[datetime] = None,
    created_after: Optional[datetime] = None,
    modified_before: Optional[datetime] = None,
    modified_after: Optional[datetime] = None,
    limit: int = None,
    skip: int = 0,
    *,
    include_task: bool = False,
    include_service: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
) -> Tuple[QueryMetadata, List[AllRecordTypes]]:
    """Query records of any type by the given criteria.

    Returns query metadata and the matching records.
    """
    # Clamp the limit to what the server will actually honor
    if limit is not None and limit > self.api_limits["get_records"]:
        warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
        limit = min(limit, self.api_limits["get_records"])
    query_data = dict(
        record_id=make_list(record_id),
        record_type=make_list(record_type),
        manager_name=make_list(manager_name),
        status=make_list(status),
        created_before=created_before,
        created_after=created_after,
        modified_before=modified_before,
        modified_after=modified_after,
        limit=limit,
        skip=skip,
    )
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_service, ("service",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        query_data["include"] = include
    meta, record_data = self._auto_request(
        "post",
        "v1/record/query",
        RecordQueryBody,
        None,
        Tuple[QueryMetadata, List[AllDataModelTypes]],
        query_data,
        None,
    )
    return meta, self.recordmodel_from_datamodel(record_data)
def cancel_records(self, record_id: Union[int, Sequence[int]]) -> UpdateMetadata:
    """Marks the given records as cancelled on the server."""
    payload = {"record_id": make_list(record_id), "status": RecordStatusEnum.cancelled}
    return self._auto_request("patch", "v1/record", RecordModifyBody, None, UpdateMetadata, payload, None)
def reset_records(self, record_id: Union[int, Sequence[int]]) -> UpdateMetadata:
    """Resets the given records back to the waiting state."""
    payload = {"record_id": make_list(record_id), "status": RecordStatusEnum.waiting}
    return self._auto_request("patch", "v1/record", RecordModifyBody, None, UpdateMetadata, payload, None)
def delete_records(
    self, record_id: Union[int, Sequence[int]], soft_delete: bool = True, delete_children: bool = True
) -> DeleteMetadata:
    """Deletes records from the server.

    Parameters
    ----------
    record_id
        An id or list of record ids to delete
    soft_delete
        If True, records are marked deleted rather than removed outright --
        presumably restorable via ``undelete_records``; confirm server semantics
    delete_children
        If True, child records are deleted as well

    Returns
    -------
    :
        Metadata about what was deleted
    """
    url_params = {"record_id": make_list(record_id), "soft_delete": soft_delete, "delete_children": delete_children}
    return self._auto_request(
        "delete", "v1/record", None, RecordDeleteURLParameters, DeleteMetadata, None, url_params
    )
def undelete_records(self, record_id: Union[int, Sequence[int]]) -> UndeleteMetadata:
    """Restores previously (soft-)deleted records on the server."""
    params = {"record_id": make_list(record_id)}
    return self._auto_request(
        "post", "v1/record/undelete", None, RecordUndeleteURLParameters, UndeleteMetadata, None, params
    )
def modify_records(
    self,
    record_id: Union[int, Sequence[int]],
    new_tag: Optional[str] = None,
    new_priority: Optional[PriorityEnum] = None,
    delete_tag: bool = False,
) -> UpdateMetadata:
    """Modifies the tag and/or priority of the given records.

    Parameters
    ----------
    record_id
        An id or list of record ids to modify
    new_tag
        New compute tag, or None to leave unchanged
    new_priority
        New priority, or None to leave unchanged
    delete_tag
        If True, remove the tag from the records

    Returns
    -------
    :
        Metadata about which records were updated
    """
    payload = dict(
        record_id=make_list(record_id),
        tag=new_tag,
        priority=new_priority,
        delete_tag=delete_tag,
    )
    return self._auto_request("patch", "v1/record", RecordModifyBody, None, UpdateMetadata, payload, None)
def add_comment(self, record_id: Union[int, Sequence[int]], comment: str) -> UpdateMetadata:
    """
    Adds a comment to records

    Parameters
    ----------
    record_id
        The record or records to add the comments to
    comment
        The comment string to add. Your username will be added automatically

    Returns
    -------
    :
        Metadata about which records were updated
    """
    body_data = {
        "record_id": make_list(record_id),
        "comment": comment,
    }
    return self._auto_request("patch", "v1/record", RecordModifyBody, None, UpdateMetadata, body_data, None)
##############################################################
# Singlepoint calculations
##############################################################
def add_singlepoints(
    self,
    molecules: Union[int, Molecule, List[Union[int, Molecule]]],
    program: str,
    driver: str,
    method: str,
    basis: Optional[str],
    keywords: Optional[Union[KeywordSet, Dict[str, Any], int]] = None,
    protocols: Optional[Union[SinglepointProtocols, Dict[str, Any]]] = None,
    tag: Optional[str] = None,
    priority: PriorityEnum = PriorityEnum.normal,
) -> Tuple[InsertMetadata, List[int]]:
    """
    Adds a "single" compute to the server.

    Parameters
    ----------
    molecules
        The Molecules or Molecule ids to compute with the above methods
    program
        The computational program to execute the result with (e.g., "rdkit", "psi4").
    driver
        The primary result that the compute will acquire {"energy", "gradient", "hessian", "properties"}
    method
        The computational method to use (e.g., "B3LYP", "PBE")
    basis
        The basis to apply to the computation (e.g., "cc-pVDZ", "6-31G")
    keywords
        The keywords (KeywordSet, plain dict, or KeywordSet id) to use with the given compute
    protocols
        Protocols for storing more or less data per field
    tag
        The computational tag added to the compute; managers can optionally pull
        only certain tags. Tags are arbitrary strings (e.g. "large"/"small",
        "project1"/"project2").
    priority
        The priority of the job

    Returns
    -------
    :
        Insertion metadata, and a list of record ids (one per molecule) that were
        added or already existed on the server, in the same order as ``molecules``.
    """
    specification: Dict[str, Any] = {
        "program": program,
        "driver": driver,
        "method": method,
        "basis": basis,
    }
    # A plain dict is wrapped into a KeywordSet before sending
    if isinstance(keywords, dict):
        keywords = KeywordSet(values=keywords)
    # If left as None, the pydantic models supply the defaults
    if keywords is not None:
        specification["keywords"] = keywords
    if protocols is not None:
        specification["protocols"] = protocols
    mol_list = make_list(molecules)
    if len(mol_list) > self.api_limits["add_records"]:
        raise RuntimeError(
            f"Cannot add {len(mol_list)} records - over the limit of {self.api_limits['add_records']}"
        )
    body_data = {
        "molecules": mol_list,
        "specification": specification,
        "tag": tag,
        "priority": priority,
    }
    return self._auto_request(
        "post", "v1/record/singlepoint", SinglepointAddBody, None, Tuple[InsertMetadata, List[int]], body_data, None
    )
def get_singlepoints(
    self,
    record_id: Union[int, Sequence[int]],
    missing_ok: bool = False,
    *,
    include_task: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_molecule: bool = False,
    include_wavefunction: bool = False,
) -> Union[Optional[SinglepointRecord], List[Optional[SinglepointRecord]]]:
    """Obtain singlepoint records by id, optionally including extra fields."""
    ids = make_list(record_id)
    if len(ids) > self.api_limits["get_records"]:
        raise RuntimeError(
            f"Cannot get {len(ids)} records - over the limit of {self.api_limits['get_records']}"
        )
    url_params = {"id": ids, "missing_ok": missing_ok}
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
        (include_molecule, ("molecule",)),
        (include_wavefunction, ("wavefunction",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        url_params["include"] = include
    record_data = self._auto_request(
        "get",
        "v1/record/singlepoint",
        None,
        CommonGetProjURLParameters,
        List[Optional[SinglepointRecord._DataModel]],
        None,
        url_params,
    )
    records = self.recordmodel_from_datamodel(record_data)
    # Mirror the caller's input shape: list in, list out
    return records if isinstance(record_id, Sequence) else records[0]
def query_singlepoints(
    self,
    record_id: Optional[Iterable[int]] = None,
    manager_name: Optional[Iterable[str]] = None,
    status: Optional[Iterable[RecordStatusEnum]] = None,
    created_before: Optional[datetime] = None,
    created_after: Optional[datetime] = None,
    modified_before: Optional[datetime] = None,
    modified_after: Optional[datetime] = None,
    program: Optional[Iterable[str]] = None,
    driver: Optional[Iterable[SinglepointDriver]] = None,
    method: Optional[Iterable[str]] = None,
    basis: Optional[Iterable[Optional[str]]] = None,
    keywords_id: Optional[Iterable[int]] = None,
    molecule_id: Optional[Iterable[int]] = None,
    limit: Optional[int] = None,
    skip: int = 0,
    *,
    include_task: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_molecule: bool = False,
    include_wavefunction: bool = False,
) -> Tuple[QueryMetadata, List[SinglepointRecord]]:
    """Queries SinglepointRecords from the server.

    All query criteria are combined (AND). The ``include_*`` keyword flags request
    additional fields alongside the defaults.

    Returns
    -------
    :
        Metadata about the query, and the matching singlepoint records.
    """
    # Clamp the limit to what the server will actually honor
    if limit is not None and limit > self.api_limits["get_records"]:
        warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
        limit = min(limit, self.api_limits["get_records"])
    query_data = {
        "record_id": make_list(record_id),
        "manager_name": make_list(manager_name),
        "status": make_list(status),
        "program": make_list(program),
        "driver": make_list(driver),
        "method": make_list(method),
        "basis": make_list(basis),
        "keywords_id": make_list(keywords_id),
        "molecule_id": make_list(molecule_id),
        "created_before": created_before,
        "created_after": created_after,
        "modified_before": modified_before,
        "modified_after": modified_after,
        "limit": limit,
        "skip": skip,
    }
    include = set()
    # We must add '*' so that all the default fields are included
    if include_task:
        include |= {"*", "task"}
    if include_outputs:
        include |= {"*", "compute_history.*", "compute_history.outputs"}
    if include_comments:
        include |= {"*", "comments"}
    if include_molecule:
        include |= {"*", "molecule"}
    if include_wavefunction:
        # BUG FIX: was misspelled "wavefuntion", so the wavefunction field was
        # never actually requested (compare get_singlepoints)
        include |= {"*", "wavefunction"}
    if include:
        query_data["include"] = include
    meta, record_data = self._auto_request(
        "post",
        "v1/record/singlepoint/query",
        SinglepointQueryBody,
        None,
        Tuple[QueryMetadata, List[SinglepointRecord._DataModel]],
        query_data,
        None,
    )
    return meta, self.recordmodel_from_datamodel(record_data)
##############################################################
# Optimization calculations
##############################################################
def add_optimizations(
    self,
    initial_molecules: Union[int, Molecule, List[Union[int, Molecule]]],
    program: str,
    qc_specification: OptimizationQCInputSpecification,
    keywords: Optional[Union[KeywordSet, Dict[str, Any], int]] = None,
    protocols: Optional[OptimizationProtocols] = None,
    tag: Optional[str] = None,
    priority: PriorityEnum = PriorityEnum.normal,
) -> Tuple[InsertMetadata, List[int]]:
    """
    Adds optimization calculations to the server.

    Parameters
    ----------
    initial_molecules
        Starting Molecules or molecule ids (one optimization per molecule)
    program
        The optimization program (e.g., "geometric")
    qc_specification
        The specification for the underlying QC computations
    keywords
        Keywords for the optimization program. If None, the pydantic model defaults apply.
    protocols
        Protocols for storing more or less optimization data. If None, the defaults apply.
    tag
        The computational tag to attach to the new records
    priority
        The priority of the jobs

    Returns
    -------
    :
        Insertion metadata, and a list of record ids in the same order as
        ``initial_molecules``.

    Raises
    ------
    RuntimeError
        If more molecules are given than the server's ``add_records`` API limit allows.
    """
    body_data = {
        "initial_molecules": make_list(initial_molecules),
        "specification": {
            "program": program,
            "qc_specification": qc_specification,
        },
        "tag": tag,
        "priority": priority,
    }
    # If these are None, then let the pydantic models handle the defaults
    if keywords is not None:
        body_data["specification"]["keywords"] = keywords
    if protocols is not None:
        body_data["specification"]["protocols"] = protocols
    if len(body_data["initial_molecules"]) > self.api_limits["add_records"]:
        # BUG FIX: message previously said "Cannot get" for an add operation
        raise RuntimeError(
            f"Cannot add {len(body_data['initial_molecules'])} records - over the limit of {self.api_limits['add_records']}"
        )
    return self._auto_request(
        "post",
        "v1/record/optimization",
        OptimizationAddBody,
        None,
        Tuple[InsertMetadata, List[int]],
        body_data,
        None,
    )
def get_optimizations(
    self,
    record_id: Union[int, Sequence[int]],
    missing_ok: bool = False,
    *,
    include_task: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_initial_molecule: bool = False,
    include_final_molecule: bool = False,
    include_trajectory: bool = False,
) -> Union[Optional[OptimizationRecord], List[Optional[OptimizationRecord]]]:
    """Obtain optimization records by id, optionally including extra fields."""
    ids = make_list(record_id)
    if len(ids) > self.api_limits["get_records"]:
        raise RuntimeError(
            f"Cannot get {len(ids)} records - over the limit of {self.api_limits['get_records']}"
        )
    url_params = {"id": ids, "missing_ok": missing_ok}
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
        (include_initial_molecule, ("initial_molecule",)),
        (include_final_molecule, ("final_molecule",)),
        (include_trajectory, ("trajectory",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        url_params["include"] = include
    record_data = self._auto_request(
        "get",
        "v1/record/optimization",
        None,
        CommonGetProjURLParameters,
        List[Optional[OptimizationRecord._DataModel]],
        None,
        url_params,
    )
    records = self.recordmodel_from_datamodel(record_data)
    # Mirror the caller's input shape: list in, list out
    return records if isinstance(record_id, Sequence) else records[0]
def query_optimizations(
    self,
    record_id: Optional[Iterable[int]] = None,
    manager_name: Optional[Iterable[str]] = None,
    status: Optional[Iterable[RecordStatusEnum]] = None,
    created_before: Optional[datetime] = None,
    created_after: Optional[datetime] = None,
    modified_before: Optional[datetime] = None,
    modified_after: Optional[datetime] = None,
    program: Optional[Iterable[str]] = None,
    singlepoint_program: Optional[Iterable[str]] = None,
    singlepoint_method: Optional[Iterable[str]] = None,
    singlepoint_basis: Optional[Iterable[Optional[str]]] = None,
    singlepoint_keywords_id: Optional[Iterable[int]] = None,
    initial_molecule_id: Optional[Iterable[int]] = None,
    final_molecule_id: Optional[Iterable[int]] = None,
    limit: Optional[int] = None,
    skip: int = 0,
    *,
    include_task: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_initial_molecule: bool = False,
    include_final_molecule: bool = False,
    include_trajectory: bool = False,
) -> Tuple[QueryMetadata, List[OptimizationRecord]]:
    """Queries OptimizationRecords from the server."""
    # Clamp the limit to what the server will actually honor
    if limit is not None and limit > self.api_limits["get_records"]:
        warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
        limit = min(limit, self.api_limits["get_records"])
    query_data = dict(
        record_id=make_list(record_id),
        manager_name=make_list(manager_name),
        status=make_list(status),
        program=make_list(program),
        singlepoint_program=make_list(singlepoint_program),
        singlepoint_method=make_list(singlepoint_method),
        singlepoint_basis=make_list(singlepoint_basis),
        singlepoint_keywords_id=make_list(singlepoint_keywords_id),
        initial_molecule_id=make_list(initial_molecule_id),
        final_molecule_id=make_list(final_molecule_id),
        created_before=created_before,
        created_after=created_after,
        modified_before=modified_before,
        modified_after=modified_after,
        limit=limit,
        skip=skip,
    )
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
        (include_initial_molecule, ("initial_molecule",)),
        (include_final_molecule, ("final_molecule",)),
        (include_trajectory, ("trajectory",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        query_data["include"] = include
    meta, record_data = self._auto_request(
        "post",
        "v1/record/optimization/query",
        OptimizationQueryBody,
        None,
        Tuple[QueryMetadata, List[OptimizationRecord._DataModel]],
        query_data,
        None,
    )
    return meta, self.recordmodel_from_datamodel(record_data)
##############################################################
# Torsiondrive calculations
##############################################################
def add_torsiondrives(
    self,
    initial_molecules: List[List[Union[int, Molecule]]],
    program: str,
    optimization_specification: OptimizationInputSpecification,
    keywords: Union[TorsiondriveKeywords, Dict[str, Any]],
    tag: Optional[str] = None,
    priority: PriorityEnum = PriorityEnum.normal,
) -> Tuple[InsertMetadata, List[int]]:
    """
    Adds torsiondrive calculations to the server.

    Parameters
    ----------
    initial_molecules
        One list of starting Molecules/ids per torsiondrive
    program
        The torsiondrive program
    optimization_specification
        Specification for the constrained optimizations run by the service
    keywords
        Torsiondrive keywords (dihedrals, grid spacing, etc.)
    tag
        The computational tag to attach to the new records
    priority
        The priority of the jobs

    Returns
    -------
    :
        Insertion metadata, and a list of record ids in the same order as
        ``initial_molecules``.

    Raises
    ------
    RuntimeError
        If more entries are given than the server's ``add_records`` API limit allows.
    """
    body_data = {
        "initial_molecules": initial_molecules,
        "specification": {
            "program": program,
            "optimization_specification": optimization_specification,
            "keywords": keywords,
        },
        # Torsiondrives always run as a server-side service
        "as_service": True,
        "tag": tag,
        "priority": priority,
    }
    if len(body_data["initial_molecules"]) > self.api_limits["add_records"]:
        # BUG FIX: message previously said "Cannot get" for an add operation
        raise RuntimeError(
            f"Cannot add {len(body_data['initial_molecules'])} records - over the limit of {self.api_limits['add_records']}"
        )
    return self._auto_request(
        "post",
        "v1/record/torsiondrive",
        TorsiondriveAddBody,
        None,
        Tuple[InsertMetadata, List[int]],
        body_data,
        None,
    )
def get_torsiondrives(
    self,
    record_id: Union[int, Sequence[int]],
    missing_ok: bool = False,
    *,
    include_task: bool = False,
    include_service: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_initial_molecules: bool = False,
    include_optimizations: bool = False,
) -> Union[Optional[TorsiondriveRecord], List[Optional[TorsiondriveRecord]]]:
    """Obtain torsiondrive records by id, optionally including extra fields."""
    ids = make_list(record_id)
    if len(ids) > self.api_limits["get_records"]:
        raise RuntimeError(
            f"Cannot get {len(ids)} records - over the limit of {self.api_limits['get_records']}"
        )
    url_params = {"id": ids, "missing_ok": missing_ok}
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_service, ("service",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
        (include_initial_molecules, ("initial_molecules",)),
        (include_optimizations, ("optimizations",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        url_params["include"] = include
    record_data = self._auto_request(
        "get",
        "v1/record/torsiondrive",
        None,
        CommonGetProjURLParameters,
        List[Optional[TorsiondriveRecord._DataModel]],
        None,
        url_params,
    )
    records = self.recordmodel_from_datamodel(record_data)
    # Mirror the caller's input shape: list in, list out
    return records if isinstance(record_id, Sequence) else records[0]
def query_torsiondrives(
    self,
    record_id: Optional[Iterable[int]] = None,
    manager_name: Optional[Iterable[str]] = None,
    status: Optional[Iterable[RecordStatusEnum]] = None,
    created_before: Optional[datetime] = None,
    created_after: Optional[datetime] = None,
    modified_before: Optional[datetime] = None,
    modified_after: Optional[datetime] = None,
    program: Optional[Iterable[str]] = None,
    optimization_program: Optional[Iterable[str]] = None,
    singlepoint_program: Optional[Iterable[str]] = None,
    singlepoint_method: Optional[Iterable[str]] = None,
    singlepoint_basis: Optional[Iterable[Optional[str]]] = None,
    singlepoint_keywords_id: Optional[Iterable[int]] = None,
    initial_molecule_id: Optional[Iterable[int]] = None,
    limit: Optional[int] = None,
    skip: int = 0,
    *,
    include_task: bool = False,
    include_service: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_initial_molecules: bool = False,
    include_optimizations: bool = False,
) -> Tuple[QueryMetadata, List[TorsiondriveRecord]]:
    """Queries torsiondrive records from the server."""
    # Clamp the limit to what the server will actually honor
    if limit is not None and limit > self.api_limits["get_records"]:
        warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
        limit = min(limit, self.api_limits["get_records"])
    query_data = dict(
        record_id=make_list(record_id),
        manager_name=make_list(manager_name),
        status=make_list(status),
        program=make_list(program),
        optimization_program=make_list(optimization_program),
        singlepoint_program=make_list(singlepoint_program),
        singlepoint_method=make_list(singlepoint_method),
        singlepoint_basis=make_list(singlepoint_basis),
        singlepoint_keywords_id=make_list(singlepoint_keywords_id),
        initial_molecule_id=make_list(initial_molecule_id),
        created_before=created_before,
        created_after=created_after,
        modified_before=modified_before,
        modified_after=modified_after,
        limit=limit,
        skip=skip,
    )
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_task, ("task",)),
        (include_service, ("service",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
        (include_initial_molecules, ("initial_molecules",)),
        (include_optimizations, ("optimizations",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        query_data["include"] = include
    meta, record_data = self._auto_request(
        "post",
        "v1/record/torsiondrive/query",
        TorsiondriveQueryBody,
        None,
        Tuple[QueryMetadata, List[TorsiondriveRecord._DataModel]],
        query_data,
        None,
    )
    return meta, self.recordmodel_from_datamodel(record_data)
##############################################################
# Grid optimization calculations
##############################################################
def add_gridoptimizations(
    self,
    initial_molecules: Union[int, Molecule, Sequence[Union[int, Molecule]]],
    program: str,
    optimization_specification: OptimizationInputSpecification,
    keywords: Union[GridoptimizationKeywords, Dict[str, Any]],
    tag: Optional[str] = None,
    priority: PriorityEnum = PriorityEnum.normal,
) -> Tuple[InsertMetadata, List[int]]:
    """
    Adds gridoptimization calculations to the server.

    Parameters
    ----------
    initial_molecules
        Starting Molecule(s) or molecule id(s) (one gridoptimization per molecule)
    program
        The gridoptimization program
    optimization_specification
        Specification for the constrained optimizations run by the service
    keywords
        Gridoptimization keywords (scans, preoptimization, etc.)
    tag
        The computational tag to attach to the new records
    priority
        The priority of the jobs

    Returns
    -------
    :
        Insertion metadata, and a list of record ids in the same order as
        ``initial_molecules``.

    Raises
    ------
    RuntimeError
        If more molecules are given than the server's ``add_records`` API limit allows.
    """
    body_data = {
        # BUG FIX: the signature accepts a single int/Molecule, but the value was
        # not normalized with make_list, so len() below would fail for a single input
        "initial_molecules": make_list(initial_molecules),
        "specification": {
            "program": program,
            "optimization_specification": optimization_specification,
            "keywords": keywords,
        },
        "tag": tag,
        "priority": priority,
    }
    if len(body_data["initial_molecules"]) > self.api_limits["add_records"]:
        # BUG FIX: message previously said "Cannot get" for an add operation
        raise RuntimeError(
            f"Cannot add {len(body_data['initial_molecules'])} records - over the limit of {self.api_limits['add_records']}"
        )
    return self._auto_request(
        "post",
        "v1/record/gridoptimization",
        GridoptimizationAddBody,
        None,
        Tuple[InsertMetadata, List[int]],
        body_data,
        None,
    )
def get_gridoptimizations(
    self,
    record_id: Union[int, Sequence[int]],
    missing_ok: bool = False,
    *,
    include_service: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_initial_molecule: bool = False,
    include_starting_molecule: bool = False,
    include_optimizations: bool = False,
) -> Union[Optional[GridoptimizationRecord], List[Optional[GridoptimizationRecord]]]:
    """Obtain gridoptimization records by id, optionally including extra fields."""
    ids = make_list(record_id)
    if len(ids) > self.api_limits["get_records"]:
        raise RuntimeError(
            f"Cannot get {len(ids)} records - over the limit of {self.api_limits['get_records']}"
        )
    url_params = {"id": ids, "missing_ok": missing_ok}
    # '*' keeps the default fields in addition to any extras requested
    include = set()
    for wanted, fields in (
        (include_service, ("service",)),
        (include_outputs, ("compute_history.*", "compute_history.outputs")),
        (include_comments, ("comments",)),
        (include_initial_molecule, ("initial_molecule",)),
        (include_starting_molecule, ("starting_molecule",)),
        (include_optimizations, ("optimizations",)),
    ):
        if wanted:
            include.update(("*",) + fields)
    if include:
        url_params["include"] = include
    record_data = self._auto_request(
        "get",
        "v1/record/gridoptimization",
        None,
        CommonGetProjURLParameters,
        List[Optional[GridoptimizationRecord._DataModel]],
        None,
        url_params,
    )
    records = self.recordmodel_from_datamodel(record_data)
    # Mirror the caller's input shape: list in, list out
    return records if isinstance(record_id, Sequence) else records[0]
def query_gridoptimizations(
    self,
    record_id: Optional[Iterable[int]] = None,
    manager_name: Optional[Iterable[str]] = None,
    status: Optional[Iterable[RecordStatusEnum]] = None,
    created_before: Optional[datetime] = None,
    created_after: Optional[datetime] = None,
    modified_before: Optional[datetime] = None,
    modified_after: Optional[datetime] = None,
    program: Optional[Iterable[str]] = None,
    optimization_program: Optional[Iterable[str]] = None,
    singlepoint_program: Optional[Iterable[str]] = None,
    singlepoint_method: Optional[Iterable[str]] = None,
    singlepoint_basis: Optional[Iterable[Optional[str]]] = None,
    singlepoint_keywords_id: Optional[Iterable[int]] = None,
    initial_molecule_id: Optional[Iterable[int]] = None,
    limit: Optional[int] = None,
    skip: int = 0,
    *,
    include_task: bool = False,
    include_service: bool = False,
    include_outputs: bool = False,
    include_comments: bool = False,
    include_initial_molecule: bool = False,
    include_optimizations: bool = False,
) -> Tuple[QueryMetadata, List[GridoptimizationRecord]]:
    """Queries gridoptimization records from the server."""
    # Clamp the limit to what the server will actually honor
    if limit is not None and limit > self.api_limits["get_records"]:
        warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
        limit = min(limit, self.api_limits["get_records"])
    query_data = {
        "record_id": make_list(record_id),
        "manager_name": make_list(manager_name),
        "status": make_list(status),
        "program": make_list(program),
        "optimization_program": make_list(optimization_program),
        "singlepoint_program": make_list(singlepoint_program),
        "singlepoint_method": make_list(singlepoint_method),
        "singlepoint_basis": make_list(singlepoint_basis),
        "singlepoint_keywords_id": make_list(singlepoint_keywords_id),
        "initial_molecule_id": make_list(initial_molecule_id),
        "created_before": created_before,
        "created_after": created_after,
        "modified_before": modified_before,
        "modified_after": modified_after,
        "limit": limit,
        "skip": skip,
    }
    include = set()
    # We must add '*' so that all the default fields are included
    if include_task:
        include |= {"*", "task"}
    if include_service:
        include |= {"*", "service"}
    if include_outputs:
        include |= {"*", "compute_history.*", "compute_history.outputs"}
    if include_comments:
        include |= {"*", "comments"}
    if include_initial_molecule:
        include |= {"*", "initial_molecule"}
    if include_optimizations:
        include |= {"*", "optimizations"}
    if include:
        query_data["include"] = include
    meta, record_data = self._auto_request(
        "post",
        "v1/record/gridoptimization/query",
        # BUG FIX: the torsiondrive query body was used here (copy/paste);
        # the gridoptimization endpoint should use its own body model
        GridoptimizationQueryBody,
        None,
        Tuple[QueryMetadata, List[GridoptimizationRecord._DataModel]],
        query_data,
        None,
    )
    return meta, self.recordmodel_from_datamodel(record_data)
##############################################################
# Managers
##############################################################
def get_managers(
    self,
    name: Union[str, Sequence[str]],
    missing_ok: bool = False,
) -> Union[Optional[ComputeManager], List[Optional[ComputeManager]]]:
    """Obtains manager information from the server via name

    Parameters
    ----------
    name
        A manager name or list of names
    missing_ok
        If True, return ``None`` for managers that were not found on the server.
        If False, raise ``KeyError`` if any managers were not found on the server.

    Returns
    -------
    :
        The requested managers, in the same order as the requested names.
        If given a list of names, the return value will be a list.
        Otherwise, it will be a single manager.
    """
    url_params = {"name": make_list(name), "missing_ok": missing_ok}
    managers = self._auto_request(
        "get", "v1/manager", None, CommonGetURLParametersName, List[Optional[ComputeManager]], None, url_params
    )
    # BUG FIX: str is itself a Sequence, so the previous isinstance(name, Sequence)
    # check returned a one-element *list* for a single name, contradicting the
    # documented behavior. A bare string now yields the single manager.
    if isinstance(name, str):
        return managers[0]
    return managers
    def query_managers(
        self,
        id: Optional[Union[int, Iterable[int]]] = None,
        name: Optional[Union[str, Iterable[str]]] = None,
        cluster: Optional[Union[str, Iterable[str]]] = None,
        hostname: Optional[Union[str, Iterable[str]]] = None,
        status: Optional[Union[RecordStatusEnum, Iterable[RecordStatusEnum]]] = None,
        modified_before: Optional[datetime] = None,
        modified_after: Optional[datetime] = None,
        include_log: bool = False,
        limit: Optional[int] = None,
        skip: int = 0,
    ) -> Tuple[QueryMetadata, List[ComputeManager]]:
        """Obtains information about compute managers attached to this Fractal instance

        Parameters
        ----------
        id
            ID assigned to the manager (this is not the UUID. This should be used very rarely).
        name
            Queries the managers name
        cluster
            Queries the managers cluster
        hostname
            Queries the managers hostname
        status
            Queries the manager's status field
        modified_before
            Query for managers last modified before a certain time
        modified_after
            Query for managers last modified after a certain time
        include_log
            If True, include the log entries for the manager
        limit
            The maximum number of managers to query
        skip
            The number of managers to skip in the query, used during pagination

        Returns
        -------
        :
            Metadata about the query results, and a list of managers matching the specified query.
        """
        # Clamp the requested limit to the server-side maximum, warning the caller
        if limit is not None and limit > self.api_limits["get_managers"]:
            warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
            limit = min(limit, self.api_limits["get_managers"])
        # None-valued criteria are simply omitted from filtering by the server
        query_body = {
            "id": make_list(id),
            "name": make_list(name),
            "cluster": make_list(cluster),
            "hostname": make_list(hostname),
            "status": make_list(status),
            "modified_before": modified_before,
            "modified_after": modified_after,
            "limit": limit,
            "skip": skip,
        }
        # "*" keeps all the default fields; "log" additionally pulls in the manager log
        if include_log:
            query_body["include"] = ["*", "log"]
        return self._auto_request(
            "post",
            "v1/manager/query",
            ManagerQueryBody,
            None,
            Tuple[QueryMetadata, List[ComputeManager]],
            query_body,
            None,
        )
##############################################################
# Server statistics and logs
##############################################################
def query_server_stats(
self,
before: Optional[datetime] = None,
after: Optional[datetime] = None,
limit: Optional[int] = None,
skip: int = 0,
) -> Tuple[QueryMetadata, List[Dict[str, Any]]]:
"""Obtains individual entries in the server stats logs"""
if limit is not None and limit > self.api_limits["get_server_stats"]:
warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
limit = min(limit, self.api_limits["get_server_stats"])
url_params = {"before": before, "after": after, "limit": limit, "skip": skip}
return self._auto_request(
"get",
"v1/server_stats",
None,
ServerStatsQueryParameters,
Tuple[QueryMetadata, List[Dict[str, Any]]],
None,
url_params,
)
    def delete_server_stats(self, before: datetime):
        """Deletes server stats log entries with timestamps before the given time

        The server's response is parsed as an ``int`` -- presumably the number
        of deleted entries; confirm against the server implementation.
        """
        url_params = {"before": before}
        return self._auto_request("delete", "v1/server_stats", None, DeleteBeforeDateParameters, int, None, url_params)
def query_access_log(
self,
access_type: Optional[Union[str, Iterable[str]]] = None,
access_method: Optional[Union[str, Iterable[str]]] = None,
before: Optional[datetime] = None,
after: Optional[datetime] = None,
limit: Optional[int] = None,
skip: int = 0,
) -> Tuple[QueryMetadata, List[Dict[str, Any]]]:
"""Obtains individual entries in the access logs"""
if limit is not None and limit > self.api_limits["get_access_logs"]:
warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
limit = min(limit, self.api_limits["get_access_logs"])
url_params = {
"access_type": make_list(access_type),
"access_method": make_list(access_method),
"before": before,
"after": after,
"limit": limit,
"skip": skip,
}
return self._auto_request(
"get",
"v1/access",
None,
AccessLogQueryParameters,
Tuple[QueryMetadata, List[Dict[str, Any]]],
None,
url_params,
)
    def delete_access_log(self, before: datetime):
        """Deletes access log entries with timestamps before the given time

        The server's response is parsed as an ``int`` -- presumably the number
        of deleted entries; confirm against the server implementation.
        """
        url_params = {"before": before}
        return self._auto_request("delete", "v1/access", None, DeleteBeforeDateParameters, int, None, url_params)
    def query_error_log(
        self,
        id: Optional[Union[int, Iterable[int]]] = None,
        username: Optional[Union[str, Iterable[str]]] = None,
        before: Optional[datetime] = None,
        after: Optional[datetime] = None,
        limit: Optional[int] = None,
        skip: int = 0,
    ) -> Tuple[QueryMetadata, List[Dict[str, Any]]]:
        """Obtains individual entries in the error logs

        Entries can be filtered by id, username, and/or a time window; None
        criteria are ignored. ``limit``/``skip`` are used for pagination.
        """
        # Clamp the requested limit to the server-side maximum, warning the caller
        if limit is not None and limit > self.api_limits["get_error_logs"]:
            warnings.warn(f"Specified limit of {limit} is over the server limit. Server limit will be used")
            limit = min(limit, self.api_limits["get_error_logs"])
        url_params = {
            "id": make_list(id),
            "username": make_list(username),
            "before": before,
            "after": after,
            "limit": limit,
            "skip": skip,
        }
        return self._auto_request(
            "get",
            "v1/server_error",
            None,
            ErrorLogQueryParameters,
            Tuple[QueryMetadata, List[Dict[str, Any]]],
            None,
            url_params,
        )
    def delete_error_log(self, before: datetime):
        """Deletes error log entries with timestamps before the given time

        The server's response is parsed as an ``int`` -- presumably the number
        of deleted entries; confirm against the server implementation.
        """
        url_params = {"before": before}
        return self._auto_request("delete", "v1/server_error", None, DeleteBeforeDateParameters, int, None, url_params)
def query_access_summary(
self,
group_by: str = "day",
before: Optional[datetime] = None,
after: Optional[datetime] = None,
) -> Dict[str, Any]:
"""Obtains daily summaries of accesses
Parameters
----------
group_by
How to group the data. Valid options are "user", "hour", "day", "country", "subdivision"
before
Query for log entries with a timestamp before a specific time
after
Query for log entries with a timestamp after a specific time
"""
url_params = {
"group_by": group_by,
"before": before,
"after": after,
}
return self._auto_request(
"get", "v1/access/summary", None, AccessLogQuerySummaryParameters, Dict[str, Any], None, url_params
)
##############################################################
# User & role management
##############################################################
    def list_roles(self) -> List[RoleInfo]:
        """
        List all user roles on the server

        Returns a list of RoleInfo objects, one per role.
        """
        return self._auto_request("get", "v1/role", None, None, List[RoleInfo], None, None)
    def get_role(self, rolename: str) -> RoleInfo:
        """
        Get information about a role on the server
        """
        # Validate the rolename client-side so we can fail fast with a clear error
        is_valid_rolename(rolename)
        return self._auto_request("get", f"v1/role/{rolename}", None, None, RoleInfo, None, None)
    def add_role(self, role_info: RoleInfo) -> None:
        """
        Adds a role with permissions to the server

        If not successful, an exception is raised.
        """
        # Validate the rolename client-side so we can fail fast with a clear error
        is_valid_rolename(role_info.rolename)
        return self._auto_request("post", "v1/role", RoleInfo, None, None, role_info, None)
    def modify_role(self, role_info: RoleInfo) -> RoleInfo:
        """
        Modifies the permissions of a role on the server

        If not successful, an exception is raised.

        Returns
        -------
        :
            A copy of the role as it now appears on the server
        """
        # Validate the rolename client-side so we can fail fast with a clear error
        is_valid_rolename(role_info.rolename)
        return self._auto_request("put", f"v1/role/{role_info.rolename}", RoleInfo, None, RoleInfo, role_info, None)
    def delete_role(self, rolename: str) -> None:
        """
        Deletes a role from the server

        This will not delete any role to which a user is assigned

        Will raise an exception on error

        Parameters
        ----------
        rolename
            Name of the role to delete
        """
        # Validate the rolename client-side so we can fail fast with a clear error
        is_valid_rolename(rolename)
        return self._auto_request("delete", f"v1/role/{rolename}", None, None, None, None, None)
    def list_users(self) -> List[UserInfo]:
        """
        List all users on the server

        Returns a list of UserInfo objects, one per user.
        """
        return self._auto_request("get", "v1/user", None, None, List[UserInfo], None, None)
def get_user(self, username: Optional[str] = None, as_admin: bool = False) -> UserInfo:
"""
Get information about a user on the server
If the username is not supplied, then info about the currently logged-in user is obtained
Parameters
----------
username
The username to get info about
as_admin
If True, then fetch the user from the admin user management endpoint. This is the default
if requesting a user other than the currently logged-in user
Returns
-------
:
Information about the user
"""
if username is None:
username = self.username
if username is None:
raise RuntimeError("Cannot get user - not logged in?")
# Check client side so we can bail early
is_valid_username(username)
if username != self.username:
as_admin = True
if as_admin is False:
# For the currently logged-in user, use the "me" endpoint. The other endpoint is
# restricted to admins
uinfo = self._auto_request("get", f"v1/me", None, None, UserInfo, None, None)
if uinfo.username != self.username:
raise RuntimeError(
f"Inconsistent username - client is {self.username} but logged in as {uinfo.username}"
)
else:
uinfo = self._auto_request("get", f"v1/user/{username}", None, None, UserInfo, None, None)
return uinfo
    def add_user(self, user_info: UserInfo, password: Optional[str] = None) -> str:
        """
        Adds a user to the server

        Parameters
        ----------
        user_info
            Info about the user to add
        password
            The user's password. If None, then one will be generated

        Returns
        -------
        :
            The password of the user (either the same as the supplied password, or the
            server-generated one)
        """
        # Validate client-side so we can fail fast with a clear error
        is_valid_username(user_info.username)
        is_valid_rolename(user_info.role)
        if password is not None:
            is_valid_password(password)
        # The server assigns the id; a pre-set id indicates a caller mistake
        if user_info.id is not None:
            raise RuntimeError("Cannot add user when user_info contains an id")
        return self._auto_request(
            "post", "v1/user", Tuple[UserInfo, Optional[str]], None, str, (user_info, password), None
        )
def modify_user(self, user_info: UserInfo, as_admin: bool = False) -> UserInfo:
"""
Modifies a user on the server
The user is determined by the username field of the input UserInfo, although the id
and username are checked for consistency.
Depending on the current user's permissions, some fields may not be updatable.
Parameters
----------
user_info
Updated information for a user
as_admin
If True, then attempt to modify fields that are only modifiable by an admin (enabled, role).
This is the default if requesting a user other than the currently logged-in user.
Returns
-------
:
The updated user information as it appears on the server
"""
is_valid_username(user_info.username)
is_valid_rolename(user_info.role)
if as_admin or (user_info.username != self.username):
url = f"v1/user/{user_info.username}"
else:
url = "v1/me"
return self._auto_request("put", url, UserInfo, None, UserInfo, user_info, None)
    def change_user_password(self, username: Optional[str] = None, new_password: Optional[str] = None) -> str:
        """
        Change a user's password

        If the username is not specified, then the current logged-in user is used.

        If the password is not specified, then one is automatically generated by the server.

        Parameters
        ----------
        username
            The name of the user whose password to change. If None, then use the currently logged-in user
        new_password
            Password to change to. If None, let the server generate one.

        Returns
        -------
        :
            The new password (either the same as the supplied one, or the server generated one)
        """
        if username is None:
            username = self.username
        # Validate client-side so we can fail fast with a clear error
        is_valid_username(username)
        if new_password is not None:
            is_valid_password(new_password)
        # Changing our own password uses the "me" endpoint; changing another
        # user's password requires the admin user endpoint
        if username == self.username:
            url = "v1/me/password"
        else:
            url = f"v1/user/{username}/password"
        return self._auto_request("put", url, Optional[str], None, str, new_password, None)
    def delete_user(self, username: str) -> None:
        """
        Deletes a user from the server

        Deleting the currently logged-in user is refused client-side.
        Will raise an exception on error.
        """
        is_valid_username(username)
        if username == self.username:
            raise RuntimeError("Cannot delete your own user!")
        return self._auto_request("delete", f"v1/user/{username}", None, None, None, None, None)
|
nilq/baby-python
|
python
|
import sys
import jinja2
import tdclient
import tdclient.version
from .version import __version__
import logging
logger = logging.getLogger(__name__)
class Context(object):
    '''High-level wrapper for tdclient.Client.'''

    def __init__(self, module=None, config=None):
        """Build the Treasure Data client and pick a jinja2 template loader.

        Loader precedence: explicit ``config['template_loader']``, then the
        package's ``templates`` directory when ``module`` is given, then a
        plain ``templates`` directory on the filesystem.
        """
        if config is None:
            config = {}
        self.module = module
        # tdclient
        self.client = self.get_client(apikey=config.get('apikey'), endpoint=config.get('endpoint'))
        # jinja2
        if 'template_loader' in config:
            loader = config['template_loader']
        elif self.module:
            loader = jinja2.PackageLoader(self.module, 'templates')
        else:
            loader = jinja2.FileSystemLoader('templates')
        self.template_loader = loader

    def get_client(self, apikey=None, endpoint=None):
        """Create a tdclient.Client, normalizing the endpoint and tagging a user agent."""
        kwargs = {}
        if apikey is not None:
            kwargs['apikey'] = apikey
        if endpoint is not None:
            # tdclient expects the endpoint URL to end with a slash
            kwargs['endpoint'] = endpoint if endpoint.endswith('/') else endpoint + '/'
        if 'user_agent' not in kwargs:
            versions = [
                "tdclient/{0}".format(tdclient.version.__version__),
                "Python/{0}.{1}.{2}.{3}.{4}".format(*list(sys.version_info)),
            ]
            kwargs['user_agent'] = "pytd/{0} ({1})".format(__version__, ' '.join(versions))
        return tdclient.Client(**kwargs)

    @property
    def apikey(self):
        """API key held by the underlying tdclient API object."""
        return self.client.api.apikey

    @property
    def endpoint(self):
        """Endpoint URL held by the underlying tdclient API object."""
        return self.client.api.endpoint

    def query(self, *args, **kwargs):
        """Create a Query bound to this context."""
        from pytd.query import Query
        return Query(self, *args, **kwargs)
|
nilq/baby-python
|
python
|
import agama
# Mass unit in solar masses; 1e6/4.3 Msun -- presumably chosen so that G = 1
# with lengths in kpc and velocities in km/s (G ~ 4.3e-6 kpc (km/s)^2/Msun).
# TODO confirm against the agama unit conventions.
mass_unit = (1.0/4.3)*(10.0**(6.0))
agama.setUnits(mass=mass_unit, length=1, velocity=1)
# Spheroid density profile: inner slope gamma=1, outer slope beta=3.1,
# scale radius 2.5, exponential cutoff at radius 15 (in the length units above)
pot = agama.Potential(type='Spheroid', gamma=1.0, beta=3.1, scaleRadius=2.5, outerCutoffRadius=15.0)
# Distribution function constructed from the potential (QuasiSpherical type)
df = agama.DistributionFunction(type='QuasiSpherical',potential=pot)
model = agama.GalaxyModel(pot,df)
# Draw 10000 particles from the model; M[0] holds phase-space coordinates,
# indexed [particle, component] -- M[1] presumably holds masses; verify.
M = model.sample(10000)
# Print the first phase-space component of the last sampled particle
print(M[0][9999,0])
# Write the sampled particles to a snapshot file; 'n' selects the output
# format -- NOTE(review): presumably NEMO; confirm against agama docs.
agama.writeSnapshot('test_snapshot.snp',M,'n')
|
nilq/baby-python
|
python
|
# Developed for the LSST System Integration, Test and Commissioning Team.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
__all__ = ["NON_CONFIG_CSCS", "OFFLINE_CSCS"]
NON_CONFIG_CSCS = [
"ATArchiver",
"ATHeaderService",
"ATMCS",
"ATPneumatics",
"ATPtg",
"CCHeaderService",
"CCArchiver",
"DSM",
"LinearStage",
"MTPtg",
"MTRotator",
"ScriptQueue"
]
OFFLINE_CSCS = [
"ATCamera",
"CCCamera",
]
|
nilq/baby-python
|
python
|
# Import the toolkit specific version.
from pyface.toolkit import toolkit_object
# Resolve the toolkit-specific TaskWindowBackend class at import time;
# toolkit_object looks up 'tasks.task_window_backend' inside whichever GUI
# toolkit package pyface has selected.
TaskWindowBackend = toolkit_object(
    'tasks.task_window_backend:TaskWindowBackend')
|
nilq/baby-python
|
python
|
# Based on
# https://www.paraview.org/Wiki/Python_Programmable_Filter#Generating_Data_.28Programmable_Source.29
#This script generates a helix curve.
#This is intended as the script of a 'Programmable Source'
def _helix(self, numPts):
    """Fill the Programmable Source's output polydata with a helix of numPts points.

    NOTE(review): this function is meant to run inside a ParaView Programmable
    Source script -- `self` is the source proxy and `vtk` is expected to be
    provided by that environment (there is no `import vtk` here); it will not
    run standalone. Confirm before reusing outside ParaView.
    """
    import math
    #numPts = 80 # Points along Helix
    length = 8.0 # Length of Helix
    rounds = 3.0 # Number of times around
    #Get a vtk.PolyData object for the output
    pdo = self.GetPolyDataOutput()
    #This will store the points for the Helix
    newPts = vtk.vtkPoints()
    for i in range(0, numPts):
        #Generate the Points along the Helix
        x = i*length/numPts
        y = math.sin(i*rounds*2*math.pi/numPts)
        z = math.cos(i*rounds*2*math.pi/numPts)
        #Insert the Points into the vtkPoints object
        #The first parameter indicates the reference.
        #value for the point. Here we add them sequentially.
        #Note that the first point is at index 0 (not 1).
        newPts.InsertPoint(i, x,y,z)
    #Add the points to the vtkPolyData object
    #Right now the points are not associated with a line -
    #it is just a set of unconnected points. We need to
    #create a 'cell' object that ties points together
    #to make a curve (in this case). This is done below.
    #A 'cell' is just an object that tells how points are
    #connected to make a 1D, 2D, or 3D object.
    pdo.SetPoints(newPts)
    #Make a vtkPolyLine which holds the info necessary
    #to create a curve composed of line segments. This
    #really just hold constructor data that will be passed
    #to vtkPolyData to add a new line.
    aPolyLine = vtk.vtkPolyLine()
    #Indicate the number of points along the line
    aPolyLine.GetPointIds().SetNumberOfIds(numPts)
    for i in range(0,numPts):
        #Add the points to the line. The first value indicates
        #the order of the point on the line. The second value
        #is a reference to a point in a vtkPoints object. Depends
        #on the order that Points were added to vtkPoints object.
        #Note that this will not be associated with actual points
        #until it is added to a vtkPolyData object which holds a
        #vtkPoints object.
        aPolyLine.GetPointIds().SetId(i, i)
    #Allocate the number of 'cells' that will be added. We are just
    #adding one vtkPolyLine 'cell' to the vtkPolyData object.
    pdo.Allocate(1, 1)
    #Add the poly line 'cell' to the vtkPolyData object.
    pdo.InsertNextCell(aPolyLine.GetCellType(), aPolyLine.GetPointIds())
def wrapper(**kwargs):
    """Create a ParaView Programmable Source that re-executes this file.

    The generated script injects ``kwargs`` and then exec's this very file, so
    the module-level ``if 'kwargs' in vars()`` branch runs inside the source
    and builds the helix geometry. The source is then shown in the active view.
    """
    import sys
    import paraview.simple as pvs
    # create a new 'Programmable Source'
    programmableSource1 = pvs.ProgrammableSource()
    # execfile was removed in Python 3, hence the version switch below
    # https://stackoverflow.com/questions/436198/what-is-an-alternative-to-execfile-in-python-3
    if sys.version_info[0] < 3:
        programmableSource1.Script = "kwargs="+str(kwargs)+";execfile('" + __file__ + "',globals(),locals())"
    else:
        programmableSource1.Script = "kwargs="+str(kwargs)+";exec(open('" + __file__ + "').read())"
    programmableSource1.ScriptRequestInformation = ''
    programmableSource1.PythonPath = ''
    # get active view
    renderView1 = pvs.GetActiveViewOrCreate('RenderView')
    # show data in view
    programmableSource1Display = pvs.Show(programmableSource1, renderView1)
def helix(n=10):
    """Create and display a helix Programmable Source with n points."""
    wrapper(n=n)
# When this file is exec'd inside the Programmable Source, `kwargs` has been
# injected by wrapper() and `self` is the source proxy, so build the geometry.
# At normal import time `kwargs` is absent and this is a no-op.
if 'kwargs' in vars():
    _helix(self, kwargs['n'])
|
nilq/baby-python
|
python
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import authenticate, login, logout
from Propylaea.forms import LoginForm, SignUpForm
from django.template import loader
from django.contrib.auth.decorators import login_required
def SignUpV(request):
    """Handle user sign-up.

    GET (or any non-POST): render the combined login/sign-up page.
    POST: validate the sign-up form, create the user with a hashed password,
    and redirect to /demoscopesis/ on success. If the form is invalid or the
    save fails (e.g. duplicate username), render a 409 error page.

    Fixes: the original used a bare ``except:`` (swallowing SystemExit et al.)
    and returned None when user.save() raised, which makes Django error out;
    now the prepared 409 page is returned in both failure cases.
    """
    if request.method == 'POST':
        # Populate the form with data from the POST request
        form1 = SignUpForm(request.POST)
        if form1.is_valid():
            try:
                # Save without committing first so the password can be hashed
                user = form1.save(commit=False)
                user.set_password(user.password)
                user.save()
                return HttpResponseRedirect('/demoscopesis/')
            except Exception:
                # Most likely an integrity error (duplicate username);
                # fall through to the error page below
                pass
        # Invalid form or failed save -> 409 error page
        template = loader.get_template('error.html')
        context = {
            "errorType": "409",
            "errorMessage": "Your prefered credentials were received but your account was not created. Please try again with a different username.",
            "redirectTo": "/user/signup"
        }
        return HttpResponse(template.render(context, request))
    else:
        # Not a submission - show the empty sign-up/login forms
        template = loader.get_template('Propylaea/login.html')
        context = {
            'SignUpForm': SignUpForm,
            'LoginForm': LoginForm,
        }
        return HttpResponse(template.render(context, request))
def LogIn(request):
    """Authenticate the posted credentials and start a session.

    GET (or any non-POST): render the combined login/sign-up page.
    POST: authenticate; redirect active users to /demoscopesis/, tell
    disabled users their account is off, and show a 401 error page on
    bad credentials.
    """
    if request.method != 'POST':
        # Not a submission - show the login/sign-up forms
        template = loader.get_template('Propylaea/login.html')
        context = {
            'LoginForm': LoginForm,
            'SignUpForm': SignUpForm,
        }
        return HttpResponse(template.render(context, request))
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is not None:
        # Valid credentials - but the account may be disabled
        if user.is_active:
            login(request, user)
            return HttpResponseRedirect('/demoscopesis/')
        return HttpResponse("Your account is disabled.")
    # Bad credentials - 401 error page
    template = loader.get_template('error.html')
    context = {
        "errorType": "401",
        "errorMessage": "You are not authorized to login. Please check your credentials or register an account",
        "redirectTo": "/user/login"
    }
    return HttpResponse(template.render(context, request))
@login_required(login_url='/user/login/')
def UsrLogout(request):
    """Log the current user out and redirect to the login page."""
    # Since we know the user is logged in, we can now just log them out.
    logout(request)
    # Take the user back to the homepage.
    return HttpResponseRedirect('/user/login/')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import numpy as np
import h5py as h
import matplotlib
import matplotlib.pyplot as plt
import sys, os, re, shutil, subprocess, time
from optparse import OptionParser
# NOTE(review): this is a Python 2 script (print statements, deprecated
# np.int/np.bool/np.float aliases); it will not run under Python 3 as-is.
parser = OptionParser()
parser.add_option("-f", "--file", action="store", type="string", dest="inputFile", help="Input H5 file with TOF data", metavar="FILENAME", default="")
parser.add_option("-m", "--motor", action="store", type="string", dest="motorName", help="Motorname to plot TOF data against (default: injectory)", metavar="NAME", default="injectory")
parser.add_option("-r", "--run", action="store", type="int", dest="runNumber", help="Run number with TOF data", metavar="NUMBER", default=0)
parser.add_option("-l", "--level", action="store", type="int", dest="outputLevel", help="Output level in input H5 file (default: 3)", metavar="NUMBER", default=3)
(options, args) = parser.parse_args()
########################################################
original_dir = os.getcwd() + '/'
# Fixed beamtime processing directory; input files are resolved relative to it
work_dir = "/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird_tof/"
if options.inputFile != '' or options.runNumber != 0:
    # open input cxidb file
    if options.inputFile != '':
        print "Reading TOF data from %s%s ..." % (work_dir, options.inputFile)
        f = h.File(work_dir + options.inputFile, "r")
    else:
        # Derive the filename from run number and output level
        fname = "r%04d_ol%d.h5" % (options.runNumber, options.outputLevel)
        print "Reading TOF data from %s%s ..." % (work_dir, fname)
        f = h.File(work_dir + fname, "r")
    # HDF5 dataset paths: GMD pulse energies, TOF traces, and motor positions
    gmdPath = "/entry_1/FEL/gmd"
    tdPath = "/entry_1/detector_2/data"
    injectorPath = "/entry_1/motors/%s" % options.motorName
    # sanity check
    for p in [gmdPath, tdPath, injectorPath]:
        if (not f.get(p)):
            print "\t'%s' does not exist, aborting..." % (p)
            sys.exit(1)
    # td is (n_traces, n_bins); one TOF trace per shot
    td = f[tdPath][:]
    print "\tfound %d time traces with %d bins" % (td.shape[0], td.shape[1])
    gmd = f[gmdPath][:]
    # Build a boolean mask of valid (non-NaN) GMD readings
    gmd_is_nan = np.isnan(gmd)
    gmd_is_not_nan = np.abs(gmd_is_nan.astype(np.int) - 1).astype(np.bool)
    gmd_without_nan = gmd[gmd_is_not_nan]
    print "\tfound %d gmd values (including %d NaNs) between %.2f and %.2f (%.2f +/- %.2f)" % (gmd.shape[0], gmd_is_nan.sum(), np.nanmin(gmd), np.nanmax(gmd), np.nanmean(gmd), np.nanstd(gmd))
    injector = f[injectorPath][:]
    injector_unique = np.sort(list(set(injector)))
    print "\tfound %d injector (%d unique) values between %.2f and %.2f (%.2f +/- %.2f)" % (injector.shape[0], injector_unique.shape[0], injector.min(), injector.max(), injector.mean(), injector.std())
    #print injector_unique
    # histogram gmd energies
    gmd_bins = np.arange(np.floor(gmd_without_nan.min()) - 1., np.ceil(gmd_without_nan.max()) + 3., 1.) - 0.5
    gmd_hist, gmd_bins = np.histogram(gmd_without_nan, bins=gmd_bins)
    gmd_bins_center = np.array([(gmd_bins[i] + gmd_bins[i + 1])/2 for i in range(len(gmd_bins) - 1)])
    # histogram injector values
    injector_delta = 0.05
    injector_bins = np.arange(injector.min() - injector_delta, injector.max() + 3*injector_delta, injector_delta) - injector_delta/2.
    injector_hist, injector_bins = np.histogram(injector, bins=injector_bins)
    injector_bins_center = np.array([(injector_bins[i] + injector_bins[i + 1])/2 for i in range(len(injector_bins) - 1)])
    # plot histogram
    fig = plt.figure(num=1, figsize=(11.5, 5.0), dpi=100, facecolor='w', edgecolor='k')
    fig.suptitle("Histograms")
    ax1 = fig.add_subplot(121)
    ax1.set_title("GMD energies (uJ)")
    ax1.set_xlabel("GMD (uJ)")
    ax1.set_ylabel("number of hits")
    ax1.plot(gmd_bins_center, gmd_hist)
    ax2 = fig.add_subplot(122)
    ax2.set_title("%s positions (mm)" % options.motorName)
    ax2.set_xlabel("%s (mm)" % options.motorName)
    ax2.set_ylabel("number of hits")
    ax2.plot(injector_bins_center, injector_hist)
    plt.show(block=False)
    # Interactively ask for a GMD energy window; retry until both limits parse
    while True:
        try:
            gmd_low = np.float(input("Enter lower limit for GMD energies (uJ): "))
            ax1.axvline(x=gmd_low, color='k', linestyle='--')
            plt.draw()
            gmd_high = np.float(input("Enter upper limit for GMD energies (uJ): "))
            ax1.axvline(x=gmd_high, color='k', linestyle='--')
            plt.draw()
            break
        except ValueError as err:
            print err
    # NaNs become -1 so they always fall outside the chosen window
    gmd[gmd_is_nan] = -1
    data_to_use = gmd_is_not_nan & (gmd >= gmd_low) & (gmd <= gmd_high)
    print "\taveraging %d / %d traces (%.1f%%)" % (data_to_use.sum(), len(data_to_use), data_to_use.sum()*100./len(data_to_use))
    # Average the selected traces per unique injector position
    td_avg = np.zeros((injector_unique.shape[0], td.shape[1]))
    n = 0
    for p in injector_unique:
        td_avg[n] = np.mean(td[data_to_use & (injector == p)], axis=0)
        n += 1
    # plot TOF data
    fig = plt.figure(num=2, figsize=(11.5, 5.0), dpi=100, facecolor='w', edgecolor='k')
    fig.suptitle("TOF")
    ax1 = fig.add_subplot(121)
    ax1.set_title("TOF traces")
    ax1.set_xlabel("flight time (arb. u.)")
    ax1.set_ylabel("ion trace (mV)")
    ax1.plot(np.mean(td[data_to_use], axis=0), 'k', label="selected")
    ax1.plot(np.mean(td, axis=0), 'k--', label="all")
    cmap = plt.get_cmap('plasma')
    colors = [cmap(n) for n in np.linspace(0, 1, len(injector_unique))]
    # Offset each per-position average vertically so the traces don't overlap
    n = 0
    for p in injector_unique:
        ax1.plot(td_avg[n] - (n + 1)*100, color=colors[n], label="%.2f mm" % p)
        n += 1
    #plt.legend(loc='best')
    #plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.legend(loc=4)
    ax2 = fig.add_subplot(122)
    ax2.set_title("TOF trace vs %s" % options.motorName)
    #ax2.imshow(td_avg, interpolation='nearest', origin='lower', extent=[0, td_avg.shape[1], injector_unique[0], injector_unique[-1]], aspect="auto")
    im = ax2.imshow(td_avg, interpolation='nearest', origin='lower', extent=[0, td_avg.shape[1], injector_bins[1], injector_bins[-3]], aspect="auto", cmap=plt.get_cmap("viridis"))
    plt.colorbar(im, pad=0.01)
    plt.show()
else:
    print "No input file or run specified, aborting..."
    sys.exit(1)
|
nilq/baby-python
|
python
|
import numpy as np
import math
# from mxnet import nd
from mxnet.gluon import nn
class Augmentation(nn.HybridBlock):
    """Random geometric augmentation applied jointly to an image pair.

    Samples one random rotation / zoom / translation (shared by both images)
    per batch element, plus a small extra "relative" transform applied only to
    the second image. Angles are given in degrees and converted to radians;
    translations are expressed in the [-1, 1] normalized grid coordinates used
    by GridGenerator (hence the factor of 2).

    NOTE(review): assumes img1/img2 are NCHW batches of size ``batch_size``
    with spatial size ``orig_shape``; output is resampled to ``target_shape``.
    Confirm against the caller.
    """
    def __init__(self, angle_range, zoom_range, translation_range, target_shape, orig_shape, batch_size,
        aspect_range = None, relative_angle = 0, relative_scale = (1, 1), relative_translation = 0):
        super().__init__()
        # degrees -> radians
        self._angle_range = tuple(map(lambda x : x / 180 * math.pi, angle_range) )
        self._scale_range = zoom_range
        # Accept either a scalar (symmetric range) or a 2-tuple
        try:
            translation_range = tuple(translation_range)
            if len(translation_range) != 2:
                raise ValueError('expect translation range to have shape [2,], but got {}'.format(translation_range))
        except TypeError:
            translation_range = (-translation_range, translation_range)
        # x2: convert fractions of the image to the [-1, 1] grid coordinate span
        self._translation_range = tuple(map(lambda x : x * 2, translation_range))
        self._target_shape = np.array(target_shape)
        self._orig_shape = np.array(orig_shape)
        self._batch_size = batch_size
        # Per-axis scale relating target to original pixel grids (flipped to x,y order)
        self._unit = np.flip(self._target_shape - 1, axis=0).reshape([2,1]) / np.flip(self._orig_shape - 1, axis=0).reshape([1,2])
        self._relative_scale = relative_scale
        # Relative angle range is the base range scaled down by relative_angle
        self._relative_angle = tuple(map(lambda x : x / 180 * math.pi * relative_angle, angle_range) )
        self._relative_translation = (-relative_translation * 2, relative_translation * 2)
        self._aspect_range = aspect_range
    def _get_relative_transform(self, F):
        """Sample a 3x3 homogeneous rotation+scale matrix for the second image."""
        # Compensate for non-square targets so rotation stays shape-true
        aspect_ratio = (self._target_shape[0] - 1) / (self._target_shape[1] - 1)
        rotation = F.random.uniform(*self._relative_angle, shape=(self._batch_size))
        scale = F.random.uniform(*self._relative_scale, shape=(self._batch_size))
        affine_params = [scale * rotation.cos(), scale * -rotation.sin() * aspect_ratio, F.zeros_like(scale),
                         scale * rotation.sin() / aspect_ratio, scale * rotation.cos(), F.zeros_like(scale),
                         F.zeros_like(scale), F.zeros_like(scale), F.ones_like(scale)]
        affine = F.reshape(F.stack(*affine_params, axis=1), [0, 3, 3])
        return affine
    def hybrid_forward(self, F, img1, img2):
        # Shared random transform parameters for the pair
        rotation = F.random.uniform(*self._angle_range, shape=(self._batch_size))
        scale = F.random.uniform(*self._scale_range, shape=(self._batch_size))
        if self._aspect_range is not None:
            aspect_ratio = F.random.uniform(*self._aspect_range, shape=(self._batch_size))
        else:
            aspect_ratio = 1
        # Remaining slack after scaling; random placement within it plus the
        # user-requested translation range
        pad_x, pad_y = 1 - scale * self._unit[0, 0], 1 - scale * self._unit[1, 1]
        translation_x = F.random.uniform(-1, 1, shape=(self._batch_size,)) * pad_x + F.random.uniform(*self._translation_range, shape=(self._batch_size))
        translation_y = F.random.uniform(-1, 1, shape=(self._batch_size,)) * pad_y + F.random.uniform(*self._translation_range, shape=(self._batch_size))
        # 2x3 affine (row-major) fed to GridGenerator
        affine_params = [scale * aspect_ratio * rotation.cos() * self._unit[0, 0], scale * aspect_ratio * -rotation.sin() * self._unit[1, 0], translation_x,
                         scale * rotation.sin() * self._unit[0, 1], scale * rotation.cos() * self._unit[1, 1], translation_y]
        affine_params = F.stack(*affine_params, axis=1)
        # Second image: shared transform composed with the relative one,
        # plus an independent small relative translation
        rel_affine = self._get_relative_transform(F)
        affine_2 = F.reshape(F.batch_dot(F.reshape(affine_params, [0, 2, 3]), rel_affine), [0, 6])
        rel_translation = [F.zeros((self._batch_size,)), F.zeros((self._batch_size,)), F.random.uniform(*self._relative_translation, shape=(self._batch_size,)),
                           F.zeros((self._batch_size,)), F.zeros((self._batch_size,)), F.random.uniform(*self._relative_translation, shape=(self._batch_size))]
        rel_translation = F.stack(*rel_translation, axis = 1)
        affine_2 = affine_2 + rel_translation
        # Resample both images with their respective affine grids
        grid = F.GridGenerator(data=affine_params, transform_type='affine', target_shape=list(self._target_shape))
        img1 = F.BilinearSampler(data=img1, grid=grid)
        grid_2 = F.GridGenerator(data=affine_2, transform_type='affine', target_shape=list(self._target_shape))
        img2 = F.BilinearSampler(data=img2, grid=grid_2)
        return img1, img2
'''
class ChromaticBrightnessAugmentation(nn.HybridBlock):
def __init__(self, brightness = 0.5, batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.brightness = brightness
self.batch_size = batch_size
def hybrid_forward(self, F, img):
aug = img
alpha = 1.0 + F.random.uniform(-self.brightness, self.brightness, shape = (self.batch_size, 1, 1, 1))
aug = F.broadcast_mul(aug, alpha)
return aug
class ChromaticContrastAugmentation(nn.HybridBlock):
def __init__(self, contrast = 0.5, batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.contrast = contrast
self.coefficient = [0.299, 0.587, 0.114]
self.batch_size = batch_size
def hybrid_forward(self, F, img):
aug = img
alpha = 1.0 + F.random.uniform(-self.contrast, self.contrast, shape = (self.batch_size, 1, 1, 1))
gray = F.concat(*[img.slice_axis(axis = 1, begin = k, end = k + 1) * self.coefficient[k] for k in range(3)], dim = 1)
mean = F.mean(gray, keepdims = True, axis = (1, 2, 3))
gray = 3.0 * (1.0 - alpha) * mean
aug = F.broadcast_mul(aug, alpha)
aug = F.broadcast_add(aug, gray)
return aug
'''
class ChromaticSHAugmentation(nn.HybridBlock):
    """Random saturation/hue color augmentation.

    Applies a per-batch-element random saturation factor and hue rotation via
    a single combined 3x3 RGB color matrix (the standard YIQ hue-rotation
    matrix pre-multiplied out).

    NOTE(review): assumes the input is an NCHW batch with exactly 3 channels
    in RGB order -- confirm against the data pipeline.
    """
    def __init__(self, saturation = 0.5, hue = 0.5, batch_size = 1, **kwargs):
        super().__init__(**kwargs)
        self.saturation = saturation
        self.hue = hue
        # RGB<->YIQ conversion matrices. NOTE(review): these are not referenced
        # by hybrid_forward below (the combined matrix there is precomputed);
        # kept presumably for documentation/reference.
        self.matrix_yiq = [ [ 0.299, 0.587, 0.114],
                            [ 0.596, -0.274, -0.321],
                            [ 0.211, -0.523, -0.311]]
        self.matrix_rgb = [ [ 1. , 0.956, 0.621],
                            [ 1. , -0.272, -0.647],
                            [ 1. , -1.107, 1.705]]
        self.batch_size = batch_size
    def hybrid_forward(self, F, img):
        aug = img
        # Saturation factor in [1-s, 1+s]; hue angle in [-h*pi, h*pi]
        alpha = 1.0 + F.random.uniform(-self.saturation, self.saturation, shape = (self.batch_size, 1, 1, 1))
        theta = F.random.uniform(-self.hue * np.pi, self.hue * np.pi, shape = (self.batch_size, 1, 1, 1))
        su = alpha * F.cos(theta)
        sw = alpha * F.sin(theta)
        # Combined hue-rotation + saturation matrix in RGB space
        matrix = [ [0.299 + 0.701 * su + 0.168 * sw, 0.587 - 0.587 * su + 0.330 * sw, 0.114 - 0.114 * su - 0.497 * sw],
                   [0.299 - 0.299 * su - 0.328 * sw, 0.587 + 0.413 * su + 0.035 * sw, 0.114 - 0.114 * su + 0.292 * sw],
                   [0.299 - 0.300 * su + 1.250 * sw, 0.587 - 0.588 * su - 1.050 * sw, 0.114 + 0.886 * su - 0.203 * sw]]
        # Apply the 3x3 matrix channel-wise: out_i = sum_j matrix[i][j] * in_j
        aug = F.concat(*[sum([F.broadcast_mul(aug.slice_axis(axis = 1, begin = j, end = j + 1), matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
        return aug
'''
class ChromaticGammaAugmentation(nn.HybridBlock):
    """Raise each sample to a random power drawn from ``gamma`` (gamma jitter)."""
    def __init__(self, gamma = (0.7, 1.5), batch_size = 1, **kwargs):
        super().__init__(**kwargs)
        # Unpack the sampling range for the per-sample exponent.
        self.gamma_min, self.gamma_max = gamma
        self.batch_size = batch_size
    def hybrid_forward(self, F, img):
        # One exponent per sample, broadcast across channels and pixels.
        exponent = F.random.uniform(self.gamma_min, self.gamma_max, shape = (self.batch_size, 1, 1, 1))
        return F.broadcast_power(img, exponent)
class ChromaticEigenAugmentation(nn.HybridBlock):
    """Randomly rotate the RGB colour basis of each sample in a batch."""
    def __init__(self, batch_size = 1, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size
    def hybrid_forward(self, F, img):
        # Three independent angles per sample, one per rotation axis.
        angles = F.random.uniform(low = -np.pi, high = np.pi, shape = (self.batch_size, 3, 1, 1))
        c = [F.cos(angles).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
        s = [F.sin(angles).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
        # 3x3 rotation assembled from the per-axis sines and cosines.
        rot = [[  c[0] * c[1],   s[1] * c[2] + s[0] * c[1] * s[2],   s[1] * s[2] - s[0] * c[1] * c[2]],
               [- c[0] * s[1],   c[1] * c[2] - s[0] * s[1] * s[2],   c[1] * s[2] + s[0] * s[1] * c[2]],
               [  s[0]       , - c[0] * s[2]                     ,   c[0] * c[2]                     ]]
        # Mix channels: out[i] = sum_j rot[i][j] * img[j].
        return F.concat(*[sum([F.broadcast_mul(img.slice_axis(axis = 1, begin = j, end = j + 1), rot[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
class ChromaticComposeAugmentation(nn.Block):
    """Apply brightness/contrast/saturation-hue jitter in random order, then gamma.

    Both input frames are stacked before augmentation so they receive exactly
    the same random photometric transform.
    """
    def __init__(self, brightness = 0.2, contrast = 0.5, saturation = 0.5, hue = 0.5, gamma = (0.7, 1.5), batch_size = 1, **kwargs):
        super().__init__(**kwargs)
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
        self.gamma = gamma
        self.batch_size = batch_size
        self.aug_brightness = ChromaticBrightnessAugmentation(self.brightness, self.batch_size)
        self.aug_contrast = ChromaticContrastAugmentation(self.contrast, self.batch_size)
        self.aug_sh = ChromaticSHAugmentation(self.saturation, self.hue, self.batch_size)
        self.augs = [self.aug_brightness, self.aug_contrast, self.aug_sh]
        self.Gamma = ChromaticGammaAugmentation(self.gamma, self.batch_size)
    def forward(self, img1, img2):
        # Stack the two frames along the height axis (dim 2).
        stacked = nd.concat(img1, img2, dim = 2)
        # Sampling all three ops yields a random application order.
        for op in random.sample(self.augs, 3):
            stacked = op(stacked)
        stacked = stacked.clip(0, 1)
        stacked = self.Gamma(stacked)
        # Undo the stacking: return the two frames separately.
        return nd.split(stacked, axis = 2, num_outputs = 2)
'''
class ColorAugmentation(nn.HybridBlock):
    """Combined photometric augmentation for a pair of images.

    Per batch sample it applies, in order: saturation/hue rotation, additive
    Gaussian noise, contrast and per-channel gain around the per-channel mean,
    optional random colour-basis ("eigen") rotation, brightness shift,
    clipping to [0, 1] and an optional gamma curve.  All random parameters are
    drawn once and shared by ``img1`` and ``img2``.
    """
    def __init__(self, contrast_range, brightness_sigma, channel_range, batch_size, shape, noise_range,
                 saturation, hue, gamma_range = None, eigen_aug = False, **kwargs):
        super().__init__(**kwargs)
        self._contrast_range = contrast_range      # (lo, hi) offsets around 1
        self._brightness_sigma = brightness_sigma  # stddev of additive brightness
        self._channel_range = channel_range        # (lo, hi) per-channel gain
        self._batch_size = batch_size
        self._shape = shape                        # spatial (H, W) used for the noise tensor
        self._noise_range = noise_range            # (lo, hi) for the noise stddev
        self._gamma_range = gamma_range            # None disables gamma jitter
        self._eigen_aug = eigen_aug                # enable colour-basis rotation
        self._saturation = saturation
        self._hue = hue
    def hybrid_forward(self, F, img1, img2):
        """Return [aug1, aug2]; both images share the same random draws."""
        # Per-sample multiplicative contrast in [1 + lo, 1 + hi].
        contrast = F.random.uniform(*self._contrast_range, shape=(self._batch_size, 1, 1, 1)) + 1
        brightness = F.random.normal(scale=self._brightness_sigma, shape=(self._batch_size, 1, 1, 1))
        # Independent gain per colour channel.
        channel = F.random.uniform(*self._channel_range, shape=(self._batch_size, 3, 1, 1))
        noise_sigma = F.random.uniform(*self._noise_range)
        if self._gamma_range is not None:
            gamma = F.random.uniform(*self._gamma_range, shape = (self._batch_size, 1, 1, 1))
        contrast = contrast.repeat(repeats=3, axis=1)
        brightness = brightness.repeat(repeats=3, axis=1)
        # Saturation/hue: scale chroma by ``alpha`` and rotate it by ``theta``.
        alpha = 1.0 + F.random.uniform(-self._saturation, self._saturation, shape = (self._batch_size, 1, 1, 1))
        theta = F.random.uniform(-self._hue * np.pi, self._hue * np.pi, shape = (self._batch_size, 1, 1, 1))
        su = alpha * F.cos(theta)
        sw = alpha * F.sin(theta)
        # Hue/saturation matrix expressed directly in RGB space.
        sh_matrix = [ [0.299 + 0.701 * su + 0.168 * sw, 0.587 - 0.587 * su + 0.330 * sw, 0.114 - 0.114 * su - 0.497 * sw],
                      [0.299 - 0.299 * su - 0.328 * sw, 0.587 + 0.413 * su + 0.035 * sw, 0.114 - 0.114 * su + 0.292 * sw],
                      [0.299 - 0.300 * su + 1.250 * sw, 0.587 - 0.588 * su - 1.050 * sw, 0.114 + 0.886 * su - 0.203 * sw]]
        if self._eigen_aug:
            # Random 3D rotation of the colour basis, one angle per axis.
            spin_angle = F.random.uniform(low = -np.pi, high = np.pi, shape = (self._batch_size, 3, 1, 1))
            cos_ = [F.cos(spin_angle).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
            sin_ = [F.sin(spin_angle).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
            spin_matrix = [ [ cos_[0] * cos_[1], sin_[1] * cos_[2] + sin_[0] * cos_[1] * sin_[2], sin_[1] * sin_[2] - sin_[0] * cos_[1] * cos_[2]],
                            [-cos_[0] * sin_[1], cos_[1] * cos_[2] - sin_[0] * sin_[1] * sin_[2], cos_[1] * sin_[2] + sin_[0] * sin_[1] * cos_[2]],
                            [ sin_[0]          ,-cos_[0] * sin_[2]                              , cos_[0] * cos_[2]                              ]]
        ret = []
        for img in (img1, img2):
            aug = img
            # Channel mix: out[i] = sum_j sh_matrix[i][j] * img[j].
            aug = F.concat(*[sum([F.broadcast_mul(aug.slice_axis(axis = 1, begin = j, end = j + 1), sh_matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
            noise = F.random.normal(scale=1, shape=(self._batch_size, 3) + tuple(self._shape))
            aug = aug + noise * noise_sigma
            # Contrast and channel gains are applied around the per-channel mean.
            mean = F.mean(aug, keepdims=True, axis=(2,3))
            aug = F.broadcast_minus(aug, mean)
            aug = F.broadcast_mul(aug, contrast * channel)
            if self._eigen_aug:
                aug = F.concat(*[sum([F.broadcast_mul(aug.slice_axis(axis = 1, begin = j, end = j + 1), spin_matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
            aug = F.broadcast_add(aug, mean * channel + brightness)
            aug = F.clip(aug, 0, 1)
            if self._gamma_range is not None:
                # NOTE(review): the exponent is exp(U(gamma_range)), not the raw
                # uniform draw — confirm the extra exp() is intended.
                aug = F.broadcast_power(aug, F.exp(gamma))
            ret.append(aug)
        return ret
class GeometryAugmentation(nn.HybridBlock):
    """Random affine augmentation (rotation/zoom/translation, optional aspect)
    for an image pair plus its optical flow and validity mask.

    A "global" affine transform is applied to both frames; when the
    ``relative_*`` parameters are given, a second small transform is composed
    on top for the second frame and the flow is adjusted to stay consistent.
    """
    def __init__(self, angle_range, zoom_range, translation_range, target_shape, orig_shape, batch_size,
                 aspect_range = None, relative_angle=None, relative_scale=None, relative_translation=None):
        super().__init__()
        # Angles are given in degrees; store them in radians.
        self._angle_range = tuple(map(lambda x : x / 180 * math.pi, angle_range) )
        self._scale_range = zoom_range
        # Accept either a scalar (symmetric range) or an explicit (lo, hi) pair.
        try:
            translation_range = tuple(translation_range)
            if len(translation_range) != 2:
                raise ValueError('expect translation range to have shape [2,], but got {}'.format(translation_range))
        except TypeError:
            translation_range = (-translation_range, translation_range)
        # Doubled because GridGenerator coordinates span [-1, 1] (width 2).
        self._translation_range = tuple(map(lambda x : x * 2, translation_range))
        self._target_shape = np.array(target_shape)
        self._orig_shape = np.array(orig_shape)
        self._batch_size = batch_size
        # Ratio of target to original extent per axis (x/y flipped to w,h order).
        self._unit = np.flip(self._target_shape - 1, axis=0).reshape([2,1]) / np.flip(self._orig_shape - 1, axis=0).reshape([1,2])
        self._relative = relative_angle is not None
        # NOTE(review): _relative_scale/_relative_angle/_relative_translation are
        # only set when relative_angle is given, yet _get_relative_transform and
        # hybrid_forward read them unconditionally — confirm callers always pass
        # the relative_* arguments together.
        if self._relative:
            self._relative_scale = relative_scale
            self._relative_angle = tuple(map(lambda x : x / 180 * math.pi * relative_angle, angle_range) )
            self._relative_translation = tuple(map(lambda x: x * relative_translation, self._translation_range)) if relative_translation is not None else None
        self._aspect_range = aspect_range
    def _get_relative_transform(self, F):
        """Sample the small frame-2 transform; returns (3x3 affine, 2x2 inverse)."""
        aspect_ratio = (self._target_shape[0] - 1) / (self._target_shape[1] - 1)
        rotation = F.random.uniform(*self._relative_angle, shape=(self._batch_size))
        scale = F.random.uniform(*self._relative_scale, shape=(self._batch_size))
        # Row-major 3x3 scaled-rotation matrix (no translation component).
        affine_params = [scale * rotation.cos(), scale * -rotation.sin() * aspect_ratio, F.zeros_like(scale),
                         scale * rotation.sin() / aspect_ratio, scale * rotation.cos(), F.zeros_like(scale),
                         F.zeros_like(scale), F.zeros_like(scale), F.ones_like(scale)]
        affine = F.reshape(F.stack(*affine_params, axis=1), [0, 3, 3])
        # Closed-form inverse of the 2x2 scaled-rotation block.
        inverse = F.stack(
            rotation.cos() / scale,
            rotation.sin() / scale,
            -rotation.sin() / scale,
            rotation.cos() / scale,
            axis=1
        )
        inverse = F.reshape(inverse, [0, 2, 2])
        return affine, inverse
    def hybrid_forward(self, F, img1, img2, flow, mask):
        """Apply the sampled transforms; returns (img1, img2, flow, mask)."""
        rotation = F.random.uniform(*self._angle_range, shape=(self._batch_size))
        aspect_ratio = F.random.uniform(*self._aspect_range, shape=(self._batch_size)) if self._aspect_range is not None else 1
        scale = F.random.uniform(*self._scale_range, shape=(self._batch_size))
        os = (self._orig_shape[0] - 1, self._orig_shape[1] - 1)
        ts = (self._target_shape[0] - 1, self._target_shape[1] - 1)
        # Clamp the scale so the rotated target window still fits inside the
        # original image along both axes.
        abs_rotation = F.abs(rotation)
        scale = F.minimum(scale, os[1] / (aspect_ratio * (ts[0] * F.sin(abs_rotation) + ts[1] * F.cos(abs_rotation))))
        scale = F.minimum(scale, os[0] / (ts[0] * F.cos(abs_rotation) + ts[1] * F.sin(abs_rotation)))
        # Remaining slack per axis; random placement within it plus user jitter.
        pad_x, pad_y = 1 - scale * self._unit[0, 0], 1 - scale * self._unit[1, 1]
        translation_x = F.random.uniform(-1, 1, shape=(self._batch_size,)) * pad_x + F.random.uniform(*self._translation_range, shape=(self._batch_size))
        translation_y = F.random.uniform(-1, 1, shape=(self._batch_size,)) * pad_y + F.random.uniform(*self._translation_range, shape=(self._batch_size))
        # 2x3 affine for GridGenerator (row-major: [a b tx; c d ty]).
        affine_params = [scale * aspect_ratio * rotation.cos() * self._unit[0, 0], scale * aspect_ratio * -rotation.sin() * self._unit[1, 0], translation_x,
                         scale * rotation.sin() * self._unit[0, 1], scale * rotation.cos() * self._unit[1, 1], translation_y]
        affine_params = F.stack(*affine_params, axis=1)
        # Inverse of the linear part, used later to rotate the flow vectors.
        affine_inverse = F.stack(
            rotation.cos() / (scale * aspect_ratio),
            rotation.sin() / (scale * aspect_ratio),
            -rotation.sin() / scale,
            rotation.cos() / scale,
            axis=1
        )
        linv = F.reshape(affine_inverse, [0, 2, 2])
        mask = mask.broadcast_like(flow.slice_axis(axis = 1, begin = 0, end = 1))
        rel_affine, rel_inverse = self._get_relative_transform(F)
        # Frame-2 transform = global transform composed with the relative one.
        affine_2 = F.reshape(F.batch_dot(F.reshape(affine_params, [0, 2, 3]), rel_affine), [0, 6])
        if self._relative_translation is not None:
            rel_translation = F.random.uniform(*self._relative_translation, shape=(self._batch_size, 2, 1, 1))
            # Convert the normalized translation into pixels for the flow.
            rel_scale = F.concat(F.ones([self._batch_size, 1, 1, 1]) * (self._orig_shape[1] - 1) / 2,
                                 F.ones([self._batch_size, 1, 1, 1]) * (self._orig_shape[0] - 1) / 2, dim=1)
            flow = F.broadcast_minus(flow, rel_translation * rel_scale)
        # Warp img1, the mask and the (masked) flow with one sampler call.
        concat_img = F.concat(img1, mask, F.broadcast_mul(flow, mask), dim=1)
        grid = F.GridGenerator(data=affine_params, transform_type='affine', target_shape=list(self._target_shape))
        # Shift the grid back inside [-1, 1] if the sample window overflowed.
        force_translation = F.maximum(grid.max(axis=(2, 3), keepdims=True) - 1, 0) + F.minimum(grid.min(axis=(2, 3), keepdims=True) + 1, 0)
        grid = F.broadcast_minus(grid, force_translation)
        grid = grid.clip(-1, 1)
        concat_img = F.BilinearSampler(data=concat_img, grid=grid)
        img1 = F.slice_axis(concat_img, axis=1, begin=0, end=3)
        mask = F.slice_axis(concat_img, axis=1, begin=3, end=4)
        flow = F.slice_axis(concat_img, axis=1, begin=4, end=6)
        # Undo the pre-multiplication by the (now interpolated) mask.
        flow = F.broadcast_div(flow, F.maximum(mask, 1e-8))
        # relative
        grid_2 = F.GridGenerator(data=affine_2, transform_type='affine', target_shape=list(self._target_shape))
        grid_2 = F.broadcast_minus(grid_2, force_translation)
        if self._relative_translation is not None:
            grid_2 = F.broadcast_add(grid_2, rel_translation)
        img2 = F.BilinearSampler(data=img2, grid=grid_2)
        # Rotate/scale the flow vectors into the new frame-2 coordinates.
        inverse_2 = F.batch_dot(rel_inverse, linv)
        flow = F.reshape_like(F.batch_dot(inverse_2, F.reshape(flow, (0, 0, -3))), flow)
        # Pixel-scale matrix used to express the grid correction in pixels.
        scale = F.stack(F.ones([self._batch_size]) * (self._target_shape[1] - 1) / 2,
                        F.zeros([self._batch_size]),
                        F.zeros([self._batch_size]),
                        F.ones([self._batch_size]) * (self._target_shape[0] - 1) / 2,
                        axis=1)
        scale = F.reshape(scale, [0, 2, 2])
        I = F.reshape(F.one_hot(F.arange(0, 2), depth=2), [1, 2, 2])
        # Identity sampling grid over the target shape.
        grid = F.GridGenerator(data=F.reshape(F.one_hot(F.arange(0, 2), depth=3), [1, 6]),
                               transform_type='affine',
                               target_shape=list(self._target_shape))
        grid = F.reshape(F.repeat(grid, axis=0, repeats=self._batch_size), [0, 0, -3])
        # Add the flow induced by the relative transform itself.
        factor = F.batch_dot(F.broadcast_minus(rel_inverse, I), scale)
        flow = flow + F.reshape_like(F.batch_dot(factor, grid), flow)
        return img1, img2, flow, mask
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import os
import sys
import pytest
from collections import defaultdict
myPath = os.path.abspath(os.getcwd())
sys.path.insert(0, myPath)
from salt.exceptions import ArgumentValueError
import hubblestack.extmods.fdg.process
class TestProcess():
"""
Class used to test the functions in ``process.py``
"""
def test__compare_invalidComp_raiseException(self):
"""
Test that given invalid ``comp``,
the function raises an ArgumentValueError exception
"""
with pytest.raises(ArgumentValueError) as e_info:
hubblestack.extmods.fdg.process._compare('foo', 1, 2)
def test__compare_geCompt_validReturn(self):
"""
Test that given correct values,
the function outputs the correct result
"""
# ge = greater equal
ret = hubblestack.extmods.fdg.process._compare('ge', 1, 2)
assert ret is False
ret = hubblestack.extmods.fdg.process._compare('ge', 2, 2)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('ge', 2, 1)
assert ret is True
# gt = greater than
ret = hubblestack.extmods.fdg.process._compare('gt', 10, 2)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('gt', 1, 2)
assert ret is False
ret = hubblestack.extmods.fdg.process._compare('gt', 2, 2)
assert ret is False
# lt = lower than
ret = hubblestack.extmods.fdg.process._compare('lt', 1, 2)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('lt', 2, 2)
assert ret is False
ret = hubblestack.extmods.fdg.process._compare('lt', 2, 1)
assert ret is False
# le = lower equal
ret = hubblestack.extmods.fdg.process._compare('le', 1, 2)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('le', 2, 2)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('le', 2, 1)
assert ret is False
# eq = equal
ret = hubblestack.extmods.fdg.process._compare('eq', 1, 2)
assert ret is False
ret = hubblestack.extmods.fdg.process._compare('eq', 1, 1)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('eq', 2, 1)
assert ret is False
# ne = not equal
ret = hubblestack.extmods.fdg.process._compare('ne', 1, 2)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('ne', 2, 1)
assert ret is True
ret = hubblestack.extmods.fdg.process._compare('ne', 1, 1)
assert ret is False
def test__filterDict_invalidFilterRules_returnNone(self):
"""
Test that given invalid ``filter_values``, the function returns None
"""
expected_ret = None
ret = hubblestack.extmods.fdg.process._filter_dict(
{1: 'a', 2: 'b'}, False, {'invalid': 1, 'data': 2})
assert expected_ret == ret
def test__filterDict_filterKeysValidFilterRules_returnFilteredDict(self):
"""
Test that given valid ``filter_values``,
the function correctly filters a dict by keys
"""
expected_ret = {2: 'b', 4: 'd'}
ret = hubblestack.extmods.fdg.process._filter_dict(
{1: 'a', 2: 'b', 3: 'c', 4: 'd'}, False, {'gt': 1, 'le': 4, 'ne': 3})
assert expected_ret == ret
expected_ret = {'a': 1, 'b': 2}
ret = hubblestack.extmods.fdg.process._filter_dict(
{'a': 1, 'b': 2, 'c': 3, 'd': 4}, False, {'ge': 'a', 'lt': 'd', 'ne': 'c'})
assert expected_ret == ret
def test__filterDict_filterValuesValidFilterRules_returnFilteredDict(self):
"""
Test that given valid ``filter_values``,
the function correctly filters a dict by values
"""
expected_ret = {'b': 2, 'd': 4}
ret = hubblestack.extmods.fdg.process._filter_dict(
{'a': 1, 'b': 2, 'c': 3, 'd': 4}, True, {'gt': 1, 'le': 4, 'ne': 3})
assert expected_ret == ret
expected_ret = {1: 'a', 2: 'b'}
ret = hubblestack.extmods.fdg.process._filter_dict(
{1: 'a', 2: 'b', 3: 'c', 4: 'd'}, True, {'ge': 'a', 'lt': 'd', 'ne': 'c'})
assert expected_ret == ret
def test__filterDict_emptyFilterRules_returnUnfilteredDict(self):
"""
Test that given empty ``filter_rules``,
the function leaves the dict intact
"""
expected_ret = {1: 'a', 2: 'b'}
ret = hubblestack.extmods.fdg.process._filter_dict({1: 'a', 2: 'b'}, True, {})
assert expected_ret == ret
def test_filterDict_invalidDict_emptyReturn(self):
"""
Test that given invalid types for ``starting_dict`` or ``chained``,
the function returns False and None
"""
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.filter_dict(
starting_dict=[1, 2, 3], chained={1: 'a', 2: 'b'})
assert expected_status == status
assert expected_ret == ret
status, ret = hubblestack.extmods.fdg.process.filter_dict(
starting_dict={1: 'a', 2: 'b'}, chained=[1, 2])
assert expected_status == status
assert expected_ret == ret
def test_filterDict_validDictFilterKeys_returnFilteredDict(self):
"""
Test that given correct input, the function correctly filters by keys
"""
expected_status, expected_ret = True, {1: 'a', 2: 'b', 4: 'd'}
status, ret = hubblestack.extmods.fdg.process.filter_dict(
starting_dict={1: 'a', 2: 'b', 3: 'c'}, chained={1: 'b', 3: 'd', 4: 'd'},
ge=1, ne=3)
assert expected_status == status
assert expected_ret == ret
def test_filterDict_validDictFilterValues_returnFilteredDict(self):
"""
Test that given correct input, the function correctly filters by values
"""
expected_status, expected_ret = True, {3: 'c', 4: 'd'}
status, ret = hubblestack.extmods.fdg.process.filter_dict(
starting_dict={1: 'a', 2: 'b', 3: 'c'}, filter_values=True,
chained={1: 'b', 3: 'd', 4: 'd'}, gt='a', ne='b', le='d')
assert expected_status == status
assert expected_ret == ret
def test__filter_invalidComp_returnNone(self):
"""
Test that given invalid input, the function returns None
"""
expected_ret = None
ret = hubblestack.extmods.fdg.process._filter([1, 2, 3], {'foo': 1})
assert expected_ret == ret
def test__filter_validArguments_returnFilteredSeq(self):
"""
Test that given valid arguments of different types,
the function returns the filtered sequence
"""
# list
expected_ret = [2, 4]
seq = [1, 2, 3, 4]
ret = hubblestack.extmods.fdg.process._filter(seq, {"gt": 1, "ne": 3, "le": 4})
assert expected_ret == ret
# set
seq = set(seq)
ret = hubblestack.extmods.fdg.process._filter(seq, {"gt": 1, "ne": 3, "le": 4})
assert expected_ret == ret
# string
seq = "test string"
expected_ret = ['e', 's', ' ', 's', 'r', 'i', 'n', 'g']
ret = hubblestack.extmods.fdg.process._filter(seq, {"ne": 't'})
assert expected_ret == ret
def test_filterSeq_invalidSeq_returnNone(self):
"""
Test that given invalid input, the function returns None
"""
# invalid ``starting_seq``
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.filter_seq(
starting_seq=1, chained=[2, 3, 4], ge=1, lt=4)
assert expected_status == status
assert expected_ret == ret
# invalid ``chained``
status, ret = hubblestack.extmods.fdg.process.filter_seq(
starting_seq=[1, 2], chained=4, ge=1, lt=4)
assert expected_status == status
assert expected_ret == ret
def test_filterSeq_validSeq_returnFilteredSeq(self):
"""Test that given valid input of different types,
the function returns True and the filtered sequence
"""
# list
expected_status, expected_ret = True, [2, 4]
chained = [1, 2]
seq = [3, 4]
status, ret = hubblestack.extmods.fdg.process.filter_seq(
starting_seq=seq, chained=chained, gt=1, ne=3, le=4)
assert expected_ret == ret
assert expected_status == status
# set
expected_status, expected_ret = True, [3]
seq = set(seq)
chained = set(chained)
status, ret = hubblestack.extmods.fdg.process.filter_seq(
starting_seq=seq, chained=chained, ge=1, ne=2, lt=4, eq=3)
assert expected_ret == ret
assert expected_status == status
# string
expected_status, expected_ret = True, ['e', 's', ' ', 's', 'r', 'i', 'n', 'g']
seq = 'test {}'
chained = 'string'
status, ret = hubblestack.extmods.fdg.process.filter_seq(
starting_seq=seq, chained=chained, ne='t')
assert expected_ret == ret
assert expected_status == status
def test_getIndex_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
# invalid ``chained``
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.get_index(
starting_list=[1, 2, 3])
assert expected_status == status
assert expected_ret == ret
# index out of range
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.get_index(
index=4, chained=[1, 2, 3])
assert expected_status == status
assert expected_ret == ret
# invalid ``chained`` type
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.get_index(
chained=set([1, 2, 3]))
assert expected_status == status
assert expected_ret == ret
def test_getIndex_validData_returnValue(self):
"""
Test that given valid arguments,
the function extracts the correct value
"""
expected_status = True
status, ret = hubblestack.extmods.fdg.process.get_index(
index=-1, starting_list=[1, 2], chained=[3, 4])
assert expected_status == status
assert ret == 2
status, ret = hubblestack.extmods.fdg.process.get_index(
starting_list=[1, 2], chained=[3, 4])
assert expected_status == status
assert ret == 3
status, ret = hubblestack.extmods.fdg.process.get_index(
index=2, starting_list=[1, 2], chained=[3, 4])
assert expected_status == status
assert ret == 1
def test_getKey_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
# invalid ``chained`` type
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.get_key(
key='a', chained=['a', 'b', 'c'])
assert expected_status == status
assert expected_ret == ret
# invalid key
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.get_key(
key='d', chained=['a', 'b', 'c'])
assert expected_status == status
assert expected_ret == ret
def test_getKey_validKey_returnValue(self):
"""
Test that given valid arguments,
the function returns the correct value
"""
expected_status, expected_ret = True, 1
status, ret = hubblestack.extmods.fdg.process.get_key(
key='b', starting_dict={'b': 1, 'c': 2},
chained={'a': 1, 'b': 2})
assert expected_status == status
assert expected_ret == ret
def test_join_invalidArgumentType_returnNone(self):
"""
Test that given invalid arguments,
the function returns None
"""
# invalid ``chained``
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.join(
chained=1)
assert expected_status == status
assert expected_ret == ret
# invalid ``sep``
status, ret = hubblestack.extmods.fdg.process.join(
sep=[1, 2], chained=['foo', 'bar'])
assert expected_status == status
assert expected_ret == ret
def test_join_validArguments_returnString(self):
"""
Test that given valid arguments,
the function will return the joined string
"""
# no ``sep``
expected_status, expected_ret = True, 'testwordstogether'
status, ret = hubblestack.extmods.fdg.process.join(
words='together', chained=['test', 'words'])
assert expected_status == status
assert expected_ret == ret
# valid ``sep``
status, ret = hubblestack.extmods.fdg.process.join(
words=['words', 'together'], sep='-', chained=['test', 'more'])
assert expected_status == status
assert ret == 'test-more-words-together'
def test__sort_invalidSeq_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
# invalid ``seq``
expected_ret = None
ret = hubblestack.extmods.fdg.process._sort(
seq=1, desc=True, lexico=False)
assert expected_ret == ret
# invalid ``desc``
ret = hubblestack.extmods.fdg.process._sort(
seq=[2, 1], desc='yes', lexico=False)
assert expected_ret == ret
# invalid ``lexico``
ret = hubblestack.extmods.fdg.process._sort(
seq=[1, 2, 12, 13], desc=False, lexico=True)
assert expected_ret == ret
def test__sort_validArguments_returnSortedSeq(self):
"""
Test that given valid arguments,
the function correctly sorts them with different parameters
"""
ret = hubblestack.extmods.fdg.process._sort(
seq=['b', 'a', 'Z'], desc=False, lexico=False)
assert ret == ['Z', 'a', 'b']
ret = hubblestack.extmods.fdg.process._sort(
seq={'a': 1, 'b': 2, 'B': 3}, desc=True, lexico=False)
assert ret == ['b', 'a', 'B']
ret = hubblestack.extmods.fdg.process._sort(
seq=set(['b', 'A', 'C']), desc=False, lexico=True)
assert ret == ['A', 'b', 'C']
def test_sort_invalidArgument_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
expected_status, expected_ret = False, None
# invalid ``chained``
status, ret = hubblestack.extmods.fdg.process.sort(seq=2, chained=1)
assert expected_status == status
assert expected_ret == ret
# invalid ``desc``
status, ret = hubblestack.extmods.fdg.process.sort(
chained=[1, 2, 3], desc='yes')
assert expected_status == status
assert expected_ret == ret
# invalid ``lexico``
status, ret = hubblestack.extmods.fdg.process.sort(
chained=[1, 2, 3], lexico=True)
assert expected_status == status
assert expected_ret == ret
def test_sort_validArguments_returnSortedSeq(self):
"""
Test that given valid arguments,
the function correctly sorts them with different parameters
"""
expected_status = True
# desc list
status, ret = hubblestack.extmods.fdg.process.sort(
seq=[1, 2], desc=True, chained=[3])
assert expected_status == status
assert ret == [3, 2, 1]
# dict
status, ret = hubblestack.extmods.fdg.process.sort(chained={2: 'a', 1: 'b', 3: 'c'})
assert expected_status == status
assert ret == [1, 2, 3]
# desc set
status, ret = hubblestack.extmods.fdg.process.sort(
seq=['A', 'B'], chained=set(['a', 'b']), desc=True)
assert expected_status == status
assert ret == ['b', 'a', 'B', 'A']
# lexicographic string
status, ret = hubblestack.extmods.fdg.process.sort(
seq='A{}B', chained='ab', lexico=True)
assert expected_status == status
assert ret == ['A', 'a', 'b', 'B']
def test__split_invalidArguments_returnNone(self):
"""
Test that given invalid arguments,
the function returns None
"""
expected_ret = None
ret = hubblestack.extmods.fdg.process._split([1, 2, 3], " ", False)
assert ret == expected_ret
ret = hubblestack.extmods.fdg.process._split("foo bar", [1, 2, 3], False)
assert ret == expected_ret
ret = hubblestack.extmods.fdg.process._split([1, 2, 3], " ", True)
assert ret == expected_ret
ret = hubblestack.extmods.fdg.process._split("foo bar", [1, 2, 3], True)
assert ret == expected_ret
def test__split_validArguments_returnList(self):
"""
Test that given valid arguments,
the function correctly splits the string into a list
"""
# simple ``sep``
expected_ret = ['foo', 'bar']
ret = hubblestack.extmods.fdg.process._split("foo bar", " ", False)
assert ret == expected_ret
# ``sep`` simple regex
ret = hubblestack.extmods.fdg.process._split("foo bar", " ", True)
assert ret == expected_ret
# regex
ret = hubblestack.extmods.fdg.process._split("foo bar", "\s+", True)
assert ret == expected_ret
# invalid ``sep``
ret = hubblestack.extmods.fdg.process._split("foo bar", "?", False)
assert ret == ['foo bar']
def test_split_invalidArguments_returnNone(self):
"""
Test that given invalid arguments,
the function returns None
"""
expected_status, expected_ret = False, None
# invalid ``words``
status, ret = hubblestack.extmods.fdg.process.split([1, 2, 3], chained='ab')
assert ret == expected_ret
assert status == expected_status
status, ret = hubblestack.extmods.fdg.process.split({1: 'a', 2: 'b'}, chained='ab')
assert ret == expected_ret
assert status == expected_status
# invalid ``words`` & ``chained``
status, ret = hubblestack.extmods.fdg.process.split(1, chained=12)
assert ret == expected_ret
assert status == expected_status
status, ret = hubblestack.extmods.fdg.process.split('foo bar', regex=True)
assert ret == expected_ret
assert status == expected_status
def test_split_validArguments_returnList(self):
"""
Test that given valid arguments, the function correctly splits
in all scenarios
"""
# valid regex
status, ret = hubblestack.extmods.fdg.process.split(
phrase="a1b2c3d", sep="\d+", regex=True)
assert status is True
assert ret == ['a', 'b', 'c', 'd']
# invalid regex
status, ret = hubblestack.extmods.fdg.process.split(
phrase="a1b2{}", sep="\d+", regex=False, chained='c3d')
assert status is False
assert ret == ['a1b2c3d']
# simple sep
status, ret = hubblestack.extmods.fdg.process.split(
phrase="a1 b2 {}", sep=" ", chained='c3 d')
assert status is True
assert ret == ['a1', 'b2', 'c3', 'd']
# no sep
status, ret = hubblestack.extmods.fdg.process.split(
phrase="a1 b2 \n{}", chained='c3 d')
assert status is True
assert ret == ['a1', 'b2', 'c3', 'd']
def test_dictToList_invalidArguments_returnNone(self):
"""
Test that given invalid arguments,
the function returns None
"""
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.dict_to_list(
starting_dict={1: 'a'}, chained=[1, 2, 3])
assert status == expected_status
assert ret == expected_ret
status, ret = hubblestack.extmods.fdg.process.dict_to_list(
starting_dict='foo', chained={1: 'a', 2: 'b'})
assert status == expected_status
assert ret == expected_ret
def test_dictToList_validArguments_returnList(self):
"""
Test that given valid arguments,
the function outputs a valid list
"""
# flat dict
status, ret = hubblestack.extmods.fdg.process.dict_to_list(
starting_dict={1: 'a'}, update_chained=False, chained={1: 'b', 2: 'c'})
assert status is True
assert ret == [(1, 'b'), (2, 'c')]
# nested dict
status, ret = hubblestack.extmods.fdg.process.dict_to_list(
starting_dict={1: 'a', 3: {1: 'a'}}, chained={1: 'b', 2: 'c'})
assert status is True
assert ret == [(1, 'a'), (2, 'c'), (3, {1: 'a'})]
# empty dict
status, ret = hubblestack.extmods.fdg.process.dict_to_list(chained={})
assert status is False
assert ret == []
def test__dictConvertNone_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
ret = hubblestack.extmods.fdg.process._dict_convert_none([1, 2, 3])
assert ret == None
ret = hubblestack.extmods.fdg.process._dict_convert_none(1)
assert ret == None
ret = hubblestack.extmods.fdg.process._dict_convert_none(defaultdict())
assert ret == {}
    def test__dictConvertNone_validArgumentRecursive_returnDict(self):
        """
        Test that given valid arguments,
        the function converts empty strings to None in all scenarios
        """
        # flat dict: only '' is converted; the string "None" is left alone
        ret = hubblestack.extmods.fdg.process._dict_convert_none(
            {1: "", 2: 'a', 3: "None", 4: None})
        assert ret == {1: None, 2: 'a', 3: "None", 4: None}
        # nested dicts
        ret = hubblestack.extmods.fdg.process._dict_convert_none(
            {'a': {'aa': {'aaa': 3, 'bbb': {'bbbb': 4, 'cccc': ''},
                          'ccc': ''}, 'bb': ''}, 'b': ''})
        assert ret == {'a': {'aa': {'aaa': 3, 'bbb': {'bbbb': 4, 'cccc': None},
                                    'ccc': None}, 'bb': None}, 'b': None}
        # nested dicts & seqs: note tuples come back as lists
        ret = hubblestack.extmods.fdg.process._dict_convert_none(
            {'a': [{'b': ({'c': ['d', {'e': ''}], 'f': ''}, {'g': ''}),
                    'h': ''}, 'i'], 'j': ''})
        assert ret == {'a': [{'b': [{'c': ['d', {'e': None}], 'f': None}, {'g': None}],
                              'h': None}, 'i'], 'j': None}
def test__seqConvertNone_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
ret = hubblestack.extmods.fdg.process._seq_convert_none({1: 'a', 2: 'b'})
assert ret == None
ret = hubblestack.extmods.fdg.process._seq_convert_none(1)
assert ret == None
ret = hubblestack.extmods.fdg.process._seq_convert_none(True)
assert ret == None
    def test__seqConvertNone_validArgumentRecursive_returnList(self):
        """
        Test that given valid arguments,
        the function correctly converts empty strings to None in all scenarios
        """
        # flat seq with nested dicts
        ret = hubblestack.extmods.fdg.process._seq_convert_none(
            ['a', {1: ''}, 'b', {1: ''}, 'c'])
        assert ret == ['a', {1: None}, 'b', {1: None}, 'c']
        # nested seq & dict: tuples come back as lists
        ret = hubblestack.extmods.fdg.process._seq_convert_none(
            ('a', [{1: '', 2: [3, (4, {1: '', 2: {3: ''}})]}, 'b'], 'c'))
        assert ret == ['a', [{1: None, 2: [3, [4, {1: None, 2: {3: None}}]]}, 'b'], 'c']
def test_dictConvertNone_invalidArgument_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
chained='foo bar')
assert status == expected_status
assert ret == expected_ret
status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
chained={1: 'a'}, starting_seq=[1, 2])
assert status == expected_status
assert ret == expected_ret
status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
chained=[])
assert status == expected_status
assert ret == []
    def test_dictConvertNone_validArguments_returnDict(self):
        """
        Test that given valid arguments,
        the function returns a valid dict with None instead of empty strings
        """
        # flat dict
        status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
            chained={1: 'a', 2: '', 3: 'b', 4: ''})
        assert ret == {1: 'a', 2: None, 3: 'b', 4: None}
        assert status is True
        # nested dict & tuple; ``starting_seq`` is merged into the result
        status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
            chained={'a': [{'b': ({'c': {'e': ''}, 'f': ''}, {'g': ''}),
                            'h': ''}, 'i']}, starting_seq={'j': ''})
        assert status is True
        assert ret == {'a': [{'b': [{'c': {'e': None}, 'f': None}, {'g': None}],
                              'h': None}, 'i'], 'j': None}
        # nested dict, list & tuple: tuples come back as lists
        status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
            chained=('a', [{1: '', 2: [3, (4, {1: '', 2: {3: ''}})]}, 'b'], 'c'))
        assert status is True
        assert ret == ['a', [{1: None, 2: [3, [4, {1: None, 2: {3: None}}]]}, 'b'], 'c']
        # nested dict & list; ``starting_seq`` appended to ``chained``
        status, ret = hubblestack.extmods.fdg.process.dict_convert_none(
            chained=['a', {1: ''}, 'b'], starting_seq=[{1: ''}, 'c'])
        assert status is True
        assert ret == ['a', {1: None}, 'b', {1: None}, 'c']
def test_printString_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.print_string(
starting_string=['foo', 'bar'])
assert status == expected_status
assert ret == expected_ret
status, ret = hubblestack.extmods.fdg.process.print_string(
starting_string='')
assert status == expected_status
assert ret == ''
def test_printString_validArguments_returnString(self):
"""
Test that given valid arguments, the function returns the correct string
"""
status, ret = hubblestack.extmods.fdg.process.print_string(
starting_string='foo', chained='bar')
assert status is True
assert ret == 'foo'
status, ret = hubblestack.extmods.fdg.process.print_string(
starting_string='foo {}', chained=['b', 'a', 'r'])
assert status is True
assert ret == "foo ['b', 'a', 'r']"
def test__sterilizeDict_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
ret = hubblestack.extmods.fdg.process._sterilize_dict(
dictionary=[1, 2])
assert ret is None
ret = hubblestack.extmods.fdg.process._sterilize_dict(
dictionary={})
assert ret == {}
ret = hubblestack.extmods.fdg.process._sterilize_dict(
dictionary=12)
assert ret is None
    def test__sterilizeDict_validArgumentRecursive_returnDict(self):
        """
        Test that given valid arguments,
        the function correctly removes keys containing values of None
        """
        # flat dict: the key mapped to None is dropped
        ret = hubblestack.extmods.fdg.process._sterilize_dict(
            {1: None, 2: 'a'})
        assert ret == {2: 'a'}
        # nested dicts: None-valued keys are removed at every depth
        ret = hubblestack.extmods.fdg.process._sterilize_dict(
            {1: None, 2: {3: {4: None, 5: 'a'}, 6: None, 7: 'b'}, 8: 'c', 9: {10: None}})
        assert ret == {2: {3: {5: 'a'}, 7: 'b'}, 8: 'c', 9: {}}
        # nested dicts & sequences: tuples are normalised to lists on the way
        ret = hubblestack.extmods.fdg.process._sterilize_dict(
            {1: None, 2: {3: [4, {5: None}], 6: {7: ('b', {9: None}), 8: None}}})
        assert ret == {2: {3: [4, {}], 6: {7: ['b', {}]}}}
def test__sterilizeSeq_invalidArguments_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
ret = hubblestack.extmods.fdg.process._sterilize_seq(
{1: 'a', 2: ['b']})
assert ret == None
ret = hubblestack.extmods.fdg.process._sterilize_seq(12)
assert ret == None
ret = hubblestack.extmods.fdg.process._sterilize_seq([])
assert ret == []
def test__sterilizeSeq_validArgumentRecursive_returnNone(self):
"""
Test that given valid arguments,
the function finds nested dicts and removes keys with values of None
"""
# flat seq
ret = hubblestack.extmods.fdg.process._sterilize_seq(
[1, 2, set([1, 2, 1]), (1, 2)])
assert ret == [1, 2, [1, 2], [1, 2]]
# nested dicts & seq
ret = hubblestack.extmods.fdg.process._sterilize_seq(
[{1: None, 2: {3: ({4: None, 5: 'a'}, [None, {6: None, 7: 'b'}]),
8: 'c', 9: {10: None}}}])
assert ret == [{2: {3: [{5: 'a'}, [None, {7: 'b'}]], 8: 'c', 9: {}}}]
def test_removeDictNone_invalidArgument_returnNone(self):
"""
Test that given invalid arguments, the function returns None
"""
# invalid ``starting_seq``
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
starting_seq=[1, 2, 3], chained={1: 'a', 2: 'b'})
assert status == expected_status
assert ret == expected_ret
# invalid ``chained`` & valid ``starting_seq``
status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
starting_seq=[1, 2, 3], chained="123")
assert status == expected_status
assert ret == expected_ret
# invalid ``chained``
status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
chained="123")
assert status == expected_status
assert ret == expected_ret
    def test_dictRemoveNone_validArguments_returnSeq(self):
        """
        Test that given valid arguments, the function finds nested dicts
        and removes keys with values of None
        """
        # flat dict
        status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
            chained={1: None, 2: 'a', 3: None, 4: 'b'})
        assert status is True
        assert ret == {2: 'a', 4: 'b'}
        # flat seq: ``starting_seq`` entries are appended after ``chained``
        status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
            chained=[{1: None}, {2: 'a', 3: None}],
            starting_seq=[5, None, {4: 'b'}])
        assert status is True
        assert ret == [{}, {2: 'a'}, 5, None, {4: 'b'}]
        # nested sequences & dicts: sets and tuples are converted to lists
        status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
            starting_seq=[{1: None, 2: {3: ({4: None, 5: 'a'},
                                            [None, {6: None, 7: 'b'}]), 8: 'c'}}],
            chained=[{9: {10: None, 11: set([1, 2, 1])}}, 11])
        assert status is True
        assert ret == [{9: {11: [1, 2]}}, 11, {2: {3: [{5: 'a'}, [None, {7: 'b'}]], 8: 'c'}}]
        # nested dicts & sequences: a dict ``starting_seq`` is merged into ``chained``
        status, ret = hubblestack.extmods.fdg.process.dict_remove_none(
            starting_seq={1: None, 2: {3: ({4: None, 5: 'a'}, [None, {6: None, 7: 'b'}]), 8: 'c'}},
            chained={9: {10: None, 11: set([1, 2, 1])}, 11: None})
        assert status is True
        assert ret == {2: {3: [{5: 'a'}, [None, {7: 'b'}]], 8: 'c'}, 9: {11: [1, 2]}}
def test_encodeBase64_invalidArguments_emptyReturn(self):
"""
Test that given invalid arguments, the function returns None
"""
# invalid `starting_string`
expected_status, expected_ret = False, None
status, ret = hubblestack.extmods.fdg.process.encode_base64(
starting_string=123, chained="foo")
assert status == expected_status
assert ret == expected_ret
status, ret = hubblestack.extmods.fdg.process.encode_base64(
starting_string=['a', 'c'], format_chained=False)
assert status == expected_status
assert ret == expected_ret
status, ret = hubblestack.extmods.fdg.process.encode_base64(
starting_string='', format_chained=False)
assert status == expected_status
assert ret == ''
def test_encodeBase64_validArguments_returnString(self):
"""
Test that given valid arguments, the function correctly encodes the string
and returns it
"""
status, ret = hubblestack.extmods.fdg.process.encode_base64(
starting_string="foo {}", chained="bar")
assert status
assert ret == 'Zm9vIGJhcg=='
status, ret = hubblestack.extmods.fdg.process.encode_base64(
starting_string="foo", chained="bar")
assert status
assert ret == 'Zm9v'
status, ret = hubblestack.extmods.fdg.process.encode_base64(
starting_string="foo {}", format_chained=False, chained="bar")
assert status
assert ret == 'Zm9vIHt9'
|
nilq/baby-python
|
python
|
from pprint import pprint
def sort_with_index(arr):
    """Return (index, value) pairs for *arr*, sorted by value, largest first.

    The sort is stable, so equal values keep their original relative order.
    """
    indexed = list(enumerate(arr))
    indexed.sort(key=lambda pair: -pair[1])
    return indexed
def assign(jobs_with_index, n_not_fulfilled_jobs, n_machines):
    """Map the largest unfinished jobs onto machine indices 0..k-1.

    *jobs_with_index* holds (job_index, remaining_work) pairs, expected in
    descending order of remaining work.  Exactly
    k = min(n_machines, n_not_fulfilled_jobs) machines are assigned.
    Returns a {job_index: machine_index} dict.
    """
    assignment = {}
    taken = set()
    for machine_idx in range(min(n_machines, n_not_fulfilled_jobs)):
        # sentinel kept from the original: -1 is recorded when no
        # unassigned job with remaining work exists
        chosen = -1
        for job_idx, remaining in jobs_with_index:
            if job_idx not in taken and remaining > 0:
                chosen = job_idx
                break
        taken.add(chosen)
        assignment[chosen] = machine_idx
    return assignment
def level(jobs, machines):
    """Preemptive scheduling of ``jobs`` on machines with speeds ``machines``.

    Appears to implement the "level" algorithm for uniform machines — the
    largest remaining jobs run on machines 0..k-1 until either a job
    finishes or two jobs' remaining work becomes equal, then reassigns.
    NOTE(review): machines are used in index order, not sorted by speed —
    confirm callers pass speeds fastest-first.

    WARNING: ``jobs`` (remaining processing requirements) is mutated in place.
    Returns a list of (job_idx, machine_idx, start_time, end_time) intervals.
    """
    n_jobs = len(jobs)
    n_machines = len(machines)
    n_not_fulfilled_jobs = len(jobs)
    assignment = []
    t = 0
    while n_not_fulfilled_jobs > 0:
        # Re-rank jobs by remaining work, largest first, and assign machines.
        jobs_with_index = sort_with_index(jobs)
        current_assignment = assign(jobs_with_index, n_not_fulfilled_jobs, n_machines=n_machines)
        # dt1: earliest time at which some assigned job finishes completely.
        dt1 = min(jobs[job_id] / machines[machine_id] for job_id, machine_id in current_assignment.items())
        assignment_list = sorted(current_assignment.items(), key=lambda assg: assg[0])
        # dt2: earliest time two assigned jobs reach equal remaining work
        # (solving job_i - speed_i*t == job_j - speed_j*t; only pairs where
        # the larger job runs on the faster machine can converge).
        dt2 = None
        for start_idx, (job_i, machine_i) in enumerate(assignment_list):
            for job_j, machine_j in assignment_list[start_idx + 1:]:
                if jobs[job_i] <= jobs[job_j] or machines[machine_i] <= machines[machine_j]:
                    continue
                current_dt2 = (jobs[job_i] - jobs[job_j]) / (machines[machine_i] - machines[machine_j])
                if dt2 is None or current_dt2 < dt2:
                    dt2 = current_dt2
        # Advance by the earlier of the two events.
        dt = min(dt1, dt2) if dt2 is not None else dt1
        for job_idx in range(n_jobs):
            if jobs[job_idx] > 0:
                if job_idx in current_assignment:
                    machine_idx = current_assignment[job_idx]
                    assignment.append((job_idx, machine_idx, t, t + dt))
                    jobs[job_idx] -= machines[machine_idx] * dt
                    if jobs[job_idx] <= 0:
                        n_not_fulfilled_jobs -= 1
        t += dt
    return assignment
def solve():
    """Run the level scheduler on a fixed sample instance."""
    job_sizes = [7, 8, 4, 9, 12, 5, 3, 9, 5, 12, 7, 5, 8]
    machine_speeds = [3, 4, 3, 2]
    return level(job_sizes, machine_speeds)
# Run the sample instance and print the schedule actually produced.
print(solve())
# Reference output for the same instance, hard-coded for eyeball comparison.
pprint([(3, 2, 0, 3.0), (4, 0, 0, 3.0), (7, 3, 0, 3.0), (9, 1, 0, 3.0), (0, 2, 3.0, 4.0), (1, 0, 3.0, 4.0),
        (10, 3, 3.0, 4.0), (12, 1, 3.0, 4.0), (1, 0, 4.0, 5.25), (5, 1, 4.0, 5.25), (8, 2, 4.0, 5.25),
        (10, 3, 4.0, 5.25), (0, 1, 5.25, 6.25), (2, 2, 5.25, 6.25), (11, 0, 5.25, 6.25), (12, 3, 5.25, 6.25),
        (4, 0, 6.25, 6.5), (6, 1, 6.25, 6.5), (7, 2, 6.25, 6.5), (10, 3, 6.25, 6.5), (4, 0, 6.5, 6.625),
        (6, 2, 6.5, 6.625), (7, 1, 6.5, 6.625), (10, 3, 6.5, 6.625), (4, 2, 6.625, 6.75), (7, 3, 6.625, 6.75),
        (11, 0, 6.625, 6.75), (12, 1, 6.625, 6.75), (4, 3, 6.75, 7.15625), (6, 1, 6.75, 7.15625),
        (10, 0, 6.75, 7.15625), (11, 2, 6.75, 7.15625), (1, 2, 7.15625, 7.40625), (7, 0, 7.15625, 7.40625),
        (8, 3, 7.15625, 7.40625), (12, 1, 7.15625, 7.40625), (2, 0, 7.40625, 7.59375), (4, 3, 7.40625, 7.59375),
        (7, 1, 7.40625, 7.59375), (8, 2, 7.40625, 7.59375), (1, 1, 7.59375, 7.625), (2, 3, 7.59375, 7.625),
        (10, 0, 7.59375, 7.625), (12, 2, 7.59375, 7.625), (1, 3, 7.625, 7.7265625), (10, 0, 7.625, 7.7265625),
        (11, 1, 7.625, 7.7265625), (12, 2, 7.625, 7.7265625), (1, 3, 7.7265625, 7.7890625),
        (2, 0, 7.7265625, 7.7890625), (4, 1, 7.7265625, 7.7890625), (8, 2, 7.7265625, 7.7890625),
        (2, 0, 7.7890625, 7.8203125), (4, 3, 7.7890625, 7.8203125), (10, 1, 7.7890625, 7.8203125),
        (12, 2, 7.7890625, 7.8203125), (1, 1, 7.8203125, 7.822916666666667), (2, 0, 7.8203125, 7.822916666666667),
        (10, 2, 7.8203125, 7.822916666666667), (12, 3, 7.8203125, 7.822916666666667),
        (1, 1, 7.822916666666667, 7.823784722222222), (2, 0, 7.822916666666667, 7.823784722222222),
        (12, 2, 7.822916666666667, 7.823784722222222), (1, 1, 7.823784722222222, 7.83203125),
        (2, 0, 7.823784722222222, 7.83203125), (2, 0, 7.83203125, 7.8515625), (2, 0, 7.8515625, 7.8515625)])
|
nilq/baby-python
|
python
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
class TestNSManager(unittest.TestCase):
    """
    Namespace manager test class
    """

    def setUp(self):
        '''
        Initialize third data needed for testing
        '''
        self.name = 'MyCase'
        self.exec_eng = ExecutionEngine(self.name)

    def test_01_nsm_basic(self):
        """Adding, overwriting and re-adding shared namespace definitions."""
        nsm = self.exec_eng.ns_manager
        test = {}
        ns_key1 = 'ns_ac'
        ns1_value = 'toto.AC'
        ns1 = {ns_key1: ns1_value}
        test.update(ns1)
        nsm.add_ns_def(ns1)
        ns_key2 = 'ns_bc'
        ns2_value = 'toto.bc'
        # Fix: use the ns2_value variable instead of repeating the literal
        # 'toto.bc', consistent with how ns1 is built above (same value).
        ns2 = {ns_key2: ns2_value}
        test.update(ns2)
        nsm.add_ns_def(ns2)
        self.assertEqual(nsm.shared_ns_dict[ns_key1].get_value(), ns1_value)
        self.assertEqual(nsm.shared_ns_dict[ns_key2].get_value(), ns2_value)
        # ns already exists with same value
        nsm.add_ns_def(ns1)
        self.assertEqual(nsm.shared_ns_dict[ns_key1].get_value(), ns1_value)
        # ns already exists but different value
        ns1_val2 = {ns_key1: ns2_value}
        nsm.add_ns_def(ns1_val2)
        self.assertEqual(nsm.shared_ns_dict[ns_key1].get_value(), ns2_value)
        # reset and redo
        nsm.reset_current_disc_ns()
        ns2_val1 = {ns_key2: ns1_value}
        nsm.add_ns_def(ns2_val1)
        self.assertEqual(nsm.shared_ns_dict[ns_key2].get_value(), ns1_value)

    def test_02_nsm_check_ns_dict(self):
        """A discipline's ns info contains its local_ns plus shared namespaces."""
        nsm = self.exec_eng.ns_manager
        nsm.set_current_disc_ns('T.E')
        ns1 = {'ns_ac': 'AC'}
        nsm.add_ns_def(ns1)
        disc = SoSDiscipline('toto', self.exec_eng)
        nsm.create_disc_ns_info(disc)
        self.assertEqual(nsm.shared_ns_dict['ns_ac'].get_value(), 'AC')
        ns_dict = nsm.get_disc_ns_info(disc)
        self.assertEqual(ns_dict['local_ns'].get_value(), 'T.E.toto')
        self.assertListEqual(list(ns_dict.keys()), ['local_ns', 'others_ns'])
        self.assertEqual(ns_dict['others_ns']['ns_ac'].get_value(), 'AC')

    def test_03_nsm_current_ns_reset(self):
        """reset_current_disc_ns clears the current namespace."""
        nsm = self.exec_eng.ns_manager
        nsm.reset_current_disc_ns()
        self.assertEqual(nsm.current_disc_ns, None)

    def test_04_nsm_change_disc_ns(self):
        """'..' pops one namespace level; a plain name pushes one level."""
        nsm = self.exec_eng.ns_manager
        nsm.set_current_disc_ns('T.E')
        nsm.change_disc_ns('..')
        self.assertEqual(nsm.current_disc_ns, 'T')
        nsm.change_disc_ns('..')
        self.assertEqual(nsm.current_disc_ns, None)
        nsm.change_disc_ns('SA')
        self.assertEqual(nsm.current_disc_ns, 'SA')
        nsm.change_disc_ns('toto')
        self.assertEqual(nsm.current_disc_ns, 'SA.toto')
|
nilq/baby-python
|
python
|
#!python
"""
ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!!
"""
def linear_search(array, item):
    """Return the first index of *item* in *array*, or None when absent.

    Thin façade over the iterative implementation; the recursive variant
    is available as linear_search_recursive.
    """
    return linear_search_iterative(array, item)
def linear_search_iterative(array, item):
    """Scan *array* left to right; return the first matching index or None.

    Time complexity: O(n) — every element may be inspected once.
    Space complexity: O(1) — only loop bookkeeping is kept.
    """
    for position, candidate in enumerate(array):
        if candidate == item:
            return position
    return None
def linear_search_recursive(array, item, index=0):
    """Return the first index of *item* at or after *index*, or None if absent.

    Bug fix: the original returned ``index`` (== len(array)) when the item
    was missing, disagreeing with linear_search_iterative and with the
    wrapper's "or None if item is not found" contract.

    Time complexity: O(n); space complexity: O(n) recursion depth.
    """
    if index >= len(array):
        return None  # exhausted the array without finding the item
    if array[index] == item:
        return index
    return linear_search_recursive(array, item, index + 1)
def binary_search(array, item):
    """Return the index of *item* in the sorted *array*, or None if absent.

    Thin façade over the iterative implementation; the recursive variant
    is available as binary_search_recursive.
    """
    return binary_search_iterative(array, item)
def binary_search_iterative(array, item):
    """Return the index of *item* in the sorted *array*, or None if absent.

    Time complexity: O(log n); space complexity: O(1).
    """
    if not array:
        return None
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2  # Python ints don't overflow, so this is safe
        if array[mid] == item:
            return mid
        if item > array[mid]:
            lo = mid + 1
        else:
            hi = mid - 1
    return None
def binary_search_recursive(array, item, left=None, right=None):
    """Return the index of *item* in the sorted *array*, or None if absent.

    *left*/*right* default to the full range on the initial call.
    Time complexity: O(log n); space complexity: O(log n) call stack.
    """
    if left is None and right is None:
        left, right = 0, len(array) - 1
    if left > right:
        return None  # empty search window: item is not present
    mid = (left + right) // 2
    if array[mid] == item:
        return mid
    if item < array[mid]:
        return binary_search_recursive(array, item, left, mid - 1)
    return binary_search_recursive(array, item, mid + 1, right)
|
nilq/baby-python
|
python
|
import json
import sys
import os
from time import sleep
import wxpy
class Greeting:
    """A new-year greeting addressed to a single WeChat contact."""

    def __init__(self, name, puid, greeting='{name}新年快乐!狗年大吉!'):
        self.name = name
        self.puid = puid
        self._greeting = greeting

    def toJSON(self):
        """Serialize as pretty-printed, key-sorted JSON.

        Note: this dumps ``__dict__``, so the template is emitted under the
        private key ``_greeting`` (Greetings.toJSON uses items() instead).
        """
        return json.dumps(
            self,
            default=lambda obj: obj.__dict__,
            sort_keys=True,
            indent=4,
            ensure_ascii=False,
        )

    def items(self):
        """Return the plain-dict form used for JSON round-tripping."""
        return {'name': self.name, 'puid': self.puid, 'greeting': self._greeting}

    @property
    def greeting(self):
        """The greeting template with ``{name}`` filled in."""
        return self._greeting.format(name=self.name)
class Greetings(list):
    """A list of Greeting objects with JSON (de)serialization helpers."""

    def __init__(self):
        # deliberately takes no iterable: a Greetings always starts empty
        super().__init__()

    def toJSON(self):
        """Serialize every element via its items() dict, pretty-printed."""
        return json.dumps(self, default=lambda obj: obj.items(),
                          sort_keys=True, indent=4, ensure_ascii=False)

    def fromJSON(self, json_object):
        """Replace current contents with greetings parsed from JSON text."""
        self.clear()
        for record in json.loads(json_object):
            self.append(Greeting(**record))
def send_greeting(bot: wxpy.Bot, greeting: Greeting):
    """Send ``greeting.greeting`` to the friend matching ``greeting.puid``.

    Raises via wxpy.ensure_one if the puid does not match exactly one friend.
    """
    recipient = wxpy.ensure_one(bot.friends().search(puid=greeting.puid))
    # Bug fix: the original passed (people.name, greeting.greeting), printing
    # "Sending <name> to <message>" — the two format arguments were swapped.
    print("Sending {} to {}……".format(greeting.greeting, recipient.name))
    recipient.send(greeting.greeting)
def test():
    """Build a small Greetings list and round-trip it through JSON."""
    g = Greetings()
    g.append(Greeting('赵奔', '123', ''))
    # Bug fix: the original called Greeting('赵奔2', '232', '', '{}hao') with
    # FOUR positional arguments, but Greeting.__init__ accepts only
    # (name, puid, greeting) — a guaranteed TypeError.  The extra '{}hao'
    # argument is dropped here (it would also break the .greeting property,
    # since '{}' cannot be filled by the name= keyword).
    g.append(Greeting('赵奔2', '232', ''))
    json_object = g.toJSON()
    g.fromJSON(json_object)
    return g
def show_help():
    """Print command-line usage for this script to stdout."""
    print('Usage:')
    # script name and the option summary end up on the same output line
    print(os.path.basename(__file__), end=' ')
    print('[list] [send]')
    print(''' list\tgenerate friends list and keep that you want to send
    send\tsend message to those friends
    ''')
def main():
    """CLI driver: 'list' dumps friends to friends.json; 'send' greets them."""
    if len(sys.argv) < 2:
        show_help()
        sys.exit(1)
    if ('list' not in sys.argv) and ('send' not in sys.argv):
        show_help()
        sys.exit(1)
    # Log in (session cached on disk) and enable stable puids so the
    # identifiers stored in friends.json remain valid across logins.
    bot = wxpy.Bot(cache_path=True, console_qr=False)
    bot.enable_puid()
    if 'list' in sys.argv:
        greetings = Greetings()
        for friend in bot.friends():
            greetings.append(
                Greeting(
                    name=friend.name,
                    puid=friend.puid,
                )
            )
        with open('friends.json', 'w', encoding='utf8') as f:
            f.write(greetings.toJSON())
    if 'send' in sys.argv:
        greetings = Greetings()
        with open('friends.json', encoding='utf8') as f:
            greetings.fromJSON(f.read())
        for i in greetings:
            # best-effort: log the failure and keep sending to the rest
            try:
                send_greeting(bot, i)
            except Exception as e:
                print(e)
            sleep(0.5)  # throttle between sends — presumably anti-spam; confirm
    wxpy.embed()


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# ----------------------------------------------------------------------
# |
# | CentOsShell.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2019-08-30 19:25:23
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2019-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Contains the CentOsShell object"""
import os
import CommonEnvironment
from CommonEnvironment.Interface import staticderived, override, DerivedProperty
from CommonEnvironment.Shell.LinuxShellImpl import LinuxShellImpl
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# <Method '<...>' is abstract in class '<...>' but is not overridden> pylint: disable = W0223
@staticderived
class CentOsShell(LinuxShellImpl):
    """Shell for CentOS systems"""

    Name = DerivedProperty("CentOS")

    # ----------------------------------------------------------------------
    @staticderived
    @override
    class CommandVisitor(LinuxShellImpl.CommandVisitor):
        # The override below is created conditionally AT CLASS-CREATION TIME:
        # it exists only when `distro` is importable and reports a CentOS
        # major version below 7.  Otherwise (including ImportError) the base
        # LinuxShellImpl behaviour is used unchanged.
        try:
            import distro

            if int(distro.major_version()) < 7:
                # ----------------------------------------------------------------------
                @classmethod
                @override
                def OnSymbolicLink(cls, command):
                    # Older versions of CentOS do not support relative paths
                    return super(CentOsShell.CommandVisitor, cls).OnSymbolicLink(
                        command,
                        no_relative_flag=True,
                    )

        except ImportError:
            pass
|
nilq/baby-python
|
python
|
class ReasonCode:
    """Default server reason codes.

    Grouped by decade: 1x session, 2x validation, 3x authorization,
    4x routing, 5x dispatch, 6x command, 7x message, 8x gateway,
    10x application.
    """
    # General error
    GENERAL_ERROR = 1
    # General session error
    SESSION_ERROR = 11
    # The session resource is already registered
    SESSION_REGISTRATION_ERROR = 12
    # An authentication error occurred
    SESSION_AUTHENTICATION_FAILED = 13
    # An error occurred while unregistering the session in the server
    SESSION_UNREGISTER_FAILED = 14
    # The required action is invalid for current session state
    SESSION_INVALID_ACTION_FOR_STATE = 15
    # The session negotiation has timed out
    SESSION_NEGOTIATION_TIMEOUT = 16
    # Invalid selected negotiation options
    SESSION_NEGOTIATION_INVALID_OPTIONS = 17
    # Invalid session mode requested
    SESSION_INVALID_SESSION_MODE_REQUESTED = 18
    # General validation error
    VALIDATION_ERROR = 21
    # The envelope document is null
    VALIDATION_EMPTY_DOCUMENT = 22
    # The envelope document MIME type is invalid
    VALIDATION_INVALID_RESOURCE = 23
    # The request status is invalid
    VALIDATION_INVALID_STATUS = 24
    # The request identity is invalid
    VALIDATION_INVALID_IDENTITY = 25
    # The envelope originator or destination is invalid
    VALIDATION_INVALID_RECIPIENTS = 26
    # The command method is invalid
    VALIDATION_INVALID_METHOD = 27
    # The command URI format is invalid
    # NOTE(review): duplicates VALIDATION_INVALID_METHOD (27); likely meant
    # to be 28 — confirm against the protocol spec before changing the value.
    VALIDATION_INVALID_URI = 27
    # General authorization error
    AUTHORIZATION_ERROR = 31
    # The sender is not authorized to send messages to the message destination
    AUTHORIZATION_UNAUTHORIZED_SENDER = 32
    # The destination doesn't have an active account
    AUTHORIZATION_DESTINATION_ACCOUNT_NOT_FOUND = 33
    # The envelope quota limit has been exceeded
    AUTHORIZATION_QUOTA_THRESHOLD_EXCEEDED = 34
    # General routing error
    ROUTING_ERROR = 41
    # The message destination was not found
    ROUTING_DESTINATION_NOT_FOUND = 42
    # The message destination gateway was not found
    ROUTING_GATEWAY_NOT_FOUND = 43
    # The message destination was not found
    ROUTING_ROUTE_NOT_FOUND = 44
    # General dispatching error
    DISPATCH_ERROR = 51
    # General command processing error
    COMMAND_PROCESSING_ERROR = 61
    # There's no command processor available for process the request
    COMMAND_RESOURCE_NOT_SUPPORTED = 62
    # The command method is not supported
    COMMAND_METHOD_NOT_SUPPORTED = 63
    # The command method has an invalid argument value
    COMMAND_INVALID_ARGUMENT = 64
    # The requested command is not valid for current session mode
    COMMAND_INVALID_SESSION_MODE = 65
    # The command method was not allowed
    COMMAND_NOT_ALLOWED = 66
    # The command resource was not found
    COMMAND_RESOURCE_NOT_FOUND = 67
    # General message processing error
    # NOTE(review): duplicates COMMAND_PROCESSING_ERROR (61); by the decade
    # grouping this looks like it should be 71 — confirm before changing.
    MESSAGE_PROCESSING_ERROR = 61
    # The message content type is not supported
    MESSAGE_UNSUPPORTED_CONTENT_TYPE = 71
    # General gateway processing error
    GATEWAY_ERROR = 81
    # The content type is not supported by the gateway
    GATEWAY_CONTENT_TYPE_NOT_SUPPORTED = 82
    # The message destination was not found on gateway
    GATEWAY_DESTINATION_NOT_FOUND = 83
    # The functionality is not supported by the gateway
    GATEWAY_NOT_SUPPORTED = 84
    # General application processing error
    APPLICATION_ERROR = 101
|
nilq/baby-python
|
python
|
from django.db import models
from subscribers import mailchimp
class AbstractSubscriber(models.Model):
    """Abstract base model for anything identified by an (optional) e-mail."""

    # optional e-mail address of the subscriber
    email = models.EmailField(blank=True, null=True)
    # set once, automatically, when the row is first created
    created_on = models.DateField(auto_now_add=True)
    objects = models.Manager()

    class Meta:
        abstract = True

    def __str__(self):
        # NOTE(review): returns None when email is unset — __str__ must
        # return a str; confirm email is always populated before display.
        return self.email

    def clean(self):
        # NOTE(review): the MailChimp client is instantiated but never used —
        # this looks like unfinished validation/sync logic; confirm intent.
        if self.email is not None:
            klass = mailchimp.MailChimp()
class EmailSubscriber(AbstractSubscriber):
    """People who subscribed to the website.

    Concrete model: adds no fields beyond AbstractSubscriber.
    """
    pass
|
nilq/baby-python
|
python
|
from possum import *
# Build a POSSUM spectrum simulator and generate a training set.
spec = possum()
# N spectra with a fixed seed for reproducibility; pcomplex=0.5 is presumably
# the fraction of complex (multi-component) sources — confirm against possum docs.
spec._generateParams(N=30000, fluxMin=0.1, noiseMax=0.2, pcomplex=0.5, seed=923743)
# NOTE(review): relies on underscore-prefixed (private) methods of possum.
spec._simulateNspec(save=True, dir='data/train/V2/', timeit=True)
|
nilq/baby-python
|
python
|
from django.db import models
from djangae.tasks.deferred import defer
from djangae.test import TestCase, TaskFailedError
def test_task(*args, **kwargs):
    """No-op target so a task can be deferred in the tests below."""
    return None
def assert_cache_wiped(instance):
    """Deferred-task target: fail if *instance* still caches its `b` relation."""
    field = DeferModelA._meta.get_field("b")
    assert field.get_cached_value(instance, None) is None
class DeferModelA(models.Model):
    # FK whose per-instance relation cache is exercised by DeferTests
    b = models.ForeignKey("DeferModelB", on_delete=models.CASCADE)

    class Meta:
        app_label = "djangae"
class DeferModelB(models.Model):
    # Intentionally empty: exists only as the target of DeferModelA.b

    class Meta:
        app_label = "djangae"
class DeferTests(TestCase):
    """Tests for djangae's defer() task wrapper."""

    def test_wipe_related_caches(self):
        """defer() must serialize instances without their FK caches while
        leaving the caller's in-memory instance untouched."""
        b = DeferModelB.objects.create()
        a = DeferModelA.objects.create(b=b)
        a.b  # Make sure we access it
        cache_name = DeferModelA._meta.get_field("b").get_cache_name()
        self.assertTrue(getattr(a, cache_name))
        defer(assert_cache_wiped, a)
        # Should raise an assertion error if the cache existed
        try:
            self.process_task_queues()
        except TaskFailedError as e:
            raise e.original_exception
        # Should not have wiped the cache for us!
        self.assertIsNotNone(getattr(a, cache_name, None))

    def test_queues_task(self):
        """defer() enqueues exactly one task."""
        initial_count = self.get_task_count()
        defer(test_task)
        self.assertEqual(self.get_task_count(), initial_count + 1)
|
nilq/baby-python
|
python
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.infra_validator.model_server_clients.tensorflow_serving_client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Text
from unittest import mock
import grpc
import tensorflow as tf
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import types
from tfx.components.infra_validator.model_server_clients import tensorflow_serving_client
from google.protobuf import json_format
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import get_model_status_pb2
from tensorflow_serving.apis import regression_pb2
def _make_response(
    payload: Dict[Text, Any]) -> get_model_status_pb2.GetModelStatusResponse:
  """Build a GetModelStatusResponse proto from a JSON-style dict payload."""
  response = get_model_status_pb2.GetModelStatusResponse()
  json_format.ParseDict(payload, response)
  return response
class TensorflowServingClientTest(tf.test.TestCase):
  """Tests for TensorFlowServingClient with the gRPC service stubs mocked."""

  def setUp(self):
    """Patch the model/prediction service stub classes so no server is needed."""
    super(TensorflowServingClientTest, self).setUp()
    self.model_stub_patcher = mock.patch('tensorflow_serving.apis.model_service_pb2_grpc.ModelServiceStub')  # pylint: disable=line-too-long
    self.model_stub_cls = self.model_stub_patcher.start()
    self.model_stub = self.model_stub_cls.return_value
    self.prediction_stub_patcher = mock.patch('tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub')  # pylint: disable=line-too-long
    self.prediction_stub_cls = self.prediction_stub_patcher.start()
    self.prediction_stub = self.prediction_stub_cls.return_value

  def tearDown(self):
    """Undo the stub patches started in setUp."""
    super(TensorflowServingClientTest, self).tearDown()
    self.model_stub_patcher.stop()
    self.prediction_stub_patcher.stop()

  def testGetModelState_ReturnsReady_IfAllAvailable(self):
    """All versions AVAILABLE -> READY."""
    # Prepare stub and client.
    self.model_stub.GetModelStatus.return_value = _make_response({
        'model_version_status': [
            {'state': 'AVAILABLE'},
            {'state': 'AVAILABLE'},
            {'state': 'AVAILABLE'}
        ]
    })
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Call.
    result = client._GetServingStatus()
    # Check result.
    self.assertEqual(result, types.ModelServingStatus.READY)

  def testGetModelState_ReturnsNotReady_IfAnyStateNotAvailable(self):
    """Any version still LOADING -> NOT_READY."""
    # Prepare stub and client.
    self.model_stub.GetModelStatus.return_value = _make_response({
        'model_version_status': [
            {'state': 'AVAILABLE'},
            {'state': 'AVAILABLE'},
            {'state': 'LOADING'}
        ]
    })
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Call.
    result = client._GetServingStatus()
    # Check result.
    self.assertEqual(result, types.ModelServingStatus.NOT_READY)

  def testGetModelState_ReturnsUnavailable_IfAnyStateEnded(self):
    """Any version in END state -> UNAVAILABLE."""
    # Prepare stub and client.
    self.model_stub.GetModelStatus.return_value = _make_response({
        'model_version_status': [
            {'state': 'AVAILABLE'},
            {'state': 'AVAILABLE'},
            {'state': 'END'}
        ]
    })
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Call.
    result = client._GetServingStatus()
    # Check result.
    self.assertEqual(result, types.ModelServingStatus.UNAVAILABLE)

  def testGetModelState_ReturnsNotReady_IfEmptyState(self):
    """No version statuses at all -> NOT_READY."""
    # Prepare stub and client.
    self.model_stub.GetModelStatus.return_value = _make_response({
        'model_version_status': []  # Empty
    })
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Calls
    result = client._GetServingStatus()
    # Check result.
    self.assertEqual(result, types.ModelServingStatus.NOT_READY)

  def testGetModelState_ReturnsNotReady_IfServerUnavailable(self):
    """A gRPC transport error is treated as NOT_READY, not raised."""
    # Prepare stub and client.
    self.model_stub.GetModelStatus.side_effect = grpc.RpcError
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Call.
    result = client._GetServingStatus()
    # Check result.
    self.assertEqual(result, types.ModelServingStatus.NOT_READY)

  def testIssueRequests_NoErrorIfSucceeded(self):
    """Requests are dispatched to the right RPC per request type."""
    # Prepare requests and client.
    r1 = classification_pb2.ClassificationRequest()
    r2 = classification_pb2.ClassificationRequest()
    r3 = regression_pb2.RegressionRequest()
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Call.
    client.SendRequests([r1, r2, r3])
    # Check calls
    self.prediction_stub.Classify.assert_called_with(r1)
    self.prediction_stub.Classify.assert_called_with(r2)
    self.prediction_stub.Regress.assert_called_with(r3)

  def testIssueRequests_RaiseValueErrorOnUnrecognizedRequestType(self):
    """A non-request object makes SendRequests raise ValidationFailed."""
    # Prepare requests and client.
    not_a_request = 'i am a request'
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    # Call
    with self.assertRaises(error_types.ValidationFailed):
      client.SendRequests([not_a_request])

  def testIssueRequests_RaiseRpcErrorIfRpcFailed(self):
    """A gRPC failure during Classify surfaces as ValidationFailed."""
    # Prepare client and a side effect.
    request = classification_pb2.ClassificationRequest()
    client = tensorflow_serving_client.TensorFlowServingClient(
        'localhost:1234', 'a_model_name')
    self.prediction_stub.Classify.side_effect = grpc.RpcError
    # Call.
    with self.assertRaises(error_types.ValidationFailed):
      client.SendRequests([request])
# Standard entry point: defer to TensorFlow's test runner.
if __name__ == '__main__':
  tf.test.main()
|
nilq/baby-python
|
python
|
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import unittest
import python_msx_sdk
from python_msx_sdk.api.workflow_events_api import WorkflowEventsApi # noqa: E501
class TestWorkflowEventsApi(unittest.TestCase):
    """WorkflowEventsApi unit test stubs

    NOTE: generated by openapi-generator; the test bodies are intentionally
    empty placeholders to be filled in per endpoint.
    """

    def setUp(self):
        self.api = WorkflowEventsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_create_workflow_event(self):
        """Test case for create_workflow_event

        Creates a new workflow event.  # noqa: E501
        """
        pass

    def test_delete_workflow_event(self):
        """Test case for delete_workflow_event

        Deletes a workflow event.  # noqa: E501
        """
        pass

    def test_get_workflow_event(self):
        """Test case for get_workflow_event

        Returns a workflow event.  # noqa: E501
        """
        pass

    def test_get_workflow_events_list(self):
        """Test case for get_workflow_events_list

        Returns a list of workflow events.  # noqa: E501
        """
        pass

    def test_update_workflow_event(self):
        """Test case for update_workflow_event

        Updates a workflow event.  # noqa: E501
        """
        pass
# Standard entry point: run this module's test cases directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
# coding: utf-8
__author__ = 'Paul Cunningham'
__email__ = 'pjcunningham@borsuk.co.uk'
# Fix: was `__copyright` (missing the trailing underscores), inconsistent
# with the other module-metadata dunder names in this block.
__copyright__ = 'Copyright 2017, Paul Cunningham'
__license__ = 'MIT License'
__version__ = '0.1'
from .select2 import Select2
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
# NOTE(review): `prices` is never defined in this file — this reads like a
# notebook export missing the cell that loads it.
returns = prices.pct_change()
# NOTE(review): the results of dropna()/std() are discarded (not assigned).
returns.dropna()
returns.std()
deviations = (returns - returns.mean())**2
# NOTE(review): deviations are squared AGAIN here, so `variance` below is
# computed from FOURTH powers — almost certainly a bug; confirm the intent.
squared_deviations = deviations ** 2
variance = squared_deviations.mean()
volatility = np.sqrt(variance)
# Fama-French market-equity decile portfolios, monthly, equal-weighted.
me_m = pd.read_csv('./Data/Portfolios_Formed_on_ME_monthly_EW.csv',
                   header=0, index_col=0, parse_dates=True, na_values=-99.99)
# Keep the bottom and top deciles and rescale from percent to decimals.
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets / 100
rets.plot.line()
rets.head()
# Index arrives as YYYYMM integers; convert to monthly periods.
rets.index = pd.to_datetime(rets.index, format='%Y%m')
rets.head()
rets.index = rets.index.to_period('M')
rets['1975']
# Wealth index: growth of $1000 in LargeCap; drawdown = drop from running peak.
wealth_index = 1000 * (1+rets['LargeCap']).cumprod()
wealth_index.plot.line()
previous_peaks = wealth_index.cummax()
previous_peaks.plot.line()
# NOTE(review): this `drawdown` Series is shadowed by the drawdown()
# function defined just below.
drawdown = (wealth_index - previous_peaks) / previous_peaks
drawdown.plot()
drawdown.min()
drawdown['1975':].min()
drawdown['1975':].idxmin()
def drawdown(return_series: pd.Series) -> pd.DataFrame:
    """Compute the wealth index, running peaks and percent drawdowns.

    :param return_series: periodic asset returns in decimal form
    :return: DataFrame with columns ``Wealth``, ``Peaks`` and ``Drawdown``
    """
    wealth = 1000 * (1 + return_series).cumprod()
    peaks = wealth.cummax()
    return pd.DataFrame({
        "Wealth": wealth,
        "Peaks": peaks,
        "Drawdown": (wealth - peaks) / peaks,
    })
drawdown(rets['LargeCap']).head()
drawdown(rets['LargeCap'])[['Wealth', 'Peaks']].plot()
import pandas as pd
import EDHEC.edhec_risk_kit as erk
hfi = erk.get_hfi_returns()
hfi.head()
pd.concat([hfi.mean(), hfi.median(), hfi.mean()>hfi.median()], axis='columns')
erk.skewness(hfi).sort_values()
import scipy.stats
scipy.stats.skew(hfi)
import numpy as np
normal_rets = np.random.normal(0, .15, size=(263, 1))
erk.skewness(normal_rets)
erk.kurtosis(normal_rets)
erk.kurtosis(hfi)
scipy.stats.kurtosis(normal_rets)
scipy.stats.jarque_bera(normal_rets)
scipy.stats.jarque_bera(hfi)
erk.is_normal(normal_rets)
hfi.aggregate(erk.is_normal)
ffme = erk.get_ffme_returns()
erk.skewness(ffme)
erk.kurtosis(ffme)
hfi.std(ddof=0)
hfi[hfi<0].std(ddof=0)
erk.semideviation(hfi)
# Historical VaR
# Parametric VaR - Gaussian
# Modified Cornish-Fisher VaR
np.percentile(hfi, q=5, axis=0)
hfi.apply(lambda x: np.percentile(x, q=5, axis=0))
erk.var_historic(hfi)
from scipy.stats import norm
z = norm.ppf(.05)
hfi.mean() + z*hfi.std(ddof=0)
erk.var_gaussian(hfi)
var_list = [erk.var_gaussian(hfi), erk.var_gaussian(hfi, modified=True), erk.var_historic(hfi)]
comparison = pd.concat(var_list, axis=1)
comparison.columns = ['Gaussian', 'Cornish-Fisher', 'Historic']
comparison.plot.bar(title='EDHEC Hedge Fund Indices: VaR Comparison')
erk.cvar_historic(hfi)
|
nilq/baby-python
|
python
|
import logging
from django.contrib.auth.backends import (
RemoteUserBackend,
get_user_model,
)
from django.contrib.auth.models import (
Group,
)
from django.utils.translation import ugettext as _
from rest_framework import exceptions
from rest_framework_auth0.settings import (
auth0_api_settings,
)
from rest_framework_auth0.utils import (
get_auth_token,
get_client_setting,
get_groups_from_payload,
decode_auth_token,
)
from rest_framework.authentication import (
BaseAuthentication,
)
# Resolver that extracts a username from a decoded JWT payload; which claim is
# used is configured via auth0_api_settings.GET_USERNAME_HANDLER.
get_username_from_payload = auth0_api_settings.GET_USERNAME_HANDLER

logger = logging.getLogger(__name__)
class Auth0JSONWebTokenAuthentication(BaseAuthentication, RemoteUserBackend):
    """
    Clients should authenticate by passing the token key in the "Authorization"
    HTTP header, prepended with the string specified in the setting
    `AUTH_HEADER_PREFIX`. For example:

    Authorization: JWT eyJhbGciOiAiSFMyNTYiLCAidHlwIj

    By default, the ``authenticate_credentials`` method creates ``User`` objects for
    usernames that don't already exist in the database. Subclasses can disable
    this behavior by setting the ``create_unknown_user`` attribute to
    ``False``.
    """
    www_authenticate_realm = 'api'
    # Create a User object if not already in the database?
    create_unknown_user = True

    def authenticate(self, request):
        """
        You should pass a header of your request: clientcode: web
        This function initialize the settings of JWT with the specific client's informations.

        Returns (user, payload) on success, or None when no token is present
        (letting other authentication classes try).
        """
        client = None
        payload = None
        logger.debug("authenticating user using Auth0JSONWebTokenAuthentication")
        client = get_client_setting(request)
        auth_token = get_auth_token(request)
        # No credentials supplied at all -> defer to other authenticators.
        if auth_token is None:
            return None
        payload = decode_auth_token(
            client=client,
            auth_token=auth_token
        )
        # Add request param to authenticated_credentials() call
        user = self.authenticate_credentials(request, payload)
        return (user, payload)

    def authenticate_credentials(self, request, payload):
        """
        Returns an active user that matches the payload's user id and email.

        Raises AuthenticationFailed when the payload carries no username or
        (with create_unknown_user=False) the user does not exist.
        """
        UserModel = get_user_model()
        remote_user = get_username_from_payload(payload)
        if not remote_user:
            msg = _('Invalid payload.')
            logger.info(
                "{message}".format(
                    message=msg
                )
            )
            raise exceptions.AuthenticationFailed(msg)
        # RemoteUserBackend behavior:
        # return
        user = None
        # Optionally normalize Auth0's "provider|id" usernames (see clean_username).
        if auth0_api_settings.REPLACE_PIPE_FOR_DOTS_IN_USERNAME:
            username = self.clean_username(remote_user)
        else:
            username = remote_user
        logger.debug(
            "username = {username}".format(
                username=username
            )
        )
        if self.create_unknown_user:
            user, created = UserModel._default_manager.get_or_create(**{
                UserModel.USERNAME_FIELD: username
            })
            if created:
                # Hook for subclasses to initialize newly-created users.
                user = self.configure_user(request, user)
        else:
            try:
                user = UserModel._default_manager.get_by_natural_key(username)
            except UserModel.DoesNotExist:
                # NOTE(review): message says "Invalid signature" although the
                # failure is a missing user — confirm whether intentional.
                msg = _('Invalid signature.')
                raise exceptions.AuthenticationFailed(msg)
        # RemoteUserBackend behavior:
        # pass
        # Sync Django groups from the token's authorization data (if enabled).
        user = self.configure_user_permissions(user, payload)
        return user if self.user_can_authenticate(user) else None

    def authenticate_header(self, request):
        """
        Return a string to be used as the value of the `WWW-Authenticate`
        header in a `401 Unauthenticated` response, or `None` if the
        authentication scheme should return `403 Permission Denied` responses.
        """
        return '{0} realm="{1}"'.format(
            auth0_api_settings.AUTH_HEADER_PREFIX,
            self.www_authenticate_realm
        )

    def configure_user_permissions(self, user, payload):
        """
        Validate if AUTHORIZATION_EXTENSION is enabled, defaults to False

        If AUTHORIZATION_EXTENSION is enabled, created and associated groups
        with the current user (the user of the token).
        """
        if auth0_api_settings.AUTHORIZATION_EXTENSION:
            logger.debug(
                "Using Auth0 Authorization Extension"
            )
            logger.debug(
                "Clearing groups for user: {username}".format(
                    username=user.username
                )
            )
            # Group membership is fully token-driven: drop stale groups first.
            user.groups.clear()
            try:
                logger.debug(
                    "Getting groups from payload"
                )
                groups = get_groups_from_payload(payload)
                logger.debug(
                    "Groups: {groups}".format(
                        groups=groups
                    )
                )
            except Exception:  # No groups where defined in Auth0?
                logger.warning(
                    "No groups were defined for user: {username}".format(
                        username=user.username
                    )
                )
                return user
            for user_group in groups:
                group, created = Group.objects.get_or_create(name=user_group)
                logger.debug(
                    "Associating group {group} with user {username}".format(
                        group=group,
                        username=user.username
                    )
                )
                user.groups.add(group)
        return user

    def clean_username(self, username):
        """
        Cleans the "username" prior to using it to get or create the user object.
        Returns the cleaned username.

        Auth0 default username (user_id) field returns, e.g. auth0|123456789...xyz
        which contains illegal characters ('|').
        """
        logger.debug("Cleaning username")
        username = username.replace('|', '.')
        logger.debug(
            "Clean username: {username}".format(
                username=username
            )
        )
        return username
|
nilq/baby-python
|
python
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD


class AccountUD(DistributedObjectUD):
    """Uberdog-side distributed Account object; currently only defines its
    own notify category so Account messages can be filtered in the logs."""
    notify = DirectNotifyGlobal.directNotify.newCategory("AccountUD")
|
nilq/baby-python
|
python
|
from turtle import color
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
import math

# Unit-circle outline.
th = np.linspace(0, 2*np.pi, 1000)
r = 1
c = r*np.cos(th)
d = r*np.sin(th)

figure, axes = plt.subplots(1)
axes.plot(c, d)
axes.set_aspect(1)
plt.title("sensor position")

# Seven sensors evenly spaced around the unit circle (every 2*pi/7 radians).
# Generalized from a hard-coded list of (cos, sin) pairs, which were exactly
# these values rounded to 8 decimals.
n_sensors = 7
for k in range(n_sensors):
    angle = 2*np.pi*k/n_sensors
    plt.plot(np.cos(angle), np.sin(angle), 'o', color="blue")

plt.plot(c, d, color="red")
# Radius drawn from the center to the first sensor at (1, 0).
plt.plot([0, 1], [0, 0], color="red")
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""Regenerate the autogenerated dependency/rule sections of the Makefile."""
import itertools
import os
import re

# Matches `#include <...>` and `#include "..."`; group 1 is the opening
# delimiter (< or "), group 2 the header path.
_RE_INCLUDE = re.compile('#include ([<"])([^"<>]+)')

# External system headers mapped to the linker library that provides them.
_LIB_BY_HEADER = {
    'curl/curl.h': 'curl',
    're2/re2.h': 're2',
    'sqlite3.h': 'sqlite3',
}
def dfs(root, get_children):
    """Yield ``root`` and every node reachable via ``get_children``, once each.

    Depth-first, stack-based traversal; cycles are handled by tracking
    visited nodes by identity, so unhashable nodes are supported.
    """
    pending = [root]
    seen = {id(root)}
    while pending:
        node = pending.pop()
        yield node
        for nxt in get_children(node):
            if id(nxt) in seen:
                continue
            seen.add(id(nxt))
            pending.append(nxt)
class File:
    """One .h/.cpp file: its includes, implementers, and build metadata."""

    def __init__(self, path):
        self.path = path
        # Header this source implements when declared via "// IMPLEMENTS:".
        self.implemented_header_path = None
        self.path_without_ext, ext = os.path.splitext(self.path)
        self.is_source = ext == '.cpp'
        self.is_test = self.is_source and self.path_without_ext.endswith('_test')
        self.has_main_function = False
        self.headers_paths = []   # include paths (strings) found in the file
        self.headers = []         # resolved File objects for headers_paths
        self.sources = []         # source Files implementing this header
        self.library = None       # Library this file is assigned to, if any
        self.external_libs = []   # external link libraries required
        self._load_content()

    def _get_path_from_root(self, path):
        # Bare names are relative to this file's directory; anything containing
        # '/' is treated as already repo-relative.
        return path if '/' in path else os.path.join(os.path.dirname(self.path), path)

    def _load_content(self):
        """Scan the file once for includes, main(), and IMPLEMENTS markers."""
        with open(self.path) as f:
            for line in f:
                include_match = _RE_INCLUDE.match(line)
                if include_match:
                    header = include_match.group(2)
                    if include_match.group(1) == '"':
                        # Local include -> dependency on a project header.
                        self.headers_paths.append(self._get_path_from_root(header))
                    elif header in _LIB_BY_HEADER:
                        # System include -> dependency on an external library.
                        self.external_libs.append(_LIB_BY_HEADER[header])
                elif self.is_source and line.startswith('int main('):
                    self.has_main_function = True
                elif line.startswith('// IMPLEMENTS:'):
                    self.implemented_header_path = self._get_path_from_root(line[len('// IMPLEMENTS:'):].strip())

    def resolve_direct_dependencies(self, all_files):
        """Link headers_paths to File objects and register this source with
        the header it implements (foo.cpp -> foo.h unless overridden)."""
        self.headers = [all_files[path] for path in self.headers_paths]
        if self.is_source:
            header = all_files.get(self.implemented_header_path or self.path_without_ext + '.h')
            if header:
                header.sources.append(self)

    def get_code_dependencies(self):
        """Transitive header dependencies; own entry first, rest sorted."""
        deps = [header.path for header in dfs(self, lambda file: file.headers)]
        return [deps[0]] + sorted(deps[1:])

    def get_bin_dependencies(self):
        """Return (link inputs, external libs) for a binary rooted here.

        Link inputs are object files (own object first, rest sorted) followed
        by library paths ordered by each Library's sort_key.
        """
        objects = []
        libraries = set()
        external_libs = set()
        for file in dfs(self, lambda file: itertools.chain(file.headers, file.sources)):
            if file.library:
                libraries.add((file.library.sort_key, file.library.path))
            elif file.is_source:
                objects.append(file.path_without_ext + '.o')
            external_libs.update(file.external_libs)
        return ([objects[0]] + sorted(objects[1:]) + [path for _, path in sorted(libraries)], sorted(external_libs))

    def add_to_library(self, library):
        """Assign this file to `library`; source files contribute their .o."""
        if self.has_main_function:
            raise RuntimeError(f'File with main function added to library: {self.path}')
        self.library = library
        if self.is_source:
            library.objects.add(self.path_without_ext + '.o')

    def add_to_library_rec(self, library):
        """Assign this file and all not-yet-assigned reachable files."""
        def add_rec(file):
            file.add_to_library(library)
            for child in itertools.chain(file.headers, file.sources):
                if not child.library:
                    add_rec(child)
        add_rec(self)
class Library:
    """A static-library target: output path, link-order key, member objects."""

    def __init__(self, path, sort_key):
        self.path = path          # output .a path
        self.sort_key = sort_key  # ordering key used when linking binaries
        self.objects = set()      # .o files belonging to this library
def enum_targets():
    """Yield repo-relative paths of every .h/.cpp file under the CWD.

    The .git directory (when present) is pruned from the walk in place.
    """
    for (dir_path, dir_names, file_names) in os.walk('.'):
        if dir_path == '.' and '.git' in dir_names:
            # Robustness fix: the previous unconditional remove() raised
            # ValueError when run in a tree without a .git directory.
            dir_names.remove('.git')
        for file_name in file_names:
            _, extension = os.path.splitext(file_name)
            if extension in ['.h', '.cpp']:
                # dir_path starts with './'; strip it for a clean relative path.
                yield os.path.join(dir_path[2:], file_name)
def format_rule(target, dependencies, command, max_line_length=120):
    """Render one Makefile rule, wrapping the dependency list with ' \\'
    continuations so no line exceeds ``max_line_length``."""
    pieces = [target + ':']
    length = len(pieces[0])
    for dep in dependencies:
        length += len(dep) + 3
        if length > max_line_length:
            pieces.append(' \\\n\t' + dep)
            length = 8 + len(dep)  # tab counts as 8 columns
        else:
            pieces.append(' ' + dep)
    pieces.append(f'\n\t{command}\n')
    return ''.join(pieces)
def replace_section(content, start_marker, end_marker, section_content):
    """Replace the text between start_marker and end_marker with
    section_content, keeping both markers in place.

    :raises RuntimeError: if either marker is missing.
    """
    start = content.find(start_marker)
    if start == -1:
        raise RuntimeError(f'"{start_marker}" not found')
    start += len(start_marker)
    # Bug fix: search for the end marker *after* the start marker; previously
    # an occurrence of end_marker before start_marker corrupted the output.
    end = content.find(end_marker, start)
    if end == -1:
        raise RuntimeError(f'"{end_marker}" not found')
    return content[:start] + section_content + content[end:]
def main():
    """Rescan the source tree and rewrite the Makefile's autogenerated sections."""
    all_files = {}
    for path in enum_targets():
        all_files[path] = File(path)
    for file in all_files.values():
        file.resolve_direct_dependencies(all_files)
    # Library assignment: mwclient pulls in its transitive closure (sort_key 2
    # so it links after wikiutil, sort_key 1); wikiutil files are added one by one.
    mwclient_lib = Library('mwclient/libmwclient.a', 2)
    wikiutil_lib = Library('orlodrimbot/wikiutil/libwikiutil.a', 1)
    for file in all_files.values():
        if file.path.startswith('mwclient/') and not file.is_test and not file.path.startswith('mwclient/tests/'):
            file.add_to_library_rec(mwclient_lib)
        elif file.path.startswith('orlodrimbot/wikiutil/') and not file.is_test:
            file.add_to_library(wikiutil_lib)
    rules = []
    tests = []
    binaries = []
    for path, file in sorted(all_files.items()):
        if not file.is_source:
            continue
        # Compile rule for every source file.
        rules.append(format_rule(file.path_without_ext + '.o', file.get_code_dependencies(),
                                 '$(CXX) $(CXXFLAGS) -c -o $@ $<'))
        if file.has_main_function:
            # Link rule for every binary (source containing int main()).
            objects, external_libs = file.get_bin_dependencies()
            external_libs_command = ''.join(' -l' + lib for lib in external_libs)
            rules.append(format_rule(file.path_without_ext, objects, '$(CXX) -o $@ $^' + external_libs_command))
            if file.is_test:
                tests.append(file.path_without_ext)
            else:
                binaries.append(file.path_without_ext)
    for library in [mwclient_lib, wikiutil_lib]:
        rules.append(format_rule(library.path, sorted(library.objects), 'ar rcs $@ $^'))
    # Splice the generated lists and rules between the marker comments.
    with open('Makefile', 'r') as f:
        content = f.read()
    content = replace_section(content, '# autogenerated-lists-begin\n', '# autogenerated-lists-end\n',
                              'BINARIES= \\\n\t{binaries}\nTESTS= \\\n\t{tests}\n'.format(
                                  binaries=' \\\n\t'.join(binaries), tests=' \\\n\t'.join(tests)))
    content = replace_section(content, '# autogenerated-rules-begin\n', '# autogenerated-rules-end\n', ''.join(rules))
    with open('Makefile', 'w') as f:
        f.write(content)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.urls import path

from .views import IndexClassView, index

# URL routes: the site root uses the function-based view; the same class-based
# view is exposed twice with different templates; plus the admin site.
urlpatterns = [
    path("", index, name="home"),
    path(
        "class", IndexClassView.as_view(template_name="index.html"), name="home_class"
    ),
    path(
        "class2",
        IndexClassView.as_view(template_name="index2.html"),
        name="home_class2",
    ),
    path("admin/", admin.site.urls),
]
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
"""Factory functions for face-recognition Caffe feature extractors."""
from __future__ import print_function
import math
import numpy as np
import os
import sys
# Make sibling packages importable when running from this directory.
sys.path.insert(0, '../facealign')
sys.path.insert(0, '../util')
from caffe_extractor import CaffeExtractor
def model_centerface(do_mirror):
    """Build the CenterFace extractor; returns (extractor, (width, height))."""
    base = './models/centerface/'
    extractor = CaffeExtractor(
        base + 'face_deploy.prototxt',
        base + 'face_model.caffemodel',
        do_mirror=do_mirror,
        featLayer='fc5',
    )
    return extractor, (96, 112)
def model_sphereface(do_mirror):
    """Build the SphereFace extractor; returns (extractor, (width, height))."""
    base = './models/sphereface/'
    extractor = CaffeExtractor(
        base + 'sphereface_deploy.prototxt',
        base + 'sphereface_model.caffemodel',
        do_mirror=do_mirror,
        featLayer='fc5',
    )
    return extractor, (96, 112)
def model_AMSoftmax(do_mirror):
    """Build the AM-Softmax extractor; returns (extractor, (width, height)).

    When mirroring is requested, a prototxt that performs mirror+normalize
    inside the network is selected — presumably why CaffeExtractor itself is
    always created with do_mirror=False here. NOTE(review): confirm this
    asymmetry is intentional.
    """
    model_dir = './models/AMSoftmax/'
    if do_mirror:
        model_proto = model_dir + 'face_deploy_mirror_normalize.prototxt'
    else:
        model_proto = model_dir + 'deploy.prototxt'
    model_path = model_dir + 'face_train_test_iter_30000.caffemodel'
    image_size = (96, 112)
    extractor = CaffeExtractor(model_proto, model_path, do_mirror = False, featLayer='fc5')
    return extractor, image_size
def model_arcface(do_mirror):
    """Build the ArcFace (r50) extractor; returns (extractor, (width, height))."""
    base = './models/arcface/'
    extractor = CaffeExtractor(
        base + 'model.prototxt',
        base + 'model-r50-am.caffemodel',
        do_mirror=do_mirror,
        featLayer='fc1',
    )
    return extractor, (112, 112)
def model_mobileface(do_mirror):
    """Build the MobileFaceNet (dim128) extractor; returns (extractor, size)."""
    base = './models/mobilefacenet/'
    extractor = CaffeExtractor(
        base + 'mobilefacenet-res2-6-10-2-dim128-opencv.prototxt',
        base + 'mobilefacenet-res2-6-10-2-dim128.caffemodel',
        do_mirror=do_mirror,
        featLayer='fc1',
    )
    return extractor, (112, 112)
def model_mobileface2(do_mirror):
    """Build the alternative MobileFaceNet extractor; returns (extractor, size)."""
    base = './models/mobilefacenet/'
    extractor = CaffeExtractor(
        base + 'model.prototxt',
        base + 'model.caffemodel',
        do_mirror=do_mirror,
        featLayer='fc1',
    )
    return extractor, (112, 112)
def model_factory(name, do_mirror):
    """Look up a model builder by name and construct it.

    :param name: one of the registered model keys below
    :param do_mirror: whether features are averaged with the mirrored image
    :return: (extractor, image_size) produced by the selected builder
    :raises KeyError: if ``name`` is not a registered model
    """
    model_dict = {
        'centerface': model_centerface,
        'sphereface': model_sphereface,
        'AMSoftmax': model_AMSoftmax,
        'arcface': model_arcface,
        'mobileface': model_mobileface,
        'mobileface2': model_mobileface2,
    }
    try:
        model_func = model_dict[name]
    except KeyError:
        # Same exception type as before, but with an actionable message.
        raise KeyError('unknown model {!r}; available: {}'.format(
            name, ', '.join(sorted(model_dict))))
    return model_func(do_mirror)
|
nilq/baby-python
|
python
|
from raytracer.tuple import (
tuple,
point,
vector,
magnitude,
normalize,
dot,
cross,
reflect,
Color,
)
from raytracer.rays import Ray
from raytracer.spheres import Sphere
from raytracer.intersections import Intersection, intersections, hit, prepare_computations
from raytracer.lights import PointLight
from raytracer.materials import Material, lighting
from raytracer.transformations import (
translation,
scaling,
rotation_x,
rotation_y,
rotation_z,
shearing,
)
from raytracer.util import equal
from raytracer.world import World, default_world, shade_hit, color_at
from raytracer.matrices import Matrix, I
from raytracer.patterns import DummyPattern, StripePattern, GradientPattern, RingPattern, CheckersPattern
# Shared color constants used throughout the pattern tests.
black = Color(0, 0, 0)
white = Color(1, 1, 1)
def test_creating_a_stripe_pattern():
    """A stripe pattern stores its two colors as `a` and `b`."""
    stripes = StripePattern(white, black)
    assert stripes.a == white
    assert stripes.b == black
def test_a_stripe_pattern_is_constant_in_y():
    """Moving along y never changes the stripe color."""
    stripes = StripePattern(white, black)
    for y in (0, 1, 2):
        assert stripes.pattern_at(point(0, y, 0)) == white
def test_a_stripe_pattern_is_constant_in_z():
    """Moving along z never changes the stripe color.

    Renamed from `..._in_b`: the test varies the z coordinate, so the old
    name mislabelled the axis.
    """
    pattern = StripePattern(white, black)
    assert pattern.pattern_at(point(0, 0, 0)) == white
    assert pattern.pattern_at(point(0, 0, 1)) == white
    assert pattern.pattern_at(point(0, 0, 2)) == white
def test_a_stripe_alternates_in_x():
    """Stripe color flips at every whole unit of x (floor-based)."""
    pattern = StripePattern(white, black)
    cases = [
        (0.0, white), (0.9, white), (1.0, black),
        (-0.1, black), (-1.0, black), (-1.1, white),
    ]
    for x, expected in cases:
        assert pattern.pattern_at(point(x, 0, 0)) == expected
def test_lighting_with_a_pattern_applied():
    """With ambient=1 and no diffuse/specular, lighting() returns the raw
    pattern color at the point."""
    m = Material()
    m.pattern = StripePattern(white, black)
    m.ambient = 1
    m.diffuse = 0
    m.specular = 0
    eyev = vector(0, 0, -1)
    normalv = vector(0, 0, -1)
    light = PointLight(point(0, 0, -10), white)
    obj = Sphere()  # renamed from `object` to stop shadowing the builtin
    c1 = lighting(m, obj, light, point(0.9, 0, 0), eyev, normalv, False)
    c2 = lighting(m, obj, light, point(1.1, 0, 0), eyev, normalv, False)
    assert c1 == white
    assert c2 == black
def test_stripes_with_an_object_transformation():
    """Pattern coordinates follow the object's transform (scaled sphere)."""
    obj = Sphere()  # renamed from `object` to stop shadowing the builtin
    obj.set_transform(scaling(2, 2, 2))
    pattern = StripePattern(white, black)
    c = pattern.pattern_at_shape(obj, point(1.5, 0, 0))
    assert c == white
def test_stripes_with_a_pattern_transformation():
    """Pattern coordinates follow the pattern's own transform."""
    obj = Sphere()  # renamed from `object` to stop shadowing the builtin
    pattern = StripePattern(white, black)
    pattern.set_pattern_transform(scaling(2, 2, 2))
    c = pattern.pattern_at_shape(obj, point(1.5, 0, 0))
    assert c == white
def test_stripes_with_both_an_object_and_a_pattern_transformation():
    """Object and pattern transforms compose when locating the pattern color."""
    obj = Sphere()  # renamed from `object` to stop shadowing the builtin
    obj.set_transform(scaling(2, 2, 2))
    pattern = StripePattern(white, black)
    pattern.set_pattern_transform(translation(0.5, 0, 0))
    c = pattern.pattern_at_shape(obj, point(2.5, 0, 0))
    assert c == white
def test_the_default_pattern_transformation():
    """A freshly created pattern starts with the identity transform."""
    assert DummyPattern().transform == I
def test_assigning_a_transformation():
    """set_pattern_transform stores the matrix it is given."""
    pattern = DummyPattern()
    t = translation(1, 2, 3)
    pattern.set_pattern_transform(t)
    assert pattern.transform == t
def test_a_pattern_with_an_object_transformation():
    """Shape transform alone maps world (2,3,4) to object point (1,1.5,2)."""
    sphere = Sphere()
    sphere.set_transform(scaling(2, 2, 2))
    result = DummyPattern().pattern_at_shape(sphere, point(2, 3, 4))
    assert result == Color(1, 1.5, 2)
def test_a_pattern_with_a_pattern_transformation():
    """Pattern transform alone maps world (2,3,4) to pattern point (1,1.5,2)."""
    sphere = Sphere()
    dummy = DummyPattern()
    dummy.set_pattern_transform(scaling(2, 2, 2))
    result = dummy.pattern_at_shape(sphere, point(2, 3, 4))
    assert result == Color(1, 1.5, 2)
def test_a_pattern_with_both_an_object_and_a_pattern_transformation():
    """Shape and pattern transforms compose before sampling the pattern."""
    sphere = Sphere()
    sphere.set_transform(scaling(2, 2, 2))
    dummy = DummyPattern()
    dummy.set_pattern_transform(translation(0.5, 1, 1.5))
    result = dummy.pattern_at_shape(sphere, point(2.5, 3, 3.5))
    assert result == Color(0.75, 0.5, 0.25)
def test_a_gradient_linearly_interpolates_between_colors():
    """Gradient blends linearly from color `a` to color `b` along x."""
    pattern = GradientPattern(white, black)
    # Bug fix: this first comparison was missing its `assert`, so the check
    # was silently discarded.
    assert pattern.pattern_at(point(0, 0, 0)) == white
    assert pattern.pattern_at(point(0.25, 0, 0)) == Color(0.75, 0.75, 0.75)
    assert pattern.pattern_at(point(0.5, 0, 0)) == Color(0.5, 0.5, 0.5)
    assert pattern.pattern_at(point(0.75, 0, 0)) == Color(0.25, 0.25, 0.25)
def test_a_ring_should_extend_in_both_x_and_z():
    """Ring color depends on radial distance in the x-z plane."""
    rings = RingPattern(white, black)
    assert rings.pattern_at(point(0, 0, 0)) == white
    assert rings.pattern_at(point(1, 0, 0)) == black
    assert rings.pattern_at(point(0, 0, 1)) == black
    # 0.708 is just over sqrt(2)/2, so the radius here exceeds 1.
    assert rings.pattern_at(point(0.708, 0, 0.708)) == black
def test_checkers_should_repeat_in_x():
    """Checkers alternate with unit period along x."""
    checkers = CheckersPattern(white, black)
    for x, expected in [(0, white), (0.99, white), (1.01, black)]:
        assert checkers.pattern_at(point(x, 0, 0)) == expected
def test_checkers_should_repeat_in_y():
    """Checkers alternate with unit period along y."""
    checkers = CheckersPattern(white, black)
    for y, expected in [(0, white), (0.99, white), (1.01, black)]:
        assert checkers.pattern_at(point(0, y, 0)) == expected
def test_checkers_should_repeat_in_z():
    """Checkers alternate with unit period along z."""
    checkers = CheckersPattern(white, black)
    for z, expected in [(0, white), (0.99, white), (1.01, black)]:
        assert checkers.pattern_at(point(0, 0, z)) == expected
|
nilq/baby-python
|
python
|
import os
import pathlib
import urllib
import bs4
from .subsearcher import HTMLSubSearcher, SubInfo
class SubHDSubSearcher(HTMLSubSearcher):
    """SubHD subtitle searcher (https://subhd.tv)."""
    SUPPORT_LANGUAGES = ['zh_chs', 'zh_cht', 'en', 'zh_en']
    SUPPORT_EXTS = ['ass', 'srt']
    # Search endpoint; the two AJAX paths below are resolved against it.
    API_URL = 'https://subhd.tv/search/'
    API_SUBTITLE_DOWNLOAD = '/ajax/down_ajax'
    API_SUBTITLE_PREVIEW = '/ajax/file_ajax'
    _cache = {}
    shortname = 'subhd'

    def __init__(self, subfinder, api_urls=None):
        # Allow both AJAX endpoints to be overridden through the api_urls map.
        super(SubHDSubSearcher, self).__init__(subfinder, api_urls=api_urls)
        self.API_SUBTITLE_DOWNLOAD = self.api_urls.get(
            'subhd_api_subtitle_download', self.__class__.API_SUBTITLE_DOWNLOAD
        )
        self.API_SUBTITLE_PREVIEW = self.api_urls.get('subhd_api_subtitle_preview', self.__class__.API_SUBTITLE_PREVIEW)

    def _parse_search_results_html(self, doc):
        """parse search result html"""
        soup = bs4.BeautifulSoup(doc, 'lxml')
        subinfo_list = []
        div_list = soup.select('div.mb-4')
        if not div_list:
            return subinfo_list
        for div in div_list:
            subinfo = SubInfo()
            div_title = div.find('div', class_='f12 pt-1')
            if not div_title:
                # Unexpected markup: stop parsing rather than guessing.
                break
            a = div_title.a
            # subtitle title
            subinfo['title'] = a.get('title').strip()
            # detail-page link
            subinfo['link'] = a.get('href').strip()
            div_format = div_title.find_next_siblings('div', limit=1)
            if not div_format:
                break
            div_format = div_format[0]
            # languages
            format_str = ' '.join(div_format.strings)
            for l1, l2 in self.LANGUAGES_MAP.items():
                if l1 in format_str:
                    subinfo['languages'].append(l2)
            # subtitle file formats
            for ext in self.SUPPORT_EXTS:
                if ext in format_str or ext.upper() in format_str:
                    subinfo['exts'].append(ext)
            # download count
            div_download = div_format.find_next_siblings('div', class_='pt-3')
            if not div_download:
                break
            div_download = div_download[0]
            fa_download = div_download.find('i', class_='fa-download')
            dl_str = fa_download.next_sibling
            # Strip the Chinese "times" suffix before parsing the count.
            dl_str = dl_str.replace('次', '')
            subinfo['download_count'] = int(dl_str)
            subinfo_list.append(subinfo)
        return subinfo_list

    def _get_subinfo_list(self, keyword):
        """return subinfo_list of keyword"""
        # searching subtitles
        url = self.API_URL
        if not url.endswith('/'):
            url += '/'
        url += urllib.parse.quote(keyword)
        res = self.session.get(url)
        doc = res.text
        # Remember the results page URL for the Referer header of later requests.
        self.referer = res.url
        subinfo_list = self._parse_search_results_html(doc)
        for subinfo in subinfo_list:
            # Detail links are page-relative; make them absolute.
            subinfo['link'] = self._join_url(res.url, subinfo['link'])
        return subinfo_list

    def _visit_detailpage(self, detailpage_link):
        """Resolve a detail page into a direct subtitle download URL.

        Returns an empty string when no URL can be obtained (e.g. captcha).
        """
        download_link = ''
        res = self.session.get(detailpage_link, headers={'Referer': self.referer})
        if not res.ok:
            return download_link
        doc = res.text
        self.referer = res.url
        soup = bs4.BeautifulSoup(doc, 'lxml')
        # The download button carries the subtitle id (sid) and token.
        button_download = soup.find('button', id=True, sid=True)
        if not button_download:
            return download_link
        api_subtitle_url = self._join_url(self.referer, self.API_SUBTITLE_DOWNLOAD)
        params = {
            'sub_id': button_download.get('sid'),
            'dtoken1': button_download.get('dtoken1'),
        }
        res = self.session.post(api_subtitle_url, json=params)
        if not res.ok:
            return download_link
        data = res.json()
        if data['success']:
            download_link = data['url']
        else:
            self.subfinder.logger.info('遇到验证码, 尝试通过字幕预览下载, 如果失败请尝试手动下载: {}'.format(detailpage_link))
        return download_link

    def _visit_downloadpage(self, downloadpage_link):
        # SubHD has no intermediate download page; nothing to do.
        pass

    def _try_preview_subs(self, detailpage_link):
        """Fallback download path: fetch subtitle text via the preview AJAX
        endpoint (used when the direct download is blocked by a captcha).

        Returns a list of subtitle file paths written next to the video file.
        """
        subs = []
        root = os.path.dirname(self.videofile)
        api_url = self._join_url(detailpage_link, self.API_SUBTITLE_PREVIEW)
        res = self.session.get(detailpage_link, headers={'Referer': self.referer})
        if not res.ok:
            return subs
        doc = res.text
        self.referer = res.url
        soup = bs4.BeautifulSoup(doc, 'lxml')
        a_list = soup.select('a[data-target="#fileModal"][data-sid]')
        if not a_list:
            return subs
        files = []
        for a in a_list:
            s = a.string.strip()
            # Only anchors labelled "preview" (Chinese) expose file contents.
            if s == '预览':
                sid = a.get('data-sid')
                fname = a.get('data-fname')
                ext = pathlib.PurePath(fname).suffix
                ext = ext[1:]
                if ext in self.exts:
                    files.append((sid, fname))
        for sid, fname in files:
            params = {'dasid': sid, 'dafname': fname}
            resp = self.session.post(api_url, data=params)
            if not resp.ok:
                continue
            data = resp.json()
            if not data['success']:
                continue
            filedata = data['filedata']
            origin_file = os.path.basename(fname)
            subname = self._gen_subname(origin_file, self.videofile)
            subname = os.path.join(root, subname)
            # NOTE(review): text write uses the platform default encoding —
            # confirm whether an explicit encoding is needed for subtitles.
            with open(subname, 'w') as fp:
                fp.write(filedata)
            subs.append(subname)
        return subs

    def _download_subtitle(self, subinfo):
        """Download the subtitles for one search result.

        Tries the direct download link first; falls back to the preview
        endpoint when no link is available. Returns a list of subtitle paths.
        """
        subtitle_download_link = self._visit_detailpage(subinfo['link'])
        self._debug('subtitle_download_link: {}'.format(subtitle_download_link))
        subs = None
        if not subtitle_download_link:
            subs = self._try_preview_subs(subinfo['link'])
        else:
            filepath = self._download_subs(subtitle_download_link, subinfo['title'])
            self._debug('filepath: {}'.format(filepath))
            subs = self._extract(filepath)
        self._debug('subs: {}'.format(subs))
        return subs
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import pickle
class MyContainer(object):
    """Trivial wrapper around an arbitrary payload (pickling demo)."""

    def __init__(self, data):
        self._data = data

    def get_data(self):
        """Return the wrapped payload unchanged."""
        return self._data
# Demo: serialize a container with pickle protocol 2 to a scratch file.
d1 = MyContainer([2, 5, 4, 3, [ 12, 3, 5 ], 32, { 'a': 12, 'b': 43}])
with open('/tmp/pickle_data.dat', "wb") as f:
    p = pickle.Pickler(f, 2)
    p.dump(d1)
|
nilq/baby-python
|
python
|
import os
import urllib
import elasticsearch
import elasticsearch_dsl
import es2json.helperscripts as helperscripts
class ESGenerator:
    """
    Main generator Object where other Generators inherit from
    """

    def __init__(self, host='localhost',
                 port=9200,
                 es=None,
                 index=None,
                 type_=None,
                 id_=None,
                 body=None,
                 source=True,
                 excludes=None,
                 includes=None,
                 headless=False,
                 chunksize=1000,
                 timeout=10,
                 verbose=True,
                 slice_=None):
        """
        Construct a new ESGenerator Object.

        :param host: Elasticsearch host to use, default is localhost
        :param port: Elasticsearch port to use, default is 9200
        :param index: Elasticsearch Index to use, optional, if no parameter given, ESGenerator uses ALL the indices
        :param es: Don't use the host/port/timeout setting, use your own elasticsearch.Elasticsearch() Object
        :param type_: Elasticsearch doc_type to use, optional, deprecated after Elasticsearch>=7.0.0
        :param body: Query body to use for Elasticsearch, optional
        :param source: Include the source field in your record, default is True
        :param excludes: don't include the fields defined by this parameter, optional, must be python list()
        :param includes: only include the fields defined by this parameter, optional, must be python list()
        :param headless: don't include the metafields, only the data in the _source field, default is False
        :param chunksize: pagesize to used, default is 1000
        :param timeout: Elasticsearch timeout parameter, default is 10 (seconds)
        :param verbose: print out progress information on /dev/stderr, default is True, optional
        :param slice_: only return records defined by a python slice() object
        free earworm when working with python slices: https://youtu.be/Nlnoa67MUJU
        """
        if es:
            self.es = es
        else:
            if "://" in host:  # we don't want the hostname to start with the protocoll
                host = urllib.parse.urlparse(host).hostname
            self.es = elasticsearch_dsl.connections.create_connection(
                host=host, port=port, timeout=timeout,
                max_retries=10, retry_on_timeout=True,
                http_compress=True)
        self.id_ = id_
        self.source = source
        self.chunksize = chunksize
        self.headless = headless
        self.index = index
        self.type_ = type_
        self.source_excludes = excludes
        self.source_includes = includes
        self.body = body
        self.verbose = verbose
        self.slice_ = slice_

    def return_doc(self, hit):
        """
        prints out the elasticsearch record defined by user input
        also rewrites the metadata fields back to NonPythonic Elasticsearch Standard
        see elasticsearch_dsl.utils.py::ObjectBase(AttrDict)__init__.py

        :param hit: The hit returned from the elasticsearch_dsl-call, is always
        """
        meta = hit.meta.to_dict()
        if self.headless and not self.source:
            # Nothing requested at all: neither metadata nor source.
            return {}
        if self.headless:
            return hit.to_dict()
        else:
            # collect metadata fields and convert to fields
            # starting with underscore ("_")
            for key in elasticsearch_dsl.utils.META_FIELDS:
                if key in meta:
                    meta["_{}".format(key)] = meta.pop(key)
            if "doc_type" in meta:
                meta["_type"] = meta.pop("doc_type")
            if self.source:
                meta["_source"] = hit.to_dict()
            else:
                meta["_source"] = {}  # @BH: necessarry?
            return meta

    def __enter__(self):
        """
        function needed for with-statement
        __enter__ only returns the instanced object
        """
        return self

    def __exit__(self, doc_, value, traceback):
        """
        function needed for with-statement
        since we don't need to do any cleanup, this function does nothing
        """
        pass

    def generator(self):
        """
        main generator function which harvests from the Elasticsearch-Cluster after all init and argument stuff is done
        """
        if self.id_:
            # Single-document fast path via a direct GET.
            s = elasticsearch_dsl.Document.get(using=self.es,
                                               index=self.index,
                                               id=self.id_,
                                               _source_excludes=self.source_excludes,
                                               _source_includes=self.source_includes,
                                               _source=self.source)
            yield self.return_doc(s)
            return
        s = elasticsearch_dsl.Search(using=self.es,
                                     index=self.index,
                                     doc_type=self.type_).source(excludes=self.source_excludes,
                                                                 includes=self.source_includes)
        if self.body:
            s = s.update_from_dict(self.body)
        if self.verbose:
            # Total hit count is only needed for the progress display.
            hits_total = s.count()
        if self.slice_:
            hits = s[self.slice_].execute()
        else:
            hits = s.params(scroll='12h', size=self.chunksize).scan()  # in scroll context, size = pagesize, still all records will be returned
        for n, hit in enumerate(hits):
            yield self.return_doc(hit)
            if self.verbose and ((n+1) % self.chunksize == 0 or n+1 == hits_total):
                helperscripts.eprint("{}/{}".format(n+1, hits_total))
class IDFile(ESGenerator):
    """
    wrapper for esgenerator() to submit a list of ids or a file with ids
    to reduce the searchwindow on
    """

    def __init__(self, idfile, missing_behaviour='print', **kwargs):
        """
        Creates a new IDFile Object

        :param idfile: the path of the file containing the IDs or an iterable containing the IDs
        :param missing_behaviour: What should we do with missing IDs? 'print' or 'yield' an dict containing the ID
        """
        super().__init__(**kwargs)
        self.idfile = idfile  # string containing the path to the idfile, or an iterable containing all the IDs
        self.ids = []  # an iterable containing all the IDs from idfile, going to be reduced during runtime
        self.missing_behaviour = missing_behaviour  # what to do with missing records? print or yield an dict containing the ID? default is print
        self.read_file()
    def read_file(self):
        """
        determining weather self.idfile is an iterable or a file,
        harvests the IDs out of it and saves them in a set (for de-duplication)
        """
        ids_set = set()
        if isinstance(self.idfile, str) and helperscripts.isfile(self.idfile):
            # Path to a file: one ID per line.
            with open(self.idfile, "r") as inp:
                for ppn in inp:
                    ids_set.add(ppn.rstrip())
        elif helperscripts.isiter(self.idfile) and not isinstance(self.idfile, str) and not helperscripts.isfile(self.idfile):
            # Already an iterable of IDs.
            for ppn in self.idfile:
                ids_set.add(ppn.rstrip())
        else:
            raise AttributeError
        # Two independent lists: `iterable` stays untouched, `ids` is consumed
        # during generation.
        self.iterable = list(ids_set)
        self.ids = list(ids_set)
def write_file(self, missing):
"""
writing of idfile for the consume generator,
we instance this here to be used in generator() function, even if we
don't use it in this parent class at this point we just like to
error-print every missing ids
"""
for item in missing:
if self.missing_behaviour == 'print':
helperscripts.eprint("ID {} not found".format(item))
elif self.missing_behaviour == 'yield':
yield {"_id": item, 'found': False}
def generator(self):
"""
main generator function for IDFile and IDFileConsume
searching with an set of IDs can take quite long time
better would be to reduce the set of documents to a pure idlist, this is quite fast over mget
often, its needed to do it with a search, therefore both ways work
"""
missing = [] # an iterable containing missing ids
while len(self.ids) > 0:
if self.body:
ms = elasticsearch_dsl.MultiSearch(using=self.es, index=self.index, doc_type=self.type_) # setting up MultiSearch
this_iter_ids = self.ids[:self.chunksize] # an ID List per iteration, so we can check if all the IDs of this chunksize are found at the end.
for _id in this_iter_ids: # add a search per ID
ms = ms.add(elasticsearch_dsl.Search().source(excludes=self.source_excludes,
includes=self.source_includes).from_dict(self.body).query("match", _id=_id))
responses = ms.execute()
for response in responses:
for hit in response:
_id = hit.meta.to_dict()["id"]
yield self.return_doc(hit)
del self.ids[self.ids.index(_id)]
del this_iter_ids[this_iter_ids.index(_id)]
for _id in this_iter_ids:
"""
unfortunately MultiSearch doesn't throw an exception for non-Found-IDs, so we have manually check for missing ids
so we again iterate over the helper_list with the IDs per chunk size (simply doing self.dis[:self.chunksize] would give us a new set)
and we put all the IDs who are still in there in our missing list and delete them from self.ids and this_iter_ids
"""
missing.append(_id)
del self.ids[self.ids.index(_id)]
del this_iter_ids[this_iter_ids.index(_id)]
else:
try:
s = elasticsearch_dsl.Document.mget(docs=self.ids[:self.chunksize],
using=self.es,
index=self.index,
_source_excludes=self.source_excludes,
_source_includes=self.source_includes,
_source=self.source,
missing='raise')
except elasticsearch.exceptions.NotFoundError as e:
for doc in e.info['docs']: # we got some missing ids and harvest the missing ids from the Elasticsearch NotFoundError Exception
missing.append(doc['_id'])
del self.ids[self.ids.index(doc['_id'])]
else: # only gets called if we don't run into an exception
for hit in s:
_id = hit.meta.to_dict()["id"]
yield self.return_doc(hit)
del self.ids[self.ids.index(_id)]
if not self.ids:
"""
if we delete the last item from ids,
ids turns to None and then the while(len(list()))
would throw an exception, since None isn't an iterable
"""
self.ids = []
for item in self.write_file(missing):
yield item
class IDFileConsume(IDFile):
    """
    Variant of IDFile that consumes its ID file: the file is rewritten with
    only the still-missing IDs, or deleted when every ID was found.
    """

    def __init__(self, **kwargs):
        """Creates a new IDFileConsume object."""
        super().__init__(**kwargs)

    def read_file(self):
        """
        Reads the IDs from the file at self.idfile into self.ids,
        de-duplicated via an intermediate set. Unlike the parent class,
        only file paths are supported here, not iterables.
        """
        unique_ids = set()
        with open(self.idfile, "r") as handle:
            for line in handle:
                unique_ids.add(line.rstrip())
        self.ids = list(unique_ids)

    def write_file(self, missing):
        """
        Rewrites the ID file with the missing IDs so a later run can retry
        them; when missing_behaviour == 'yield', also yields a stub dict per
        missing ID. If no IDs are missing, the consumed file is deleted.
        """
        if not missing:
            # nothing missing in the cluster? alright, we clean up
            os.remove(self.idfile)
            return
        with open(self.idfile, "w") as outfile:
            for missing_id in missing:
                print(missing_id, file=outfile)
                if self.missing_behaviour == 'yield':
                    yield {"_id": missing_id, 'found': False}
|
nilq/baby-python
|
python
|
from copy import deepcopy
from sqlalchemy import (
Table,
Column,
Integer,
String,
DateTime,
UniqueConstraint,
DECIMAL,
LargeBinary,
Boolean,
ForeignKey,
PrimaryKeyConstraint,
)
from wt.common import Currency
from wt.entities.deliverables import DeliverableStatus
from wt.ids import EntityType
from wt.entities.issues import IssueType
from wt.entities.projects import ProjectStatus
from wt.provider.db import METADATA
from wt.provider.db._columns import (
ID_COLUMN_TYPE,
PROJECT_ID_COLUMN_TYPE,
OBJECT_ID_COLUMN_REFERENCE,
PARENT_ID_COLUMN_REFERENCE,
)
from wt.provider.db._utils import get_enum_length
from wt.costs.expenditures import ExpenditureStatus, ExpenditureType
# Files attached to an arbitrary parent entity; each (parent_id, uri) pair is unique.
# Column objects may belong to only one Table, hence the deepcopy of the shared
# column templates throughout this module.
FIELD_FILES_TABLE = Table(
    "field_files",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    deepcopy(PARENT_ID_COLUMN_REFERENCE),
    Column("uri", String(2048), index=True, nullable=False),
    Column("created_on", DateTime(), nullable=False),
    UniqueConstraint("parent_id", "uri")
)

# External links attached to a parent entity; unique per (parent_id, uri).
FIELD_LINKS_TABLE = Table(
    "field_links",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    deepcopy(PARENT_ID_COLUMN_REFERENCE),
    Column("uri", String(2048), nullable=False),
    Column("title", String(126), nullable=False),
    Column("description", String(4096), nullable=False),
    Column("created_on", DateTime(), nullable=False),
    UniqueConstraint("parent_id", "uri")
)

# Checklist tasks attached to a parent entity; unique per (parent_id, task).
FIELD_TASKS_TABLE = Table(
    "field_tasks",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    deepcopy(PARENT_ID_COLUMN_REFERENCE),
    Column("task", String(1024), nullable=False),
    Column("completed", Boolean(), nullable=False),
    Column("created_on", DateTime(), nullable=False),
    UniqueConstraint("parent_id", "task")
)

# Free-form tags attached to a parent entity; unique per (parent_id, tag).
FIELD_TAGS_TABLE = Table(
    "field_tags",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    deepcopy(PARENT_ID_COLUMN_REFERENCE),
    Column("tag", String(50), index=True, nullable=False),
    Column("created_on", DateTime(), nullable=False),
    UniqueConstraint("parent_id", "tag")
)

# Deliverables belonging to a project; string enum columns are sized to the
# longest member of the corresponding enum via get_enum_length().
DELIVERABLES_TABLE = Table(
    "deliverables",
    METADATA,
    deepcopy(OBJECT_ID_COLUMN_REFERENCE),
    Column(
        "project_id",
        PROJECT_ID_COLUMN_TYPE,
        ForeignKey("projects.project_id", ondelete="RESTRICT"),
        index=True,
        nullable=False,
    ),
    Column("name", String(128), nullable=False),
    Column("status", String(get_enum_length(DeliverableStatus)), nullable=False),
    Column("description", String(), nullable=False),
    Column("date_opened", DateTime(), nullable=False),
    Column("date_closed", DateTime(), nullable=True),
    Column("deadline", DateTime(), nullable=True),
    Column("created_on", DateTime(), nullable=False),
)

# Per-project counter used to hand out the next entity id.
IDS_COUNTER_TABLE = Table(
    "ids_counter",
    METADATA,
    Column("project_id", ID_COLUMN_TYPE, primary_key=True),
    Column("next_id", Integer(), nullable=False),
)

# Maps every entity id to its project and entity type.
OBJECTS_TRACKER_TABLE = Table(
    "objects_tracker",
    METADATA,
    Column("id", ID_COLUMN_TYPE, primary_key=True),
    Column(
        "project_id",
        PROJECT_ID_COLUMN_TYPE,
        index=True,
        nullable=False,
    ),
    Column("type", String(get_enum_length(EntityType)), nullable=False),
)

# Top-level projects; primary_color/secondary_color hold 7-char hex codes ("#RRGGBB").
PROJECTS_TABLE = Table(
    "projects",
    METADATA,
    Column("project_id", PROJECT_ID_COLUMN_TYPE, primary_key=True),
    Column("name", String(128), nullable=False),
    Column("status", String(get_enum_length(ProjectStatus)), nullable=False),
    Column("date_opened", DateTime(), nullable=False),
    Column("date_closed", DateTime(), nullable=True),
    Column("deadline", DateTime(), nullable=True),
    Column("hour_rate_amount", DECIMAL(), nullable=True),
    Column("hour_rate_currency", String(get_enum_length(Currency)), nullable=True),
    Column("description", String(), nullable=False),
    Column("limitations_and_restrictions", String(), nullable=False),
    Column("goals_and_metrics", String(), nullable=False),
    Column("primary_color", String(7), nullable=False),
    Column("secondary_color", String(7), nullable=False),
    Column("created_on", DateTime(), nullable=False),
)

# Issues belonging to a project.
# NOTE(review): "status" and "priority" column lengths are computed from
# IssueType — presumably dedicated IssueStatus/IssuePriority enums were
# intended, so the VARCHAR lengths may be wrong. TODO confirm before changing.
ISSUES_TABLE = Table(
    "issues",
    METADATA,
    deepcopy(OBJECT_ID_COLUMN_REFERENCE),
    Column(
        "project_id",
        PROJECT_ID_COLUMN_TYPE,
        ForeignKey("projects.project_id", ondelete="RESTRICT"),
        index=True,
        nullable=False,
    ),
    Column("name", String(128), nullable=False),
    Column("description", String(), nullable=False),
    Column("external_type", String(256), nullable=False),
    Column("status", String(get_enum_length(IssueType)), nullable=False),
    Column("priority", String(get_enum_length(IssueType)), nullable=False),
    Column("type", String(get_enum_length(IssueType)), nullable=False),
    Column("date_opened", DateTime(), nullable=False),
    Column("date_closed", DateTime(), nullable=True),
    Column("deadline", DateTime(), nullable=True),
    Column("hour_rate_amount", DECIMAL(), nullable=True),
    Column("hour_rate_currency", String(get_enum_length(Currency)), nullable=True),
    Column("estimated_duration", DECIMAL(), nullable=True),
    Column("created_on", DateTime(), nullable=False),
)

# Application users; password stores a binary hash blob, not plaintext.
USER_TABLE = Table(
    "users",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    Column("username", String(64), unique=True, nullable=False),
    Column("password", LargeBinary(256), nullable=False),
)

# Undirected entity-to-entity links; the composite PK prevents duplicates.
ENTITY_LINKS_TABLE = Table(
    "entity_links",
    METADATA,
    Column(
        "object_id",
        ID_COLUMN_TYPE,
        ForeignKey("objects_tracker.id", ondelete="RESTRICT"),
        nullable=False
    ),
    Column(
        "other_object_id",
        ID_COLUMN_TYPE,
        ForeignKey("objects_tracker.id", ondelete="RESTRICT"),
        nullable=False
    ),
    PrimaryKeyConstraint("object_id", "other_object_id"),
)

# Logged work entries attached to a parent entity; duration is in decimal hours
# (unit not shown here — confirm against callers).
TIMESHEETS_TABLE = Table(
    "timesheets",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    deepcopy(PARENT_ID_COLUMN_REFERENCE),
    Column("description", String(256), nullable=False),
    Column("duration", DECIMAL(), nullable=False),
    Column("date_opened", DateTime(), nullable=False),
    Column("created_on", DateTime(), nullable=False),
)

# Expenditures attached to a parent entity.
EXPENDITURES_TABLE = Table(
    "expenditures",
    METADATA,
    Column("id", Integer(), primary_key=True, autoincrement=True),
    deepcopy(PARENT_ID_COLUMN_REFERENCE),
    Column("description", String(), nullable=False),
    Column("name", String(256), nullable=False),
    Column("date_opened", DateTime(), nullable=False),
    Column("date_closed", DateTime(), nullable=True),
    Column("deadline", DateTime(), nullable=True),
    Column("status", String(get_enum_length(ExpenditureStatus)), nullable=True),
    Column("type", String(get_enum_length(ExpenditureType)), nullable=True),
    Column("cost_amount", DECIMAL(), nullable=True),
    Column("cost_currency", String(get_enum_length(Currency)), nullable=True),
    Column("created_on", DateTime(), nullable=False),
)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 ckitagawa <ckitagawa@edu.uwaterloo.ca>
#
# Distributed under terms of the MIT license.
import logging
import threading
import serial
import serial.tools.list_ports
import fiber_reading
from collections import deque
def select_device():
    """Interactively ask the user to pick one of the available serial ports.

    Args:
        None

    Returns:
        The selected serial device as ListPortInfo.
    """
    while True:
        print('Pick the serial device:')
        available = serial.tools.list_ports.comports()
        for idx, candidate in enumerate(available):
            print('{}: {}'.format(idx, candidate))
        try:
            selection = available[int(input())]
        except IndexError:
            # out-of-range choice: complain and prompt again
            print('Invalid device!')
            continue
        print('Selected {}'.format(selection))
        return selection
class SerialDataSource(object):
    """A datasource that reads readings from a bound serial port interface
    on a background thread and buffers them in a FIFO queue."""

    def __init__(self, device):
        # FIFO of parsed fiber_reading.FiberReading objects.
        self.q = deque()
        self.ser = serial.Serial(device, 115200)
        self.running = False
        self.t = None

    def start(self):
        """Starts the packet_service reader thread (no-op if already running)."""
        if self.running:
            return
        self.running = True
        self.t = threading.Thread(target=self.packet_service)
        self.t.start()

    def stop(self):
        """Signals the reader thread to exit and waits for it to finish.

        The thread may still block on one final readline() before it
        observes the cleared flag.
        """
        self.running = False
        self.t.join()
        self.t = None

    def get_packet(self):
        """Returns the oldest buffered reading, or None if the queue is empty."""
        if self.q:
            return self.q.popleft()
        return None

    def packet_service(self):
        """Reader loop: parses comma-separated lines from the serial port
        into FiberReading objects and appends them to the queue."""
        # Discard the first packet (may be a partial line).
        self.ser.readline().decode('ascii')
        # Bug fix: check self.running instead of looping forever, otherwise
        # stop() blocks indefinitely on join().
        while self.running:
            try:
                line = self.ser.readline().decode('ascii')
            except Exception:
                # transient decode/serial error: skip this line
                continue
            if not line:
                continue
            fields = line.split(',')
            n_fields = len(fields)
            if n_fields < 3:
                # malformed packet: echo it for debugging and move on
                print(line)
                continue
            # Field layout: axis code, index, calibration, then data samples.
            axis_char = int(fields[0])
            axis = fiber_reading.Axis.UNKNOWN
            if axis_char == 0:
                axis = fiber_reading.Axis.X_AXIS
            elif axis_char == 1:
                axis = fiber_reading.Axis.Y_AXIS
            index = int(fields[1])
            callib = int(fields[2])
            reading = fiber_reading.FiberReading(axis, index, callib)
            for i in range(3, n_fields):
                reading.AddData(int(fields[i]))
            self.q.append(reading)
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Maintainer:
# Based on code written by Jed Smith <jed@jedsmith.org> who based it on
# code written by Alex Polvi <polvi@cloudkick.com>
#
import sys
import unittest
import json
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.equinixmetal import EquinixMetalNodeDriver
from libcloud.compute.base import Node, KeyPair
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
# This is causing test failures under Python 3.5
import libcloud.compute.drivers.equinixmetal
libcloud.compute.drivers.equinixmetal.USE_ASYNC_IO_IF_AVAILABLE = False
__all__ = [
'EquinixMetalTest'
]
class EquinixMetalTest(unittest.TestCase, TestCaseMixin):
    """Tests for EquinixMetalNodeDriver against canned fixture responses
    served by EquinixMetalMockHttp (no network access)."""

    def setUp(self):
        # Route all driver HTTP traffic through the mock connection class.
        EquinixMetalNodeDriver.connectionCls.conn_class = EquinixMetalMockHttp
        self.driver = EquinixMetalNodeDriver('foo')

    def test_list_nodes(self):
        nodes = self.driver.list_nodes('project-id')
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        # Values below mirror the devices.json fixture.
        self.assertEqual(node.id, '1e52437e-bbbb-cccc-dddd-74a9dfd3d3bb')
        self.assertEqual(node.name, 'test-node')
        self.assertEqual(node.state, NodeState.RUNNING)
        self.assertTrue('147.75.255.255' in node.public_ips)
        self.assertTrue('2604:EEEE::EE' in node.public_ips)
        self.assertTrue('10.0.0.255' in node.private_ips)
        self.assertEqual(node.extra['created_at'], '2015-05-03T15:50:49Z')
        self.assertEqual(node.extra['updated_at'], '2015-05-03T16:00:08Z')
        self.assertEqual(node.extra['billing_cycle'], 'hourly')
        self.assertEqual(node.extra['locked'], False)
        self.assertEqual(node.size.id, 'baremetal_1')
        self.assertEqual(node.size.name, 'Type 1 - 16384 RAM')
        self.assertEqual(node.size.ram, 16384)
        self.assertEqual(node.size.disk, 240)
        self.assertEqual(node.size.price, 0.4)
        self.assertEqual(node.size.extra['line'], 'baremetal')
        self.assertEqual(node.image.id, 'ubuntu_14_04')
        self.assertEqual(node.image.name, 'Ubuntu 14.04 LTS')
        self.assertEqual(node.image.extra['distro'], 'ubuntu')
        self.assertEqual(node.image.extra['version'], '14.04')

    def test_list_nodes_response(self):
        nodes = self.driver.list_nodes('project-id')
        self.assertTrue(isinstance(nodes, list))
        for node in nodes:
            self.assertTrue(isinstance(node, Node))

    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(len(locations), 1)

    def test_list_images(self):
        images = self.driver.list_images()
        self.assertEqual(len(images), 4)

    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 1)

    def test_create_node(self):
        node = self.driver.create_node(ex_project_id="project-id",
                                       name="node-name",
                                       size=self.driver.list_sizes()[0],
                                       image=self.driver.list_images()[0],
                                       location=self.driver.list_locations()[
                                           0])
        self.assertTrue(isinstance(node, Node))

    def test_create_node_response(self):
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        location = self.driver.list_locations()[0]
        node = self.driver.create_node(ex_project_id="project-id",
                                       name='node-name',
                                       image=image,
                                       size=size,
                                       location=location)
        self.assertTrue(isinstance(node, Node))

    def test_reboot_node(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.reboot_node(node)

    def test_reboot_node_response(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.reboot_node(node)

    def test_destroy_node(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.destroy_node(node)

    def test_destroy_node_response(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.destroy_node(node)

    def test_reinstall_node(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.ex_reinstall_node(node)

    def test_rescue_node(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.ex_rescue_node(node)

    def test_list_key_pairs(self):
        keys = self.driver.list_key_pairs()
        self.assertEqual(len(keys), 3)

    def test_create_key_pair(self):
        # The backslash-continued literal embeds the key without newlines.
        key = self.driver.create_key_pair(name="sshkey-name",
                                          public_key="ssh-rsa AAAAB3NzaC1yc2EA\
AAADAQABAAABAQDI4pIqzpb5g3992h+yr527VRcaB68KE4vPjWPPoiQws49KIs2NMcOzS9QE4641uW\
1u5ML2HgQdfYKMF/YFGnI1Y6xV637DjhDyZYV9LasUH49npSSJjsBcsk9JGfUpNAOdcgpFzK8V90ei\
OrOC5YncxdwwG8pwjFI9nNVPCl4hYEu1iXdyysHvkFfS2fklsNjLWrzfafPlaen+qcBxygCA0sFdW/\
7er50aJeghdBHnE2WhIKLUkJxnKadznfAge7oEe+3LLAPfP+3yHyvp2+H0IzmVfYvAjnzliYetqQ8p\
g5ZW2BiJzvqz5PebGS70y/ySCNW1qQmJURK/Wc1bt9en root@libcloud")
        self.assertTrue(isinstance(key, KeyPair))

    def test_delete_key_pair(self):
        key = self.driver.list_key_pairs()[0]
        self.driver.delete_key_pair(key)

    def test_ex_list_projects(self):
        projects = self.driver.ex_list_projects()
        self.assertEqual(len(projects), 3)

    def test_ex_get_bgp_config_for_project(self):
        config = self.driver.ex_get_bgp_config_for_project(ex_project_id='4b653fce-6405-4300-9f7d-c587b7888fe5')
        self.assertEqual(config.get('status'), 'enabled')

    def test_ex_get_bgp_config(self):
        config = self.driver.ex_get_bgp_config()
        self.assertEqual(len(config), 2)

    def test_ex_list_nodes_for_project(self):
        nodes = self.driver.ex_list_nodes_for_project(ex_project_id='4b653fce-6405-4300-9f7d-c587b7888fe5')
        self.assertEqual(nodes[0].public_ips, ['147.75.102.193', '2604:1380:2000:c100::3'])

    def test_ex_create_bgp_session(self):
        node = self.driver.list_nodes('project-id')[0]
        session = self.driver.ex_create_bgp_session(node, 'ipv4')
        self.assertEqual(session['status'], 'unknown')

    def test_ex_get_bgp_session(self):
        session = self.driver.ex_get_bgp_session(self.driver.ex_list_bgp_sessions()[0]['id'])
        self.assertEqual(session['status'], 'down')

    def test_ex_list_bgp_sessions_for_project(self):
        sessions = self.driver.ex_list_bgp_sessions_for_project(ex_project_id='4b653fce-6405-4300-9f7d-c587b7888fe5')
        self.assertEqual(sessions['bgp_sessions'][0]['status'], 'down')

    def test_ex_list_bgp_sessions_for_node(self):
        sessions = self.driver.ex_list_bgp_sessions_for_node(self.driver.list_nodes()[0])
        self.assertEqual(sessions['bgp_sessions'][0]['status'], 'down')

    def test_ex_list_bgp_sessions(self):
        sessions = self.driver.ex_list_bgp_sessions()
        self.assertEqual(sessions[0]['status'], 'down')

    def test_ex_delete_bgp_session(self):
        self.driver.ex_delete_bgp_session(session_uuid='08f6b756-758b-4f1f-bfaf-b9b5479822d7')

    def test_ex_list_events_for_node(self):
        events = self.driver.ex_list_events_for_node(self.driver.list_nodes()[0])
        self.assertEqual(events['events'][0]['ip'], '157.52.105.28')

    def test_ex_list_events_for_project(self):
        events = self.driver.ex_list_events_for_project(self.driver.ex_list_projects()[0])
        self.assertEqual(events['meta']['total'], len(events['events']))

    def test_ex_get_node_bandwidth(self):
        node = self.driver.list_nodes('project-id')[0]
        bw = self.driver.ex_get_node_bandwidth(node, 1553194476, 1553198076)
        self.assertTrue(len(bw['bandwidth'][0]['datapoints'][0]) > 0)

    def test_ex_update_node(self):
        node = self.driver.list_nodes('project-id')[0]
        self.driver.ex_update_node(node, description='new_description')

    def test_ex_describe_all_addresses_for_project(self):
        addresses = self.driver.ex_describe_all_addresses_for_project(
            '4b653fce-6405-4300-9f7d-c587b7888fe5')
        self.assertEqual(len(addresses), 5)

    def test_ex_describe_address(self):
        address = self.driver.ex_describe_address(
            ex_address_id='01c184f5-1413-4b0b-9f6d-ac993f6c9241')
        self.assertEqual(address['network'], '147.75.33.32')

    def test_ex_request_address_reservation(self):
        response = self.driver.ex_request_address_reservation(
            ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')
        assert response['global_ip']

    def test_ex_associate_address_with_node(self):
        node = self.driver.list_nodes('project-id')[0]
        response = self.driver.ex_associate_address_with_node(node, '147.75.40.2/32')
        assert response['enabled']

    def test_ex_disassociate_address_with_node(self):
        node = self.driver.list_nodes('project-id')[0]
        assignments = self.driver.ex_list_ip_assignments_for_node(node)
        for ip_assignment in assignments['ip_addresses']:
            if ip_assignment['gateway'] == '147.75.40.2':
                self.driver.ex_disassociate_address(
                    ip_assignment['id'])
                break

    def test_list_volumes(self):
        volumes = self.driver.list_volumes()
        assert len(volumes) == 2
        assert len(volumes[0].extra['attachments']) == 0

    def test_create_volume(self):
        location = self.driver.list_locations()[0]
        volume = self.driver.create_volume(
            10, location, description="test volume", plan="storage_1",
            ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')
        assert len(volume.extra['attachments']) == 0
        assert not volume.extra['locked']

    def test_attach_volume(self):
        attached = False
        volumes = self.driver.ex_list_volumes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')
        node = self.driver.ex_list_nodes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')[0]
        # Attach to the first volume that has no existing attachment.
        for vol in volumes:
            if len(vol.extra['attachments']) == 0:
                attached = self.driver.attach_volume(node, vol)
                break
        assert attached

    def test_detach_volume(self):
        detached = False
        volumes = self.driver.ex_list_volumes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')
        for vol in volumes:
            if len(vol.extra['attachments']) > 0:
                detached = self.driver.detach_volume(vol)
                break
        assert detached

    def test_destroy_volume(self):
        destroyed = False
        volumes = self.driver.ex_list_volumes_for_project(ex_project_id='3d27fd13-0466-4878-be22-9a4b5595a3df')
        for vol in volumes:
            if len(vol.extra['attachments']) == 0:
                destroyed = self.driver.destroy_volume(vol)
                break
        assert destroyed
class EquinixMetalMockHttp(MockHttp):
    """Canned HTTP responses for the Equinix Metal API, backed by the
    'equinixmetal' fixture files. Per libcloud's MockHttp convention, each
    method name encodes the URL path it serves (slashes and dashes become
    underscores), so the method names below must not be changed."""

    fixtures = ComputeFileFixtures('equinixmetal')

    def _metal_v1_facilities(self, method, url, body, headers):
        body = self.fixtures.load('facilities.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_plans(self, method, url, body, headers):
        body = self.fixtures.load('plans.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_plans(self, method, url, body, headers):
        body = self.fixtures.load('plans.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects(self, method, url, body, headers):
        body = self.fixtures.load('projects.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_devices(self, method, url, body, headers):
        body = self.fixtures.load('devices_for_project.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_devices(self, method, url, body, headers):
        body = self.fixtures.load('devices_for_project.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_devices(self, method, url, body, headers):
        body = self.fixtures.load('devices_for_project.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_ips(self, method, url, body, headers):
        body = self.fixtures.load('project_ips.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_ips(self, method, url, body, headers):
        # Only POST (reserve) is fixtured for this project's /ips endpoint.
        if method == 'POST':
            body = self.fixtures.load('reserve_ip.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_bgp_config(self, method, url, body, headers):
        body = self.fixtures.load('bgp_config_project_1.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_bgp_config(self, method, url, body, headers):
        body = self.fixtures.load('bgp_config_project_1.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_bgp_config(self, method, url, body, headers):
        body = self.fixtures.load('bgp_config_project_3.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_operating_systems(self, method, url, body, headers):
        body = self.fixtures.load('operatingsystems.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_ssh_keys(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('sshkeys.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        if method == 'POST':
            body = self.fixtures.load('sshkey_create.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_ssh_keys_2c1a7f23_1dc6_4a37_948e_d9857d9f607c(self, method, url, body, headers):
        if method == 'DELETE':
            return (httplib.OK, '', {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_project_id_devices(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('device_create.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'GET':
            body = self.fixtures.load('devices.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb(self, method, url, body, headers):
        if method in ['DELETE', 'PUT']:
            return (httplib.OK, '', {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_actions(self, method, url, body, headers):
        return (httplib.OK, '', {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_bgp_sessions(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('bgp_session_create.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_bgp_sessions_08f6b756_758b_4f1f_bfaf_b9b5479822d7(self, method, url, body, headers):
        body = self.fixtures.load('bgp_session_get.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_bgp_sessions(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('bgp_sessions.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_905037a4_967c_4e81_b364_3a0603aa071b_bgp_sessions(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('bgp_sessions.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_bgp_sessions(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('bgp_sessions.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_bgp_sessions(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('bgp_sessions.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_events(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('project_events.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_905037a4_967c_4e81_b364_3a0603aa071b_events(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('device_events.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_bandwidth(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('node_bandwidth.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_ips_01c184f5_1413_4b0b_9f6d_ac993f6c9241(self, method, url, body, headers):
        body = self.fixtures.load('ip_address.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_devices_1e52437e_bbbb_cccc_dddd_74a9dfd3d3bb_ips(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('ip_assignments.json')
        elif method == 'POST':
            body = self.fixtures.load('associate_ip.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_ips_aea4ee0c_675f_4b77_8337_8e13b868dd9c(self, method, url, body, headers):
        if method == 'DELETE':
            return (httplib.OK, '', {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_3d27fd13_0466_4878_be22_9a4b5595a3df_storage(self, method, url, body, headers):
        if method == 'GET':
            body = self.fixtures.load('volumes.json')
        elif method == 'POST':
            body = self.fixtures.load('create_volume.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4a4bce6b_d2ef_41f8_95cf_0e2f32996440_storage(self, method, url, body, headers):
        if method == 'GET':
            body = json.dumps({"volumes": []})
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_projects_4b653fce_6405_4300_9f7d_c587b7888fe5_storage(self, method, url, body, headers):
        if method == 'GET':
            body = json.dumps({"volumes": []})
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_storage_74f11291_fde8_4abf_8150_e51cda7308c3(self, method, url, body, headers):
        if method == 'DELETE':
            return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.NO_CONTENT])

    def _metal_v1_storage_a08aaf76_e0ce_43aa_b9cd_cce0d4ae4f4c_attachments(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('attach_volume.json')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _metal_v1_storage_a08aaf76_e0ce_43aa_b9cd_cce0d4ae4f4c(self, method, url, body, headers):
        if method == 'DELETE':
            return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.NO_CONTENT])

    def _metal_v1_storage_attachments_2c16a96f_bb4f_471b_8e2e_b5820b9e1603(self, method, url, body, headers):
        if method == 'DELETE':
            return (httplib.NO_CONTENT, '', {}, httplib.responses[httplib.NO_CONTENT])
if __name__ == '__main__':
    # Run the suite and propagate unittest's result as the process exit code.
    sys.exit(unittest.main())
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from typing import Dict, List, Optional, Tuple
from django.conf import settings
from rest_framework import serializers
from backend.components import bk_repo
from backend.helm.helm.models.chart import Chart, ChartVersion, ChartVersionSnapshot
def get_chart_version(
    project_name: str, repo_name: str, chart_name: str, version: str, username: str, password: str
) -> Dict:
    """Fetch the detail of a specific chart version from the repository API.

    :param project_name: project name
    :param repo_name: repository name
    :param chart_name: name of the chart to look up
    :param version: version of the chart to look up
    :param username: username for authenticating against the repository
    :param password: password for authenticating against the repository
    """
    client = bk_repo.BkRepoClient(username=username, password=password)
    return client.get_chart_version_detail(project_name, repo_name, chart_name, version)
def update_or_create_chart_version(chart: Chart, version_detail: Dict) -> ChartVersion:
    """Update or create the chart version record from the given version detail."""
    return ChartVersion.update_or_create_version(chart, version_detail)
def release_snapshot_to_version(chart_version_snapshot: ChartVersionSnapshot, chart: Chart) -> ChartVersion:
    """Assemble an (unsaved, id=0) ChartVersion from a snapshot's stored version detail."""
    return ChartVersion(id=0, chart=chart, keywords="chart version", **chart_version_snapshot.version_detail)
class VersionListSLZ(serializers.Serializer):
    """Serializer for a chart version entry, including its download urls."""
    name = serializers.CharField()
    version = serializers.CharField()
    created = serializers.CharField()
    urls = serializers.ListField(child=serializers.CharField())
class ReleaseVersionListSLZ(serializers.Serializer):
    """Serializer for a release's chart version entry (no urls field)."""
    name = serializers.CharField()
    version = serializers.CharField()
    created = serializers.CharField()
def sort_version_list(versions: List) -> List:
    """Sort *versions* in place by their "created" timestamp, newest first.

    Returns the same list object for call-chaining convenience.
    """
    def _created_key(entry):
        return entry["created"]
    versions.sort(key=_created_key, reverse=True)
    return versions
def get_helm_project_and_repo_name(
    project_code: str, repo_name: Optional[str] = None, is_public_repo: Optional[bool] = None
) -> Tuple[str, str]:
    """Resolve the (project name, repository name) pair for a chart repository.

    :param project_code: BCS project code
    :param repo_name: repository name
    :param is_public_repo: whether the repository is the shared/public one
    :returns: tuple of (project name, repository name)
    """
    uses_shared_repo = is_public_repo or repo_name == settings.BCS_SHARED_CHART_REPO_NAME
    if uses_shared_repo:
        return (settings.BK_REPO_SHARED_PROJECT_NAME, settings.BK_REPO_SHARED_CHART_DEPOT_NAME)
    # A project-scoped chart repository uses the project code for both names.
    return (project_code, project_code)
|
nilq/baby-python
|
python
|
import os
from collections import OrderedDict
from coverage_checker.utils import get_all_path_combinations
def test_get_all_path_combinations():
    """All facet-value combinations should be joined into '/'-separated paths, in order."""
    facets = OrderedDict([('a', ['1', '2']), ('b', ['3', '4']), ('c', ['5', '6'])])
    expected = ['1/3/5', '1/3/6', '1/4/5', '1/4/6', '2/3/5', '2/3/6', '2/4/5', '2/4/6']
    assert get_all_path_combinations(facets) == expected
|
nilq/baby-python
|
python
|
import re
from math import sqrt, atan2
if __name__ == "__main__":
    """
    This script file demonstrates how to transform raw CSI out from the ESP32 into CSI-amplitude and CSI-phase.
    """
    FILE_NAME = "./example_csi.csv"
    # FIX: the file was opened without ever being closed; a context manager
    # guarantees closure even if a line fails to parse.  Iterating the file
    # object directly also avoids loading the whole file into memory.
    with open(FILE_NAME) as f:
        for j, l in enumerate(f):
            imaginary = []
            real = []
            amplitudes = []
            phases = []
            # Parse string to create integer list
            csi_string = re.findall(r"\[(.*)\]", l)[0]
            csi_raw = [int(x) for x in csi_string.split(" ") if x != '']
            # Create list of imaginary and real numbers from CSI:
            # even indices are imaginary parts, odd indices are real parts.
            for i in range(len(csi_raw)):
                if i % 2 == 0:
                    imaginary.append(csi_raw[i])
                else:
                    real.append(csi_raw[i])
            # Transform imaginary and real into amplitude and phase
            for i in range(int(len(csi_raw) / 2)):
                amplitudes.append(sqrt(imaginary[i] ** 2 + real[i] ** 2))
                phases.append(atan2(imaginary[i], real[i]))
            print("-------------------")
            print("csi_amplitude#{}:".format(j), amplitudes)
            print("csi_phase#{}: ".format(j), phases)
            print("-------------------")
|
nilq/baby-python
|
python
|
# Recording video to a file
# https://picamera.readthedocs.io/en/release-1.13/recipes1.html#recording-video-to-a-file
import picamera
# Acquire the Raspberry Pi camera and record five seconds of 640x480 H.264 video.
camera = picamera.PiCamera()
camera.resolution = (640, 480)
camera.start_recording('output/07_video.h264')
# wait_recording blocks while the recording continues in the background.
camera.wait_recording(5)
camera.stop_recording()
|
nilq/baby-python
|
python
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to enable oauth2 settings for NPTEL."""
__author__ = [
'Abhinav Khandelwal (abhinavk@google.com)',
'Rishav Thakker (rthakker@google.com)'
]
import logging
import re
import httplib2
import appengine_config
from oauth2client.client import SignedJwtAssertionCredentials
from apiclient.discovery import build
from google.appengine.api import memcache
from modules.google_service_account.service_account_models import (
GoogleServiceAccountTypes, GoogleServiceAccountSettings,
GoogleServiceAccountSettingsDTO)
# In real life we'd check in a blank file and set up the code to error with a
# message pointing people to https://code.google.com/apis/console.
EMAIL_REGEX = re.compile(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+$")
DEFAULT_HTTP_TIMEOUT = 10
class GoogleServiceManager(object):
    """Manage all the credentials/services"""
    # Services are added to this object as and when required by the respective
    # Modules
    # Cache layout: _SERVICES[namespace][name][version] ->
    #     {'name': ..., 'version': ..., 'service': ...}
    _SERVICES = {}
    # Memcache key under which the oauth2 credentials object is stored.
    _MEMCACHE_KEY = 'service_account_credentials'
    # Lifetime of cached credentials, in seconds.
    _DEFAULT_CACHE_TTL_SECS = 3600
    @classmethod
    def _default_id_from_credential_type(cls, credential_type):
        """
        Returns the ID for the default settings object from credential type
        """
        # The credential-type string itself doubles as the datastore ID.
        return credential_type
    @classmethod
    def get_by_id(cls, id, namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Fetches an entry from the database using its ID"""
        entity = GoogleServiceAccountSettings.get_by_id(id, namespace=namespace)
        if entity:
            return GoogleServiceAccountSettingsDTO(entity)
        # Implicitly returns None when no entity exists for this id.
    @classmethod
    def update_service_account_settings(
            cls, id, namespace=appengine_config.DEFAULT_NAMESPACE_NAME,
            credential_type=None, client_email=None,
            sub_user_email=None, scope=None, client_id=None, api_key=None,
            project_id=None, project_key_id=None, private_key=None,
            auth_uri=None, token_uri=None, auth_provider_x509_cert_url=None,
            client_x509_cert_url=None):
        """Updates a GoogleServiceAccountSettings object

        Only keyword arguments that are not None are written to the entity.
        For SERVICE_ACCOUNT credentials the oauth2 credentials are
        (re)initialized before saving; returns None if that fails, otherwise
        a DTO of the saved entity.
        """
        obj = GoogleServiceAccountSettings.get_or_create(id, namespace)
        if credential_type is not None:
            obj.credential_type = credential_type
        if client_email is not None:
            obj.client_email = client_email
        if sub_user_email is not None:
            obj.sub_user_email = sub_user_email
        if scope is not None:
            obj.scope = scope
        if client_id is not None:
            obj.client_id = client_id
        if api_key is not None:
            obj.api_key = api_key
        if project_id is not None:
            obj.project_id = project_id
        if project_key_id is not None:
            obj.project_key_id = project_key_id
        if private_key is not None:
            obj.private_key = private_key
        if auth_uri is not None:
            obj.auth_uri = auth_uri
        if token_uri is not None:
            obj.token_uri = token_uri
        if auth_provider_x509_cert_url is not None:
            obj.auth_provider_x509_cert_url = auth_provider_x509_cert_url
        if client_x509_cert_url is not None:
            obj.client_x509_cert_url = client_x509_cert_url
        # call initialize_credentials again if required
        if credential_type == GoogleServiceAccountTypes.SERVICE_ACCOUNT:
            if not cls.initialize_credentials(
                    service_account_settings=obj, namespace=namespace):
                return None
        # Save and return
        obj.put()
        return GoogleServiceAccountSettingsDTO(obj)
    @classmethod
    def get_default_settings_by_type(cls, credential_type,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Returns the default settings object for a credential type"""
        id = cls._default_id_from_credential_type(credential_type)
        entry = cls.get_by_id(id, namespace=namespace)
        return entry
    @classmethod
    def get_or_create_default_settings_by_type(cls, credential_type,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """
        Returns the default settings object for a credential type.
        Creates a new object and returns it if none exist.
        """
        entry = cls.get_default_settings_by_type(credential_type, namespace)
        if not entry:
            id = cls._default_id_from_credential_type(credential_type)
            entry = cls.update_service_account_settings(
                id=id, namespace=namespace, credential_type=credential_type)
        return entry
    @classmethod
    def get_all_default_settings(
            cls, namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Returns a list of the default settings objects for each type"""
        all_settings = []
        for credential_type in GoogleServiceAccountTypes.to_dict().values():
            entity = cls.get_default_settings_by_type(
                credential_type,
                namespace)
            if entity:
                all_settings.append(entity)
        return all_settings
    @classmethod
    def update_default_settings_by_type(
            cls, namespace=appengine_config.DEFAULT_NAMESPACE_NAME,
            credential_type=None, **kwargs):
        """
        Updates the default settings object identified by type.
        Each type will have exactly one default object.
        """
        id = cls._default_id_from_credential_type(credential_type)
        kwargs['id'] = id
        kwargs['credential_type'] = credential_type
        return cls.update_service_account_settings(
            namespace=namespace, **kwargs)
    @classmethod
    def _store_credentials_in_memcache(
            cls, credentials,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Stores the credential object in memcache"""
        memcache.set(
            cls._MEMCACHE_KEY, credentials, time=cls._DEFAULT_CACHE_TTL_SECS,
            namespace=namespace)
    @classmethod
    def _get_credentials_from_memcache(
            cls, namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Gets the credentials from the memcache"""
        return memcache.get(cls._MEMCACHE_KEY, namespace=namespace)
    @classmethod
    def initialize_credentials(cls, service_account_settings=None,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Builds a decorator for using oauth2 with webapp2.RequestHandlers."""
        # In real life we'd want to make one decorator per service because
        # we wouldn't want users to have to give so many permissions.
        # Initialize more credentials here if required
        try:
            if not service_account_settings:
                service_account_settings = cls.get_default_settings_by_type(
                    GoogleServiceAccountTypes.SERVICE_ACCOUNT,
                    namespace=namespace)
            if not service_account_settings:
                raise ValueError(
                    'Default service_account Settings not found')
            key = service_account_settings.private_key
            scope = service_account_settings.scope
            client_email = service_account_settings.client_email
            sub_user_email = service_account_settings.sub_user_email
            if key and scope and client_email:
                if sub_user_email:
                    # sub= requests domain-wide delegation as sub_user_email.
                    credentials = SignedJwtAssertionCredentials(
                        client_email, key, scope=scope, sub=sub_user_email)
                else:
                    credentials = SignedJwtAssertionCredentials(
                        client_email, key, scope=scope)
                if credentials:
                    cls._store_credentials_in_memcache(
                        credentials, namespace=namespace)
                    # Reset all services
                    cls._SERVICES = {}
                    return credentials
                else:
                    raise ValueError('Could not create credentials')
            else:
                raise ValueError('Invalid default service_account settings')
        # Deliberately catch everything. pylint: disable-msg=broad-except
        except Exception as e:
            logging.error('Could not initialize Google service account '
                          'credentials.\nError: %s', e)
            return None
    @classmethod
    def _get_authorized_http_object(cls, http_obj=None,
            timeout=DEFAULT_HTTP_TIMEOUT,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME,
            *args, **kwargs):
        """Calls the authorize function of credentials"""
        if not http_obj:
            http_obj = httplib2.Http(timeout=timeout)
        credentials = cls._get_credentials_from_memcache(namespace)
        if not credentials:
            # Try initializing again
            credentials = cls.initialize_credentials(namespace=namespace)
            if not credentials:
                # Initialization failed.
                return None
        return credentials.authorize(
            http_obj, *args, **kwargs)
    @classmethod
    def _add_service(cls, name, version, service,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Adds a service to _SERVICES"""
        if namespace not in cls._SERVICES:
            cls._SERVICES[namespace] = {}
        if name not in cls._SERVICES[namespace]:
            cls._SERVICES[namespace][name] = {}
        cls._SERVICES[namespace][name][version] = {
            'name': name,
            'version': version,
            'service': service
        }
        return service
    @classmethod
    def _create_service(cls, name, version, http_obj=None,
            timeout=DEFAULT_HTTP_TIMEOUT,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """Creates and adds a service"""
        if None in (name, version):
            return None
        if http_obj is None:
            http_obj = cls._get_authorized_http_object(
                timeout=timeout,
                namespace=namespace)
            if not http_obj:
                return None
        try:
            service = build(name, version, http=http_obj)
            cls._add_service(name, version, service, namespace)
            return service
        except Exception as e:
            logging.error('Unable to initialize %s service: %s',
                          name, e)
            return None
    @classmethod
    def get_service(cls, name=None, version=None, http_obj=None,
            timeout=DEFAULT_HTTP_TIMEOUT,
            namespace=appengine_config.DEFAULT_NAMESPACE_NAME):
        """
        Returns the service from _SERVICES
        Note: run this function every time you need to use a service to avoid
        using stale services.
        """
        if namespace in cls._SERVICES:
            if name in cls._SERVICES[namespace]:
                if version in cls._SERVICES[namespace][name]:
                    service = cls._SERVICES[namespace][name][version].get(
                        'service')
                    if service:
                        return service
        # If we reach here it means service doesn't exist. Create a new service
        return cls._create_service(
            name, version, http_obj, timeout, namespace)
|
nilq/baby-python
|
python
|
# -----------------------------------------------------------------------------
# Copyright (c) 2013-2022, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
# -----------------------------------------------------------------------------
import os
from configparser import ConfigParser
from nexusformat.nexus import NeXusError
class NXSettings(ConfigParser):
    """A ConfigParser subclass that preserves the case of option names"""
    # NOTE(review): optionxform is not overridden here, so ConfigParser's
    # default lower-casing of option names still applies — confirm the
    # docstring's claim of case preservation against actual usage.
    def __init__(self, directory=None):
        # Locate (creating if necessary) the server settings directory, read
        # the existing settings.ini and ensure the expected sections exist.
        super().__init__(allow_no_value=True)
        self.directory = self.get_directory(server_directory=directory)
        self.file = os.path.join(self.directory, 'settings.ini')
        super().read(self.file)
        sections = self.sections()
        if 'setup' not in sections:
            self.add_section('setup')
        if 'nxrefine' not in sections:
            self.add_section('nxrefine')
        if 'nxreduce' not in sections:
            self.add_section('nxreduce')
        self.add_defaults()
    def get_directory(self, server_directory=None):
        """Resolve (and create if needed) the server settings directory.

        The location is remembered in ~/.nxserver/settings.ini; passing
        *server_directory* updates that record.  Raises NeXusError when no
        directory has ever been configured.
        """
        self.home_settings = ConfigParser()
        home_directory = os.path.join(os.path.abspath(os.path.expanduser('~')),
                                      '.nxserver')
        if not os.path.exists(home_directory):
            os.mkdir(home_directory)
        self.home_file = os.path.join(home_directory, 'settings.ini')
        self.home_settings.read(self.home_file)
        if 'setup' not in self.home_settings.sections():
            self.home_settings.add_section('setup')
        if server_directory:
            self.home_settings.set('setup', 'directory', server_directory)
            with open(self.home_file, 'w') as f:
                self.home_settings.write(f)
        elif self.home_settings.has_option('setup', 'directory'):
            server_directory = self.home_settings.get('setup', 'directory')
        else:
            raise NeXusError(
                "Please define settings directory - type 'nxsettings -h'")
        # Settings always live in an 'nxserver' subdirectory.
        if os.path.basename(server_directory) != 'nxserver':
            server_directory = os.path.join(server_directory, 'nxserver')
        if not os.path.exists(server_directory):
            os.mkdir(server_directory)
        return server_directory
    def add_defaults(self):
        # Seed any missing options with defaults, then persist to disk.
        if not self.has_option('setup', 'type'):
            self.set('setup', 'type', 'multicore')
        default = {'wavelength': 0.141, 'distance': 650,
                   'phi': -5.0, 'phi_end': 360.0, 'phi_step': 0.1,
                   'chi': -90.0, 'omega': 0.0, 'x': 0.0, 'y': 0.0,
                   'nsteps': 3, 'frame_rate': 10}
        for p in default:
            if not self.has_option('nxrefine', p):
                self.set('nxrefine', p, default[p])
        default = {'threshold': 50000, 'min_pixels': 10,
                   'first': 10, 'last': 3640,
                   'monitor': 'monitor2', 'norm': 30000,
                   'radius': 0.2, 'qmax': 16.0}
        for p in default:
            if not self.has_option('nxreduce', p):
                self.set('nxreduce', p, default[p])
        self.save()
    def input_defaults(self):
        # Interactively prompt for each option; empty input keeps the current value.
        for s in ['NXRefine', 'NXReduce']:
            print(f'\n{s} Parameters\n-------------------')
            s = s.lower()
            for p in self.options(s):
                value = input(f"{p} [{self.get(s, p)}]: ")
                if value:
                    self.set(s, p, value)
        self.save()
    @property
    def settings(self):
        # Snapshot of the two parameter sections as plain dictionaries.
        _settings = {}
        _settings['nxrefine'] = {k: v for (k, v) in self.items('nxrefine')}
        _settings['nxreduce'] = {k: v for (k, v) in self.items('nxreduce')}
        return _settings
    def set(self, section, option, value=None):
        # Numbers are stored in compact '%g' form; other values as str.
        if isinstance(value, int) or isinstance(value, float):
            super().set(section, option, f"{value:g}")
        elif value is not None:
            super().set(section, option, str(value))
        else:
            super().set(section, option)
    def save(self):
        # Write the current settings back to settings.ini.
        with open(self.file, 'w') as f:
            self.write(f)
|
nilq/baby-python
|
python
|
import os
import sys
from PIL import Image
import glob
import numpy as np
import h5py
import csv
import time
import zipfile
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
def reporthook(count, block_size, total_size):
    """Taken from https://blog.shichao.io/2012/10/04/progress_speed_indicator_for_urlretrieve_in_python.html
    A simple reporthook() function for urllib.urlretrieve()'s reporthook argument that shows a progressbar
    while downloading the data

    :param count: number of blocks transferred so far (0 on the first call)
    :param block_size: size of each block in bytes
    :param total_size: total size of the download in bytes
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    # FIX: guard against a zero duration — the first progress callback can
    # arrive within the same clock tick as count == 0, which previously
    # raised ZeroDivisionError when computing the speed.
    duration = max(time.time() - start_time, 1e-6)
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
def download_data():
    """Downloads and Extracts tiny-imagenet Dataset

    Skips the download when the archive or the extracted folder already
    exists in the current working directory.
    """
    if not os.path.exists(os.path.join(os.getcwd(), "tiny-imagenet-200")):
        if not os.path.exists(os.path.join(os.getcwd(), "tiny-imagenet-200.zip")):
            print ('Downloading Flowers data from http://cs231n.stanford.edu/tiny-imagenet-200.zip ...')
            urlretrieve ('http://cs231n.stanford.edu/tiny-imagenet-200.zip', 'tiny-imagenet-200.zip', reporthook)
        print ('\nExtracting tiny-imagenet-200.zip ...', end='', flush=True)
        zfile = zipfile.ZipFile (os.path.join(os.getcwd(), 'tiny-imagenet-200.zip'), 'r')
        zfile.extractall ('.')
        zfile.close()
        print ('Done')
def get_word_labels():
    """Get the wnids and label names from the words.txt file.
    # Returns
        A dictionary where keys are the wnids and values are the label names
    """
    file = open ('tiny-imagenet-200/words.txt', 'r')
    word_labels = {}
    for f in file:
        # Each line starts with the wnid; only the token immediately after
        # the separator is kept as the label (trailing newline stripped).
        f = f.split(' ')
        words = f[1]
        words = words.replace('\n', '')
        word_labels[f[0]] = words
    file.close()
    return word_labels
def get_train_wnid():
    """Extracts the wnids from the subdirectories for every image in the train folder
    # Returns
        A dictionary where keys are the image names and values are the wnids
    """
    wnid_labels = {}
    for subdir, dirs, files in os.walk('tiny-imagenet-200/train'):
        for filename in files:
            if filename.endswith(('.txt')):
                file = open(subdir + '/' +filename, 'r')
                for line in file:
                    line = line.split(' ')
                    # The containing directory's name is the wnid for every
                    # image listed in its annotation file.
                    wnid_labels[line[0]] = subdir.split('/')[-1]
                file.close()
    return wnid_labels
def get_val_wnid():
    """Extracts the wnids from the val_annotations.txt file for every image in the val folder
    # Returns
        A dictionary where keys are the image names and values are the wnids
    """
    file = open('tiny-imagenet-200/val/val_annotations.txt', 'r')
    wnid_labels = {}
    for f in file:
        # Only the first two columns (image name, wnid) are used.
        f = f.split(' ')
        wnid_labels[f[0]] = f[1]
    file.close()
    return wnid_labels
def load_labels():
    """Collect image-to-wnid mappings for the train and val splits.
    # Returns
        train_wnid: A dictionary where keys are the training image names and values are the wnids
        val_wnid: A dictionary where keys are the validation image names and values are the wnids
        uniq_wnids: A list of all the wnids
    """
    train_mapping = get_train_wnid()
    val_mapping = get_val_wnid()
    uniq_wnids = list(set(list(train_mapping.values()) + list(val_mapping.values())))
    return train_mapping, val_mapping, uniq_wnids
def load_images (folder, wnid_labels, uniq_wnids, train_val):
    """loads the images from a given folder
    # Arguments
        folder: directory where the images are stored
        wnid_labels: A dictionary where keys are the image names and values are the wnids
        uniq_wnids: A list of all the wnids
        train_val: split label ('Training'/'Validation'), used only in the progress message
    # Returns
        images: A numpy array of the images
        image_names: A numpy array of the image names
        labels: A numpy array of the labels
        wnids: A numpy array of the wnids
        label_names: A numpy array of the label names
    """
    print ('Loading {} images ... '.format(train_val), end='', flush=True)
    word_labels = get_word_labels()
    images = []
    labels = []
    wnids = []
    label_names = []
    image_names = []
    for subdir, dirs, files in os.walk(folder):
        for filename in files:
            if filename.endswith(('.JPEG', '.jpeg', '.JPG', '.jpg', '.PNG', '.png')):
                img = Image.open(subdir + '/' + filename)
                np_img = np.array(img)
                # Grayscale images are stacked to three identical channels so
                # every entry shares the same channel layout.
                if np_img.ndim == 2:
                    np_img = np.dstack([np_img]*3)
                images.append(np_img)
                filename = filename.split("/")[-1]
                # The numeric label is the index of this image's wnid in uniq_wnids.
                labels.append(uniq_wnids.index(wnid_labels[filename]))
                image_names.append(np.string_(filename))
                wnids.append(np.string_(wnid_labels [filename]))
                label_names.append(np.string_(word_labels [wnid_labels[filename]]))
                img.close()
    # if (len(images)%5000) is 0: print ('{} imges processed'.format(len(images)))
    images = np.array(images)
    labels = np.array(labels)
    wnids = np.array(wnids)
    image_names = np.array(image_names)
    label_names = np.array(label_names)
    # print ('Image processing finished')
    print ('Done')
    return images, image_names, labels, wnids, label_names
def h5_creator (filename, x, y, image_names=np.array([]), wnids=np.array([]), label_names=np.array([]) ):
    """Creates a H5 file and datasets with all the arguments.
    # Arguments
        filename: name of the h5 file
        x: A numpy array of the images
        y: A numpy array of the labels
        image_names: A numpy array of the image names
        wnids: A numpy array of the wnids
        label_names: A numpy array of the label names
    """
    print ('Creating {} ... '.format(filename), end='', flush=True)
    with h5py.File(filename, 'w') as hf:
        hf.create_dataset('x', compression="gzip", data=x)
        hf.create_dataset('y', compression="gzip", data=y)
        hf.create_dataset('image_names', compression="gzip", data=image_names)
        hf.create_dataset('label_names', compression="gzip", data=label_names)
        hf.create_dataset('wnids', compression="gzip", data=wnids)
        # NOTE(review): this close() is redundant — the `with` block already
        # closes the file on exit.
        hf.close()
    print ('Done')
def load_data(expanded=False):
    """Download the dataset, then load all images and labels.
    # Arguments
        expanded: Boolean, whether to also return names/wnids/label names
    # Returns
        Tuple of Numpy arrays:
        (x_train, y_train), (x_val, y_val) by default, or the expanded
        per-split tuples when `expanded` is True.
    """
    download_data()
    train_wnid_labels, val_wnid_labels, uniq_wnids = load_labels()
    x_val, val_image_names, y_val, val_wnids, val_label_names = load_images(
        'tiny-imagenet-200/val', val_wnid_labels, uniq_wnids, 'Validation')
    x_train, train_image_names, y_train, train_wnids, train_label_names = load_images(
        'tiny-imagenet-200/train', train_wnid_labels, uniq_wnids, 'Training')
    if expanded:
        return (x_train, y_train, train_image_names, train_wnids, train_label_names), \
               (x_val, y_val, val_image_names, val_wnids, val_label_names)
    return (x_train, y_train), (x_val, y_val)
def create_h5(expanded=True):
    """Build val.h5 and train.h5 from the dataset (expanded metadata by default)."""
    if not expanded:
        (x_train, y_train), (x_val, y_val) = load_data(expanded=False)
        h5_creator('val.h5', x_val, y_val)
        h5_creator('train.h5', x_train, y_train)
        return
    (x_train, y_train, train_image_names, train_wnids, train_label_names), \
        (x_val, y_val, val_image_names, val_wnids, val_label_names) = load_data(expanded=True)
    h5_creator('val.h5', x_val, y_val, val_image_names, val_wnids, val_label_names)
    h5_creator('train.h5', x_train, y_train, train_image_names, train_wnids, train_label_names)
# Build the HDF5 files when this module is run as a script.
if __name__ == '__main__':
    create_h5()
|
nilq/baby-python
|
python
|
import sys
# Expose the public API.
from ehrpreper.api import *
# Check major python version
if sys.version_info[0] < 3:
    raise Exception("Ehrpreper does not support Python 2. Please upgrade to Python 3.")
# Check minor python version
# NOTE(review): this branch assumes the major version is exactly 3; a future
# major release (e.g. 4.x) would trip the minor check — confirm intent.
elif sys.version_info[1] < 6:
    raise Exception(
        "Ehrpreper only supports Python 3.6 and beyond. "
        "Use a later version of Python"
    )
# Set the version attribute of the library
import pkg_resources
import configparser
# Get the current version from the config.ini bundled with the package
config = configparser.ConfigParser()
config.read([pkg_resources.resource_filename("ehrpreper", "config.ini")])
__version__ = config.get("ehrpreper", "version")
|
nilq/baby-python
|
python
|
__author__ ='Jacques Saraydaryan'
class ColorRange():
    # Simple value holder for a hue interval (min_H..max_H) with a label.
    # NOTE(review): despite its name, getColor() is a setter — it assigns the
    # three fields and returns nothing; confirm callers before renaming.
    min_H=0
    max_H=0
    label=''
    def getColor(self,minH,maxH,label):
        self.min_H=minH
        self.max_H=maxH
        self.label=label
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
import rospy, std_msgs.msg
from sensor_msgs.msg import Temperature
# Publisher for the smoothed temperature, plus the running average state
# shared with callback() via module-level globals.
pub = rospy.Publisher('henri/temp_average', Temperature, queue_size=10)
average = 0
variance = 0
def callback(data):
    """Fold an incoming temperature reading into the running state and republish it."""
    global average, variance, pub
    rospy.loginfo('Temperature Received: %f', data.temperature)
    # NOTE(review): this is a pairwise mean with the previous value (an
    # exponential-style smoothing), not an arithmetic mean over all samples —
    # confirm this is the intended "average".
    average = (average + data.temperature)/2
    variance = (variance + data.variance)/2
    # Republish the smoothed reading with a fresh timestamp.
    t = Temperature()
    h = std_msgs.msg.Header()
    h.stamp = rospy.Time.now()
    t.header = h
    t.temperature = average
    t.variance = variance
    pub.publish(t)
def listen_temp():
    """Initialise the ROS node, subscribe to raw temperatures and spin forever."""
    rospy.init_node('temperature_monitor_py', anonymous=True)
    rospy.Subscriber('henri/temperature', Temperature, callback)
    rospy.spin()
# Start the subscriber node when run as a script.
if __name__ == '__main__':
    listen_temp()
|
nilq/baby-python
|
python
|
from flask import Flask
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_uploads import UploadSet,configure_uploads,IMAGES
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_simplemde import SimpleMDE
from flask_mail import Mail
from dotenv import load_dotenv
import os
load_dotenv()
# Mail credentials are read from the environment (.env), not hard-coded.
MAIL_USERNAME = os.getenv("MAIL_USERNAME")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
# Flask extension singletons; each is bound to the app in create_app().
db = SQLAlchemy()
photos = UploadSet('photos',IMAGES)
bcrypt = Bcrypt()
bootstrap = Bootstrap()
simple = SimpleMDE()
mail = Mail()
login_manager = LoginManager()
# Unauthenticated users are redirected to the auth blueprint's login view.
login_manager.login_view = 'auth.login'
login_manager.login_message_category = 'info'
login_manager.session_protection = 'strong'
def create_app(config_name):
    """Application factory: build and configure the Flask app for *config_name*."""
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    # Register blueprints (imported lazily to avoid circular imports).
    from .main import main as main_bp
    app.register_blueprint(main_bp)
    from .emails import email as email_bp
    app.register_blueprint(email_bp)
    from .auth import auth as auth_bp
    app.register_blueprint(auth_bp,url_prefix='/authenticate')
    # Bind the shared extension singletons to this app instance.
    db.init_app(app)
    configure_uploads(app,photos)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    bootstrap.init_app(app)
    simple.init_app(app)
    mail.init_app(app)
    return app
|
nilq/baby-python
|
python
|
# import numpy as np
# import matplotlib.pyplot as plt
# import cv2
# img = cv2.imread('8.jpeg',0)
# dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)
# magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
# plt.subplot(121),plt.imshow(img, cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
# plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
# plt.show()
import cv2
import numpy as np
import matplotlib.pyplot as plt
def fftImage(gray_img, row, col):
    """Compute the DFT of a grayscale image, zero-padded to optimal FFT sizes."""
    rPadded = cv2.getOptimalDFTSize(row)
    cPadded = cv2.getOptimalDFTSize(col)
    imgPadded = np.zeros((rPadded, cPadded), np.float32)
    imgPadded[:row, :col] = gray_img
    fft_img = cv2.dft(imgPadded, flags=cv2.DFT_COMPLEX_OUTPUT) # output is complex-valued: two channels (real, imaginary)
    return fft_img
def amplitudeSpectrum(fft_img):
    """Return the per-pixel magnitude of a two-channel (real, imaginary) DFT image."""
    real_part = np.power(fft_img[:, :, 0], 2.0)
    imag_part = np.power(fft_img[:, :, 1], 2.0)
    return np.sqrt(real_part + imag_part)
def graySpectrum(amplitude):
    """Log-compress an amplitude spectrum and rescale it for display."""
    # log(1 + A) compresses the spectrum's huge dynamic range.
    amplitude = np.log(amplitude+1)
    # NOTE(review): cv2.normalize's positional signature is
    # (src, dst, alpha, beta, norm_type); here 0 lands on `dst` and 1 on
    # `alpha` — confirm this call actually min-max normalizes to [0, 1]
    # as apparently intended.
    spectrum = cv2.normalize(amplitude, 0, 1, cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    spectrum *= 255
    return spectrum
def phaseSpectrum(fft_img):
    """Return the per-pixel phase angle (in degrees) of a two-channel DFT image."""
    imag_part = fft_img[:, :, 1]
    real_part = fft_img[:, :, 0]
    phase = np.arctan2(imag_part, real_part)
    return phase*180/np.pi
# Multiply the image matrix by (-1)^(r+c) to centre the spectrum
def stdFftImage(img_gray, row, col):
    """Return the centred, log-scaled amplitude spectrum of img_gray."""
    fimg = np.copy(img_gray)
    fimg = fimg.astype(np.float32)
    # Negating pixels where (r + c) is odd is the spatial-domain equivalent
    # of shifting the DC component to the centre of the spectrum.
    for r in range(row):
        for c in range(col):
            if(r+c)%2:
                fimg[r][c] = -1*img_gray[r][c]
    fft_img = fftImage(fimg, row, col)
    amplitude = amplitudeSpectrum(fft_img)
    ampSpectrum = graySpectrum(amplitude)
    return ampSpectrum
def GaussianHighFilter(image, d):
    """Apply a Gaussian high-pass filter of cutoff *d* in the frequency domain.

    :param image: 2-D grayscale image array
    :param d: Gaussian cutoff (in pixels from the spectrum centre)
    :returns: the filtered spatial-domain image (magnitude of the inverse FFT)
    """
    f = np.fft.fft2(image)
    fshift = np.fft.fftshift(f)

    def make_transform_matrix(d):
        # Build the high-pass mask H(u, v) = 1 - exp(-D^2 / (2 d^2)).
        transmatrix = np.zeros(image.shape)
        # FIX: the centre must come from the image being filtered. The
        # original read the module-level global `s1` (the log-magnitude of a
        # padded, two-channel FFT), which has the wrong shape and raises
        # NameError when this function runs before that global exists.
        center_point = tuple(map(lambda x: (x - 1) / 2, image.shape))

        # Hoisted out of the loop body: Euclidean distance between two points.
        def cal_distance(pa, pb):
            from math import sqrt
            return sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)

        for i in range(transmatrix.shape[0]):
            for j in range(transmatrix.shape[1]):
                dis = cal_distance(center_point, (i, j))
                transmatrix[i, j] = 1 - np.exp(-(dis ** 2) / (2 * (d ** 2)))
        return transmatrix

    d_matrix = make_transform_matrix(d)
    out_img = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift * d_matrix)))
    return out_img
if __name__ == "__main__":
    # Load the image as grayscale and display its amplitude/phase spectra.
    img_gray = cv2.imread("8.jpeg", 0)
    row, col = img_gray.shape[:2]
    fft_img = fftImage(img_gray, row, col)
    amplitude = amplitudeSpectrum(fft_img)
    ampSpectrum = graySpectrum(amplitude)
    phaSpectrum = phaseSpectrum(fft_img)
    ampSpectrum_center = stdFftImage(img_gray, row, col)
    cv2.imshow("img_gray", img_gray)
    cv2.imshow("ampSpectrum", ampSpectrum)
    cv2.imshow("ampSpectrum_center", ampSpectrum_center)
    cv2.imshow("phaSpectrum", phaSpectrum)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # s1: log-magnitude of the (padded, two-channel) FFT — a 3-D array kept
    # at module level.
    s1 = np.log(np.abs(fft_img))
    # Show the high-pass result at three different cutoffs side by side.
    img_d1 = GaussianHighFilter(img_gray,10)
    img_d2 = GaussianHighFilter(img_gray,30)
    img_d3 = GaussianHighFilter(img_gray,50)
    plt.subplot(131)
    plt.axis("off")
    plt.imshow(img_d1,cmap="gray")
    plt.title('D_10')
    plt.subplot(132)
    plt.axis("off")
    plt.title('D_30')
    plt.imshow(img_d2,cmap="gray")
    plt.subplot(133)
    plt.axis("off")
    plt.title("D_50")
    plt.imshow(img_d3,cmap="gray")
    plt.show()
|
nilq/baby-python
|
python
|
from flask import json, render_template, g, abort
from flask_login import current_user, login_required
import urllib, json
from thanados import app
from thanados.models.entity import Data
@app.route('/vocabulary/')
def vocabulary():
    """Render the vocabulary page: a tree of every type hierarchy in use."""
    hierarchytypes = app.config["HIERARCHY_TYPES"]
    systemtypes = app.config["SYSTEM_TYPES"]
    customtypes = app.config["CUSTOM_TYPES"]
    valuetypes = app.config["VALUE_TYPES"]
    # Distinct union of all configured top-level type ids.
    alltypesused = list(set().union(hierarchytypes, systemtypes, customtypes, valuetypes))
    parenttree = []
    # Select every type whose top-most ancestor is one of the configured ids.
    sql_list = """
        SELECT name, id, name_path FROM (
        SELECT name, id::INTEGER, path, name_path, left(path, strpos(path, ' >') -1)::INTEGER AS
        topparent FROM thanados.types_all WHERE path LIKE '%%>%%'
        UNION ALL
        SELECT name, id::INTEGER, path, name_path, PATH::INTEGER AS topparent FROM
        thanados.types_all WHERE path NOT LIKE '%%>%%' ORDER BY name_path) tp
        WHERE topparent IN %(list)s
        """
    g.cursor.execute(sql_list, {'list': tuple(alltypesused)})
    results = g.cursor.fetchall()
    Typelist = []
    for row in results:
        Typelist.append({'label': row.name, 'path': row.name_path, 'id': row.id})
    def makeparents(typelist, typeClass):
        # Build one root tree node per top-level type id and recurse.
        for id in typelist:
            sql_tree = "SELECT name, id FROM thanados.types_all WHERE id = %(id)s ORDER BY name"
            g.cursor.execute(sql_tree, {'id': id})
            results = g.cursor.fetchone()
            if results:
                node = {
                    'text': results.name,
                    'id': results.id,
                    'type': typeClass,
                    'class': 'treenode'
                }
                maketree(id, node, typeClass)
                parenttree.append(node)
    def maketree(id, node, typeClass):
        # Recursively attach child types to node['nodes'].
        sql_tree = """
            SELECT name, id FROM thanados.types_all WHERE parent_id = %(id)s ORDER BY name
            """
        g.cursor.execute(sql_tree, {'id': id})
        results = g.cursor.fetchall()
        if results:
            node['nodes'] = []
            for row in results:
                currentnode = {
                    'text': row.name,  # + getEntCount(row.id),
                    'id': row.id,
                    'type': typeClass,
                    'class': 'treenode'
                }
                node['nodes'].append(currentnode)
                maketree(row.id, currentnode, typeClass)
    tabsToCreate = ['Main classes', 'Types', 'Value types']
    makeparents(hierarchytypes, 'Main classes')
    #makeparents(systemtypes, 'Standard') #uncomment to display system types
    makeparents(customtypes, 'Types')
    makeparents(valuetypes, 'Value types')
    # return json.dumps(parenttree)
    return render_template('vocabulary/vocabulary.html', tree=parenttree, tabsToCreate=tabsToCreate, typelist=Typelist)
@app.route('/vocabulary/<int:object_id>')
@app.route('/vocabulary/<int:object_id>/<format_>')
def vocabulary_view(object_id: int, format_=None):
    """Render the detail page (or raw JSON) for one OpenAtlas type entity.

    Loads the E55 type with id *object_id*, then assembles: its dating, its
    top-level hierarchy ("topparent"), external reference systems (Wikidata /
    Getty AAT enrichment included), attached files with licenses, direct and
    recursive subtypes, entities using the type, and two tree structures for
    the frontend. Returns JSON when *format_* == 'json', otherwise renders
    'vocabulary/view.html'.
    """
    # No-op assignment kept from the original source.
    object_id = object_id
    loc_image = app.config["API_FILE_DISPLAY"]
    use_api = app.config["USE_API"]
    use_jpgs = app.config["USE_JPGS"]
    # When the API is not used, images are served from a local folder instead.
    if not use_api:
        if use_jpgs:
            loc_image = app.config["JPG_FOLDER_PATH"] + '/'
        else:
            loc_image = app.config["WEB_FOLDER_PATH"] + '/'
    if not object_id:
        return render_template('vocabulary/vocabulary.html')
    # get dataset for type entity
    sql_base = 'SELECT * FROM model.entity WHERE id = %(object_id)s;'
    g.cursor.execute(sql_base, {'object_id': object_id})
    output_base = g.cursor.fetchone()
    # Extract only the year parts of the four begin/end bounds.
    sql_date = """
            SELECT
                date_part('year', begin_from) AS begin_from,
                date_part('year', begin_to) AS begin_to,
                date_part('year', end_from) AS end_from,
                date_part('year', end_to) AS end_to
            FROM model.entity WHERE id = %(object_id)s;
        """
    g.cursor.execute(sql_date, {'object_id': object_id})
    output_date = g.cursor.fetchone()
    # check if exists
    # NOTE(review): 403 is returned for a missing entity — confirm 404 was not intended.
    if not output_base:
        abort(403)
    # check if type class
    CRMclass = output_base.cidoc_class_code
    if CRMclass not in ['E55']:
        abort(403)
    # External reference systems (gazetteers) linked to this type.
    extrefs = """
            SELECT jsonb_agg(jsonb_strip_nulls(jsonb_build_object(
                'identifier', t.identifier,
                'domain', t.name,
                'website', t.website,
                'about', t.description,
                'SKOS', t.skos,
                'url', t.url,
                'icon', r.icon_url
                ))) AS ext_types
            FROM thanados.ext_types t JOIN thanados.refsys r ON t.id = r.entity_id
            WHERE t.type_id = %(object_id)s;
    """
    g.cursor.execute(extrefs, {'object_id': object_id})
    extresult = g.cursor.fetchone()
    # get top parent
    sql_topparent = """
        SELECT topparent FROM (
                 SELECT id::INTEGER, path, name_path, left(path, strpos(path, ' >') -1)::INTEGER AS
                 topparent FROM thanados.types_all WHERE path LIKE '%%>%%'
                 UNION ALL
                 SELECT id::INTEGER, path, name_path, PATH::INTEGER AS topparent FROM
                 thanados.types_all WHERE path NOT LIKE '%%>%%' ORDER BY name_path) tp
        WHERE id = %(object_id)s"""
    g.cursor.execute(sql_topparent, {'object_id': object_id})
    topparent = g.cursor.fetchone().topparent
    g.cursor.execute('select name, description, id from model.entity WHERE id = %(object_id)s',
                     {'object_id': topparent})
    topparent = g.cursor.fetchone()
    # Hierarchy metadata (single/multiple selection, category) for the root.
    sql_topparent_info = """
        select e.name, e.description, e.id, h.multiple, h.category
        from model.entity e JOIN web.hierarchy h ON e.id = h.id WHERE e.id = %(topparent)s
    """
    g.cursor.execute(sql_topparent_info, {'topparent': topparent.id})
    result = g.cursor.fetchone()
    # Rebuild `topparent` as a plain dict for the template/JSON output.
    topparent = {}
    topparent['id'] = result.id
    topparent['name'] = result.name
    topparent['description'] = result.description
    if result.multiple:
        multi = 'multiple selection'
    else:
        multi = 'single selection'
    # NOTE: local `type` shadows the builtin; maps hierarchy category to a label.
    type = ''
    if result.category == 'standard':
        type = 'Classification'
    if result.category == 'value':
        type = 'Value type'
    elif result.category == 'custom':
        type = 'Type'
    topparent['selection'] = multi
    topparent['type'] = type
    topparent['forms'] = []
    # OpenAtlas classes this hierarchy can be used with.
    sql_forms = """
        select openatlas_class_name as name FROM
        web.hierarchy_openatlas_class WHERE hierarchy_id = %(topparent)s
    """
    g.cursor.execute(sql_forms, {'topparent': topparent['id']})
    forms_used = g.cursor.fetchall()
    for row in forms_used:
        topparent['forms'].append(row.name)
    # get parent and path
    sql_path_parent = 'SELECT name_path, parent_id FROM thanados.types_all WHERE id = %(object_id)s;'
    g.cursor.execute(sql_path_parent, {'object_id': object_id})
    output_path_parent = g.cursor.fetchone()
    # get name of parent
    sql_parentname = 'SELECT name FROM thanados.types_all WHERE id = %(object_id)s;'
    g.cursor.execute(sql_parentname, {'object_id': output_path_parent.parent_id})
    output_parentname = g.cursor.fetchone()
    #define time
    # Only bounds actually set on the entity are included.
    time = {}
    if output_base.begin_from:
        time['earliest_begin'] = output_date.begin_from
    if output_base.begin_to:
        time['latest_begin'] = output_date.begin_to
    if output_base.end_from:
        time['earliest_end'] = output_date.end_from
    if output_base.end_to:
        time['latest_end'] = output_date.end_to
    # define json
    data = {}
    data['id'] = output_base.id
    data['name'] = output_base.name
    data['path'] = output_path_parent.name_path
    if output_base.description:
        data['description'] = output_base.description
    if output_path_parent.parent_id:
        data['parent'] = output_path_parent.parent_id
        data['parent_name'] = output_parentname.name
    if len(time) > 0:
        data['time'] = time
    credits = None
    license = None
    # Enrich external references; Wikidata/Getty lookups are skipped for JSON output.
    if extresult.ext_types:
        data['gazetteers'] = []
        gazetteers = extresult.ext_types
        for row in gazetteers:
            if 'about' in row:
                about = row['about']
            else:
                about = row['domain']
            if row['website']:
                about = row['domain'] + ': ' + row['website']
            if 'SKOS' in row:
                SKOS = row['SKOS']
            else:
                SKOS = None
            extid = {'SKOS': SKOS, 'url': row['url'], 'about': about, 'domain': row['domain'],
                     'identifier': row['identifier']}
            if row['domain'] == 'Wikidata' and format_ != 'json':
                extid['description'] = Data.getWikidata(row['identifier'])['description']
                extid['label'] = Data.getWikidata(row['identifier'])['label']
                extid['image'] = Data.getWikidataimage(row['identifier'])
                # Build photo credit and license strings from Wikimedia image
                # metadata; every key may be absent, hence the nested fallbacks.
                if extid['image']:
                    try:
                        credits = extid['image']['metadata']['Artist']['value']
                        try:
                            credits = credits + '<br>Credit: ' + extid['image']['metadata']['Credit']['value']
                        except KeyError:
                            credits = extid['image']['metadata']['Artist']['value']
                    except KeyError:
                        try:
                            credits = extid['image']['metadata']['Credit']['value']
                        except KeyError:
                            credits = 'Author unknown'
                    try:
                        license = '<a href="' + extid['image']['metadata']['LicenseUrl']['value'] + '" target="blank_">'
                        try:
                            license = license + extid['image']['metadata']['LicenseShortName']['value'] + '</a>'
                        except KeyError:
                            license = ''
                    except KeyError:
                        try:
                            license = extid['image']['metadata']['LicenseShortName']['value']
                        except KeyError:
                            license = '<a href="'+ extid['image']['origin'] +'">' + extid['image']['origin'] + '</a>'
            if row['icon']:
                extid['favicon'] = row['icon']
            data['gazetteers'].append(extid)
            if row['domain'] == 'Getty AAT' and format_ != 'json':
                gettydata = Data.getGettyData(row['identifier'])
                extid['description'] = gettydata['description']
                extid['label'] = gettydata['label']
                extid['qualifier'] = gettydata['qualifier']
    # get subtypes
    sql_children = 'SELECT id, name FROM thanados.types_all WHERE parent_id = %(object_id)s;'
    g.cursor.execute(sql_children, {'object_id': object_id})
    output_children = g.cursor.fetchall()
    if output_children:
        data['children'] = []
        for row in output_children:
            data['children'].append({'id': row.id, 'name': row.name})
    # get files
    sql_files = """SELECT
                m.id
            FROM model.entity m JOIN model.link l ON m.id = l.domain_id
            WHERE l.range_id = %(object_id)s AND l.property_code = 'P67' AND m.openatlas_class_name =
            'file'
            """
    g.cursor.execute(sql_files, {'object_id': object_id})
    output_files = g.cursor.fetchall()
    # get file license
    sql_filelicense = """
        SELECT
        name AS license, name_path::TEXT, t.id::INTEGER AS licId, domain_id::INTEGER
        FROM thanados.types_all t JOIN model.link l ON t.id = l.range_id WHERE l.domain_id =
        %(file_id)s AND l.property_code = 'P2' AND t.name_path LIKE 'License >%%'
    """
    # define files
    if output_files:
        data['files'] = []
        # get file references
        sql_file_refs = """
            SELECT
            r.description AS title,
            l.description AS reference
            FROM model.entity r JOIN model.link l ON r.id = l.domain_id
            WHERE l.range_id = %(file_id)s AND l.property_code = 'P67'
        """
        for row in output_files:
            file_name = (Data.get_file_path(row.id))
            # NOTE(review): debug print left in place; consider logging instead.
            print(file_name)
            file_id = (row.id)
            file = {'id': file_id, 'file_name': (loc_image + file_name)}
            g.cursor.execute(sql_file_refs, {'file_id': file_id})
            output_file_refs = g.cursor.fetchone()
            g.cursor.execute(sql_filelicense, {'file_id': file_id})
            output_filelicense = g.cursor.fetchone()
            if output_file_refs:
                if output_file_refs.title:
                    file['source'] = output_file_refs.title
                if output_file_refs.reference:
                    file['reference'] = output_file_refs.reference
            # add licence information
            if output_filelicense:
                file['license'] = output_filelicense.license
                file['licenseId'] = output_filelicense.licid
            data['files'].append(file)
    # get all subtypes recursively
    # Matches ids whose materialized path contains this id as an ancestor.
    sql_subtypesrec = """
        SELECT id from thanados.types_all WHERE path LIKE %(type_name)s OR path LIKE
        %(type_name2)s OR id = %(type_id)s
    """
    entlist = []
    g.cursor.execute(sql_subtypesrec,
                     {'type_id': object_id, 'type_name': '%> ' + str(output_base.id) + ' >%',
                      'type_name2': str(output_base.id) + ' >%'})
    output_subtypesrec = g.cursor.fetchall()
    if output_subtypesrec:
        data['types_recursive'] = []
        for row in output_subtypesrec:
            data['types_recursive'].append(row.id)
            entlist.append(row.id)
    entlist = tuple(entlist)
    # get all entitites with this type
    sql_entities = """
        SELECT child_id, child_name, maintype, type, type_id, min, lon, lat, context,
        filename, openatlas_class_name FROM
        thanados.searchdata s
        WHERE type_id IN %(type_id)s AND s.site_id IN %(site_ids)s
    """
    # First pass: entities tagged with exactly this type.
    g.cursor.execute(sql_entities, {'type_id': tuple([object_id]), 'site_ids': tuple(g.site_list)})
    output_direct_ents = g.cursor.fetchall()
    if output_direct_ents:
        data['entities'] = []
        for row in output_direct_ents:
            data['entities'].append({'id': row.child_id, 'name': row.child_name, 'main_type':
                row.maintype, 'type': row.type, 'type_id': row.type_id, 'value': row.min,
                                     'lon': row.lon,
                                     'lat': row.lat, 'context': row.context, 'file': row.filename,
                                     'openatlas_class_name':
                                         row.openatlas_class_name})
    # Second pass: entities tagged with this type or any of its subtypes.
    g.cursor.execute(sql_entities, {'type_id': entlist, 'site_ids': tuple(g.site_list)})
    output_direct_ents = g.cursor.fetchall()
    if output_direct_ents:
        data['entities_recursive'] = []
        for row in output_direct_ents:
            data['entities_recursive'].append({'id': row.child_id, 'name': row.child_name,
                                               'main_type':
                                                   row.maintype, 'type': row.type,
                                               'type_id': row.type_id, 'value': row.min,
                                               'lon': row.lon,
                                               'lat': row.lat, 'context': row.context,
                                               'file': row.filename,
                                               'openatlas_class_name':
                                                   row.openatlas_class_name})
    # get type tree
    def getchildren(id, node):
        """Recursively attach link-bearing child nodes for the sidebar tree."""
        sql_getChildren = """
            SELECT name, id FROM thanados.types_all WHERE parent_id = %(id)s ORDER BY name
        """
        g.cursor.execute(sql_getChildren, {'id': id})
        results = g.cursor.fetchall()
        if results:
            node['nodes'] = []
            for row in results:
                currentnode = {'text': row.name,
                               'class': 'treenode',
                               'href': '/vocabulary/%r' % row.id,
                               'openNodeLinkOnNewTab': False}
                node['nodes'].append(currentnode)
                getchildren(row.id, currentnode)
    tree = [{
        'text': data['name'],
        'class': 'toptreenode'
    }]
    getchildren(object_id, tree[0])
    # Graph representation (vis.js style nodes/edges); the current type is highlighted.
    hierarchy = {}
    currentcolor = '#97C2FC'
    if object_id == topparent['id']:
        currentcolor = '#ff8c8c'
    alltreeNodes = [{'id': topparent['id'], 'label': topparent['name'], 'color' : currentcolor}]
    alltreeEdges = []
    def getTree(id):
        """Recursively collect nodes/edges of the whole hierarchy graph."""
        sql_getChildren = """
            SELECT DISTINCT name, id FROM thanados.types_all WHERE parent_id = %(id)s ORDER BY name
        """
        g.cursor.execute(sql_getChildren, {'id': id})
        results = g.cursor.fetchall()
        if results:
            for row in results:
                currentcolor = '#97C2FC';
                if row.id == object_id:
                    currentcolor= '#ff8c8c'
                currentnode = {'id': row.id, 'label': row.name, 'color' : currentcolor}
                currentedge = {'from': id, 'to': row.id, 'color': '#757575'}
                alltreeNodes.append(currentnode)
                alltreeEdges.append(currentedge)
                getTree(row.id)
    getTree(topparent['id'])
    hierarchy['nodes'] = alltreeNodes
    hierarchy['edges'] = alltreeEdges
    data['topparent'] = topparent
    data['tree'] = tree
    data['hierarchy'] = hierarchy
    if format_ == 'json':
        return json.dumps(data)
    if object_id:
        return render_template('vocabulary/view.html', object_id=object_id, data=data,
                               children=len(output_children), credit=credits, license=license,
                               children_recursive=len(entlist), webfolder=app.config["WEB_FOLDER_PATH"])
|
nilq/baby-python
|
python
|
# TensorFlow and tf.keras
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# Display the image, labeled with the predicted label (blue if accurate to true label, red if not)
def plot_image(i, predictions_array, true_label, img):
    """Show test image *i* with a caption naming the predicted class.

    The caption is blue when the prediction matches the true label and red
    otherwise, and includes the model's confidence as a percentage.
    Relies on the module-level ``class_names`` list.
    """
    actual = true_label[i]
    picture = img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(picture, cmap=plt.cm.binary)
    guess = np.argmax(predictions_array)
    color = 'blue' if guess == actual else 'red'
    caption = "{} {:2.0f}% ({})".format(class_names[guess],
                                        100 * np.max(predictions_array),
                                        class_names[actual])
    plt.xlabel(caption, color=color)
# Bar graph of the full set of 10 class predictions for Model Predictions.
# Color the predicted label red and the true label blue (override predicted label red if accurate).
def plot_value_array(i, predictions_array, true_label):
    """Draw a 10-bar chart of class probabilities for test sample *i*.

    The predicted class bar is colored red, then the true class bar blue —
    so a correct prediction shows a single blue bar.
    """
    actual = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(predictions_array)
    bars[guess].set_color('red')
    bars[actual].set_color('blue')
print(tf.__version__)
# --------------- Import and load the Fashion MNIST data directly from TensorFlow ----------------
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# label numbers correspond to their respective classes of clothing
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# ------------------------ Explore the format of the dataset ----------------------
# (60000, 28, 28) --> 60,000 images in the training set, with each image represented as 28 x 28 pixels
print(train_images.shape)
# 60000 --> there are 60,000 corresponding labels in the training set
print(len(train_labels))
# array([9, 0, 0, ..., 3, 0, 5], dtype=uint8) --> each label is a number between 0 and 9
print(train_labels)
# (10000, 28, 28) --> 10,000 images in the test set, each image is represented as 28 x 28 pixels
print(test_images.shape)
# 10000 --> test set contains 10,000 images labels
print(len(test_labels))
# ------------------------------ Preprocess the data ----------------------------
# pixel values fall in the range of 0 to 255
# Scale these values to a range of 0 to 1 before feeding them to the neural network model
train_images = train_images / 255.0
test_images = test_images / 255.0
# verify data is in correct format and that you're ready to build and train the network
# display the first 25 images from the training set and display the class name below each image
plt.figure(figsize=(10,10))
for i in range(25):
  plt.subplot(5,5,i+1)
  plt.xticks([])
  plt.yticks([])
  plt.grid(False)
  plt.imshow(train_images[i], cmap=plt.cm.binary)
  plt.xlabel(class_names[train_labels[i]])
plt.show()
# ------------------------------- Build and train the model ---------------------------------
# set up the layers, which extract representations from the data fed into them
model = tf.keras.Sequential([
    # the Flatten layer transforms the format of the images from a two-dimensional array
    # (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels)
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    # two densely connected, or fully connected, neural layers
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
])
# compile the model and add a few more settings
# from_logits=True because the final Dense layer has no softmax activation
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# train the model by feeding it the training data
# as the model trains, the loss and accuracy metrics are displayed
model.fit(train_images, train_labels, epochs=10)
# evaluate accuracy against the test data
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# ------------------------------- Make Predictions from Model ---------------------------------
# attach a softmax layer to convert the model's linear outputs—logits—to probabilities
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])
# predict the label for each image in the testing set into a prediction array of 10 numbers
predictions = probability_model.predict(test_images)
# Plot the first X test images, their predicted labels, and the true labels.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
# The loop below was modified to display test samples 9000-9014 instead of the
# first num_images samples; subplot indices are shifted back by 9000 so the
# 5x3 grid (image + bar chart per sample) is still filled from position 1.
# MODIFICATION: for i in range(num_images):
for i in range(9000,9015):
  # MODIFICATION: plt.subplot(num_rows, 2*num_cols, 2*i+1)
  plt.subplot(num_rows, 2*num_cols, 2*(i - 9000)+1)
  plot_image(i, predictions[i], test_labels, test_images)
  # MODIFICATION: plt.subplot(num_rows, 2*num_cols, 2*i+2)
  plt.subplot(num_rows, 2*num_cols, 2*(i-9000)+2)
  plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# ----------------------------------- Use the Trained Model -------------------------------------
# Finally, use the trained model to make a prediction about a single image.
# Grab an image from the test dataset.
img = test_images[1]
# (28, 28)
print(img.shape)
# Add the image to a batch where it's the only member.
# tf.keras models are optimized to make predictions on a batch, or collection, of examples at once
img = (np.expand_dims(img,0))
# (1, 28, 28)
print(img.shape)
# now predict the correct label for this image
predictions_single = probability_model.predict(img)
# [[8.26038831e-06 1.10213664e-13 9.98591125e-01 1.16777841e-08 1.29609776e-03 2.54965649e-11
#  1.04560357e-04 7.70050608e-19 4.55051066e-11 3.53864888e-17]]
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
plt.show()
|
nilq/baby-python
|
python
|
import os
import os.path as osp
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from torchvision.transforms import functional as F
import numpy as np
import numpy.linalg as LA
import cv2
import json
import csv
import matplotlib.pyplot as plt
from pylsd import lsd
import datasets.transforms as T
def center_crop(img):
    """Crop ``img`` (H x W x C ndarray) to its largest centered square.

    The square side equals min(height, width); the longer axis is trimmed
    around the center with a floor bias toward the top/left.
    """
    h, w = img.shape[0], img.shape[1]
    side = min(h, w)
    if h > w:
        top = int(np.floor(h / 2 - side / 2))
        left = 0
    else:
        top = 0
        left = int(np.floor(w / 2 - side / 2))
    return img[top:top + side, left:left + side, :]
def create_masks(image):
    """Return an all-zero uint8 mask tensor of shape (1, H, W) for *image*.

    BUG FIX: the original body referenced undefined names ``height`` and
    ``width`` (neither parameters nor module globals), so any call raised
    NameError. The dimensions are now taken from ``image.shape``.
    """
    height, width = image.shape[0], image.shape[1]
    return torch.zeros((1, height, width), dtype=torch.uint8)
def filter_length(segs, min_line_length=10):
    """Drop segments not strictly longer than *min_line_length*.

    *segs* is an (N, >=4) array of [x1, y1, x2, y2, ...] rows; only the
    first four columns of the surviving rows are returned.
    """
    keep = LA.norm(segs[:, 2:4] - segs[:, :2], axis=1) > min_line_length
    return segs[keep][:, :4]
def normalize_segs(segs, pp, rho):
    """Shift segment endpoints by the principal point *pp* and scale by *rho*.

    *pp* is an (x, y) pair applied to both endpoints of every segment.
    """
    center = np.asarray([pp[0], pp[1], pp[0], pp[1]], dtype=np.float32)
    return rho * (segs - center)
def normalize_safe_np(v, axis=-1, eps=1e-6):
    """Normalize *v* along *axis*, clamping tiny norms to *eps*.

    Prevents division by zero for (near-)zero vectors, which come back
    unchanged up to the eps scaling.
    """
    norms = np.maximum(LA.norm(v, axis=axis, keepdims=True), eps)
    return v / norms
def segs2lines_np(segs):
    """Convert endpoint segments to normalized homogeneous line coefficients.

    Each [x1, y1, x2, y2] row is lifted to two homogeneous points and their
    cross product gives the line through them; the result is unit-normalized
    via normalize_safe_np.
    """
    w = np.ones((len(segs), 1))
    p1 = np.hstack([segs[:, :2], w])
    p2 = np.hstack([segs[:, 2:], w])
    return normalize_safe_np(np.cross(p1, p2))
def sample_segs_np(segs, num_sample, use_prob=True):
    """Return exactly *num_sample* segments plus a validity mask.

    If fewer than *num_sample* segments exist, the result is zero-padded and
    the mask marks only the real rows. Otherwise segments are sampled with
    replacement, weighted by their length. (*use_prob* is accepted for
    interface compatibility but unused, as in the original.)
    """
    count = len(segs)
    if num_sample > count:
        sampled = np.zeros([num_sample, 4], dtype=np.float32)
        mask = np.zeros([num_sample, 1], dtype=np.float32)
        sampled[:count] = segs
        mask[:count] = 1.0
        return sampled, mask
    lengths = LA.norm(segs[:, 2:] - segs[:, :2], axis=-1)
    prob = lengths / np.sum(lengths)
    picks = np.random.choice(count, num_sample, replace=True, p=prob)
    return segs[picks], np.ones([num_sample, 1], dtype=np.float32)
def sample_vert_segs_np(segs, thresh_theta=22.5):
    """Keep segments whose line direction is within *thresh_theta* degrees of vertical.

    The angle is computed from the homogeneous line coefficients (a, b) as
    arctan2(|b|, |a|); small angles correspond to near-vertical segments.
    """
    lines = segs2lines_np(segs)
    angle = np.arctan2(np.abs(lines[:, 1]), np.abs(lines[:, 0]))
    return segs[angle < np.radians(thresh_theta)]
class ImageDataset(Dataset):
    """Single-image dataset producing line segments for vanishing-point estimation.

    Wraps one image file: detects LSD line segments, normalizes them to the
    principal point, samples fixed-size sets of all/vertical segments, and
    returns the transformed image plus a target dict of tensors/metadata.
    """
    def __init__(self, cfg, image_path, return_masks=False, transform=None):
        # Network input resolution and line-sampling configuration.
        self.input_width = cfg.DATASETS.INPUT_WIDTH
        self.input_height = cfg.DATASETS.INPUT_HEIGHT
        self.min_line_length = cfg.DATASETS.MIN_LINE_LENGTH
        self.num_input_lines = cfg.DATASETS.NUM_INPUT_LINES
        self.num_input_vert_lines = cfg.DATASETS.NUM_INPUT_VERT_LINE
        self.vert_line_angle = cfg.DATASETS.VERT_LINE_ANGLE
        self.return_vert_lines = cfg.DATASETS.RETURN_VERT_LINES
        self.return_masks = return_masks
        self.transform = transform
        # Single-element list so the Dataset protocol (len/getitem) works.
        self.list_filename = [image_path,]
    def __getitem__(self, idx):
        """Load, preprocess and return (image, extra, target) for item *idx*."""
        target = {}
        extra = {}
        filename = self.list_filename[idx]
        image = cv2.imread(filename)
        assert image is not None, print(filename)
        image = image[:,:,::-1] # convert to rgb
        org_image = image
        org_h, org_w = image.shape[0], image.shape[1]
        org_sz = np.array([org_h, org_w])
        crop_image = center_crop(org_image)
        crop_h, crop_w = crop_image.shape[0], crop_image.shape[1]
        crop_sz = np.array([crop_h, crop_w])
        # NOTE: the network input is resized from the full image, not the crop.
        image = cv2.resize(image, dsize=(self.input_width, self.input_height))
        input_sz = np.array([self.input_height, self.input_width])
        # preprocess
        ratio_x = float(self.input_width)/float(org_w)
        ratio_y = float(self.input_height)/float(org_h)
        pp = (org_w/2, org_h/2)
        rho = 2.0/np.minimum(org_w,org_h)
        # detect line and preprocess
        # NOTE(review): COLOR_BGR2GRAY is applied to an image already flipped
        # to RGB above; harmless for LSD but verify the intended channel order.
        gray = cv2.cvtColor(org_image, cv2.COLOR_BGR2GRAY)
        org_segs = lsd(gray, scale=0.5)
        org_segs = filter_length(org_segs, self.min_line_length)
        num_segs = len(org_segs)
        assert len(org_segs) > 10, print(len(org_segs))
        segs = normalize_segs(org_segs, pp=pp, rho=rho)
        # whole segs
        sampled_segs, line_mask = sample_segs_np(
            segs, self.num_input_lines)
        sampled_lines = segs2lines_np(sampled_segs)
        # vertical directional segs
        vert_segs = sample_vert_segs_np(segs, thresh_theta=self.vert_line_angle)
        # Fall back to all segments when too few near-vertical ones exist.
        if len(vert_segs) < 2:
            vert_segs = segs
        sampled_vert_segs, vert_line_mask = sample_segs_np(
            vert_segs, self.num_input_vert_lines)
        sampled_vert_lines = segs2lines_np(sampled_vert_segs)
        if self.return_masks:
            masks = create_masks(image)
        image = np.ascontiguousarray(image)
        # Emit either the vertical-only or the full segment set, as configured.
        if self.return_vert_lines:
            target['segs'] = torch.from_numpy(np.ascontiguousarray(sampled_vert_segs)).contiguous().float()
            target['lines'] = torch.from_numpy(np.ascontiguousarray(sampled_vert_lines)).contiguous().float()
            target['line_mask'] = torch.from_numpy(np.ascontiguousarray(vert_line_mask)).contiguous().float()
        else:
            target['segs'] = torch.from_numpy(np.ascontiguousarray(sampled_segs)).contiguous().float()
            target['lines'] = torch.from_numpy(np.ascontiguousarray(sampled_lines)).contiguous().float()
            target['line_mask'] = torch.from_numpy(np.ascontiguousarray(line_mask)).contiguous().float()
        if self.return_masks:
            target['masks'] = masks
        target['org_img'] = org_image
        target['org_sz'] = org_sz
        target['crop_sz'] = crop_sz
        target['input_sz'] = input_sz
        target['img_path'] = filename
        target['filename'] = filename
        extra['lines'] = target['lines'].clone()
        extra['line_mask'] = target['line_mask'].clone()
        return self.transform(image, extra, target)
    def __len__(self):
        """Number of items — always 1 for this single-image dataset."""
        return len(self.list_filename)
def make_transform():
    """Build the eval transform: to-tensor followed by ImageNet normalization."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    return T.Compose([T.ToTensor(), T.Normalize(mean, std)])
def build_image(image_path, cfg):
    """Construct an ImageDataset for a single image using config *cfg*."""
    transform = make_transform()
    return ImageDataset(cfg, image_path,
                        return_masks=cfg.MODELS.MASKS,
                        transform=transform)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-07-10 14:13:05
import os
import sys
from os import path
import re
import tempfile
import shutil
import time
'''
clean idea project files
param: max_depth -> max depth for recursively, default=3
param: permanently -> move to system tmp dir or permanently delete,
default=False
'''
def clean(start_dir, max_depth=3, permanently=False):
    """Remove IntelliJ/IDEA project artifacts (.idea, *.iml, build) under *start_dir*.

    Walks at most *max_depth* directory levels below *start_dir*. Matching
    entries are either permanently deleted (*permanently*=True) or moved to a
    timestamped backup directory under the system temp dir. `.git` trees are
    never descended into.

    BUG FIXES vs. the original:
    - walked/reported the module-level global ``start`` instead of the
      ``start_dir`` parameter, so the function only worked from __main__;
    - used Python-2-only ``except shutil.Error, e`` syntax (SyntaxError on
      Python 3);
    - removed entries from ``dirs`` while iterating it, which can skip
      entries; pruning now uses slice assignment so os.walk honors it;
    - moved files into a backup directory that was never created.
    """
    idea_pattern = re.compile(r'.*\.iml|build$|\.idea')
    deleted = []
    backup_dir_name = 'clean_idea_backup_%s' % str(time.time())
    backup_dir = path.join(tempfile.gettempdir(), backup_dir_name)
    for root, dirs, files in os.walk(start_dir, topdown=True):
        # Prune .git in place so os.walk does not descend into it.
        dirs[:] = [d for d in dirs if d != '.git']
        level = root.replace(start_dir, '').count(os.sep)
        if level >= max_depth:
            continue
        for name in dirs + files:
            if idea_pattern.match(name):
                target = path.join(root, name)
                deleted.append(target)
                try:
                    if permanently:
                        if path.isfile(target):
                            os.remove(target)
                        else:
                            shutil.rmtree(target)
                    else:
                        # Ensure the backup directory exists before moving into it.
                        if not path.isdir(backup_dir):
                            os.makedirs(backup_dir)
                        shutil.move(target, path.join(backup_dir, name))
                    print("delete %s" % target)
                except (shutil.Error, OSError) as e:
                    # Best-effort cleanup: report and continue with other entries.
                    print('delete error: %s' % e)
    if deleted:
        print('cleaned in %s' % start_dir)
        print('backup to %s' % backup_dir)
    else:
        print('no idea files in %s' % start_dir)
if __name__ == '__main__':
    # BUG FIX: the original usage string opened with four quotes
    # (''''Usage...'), embedding a stray apostrophe in the help text.
    usage = '''Usage: %s dir
    Be careful, this script will remove all files and directories
    named .idea/*.iml/build
    ''' % path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(1)
    start = path.abspath(sys.argv[1])
    clean(start)
|
nilq/baby-python
|
python
|
import os
from .handler import QuickOpenHandler
from ._version import get_versions
from notebook.utils import url_path_join
# Package version as computed by versioneer; the helper is dropped afterwards
# so it is not exposed as part of the module namespace.
__version__ = get_versions()['version']
del get_versions
def _jupyter_server_extension_paths():
"""Defines the entrypoint for the Jupyter server extension."""
return [{
"module": "jupyterlab_quickopen"
}]
def load_jupyter_server_extension(nb_app):
    """Registers the quick open API handler to receive HTTP requests from the frontend extension.

    Refuses to register when the contents manager does not serve from the
    local notebook directory, since scanning would be meaningless then.

    Parameters
    ----------
    nb_app: notebook.notebookapp.NotebookApp
        Notebook application instance
    """
    serves_local_files = (os.path.isdir(nb_app.notebook_dir)
                          and nb_app.contents_manager.root_dir == nb_app.notebook_dir)
    if not serves_local_files:
        nb_app.log.info(f'Refusing to register QuickOpenHandler extension: '
                        f'{nb_app.contents_manager} does not appear to load from the local filesystem')
        return
    webapp = nb_app.web_app
    route_pattern = url_path_join(webapp.settings['base_url'], '/api/quickopen')
    webapp.add_handlers('.*$', [(route_pattern, QuickOpenHandler)])
    nb_app.log.info(f'Registered QuickOpenHandler extension at URL path {route_pattern} '
                    f'to serve results of scanning local path {nb_app.notebook_dir}')
|
nilq/baby-python
|
python
|
from abc import ABC
from typing import Type
from bokeh.models.glyph import Glyph
from bokeh.models.renderers import GlyphRenderer
from xbokeh.common.assertions import assert_type
class Renderer(ABC):
    """Thin wrapper around a bokeh GlyphRenderer exposing its data and glyph.

    Subclasses pin the expected glyph type via the *type_* constructor argument.
    """
    def __init__(self, type_: Type, renderer: GlyphRenderer) -> None:
        """
        :type_: expected concrete Glyph subclass of ``renderer.glyph``
        :renderer: instance of GlyphRenderer whose data_source holds dict data,
            ex) data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}
        """
        super().__init__()
        # Fail fast on wiring mistakes: wrong renderer, wrong glyph type,
        # or a non-dict data source.
        assert_type(renderer, "renderer", GlyphRenderer)
        assert_type(renderer.glyph, "renderer.glyph", type_)
        assert_type(renderer.data_source.data, "self._renderer.data_source.data", dict)
        self._renderer = renderer
        self._glyph: Glyph = renderer.glyph
    @property
    def data(self) -> dict:
        """Current data dict of the underlying ColumnDataSource."""
        return self._renderer.data_source.data
    def set_data(self, data: dict):
        """Replace the data source contents with *data* (must be a dict)."""
        assert_type(data, "data", dict)
        self._renderer.data_source.data = data
    def set_property(self, **kwargs):
        """
        Updates the model's property
        """
        self._glyph.update(**kwargs)
    def clear(self):
        """Empty the renderer by setting empty x/y columns."""
        self.set_data({"x": [], "y": []})
|
nilq/baby-python
|
python
|
from math import log
from utils import iter_primes
__author__ = 'rafa'
def algorithm(limit):
    """Return the LCM of 1..limit as a product of maximal prime powers.

    For every prime p <= limit, multiply in p**floor(log_p(limit)) — the
    largest power of p not exceeding limit. Relies on utils.iter_primes
    yielding primes in ascending order.
    """
    result = 1
    for prime in iter_primes():
        if prime > limit:
            return result
        result *= prime ** int(log(limit, prime))
def solver():
    """
    2520 is the smallest number that can be divided by each of the numbers from
    1 to 10 without any remainder.
    What is the smallest positive number that is evenly divisible by all of the
    numbers from 1 to 20?
    """
    # Sanity-check against the known example before solving the real case.
    assert algorithm(10) == 2520
    return algorithm(20)
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def plot_time_series(x: np.ndarray, title=None) -> None:
    """Plot a 1-D time series with a marker at each integer time step.

    :param x: 1-D array of observations x_0 .. x_{T-1}
    :param title: optional figure title
    """
    # Seaborn styling: larger fonts on a plain white background.
    sns.set(font_scale=1.5)
    sns.set_style("white")
    # One sample per integer time step 0 .. len(x)-1.
    t = np.arange(start=0, stop=x.shape[0])
    plt.plot(t, x, linestyle='-', marker='o')
    plt.title(title)
    plt.xlabel(r'$t$')
    plt.ylabel(r'$x_t$')
    plt.show()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.